aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-11-10 12:33:06 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-11-10 12:33:06 -0500
commit3e82806b97398d542a5e03bd94861f79ce10ecee (patch)
tree467753d23d422fc42a07992ac25cae7889e48c18
parentbd4f203e433387d39be404b67ad02acf6f76b7bc (diff)
parent816d2206f0f9953ca854e4ff1a2749a5cbd62715 (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie: "I Was Almost Tempted To Capitalise Every Word, but then I decided I couldn't read it myself! I've also got one pull request for the sti driver outstanding. It relied on a commit in Greg's tree and I didn't find out in time, that commit is in your tree now so I might send that along once this is merged. I also had the accidental misfortune to have access to a Skylake on my desk for a few days, and I've had to encourage Intel to try harder, which seems to be happening now. Here is the main drm-next pull request for 4.4. Highlights: New driver: vc4 driver for the Raspberry Pi VPU. (From Eric Anholt at Broadcom.) Core: Atomic fbdev support Atomic helpers for runtime pm dp/aux i2c STATUS_UPDATE handling struct_mutex usage cleanups. Generic OF probing support. Documentation: Kerneldoc for VGA switcheroo code. Rename to gpu instead of drm to reflect scope. i915: Skylake GuC firmware fixes HPD A support VBT backlight fallbacks Fastboot by default for some systems FBC work BXT/SKL workarounds Skylake deeper sleep state fixes amdgpu: Enable GPU scheduler by default New atombios opcodes GPUVM debugging options Stoney support. Fencing cleanups. radeon: More efficient CS checking nouveau: gk20a instance memory handling improvements. Improved PGOB detection and GK107 support Kepler GDDR5 PLL stability improvement G8x/GT2xx reclock improvements new userspace API compatibility fixes. virtio-gpu: Add 3D support - qemu 2.5 has it merged for its gtk backend. msm: Initial msm88896 (snapdragon 8200) exynos: HDMI cleanups Enable mixer driver by default Add DECON-TV support vmwgfx: Move to using memremap + fixes. rcar-du: Add support for R8A7793/4 DU armada: Remove support for non-component mode Improved plane handling Power savings while in DPMS off. 
tda998x: Remove unused slave encoder support Use more HDMI helpers Fix EDID read handling dwhdmi: Interlace video mode support for ipu-v3/dw_hdmi Hotplug state fixes Audio driver integration imx: More color formats support. tegra: Minor fixes/improvements" [ Merge fixup: remove unused variable 'dev' that had all uses removed in commit 4e270f088011: "drm/gem: Drop struct_mutex requirement from drm_gem_mmap_obj" ] * 'drm-next' of git://people.freedesktop.org/~airlied/linux: (764 commits) drm/vmwgfx: Relax irq locking somewhat drm/vmwgfx: Properly flush cursor updates and page-flips drm/i915/skl: disable display side power well support for now drm/i915: Extend DSL readout fix to BDW and SKL. drm/i915: Do graphics device reset under forcewake drm/i915: Skip fence installation for objects with rotated views (v4) vga_switcheroo: Drop client power state VGA_SWITCHEROO_INIT drm/amdgpu: group together common fence implementation drm/amdgpu: remove AMDGPU_FENCE_OWNER_MOVE drm/amdgpu: remove now unused fence functions drm/amdgpu: fix fence fallback check drm/amdgpu: fix stoping the scheduler timeout drm/amdgpu: cleanup on error in amdgpu_cs_ioctl() drm/i915: Fix locking around GuC firmware load drm/amdgpu: update Fiji's Golden setting drm/amdgpu: update Fiji's rev id drm/amdgpu: extract common code in vi_common_early_init drm/amd/scheduler: don't oops on failure to load drm/amdgpu: don't oops on failure to load (v2) drm/amdgpu: don't VT switch on suspend ...
-rw-r--r--Documentation/DocBook/Makefile2
-rw-r--r--Documentation/DocBook/gpu.tmpl (renamed from Documentation/DocBook/drm.tmpl)209
-rw-r--r--Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt65
-rw-r--r--Documentation/devicetree/bindings/display/msm/hdmi.txt3
-rw-r--r--Documentation/devicetree/bindings/display/msm/mdp.txt3
-rw-r--r--Documentation/devicetree/bindings/display/renesas,du.txt14
-rw-r--r--Documentation/kernel-parameters.txt15
-rw-r--r--MAINTAINERS2
-rw-r--r--arch/arm/configs/exynos_defconfig1
-rw-r--r--drivers/dma-buf/fence.c98
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/Makefile6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h104
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c397
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c72
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c71
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c149
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_smc.c60
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c245
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c264
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c247
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c828
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c175
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c213
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c50
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_1_d.h2791
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_1_enum.h6808
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_1_sh_mask.h21368
-rw-r--r--drivers/gpu/drm/amd/include/atombios.h2
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c52
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.h7
-rw-r--r--drivers/gpu/drm/armada/Kconfig9
-rw-r--r--drivers/gpu/drm/armada/Makefile3
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c258
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.h34
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h16
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c221
-rw-r--r--drivers/gpu/drm/armada/armada_output.c142
-rw-r--r--drivers/gpu/drm/armada/armada_output.h33
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c147
-rw-r--r--drivers/gpu/drm/armada/armada_slave.c139
-rw-r--r--drivers/gpu/drm/armada/armada_slave.h26
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c8
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c6
-rw-r--r--drivers/gpu/drm/bridge/Kconfig12
-rw-r--r--drivers/gpu/drm/bridge/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/dw_hdmi-ahb-audio.c653
-rw-r--r--drivers/gpu/drm/bridge/dw_hdmi-audio.h14
-rw-r--r--drivers/gpu/drm/bridge/dw_hdmi.c391
-rw-r--r--drivers/gpu/drm/bridge/dw_hdmi.h3
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c1
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c1
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c4
-rw-r--r--drivers/gpu/drm/drm_atomic.c28
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c325
-rw-r--r--drivers/gpu/drm/drm_bufs.c6
-rw-r--r--drivers/gpu/drm/drm_crtc.c148
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c44
-rw-r--r--drivers/gpu/drm/drm_drv.c67
-rw-r--r--drivers/gpu/drm/drm_edid.c60
-rw-r--r--drivers/gpu/drm/drm_edid_load.c43
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c212
-rw-r--r--drivers/gpu/drm/drm_gem.c47
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c3
-rw-r--r--drivers/gpu/drm/drm_internal.h2
-rw-r--r--drivers/gpu/drm/drm_ioc32.c6
-rw-r--r--drivers/gpu/drm/drm_ioctl.c93
-rw-r--r--drivers/gpu/drm/drm_irq.c367
-rw-r--r--drivers/gpu/drm/drm_memory.c6
-rw-r--r--drivers/gpu/drm/drm_mm.c6
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c2
-rw-r--r--drivers/gpu/drm/drm_of.c88
-rw-r--r--drivers/gpu/drm/drm_pci.c11
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c6
-rw-r--r--drivers/gpu/drm/drm_platform.c3
-rw-r--r--drivers/gpu/drm/drm_rect.c4
-rw-r--r--drivers/gpu/drm/drm_sysfs.c49
-rw-r--r--drivers/gpu/drm/drm_vm.c8
-rw-r--r--drivers/gpu/drm/drm_vma_manager.c40
-rw-r--r--drivers/gpu/drm/exynos/Kconfig75
-rw-r--r--drivers/gpu/drm/exynos/Makefile3
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c324
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c16
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c28
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c45
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.h5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c53
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c16
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c275
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c20
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c496
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c17
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.h20
-rw-r--r--drivers/gpu/drm/exynos/regs-hdmi.h33
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c7
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h6
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c8
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.h6
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c6
-rw-r--r--drivers/gpu/drm/i2c/ch7006_mode.c26
-rw-r--r--drivers/gpu/drm/i2c/ch7006_priv.h12
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c487
-rw-r--r--drivers/gpu/drm/i915/Makefile4
-rw-r--r--drivers/gpu/drm/i915/dvo.h4
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7017.c4
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c4
-rw-r--r--drivers/gpu/drm/i915/dvo_ivch.c12
-rw-r--r--drivers/gpu/drm/i915/dvo_ns2501.c4
-rw-r--r--drivers/gpu/drm/i915/dvo_sil164.c4
-rw-r--r--drivers/gpu/drm/i915/dvo_tfp410.c4
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c17
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c264
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c245
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c83
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h149
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c272
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c31
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c45
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c167
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence.c47
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c878
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h77
-rw-r--r--drivers/gpu/drm/i915/i915_gem_shrinker.c19
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c181
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c333
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c78
-rw-r--r--drivers/gpu/drm/i915/i915_guc_reg.h21
-rw-r--r--drivers/gpu/drm/i915/i915_guc_submission.c975
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c686
-rw-r--r--drivers/gpu/drm/i915/i915_params.c43
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h593
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c45
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c34
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h78
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.h34
-rw-r--r--drivers/gpu/drm/i915/intel_acpi.c3
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.c13
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.c6
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c47
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c43
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h3
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c15
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c41
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c182
-rw-r--r--drivers/gpu/drm/i915/intel_display.c929
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c838
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c26
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h101
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c323
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.h7
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_pll.c243
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c64
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c264
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c23
-rw-r--r--drivers/gpu/drm/i915/intel_guc.h124
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fwif.h31
-rw-r--r--drivers/gpu/drm/i915/intel_guc_loader.c608
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c306
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c54
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c427
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h18
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c50
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c9
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c92
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c525
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c569
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c18
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c262
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h15
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c492
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c51
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c59
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c14
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c49
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c16
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c65
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c16
-rw-r--r--drivers/gpu/drm/mga/mga_dma.c4
-rw-r--r--drivers/gpu/drm/mga/mga_drv.h6
-rw-r--r--drivers/gpu/drm/mga/mga_irq.c20
-rw-r--r--drivers/gpu/drm/msm/Kconfig14
-rw-r--r--drivers/gpu/drm/msm/Makefile2
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h9
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h27
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx.xml.h15
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h13
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h9
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h238
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h8
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c6
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h21
-rw-r--r--drivers/gpu/drm/msm/edp/edp.xml.h8
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c17
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h8
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h8
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h8
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c10
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h86
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c95
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h11
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c46
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c211
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c8
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_common.xml.h15
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_kms.h6
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c36
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c5
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/os.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h15
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c84
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c30
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c41
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sysfs.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c31
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c83
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c141
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c123
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c121
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c371
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c64
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv50.c)14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c119
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c18
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c16
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c12
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c7
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c14
-rw-r--r--drivers/gpu/drm/r128/r128_cce.c12
-rw-r--r--drivers/gpu/drm/r128/r128_drv.h6
-rw-r--r--drivers/gpu/drm/r128/r128_irq.c16
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c1
-rw-r--r--drivers/gpu/drm/radeon/cayman_blit_shaders.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c32
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_shaders.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c104
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c37
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h6
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq.c38
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c138
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h8
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c10
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c48
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.c5
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c45
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c89
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c3
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c6
-rw-r--r--drivers/gpu/drm/sis/sis_drv.h4
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c16
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.h4
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c4
-rw-r--r--drivers/gpu/drm/tegra/dc.c16
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c3
-rw-r--r--drivers/gpu/drm/tegra/drm.c37
-rw-r--r--drivers/gpu/drm/tegra/fb.c1
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c6
-rw-r--r--drivers/gpu/drm/vc4/Kconfig13
-rw-r--r--drivers/gpu/drm/vc4/Makefile17
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c52
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c672
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c39
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c298
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h145
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c590
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c163
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c67
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c320
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h570
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c55
-rw-r--r--drivers/gpu/drm/via/via_drv.h10
-rw-r--r--drivers/gpu/drm/via/via_irq.c17
-rw-r--r--drivers/gpu/drm/virtio/Makefile3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c57
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c28
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h72
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c41
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c573
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c133
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c11
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c71
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c322
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c72
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h43
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c24
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c132
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c38
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c113
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c31
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c2
-rw-r--r--drivers/gpu/host1x/hw/debug_hw.c2
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x01_sync.h8
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x02_sync.h8
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x04_sync.h8
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c5
-rw-r--r--drivers/gpu/ipu-v3/ipu-cpmem.c87
-rw-r--r--drivers/gpu/ipu-v3/ipu-csi.c5
-rw-r--r--drivers/gpu/ipu-v3/ipu-dc.c15
-rw-r--r--drivers/gpu/ipu-v3/ipu-di.c129
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c382
-rw-r--r--drivers/gpu/vga/vgaarb.c4
-rw-r--r--drivers/platform/x86/apple-gmux.c2
-rw-r--r--include/drm/drmP.h48
-rw-r--r--include/drm/drm_agpsupport.h57
-rw-r--r--include/drm/drm_atomic_helper.h12
-rw-r--r--include/drm/drm_crtc.h18
-rw-r--r--include/drm/drm_dp_helper.h9
-rw-r--r--include/drm/drm_edid.h5
-rw-r--r--include/drm/drm_fb_helper.h31
-rw-r--r--include/drm/drm_gem.h5
-rw-r--r--include/drm/drm_modeset_lock.h10
-rw-r--r--include/drm/drm_of.h13
-rw-r--r--include/drm/drm_plane_helper.h2
-rw-r--r--include/drm/drm_vma_manager.h24
-rw-r--r--include/drm/i915_component.h65
-rw-r--r--include/linux/fb.h2
-rw-r--r--include/linux/fence.h25
-rw-r--r--include/linux/vga_switcheroo.h100
-rw-r--r--include/uapi/drm/Kbuild1
-rw-r--r--include/uapi/drm/amdgpu_drm.h2
-rw-r--r--include/uapi/drm/drm_mode.h42
-rw-r--r--include/uapi/drm/i810_drm.h2
-rw-r--r--include/uapi/drm/i915_drm.h3
-rw-r--r--include/uapi/drm/nouveau_drm.h8
-rw-r--r--include/uapi/drm/r128_drm.h2
-rw-r--r--include/uapi/drm/savage_drm.h2
-rw-r--r--include/uapi/drm/sis_drm.h4
-rw-r--r--include/uapi/drm/via_drm.h4
-rw-r--r--include/uapi/drm/virtgpu_drm.h167
-rw-r--r--include/uapi/linux/virtio_gpu.h112
-rw-r--r--include/video/exynos5433_decon.h29
-rw-r--r--sound/pci/hda/hda_controller.h2
-rw-r--r--sound/pci/hda/hda_intel.c15
-rw-r--r--sound/pci/hda/hda_intel.h2
468 files changed, 53338 insertions, 9797 deletions
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index d2544961b67a..91f6d89bb19f 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -14,7 +14,7 @@ DOCBOOKS := z8530book.xml device-drivers.xml \
14 genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \ 14 genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
15 80211.xml debugobjects.xml sh.xml regulator.xml \ 15 80211.xml debugobjects.xml sh.xml regulator.xml \
16 alsa-driver-api.xml writing-an-alsa-driver.xml \ 16 alsa-driver-api.xml writing-an-alsa-driver.xml \
17 tracepoint.xml drm.xml media_api.xml w1.xml \ 17 tracepoint.xml gpu.xml media_api.xml w1.xml \
18 writing_musb_glue_layer.xml crypto-API.xml iio.xml 18 writing_musb_glue_layer.xml crypto-API.xml iio.xml
19 19
20include Documentation/DocBook/media/Makefile 20include Documentation/DocBook/media/Makefile
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/gpu.tmpl
index 9ddf8c6cb887..201dcd3c2e9d 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/gpu.tmpl
@@ -2,9 +2,9 @@
2<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN" 2<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
3 "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []> 3 "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
4 4
5<book id="drmDevelopersGuide"> 5<book id="gpuDevelopersGuide">
6 <bookinfo> 6 <bookinfo>
7 <title>Linux DRM Developer's Guide</title> 7 <title>Linux GPU Driver Developer's Guide</title>
8 8
9 <authorgroup> 9 <authorgroup>
10 <author> 10 <author>
@@ -40,6 +40,16 @@
40 </address> 40 </address>
41 </affiliation> 41 </affiliation>
42 </author> 42 </author>
43 <author>
44 <firstname>Lukas</firstname>
45 <surname>Wunner</surname>
46 <contrib>vga_switcheroo documentation</contrib>
47 <affiliation>
48 <address>
49 <email>lukas@wunner.de</email>
50 </address>
51 </affiliation>
52 </author>
43 </authorgroup> 53 </authorgroup>
44 54
45 <copyright> 55 <copyright>
@@ -51,6 +61,10 @@
51 <year>2012</year> 61 <year>2012</year>
52 <holder>Laurent Pinchart</holder> 62 <holder>Laurent Pinchart</holder>
53 </copyright> 63 </copyright>
64 <copyright>
65 <year>2015</year>
66 <holder>Lukas Wunner</holder>
67 </copyright>
54 68
55 <legalnotice> 69 <legalnotice>
56 <para> 70 <para>
@@ -69,6 +83,13 @@
69 <revremark>Added extensive documentation about driver internals. 83 <revremark>Added extensive documentation about driver internals.
70 </revremark> 84 </revremark>
71 </revision> 85 </revision>
86 <revision>
87 <revnumber>1.1</revnumber>
88 <date>2015-10-11</date>
89 <authorinitials>LW</authorinitials>
90 <revremark>Added vga_switcheroo documentation.
91 </revremark>
92 </revision>
72 </revhistory> 93 </revhistory>
73 </bookinfo> 94 </bookinfo>
74 95
@@ -78,9 +99,9 @@
78 <title>DRM Core</title> 99 <title>DRM Core</title>
79 <partintro> 100 <partintro>
80 <para> 101 <para>
81 This first part of the DRM Developer's Guide documents core DRM code, 102 This first part of the GPU Driver Developer's Guide documents core DRM
82 helper libraries for writing drivers and generic userspace interfaces 103 code, helper libraries for writing drivers and generic userspace
83 exposed by DRM drivers. 104 interfaces exposed by DRM drivers.
84 </para> 105 </para>
85 </partintro> 106 </partintro>
86 107
@@ -138,14 +159,10 @@
138 <para> 159 <para>
139 At the core of every DRM driver is a <structname>drm_driver</structname> 160 At the core of every DRM driver is a <structname>drm_driver</structname>
140 structure. Drivers typically statically initialize a drm_driver structure, 161 structure. Drivers typically statically initialize a drm_driver structure,
141 and then pass it to one of the <function>drm_*_init()</function> functions 162 and then pass it to <function>drm_dev_alloc()</function> to allocate a
142 to register it with the DRM subsystem. 163 device instance. After the device instance is fully initialized it can be
143 </para> 164 registered (which makes it accessible from userspace) using
144 <para> 165 <function>drm_dev_register()</function>.
145 Newer drivers that no longer require a <structname>drm_bus</structname>
146 structure can alternatively use the low-level device initialization and
147 registration functions such as <function>drm_dev_alloc()</function> and
148 <function>drm_dev_register()</function> directly.
149 </para> 166 </para>
150 <para> 167 <para>
151 The <structname>drm_driver</structname> structure contains static 168 The <structname>drm_driver</structname> structure contains static
@@ -296,83 +313,12 @@ char *date;</synopsis>
296 </sect3> 313 </sect3>
297 </sect2> 314 </sect2>
298 <sect2> 315 <sect2>
299 <title>Device Registration</title> 316 <title>Device Instance and Driver Handling</title>
300 <para> 317!Pdrivers/gpu/drm/drm_drv.c driver instance overview
301 A number of functions are provided to help with device registration.
302 The functions deal with PCI and platform devices, respectively.
303 </para>
304!Edrivers/gpu/drm/drm_pci.c
305!Edrivers/gpu/drm/drm_platform.c
306 <para>
307 New drivers that no longer rely on the services provided by the
308 <structname>drm_bus</structname> structure can call the low-level
309 device registration functions directly. The
310 <function>drm_dev_alloc()</function> function can be used to allocate
311 and initialize a new <structname>drm_device</structname> structure.
312 Drivers will typically want to perform some additional setup on this
313 structure, such as allocating driver-specific data and storing a
314 pointer to it in the DRM device's <structfield>dev_private</structfield>
315 field. Drivers should also set the device's unique name using the
316 <function>drm_dev_set_unique()</function> function. After it has been
317 set up a device can be registered with the DRM subsystem by calling
318 <function>drm_dev_register()</function>. This will cause the device to
319 be exposed to userspace and will call the driver's
320 <structfield>.load()</structfield> implementation. When a device is
321 removed, the DRM device can safely be unregistered and freed by calling
322 <function>drm_dev_unregister()</function> followed by a call to
323 <function>drm_dev_unref()</function>.
324 </para>
325!Edrivers/gpu/drm/drm_drv.c 318!Edrivers/gpu/drm/drm_drv.c
326 </sect2> 319 </sect2>
327 <sect2> 320 <sect2>
328 <title>Driver Load</title> 321 <title>Driver Load</title>
329 <para>
330 The <methodname>load</methodname> method is the driver and device
331 initialization entry point. The method is responsible for allocating and
332 initializing driver private data, performing resource allocation and
333 mapping (e.g. acquiring
334 clocks, mapping registers or allocating command buffers), initializing
335 the memory manager (<xref linkend="drm-memory-management"/>), installing
336 the IRQ handler (<xref linkend="drm-irq-registration"/>), setting up
337 vertical blanking handling (<xref linkend="drm-vertical-blank"/>), mode
338 setting (<xref linkend="drm-mode-setting"/>) and initial output
339 configuration (<xref linkend="drm-kms-init"/>).
340 </para>
341 <note><para>
342 If compatibility is a concern (e.g. with drivers converted over from
343 User Mode Setting to Kernel Mode Setting), care must be taken to prevent
344 device initialization and control that is incompatible with currently
345 active userspace drivers. For instance, if user level mode setting
346 drivers are in use, it would be problematic to perform output discovery
347 &amp; configuration at load time. Likewise, if user-level drivers
348 unaware of memory management are in use, memory management and command
349 buffer setup may need to be omitted. These requirements are
350 driver-specific, and care needs to be taken to keep both old and new
351 applications and libraries working.
352 </para></note>
353 <synopsis>int (*load) (struct drm_device *, unsigned long flags);</synopsis>
354 <para>
355 The method takes two arguments, a pointer to the newly created
356 <structname>drm_device</structname> and flags. The flags are used to
357 pass the <structfield>driver_data</structfield> field of the device id
358 corresponding to the device passed to <function>drm_*_init()</function>.
359 Only PCI devices currently use this, USB and platform DRM drivers have
360 their <methodname>load</methodname> method called with flags to 0.
361 </para>
362 <sect3>
363 <title>Driver Private Data</title>
364 <para>
365 The driver private hangs off the main
366 <structname>drm_device</structname> structure and can be used for
367 tracking various device-specific bits of information, like register
368 offsets, command buffer status, register state for suspend/resume, etc.
369 At load time, a driver may simply allocate one and set
370 <structname>drm_device</structname>.<structfield>dev_priv</structfield>
371 appropriately; it should be freed and
372 <structname>drm_device</structname>.<structfield>dev_priv</structfield>
373 set to NULL when the driver is unloaded.
374 </para>
375 </sect3>
376 <sect3 id="drm-irq-registration"> 322 <sect3 id="drm-irq-registration">
377 <title>IRQ Registration</title> 323 <title>IRQ Registration</title>
378 <para> 324 <para>
@@ -465,6 +411,18 @@ char *date;</synopsis>
465 </para> 411 </para>
466 </sect3> 412 </sect3>
467 </sect2> 413 </sect2>
414 <sect2>
415 <title>Bus-specific Device Registration and PCI Support</title>
416 <para>
417 A number of functions are provided to help with device registration.
418 The functions deal with PCI and platform devices respectively and are
419 only provided for historical reasons. These are all deprecated and
420 shouldn't be used in new drivers. Besides that there's a few
421 helpers for pci drivers.
422 </para>
423!Edrivers/gpu/drm/drm_pci.c
424!Edrivers/gpu/drm/drm_platform.c
425 </sect2>
468 </sect1> 426 </sect1>
469 427
470 <!-- Internals: memory management --> 428 <!-- Internals: memory management -->
@@ -3646,10 +3604,11 @@ void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
3646 plane properties to default value, so that a subsequent open of the 3604 plane properties to default value, so that a subsequent open of the
3647 device will not inherit state from the previous user. It can also be 3605 device will not inherit state from the previous user. It can also be
3648 used to execute delayed power switching state changes, e.g. in 3606 used to execute delayed power switching state changes, e.g. in
3649 conjunction with the vga-switcheroo infrastructure. Beyond that KMS 3607 conjunction with the vga_switcheroo infrastructure (see
3650 drivers should not do any further cleanup. Only legacy UMS drivers might 3608 <xref linkend="vga_switcheroo"/>). Beyond that KMS drivers should not
3651 need to clean up device state so that the vga console or an independent 3609 do any further cleanup. Only legacy UMS drivers might need to clean up
3652 fbdev driver could take over. 3610 device state so that the vga console or an independent fbdev driver
3611 could take over.
3653 </para> 3612 </para>
3654 </sect2> 3613 </sect2>
3655 <sect2> 3614 <sect2>
@@ -3747,11 +3706,14 @@ int num_ioctls;</synopsis>
3747 </para></listitem> 3706 </para></listitem>
3748 <listitem><para> 3707 <listitem><para>
3749 DRM_UNLOCKED - The ioctl handler will be called without locking 3708 DRM_UNLOCKED - The ioctl handler will be called without locking
3750 the DRM global mutex 3709 the DRM global mutex. This is the enforced default for kms drivers
3710 (i.e. using the DRIVER_MODESET flag) and hence shouldn't be used
3711 any more for new drivers.
3751 </para></listitem> 3712 </para></listitem>
3752 </itemizedlist> 3713 </itemizedlist>
3753 </para> 3714 </para>
3754 </para> 3715 </para>
3716!Edrivers/gpu/drm/drm_ioctl.c
3755 </sect2> 3717 </sect2>
3756 </sect1> 3718 </sect1>
3757 <sect1> 3719 <sect1>
@@ -3949,8 +3911,8 @@ int num_ioctls;</synopsis>
3949 3911
3950 <partintro> 3912 <partintro>
3951 <para> 3913 <para>
3952 This second part of the DRM Developer's Guide documents driver code, 3914 This second part of the GPU Driver Developer's Guide documents driver
3953 implementation details and also all the driver-specific userspace 3915 code, implementation details and also all the driver-specific userspace
3954 interfaces. Especially since all hardware-acceleration interfaces to 3916 interfaces. Especially since all hardware-acceleration interfaces to
3955 userspace are driver specific for efficiency and other reasons these 3917 userspace are driver specific for efficiency and other reasons these
3956 interfaces can be rather substantial. Hence every driver has its own 3918 interfaces can be rather substantial. Hence every driver has its own
@@ -4051,6 +4013,7 @@ int num_ioctls;</synopsis>
4051 <title>High Definition Audio</title> 4013 <title>High Definition Audio</title>
4052!Pdrivers/gpu/drm/i915/intel_audio.c High Definition Audio over HDMI and Display Port 4014!Pdrivers/gpu/drm/i915/intel_audio.c High Definition Audio over HDMI and Display Port
4053!Idrivers/gpu/drm/i915/intel_audio.c 4015!Idrivers/gpu/drm/i915/intel_audio.c
4016!Iinclude/drm/i915_component.h
4054 </sect2> 4017 </sect2>
4055 <sect2> 4018 <sect2>
4056 <title>Panel Self Refresh PSR (PSR/SRD)</title> 4019 <title>Panel Self Refresh PSR (PSR/SRD)</title>
@@ -4238,6 +4201,20 @@ int num_ioctls;</synopsis>
4238 </sect2> 4201 </sect2>
4239 </sect1> 4202 </sect1>
4240 <sect1> 4203 <sect1>
4204 <title>GuC-based Command Submission</title>
4205 <sect2>
4206 <title>GuC</title>
4207!Pdrivers/gpu/drm/i915/intel_guc_loader.c GuC-specific firmware loader
4208!Idrivers/gpu/drm/i915/intel_guc_loader.c
4209 </sect2>
4210 <sect2>
4211 <title>GuC Client</title>
4212!Pdrivers/gpu/drm/i915/i915_guc_submission.c GuC-based command submissison
4213!Idrivers/gpu/drm/i915/i915_guc_submission.c
4214 </sect2>
4215 </sect1>
4216
4217 <sect1>
4241 <title> Tracing </title> 4218 <title> Tracing </title>
4242 <para> 4219 <para>
4243 This sections covers all things related to the tracepoints implemented in 4220 This sections covers all things related to the tracepoints implemented in
@@ -4260,4 +4237,50 @@ int num_ioctls;</synopsis>
4260 </chapter> 4237 </chapter>
4261!Cdrivers/gpu/drm/i915/i915_irq.c 4238!Cdrivers/gpu/drm/i915/i915_irq.c
4262</part> 4239</part>
4240
4241<part id="vga_switcheroo">
4242 <title>vga_switcheroo</title>
4243 <partintro>
4244!Pdrivers/gpu/vga/vga_switcheroo.c Overview
4245 </partintro>
4246
4247 <chapter id="modes_of_use">
4248 <title>Modes of Use</title>
4249 <sect1>
4250 <title>Manual switching and manual power control</title>
4251!Pdrivers/gpu/vga/vga_switcheroo.c Manual switching and manual power control
4252 </sect1>
4253 <sect1>
4254 <title>Driver power control</title>
4255!Pdrivers/gpu/vga/vga_switcheroo.c Driver power control
4256 </sect1>
4257 </chapter>
4258
4259 <chapter id="pubfunctions">
4260 <title>Public functions</title>
4261!Edrivers/gpu/vga/vga_switcheroo.c
4262 </chapter>
4263
4264 <chapter id="pubstructures">
4265 <title>Public structures</title>
4266!Finclude/linux/vga_switcheroo.h vga_switcheroo_handler
4267!Finclude/linux/vga_switcheroo.h vga_switcheroo_client_ops
4268 </chapter>
4269
4270 <chapter id="pubconstants">
4271 <title>Public constants</title>
4272!Finclude/linux/vga_switcheroo.h vga_switcheroo_client_id
4273!Finclude/linux/vga_switcheroo.h vga_switcheroo_state
4274 </chapter>
4275
4276 <chapter id="privstructures">
4277 <title>Private structures</title>
4278!Fdrivers/gpu/vga/vga_switcheroo.c vgasr_priv
4279!Fdrivers/gpu/vga/vga_switcheroo.c vga_switcheroo_client
4280 </chapter>
4281
4282!Cdrivers/gpu/vga/vga_switcheroo.c
4283!Cinclude/linux/vga_switcheroo.h
4284</part>
4285
4263</book> 4286</book>
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt b/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
new file mode 100644
index 000000000000..56a961aa5061
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
@@ -0,0 +1,65 @@
1Broadcom VC4 (VideoCore4) GPU
2
3The VC4 device present on the Raspberry Pi includes a display system
4with HDMI output and the HVS (Hardware Video Scaler) for compositing
5display planes.
6
7Required properties for VC4:
8- compatible: Should be "brcm,bcm2835-vc4"
9
10Required properties for Pixel Valve:
11- compatible: Should be one of "brcm,bcm2835-pixelvalve0",
12 "brcm,bcm2835-pixelvalve1", or "brcm,bcm2835-pixelvalve2"
13- reg: Physical base address and length of the PV's registers
14- interrupts: The interrupt number
15 See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
16
17Required properties for HVS:
18- compatible: Should be "brcm,bcm2835-hvs"
19- reg: Physical base address and length of the HVS's registers
20- interrupts: The interrupt number
21 See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
22
23Required properties for HDMI
24- compatible: Should be "brcm,bcm2835-hdmi"
25- reg: Physical base address and length of the two register ranges
26 ("HDMI" and "HD", in that order)
27- interrupts: The interrupt numbers
28 See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
29- ddc: phandle of the I2C controller used for DDC EDID probing
30- clocks: a) hdmi: The HDMI state machine clock
31 b) pixel: The pixel clock.
32
33Optional properties for HDMI:
34- hpd-gpios: The GPIO pin for HDMI hotplug detect (if it doesn't appear
35 as an interrupt/status bit in the HDMI controller
36 itself). See bindings/pinctrl/brcm,bcm2835-gpio.txt
37
38Example:
39pixelvalve@7e807000 {
40 compatible = "brcm,bcm2835-pixelvalve2";
41 reg = <0x7e807000 0x100>;
42 interrupts = <2 10>; /* pixelvalve */
43};
44
45hvs@7e400000 {
46 compatible = "brcm,bcm2835-hvs";
47 reg = <0x7e400000 0x6000>;
48 interrupts = <2 1>;
49};
50
51hdmi: hdmi@7e902000 {
52 compatible = "brcm,bcm2835-hdmi";
53 reg = <0x7e902000 0x600>,
54 <0x7e808000 0x100>;
55 interrupts = <2 8>, <2 9>;
56 ddc = <&i2c2>;
57 hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>;
58 clocks = <&clocks BCM2835_PLLH_PIX>,
59 <&clocks BCM2835_CLOCK_HSM>;
60 clock-names = "pixel", "hdmi";
61};
62
63vc4: gpu {
64 compatible = "brcm,bcm2835-vc4";
65};
diff --git a/Documentation/devicetree/bindings/display/msm/hdmi.txt b/Documentation/devicetree/bindings/display/msm/hdmi.txt
index e926239e1101..379ee2ea9a3d 100644
--- a/Documentation/devicetree/bindings/display/msm/hdmi.txt
+++ b/Documentation/devicetree/bindings/display/msm/hdmi.txt
@@ -2,6 +2,7 @@ Qualcomm adreno/snapdragon hdmi output
2 2
3Required properties: 3Required properties:
4- compatible: one of the following 4- compatible: one of the following
5 * "qcom,hdmi-tx-8996"
5 * "qcom,hdmi-tx-8994" 6 * "qcom,hdmi-tx-8994"
6 * "qcom,hdmi-tx-8084" 7 * "qcom,hdmi-tx-8084"
7 * "qcom,hdmi-tx-8974" 8 * "qcom,hdmi-tx-8974"
@@ -21,6 +22,7 @@ Required properties:
21Optional properties: 22Optional properties:
22- qcom,hdmi-tx-mux-en-gpio: hdmi mux enable pin 23- qcom,hdmi-tx-mux-en-gpio: hdmi mux enable pin
23- qcom,hdmi-tx-mux-sel-gpio: hdmi mux select pin 24- qcom,hdmi-tx-mux-sel-gpio: hdmi mux select pin
25- power-domains: reference to the power domain(s), if available.
24- pinctrl-names: the pin control state names; should contain "default" 26- pinctrl-names: the pin control state names; should contain "default"
25- pinctrl-0: the default pinctrl state (active) 27- pinctrl-0: the default pinctrl state (active)
26- pinctrl-1: the "sleep" pinctrl state 28- pinctrl-1: the "sleep" pinctrl state
@@ -35,6 +37,7 @@ Example:
35 reg-names = "core_physical"; 37 reg-names = "core_physical";
36 reg = <0x04a00000 0x1000>; 38 reg = <0x04a00000 0x1000>;
37 interrupts = <GIC_SPI 79 0>; 39 interrupts = <GIC_SPI 79 0>;
40 power-domains = <&mmcc MDSS_GDSC>;
38 clock-names = 41 clock-names =
39 "core_clk", 42 "core_clk",
40 "master_iface_clk", 43 "master_iface_clk",
diff --git a/Documentation/devicetree/bindings/display/msm/mdp.txt b/Documentation/devicetree/bindings/display/msm/mdp.txt
index 1a0598e5279d..0833edaba4c3 100644
--- a/Documentation/devicetree/bindings/display/msm/mdp.txt
+++ b/Documentation/devicetree/bindings/display/msm/mdp.txt
@@ -11,13 +11,14 @@ Required properties:
11- clock-names: the following clocks are required: 11- clock-names: the following clocks are required:
12 * "core_clk" 12 * "core_clk"
13 * "iface_clk" 13 * "iface_clk"
14 * "lut_clk"
15 * "src_clk" 14 * "src_clk"
16 * "hdmi_clk" 15 * "hdmi_clk"
17 * "mpd_clk" 16 * "mpd_clk"
18 17
19Optional properties: 18Optional properties:
20- gpus: phandle for gpu device 19- gpus: phandle for gpu device
20- clock-names: the following clocks are optional:
21 * "lut_clk"
21 22
22Example: 23Example:
23 24
diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt
index c902323928f7..eccd4f4867b2 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.txt
+++ b/Documentation/devicetree/bindings/display/renesas,du.txt
@@ -5,7 +5,9 @@ Required Properties:
5 - compatible: must be one of the following. 5 - compatible: must be one of the following.
6 - "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU 6 - "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU
7 - "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU 7 - "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU
8 - "renesas,du-r8a7791" for R8A7791 (R-Car M2) compatible DU 8 - "renesas,du-r8a7791" for R8A7791 (R-Car M2-W) compatible DU
9 - "renesas,du-r8a7793" for R8A7793 (R-Car M2-N) compatible DU
10 - "renesas,du-r8a7794" for R8A7794 (R-Car E2) compatible DU
9 11
10 - reg: A list of base address and length of each memory resource, one for 12 - reg: A list of base address and length of each memory resource, one for
11 each entry in the reg-names property. 13 each entry in the reg-names property.
@@ -22,9 +24,9 @@ Required Properties:
22 - clock-names: Name of the clocks. This property is model-dependent. 24 - clock-names: Name of the clocks. This property is model-dependent.
23 - R8A7779 uses a single functional clock. The clock doesn't need to be 25 - R8A7779 uses a single functional clock. The clock doesn't need to be
24 named. 26 named.
25 - R8A7790 and R8A7791 use one functional clock per channel and one clock 27 - R8A779[0134] use one functional clock per channel and one clock per LVDS
26 per LVDS encoder. The functional clocks must be named "du.x" with "x" 28 encoder (if available). The functional clocks must be named "du.x" with
27 being the channel numerical index. The LVDS clocks must be named 29 "x" being the channel numerical index. The LVDS clocks must be named
28 "lvds.x" with "x" being the LVDS encoder numerical index. 30 "lvds.x" with "x" being the LVDS encoder numerical index.
29 - In addition to the functional and encoder clocks, all DU versions also 31 - In addition to the functional and encoder clocks, all DU versions also
30 support externally supplied pixel clocks. Those clocks are optional. 32 support externally supplied pixel clocks. Those clocks are optional.
@@ -43,7 +45,9 @@ corresponding to each DU output.
43----------------------------------------------------------------------------- 45-----------------------------------------------------------------------------
44 R8A7779 (H1) DPAD 0 DPAD 1 - 46 R8A7779 (H1) DPAD 0 DPAD 1 -
45 R8A7790 (H2) DPAD LVDS 0 LVDS 1 47 R8A7790 (H2) DPAD LVDS 0 LVDS 1
46 R8A7791 (M2) DPAD LVDS 0 - 48 R8A7791 (M2-W) DPAD LVDS 0 -
49 R8A7793 (M2-N) DPAD LVDS 0 -
50 R8A7794 (E2) DPAD 0 DPAD 1 -
47 51
48 52
49Example: R8A7790 (R-Car H2) DU 53Example: R8A7790 (R-Car H2) DU
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 84c0214b64a7..f8aae632f02f 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -932,11 +932,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
932 The filter can be disabled or changed to another 932 The filter can be disabled or changed to another
933 driver later using sysfs. 933 driver later using sysfs.
934 934
935 drm_kms_helper.edid_firmware=[<connector>:]<file> 935 drm_kms_helper.edid_firmware=[<connector>:]<file>[,[<connector>:]<file>]
936 Broken monitors, graphic adapters and KVMs may 936 Broken monitors, graphic adapters, KVMs and EDIDless
937 send no or incorrect EDID data sets. This parameter 937 panels may send no or incorrect EDID data sets.
938 allows to specify an EDID data set in the 938 This parameter allows to specify an EDID data sets
939 /lib/firmware directory that is used instead. 939 in the /lib/firmware directory that are used instead.
940 Generic built-in EDID data sets are used, if one of 940 Generic built-in EDID data sets are used, if one of
941 edid/1024x768.bin, edid/1280x1024.bin, 941 edid/1024x768.bin, edid/1280x1024.bin,
942 edid/1680x1050.bin, or edid/1920x1080.bin is given 942 edid/1680x1050.bin, or edid/1920x1080.bin is given
@@ -945,7 +945,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
945 available in Documentation/EDID/HOWTO.txt. An EDID 945 available in Documentation/EDID/HOWTO.txt. An EDID
946 data set will only be used for a particular connector, 946 data set will only be used for a particular connector,
947 if its name and a colon are prepended to the EDID 947 if its name and a colon are prepended to the EDID
948 name. 948 name. Each connector may use a unique EDID data
949 set by separating the files with a comma. An EDID
950 data set with no connector name will be used for
951 any connectors not explicitly specified.
949 952
950 dscc4.setup= [NET] 953 dscc4.setup= [NET]
951 954
diff --git a/MAINTAINERS b/MAINTAINERS
index 7af7f4a01f0b..1a73c20bc400 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3624,6 +3624,7 @@ M: Daniel Vetter <daniel.vetter@intel.com>
3624M: Jani Nikula <jani.nikula@linux.intel.com> 3624M: Jani Nikula <jani.nikula@linux.intel.com>
3625L: intel-gfx@lists.freedesktop.org 3625L: intel-gfx@lists.freedesktop.org
3626L: dri-devel@lists.freedesktop.org 3626L: dri-devel@lists.freedesktop.org
3627W: https://01.org/linuxgraphics/
3627Q: http://patchwork.freedesktop.org/project/intel-gfx/ 3628Q: http://patchwork.freedesktop.org/project/intel-gfx/
3628T: git git://anongit.freedesktop.org/drm-intel 3629T: git git://anongit.freedesktop.org/drm-intel
3629S: Supported 3630S: Supported
@@ -3664,6 +3665,7 @@ M: Philipp Zabel <p.zabel@pengutronix.de>
3664L: dri-devel@lists.freedesktop.org 3665L: dri-devel@lists.freedesktop.org
3665S: Maintained 3666S: Maintained
3666F: drivers/gpu/drm/imx/ 3667F: drivers/gpu/drm/imx/
3668F: drivers/gpu/ipu-v3/
3667F: Documentation/devicetree/bindings/display/imx/ 3669F: Documentation/devicetree/bindings/display/imx/
3668 3670
3669DRM DRIVERS FOR GMA500 (Poulsbo, Moorestown and derivative chipsets) 3671DRM DRIVERS FOR GMA500 (Poulsbo, Moorestown and derivative chipsets)
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 13ba48c4b03b..0ff608fd7f0f 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -132,6 +132,7 @@ CONFIG_DRM_PARADE_PS8622=y
132CONFIG_DRM_EXYNOS=y 132CONFIG_DRM_EXYNOS=y
133CONFIG_DRM_EXYNOS_FIMD=y 133CONFIG_DRM_EXYNOS_FIMD=y
134CONFIG_DRM_EXYNOS_DSI=y 134CONFIG_DRM_EXYNOS_DSI=y
135CONFIG_DRM_EXYNOS_MIXER=y
135CONFIG_DRM_EXYNOS_HDMI=y 136CONFIG_DRM_EXYNOS_HDMI=y
136CONFIG_DRM_PANEL_SIMPLE=y 137CONFIG_DRM_PANEL_SIMPLE=y
137CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=y 138CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=y
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index 50ef8bd8708b..7b05dbe9b296 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -397,6 +397,104 @@ out:
397} 397}
398EXPORT_SYMBOL(fence_default_wait); 398EXPORT_SYMBOL(fence_default_wait);
399 399
400static bool
401fence_test_signaled_any(struct fence **fences, uint32_t count)
402{
403 int i;
404
405 for (i = 0; i < count; ++i) {
406 struct fence *fence = fences[i];
407 if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
408 return true;
409 }
410 return false;
411}
412
413/**
414 * fence_wait_any_timeout - sleep until any fence gets signaled
415 * or until timeout elapses
416 * @fences: [in] array of fences to wait on
417 * @count: [in] number of fences to wait on
418 * @intr: [in] if true, do an interruptible wait
419 * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
420 *
421 * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
422 * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
423 * on success.
424 *
425 * Synchronous waits for the first fence in the array to be signaled. The
426 * caller needs to hold a reference to all fences in the array, otherwise a
427 * fence might be freed before return, resulting in undefined behavior.
428 */
429signed long
430fence_wait_any_timeout(struct fence **fences, uint32_t count,
431 bool intr, signed long timeout)
432{
433 struct default_wait_cb *cb;
434 signed long ret = timeout;
435 unsigned i;
436
437 if (WARN_ON(!fences || !count || timeout < 0))
438 return -EINVAL;
439
440 if (timeout == 0) {
441 for (i = 0; i < count; ++i)
442 if (fence_is_signaled(fences[i]))
443 return 1;
444
445 return 0;
446 }
447
448 cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
449 if (cb == NULL) {
450 ret = -ENOMEM;
451 goto err_free_cb;
452 }
453
454 for (i = 0; i < count; ++i) {
455 struct fence *fence = fences[i];
456
457 if (fence->ops->wait != fence_default_wait) {
458 ret = -EINVAL;
459 goto fence_rm_cb;
460 }
461
462 cb[i].task = current;
463 if (fence_add_callback(fence, &cb[i].base,
464 fence_default_wait_cb)) {
465 /* This fence is already signaled */
466 goto fence_rm_cb;
467 }
468 }
469
470 while (ret > 0) {
471 if (intr)
472 set_current_state(TASK_INTERRUPTIBLE);
473 else
474 set_current_state(TASK_UNINTERRUPTIBLE);
475
476 if (fence_test_signaled_any(fences, count))
477 break;
478
479 ret = schedule_timeout(ret);
480
481 if (ret > 0 && intr && signal_pending(current))
482 ret = -ERESTARTSYS;
483 }
484
485 __set_current_state(TASK_RUNNING);
486
487fence_rm_cb:
488 while (i-- > 0)
489 fence_remove_callback(fences[i], &cb[i].base);
490
491err_free_cb:
492 kfree(cb);
493
494 return ret;
495}
496EXPORT_SYMBOL(fence_wait_any_timeout);
497
400/** 498/**
401 * fence_init - Initialize a custom fence. 499 * fence_init - Initialize a custom fence.
402 * @fence: [in] the fence to initialize 500 * @fence: [in] the fence to initialize
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 1a0a8df2eed8..c4bf9a1cf4a6 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -264,3 +264,5 @@ source "drivers/gpu/drm/sti/Kconfig"
264source "drivers/gpu/drm/amd/amdkfd/Kconfig" 264source "drivers/gpu/drm/amd/amdkfd/Kconfig"
265 265
266source "drivers/gpu/drm/imx/Kconfig" 266source "drivers/gpu/drm/imx/Kconfig"
267
268source "drivers/gpu/drm/vc4/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 45e7719846b1..1e9ff4c3e3db 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -6,7 +6,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
6 drm_context.o drm_dma.o \ 6 drm_context.o drm_dma.o \
7 drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ 7 drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
8 drm_lock.o drm_memory.o drm_drv.o drm_vm.o \ 8 drm_lock.o drm_memory.o drm_drv.o drm_vm.o \
9 drm_agpsupport.o drm_scatter.o drm_pci.o \ 9 drm_scatter.o drm_pci.o \
10 drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \ 10 drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
11 drm_crtc.o drm_modes.o drm_edid.o \ 11 drm_crtc.o drm_modes.o drm_edid.o \
12 drm_info.o drm_debugfs.o drm_encoder_slave.o \ 12 drm_info.o drm_debugfs.o drm_encoder_slave.o \
@@ -19,6 +19,9 @@ drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
19drm-$(CONFIG_PCI) += ati_pcigart.o 19drm-$(CONFIG_PCI) += ati_pcigart.o
20drm-$(CONFIG_DRM_PANEL) += drm_panel.o 20drm-$(CONFIG_DRM_PANEL) += drm_panel.o
21drm-$(CONFIG_OF) += drm_of.o 21drm-$(CONFIG_OF) += drm_of.o
22drm-$(CONFIG_AGP) += drm_agpsupport.o
23
24drm-y += $(drm-m)
22 25
23drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ 26drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
24 drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o 27 drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o
@@ -42,6 +45,7 @@ obj-$(CONFIG_DRM_MGA) += mga/
42obj-$(CONFIG_DRM_I810) += i810/ 45obj-$(CONFIG_DRM_I810) += i810/
43obj-$(CONFIG_DRM_I915) += i915/ 46obj-$(CONFIG_DRM_I915) += i915/
44obj-$(CONFIG_DRM_MGAG200) += mgag200/ 47obj-$(CONFIG_DRM_MGAG200) += mgag200/
48obj-$(CONFIG_DRM_VC4) += vc4/
45obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/ 49obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
46obj-$(CONFIG_DRM_SIS) += sis/ 50obj-$(CONFIG_DRM_SIS) += sis/
47obj-$(CONFIG_DRM_SAVAGE)+= savage/ 51obj-$(CONFIG_DRM_SAVAGE)+= savage/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 0d13e6368b96..615ce6d464fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -79,6 +79,8 @@ extern int amdgpu_bapm;
79extern int amdgpu_deep_color; 79extern int amdgpu_deep_color;
80extern int amdgpu_vm_size; 80extern int amdgpu_vm_size;
81extern int amdgpu_vm_block_size; 81extern int amdgpu_vm_block_size;
82extern int amdgpu_vm_fault_stop;
83extern int amdgpu_vm_debug;
82extern int amdgpu_enable_scheduler; 84extern int amdgpu_enable_scheduler;
83extern int amdgpu_sched_jobs; 85extern int amdgpu_sched_jobs;
84extern int amdgpu_sched_hw_submission; 86extern int amdgpu_sched_hw_submission;
@@ -343,7 +345,6 @@ struct amdgpu_ring_funcs {
343 /* testing functions */ 345 /* testing functions */
344 int (*test_ring)(struct amdgpu_ring *ring); 346 int (*test_ring)(struct amdgpu_ring *ring);
345 int (*test_ib)(struct amdgpu_ring *ring); 347 int (*test_ib)(struct amdgpu_ring *ring);
346 bool (*is_lockup)(struct amdgpu_ring *ring);
347 /* insert NOP packets */ 348 /* insert NOP packets */
348 void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count); 349 void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
349}; 350};
@@ -404,7 +405,6 @@ struct amdgpu_fence_driver {
404/* some special values for the owner field */ 405/* some special values for the owner field */
405#define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul) 406#define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul)
406#define AMDGPU_FENCE_OWNER_VM ((void*)1ul) 407#define AMDGPU_FENCE_OWNER_VM ((void*)1ul)
407#define AMDGPU_FENCE_OWNER_MOVE ((void*)2ul)
408 408
409#define AMDGPU_FENCE_FLAG_64BIT (1 << 0) 409#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
410#define AMDGPU_FENCE_FLAG_INT (1 << 1) 410#define AMDGPU_FENCE_FLAG_INT (1 << 1)
@@ -446,58 +446,11 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
446int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); 446int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
447unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); 447unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
448 448
449signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
450 struct fence **array,
451 uint32_t count,
452 bool intr,
453 signed long t);
454struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
455void amdgpu_fence_unref(struct amdgpu_fence **fence);
456
457bool amdgpu_fence_need_sync(struct amdgpu_fence *fence, 449bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
458 struct amdgpu_ring *ring); 450 struct amdgpu_ring *ring);
459void amdgpu_fence_note_sync(struct amdgpu_fence *fence, 451void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
460 struct amdgpu_ring *ring); 452 struct amdgpu_ring *ring);
461 453
462static inline struct amdgpu_fence *amdgpu_fence_later(struct amdgpu_fence *a,
463 struct amdgpu_fence *b)
464{
465 if (!a) {
466 return b;
467 }
468
469 if (!b) {
470 return a;
471 }
472
473 BUG_ON(a->ring != b->ring);
474
475 if (a->seq > b->seq) {
476 return a;
477 } else {
478 return b;
479 }
480}
481
482static inline bool amdgpu_fence_is_earlier(struct amdgpu_fence *a,
483 struct amdgpu_fence *b)
484{
485 if (!a) {
486 return false;
487 }
488
489 if (!b) {
490 return true;
491 }
492
493 BUG_ON(a->ring != b->ring);
494
495 return a->seq < b->seq;
496}
497
498int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
499 void *owner, struct amdgpu_fence **fence);
500
501/* 454/*
502 * TTM. 455 * TTM.
503 */ 456 */
@@ -708,7 +661,7 @@ void amdgpu_semaphore_free(struct amdgpu_device *adev,
708 */ 661 */
709struct amdgpu_sync { 662struct amdgpu_sync {
710 struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS]; 663 struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
711 struct amdgpu_fence *sync_to[AMDGPU_MAX_RINGS]; 664 struct fence *sync_to[AMDGPU_MAX_RINGS];
712 DECLARE_HASHTABLE(fences, 4); 665 DECLARE_HASHTABLE(fences, 4);
713 struct fence *last_vm_update; 666 struct fence *last_vm_update;
714}; 667};
@@ -905,8 +858,6 @@ struct amdgpu_ring {
905 unsigned ring_size; 858 unsigned ring_size;
906 unsigned ring_free_dw; 859 unsigned ring_free_dw;
907 int count_dw; 860 int count_dw;
908 atomic_t last_rptr;
909 atomic64_t last_activity;
910 uint64_t gpu_addr; 861 uint64_t gpu_addr;
911 uint32_t align_mask; 862 uint32_t align_mask;
912 uint32_t ptr_mask; 863 uint32_t ptr_mask;
@@ -960,6 +911,11 @@ struct amdgpu_ring {
960#define AMDGPU_PTE_FRAG_64KB (4 << 7) 911#define AMDGPU_PTE_FRAG_64KB (4 << 7)
961#define AMDGPU_LOG2_PAGES_PER_FRAG 4 912#define AMDGPU_LOG2_PAGES_PER_FRAG 4
962 913
914/* How to programm VM fault handling */
915#define AMDGPU_VM_FAULT_STOP_NEVER 0
916#define AMDGPU_VM_FAULT_STOP_FIRST 1
917#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
918
963struct amdgpu_vm_pt { 919struct amdgpu_vm_pt {
964 struct amdgpu_bo *bo; 920 struct amdgpu_bo *bo;
965 uint64_t addr; 921 uint64_t addr;
@@ -971,7 +927,7 @@ struct amdgpu_vm_id {
971 /* last flushed PD/PT update */ 927 /* last flushed PD/PT update */
972 struct fence *flushed_updates; 928 struct fence *flushed_updates;
973 /* last use of vmid */ 929 /* last use of vmid */
974 struct amdgpu_fence *last_id_use; 930 struct fence *last_id_use;
975}; 931};
976 932
977struct amdgpu_vm { 933struct amdgpu_vm {
@@ -1004,7 +960,7 @@ struct amdgpu_vm {
1004}; 960};
1005 961
1006struct amdgpu_vm_manager { 962struct amdgpu_vm_manager {
1007 struct amdgpu_fence *active[AMDGPU_NUM_VM]; 963 struct fence *active[AMDGPU_NUM_VM];
1008 uint32_t max_pfn; 964 uint32_t max_pfn;
1009 /* number of VMIDs */ 965 /* number of VMIDs */
1010 unsigned nvm; 966 unsigned nvm;
@@ -1223,8 +1179,6 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring);
1223void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring); 1179void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
1224void amdgpu_ring_undo(struct amdgpu_ring *ring); 1180void amdgpu_ring_undo(struct amdgpu_ring *ring);
1225void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring); 1181void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring);
1226void amdgpu_ring_lockup_update(struct amdgpu_ring *ring);
1227bool amdgpu_ring_test_lockup(struct amdgpu_ring *ring);
1228unsigned amdgpu_ring_backup(struct amdgpu_ring *ring, 1182unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
1229 uint32_t **data); 1183 uint32_t **data);
1230int amdgpu_ring_restore(struct amdgpu_ring *ring, 1184int amdgpu_ring_restore(struct amdgpu_ring *ring,
@@ -1234,6 +1188,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
1234 struct amdgpu_irq_src *irq_src, unsigned irq_type, 1188 struct amdgpu_irq_src *irq_src, unsigned irq_type,
1235 enum amdgpu_ring_type ring_type); 1189 enum amdgpu_ring_type ring_type);
1236void amdgpu_ring_fini(struct amdgpu_ring *ring); 1190void amdgpu_ring_fini(struct amdgpu_ring *ring);
1191struct amdgpu_ring *amdgpu_ring_from_fence(struct fence *f);
1237 1192
1238/* 1193/*
1239 * CS. 1194 * CS.
@@ -1709,7 +1664,7 @@ struct amdgpu_vce {
1709/* 1664/*
1710 * SDMA 1665 * SDMA
1711 */ 1666 */
1712struct amdgpu_sdma { 1667struct amdgpu_sdma_instance {
1713 /* SDMA firmware */ 1668 /* SDMA firmware */
1714 const struct firmware *fw; 1669 const struct firmware *fw;
1715 uint32_t fw_version; 1670 uint32_t fw_version;
@@ -1719,6 +1674,13 @@ struct amdgpu_sdma {
1719 bool burst_nop; 1674 bool burst_nop;
1720}; 1675};
1721 1676
1677struct amdgpu_sdma {
1678 struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
1679 struct amdgpu_irq_src trap_irq;
1680 struct amdgpu_irq_src illegal_inst_irq;
1681 int num_instances;
1682};
1683
1722/* 1684/*
1723 * Firmware 1685 * Firmware
1724 */ 1686 */
@@ -1751,11 +1713,11 @@ void amdgpu_test_syncing(struct amdgpu_device *adev);
1751int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr); 1713int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
1752void amdgpu_mn_unregister(struct amdgpu_bo *bo); 1714void amdgpu_mn_unregister(struct amdgpu_bo *bo);
1753#else 1715#else
1754static int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) 1716static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
1755{ 1717{
1756 return -ENODEV; 1718 return -ENODEV;
1757} 1719}
1758static void amdgpu_mn_unregister(struct amdgpu_bo *bo) {} 1720static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
1759#endif 1721#endif
1760 1722
1761/* 1723/*
@@ -1947,7 +1909,6 @@ struct amdgpu_device {
1947 struct device *dev; 1909 struct device *dev;
1948 struct drm_device *ddev; 1910 struct drm_device *ddev;
1949 struct pci_dev *pdev; 1911 struct pci_dev *pdev;
1950 struct rw_semaphore exclusive_lock;
1951 1912
1952 /* ASIC */ 1913 /* ASIC */
1953 enum amd_asic_type asic_type; 1914 enum amd_asic_type asic_type;
@@ -1961,7 +1922,6 @@ struct amdgpu_device {
1961 bool suspend; 1922 bool suspend;
1962 bool need_dma32; 1923 bool need_dma32;
1963 bool accel_working; 1924 bool accel_working;
1964 bool needs_reset;
1965 struct work_struct reset_work; 1925 struct work_struct reset_work;
1966 struct notifier_block acpi_nb; 1926 struct notifier_block acpi_nb;
1967 struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS]; 1927 struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
@@ -2065,9 +2025,7 @@ struct amdgpu_device {
2065 struct amdgpu_gfx gfx; 2025 struct amdgpu_gfx gfx;
2066 2026
2067 /* sdma */ 2027 /* sdma */
2068 struct amdgpu_sdma sdma[AMDGPU_MAX_SDMA_INSTANCES]; 2028 struct amdgpu_sdma sdma;
2069 struct amdgpu_irq_src sdma_trap_irq;
2070 struct amdgpu_irq_src sdma_illegal_inst_irq;
2071 2029
2072 /* uvd */ 2030 /* uvd */
2073 bool has_uvd; 2031 bool has_uvd;
@@ -2204,17 +2162,18 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
2204 ring->ring_free_dw--; 2162 ring->ring_free_dw--;
2205} 2163}
2206 2164
2207static inline struct amdgpu_sdma * amdgpu_get_sdma_instance(struct amdgpu_ring *ring) 2165static inline struct amdgpu_sdma_instance *
2166amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
2208{ 2167{
2209 struct amdgpu_device *adev = ring->adev; 2168 struct amdgpu_device *adev = ring->adev;
2210 int i; 2169 int i;
2211 2170
2212 for (i = 0; i < AMDGPU_MAX_SDMA_INSTANCES; i++) 2171 for (i = 0; i < adev->sdma.num_instances; i++)
2213 if (&adev->sdma[i].ring == ring) 2172 if (&adev->sdma.instance[i].ring == ring)
2214 break; 2173 break;
2215 2174
2216 if (i < AMDGPU_MAX_SDMA_INSTANCES) 2175 if (i < AMDGPU_MAX_SDMA_INSTANCES)
2217 return &adev->sdma[i]; 2176 return &adev->sdma.instance[i];
2218 else 2177 else
2219 return NULL; 2178 return NULL;
2220} 2179}
@@ -2241,7 +2200,6 @@ static inline struct amdgpu_sdma * amdgpu_get_sdma_instance(struct amdgpu_ring *
2241#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) 2200#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
2242#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r)) 2201#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
2243#define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r)) 2202#define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r))
2244#define amdgpu_ring_is_lockup(r) (r)->funcs->is_lockup((r))
2245#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) 2203#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
2246#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) 2204#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
2247#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) 2205#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
@@ -2350,10 +2308,10 @@ void amdgpu_driver_preclose_kms(struct drm_device *dev,
2350 struct drm_file *file_priv); 2308 struct drm_file *file_priv);
2351int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon); 2309int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
2352int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon); 2310int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
2353u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, int crtc); 2311u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
2354int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc); 2312int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
2355void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc); 2313void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
2356int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, 2314int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
2357 int *max_error, 2315 int *max_error,
2358 struct timeval *vblank_time, 2316 struct timeval *vblank_time,
2359 unsigned flags); 2317 unsigned flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index aef4a7aac0f7..a142d5ae148d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -25,7 +25,6 @@
25#include <linux/acpi.h> 25#include <linux/acpi.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/power_supply.h> 27#include <linux/power_supply.h>
28#include <linux/vga_switcheroo.h>
29#include <acpi/video.h> 28#include <acpi/video.h>
30#include <drm/drmP.h> 29#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h> 30#include <drm/drm_crtc_helper.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index dd2037bc0b4a..0e1376317683 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -649,12 +649,12 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
649 649
650 case KGD_ENGINE_SDMA1: 650 case KGD_ENGINE_SDMA1:
651 hdr = (const union amdgpu_firmware_header *) 651 hdr = (const union amdgpu_firmware_header *)
652 adev->sdma[0].fw->data; 652 adev->sdma.instance[0].fw->data;
653 break; 653 break;
654 654
655 case KGD_ENGINE_SDMA2: 655 case KGD_ENGINE_SDMA2:
656 hdr = (const union amdgpu_firmware_header *) 656 hdr = (const union amdgpu_firmware_header *)
657 adev->sdma[1].fw->data; 657 adev->sdma.instance[1].fw->data;
658 break; 658 break;
659 659
660 default: 660 default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index dfd1d503bccf..79fa5c7de856 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -523,12 +523,12 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
523 523
524 case KGD_ENGINE_SDMA1: 524 case KGD_ENGINE_SDMA1:
525 hdr = (const union amdgpu_firmware_header *) 525 hdr = (const union amdgpu_firmware_header *)
526 adev->sdma[0].fw->data; 526 adev->sdma.instance[0].fw->data;
527 break; 527 break;
528 528
529 case KGD_ENGINE_SDMA2: 529 case KGD_ENGINE_SDMA2:
530 hdr = (const union amdgpu_firmware_header *) 530 hdr = (const union amdgpu_firmware_header *)
531 adev->sdma[1].fw->data; 531 adev->sdma.instance[1].fw->data;
532 break; 532 break;
533 533
534 default: 534 default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 3f7aaa45bf8e..5a8fbadbd27b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -501,7 +501,7 @@ static int amdgpu_atpx_get_client_id(struct pci_dev *pdev)
501 return VGA_SWITCHEROO_DIS; 501 return VGA_SWITCHEROO_DIS;
502} 502}
503 503
504static struct vga_switcheroo_handler amdgpu_atpx_handler = { 504static const struct vga_switcheroo_handler amdgpu_atpx_handler = {
505 .switchto = amdgpu_atpx_switchto, 505 .switchto = amdgpu_atpx_switchto,
506 .power_state = amdgpu_atpx_power_state, 506 .power_state = amdgpu_atpx_power_state,
507 .init = amdgpu_atpx_init, 507 .init = amdgpu_atpx_init,
@@ -536,7 +536,7 @@ static bool amdgpu_atpx_detect(void)
536 536
537 if (has_atpx && vga_count == 2) { 537 if (has_atpx && vga_count == 2) {
538 acpi_get_name(amdgpu_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer); 538 acpi_get_name(amdgpu_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
539 printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", 539 printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
540 acpi_method_name); 540 acpi_method_name);
541 amdgpu_atpx_priv.atpx_detected = true; 541 amdgpu_atpx_priv.atpx_detected = true;
542 return true; 542 return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 02add0a508cb..c44c0c6afd1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -29,7 +29,6 @@
29#include "amdgpu.h" 29#include "amdgpu.h"
30#include "atom.h" 30#include "atom.h"
31 31
32#include <linux/vga_switcheroo.h>
33#include <linux/slab.h> 32#include <linux/slab.h>
34#include <linux/acpi.h> 33#include <linux/acpi.h>
35/* 34/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index fd16652aa277..dfc4d02c7a38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -104,10 +104,11 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
104 } 104 }
105 break; 105 break;
106 case AMDGPU_HW_IP_DMA: 106 case AMDGPU_HW_IP_DMA:
107 if (ring < 2) { 107 if (ring < adev->sdma.num_instances) {
108 *out_ring = &adev->sdma[ring].ring; 108 *out_ring = &adev->sdma.instance[ring].ring;
109 } else { 109 } else {
110 DRM_ERROR("only two SDMA rings are supported\n"); 110 DRM_ERROR("only %d SDMA rings are supported\n",
111 adev->sdma.num_instances);
111 return -EINVAL; 112 return -EINVAL;
112 } 113 }
113 break; 114 break;
@@ -567,9 +568,24 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
567 if (r) 568 if (r)
568 return r; 569 return r;
569 } 570 }
571
572 }
573
574 r = amdgpu_vm_clear_invalids(adev, vm, &p->ibs[0].sync);
575
576 if (amdgpu_vm_debug && p->bo_list) {
577 /* Invalidate all BOs to test for userspace bugs */
578 for (i = 0; i < p->bo_list->num_entries; i++) {
579 /* ignore duplicates */
580 bo = p->bo_list->array[i].robj;
581 if (!bo)
582 continue;
583
584 amdgpu_vm_bo_invalidate(adev, bo);
585 }
570 } 586 }
571 587
572 return amdgpu_vm_clear_invalids(adev, vm, &p->ibs[0].sync); 588 return r;
573} 589}
574 590
575static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, 591static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
@@ -593,7 +609,6 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
593 } 609 }
594 } 610 }
595 611
596 mutex_lock(&vm->mutex);
597 r = amdgpu_bo_vm_update_pte(parser, vm); 612 r = amdgpu_bo_vm_update_pte(parser, vm);
598 if (r) { 613 if (r) {
599 goto out; 614 goto out;
@@ -604,7 +619,6 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
604 parser->filp); 619 parser->filp);
605 620
606out: 621out:
607 mutex_unlock(&vm->mutex);
608 return r; 622 return r;
609} 623}
610 624
@@ -812,15 +826,14 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
812{ 826{
813 struct amdgpu_device *adev = dev->dev_private; 827 struct amdgpu_device *adev = dev->dev_private;
814 union drm_amdgpu_cs *cs = data; 828 union drm_amdgpu_cs *cs = data;
829 struct amdgpu_fpriv *fpriv = filp->driver_priv;
830 struct amdgpu_vm *vm = &fpriv->vm;
815 struct amdgpu_cs_parser *parser; 831 struct amdgpu_cs_parser *parser;
816 bool reserved_buffers = false; 832 bool reserved_buffers = false;
817 int i, r; 833 int i, r;
818 834
819 down_read(&adev->exclusive_lock); 835 if (!adev->accel_working)
820 if (!adev->accel_working) {
821 up_read(&adev->exclusive_lock);
822 return -EBUSY; 836 return -EBUSY;
823 }
824 837
825 parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0); 838 parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
826 if (!parser) 839 if (!parser)
@@ -828,12 +841,11 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
828 r = amdgpu_cs_parser_init(parser, data); 841 r = amdgpu_cs_parser_init(parser, data);
829 if (r) { 842 if (r) {
830 DRM_ERROR("Failed to initialize parser !\n"); 843 DRM_ERROR("Failed to initialize parser !\n");
831 kfree(parser); 844 amdgpu_cs_parser_fini(parser, r, false);
832 up_read(&adev->exclusive_lock);
833 r = amdgpu_cs_handle_lockup(adev, r); 845 r = amdgpu_cs_handle_lockup(adev, r);
834 return r; 846 return r;
835 } 847 }
836 848 mutex_lock(&vm->mutex);
837 r = amdgpu_cs_parser_relocs(parser); 849 r = amdgpu_cs_parser_relocs(parser);
838 if (r == -ENOMEM) 850 if (r == -ENOMEM)
839 DRM_ERROR("Not enough memory for command submission!\n"); 851 DRM_ERROR("Not enough memory for command submission!\n");
@@ -864,8 +876,10 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
864 struct amdgpu_job *job; 876 struct amdgpu_job *job;
865 struct amdgpu_ring * ring = parser->ibs->ring; 877 struct amdgpu_ring * ring = parser->ibs->ring;
866 job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); 878 job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
867 if (!job) 879 if (!job) {
868 return -ENOMEM; 880 r = -ENOMEM;
881 goto out;
882 }
869 job->base.sched = &ring->sched; 883 job->base.sched = &ring->sched;
870 job->base.s_entity = &parser->ctx->rings[ring->idx].entity; 884 job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
871 job->adev = parser->adev; 885 job->adev = parser->adev;
@@ -900,14 +914,14 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
900 914
901 mutex_unlock(&job->job_lock); 915 mutex_unlock(&job->job_lock);
902 amdgpu_cs_parser_fini_late(parser); 916 amdgpu_cs_parser_fini_late(parser);
903 up_read(&adev->exclusive_lock); 917 mutex_unlock(&vm->mutex);
904 return 0; 918 return 0;
905 } 919 }
906 920
907 cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence; 921 cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
908out: 922out:
909 amdgpu_cs_parser_fini(parser, r, reserved_buffers); 923 amdgpu_cs_parser_fini(parser, r, reserved_buffers);
910 up_read(&adev->exclusive_lock); 924 mutex_unlock(&vm->mutex);
911 r = amdgpu_cs_handle_lockup(adev, r); 925 r = amdgpu_cs_handle_lockup(adev, r);
912 return r; 926 return r;
913} 927}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index e0b80ccdfe8a..fec65f01c031 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -69,6 +69,9 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
69 struct amdgpu_device *adev = ctx->adev; 69 struct amdgpu_device *adev = ctx->adev;
70 unsigned i, j; 70 unsigned i, j;
71 71
72 if (!adev)
73 return;
74
72 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) 75 for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
73 for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j) 76 for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
74 fence_put(ctx->rings[i].fences[j]); 77 fence_put(ctx->rings[i].fences[j]);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6068d8207d10..d5b421330145 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -57,6 +57,7 @@ static const char *amdgpu_asic_name[] = {
57 "TONGA", 57 "TONGA",
58 "FIJI", 58 "FIJI",
59 "CARRIZO", 59 "CARRIZO",
60 "STONEY",
60 "LAST", 61 "LAST",
61}; 62};
62 63
@@ -1022,7 +1023,7 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
1022 * amdgpu_switcheroo_set_state - set switcheroo state 1023 * amdgpu_switcheroo_set_state - set switcheroo state
1023 * 1024 *
1024 * @pdev: pci dev pointer 1025 * @pdev: pci dev pointer
1025 * @state: vga switcheroo state 1026 * @state: vga_switcheroo state
1026 * 1027 *
1027 * Callback for the switcheroo driver. Suspends or resumes the 1028 * Callback for the switcheroo driver. Suspends or resumes the
1028 * the asics before or after it is powered up using ACPI methods. 1029 * the asics before or after it is powered up using ACPI methods.
@@ -1165,7 +1166,8 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
1165 case CHIP_TONGA: 1166 case CHIP_TONGA:
1166 case CHIP_FIJI: 1167 case CHIP_FIJI:
1167 case CHIP_CARRIZO: 1168 case CHIP_CARRIZO:
1168 if (adev->asic_type == CHIP_CARRIZO) 1169 case CHIP_STONEY:
1170 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
1169 adev->family = AMDGPU_FAMILY_CZ; 1171 adev->family = AMDGPU_FAMILY_CZ;
1170 else 1172 else
1171 adev->family = AMDGPU_FAMILY_VI; 1173 adev->family = AMDGPU_FAMILY_VI;
@@ -1418,7 +1420,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1418 mutex_init(&adev->gfx.gpu_clock_mutex); 1420 mutex_init(&adev->gfx.gpu_clock_mutex);
1419 mutex_init(&adev->srbm_mutex); 1421 mutex_init(&adev->srbm_mutex);
1420 mutex_init(&adev->grbm_idx_mutex); 1422 mutex_init(&adev->grbm_idx_mutex);
1421 init_rwsem(&adev->exclusive_lock);
1422 mutex_init(&adev->mn_lock); 1423 mutex_init(&adev->mn_lock);
1423 hash_init(adev->mn_hash); 1424 hash_init(adev->mn_hash);
1424 1425
@@ -1657,11 +1658,21 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
1657 } 1658 }
1658 drm_modeset_unlock_all(dev); 1659 drm_modeset_unlock_all(dev);
1659 1660
1660 /* unpin the front buffers */ 1661 /* unpin the front buffers and cursors */
1661 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1662 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1663 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1662 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb); 1664 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
1663 struct amdgpu_bo *robj; 1665 struct amdgpu_bo *robj;
1664 1666
1667 if (amdgpu_crtc->cursor_bo) {
1668 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
1669 r = amdgpu_bo_reserve(aobj, false);
1670 if (r == 0) {
1671 amdgpu_bo_unpin(aobj);
1672 amdgpu_bo_unreserve(aobj);
1673 }
1674 }
1675
1665 if (rfb == NULL || rfb->obj == NULL) { 1676 if (rfb == NULL || rfb->obj == NULL) {
1666 continue; 1677 continue;
1667 } 1678 }
@@ -1713,6 +1724,7 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1713{ 1724{
1714 struct drm_connector *connector; 1725 struct drm_connector *connector;
1715 struct amdgpu_device *adev = dev->dev_private; 1726 struct amdgpu_device *adev = dev->dev_private;
1727 struct drm_crtc *crtc;
1716 int r; 1728 int r;
1717 1729
1718 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1730 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1746,6 +1758,24 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1746 if (r) 1758 if (r)
1747 return r; 1759 return r;
1748 1760
1761 /* pin cursors */
1762 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1763 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1764
1765 if (amdgpu_crtc->cursor_bo) {
1766 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
1767 r = amdgpu_bo_reserve(aobj, false);
1768 if (r == 0) {
1769 r = amdgpu_bo_pin(aobj,
1770 AMDGPU_GEM_DOMAIN_VRAM,
1771 &amdgpu_crtc->cursor_addr);
1772 if (r != 0)
1773 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
1774 amdgpu_bo_unreserve(aobj);
1775 }
1776 }
1777 }
1778
1749 /* blat the mode back in */ 1779 /* blat the mode back in */
1750 if (fbcon) { 1780 if (fbcon) {
1751 drm_helper_resume_force_mode(dev); 1781 drm_helper_resume_force_mode(dev);
@@ -1785,14 +1815,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
1785 int i, r; 1815 int i, r;
1786 int resched; 1816 int resched;
1787 1817
1788 down_write(&adev->exclusive_lock);
1789
1790 if (!adev->needs_reset) {
1791 up_write(&adev->exclusive_lock);
1792 return 0;
1793 }
1794
1795 adev->needs_reset = false;
1796 atomic_inc(&adev->gpu_reset_counter); 1818 atomic_inc(&adev->gpu_reset_counter);
1797 1819
1798 /* block TTM */ 1820 /* block TTM */
@@ -1856,7 +1878,6 @@ retry:
1856 dev_info(adev->dev, "GPU reset failed\n"); 1878 dev_info(adev->dev, "GPU reset failed\n");
1857 } 1879 }
1858 1880
1859 up_write(&adev->exclusive_lock);
1860 return r; 1881 return r;
1861} 1882}
1862 1883
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 6c9e0902a414..e173a5a02f0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -47,11 +47,8 @@ static void amdgpu_flip_wait_fence(struct amdgpu_device *adev,
47 fence = to_amdgpu_fence(*f); 47 fence = to_amdgpu_fence(*f);
48 if (fence) { 48 if (fence) {
49 r = fence_wait(&fence->base, false); 49 r = fence_wait(&fence->base, false);
50 if (r == -EDEADLK) { 50 if (r == -EDEADLK)
51 up_read(&adev->exclusive_lock);
52 r = amdgpu_gpu_reset(adev); 51 r = amdgpu_gpu_reset(adev);
53 down_read(&adev->exclusive_lock);
54 }
55 } else 52 } else
56 r = fence_wait(*f, false); 53 r = fence_wait(*f, false);
57 54
@@ -77,7 +74,6 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
77 unsigned long flags; 74 unsigned long flags;
78 unsigned i; 75 unsigned i;
79 76
80 down_read(&adev->exclusive_lock);
81 amdgpu_flip_wait_fence(adev, &work->excl); 77 amdgpu_flip_wait_fence(adev, &work->excl);
82 for (i = 0; i < work->shared_count; ++i) 78 for (i = 0; i < work->shared_count; ++i)
83 amdgpu_flip_wait_fence(adev, &work->shared[i]); 79 amdgpu_flip_wait_fence(adev, &work->shared[i]);
@@ -91,7 +87,6 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
91 amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; 87 amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
92 88
93 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 89 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
94 up_read(&adev->exclusive_lock);
95} 90}
96 91
97/* 92/*
@@ -715,7 +710,7 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
715 * an optional accurate timestamp of when query happened. 710 * an optional accurate timestamp of when query happened.
716 * 711 *
717 * \param dev Device to query. 712 * \param dev Device to query.
718 * \param crtc Crtc to query. 713 * \param pipe Crtc to query.
719 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). 714 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
720 * \param *vpos Location where vertical scanout position should be stored. 715 * \param *vpos Location where vertical scanout position should be stored.
721 * \param *hpos Location where horizontal scanout position should go. 716 * \param *hpos Location where horizontal scanout position should go.
@@ -738,8 +733,10 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
738 * unknown small number of scanlines wrt. real scanout position. 733 * unknown small number of scanlines wrt. real scanout position.
739 * 734 *
740 */ 735 */
741int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags, 736int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
742 int *vpos, int *hpos, ktime_t *stime, ktime_t *etime) 737 unsigned int flags, int *vpos, int *hpos,
738 ktime_t *stime, ktime_t *etime,
739 const struct drm_display_mode *mode)
743{ 740{
744 u32 vbl = 0, position = 0; 741 u32 vbl = 0, position = 0;
745 int vbl_start, vbl_end, vtotal, ret = 0; 742 int vbl_start, vbl_end, vtotal, ret = 0;
@@ -753,7 +750,7 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
753 if (stime) 750 if (stime)
754 *stime = ktime_get(); 751 *stime = ktime_get();
755 752
756 if (amdgpu_display_page_flip_get_scanoutpos(adev, crtc, &vbl, &position) == 0) 753 if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
757 ret |= DRM_SCANOUTPOS_VALID; 754 ret |= DRM_SCANOUTPOS_VALID;
758 755
759 /* Get optional system timestamp after query. */ 756 /* Get optional system timestamp after query. */
@@ -775,7 +772,7 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
775 } 772 }
776 else { 773 else {
777 /* No: Fake something reasonable which gives at least ok results. */ 774 /* No: Fake something reasonable which gives at least ok results. */
778 vbl_start = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay; 775 vbl_start = mode->crtc_vdisplay;
779 vbl_end = 0; 776 vbl_end = 0;
780 } 777 }
781 778
@@ -791,7 +788,7 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
791 788
792 /* Inside "upper part" of vblank area? Apply corrective offset if so: */ 789 /* Inside "upper part" of vblank area? Apply corrective offset if so: */
793 if (in_vbl && (*vpos >= vbl_start)) { 790 if (in_vbl && (*vpos >= vbl_start)) {
794 vtotal = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal; 791 vtotal = mode->crtc_vtotal;
795 *vpos = *vpos - vtotal; 792 *vpos = *vpos - vtotal;
796 } 793 }
797 794
@@ -813,8 +810,8 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
813 * We only do this if DRM_CALLED_FROM_VBLIRQ. 810 * We only do this if DRM_CALLED_FROM_VBLIRQ.
814 */ 811 */
815 if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) { 812 if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
816 vbl_start = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay; 813 vbl_start = mode->crtc_vdisplay;
817 vtotal = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal; 814 vtotal = mode->crtc_vtotal;
818 815
819 if (vbl_start - *vpos < vtotal / 100) { 816 if (vbl_start - *vpos < vtotal / 100) {
820 *vpos -= vtotal; 817 *vpos -= vtotal;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b190c2a83680..0508c5cd103a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -73,13 +73,15 @@ int amdgpu_hard_reset = 0;
73unsigned amdgpu_ip_block_mask = 0xffffffff; 73unsigned amdgpu_ip_block_mask = 0xffffffff;
74int amdgpu_bapm = -1; 74int amdgpu_bapm = -1;
75int amdgpu_deep_color = 0; 75int amdgpu_deep_color = 0;
76int amdgpu_vm_size = 8; 76int amdgpu_vm_size = 64;
77int amdgpu_vm_block_size = -1; 77int amdgpu_vm_block_size = -1;
78int amdgpu_vm_fault_stop = 0;
79int amdgpu_vm_debug = 0;
78int amdgpu_exp_hw_support = 0; 80int amdgpu_exp_hw_support = 0;
79int amdgpu_enable_scheduler = 0; 81int amdgpu_enable_scheduler = 1;
80int amdgpu_sched_jobs = 16; 82int amdgpu_sched_jobs = 16;
81int amdgpu_sched_hw_submission = 2; 83int amdgpu_sched_hw_submission = 2;
82int amdgpu_enable_semaphores = 1; 84int amdgpu_enable_semaphores = 0;
83 85
84MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); 86MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
85module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); 87module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -135,16 +137,22 @@ module_param_named(bapm, amdgpu_bapm, int, 0444);
135MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))"); 137MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
136module_param_named(deep_color, amdgpu_deep_color, int, 0444); 138module_param_named(deep_color, amdgpu_deep_color, int, 0444);
137 139
138MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 8GB)"); 140MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 64GB)");
139module_param_named(vm_size, amdgpu_vm_size, int, 0444); 141module_param_named(vm_size, amdgpu_vm_size, int, 0444);
140 142
141MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)"); 143MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
142module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444); 144module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);
143 145
146MODULE_PARM_DESC(vm_fault_stop, "Stop on VM fault (0 = never (default), 1 = print first, 2 = always)");
147module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
148
149MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
150module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
151
144MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))"); 152MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
145module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444); 153module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
146 154
147MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable, 0 = disable ((default))"); 155MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable (default), 0 = disable)");
148module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444); 156module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444);
149 157
150MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 16)"); 158MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 16)");
@@ -153,7 +161,7 @@ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
153MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)"); 161MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
154module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444); 162module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
155 163
156MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable (default), 0 = disable)"); 164MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable, 0 = disable (default))");
157module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644); 165module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644);
158 166
159static struct pci_device_id pciidlist[] = { 167static struct pci_device_id pciidlist[] = {
@@ -265,6 +273,8 @@ static struct pci_device_id pciidlist[] = {
265 {0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, 273 {0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
266 {0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, 274 {0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
267 {0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, 275 {0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
276 /* stoney */
277 {0x1002, 0x98E4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_STONEY|AMD_IS_APU},
268 278
269 {0, 0, 0} 279 {0, 0, 0}
270}; 280};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 96290d9cddca..093a8c618931 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -207,6 +207,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
207 } 207 }
208 208
209 info->par = rfbdev; 209 info->par = rfbdev;
210 info->skip_vt_switch = true;
210 211
211 ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj); 212 ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
212 if (ret) { 213 if (ret) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index b3fc26c59787..257d72205bb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -137,42 +137,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
137} 137}
138 138
139/** 139/**
140 * amdgpu_fence_check_signaled - callback from fence_queue
141 *
142 * this function is called with fence_queue lock held, which is also used
143 * for the fence locking itself, so unlocked variants are used for
144 * fence_signal, and remove_wait_queue.
145 */
146static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
147{
148 struct amdgpu_fence *fence;
149 struct amdgpu_device *adev;
150 u64 seq;
151 int ret;
152
153 fence = container_of(wait, struct amdgpu_fence, fence_wake);
154 adev = fence->ring->adev;
155
156 /*
157 * We cannot use amdgpu_fence_process here because we're already
158 * in the waitqueue, in a call from wake_up_all.
159 */
160 seq = atomic64_read(&fence->ring->fence_drv.last_seq);
161 if (seq >= fence->seq) {
162 ret = fence_signal_locked(&fence->base);
163 if (!ret)
164 FENCE_TRACE(&fence->base, "signaled from irq context\n");
165 else
166 FENCE_TRACE(&fence->base, "was already signaled\n");
167
168 __remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
169 fence_put(&fence->base);
170 } else
171 FENCE_TRACE(&fence->base, "pending\n");
172 return 0;
173}
174
175/**
176 * amdgpu_fence_activity - check for fence activity 140 * amdgpu_fence_activity - check for fence activity
177 * 141 *
178 * @ring: pointer to struct amdgpu_ring 142 * @ring: pointer to struct amdgpu_ring
@@ -260,27 +224,8 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
260 lockup_work.work); 224 lockup_work.work);
261 ring = fence_drv->ring; 225 ring = fence_drv->ring;
262 226
263 if (!down_read_trylock(&ring->adev->exclusive_lock)) { 227 if (amdgpu_fence_activity(ring))
264 /* just reschedule the check if a reset is going on */
265 amdgpu_fence_schedule_check(ring);
266 return;
267 }
268
269 if (amdgpu_fence_activity(ring)) {
270 wake_up_all(&ring->fence_drv.fence_queue);
271 }
272 else if (amdgpu_ring_is_lockup(ring)) {
273 /* good news we believe it's a lockup */
274 dev_warn(ring->adev->dev, "GPU lockup (current fence id "
275 "0x%016llx last fence id 0x%016llx on ring %d)\n",
276 (uint64_t)atomic64_read(&fence_drv->last_seq),
277 fence_drv->sync_seq[ring->idx], ring->idx);
278
279 /* remember that we need an reset */
280 ring->adev->needs_reset = true;
281 wake_up_all(&ring->fence_drv.fence_queue); 228 wake_up_all(&ring->fence_drv.fence_queue);
282 }
283 up_read(&ring->adev->exclusive_lock);
284} 229}
285 230
286/** 231/**
@@ -324,50 +269,6 @@ static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
324 return false; 269 return false;
325} 270}
326 271
327static bool amdgpu_fence_is_signaled(struct fence *f)
328{
329 struct amdgpu_fence *fence = to_amdgpu_fence(f);
330 struct amdgpu_ring *ring = fence->ring;
331 struct amdgpu_device *adev = ring->adev;
332
333 if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
334 return true;
335
336 if (down_read_trylock(&adev->exclusive_lock)) {
337 amdgpu_fence_process(ring);
338 up_read(&adev->exclusive_lock);
339
340 if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
341 return true;
342 }
343 return false;
344}
345
346/**
347 * amdgpu_fence_enable_signaling - enable signalling on fence
348 * @fence: fence
349 *
350 * This function is called with fence_queue lock held, and adds a callback
351 * to fence_queue that checks if this fence is signaled, and if so it
352 * signals the fence and removes itself.
353 */
354static bool amdgpu_fence_enable_signaling(struct fence *f)
355{
356 struct amdgpu_fence *fence = to_amdgpu_fence(f);
357 struct amdgpu_ring *ring = fence->ring;
358
359 if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
360 return false;
361
362 fence->fence_wake.flags = 0;
363 fence->fence_wake.private = NULL;
364 fence->fence_wake.func = amdgpu_fence_check_signaled;
365 __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
366 fence_get(f);
367 FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
368 return true;
369}
370
371/* 272/*
372 * amdgpu_ring_wait_seq_timeout - wait for seq of the specific ring to signal 273 * amdgpu_ring_wait_seq_timeout - wait for seq of the specific ring to signal
373 * @ring: ring to wait on for the seq number 274 * @ring: ring to wait on for the seq number
@@ -380,7 +281,6 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
380 */ 281 */
381static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq) 282static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
382{ 283{
383 struct amdgpu_device *adev = ring->adev;
384 bool signaled = false; 284 bool signaled = false;
385 285
386 BUG_ON(!ring); 286 BUG_ON(!ring);
@@ -390,9 +290,9 @@ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
390 if (atomic64_read(&ring->fence_drv.last_seq) >= seq) 290 if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
391 return 0; 291 return 0;
392 292
293 amdgpu_fence_schedule_check(ring);
393 wait_event(ring->fence_drv.fence_queue, ( 294 wait_event(ring->fence_drv.fence_queue, (
394 (signaled = amdgpu_fence_seq_signaled(ring, seq)) 295 (signaled = amdgpu_fence_seq_signaled(ring, seq))));
395 || adev->needs_reset));
396 296
397 if (signaled) 297 if (signaled)
398 return 0; 298 return 0;
@@ -441,36 +341,6 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
441} 341}
442 342
443/** 343/**
444 * amdgpu_fence_ref - take a ref on a fence
445 *
446 * @fence: amdgpu fence object
447 *
448 * Take a reference on a fence (all asics).
449 * Returns the fence.
450 */
451struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence)
452{
453 fence_get(&fence->base);
454 return fence;
455}
456
457/**
458 * amdgpu_fence_unref - remove a ref on a fence
459 *
460 * @fence: amdgpu fence object
461 *
462 * Remove a reference on a fence (all asics).
463 */
464void amdgpu_fence_unref(struct amdgpu_fence **fence)
465{
466 struct amdgpu_fence *tmp = *fence;
467
468 *fence = NULL;
469 if (tmp)
470 fence_put(&tmp->base);
471}
472
473/**
474 * amdgpu_fence_count_emitted - get the count of emitted fences 344 * amdgpu_fence_count_emitted - get the count of emitted fences
475 * 345 *
476 * @ring: ring the fence is associated with 346 * @ring: ring the fence is associated with
@@ -628,8 +498,20 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
628 init_waitqueue_head(&ring->fence_drv.fence_queue); 498 init_waitqueue_head(&ring->fence_drv.fence_queue);
629 499
630 if (amdgpu_enable_scheduler) { 500 if (amdgpu_enable_scheduler) {
501 long timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
502 if (timeout == 0) {
503 /*
504 * FIXME:
505 * Delayed workqueue cannot use it directly,
506 * so the scheduler will not use delayed workqueue if
507 * MAX_SCHEDULE_TIMEOUT is set.
508 * Currently keep it simple and silly.
509 */
510 timeout = MAX_SCHEDULE_TIMEOUT;
511 }
631 r = amd_sched_init(&ring->sched, &amdgpu_sched_ops, 512 r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
632 amdgpu_sched_hw_submission, ring->name); 513 amdgpu_sched_hw_submission,
514 timeout, ring->name);
633 if (r) { 515 if (r) {
634 DRM_ERROR("Failed to create scheduler on ring %s.\n", 516 DRM_ERROR("Failed to create scheduler on ring %s.\n",
635 ring->name); 517 ring->name);
@@ -773,6 +655,115 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
773 } 655 }
774} 656}
775 657
658/*
659 * Common fence implementation
660 */
661
662static const char *amdgpu_fence_get_driver_name(struct fence *fence)
663{
664 return "amdgpu";
665}
666
667static const char *amdgpu_fence_get_timeline_name(struct fence *f)
668{
669 struct amdgpu_fence *fence = to_amdgpu_fence(f);
670 return (const char *)fence->ring->name;
671}
672
673/**
674 * amdgpu_fence_is_signaled - test if fence is signaled
675 *
676 * @f: fence to test
677 *
678 * Test the fence sequence number if it is already signaled. If it isn't
679 * signaled start fence processing. Returns True if the fence is signaled.
680 */
681static bool amdgpu_fence_is_signaled(struct fence *f)
682{
683 struct amdgpu_fence *fence = to_amdgpu_fence(f);
684 struct amdgpu_ring *ring = fence->ring;
685
686 if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
687 return true;
688
689 amdgpu_fence_process(ring);
690
691 if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
692 return true;
693
694 return false;
695}
696
697/**
698 * amdgpu_fence_check_signaled - callback from fence_queue
699 *
700 * this function is called with fence_queue lock held, which is also used
701 * for the fence locking itself, so unlocked variants are used for
702 * fence_signal, and remove_wait_queue.
703 */
704static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
705{
706 struct amdgpu_fence *fence;
707 struct amdgpu_device *adev;
708 u64 seq;
709 int ret;
710
711 fence = container_of(wait, struct amdgpu_fence, fence_wake);
712 adev = fence->ring->adev;
713
714 /*
715 * We cannot use amdgpu_fence_process here because we're already
716 * in the waitqueue, in a call from wake_up_all.
717 */
718 seq = atomic64_read(&fence->ring->fence_drv.last_seq);
719 if (seq >= fence->seq) {
720 ret = fence_signal_locked(&fence->base);
721 if (!ret)
722 FENCE_TRACE(&fence->base, "signaled from irq context\n");
723 else
724 FENCE_TRACE(&fence->base, "was already signaled\n");
725
726 __remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
727 fence_put(&fence->base);
728 } else
729 FENCE_TRACE(&fence->base, "pending\n");
730 return 0;
731}
732
733/**
734 * amdgpu_fence_enable_signaling - enable signalling on fence
735 * @fence: fence
736 *
737 * This function is called with fence_queue lock held, and adds a callback
738 * to fence_queue that checks if this fence is signaled, and if so it
739 * signals the fence and removes itself.
740 */
741static bool amdgpu_fence_enable_signaling(struct fence *f)
742{
743 struct amdgpu_fence *fence = to_amdgpu_fence(f);
744 struct amdgpu_ring *ring = fence->ring;
745
746 if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
747 return false;
748
749 fence->fence_wake.flags = 0;
750 fence->fence_wake.private = NULL;
751 fence->fence_wake.func = amdgpu_fence_check_signaled;
752 __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
753 fence_get(f);
754 amdgpu_fence_schedule_check(ring);
755 FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
756 return true;
757}
758
759const struct fence_ops amdgpu_fence_ops = {
760 .get_driver_name = amdgpu_fence_get_driver_name,
761 .get_timeline_name = amdgpu_fence_get_timeline_name,
762 .enable_signaling = amdgpu_fence_enable_signaling,
763 .signaled = amdgpu_fence_is_signaled,
764 .wait = fence_default_wait,
765 .release = NULL,
766};
776 767
777/* 768/*
778 * Fence debugfs 769 * Fence debugfs
@@ -823,141 +814,3 @@ int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
823#endif 814#endif
824} 815}
825 816
826static const char *amdgpu_fence_get_driver_name(struct fence *fence)
827{
828 return "amdgpu";
829}
830
831static const char *amdgpu_fence_get_timeline_name(struct fence *f)
832{
833 struct amdgpu_fence *fence = to_amdgpu_fence(f);
834 return (const char *)fence->ring->name;
835}
836
837static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
838{
839 return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
840}
841
842static bool amdgpu_test_signaled_any(struct fence **fences, uint32_t count)
843{
844 int idx;
845 struct fence *fence;
846
847 for (idx = 0; idx < count; ++idx) {
848 fence = fences[idx];
849 if (fence) {
850 if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
851 return true;
852 }
853 }
854 return false;
855}
856
857struct amdgpu_wait_cb {
858 struct fence_cb base;
859 struct task_struct *task;
860};
861
862static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
863{
864 struct amdgpu_wait_cb *wait =
865 container_of(cb, struct amdgpu_wait_cb, base);
866 wake_up_process(wait->task);
867}
868
869static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
870 signed long t)
871{
872 struct amdgpu_fence *fence = to_amdgpu_fence(f);
873 struct amdgpu_device *adev = fence->ring->adev;
874
875 return amdgpu_fence_wait_any(adev, &f, 1, intr, t);
876}
877
878/**
879 * Wait the fence array with timeout
880 *
881 * @adev: amdgpu device
882 * @array: the fence array with amdgpu fence pointer
883 * @count: the number of the fence array
884 * @intr: when sleep, set the current task interruptable or not
885 * @t: timeout to wait
886 *
887 * It will return when any fence is signaled or timeout.
888 */
889signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
890 struct fence **array, uint32_t count,
891 bool intr, signed long t)
892{
893 struct amdgpu_wait_cb *cb;
894 struct fence *fence;
895 unsigned idx;
896
897 BUG_ON(!array);
898
899 cb = kcalloc(count, sizeof(struct amdgpu_wait_cb), GFP_KERNEL);
900 if (cb == NULL) {
901 t = -ENOMEM;
902 goto err_free_cb;
903 }
904
905 for (idx = 0; idx < count; ++idx) {
906 fence = array[idx];
907 if (fence) {
908 cb[idx].task = current;
909 if (fence_add_callback(fence,
910 &cb[idx].base, amdgpu_fence_wait_cb)) {
911 /* The fence is already signaled */
912 goto fence_rm_cb;
913 }
914 }
915 }
916
917 while (t > 0) {
918 if (intr)
919 set_current_state(TASK_INTERRUPTIBLE);
920 else
921 set_current_state(TASK_UNINTERRUPTIBLE);
922
923 /*
924 * amdgpu_test_signaled_any must be called after
925 * set_current_state to prevent a race with wake_up_process
926 */
927 if (amdgpu_test_signaled_any(array, count))
928 break;
929
930 if (adev->needs_reset) {
931 t = -EDEADLK;
932 break;
933 }
934
935 t = schedule_timeout(t);
936
937 if (t > 0 && intr && signal_pending(current))
938 t = -ERESTARTSYS;
939 }
940
941 __set_current_state(TASK_RUNNING);
942
943fence_rm_cb:
944 for (idx = 0; idx < count; ++idx) {
945 fence = array[idx];
946 if (fence && cb[idx].base.func)
947 fence_remove_callback(fence, &cb[idx].base);
948 }
949
950err_free_cb:
951 kfree(cb);
952
953 return t;
954}
955
956const struct fence_ops amdgpu_fence_ops = {
957 .get_driver_name = amdgpu_fence_get_driver_name,
958 .get_timeline_name = amdgpu_fence_get_timeline_name,
959 .enable_signaling = amdgpu_fence_enable_signaling,
960 .signaled = amdgpu_fence_is_signaled,
961 .wait = amdgpu_fence_default_wait,
962 .release = NULL,
963};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7297ca3a0ba7..087332858853 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -115,9 +115,10 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
115 struct amdgpu_vm *vm = &fpriv->vm; 115 struct amdgpu_vm *vm = &fpriv->vm;
116 struct amdgpu_bo_va *bo_va; 116 struct amdgpu_bo_va *bo_va;
117 int r; 117 int r;
118 118 mutex_lock(&vm->mutex);
119 r = amdgpu_bo_reserve(rbo, false); 119 r = amdgpu_bo_reserve(rbo, false);
120 if (r) { 120 if (r) {
121 mutex_unlock(&vm->mutex);
121 return r; 122 return r;
122 } 123 }
123 124
@@ -128,7 +129,7 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
128 ++bo_va->ref_count; 129 ++bo_va->ref_count;
129 } 130 }
130 amdgpu_bo_unreserve(rbo); 131 amdgpu_bo_unreserve(rbo);
131 132 mutex_unlock(&vm->mutex);
132 return 0; 133 return 0;
133} 134}
134 135
@@ -141,9 +142,10 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
141 struct amdgpu_vm *vm = &fpriv->vm; 142 struct amdgpu_vm *vm = &fpriv->vm;
142 struct amdgpu_bo_va *bo_va; 143 struct amdgpu_bo_va *bo_va;
143 int r; 144 int r;
144 145 mutex_lock(&vm->mutex);
145 r = amdgpu_bo_reserve(rbo, true); 146 r = amdgpu_bo_reserve(rbo, true);
146 if (r) { 147 if (r) {
148 mutex_unlock(&vm->mutex);
147 dev_err(adev->dev, "leaking bo va because " 149 dev_err(adev->dev, "leaking bo va because "
148 "we fail to reserve bo (%d)\n", r); 150 "we fail to reserve bo (%d)\n", r);
149 return; 151 return;
@@ -155,6 +157,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
155 } 157 }
156 } 158 }
157 amdgpu_bo_unreserve(rbo); 159 amdgpu_bo_unreserve(rbo);
160 mutex_unlock(&vm->mutex);
158} 161}
159 162
160static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r) 163static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
@@ -181,7 +184,6 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
181 bool kernel = false; 184 bool kernel = false;
182 int r; 185 int r;
183 186
184 down_read(&adev->exclusive_lock);
185 /* create a gem object to contain this object in */ 187 /* create a gem object to contain this object in */
186 if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS | 188 if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
187 AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) { 189 AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
@@ -214,11 +216,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
214 216
215 memset(args, 0, sizeof(*args)); 217 memset(args, 0, sizeof(*args));
216 args->out.handle = handle; 218 args->out.handle = handle;
217 up_read(&adev->exclusive_lock);
218 return 0; 219 return 0;
219 220
220error_unlock: 221error_unlock:
221 up_read(&adev->exclusive_lock);
222 r = amdgpu_gem_handle_lockup(adev, r); 222 r = amdgpu_gem_handle_lockup(adev, r);
223 return r; 223 return r;
224} 224}
@@ -250,8 +250,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
250 return -EACCES; 250 return -EACCES;
251 } 251 }
252 252
253 down_read(&adev->exclusive_lock);
254
255 /* create a gem object to contain this object in */ 253 /* create a gem object to contain this object in */
256 r = amdgpu_gem_object_create(adev, args->size, 0, 254 r = amdgpu_gem_object_create(adev, args->size, 0,
257 AMDGPU_GEM_DOMAIN_CPU, 0, 255 AMDGPU_GEM_DOMAIN_CPU, 0,
@@ -293,14 +291,12 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
293 goto handle_lockup; 291 goto handle_lockup;
294 292
295 args->handle = handle; 293 args->handle = handle;
296 up_read(&adev->exclusive_lock);
297 return 0; 294 return 0;
298 295
299release_object: 296release_object:
300 drm_gem_object_unreference_unlocked(gobj); 297 drm_gem_object_unreference_unlocked(gobj);
301 298
302handle_lockup: 299handle_lockup:
303 up_read(&adev->exclusive_lock);
304 r = amdgpu_gem_handle_lockup(adev, r); 300 r = amdgpu_gem_handle_lockup(adev, r);
305 301
306 return r; 302 return r;
@@ -488,18 +484,13 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
488 goto error_unreserve; 484 goto error_unreserve;
489 } 485 }
490 486
491 mutex_lock(&bo_va->vm->mutex);
492 r = amdgpu_vm_clear_freed(adev, bo_va->vm); 487 r = amdgpu_vm_clear_freed(adev, bo_va->vm);
493 if (r) 488 if (r)
494 goto error_unlock; 489 goto error_unreserve;
495
496 490
497 if (operation == AMDGPU_VA_OP_MAP) 491 if (operation == AMDGPU_VA_OP_MAP)
498 r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem); 492 r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
499 493
500error_unlock:
501 mutex_unlock(&bo_va->vm->mutex);
502
503error_unreserve: 494error_unreserve:
504 ttm_eu_backoff_reservation(&ticket, &list); 495 ttm_eu_backoff_reservation(&ticket, &list);
505 496
@@ -556,10 +547,11 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
556 gobj = drm_gem_object_lookup(dev, filp, args->handle); 547 gobj = drm_gem_object_lookup(dev, filp, args->handle);
557 if (gobj == NULL) 548 if (gobj == NULL)
558 return -ENOENT; 549 return -ENOENT;
559 550 mutex_lock(&fpriv->vm.mutex);
560 rbo = gem_to_amdgpu_bo(gobj); 551 rbo = gem_to_amdgpu_bo(gobj);
561 r = amdgpu_bo_reserve(rbo, false); 552 r = amdgpu_bo_reserve(rbo, false);
562 if (r) { 553 if (r) {
554 mutex_unlock(&fpriv->vm.mutex);
563 drm_gem_object_unreference_unlocked(gobj); 555 drm_gem_object_unreference_unlocked(gobj);
564 return r; 556 return r;
565 } 557 }
@@ -567,6 +559,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
567 bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo); 559 bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
568 if (!bo_va) { 560 if (!bo_va) {
569 amdgpu_bo_unreserve(rbo); 561 amdgpu_bo_unreserve(rbo);
562 mutex_unlock(&fpriv->vm.mutex);
570 return -ENOENT; 563 return -ENOENT;
571 } 564 }
572 565
@@ -591,7 +584,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
591 584
592 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) 585 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
593 amdgpu_gem_va_update_vm(adev, bo_va, args->operation); 586 amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
594 587 mutex_unlock(&fpriv->vm.mutex);
595 drm_gem_object_unreference_unlocked(gobj); 588 drm_gem_object_unreference_unlocked(gobj);
596 return r; 589 return r;
597} 590}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index c439735ee670..e65987743871 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -95,7 +95,8 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
95{ 95{
96 amdgpu_sync_free(adev, &ib->sync, &ib->fence->base); 96 amdgpu_sync_free(adev, &ib->sync, &ib->fence->base);
97 amdgpu_sa_bo_free(adev, &ib->sa_bo, &ib->fence->base); 97 amdgpu_sa_bo_free(adev, &ib->sa_bo, &ib->fence->base);
98 amdgpu_fence_unref(&ib->fence); 98 if (ib->fence)
99 fence_put(&ib->fence->base);
99} 100}
100 101
101/** 102/**
@@ -298,7 +299,6 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
298 r = amdgpu_ring_test_ib(ring); 299 r = amdgpu_ring_test_ib(ring);
299 if (r) { 300 if (r) {
300 ring->ready = false; 301 ring->ready = false;
301 adev->needs_reset = false;
302 302
303 if (ring == &adev->gfx.gfx_ring[0]) { 303 if (ring == &adev->gfx.gfx_ring[0]) {
304 /* oh, oh, that's really bad */ 304 /* oh, oh, that's really bad */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 5d11e798230c..1618e2294a16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -218,8 +218,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
218 break; 218 break;
219 case AMDGPU_HW_IP_DMA: 219 case AMDGPU_HW_IP_DMA:
220 type = AMD_IP_BLOCK_TYPE_SDMA; 220 type = AMD_IP_BLOCK_TYPE_SDMA;
221 ring_mask = adev->sdma[0].ring.ready ? 1 : 0; 221 for (i = 0; i < adev->sdma.num_instances; i++)
222 ring_mask |= ((adev->sdma[1].ring.ready ? 1 : 0) << 1); 222 ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
223 ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; 223 ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
224 ib_size_alignment = 1; 224 ib_size_alignment = 1;
225 break; 225 break;
@@ -341,10 +341,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
341 fw_info.feature = 0; 341 fw_info.feature = 0;
342 break; 342 break;
343 case AMDGPU_INFO_FW_SDMA: 343 case AMDGPU_INFO_FW_SDMA:
344 if (info->query_fw.index >= 2) 344 if (info->query_fw.index >= adev->sdma.num_instances)
345 return -EINVAL; 345 return -EINVAL;
346 fw_info.ver = adev->sdma[info->query_fw.index].fw_version; 346 fw_info.ver = adev->sdma.instance[info->query_fw.index].fw_version;
347 fw_info.feature = adev->sdma[info->query_fw.index].feature_version; 347 fw_info.feature = adev->sdma.instance[info->query_fw.index].feature_version;
348 break; 348 break;
349 default: 349 default:
350 return -EINVAL; 350 return -EINVAL;
@@ -489,7 +489,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
489 * 489 *
490 * @dev: drm dev pointer 490 * @dev: drm dev pointer
491 * 491 *
492 * Switch vga switcheroo state after last close (all asics). 492 * Switch vga_switcheroo state after last close (all asics).
493 */ 493 */
494void amdgpu_driver_lastclose_kms(struct drm_device *dev) 494void amdgpu_driver_lastclose_kms(struct drm_device *dev)
495{ 495{
@@ -603,36 +603,36 @@ void amdgpu_driver_preclose_kms(struct drm_device *dev,
603 * amdgpu_get_vblank_counter_kms - get frame count 603 * amdgpu_get_vblank_counter_kms - get frame count
604 * 604 *
605 * @dev: drm dev pointer 605 * @dev: drm dev pointer
606 * @crtc: crtc to get the frame count from 606 * @pipe: crtc to get the frame count from
607 * 607 *
608 * Gets the frame count on the requested crtc (all asics). 608 * Gets the frame count on the requested crtc (all asics).
609 * Returns frame count on success, -EINVAL on failure. 609 * Returns frame count on success, -EINVAL on failure.
610 */ 610 */
611u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, int crtc) 611u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
612{ 612{
613 struct amdgpu_device *adev = dev->dev_private; 613 struct amdgpu_device *adev = dev->dev_private;
614 614
615 if (crtc < 0 || crtc >= adev->mode_info.num_crtc) { 615 if (pipe >= adev->mode_info.num_crtc) {
616 DRM_ERROR("Invalid crtc %d\n", crtc); 616 DRM_ERROR("Invalid crtc %u\n", pipe);
617 return -EINVAL; 617 return -EINVAL;
618 } 618 }
619 619
620 return amdgpu_display_vblank_get_counter(adev, crtc); 620 return amdgpu_display_vblank_get_counter(adev, pipe);
621} 621}
622 622
623/** 623/**
624 * amdgpu_enable_vblank_kms - enable vblank interrupt 624 * amdgpu_enable_vblank_kms - enable vblank interrupt
625 * 625 *
626 * @dev: drm dev pointer 626 * @dev: drm dev pointer
627 * @crtc: crtc to enable vblank interrupt for 627 * @pipe: crtc to enable vblank interrupt for
628 * 628 *
629 * Enable the interrupt on the requested crtc (all asics). 629 * Enable the interrupt on the requested crtc (all asics).
630 * Returns 0 on success, -EINVAL on failure. 630 * Returns 0 on success, -EINVAL on failure.
631 */ 631 */
632int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc) 632int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
633{ 633{
634 struct amdgpu_device *adev = dev->dev_private; 634 struct amdgpu_device *adev = dev->dev_private;
635 int idx = amdgpu_crtc_idx_to_irq_type(adev, crtc); 635 int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe);
636 636
637 return amdgpu_irq_get(adev, &adev->crtc_irq, idx); 637 return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
638} 638}
@@ -641,14 +641,14 @@ int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc)
641 * amdgpu_disable_vblank_kms - disable vblank interrupt 641 * amdgpu_disable_vblank_kms - disable vblank interrupt
642 * 642 *
643 * @dev: drm dev pointer 643 * @dev: drm dev pointer
644 * @crtc: crtc to disable vblank interrupt for 644 * @pipe: crtc to disable vblank interrupt for
645 * 645 *
646 * Disable the interrupt on the requested crtc (all asics). 646 * Disable the interrupt on the requested crtc (all asics).
647 */ 647 */
648void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc) 648void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
649{ 649{
650 struct amdgpu_device *adev = dev->dev_private; 650 struct amdgpu_device *adev = dev->dev_private;
651 int idx = amdgpu_crtc_idx_to_irq_type(adev, crtc); 651 int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe);
652 652
653 amdgpu_irq_put(adev, &adev->crtc_irq, idx); 653 amdgpu_irq_put(adev, &adev->crtc_irq, idx);
654} 654}
@@ -666,41 +666,41 @@ void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc)
666 * scanout position. (all asics). 666 * scanout position. (all asics).
667 * Returns postive status flags on success, negative error on failure. 667 * Returns postive status flags on success, negative error on failure.
668 */ 668 */
669int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, 669int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
670 int *max_error, 670 int *max_error,
671 struct timeval *vblank_time, 671 struct timeval *vblank_time,
672 unsigned flags) 672 unsigned flags)
673{ 673{
674 struct drm_crtc *drmcrtc; 674 struct drm_crtc *crtc;
675 struct amdgpu_device *adev = dev->dev_private; 675 struct amdgpu_device *adev = dev->dev_private;
676 676
677 if (crtc < 0 || crtc >= dev->num_crtcs) { 677 if (pipe >= dev->num_crtcs) {
678 DRM_ERROR("Invalid crtc %d\n", crtc); 678 DRM_ERROR("Invalid crtc %u\n", pipe);
679 return -EINVAL; 679 return -EINVAL;
680 } 680 }
681 681
682 /* Get associated drm_crtc: */ 682 /* Get associated drm_crtc: */
683 drmcrtc = &adev->mode_info.crtcs[crtc]->base; 683 crtc = &adev->mode_info.crtcs[pipe]->base;
684 684
685 /* Helper routine in DRM core does all the work: */ 685 /* Helper routine in DRM core does all the work: */
686 return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, 686 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
687 vblank_time, flags, 687 vblank_time, flags,
688 drmcrtc, &drmcrtc->hwmode); 688 &crtc->hwmode);
689} 689}
690 690
691const struct drm_ioctl_desc amdgpu_ioctls_kms[] = { 691const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
692 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 692 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
693 DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 693 DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
694 DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 694 DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
695 /* KMS */ 695 /* KMS */
696 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 696 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
697 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 697 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
698 DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 698 DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
699 DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 699 DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
700 DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 700 DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
701 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 701 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
702 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 702 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
703 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 703 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
704 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 704 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
705}; 705};
706int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms); 706int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 7bd470d9ac30..b62c1710cab6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -373,6 +373,10 @@ struct amdgpu_crtc {
373 uint32_t crtc_offset; 373 uint32_t crtc_offset;
374 struct drm_gem_object *cursor_bo; 374 struct drm_gem_object *cursor_bo;
375 uint64_t cursor_addr; 375 uint64_t cursor_addr;
376 int cursor_x;
377 int cursor_y;
378 int cursor_hot_x;
379 int cursor_hot_y;
376 int cursor_width; 380 int cursor_width;
377 int cursor_height; 381 int cursor_height;
378 int max_cursor_width; 382 int max_cursor_width;
@@ -540,10 +544,10 @@ bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector, bool use_aux);
540 544
541void amdgpu_encoder_set_active_device(struct drm_encoder *encoder); 545void amdgpu_encoder_set_active_device(struct drm_encoder *encoder);
542 546
543int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc, 547int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
544 unsigned int flags, 548 unsigned int flags, int *vpos, int *hpos,
545 int *vpos, int *hpos, ktime_t *stime, 549 ktime_t *stime, ktime_t *etime,
546 ktime_t *etime); 550 const struct drm_display_mode *mode);
547 551
548int amdgpu_framebuffer_init(struct drm_device *dev, 552int amdgpu_framebuffer_init(struct drm_device *dev,
549 struct amdgpu_framebuffer *rfb, 553 struct amdgpu_framebuffer *rfb,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 1a7708f365f3..0d524384ff79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -132,6 +132,8 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
132 placements[c].fpfn = 0; 132 placements[c].fpfn = 0;
133 placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | 133 placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
134 TTM_PL_FLAG_VRAM; 134 TTM_PL_FLAG_VRAM;
135 if (!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED))
136 placements[c - 1].flags |= TTM_PL_FLAG_TOPDOWN;
135 } 137 }
136 138
137 if (domain & AMDGPU_GEM_DOMAIN_GTT) { 139 if (domain & AMDGPU_GEM_DOMAIN_GTT) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 30dce235ddeb..78e9b0f14661 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -67,8 +67,6 @@ void amdgpu_ring_free_size(struct amdgpu_ring *ring)
67 if (!ring->ring_free_dw) { 67 if (!ring->ring_free_dw) {
68 /* this is an empty ring */ 68 /* this is an empty ring */
69 ring->ring_free_dw = ring->ring_size / 4; 69 ring->ring_free_dw = ring->ring_size / 4;
70 /* update lockup info to avoid false positive */
71 amdgpu_ring_lockup_update(ring);
72 } 70 }
73} 71}
74 72
@@ -209,46 +207,6 @@ void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring)
209} 207}
210 208
211/** 209/**
212 * amdgpu_ring_lockup_update - update lockup variables
213 *
214 * @ring: amdgpu_ring structure holding ring information
215 *
216 * Update the last rptr value and timestamp (all asics).
217 */
218void amdgpu_ring_lockup_update(struct amdgpu_ring *ring)
219{
220 atomic_set(&ring->last_rptr, amdgpu_ring_get_rptr(ring));
221 atomic64_set(&ring->last_activity, jiffies_64);
222}
223
224/**
225 * amdgpu_ring_test_lockup() - check if ring is lockedup by recording information
226 * @ring: amdgpu_ring structure holding ring information
227 *
228 */
229bool amdgpu_ring_test_lockup(struct amdgpu_ring *ring)
230{
231 uint32_t rptr = amdgpu_ring_get_rptr(ring);
232 uint64_t last = atomic64_read(&ring->last_activity);
233 uint64_t elapsed;
234
235 if (rptr != atomic_read(&ring->last_rptr)) {
236 /* ring is still working, no lockup */
237 amdgpu_ring_lockup_update(ring);
238 return false;
239 }
240
241 elapsed = jiffies_to_msecs(jiffies_64 - last);
242 if (amdgpu_lockup_timeout && elapsed >= amdgpu_lockup_timeout) {
243 dev_err(ring->adev->dev, "ring %d stalled for more than %llumsec\n",
244 ring->idx, elapsed);
245 return true;
246 }
247 /* give a chance to the GPU ... */
248 return false;
249}
250
251/**
252 * amdgpu_ring_backup - Back up the content of a ring 210 * amdgpu_ring_backup - Back up the content of a ring
253 * 211 *
254 * @ring: the ring we want to back up 212 * @ring: the ring we want to back up
@@ -436,7 +394,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
436 if (amdgpu_debugfs_ring_init(adev, ring)) { 394 if (amdgpu_debugfs_ring_init(adev, ring)) {
437 DRM_ERROR("Failed to register debugfs file for rings !\n"); 395 DRM_ERROR("Failed to register debugfs file for rings !\n");
438 } 396 }
439 amdgpu_ring_lockup_update(ring);
440 return 0; 397 return 0;
441} 398}
442 399
@@ -479,6 +436,30 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
479 } 436 }
480} 437}
481 438
439/**
440 * amdgpu_ring_from_fence - get ring from fence
441 *
442 * @f: fence structure
443 *
444 * Extract the ring a fence belongs to. Handles both scheduler as
445 * well as hardware fences.
446 */
447struct amdgpu_ring *amdgpu_ring_from_fence(struct fence *f)
448{
449 struct amdgpu_fence *a_fence;
450 struct amd_sched_fence *s_fence;
451
452 s_fence = to_amd_sched_fence(f);
453 if (s_fence)
454 return container_of(s_fence->sched, struct amdgpu_ring, sched);
455
456 a_fence = to_amdgpu_fence(f);
457 if (a_fence)
458 return a_fence->ring;
459
460 return NULL;
461}
462
482/* 463/*
483 * Debugfs info 464 * Debugfs info
484 */ 465 */
@@ -540,8 +521,8 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
540static int amdgpu_gfx_index = offsetof(struct amdgpu_device, gfx.gfx_ring[0]); 521static int amdgpu_gfx_index = offsetof(struct amdgpu_device, gfx.gfx_ring[0]);
541static int cayman_cp1_index = offsetof(struct amdgpu_device, gfx.compute_ring[0]); 522static int cayman_cp1_index = offsetof(struct amdgpu_device, gfx.compute_ring[0]);
542static int cayman_cp2_index = offsetof(struct amdgpu_device, gfx.compute_ring[1]); 523static int cayman_cp2_index = offsetof(struct amdgpu_device, gfx.compute_ring[1]);
543static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma[0].ring); 524static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma.instance[0].ring);
544static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma[1].ring); 525static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma.instance[1].ring);
545static int r600_uvd_index = offsetof(struct amdgpu_device, uvd.ring); 526static int r600_uvd_index = offsetof(struct amdgpu_device, uvd.ring);
546static int si_vce1_index = offsetof(struct amdgpu_device, vce.ring[0]); 527static int si_vce1_index = offsetof(struct amdgpu_device, vce.ring[0]);
547static int si_vce2_index = offsetof(struct amdgpu_device, vce.ring[1]); 528static int si_vce2_index = offsetof(struct amdgpu_device, vce.ring[1]);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index e90712443fe9..0212b31dc194 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -139,25 +139,6 @@ int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
139 return r; 139 return r;
140} 140}
141 141
142static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
143{
144 struct amdgpu_fence *a_fence;
145 struct amd_sched_fence *s_fence;
146
147 s_fence = to_amd_sched_fence(f);
148 if (s_fence) {
149 struct amdgpu_ring *ring;
150
151 ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
152 return ring->idx;
153 }
154
155 a_fence = to_amdgpu_fence(f);
156 if (a_fence)
157 return a_fence->ring->idx;
158 return 0;
159}
160
161static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo) 142static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
162{ 143{
163 struct amdgpu_sa_manager *sa_manager = sa_bo->manager; 144 struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
@@ -318,7 +299,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
318 } 299 }
319 300
320 if (best_bo) { 301 if (best_bo) {
321 uint32_t idx = amdgpu_sa_get_ring_from_fence(best_bo->fence); 302 uint32_t idx = amdgpu_ring_from_fence(best_bo->fence)->idx;
322 ++tries[idx]; 303 ++tries[idx];
323 sa_manager->hole = best_bo->olist.prev; 304 sa_manager->hole = best_bo->olist.prev;
324 305
@@ -337,6 +318,7 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
337{ 318{
338 struct fence *fences[AMDGPU_MAX_RINGS]; 319 struct fence *fences[AMDGPU_MAX_RINGS];
339 unsigned tries[AMDGPU_MAX_RINGS]; 320 unsigned tries[AMDGPU_MAX_RINGS];
321 unsigned count;
340 int i, r; 322 int i, r;
341 signed long t; 323 signed long t;
342 324
@@ -371,13 +353,18 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
371 /* see if we can skip over some allocations */ 353 /* see if we can skip over some allocations */
372 } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries)); 354 } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
373 355
374 spin_unlock(&sa_manager->wq.lock); 356 for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
375 t = amdgpu_fence_wait_any(adev, fences, AMDGPU_MAX_RINGS, 357 if (fences[i])
376 false, MAX_SCHEDULE_TIMEOUT); 358 fences[count++] = fences[i];
377 r = (t > 0) ? 0 : t; 359
378 spin_lock(&sa_manager->wq.lock); 360 if (count) {
379 /* if we have nothing to wait for block */ 361 spin_unlock(&sa_manager->wq.lock);
380 if (r == -ENOENT) { 362 t = fence_wait_any_timeout(fences, count, false,
363 MAX_SCHEDULE_TIMEOUT);
364 r = (t > 0) ? 0 : t;
365 spin_lock(&sa_manager->wq.lock);
366 } else {
367 /* if we have nothing to wait for block */
381 r = wait_event_interruptible_locked( 368 r = wait_event_interruptible_locked(
382 sa_manager->wq, 369 sa_manager->wq,
383 amdgpu_sa_event(sa_manager, size, align) 370 amdgpu_sa_event(sa_manager, size, align)
@@ -406,7 +393,7 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
406 if (fence && !fence_is_signaled(fence)) { 393 if (fence && !fence_is_signaled(fence)) {
407 uint32_t idx; 394 uint32_t idx;
408 (*sa_bo)->fence = fence_get(fence); 395 (*sa_bo)->fence = fence_get(fence);
409 idx = amdgpu_sa_get_ring_from_fence(fence); 396 idx = amdgpu_ring_from_fence(fence)->idx;
410 list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]); 397 list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
411 } else { 398 } else {
412 amdgpu_sa_bo_remove_locked(*sa_bo); 399 amdgpu_sa_bo_remove_locked(*sa_bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 2e946b2cad88..dcf4a8aca680 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -54,7 +54,8 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
54 goto err; 54 goto err;
55 } 55 }
56 56
57 fence = amdgpu_fence_ref(job->ibs[job->num_ibs - 1].fence); 57 fence = job->ibs[job->num_ibs - 1].fence;
58 fence_get(&fence->base);
58 59
59err: 60err:
60 if (job->free_job) 61 if (job->free_job)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 4921de15b451..a6697fd05217 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -87,6 +87,15 @@ static bool amdgpu_sync_test_owner(struct fence *f, void *owner)
87 return false; 87 return false;
88} 88}
89 89
90static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
91{
92 if (*keep && fence_is_later(*keep, fence))
93 return;
94
95 fence_put(*keep);
96 *keep = fence_get(fence);
97}
98
90/** 99/**
91 * amdgpu_sync_fence - remember to sync to this fence 100 * amdgpu_sync_fence - remember to sync to this fence
92 * 101 *
@@ -99,35 +108,21 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
99{ 108{
100 struct amdgpu_sync_entry *e; 109 struct amdgpu_sync_entry *e;
101 struct amdgpu_fence *fence; 110 struct amdgpu_fence *fence;
102 struct amdgpu_fence *other;
103 struct fence *tmp, *later;
104 111
105 if (!f) 112 if (!f)
106 return 0; 113 return 0;
107 114
108 if (amdgpu_sync_same_dev(adev, f) && 115 if (amdgpu_sync_same_dev(adev, f) &&
109 amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM)) { 116 amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM))
110 if (sync->last_vm_update) { 117 amdgpu_sync_keep_later(&sync->last_vm_update, f);
111 tmp = sync->last_vm_update;
112 BUG_ON(f->context != tmp->context);
113 later = (f->seqno - tmp->seqno <= INT_MAX) ? f : tmp;
114 sync->last_vm_update = fence_get(later);
115 fence_put(tmp);
116 } else
117 sync->last_vm_update = fence_get(f);
118 }
119 118
120 fence = to_amdgpu_fence(f); 119 fence = to_amdgpu_fence(f);
121 if (!fence || fence->ring->adev != adev) { 120 if (!fence || fence->ring->adev != adev) {
122 hash_for_each_possible(sync->fences, e, node, f->context) { 121 hash_for_each_possible(sync->fences, e, node, f->context) {
123 struct fence *new;
124 if (unlikely(e->fence->context != f->context)) 122 if (unlikely(e->fence->context != f->context))
125 continue; 123 continue;
126 new = fence_get(fence_later(e->fence, f)); 124
127 if (new) { 125 amdgpu_sync_keep_later(&e->fence, f);
128 fence_put(e->fence);
129 e->fence = new;
130 }
131 return 0; 126 return 0;
132 } 127 }
133 128
@@ -140,10 +135,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
140 return 0; 135 return 0;
141 } 136 }
142 137
143 other = sync->sync_to[fence->ring->idx]; 138 amdgpu_sync_keep_later(&sync->sync_to[fence->ring->idx], f);
144 sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
145 amdgpu_fence_later(fence, other));
146 amdgpu_fence_unref(&other);
147 139
148 return 0; 140 return 0;
149} 141}
@@ -199,8 +191,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
199 * for other VM updates and moves. 191 * for other VM updates and moves.
200 */ 192 */
201 fence_owner = amdgpu_sync_get_owner(f); 193 fence_owner = amdgpu_sync_get_owner(f);
202 if ((owner != AMDGPU_FENCE_OWNER_MOVE) && 194 if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
203 (fence_owner != AMDGPU_FENCE_OWNER_MOVE) && 195 (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
204 ((owner == AMDGPU_FENCE_OWNER_VM) != 196 ((owner == AMDGPU_FENCE_OWNER_VM) !=
205 (fence_owner == AMDGPU_FENCE_OWNER_VM))) 197 (fence_owner == AMDGPU_FENCE_OWNER_VM)))
206 continue; 198 continue;
@@ -262,11 +254,11 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync)
262 return 0; 254 return 0;
263 255
264 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 256 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
265 struct amdgpu_fence *fence = sync->sync_to[i]; 257 struct fence *fence = sync->sync_to[i];
266 if (!fence) 258 if (!fence)
267 continue; 259 continue;
268 260
269 r = fence_wait(&fence->base, false); 261 r = fence_wait(fence, false);
270 if (r) 262 if (r)
271 return r; 263 return r;
272 } 264 }
@@ -291,9 +283,14 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
291 int i, r; 283 int i, r;
292 284
293 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 285 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
294 struct amdgpu_fence *fence = sync->sync_to[i];
295 struct amdgpu_semaphore *semaphore;
296 struct amdgpu_ring *other = adev->rings[i]; 286 struct amdgpu_ring *other = adev->rings[i];
287 struct amdgpu_semaphore *semaphore;
288 struct amdgpu_fence *fence;
289
290 if (!sync->sync_to[i])
291 continue;
292
293 fence = to_amdgpu_fence(sync->sync_to[i]);
297 294
298 /* check if we really need to sync */ 295 /* check if we really need to sync */
299 if (!amdgpu_fence_need_sync(fence, ring)) 296 if (!amdgpu_fence_need_sync(fence, ring))
@@ -378,7 +375,7 @@ void amdgpu_sync_free(struct amdgpu_device *adev,
378 amdgpu_semaphore_free(adev, &sync->semaphores[i], fence); 375 amdgpu_semaphore_free(adev, &sync->semaphores[i], fence);
379 376
380 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) 377 for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
381 amdgpu_fence_unref(&sync->sync_to[i]); 378 fence_put(sync->sync_to[i]);
382 379
383 fence_put(sync->last_vm_update); 380 fence_put(sync->last_vm_update);
384} 381}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 961d7265c286..76ecbaf72a2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -111,7 +111,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
111 __entry->offset, __entry->flags) 111 __entry->offset, __entry->flags)
112); 112);
113 113
114TRACE_EVENT(amdgpu_vm_bo_update, 114DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
115 TP_PROTO(struct amdgpu_bo_va_mapping *mapping), 115 TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
116 TP_ARGS(mapping), 116 TP_ARGS(mapping),
117 TP_STRUCT__entry( 117 TP_STRUCT__entry(
@@ -129,6 +129,16 @@ TRACE_EVENT(amdgpu_vm_bo_update,
129 __entry->soffset, __entry->eoffset, __entry->flags) 129 __entry->soffset, __entry->eoffset, __entry->flags)
130); 130);
131 131
132DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_update,
133 TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
134 TP_ARGS(mapping)
135);
136
137DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
138 TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
139 TP_ARGS(mapping)
140);
141
132TRACE_EVENT(amdgpu_vm_set_page, 142TRACE_EVENT(amdgpu_vm_set_page,
133 TP_PROTO(uint64_t pe, uint64_t addr, unsigned count, 143 TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
134 uint32_t incr, uint32_t flags), 144 uint32_t incr, uint32_t flags),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 364cbe975332..81bb8e9fc26d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1041,7 +1041,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
1041 WARN_ON(ib->length_dw > num_dw); 1041 WARN_ON(ib->length_dw > num_dw);
1042 r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, 1042 r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
1043 &amdgpu_vm_free_job, 1043 &amdgpu_vm_free_job,
1044 AMDGPU_FENCE_OWNER_MOVE, 1044 AMDGPU_FENCE_OWNER_UNDEFINED,
1045 fence); 1045 fence);
1046 if (r) 1046 if (r)
1047 goto error_free; 1047 goto error_free;
@@ -1072,6 +1072,11 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
1072 spin_lock(&glob->lru_lock); 1072 spin_lock(&glob->lru_lock);
1073 ret = drm_mm_dump_table(m, mm); 1073 ret = drm_mm_dump_table(m, mm);
1074 spin_unlock(&glob->lru_lock); 1074 spin_unlock(&glob->lru_lock);
1075 if (ttm_pl == TTM_PL_VRAM)
1076 seq_printf(m, "man size:%llu pages, ram usage:%luMB, vis usage:%luMB\n",
1077 adev->mman.bdev.man[ttm_pl].size,
1078 atomic64_read(&adev->vram_usage) >> 20,
1079 atomic64_read(&adev->vram_vis_usage) >> 20);
1075 return ret; 1080 return ret;
1076} 1081}
1077 1082
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index d0312364d950..53f987aeeacf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -53,6 +53,7 @@
53#define FIRMWARE_TONGA "amdgpu/tonga_uvd.bin" 53#define FIRMWARE_TONGA "amdgpu/tonga_uvd.bin"
54#define FIRMWARE_CARRIZO "amdgpu/carrizo_uvd.bin" 54#define FIRMWARE_CARRIZO "amdgpu/carrizo_uvd.bin"
55#define FIRMWARE_FIJI "amdgpu/fiji_uvd.bin" 55#define FIRMWARE_FIJI "amdgpu/fiji_uvd.bin"
56#define FIRMWARE_STONEY "amdgpu/stoney_uvd.bin"
56 57
57/** 58/**
58 * amdgpu_uvd_cs_ctx - Command submission parser context 59 * amdgpu_uvd_cs_ctx - Command submission parser context
@@ -83,6 +84,7 @@ MODULE_FIRMWARE(FIRMWARE_MULLINS);
83MODULE_FIRMWARE(FIRMWARE_TONGA); 84MODULE_FIRMWARE(FIRMWARE_TONGA);
84MODULE_FIRMWARE(FIRMWARE_CARRIZO); 85MODULE_FIRMWARE(FIRMWARE_CARRIZO);
85MODULE_FIRMWARE(FIRMWARE_FIJI); 86MODULE_FIRMWARE(FIRMWARE_FIJI);
87MODULE_FIRMWARE(FIRMWARE_STONEY);
86 88
87static void amdgpu_uvd_note_usage(struct amdgpu_device *adev); 89static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
88static void amdgpu_uvd_idle_work_handler(struct work_struct *work); 90static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
@@ -124,6 +126,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
124 case CHIP_CARRIZO: 126 case CHIP_CARRIZO:
125 fw_name = FIRMWARE_CARRIZO; 127 fw_name = FIRMWARE_CARRIZO;
126 break; 128 break;
129 case CHIP_STONEY:
130 fw_name = FIRMWARE_STONEY;
131 break;
127 default: 132 default:
128 return -EINVAL; 133 return -EINVAL;
129 } 134 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 74f2038ac747..03f0c3bae516 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -49,6 +49,7 @@
49#define FIRMWARE_TONGA "amdgpu/tonga_vce.bin" 49#define FIRMWARE_TONGA "amdgpu/tonga_vce.bin"
50#define FIRMWARE_CARRIZO "amdgpu/carrizo_vce.bin" 50#define FIRMWARE_CARRIZO "amdgpu/carrizo_vce.bin"
51#define FIRMWARE_FIJI "amdgpu/fiji_vce.bin" 51#define FIRMWARE_FIJI "amdgpu/fiji_vce.bin"
52#define FIRMWARE_STONEY "amdgpu/stoney_vce.bin"
52 53
53#ifdef CONFIG_DRM_AMDGPU_CIK 54#ifdef CONFIG_DRM_AMDGPU_CIK
54MODULE_FIRMWARE(FIRMWARE_BONAIRE); 55MODULE_FIRMWARE(FIRMWARE_BONAIRE);
@@ -60,6 +61,7 @@ MODULE_FIRMWARE(FIRMWARE_MULLINS);
60MODULE_FIRMWARE(FIRMWARE_TONGA); 61MODULE_FIRMWARE(FIRMWARE_TONGA);
61MODULE_FIRMWARE(FIRMWARE_CARRIZO); 62MODULE_FIRMWARE(FIRMWARE_CARRIZO);
62MODULE_FIRMWARE(FIRMWARE_FIJI); 63MODULE_FIRMWARE(FIRMWARE_FIJI);
64MODULE_FIRMWARE(FIRMWARE_STONEY);
63 65
64static void amdgpu_vce_idle_work_handler(struct work_struct *work); 66static void amdgpu_vce_idle_work_handler(struct work_struct *work);
65 67
@@ -106,6 +108,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
106 case CHIP_FIJI: 108 case CHIP_FIJI:
107 fw_name = FIRMWARE_FIJI; 109 fw_name = FIRMWARE_FIJI;
108 break; 110 break;
111 case CHIP_STONEY:
112 fw_name = FIRMWARE_STONEY;
113 break;
109 114
110 default: 115 default:
111 return -EINVAL; 116 return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 53d551f2d839..633a32a48560 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -90,11 +90,9 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
90 struct amdgpu_bo_list_entry *list; 90 struct amdgpu_bo_list_entry *list;
91 unsigned i, idx; 91 unsigned i, idx;
92 92
93 mutex_lock(&vm->mutex);
94 list = drm_malloc_ab(vm->max_pde_used + 2, 93 list = drm_malloc_ab(vm->max_pde_used + 2,
95 sizeof(struct amdgpu_bo_list_entry)); 94 sizeof(struct amdgpu_bo_list_entry));
96 if (!list) { 95 if (!list) {
97 mutex_unlock(&vm->mutex);
98 return NULL; 96 return NULL;
99 } 97 }
100 98
@@ -119,7 +117,6 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
119 list[idx].tv.shared = true; 117 list[idx].tv.shared = true;
120 list_add(&list[idx++].tv.head, head); 118 list_add(&list[idx++].tv.head, head);
121 } 119 }
122 mutex_unlock(&vm->mutex);
123 120
124 return list; 121 return list;
125} 122}
@@ -138,7 +135,7 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
138int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, 135int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
139 struct amdgpu_sync *sync) 136 struct amdgpu_sync *sync)
140{ 137{
141 struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {}; 138 struct fence *best[AMDGPU_MAX_RINGS] = {};
142 struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; 139 struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
143 struct amdgpu_device *adev = ring->adev; 140 struct amdgpu_device *adev = ring->adev;
144 141
@@ -147,15 +144,18 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
147 144
148 /* check if the id is still valid */ 145 /* check if the id is still valid */
149 if (vm_id->id && vm_id->last_id_use && 146 if (vm_id->id && vm_id->last_id_use &&
150 vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) 147 vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) {
148 trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
151 return 0; 149 return 0;
150 }
152 151
153 /* we definately need to flush */ 152 /* we definately need to flush */
154 vm_id->pd_gpu_addr = ~0ll; 153 vm_id->pd_gpu_addr = ~0ll;
155 154
156 /* skip over VMID 0, since it is the system VM */ 155 /* skip over VMID 0, since it is the system VM */
157 for (i = 1; i < adev->vm_manager.nvm; ++i) { 156 for (i = 1; i < adev->vm_manager.nvm; ++i) {
158 struct amdgpu_fence *fence = adev->vm_manager.active[i]; 157 struct fence *fence = adev->vm_manager.active[i];
158 struct amdgpu_ring *fring;
159 159
160 if (fence == NULL) { 160 if (fence == NULL) {
161 /* found a free one */ 161 /* found a free one */
@@ -164,21 +164,23 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
164 return 0; 164 return 0;
165 } 165 }
166 166
167 if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) { 167 fring = amdgpu_ring_from_fence(fence);
168 best[fence->ring->idx] = fence; 168 if (best[fring->idx] == NULL ||
169 choices[fence->ring == ring ? 0 : 1] = i; 169 fence_is_later(best[fring->idx], fence)) {
170 best[fring->idx] = fence;
171 choices[fring == ring ? 0 : 1] = i;
170 } 172 }
171 } 173 }
172 174
173 for (i = 0; i < 2; ++i) { 175 for (i = 0; i < 2; ++i) {
174 if (choices[i]) { 176 if (choices[i]) {
175 struct amdgpu_fence *fence; 177 struct fence *fence;
176 178
177 fence = adev->vm_manager.active[choices[i]]; 179 fence = adev->vm_manager.active[choices[i]];
178 vm_id->id = choices[i]; 180 vm_id->id = choices[i];
179 181
180 trace_amdgpu_vm_grab_id(choices[i], ring->idx); 182 trace_amdgpu_vm_grab_id(choices[i], ring->idx);
181 return amdgpu_sync_fence(ring->adev, sync, &fence->base); 183 return amdgpu_sync_fence(ring->adev, sync, fence);
182 } 184 }
183 } 185 }
184 186
@@ -247,11 +249,11 @@ void amdgpu_vm_fence(struct amdgpu_device *adev,
247 unsigned ridx = fence->ring->idx; 249 unsigned ridx = fence->ring->idx;
248 unsigned vm_id = vm->ids[ridx].id; 250 unsigned vm_id = vm->ids[ridx].id;
249 251
250 amdgpu_fence_unref(&adev->vm_manager.active[vm_id]); 252 fence_put(adev->vm_manager.active[vm_id]);
251 adev->vm_manager.active[vm_id] = amdgpu_fence_ref(fence); 253 adev->vm_manager.active[vm_id] = fence_get(&fence->base);
252 254
253 amdgpu_fence_unref(&vm->ids[ridx].last_id_use); 255 fence_put(vm->ids[ridx].last_id_use);
254 vm->ids[ridx].last_id_use = amdgpu_fence_ref(fence); 256 vm->ids[ridx].last_id_use = fence_get(&fence->base);
255} 257}
256 258
257/** 259/**
@@ -852,6 +854,14 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
852 return r; 854 return r;
853 } 855 }
854 856
857 if (trace_amdgpu_vm_bo_mapping_enabled()) {
858 list_for_each_entry(mapping, &bo_va->valids, list)
859 trace_amdgpu_vm_bo_mapping(mapping);
860
861 list_for_each_entry(mapping, &bo_va->invalids, list)
862 trace_amdgpu_vm_bo_mapping(mapping);
863 }
864
855 spin_lock(&vm->status_lock); 865 spin_lock(&vm->status_lock);
856 list_splice_init(&bo_va->invalids, &bo_va->valids); 866 list_splice_init(&bo_va->invalids, &bo_va->valids);
857 list_del_init(&bo_va->vm_status); 867 list_del_init(&bo_va->vm_status);
@@ -962,9 +972,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
962 INIT_LIST_HEAD(&bo_va->invalids); 972 INIT_LIST_HEAD(&bo_va->invalids);
963 INIT_LIST_HEAD(&bo_va->vm_status); 973 INIT_LIST_HEAD(&bo_va->vm_status);
964 974
965 mutex_lock(&vm->mutex);
966 list_add_tail(&bo_va->bo_list, &bo->va); 975 list_add_tail(&bo_va->bo_list, &bo->va);
967 mutex_unlock(&vm->mutex);
968 976
969 return bo_va; 977 return bo_va;
970} 978}
@@ -1017,8 +1025,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1017 return -EINVAL; 1025 return -EINVAL;
1018 } 1026 }
1019 1027
1020 mutex_lock(&vm->mutex);
1021
1022 saddr /= AMDGPU_GPU_PAGE_SIZE; 1028 saddr /= AMDGPU_GPU_PAGE_SIZE;
1023 eaddr /= AMDGPU_GPU_PAGE_SIZE; 1029 eaddr /= AMDGPU_GPU_PAGE_SIZE;
1024 1030
@@ -1032,14 +1038,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1032 tmp->it.start, tmp->it.last + 1); 1038 tmp->it.start, tmp->it.last + 1);
1033 amdgpu_bo_unreserve(bo_va->bo); 1039 amdgpu_bo_unreserve(bo_va->bo);
1034 r = -EINVAL; 1040 r = -EINVAL;
1035 goto error_unlock; 1041 goto error;
1036 } 1042 }
1037 1043
1038 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); 1044 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1039 if (!mapping) { 1045 if (!mapping) {
1040 amdgpu_bo_unreserve(bo_va->bo); 1046 amdgpu_bo_unreserve(bo_va->bo);
1041 r = -ENOMEM; 1047 r = -ENOMEM;
1042 goto error_unlock; 1048 goto error;
1043 } 1049 }
1044 1050
1045 INIT_LIST_HEAD(&mapping->list); 1051 INIT_LIST_HEAD(&mapping->list);
@@ -1071,9 +1077,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1071 if (vm->page_tables[pt_idx].bo) 1077 if (vm->page_tables[pt_idx].bo)
1072 continue; 1078 continue;
1073 1079
1074 /* drop mutex to allocate and clear page table */
1075 mutex_unlock(&vm->mutex);
1076
1077 ww_mutex_lock(&resv->lock, NULL); 1080 ww_mutex_lock(&resv->lock, NULL);
1078 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, 1081 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
1079 AMDGPU_GPU_PAGE_SIZE, true, 1082 AMDGPU_GPU_PAGE_SIZE, true,
@@ -1090,32 +1093,19 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1090 goto error_free; 1093 goto error_free;
1091 } 1094 }
1092 1095
1093 /* aquire mutex again */
1094 mutex_lock(&vm->mutex);
1095 if (vm->page_tables[pt_idx].bo) {
1096 /* someone else allocated the pt in the meantime */
1097 mutex_unlock(&vm->mutex);
1098 amdgpu_bo_unref(&pt);
1099 mutex_lock(&vm->mutex);
1100 continue;
1101 }
1102
1103 vm->page_tables[pt_idx].addr = 0; 1096 vm->page_tables[pt_idx].addr = 0;
1104 vm->page_tables[pt_idx].bo = pt; 1097 vm->page_tables[pt_idx].bo = pt;
1105 } 1098 }
1106 1099
1107 mutex_unlock(&vm->mutex);
1108 return 0; 1100 return 0;
1109 1101
1110error_free: 1102error_free:
1111 mutex_lock(&vm->mutex);
1112 list_del(&mapping->list); 1103 list_del(&mapping->list);
1113 interval_tree_remove(&mapping->it, &vm->va); 1104 interval_tree_remove(&mapping->it, &vm->va);
1114 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 1105 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1115 kfree(mapping); 1106 kfree(mapping);
1116 1107
1117error_unlock: 1108error:
1118 mutex_unlock(&vm->mutex);
1119 return r; 1109 return r;
1120} 1110}
1121 1111
@@ -1160,7 +1150,6 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1160 } 1150 }
1161 } 1151 }
1162 1152
1163 mutex_lock(&vm->mutex);
1164 list_del(&mapping->list); 1153 list_del(&mapping->list);
1165 interval_tree_remove(&mapping->it, &vm->va); 1154 interval_tree_remove(&mapping->it, &vm->va);
1166 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 1155 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
@@ -1169,7 +1158,6 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1169 list_add(&mapping->list, &vm->freed); 1158 list_add(&mapping->list, &vm->freed);
1170 else 1159 else
1171 kfree(mapping); 1160 kfree(mapping);
1172 mutex_unlock(&vm->mutex);
1173 amdgpu_bo_unreserve(bo_va->bo); 1161 amdgpu_bo_unreserve(bo_va->bo);
1174 1162
1175 return 0; 1163 return 0;
@@ -1193,8 +1181,6 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1193 1181
1194 list_del(&bo_va->bo_list); 1182 list_del(&bo_va->bo_list);
1195 1183
1196 mutex_lock(&vm->mutex);
1197
1198 spin_lock(&vm->status_lock); 1184 spin_lock(&vm->status_lock);
1199 list_del(&bo_va->vm_status); 1185 list_del(&bo_va->vm_status);
1200 spin_unlock(&vm->status_lock); 1186 spin_unlock(&vm->status_lock);
@@ -1213,8 +1199,6 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1213 1199
1214 fence_put(bo_va->last_pt_update); 1200 fence_put(bo_va->last_pt_update);
1215 kfree(bo_va); 1201 kfree(bo_va);
1216
1217 mutex_unlock(&vm->mutex);
1218} 1202}
1219 1203
1220/** 1204/**
@@ -1332,7 +1316,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1332 1316
1333 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 1317 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1334 fence_put(vm->ids[i].flushed_updates); 1318 fence_put(vm->ids[i].flushed_updates);
1335 amdgpu_fence_unref(&vm->ids[i].last_id_use); 1319 fence_put(vm->ids[i].last_id_use);
1336 } 1320 }
1337 1321
1338 mutex_destroy(&vm->mutex); 1322 mutex_destroy(&vm->mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index a0346a90d805..1b50e6c13fb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -685,6 +685,27 @@ static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
685 } 685 }
686} 686}
687 687
688static void atom_op_div32(atom_exec_context *ctx, int *ptr, int arg)
689{
690 uint64_t val64;
691 uint8_t attr = U8((*ptr)++);
692 uint32_t dst, src;
693 SDEBUG(" src1: ");
694 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
695 SDEBUG(" src2: ");
696 src = atom_get_src(ctx, attr, ptr);
697 if (src != 0) {
698 val64 = dst;
699 val64 |= ((uint64_t)ctx->ctx->divmul[1]) << 32;
700 do_div(val64, src);
701 ctx->ctx->divmul[0] = lower_32_bits(val64);
702 ctx->ctx->divmul[1] = upper_32_bits(val64);
703 } else {
704 ctx->ctx->divmul[0] = 0;
705 ctx->ctx->divmul[1] = 0;
706 }
707}
708
688static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg) 709static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
689{ 710{
690 /* functionally, a nop */ 711 /* functionally, a nop */
@@ -788,6 +809,20 @@ static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
788 ctx->ctx->divmul[0] = dst * src; 809 ctx->ctx->divmul[0] = dst * src;
789} 810}
790 811
812static void atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg)
813{
814 uint64_t val64;
815 uint8_t attr = U8((*ptr)++);
816 uint32_t dst, src;
817 SDEBUG(" src1: ");
818 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
819 SDEBUG(" src2: ");
820 src = atom_get_src(ctx, attr, ptr);
821 val64 = (uint64_t)dst * (uint64_t)src;
822 ctx->ctx->divmul[0] = lower_32_bits(val64);
823 ctx->ctx->divmul[1] = upper_32_bits(val64);
824}
825
791static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg) 826static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
792{ 827{
793 /* nothing */ 828 /* nothing */
@@ -1022,7 +1057,15 @@ static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
1022 1057
1023static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg) 1058static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
1024{ 1059{
1025 printk(KERN_INFO "unimplemented!\n"); 1060 uint8_t val = U8((*ptr)++);
1061 SDEBUG("DEBUG output: 0x%02X\n", val);
1062}
1063
1064static void atom_op_processds(atom_exec_context *ctx, int *ptr, int arg)
1065{
1066 uint16_t val = U16(*ptr);
1067 (*ptr) += val + 2;
1068 SDEBUG("PROCESSDS output: 0x%02X\n", val);
1026} 1069}
1027 1070
1028static struct { 1071static struct {
@@ -1151,7 +1194,13 @@ static struct {
1151 atom_op_shr, ATOM_ARG_FB}, { 1194 atom_op_shr, ATOM_ARG_FB}, {
1152 atom_op_shr, ATOM_ARG_PLL}, { 1195 atom_op_shr, ATOM_ARG_PLL}, {
1153 atom_op_shr, ATOM_ARG_MC}, { 1196 atom_op_shr, ATOM_ARG_MC}, {
1154atom_op_debug, 0},}; 1197 atom_op_debug, 0}, {
1198 atom_op_processds, 0}, {
1199 atom_op_mul32, ATOM_ARG_PS}, {
1200 atom_op_mul32, ATOM_ARG_WS}, {
1201 atom_op_div32, ATOM_ARG_PS}, {
1202 atom_op_div32, ATOM_ARG_WS},
1203};
1155 1204
1156static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params) 1205static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
1157{ 1206{
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h
index 09d0f8230708..fece8f45dc7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.h
+++ b/drivers/gpu/drm/amd/amdgpu/atom.h
@@ -60,7 +60,7 @@
60#define ATOM_CT_PS_MASK 0x7F 60#define ATOM_CT_PS_MASK 0x7F
61#define ATOM_CT_CODE_PTR 6 61#define ATOM_CT_CODE_PTR 6
62 62
63#define ATOM_OP_CNT 123 63#define ATOM_OP_CNT 127
64#define ATOM_OP_EOT 91 64#define ATOM_OP_EOT 91
65 65
66#define ATOM_CASE_MAGIC 0x63 66#define ATOM_CASE_MAGIC 0x63
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 9ea9de457da3..5f712ceddf08 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -96,7 +96,7 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
96{ 96{
97 const char *chip_name; 97 const char *chip_name;
98 char fw_name[30]; 98 char fw_name[30];
99 int err, i; 99 int err = 0, i;
100 100
101 DRM_DEBUG("\n"); 101 DRM_DEBUG("\n");
102 102
@@ -119,24 +119,24 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
119 default: BUG(); 119 default: BUG();
120 } 120 }
121 121
122 for (i = 0; i < SDMA_MAX_INSTANCE; i++) { 122 for (i = 0; i < adev->sdma.num_instances; i++) {
123 if (i == 0) 123 if (i == 0)
124 snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name); 124 snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
125 else 125 else
126 snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name); 126 snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
127 err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev); 127 err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
128 if (err) 128 if (err)
129 goto out; 129 goto out;
130 err = amdgpu_ucode_validate(adev->sdma[i].fw); 130 err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
131 } 131 }
132out: 132out:
133 if (err) { 133 if (err) {
134 printk(KERN_ERR 134 printk(KERN_ERR
135 "cik_sdma: Failed to load firmware \"%s\"\n", 135 "cik_sdma: Failed to load firmware \"%s\"\n",
136 fw_name); 136 fw_name);
137 for (i = 0; i < SDMA_MAX_INSTANCE; i++) { 137 for (i = 0; i < adev->sdma.num_instances; i++) {
138 release_firmware(adev->sdma[i].fw); 138 release_firmware(adev->sdma.instance[i].fw);
139 adev->sdma[i].fw = NULL; 139 adev->sdma.instance[i].fw = NULL;
140 } 140 }
141 } 141 }
142 return err; 142 return err;
@@ -168,7 +168,7 @@ static uint32_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
168static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring) 168static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
169{ 169{
170 struct amdgpu_device *adev = ring->adev; 170 struct amdgpu_device *adev = ring->adev;
171 u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1; 171 u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
172 172
173 return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2; 173 return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
174} 174}
@@ -183,14 +183,14 @@ static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
183static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring) 183static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
184{ 184{
185 struct amdgpu_device *adev = ring->adev; 185 struct amdgpu_device *adev = ring->adev;
186 u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1; 186 u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
187 187
188 WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc); 188 WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
189} 189}
190 190
191static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) 191static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
192{ 192{
193 struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring); 193 struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
194 int i; 194 int i;
195 195
196 for (i = 0; i < count; i++) 196 for (i = 0; i < count; i++)
@@ -248,7 +248,7 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
248 SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */ 248 SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
249 u32 ref_and_mask; 249 u32 ref_and_mask;
250 250
251 if (ring == &ring->adev->sdma[0].ring) 251 if (ring == &ring->adev->sdma.instance[0].ring)
252 ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK; 252 ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
253 else 253 else
254 ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK; 254 ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
@@ -327,8 +327,8 @@ static bool cik_sdma_ring_emit_semaphore(struct amdgpu_ring *ring,
327 */ 327 */
328static void cik_sdma_gfx_stop(struct amdgpu_device *adev) 328static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
329{ 329{
330 struct amdgpu_ring *sdma0 = &adev->sdma[0].ring; 330 struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
331 struct amdgpu_ring *sdma1 = &adev->sdma[1].ring; 331 struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
332 u32 rb_cntl; 332 u32 rb_cntl;
333 int i; 333 int i;
334 334
@@ -336,7 +336,7 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
336 (adev->mman.buffer_funcs_ring == sdma1)) 336 (adev->mman.buffer_funcs_ring == sdma1))
337 amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); 337 amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
338 338
339 for (i = 0; i < SDMA_MAX_INSTANCE; i++) { 339 for (i = 0; i < adev->sdma.num_instances; i++) {
340 rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); 340 rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
341 rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK; 341 rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
342 WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); 342 WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
@@ -376,7 +376,7 @@ static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
376 cik_sdma_rlc_stop(adev); 376 cik_sdma_rlc_stop(adev);
377 } 377 }
378 378
379 for (i = 0; i < SDMA_MAX_INSTANCE; i++) { 379 for (i = 0; i < adev->sdma.num_instances; i++) {
380 me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]); 380 me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
381 if (enable) 381 if (enable)
382 me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK; 382 me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
@@ -402,8 +402,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
402 u32 wb_offset; 402 u32 wb_offset;
403 int i, j, r; 403 int i, j, r;
404 404
405 for (i = 0; i < SDMA_MAX_INSTANCE; i++) { 405 for (i = 0; i < adev->sdma.num_instances; i++) {
406 ring = &adev->sdma[i].ring; 406 ring = &adev->sdma.instance[i].ring;
407 wb_offset = (ring->rptr_offs * 4); 407 wb_offset = (ring->rptr_offs * 4);
408 408
409 mutex_lock(&adev->srbm_mutex); 409 mutex_lock(&adev->srbm_mutex);
@@ -502,26 +502,25 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev)
502 u32 fw_size; 502 u32 fw_size;
503 int i, j; 503 int i, j;
504 504
505 if (!adev->sdma[0].fw || !adev->sdma[1].fw)
506 return -EINVAL;
507
508 /* halt the MEs */ 505 /* halt the MEs */
509 cik_sdma_enable(adev, false); 506 cik_sdma_enable(adev, false);
510 507
511 for (i = 0; i < SDMA_MAX_INSTANCE; i++) { 508 for (i = 0; i < adev->sdma.num_instances; i++) {
512 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; 509 if (!adev->sdma.instance[i].fw)
510 return -EINVAL;
511 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
513 amdgpu_ucode_print_sdma_hdr(&hdr->header); 512 amdgpu_ucode_print_sdma_hdr(&hdr->header);
514 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 513 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
515 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); 514 adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
516 adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); 515 adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
517 if (adev->sdma[i].feature_version >= 20) 516 if (adev->sdma.instance[i].feature_version >= 20)
518 adev->sdma[i].burst_nop = true; 517 adev->sdma.instance[i].burst_nop = true;
519 fw_data = (const __le32 *) 518 fw_data = (const __le32 *)
520 (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 519 (adev->sdma.instance[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
521 WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); 520 WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
522 for (j = 0; j < fw_size; j++) 521 for (j = 0; j < fw_size; j++)
523 WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++)); 522 WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
524 WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version); 523 WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
525 } 524 }
526 525
527 return 0; 526 return 0;
@@ -830,7 +829,7 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
830 */ 829 */
831static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib) 830static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
832{ 831{
833 struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring); 832 struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
834 u32 pad_count; 833 u32 pad_count;
835 int i; 834 int i;
836 835
@@ -934,6 +933,8 @@ static int cik_sdma_early_init(void *handle)
934{ 933{
935 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 934 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
936 935
936 adev->sdma.num_instances = SDMA_MAX_INSTANCE;
937
937 cik_sdma_set_ring_funcs(adev); 938 cik_sdma_set_ring_funcs(adev);
938 cik_sdma_set_irq_funcs(adev); 939 cik_sdma_set_irq_funcs(adev);
939 cik_sdma_set_buffer_funcs(adev); 940 cik_sdma_set_buffer_funcs(adev);
@@ -946,7 +947,7 @@ static int cik_sdma_sw_init(void *handle)
946{ 947{
947 struct amdgpu_ring *ring; 948 struct amdgpu_ring *ring;
948 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 949 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
949 int r; 950 int r, i;
950 951
951 r = cik_sdma_init_microcode(adev); 952 r = cik_sdma_init_microcode(adev);
952 if (r) { 953 if (r) {
@@ -955,43 +956,33 @@ static int cik_sdma_sw_init(void *handle)
955 } 956 }
956 957
957 /* SDMA trap event */ 958 /* SDMA trap event */
958 r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq); 959 r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
959 if (r) 960 if (r)
960 return r; 961 return r;
961 962
962 /* SDMA Privileged inst */ 963 /* SDMA Privileged inst */
963 r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq); 964 r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
964 if (r) 965 if (r)
965 return r; 966 return r;
966 967
967 /* SDMA Privileged inst */ 968 /* SDMA Privileged inst */
968 r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq); 969 r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
969 if (r) 970 if (r)
970 return r; 971 return r;
971 972
972 ring = &adev->sdma[0].ring; 973 for (i = 0; i < adev->sdma.num_instances; i++) {
973 ring->ring_obj = NULL; 974 ring = &adev->sdma.instance[i].ring;
974 975 ring->ring_obj = NULL;
975 ring = &adev->sdma[1].ring; 976 sprintf(ring->name, "sdma%d", i);
976 ring->ring_obj = NULL; 977 r = amdgpu_ring_init(adev, ring, 256 * 1024,
977 978 SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
978 ring = &adev->sdma[0].ring; 979 &adev->sdma.trap_irq,
979 sprintf(ring->name, "sdma0"); 980 (i == 0) ?
980 r = amdgpu_ring_init(adev, ring, 256 * 1024, 981 AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
981 SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf, 982 AMDGPU_RING_TYPE_SDMA);
982 &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0, 983 if (r)
983 AMDGPU_RING_TYPE_SDMA); 984 return r;
984 if (r) 985 }
985 return r;
986
987 ring = &adev->sdma[1].ring;
988 sprintf(ring->name, "sdma1");
989 r = amdgpu_ring_init(adev, ring, 256 * 1024,
990 SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
991 &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
992 AMDGPU_RING_TYPE_SDMA);
993 if (r)
994 return r;
995 986
996 return r; 987 return r;
997} 988}
@@ -999,9 +990,10 @@ static int cik_sdma_sw_init(void *handle)
999static int cik_sdma_sw_fini(void *handle) 990static int cik_sdma_sw_fini(void *handle)
1000{ 991{
1001 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 992 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
993 int i;
1002 994
1003 amdgpu_ring_fini(&adev->sdma[0].ring); 995 for (i = 0; i < adev->sdma.num_instances; i++)
1004 amdgpu_ring_fini(&adev->sdma[1].ring); 996 amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1005 997
1006 return 0; 998 return 0;
1007} 999}
@@ -1078,7 +1070,7 @@ static void cik_sdma_print_status(void *handle)
1078 dev_info(adev->dev, "CIK SDMA registers\n"); 1070 dev_info(adev->dev, "CIK SDMA registers\n");
1079 dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", 1071 dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
1080 RREG32(mmSRBM_STATUS2)); 1072 RREG32(mmSRBM_STATUS2));
1081 for (i = 0; i < SDMA_MAX_INSTANCE; i++) { 1073 for (i = 0; i < adev->sdma.num_instances; i++) {
1082 dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n", 1074 dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n",
1083 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i])); 1075 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
1084 dev_info(adev->dev, " SDMA%d_ME_CNTL=0x%08X\n", 1076 dev_info(adev->dev, " SDMA%d_ME_CNTL=0x%08X\n",
@@ -1223,7 +1215,7 @@ static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
1223 case 0: 1215 case 0:
1224 switch (queue_id) { 1216 switch (queue_id) {
1225 case 0: 1217 case 0:
1226 amdgpu_fence_process(&adev->sdma[0].ring); 1218 amdgpu_fence_process(&adev->sdma.instance[0].ring);
1227 break; 1219 break;
1228 case 1: 1220 case 1:
1229 /* XXX compute */ 1221 /* XXX compute */
@@ -1236,7 +1228,7 @@ static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
1236 case 1: 1228 case 1:
1237 switch (queue_id) { 1229 switch (queue_id) {
1238 case 0: 1230 case 0:
1239 amdgpu_fence_process(&adev->sdma[1].ring); 1231 amdgpu_fence_process(&adev->sdma.instance[1].ring);
1240 break; 1232 break;
1241 case 1: 1233 case 1:
1242 /* XXX compute */ 1234 /* XXX compute */
@@ -1298,24 +1290,6 @@ const struct amd_ip_funcs cik_sdma_ip_funcs = {
1298 .set_powergating_state = cik_sdma_set_powergating_state, 1290 .set_powergating_state = cik_sdma_set_powergating_state,
1299}; 1291};
1300 1292
1301/**
1302 * cik_sdma_ring_is_lockup - Check if the DMA engine is locked up
1303 *
1304 * @ring: amdgpu_ring structure holding ring information
1305 *
1306 * Check if the async DMA engine is locked up (CIK).
1307 * Returns true if the engine appears to be locked up, false if not.
1308 */
1309static bool cik_sdma_ring_is_lockup(struct amdgpu_ring *ring)
1310{
1311
1312 if (cik_sdma_is_idle(ring->adev)) {
1313 amdgpu_ring_lockup_update(ring);
1314 return false;
1315 }
1316 return amdgpu_ring_test_lockup(ring);
1317}
1318
1319static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = { 1293static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
1320 .get_rptr = cik_sdma_ring_get_rptr, 1294 .get_rptr = cik_sdma_ring_get_rptr,
1321 .get_wptr = cik_sdma_ring_get_wptr, 1295 .get_wptr = cik_sdma_ring_get_wptr,
@@ -1328,14 +1302,15 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
1328 .emit_hdp_flush = cik_sdma_ring_emit_hdp_flush, 1302 .emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
1329 .test_ring = cik_sdma_ring_test_ring, 1303 .test_ring = cik_sdma_ring_test_ring,
1330 .test_ib = cik_sdma_ring_test_ib, 1304 .test_ib = cik_sdma_ring_test_ib,
1331 .is_lockup = cik_sdma_ring_is_lockup,
1332 .insert_nop = cik_sdma_ring_insert_nop, 1305 .insert_nop = cik_sdma_ring_insert_nop,
1333}; 1306};
1334 1307
1335static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev) 1308static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
1336{ 1309{
1337 adev->sdma[0].ring.funcs = &cik_sdma_ring_funcs; 1310 int i;
1338 adev->sdma[1].ring.funcs = &cik_sdma_ring_funcs; 1311
1312 for (i = 0; i < adev->sdma.num_instances; i++)
1313 adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
1339} 1314}
1340 1315
1341static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = { 1316static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
@@ -1349,9 +1324,9 @@ static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = {
1349 1324
1350static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev) 1325static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
1351{ 1326{
1352 adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; 1327 adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
1353 adev->sdma_trap_irq.funcs = &cik_sdma_trap_irq_funcs; 1328 adev->sdma.trap_irq.funcs = &cik_sdma_trap_irq_funcs;
1354 adev->sdma_illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs; 1329 adev->sdma.illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
1355} 1330}
1356 1331
1357/** 1332/**
@@ -1416,7 +1391,7 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
1416{ 1391{
1417 if (adev->mman.buffer_funcs == NULL) { 1392 if (adev->mman.buffer_funcs == NULL) {
1418 adev->mman.buffer_funcs = &cik_sdma_buffer_funcs; 1393 adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
1419 adev->mman.buffer_funcs_ring = &adev->sdma[0].ring; 1394 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
1420 } 1395 }
1421} 1396}
1422 1397
@@ -1431,7 +1406,7 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
1431{ 1406{
1432 if (adev->vm_manager.vm_pte_funcs == NULL) { 1407 if (adev->vm_manager.vm_pte_funcs == NULL) {
1433 adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs; 1408 adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
1434 adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring; 1409 adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
1435 adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; 1410 adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
1436 } 1411 }
1437} 1412}
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 2e3373ed4c94..8035d4d6a4f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1264,6 +1264,7 @@ static void cz_apply_state_adjust_rules(struct amdgpu_device *adev,
1264 1264
1265static int cz_dpm_enable(struct amdgpu_device *adev) 1265static int cz_dpm_enable(struct amdgpu_device *adev)
1266{ 1266{
1267 const char *chip_name;
1267 int ret = 0; 1268 int ret = 0;
1268 1269
1269 /* renable will hang up SMU, so check first */ 1270 /* renable will hang up SMU, so check first */
@@ -1272,21 +1273,33 @@ static int cz_dpm_enable(struct amdgpu_device *adev)
1272 1273
1273 cz_program_voting_clients(adev); 1274 cz_program_voting_clients(adev);
1274 1275
1276 switch (adev->asic_type) {
1277 case CHIP_CARRIZO:
1278 chip_name = "carrizo";
1279 break;
1280 case CHIP_STONEY:
1281 chip_name = "stoney";
1282 break;
1283 default:
1284 BUG();
1285 }
1286
1287
1275 ret = cz_start_dpm(adev); 1288 ret = cz_start_dpm(adev);
1276 if (ret) { 1289 if (ret) {
1277 DRM_ERROR("Carrizo DPM enable failed\n"); 1290 DRM_ERROR("%s DPM enable failed\n", chip_name);
1278 return -EINVAL; 1291 return -EINVAL;
1279 } 1292 }
1280 1293
1281 ret = cz_program_bootup_state(adev); 1294 ret = cz_program_bootup_state(adev);
1282 if (ret) { 1295 if (ret) {
1283 DRM_ERROR("Carrizo bootup state program failed\n"); 1296 DRM_ERROR("%s bootup state program failed\n", chip_name);
1284 return -EINVAL; 1297 return -EINVAL;
1285 } 1298 }
1286 1299
1287 ret = cz_enable_didt(adev, true); 1300 ret = cz_enable_didt(adev, true);
1288 if (ret) { 1301 if (ret) {
1289 DRM_ERROR("Carrizo enable di/dt failed\n"); 1302 DRM_ERROR("%s enable di/dt failed\n", chip_name);
1290 return -EINVAL; 1303 return -EINVAL;
1291 } 1304 }
1292 1305
@@ -1353,7 +1366,7 @@ static int cz_dpm_disable(struct amdgpu_device *adev)
1353 1366
1354 ret = cz_enable_didt(adev, false); 1367 ret = cz_enable_didt(adev, false);
1355 if (ret) { 1368 if (ret) {
1356 DRM_ERROR("Carrizo disable di/dt failed\n"); 1369 DRM_ERROR("disable di/dt failed\n");
1357 return -EINVAL; 1370 return -EINVAL;
1358 } 1371 }
1359 1372
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
index e33180d3314a..ac7fee7b7eca 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
@@ -312,13 +312,16 @@ int cz_smu_start(struct amdgpu_device *adev)
312 UCODE_ID_CP_MEC_JT1_MASK | 312 UCODE_ID_CP_MEC_JT1_MASK |
313 UCODE_ID_CP_MEC_JT2_MASK; 313 UCODE_ID_CP_MEC_JT2_MASK;
314 314
315 if (adev->asic_type == CHIP_STONEY)
316 fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
317
315 cz_smu_request_load_fw(adev); 318 cz_smu_request_load_fw(adev);
316 ret = cz_smu_check_fw_load_finish(adev, fw_to_check); 319 ret = cz_smu_check_fw_load_finish(adev, fw_to_check);
317 if (ret) 320 if (ret)
318 return ret; 321 return ret;
319 322
320 /* manually load MEC firmware for CZ */ 323 /* manually load MEC firmware for CZ */
321 if (adev->asic_type == CHIP_CARRIZO) { 324 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) {
322 ret = cz_load_mec_firmware(adev); 325 ret = cz_load_mec_firmware(adev);
323 if (ret) { 326 if (ret) {
324 dev_err(adev->dev, "(%d) Mec Firmware load failed\n", ret); 327 dev_err(adev->dev, "(%d) Mec Firmware load failed\n", ret);
@@ -336,6 +339,9 @@ int cz_smu_start(struct amdgpu_device *adev)
336 AMDGPU_CPMEC2_UCODE_LOADED | 339 AMDGPU_CPMEC2_UCODE_LOADED |
337 AMDGPU_CPRLC_UCODE_LOADED; 340 AMDGPU_CPRLC_UCODE_LOADED;
338 341
342 if (adev->asic_type == CHIP_STONEY)
343 adev->smu.fw_flags &= ~(AMDGPU_SDMA1_UCODE_LOADED | AMDGPU_CPMEC2_UCODE_LOADED);
344
339 return ret; 345 return ret;
340} 346}
341 347
@@ -601,8 +607,13 @@ static int cz_smu_construct_toc_for_vddgfx_exit(struct amdgpu_device *adev)
601 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); 607 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
602 cz_smu_populate_single_ucode_load_task(adev, 608 cz_smu_populate_single_ucode_load_task(adev,
603 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); 609 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
604 cz_smu_populate_single_ucode_load_task(adev, 610 if (adev->asic_type == CHIP_STONEY) {
611 cz_smu_populate_single_ucode_load_task(adev,
612 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
613 } else {
614 cz_smu_populate_single_ucode_load_task(adev,
605 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); 615 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
616 }
606 cz_smu_populate_single_ucode_load_task(adev, 617 cz_smu_populate_single_ucode_load_task(adev,
607 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false); 618 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
608 } 619 }
@@ -642,8 +653,13 @@ static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev)
642 if (adev->firmware.smu_load) { 653 if (adev->firmware.smu_load) {
643 cz_smu_populate_single_ucode_load_task(adev, 654 cz_smu_populate_single_ucode_load_task(adev,
644 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false); 655 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
645 cz_smu_populate_single_ucode_load_task(adev, 656 if (adev->asic_type == CHIP_STONEY) {
657 cz_smu_populate_single_ucode_load_task(adev,
658 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
659 } else {
660 cz_smu_populate_single_ucode_load_task(adev,
646 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false); 661 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
662 }
647 cz_smu_populate_single_ucode_load_task(adev, 663 cz_smu_populate_single_ucode_load_task(adev,
648 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); 664 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
649 cz_smu_populate_single_ucode_load_task(adev, 665 cz_smu_populate_single_ucode_load_task(adev,
@@ -652,8 +668,13 @@ static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev)
652 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); 668 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
653 cz_smu_populate_single_ucode_load_task(adev, 669 cz_smu_populate_single_ucode_load_task(adev,
654 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); 670 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
655 cz_smu_populate_single_ucode_load_task(adev, 671 if (adev->asic_type == CHIP_STONEY) {
672 cz_smu_populate_single_ucode_load_task(adev,
673 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
674 } else {
675 cz_smu_populate_single_ucode_load_task(adev,
656 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); 676 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
677 }
657 cz_smu_populate_single_ucode_load_task(adev, 678 cz_smu_populate_single_ucode_load_task(adev,
658 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true); 679 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
659 } 680 }
@@ -888,10 +909,18 @@ int cz_smu_init(struct amdgpu_device *adev)
888 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, 909 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
889 &priv->driver_buffer[priv->driver_buffer_length++])) 910 &priv->driver_buffer[priv->driver_buffer_length++]))
890 goto smu_init_failed; 911 goto smu_init_failed;
891 if (cz_smu_populate_single_firmware_entry(adev, 912
892 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, 913 if (adev->asic_type == CHIP_STONEY) {
893 &priv->driver_buffer[priv->driver_buffer_length++])) 914 if (cz_smu_populate_single_firmware_entry(adev,
894 goto smu_init_failed; 915 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
916 &priv->driver_buffer[priv->driver_buffer_length++]))
917 goto smu_init_failed;
918 } else {
919 if (cz_smu_populate_single_firmware_entry(adev,
920 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
921 &priv->driver_buffer[priv->driver_buffer_length++]))
922 goto smu_init_failed;
923 }
895 if (cz_smu_populate_single_firmware_entry(adev, 924 if (cz_smu_populate_single_firmware_entry(adev,
896 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, 925 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
897 &priv->driver_buffer[priv->driver_buffer_length++])) 926 &priv->driver_buffer[priv->driver_buffer_length++]))
@@ -908,10 +937,17 @@ int cz_smu_init(struct amdgpu_device *adev)
908 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, 937 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
909 &priv->driver_buffer[priv->driver_buffer_length++])) 938 &priv->driver_buffer[priv->driver_buffer_length++]))
910 goto smu_init_failed; 939 goto smu_init_failed;
911 if (cz_smu_populate_single_firmware_entry(adev, 940 if (adev->asic_type == CHIP_STONEY) {
912 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, 941 if (cz_smu_populate_single_firmware_entry(adev,
913 &priv->driver_buffer[priv->driver_buffer_length++])) 942 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
914 goto smu_init_failed; 943 &priv->driver_buffer[priv->driver_buffer_length++]))
944 goto smu_init_failed;
945 } else {
946 if (cz_smu_populate_single_firmware_entry(adev,
947 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
948 &priv->driver_buffer[priv->driver_buffer_length++]))
949 goto smu_init_failed;
950 }
915 if (cz_smu_populate_single_firmware_entry(adev, 951 if (cz_smu_populate_single_firmware_entry(adev,
916 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, 952 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
917 &priv->driver_buffer[priv->driver_buffer_length++])) 953 &priv->driver_buffer[priv->driver_buffer_length++]))
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index d4c82b625727..cb0f7747e3dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -280,46 +280,22 @@ static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
280 * @crtc_id: crtc to cleanup pageflip on 280 * @crtc_id: crtc to cleanup pageflip on
281 * @crtc_base: new address of the crtc (GPU MC address) 281 * @crtc_base: new address of the crtc (GPU MC address)
282 * 282 *
283 * Does the actual pageflip (evergreen+). 283 * Triggers the actual pageflip by updating the primary
284 * During vblank we take the crtc lock and wait for the update_pending 284 * surface base address.
285 * bit to go high, when it does, we release the lock, and allow the
286 * double buffered update to take place.
287 * Returns the current update pending status.
288 */ 285 */
289static void dce_v10_0_page_flip(struct amdgpu_device *adev, 286static void dce_v10_0_page_flip(struct amdgpu_device *adev,
290 int crtc_id, u64 crtc_base) 287 int crtc_id, u64 crtc_base)
291{ 288{
292 struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; 289 struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
293 u32 tmp = RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset);
294 int i;
295
296 /* Lock the graphics update lock */
297 tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
298 WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
299
300 /* update the scanout addresses */
301 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
302 upper_32_bits(crtc_base));
303 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
304 lower_32_bits(crtc_base));
305 290
291 /* update the primary scanout address */
306 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 292 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
307 upper_32_bits(crtc_base)); 293 upper_32_bits(crtc_base));
294 /* writing to the low address triggers the update */
308 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 295 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
309 lower_32_bits(crtc_base)); 296 lower_32_bits(crtc_base));
310 297 /* post the write */
311 /* Wait for update_pending to go high. */ 298 RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
312 for (i = 0; i < adev->usec_timeout; i++) {
313 if (RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset) &
314 GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK)
315 break;
316 udelay(1);
317 }
318 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
319
320 /* Unlock the lock, so double-buffering can take place inside vblank */
321 tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
322 WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
323} 299}
324 300
325static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, 301static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
@@ -2517,26 +2493,19 @@ static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
2517 struct amdgpu_device *adev = crtc->dev->dev_private; 2493 struct amdgpu_device *adev = crtc->dev->dev_private;
2518 u32 tmp; 2494 u32 tmp;
2519 2495
2496 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2497 upper_32_bits(amdgpu_crtc->cursor_addr));
2498 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2499 lower_32_bits(amdgpu_crtc->cursor_addr));
2500
2520 tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); 2501 tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2521 tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1); 2502 tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
2522 tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2); 2503 tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
2523 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2504 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2524} 2505}
2525 2506
2526static void dce_v10_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, 2507static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
2527 uint64_t gpu_addr) 2508 int x, int y)
2528{
2529 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2530 struct amdgpu_device *adev = crtc->dev->dev_private;
2531
2532 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2533 upper_32_bits(gpu_addr));
2534 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2535 lower_32_bits(gpu_addr));
2536}
2537
2538static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
2539 int x, int y)
2540{ 2509{
2541 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2510 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2542 struct amdgpu_device *adev = crtc->dev->dev_private; 2511 struct amdgpu_device *adev = crtc->dev->dev_private;
@@ -2556,26 +2525,40 @@ static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
2556 y = 0; 2525 y = 0;
2557 } 2526 }
2558 2527
2559 dce_v10_0_lock_cursor(crtc, true);
2560 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2528 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2561 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2529 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2562 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2530 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2563 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); 2531 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2564 dce_v10_0_lock_cursor(crtc, false); 2532
2533 amdgpu_crtc->cursor_x = x;
2534 amdgpu_crtc->cursor_y = y;
2565 2535
2566 return 0; 2536 return 0;
2567} 2537}
2568 2538
2569static int dce_v10_0_crtc_cursor_set(struct drm_crtc *crtc, 2539static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
2570 struct drm_file *file_priv, 2540 int x, int y)
2571 uint32_t handle, 2541{
2572 uint32_t width, 2542 int ret;
2573 uint32_t height) 2543
2544 dce_v10_0_lock_cursor(crtc, true);
2545 ret = dce_v10_0_cursor_move_locked(crtc, x, y);
2546 dce_v10_0_lock_cursor(crtc, false);
2547
2548 return ret;
2549}
2550
2551static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
2552 struct drm_file *file_priv,
2553 uint32_t handle,
2554 uint32_t width,
2555 uint32_t height,
2556 int32_t hot_x,
2557 int32_t hot_y)
2574{ 2558{
2575 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2559 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2576 struct drm_gem_object *obj; 2560 struct drm_gem_object *obj;
2577 struct amdgpu_bo *robj; 2561 struct amdgpu_bo *aobj;
2578 uint64_t gpu_addr;
2579 int ret; 2562 int ret;
2580 2563
2581 if (!handle) { 2564 if (!handle) {
@@ -2597,41 +2580,71 @@ static int dce_v10_0_crtc_cursor_set(struct drm_crtc *crtc,
2597 return -ENOENT; 2580 return -ENOENT;
2598 } 2581 }
2599 2582
2600 robj = gem_to_amdgpu_bo(obj); 2583 aobj = gem_to_amdgpu_bo(obj);
2601 ret = amdgpu_bo_reserve(robj, false); 2584 ret = amdgpu_bo_reserve(aobj, false);
2602 if (unlikely(ret != 0)) 2585 if (ret != 0) {
2603 goto fail; 2586 drm_gem_object_unreference_unlocked(obj);
2604 ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, 2587 return ret;
2605 0, 0, &gpu_addr); 2588 }
2606 amdgpu_bo_unreserve(robj); 2589
2607 if (ret) 2590 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
2608 goto fail; 2591 amdgpu_bo_unreserve(aobj);
2592 if (ret) {
2593 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2594 drm_gem_object_unreference_unlocked(obj);
2595 return ret;
2596 }
2609 2597
2610 amdgpu_crtc->cursor_width = width; 2598 amdgpu_crtc->cursor_width = width;
2611 amdgpu_crtc->cursor_height = height; 2599 amdgpu_crtc->cursor_height = height;
2612 2600
2613 dce_v10_0_lock_cursor(crtc, true); 2601 dce_v10_0_lock_cursor(crtc, true);
2614 dce_v10_0_set_cursor(crtc, obj, gpu_addr); 2602
2603 if (hot_x != amdgpu_crtc->cursor_hot_x ||
2604 hot_y != amdgpu_crtc->cursor_hot_y) {
2605 int x, y;
2606
2607 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2608 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2609
2610 dce_v10_0_cursor_move_locked(crtc, x, y);
2611
2612 amdgpu_crtc->cursor_hot_x = hot_x;
2613 amdgpu_crtc->cursor_hot_y = hot_y;
2614 }
2615
2615 dce_v10_0_show_cursor(crtc); 2616 dce_v10_0_show_cursor(crtc);
2616 dce_v10_0_lock_cursor(crtc, false); 2617 dce_v10_0_lock_cursor(crtc, false);
2617 2618
2618unpin: 2619unpin:
2619 if (amdgpu_crtc->cursor_bo) { 2620 if (amdgpu_crtc->cursor_bo) {
2620 robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2621 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2621 ret = amdgpu_bo_reserve(robj, false); 2622 ret = amdgpu_bo_reserve(aobj, false);
2622 if (likely(ret == 0)) { 2623 if (likely(ret == 0)) {
2623 amdgpu_bo_unpin(robj); 2624 amdgpu_bo_unpin(aobj);
2624 amdgpu_bo_unreserve(robj); 2625 amdgpu_bo_unreserve(aobj);
2625 } 2626 }
2626 drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); 2627 drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
2627 } 2628 }
2628 2629
2629 amdgpu_crtc->cursor_bo = obj; 2630 amdgpu_crtc->cursor_bo = obj;
2630 return 0; 2631 return 0;
2631fail: 2632}
2632 drm_gem_object_unreference_unlocked(obj);
2633 2633
2634 return ret; 2634static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
2635{
2636 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2637
2638 if (amdgpu_crtc->cursor_bo) {
2639 dce_v10_0_lock_cursor(crtc, true);
2640
2641 dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2642 amdgpu_crtc->cursor_y);
2643
2644 dce_v10_0_show_cursor(crtc);
2645
2646 dce_v10_0_lock_cursor(crtc, false);
2647 }
2635} 2648}
2636 2649
2637static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 2650static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
@@ -2659,7 +2672,7 @@ static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
2659} 2672}
2660 2673
2661static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = { 2674static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
2662 .cursor_set = dce_v10_0_crtc_cursor_set, 2675 .cursor_set2 = dce_v10_0_crtc_cursor_set2,
2663 .cursor_move = dce_v10_0_crtc_cursor_move, 2676 .cursor_move = dce_v10_0_crtc_cursor_move,
2664 .gamma_set = dce_v10_0_crtc_gamma_set, 2677 .gamma_set = dce_v10_0_crtc_gamma_set,
2665 .set_config = amdgpu_crtc_set_config, 2678 .set_config = amdgpu_crtc_set_config,
@@ -2793,6 +2806,7 @@ static int dce_v10_0_crtc_mode_set(struct drm_crtc *crtc,
2793 dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0); 2806 dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2794 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode); 2807 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2795 amdgpu_atombios_crtc_scaler_setup(crtc); 2808 amdgpu_atombios_crtc_scaler_setup(crtc);
2809 dce_v10_0_cursor_reset(crtc);
2796 /* update the hw version fpr dpm */ 2810 /* update the hw version fpr dpm */
2797 amdgpu_crtc->hw_mode = *adjusted_mode; 2811 amdgpu_crtc->hw_mode = *adjusted_mode;
2798 2812
@@ -3071,24 +3085,18 @@ static int dce_v10_0_suspend(void *handle)
3071 3085
3072 amdgpu_atombios_scratch_regs_save(adev); 3086 amdgpu_atombios_scratch_regs_save(adev);
3073 3087
3074 dce_v10_0_hpd_fini(adev); 3088 return dce_v10_0_hw_fini(handle);
3075
3076 dce_v10_0_pageflip_interrupt_fini(adev);
3077
3078 return 0;
3079} 3089}
3080 3090
3081static int dce_v10_0_resume(void *handle) 3091static int dce_v10_0_resume(void *handle)
3082{ 3092{
3083 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3093 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3094 int ret;
3084 3095
3085 dce_v10_0_init_golden_registers(adev); 3096 ret = dce_v10_0_hw_init(handle);
3086 3097
3087 amdgpu_atombios_scratch_regs_restore(adev); 3098 amdgpu_atombios_scratch_regs_restore(adev);
3088 3099
3089 /* init dig PHYs, disp eng pll */
3090 amdgpu_atombios_encoder_init_dig(adev);
3091 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
3092 /* turn on the BL */ 3100 /* turn on the BL */
3093 if (adev->mode_info.bl_encoder) { 3101 if (adev->mode_info.bl_encoder) {
3094 u8 bl_level = amdgpu_display_backlight_get_level(adev, 3102 u8 bl_level = amdgpu_display_backlight_get_level(adev,
@@ -3097,12 +3105,7 @@ static int dce_v10_0_resume(void *handle)
3097 bl_level); 3105 bl_level);
3098 } 3106 }
3099 3107
3100 /* initialize hpd */ 3108 return ret;
3101 dce_v10_0_hpd_init(adev);
3102
3103 dce_v10_0_pageflip_interrupt_init(adev);
3104
3105 return 0;
3106} 3109}
3107 3110
3108static bool dce_v10_0_is_idle(void *handle) 3111static bool dce_v10_0_is_idle(void *handle)
@@ -3294,37 +3297,20 @@ static int dce_v10_0_set_pageflip_irq_state(struct amdgpu_device *adev,
3294 unsigned type, 3297 unsigned type,
3295 enum amdgpu_interrupt_state state) 3298 enum amdgpu_interrupt_state state)
3296{ 3299{
3297 u32 reg, reg_block; 3300 u32 reg;
3298 /* now deal with page flip IRQ */ 3301
3299 switch (type) { 3302 if (type >= adev->mode_info.num_crtc) {
3300 case AMDGPU_PAGEFLIP_IRQ_D1: 3303 DRM_ERROR("invalid pageflip crtc %d\n", type);
3301 reg_block = CRTC0_REGISTER_OFFSET; 3304 return -EINVAL;
3302 break;
3303 case AMDGPU_PAGEFLIP_IRQ_D2:
3304 reg_block = CRTC1_REGISTER_OFFSET;
3305 break;
3306 case AMDGPU_PAGEFLIP_IRQ_D3:
3307 reg_block = CRTC2_REGISTER_OFFSET;
3308 break;
3309 case AMDGPU_PAGEFLIP_IRQ_D4:
3310 reg_block = CRTC3_REGISTER_OFFSET;
3311 break;
3312 case AMDGPU_PAGEFLIP_IRQ_D5:
3313 reg_block = CRTC4_REGISTER_OFFSET;
3314 break;
3315 case AMDGPU_PAGEFLIP_IRQ_D6:
3316 reg_block = CRTC5_REGISTER_OFFSET;
3317 break;
3318 default:
3319 DRM_ERROR("invalid pageflip crtc %d\n", type);
3320 return -EINVAL;
3321 } 3305 }
3322 3306
3323 reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block); 3307 reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3324 if (state == AMDGPU_IRQ_STATE_DISABLE) 3308 if (state == AMDGPU_IRQ_STATE_DISABLE)
3325 WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3309 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3310 reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3326 else 3311 else
3327 WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3312 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3313 reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3328 3314
3329 return 0; 3315 return 0;
3330} 3316}
@@ -3333,7 +3319,6 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
3333 struct amdgpu_irq_src *source, 3319 struct amdgpu_irq_src *source,
3334 struct amdgpu_iv_entry *entry) 3320 struct amdgpu_iv_entry *entry)
3335{ 3321{
3336 int reg_block;
3337 unsigned long flags; 3322 unsigned long flags;
3338 unsigned crtc_id; 3323 unsigned crtc_id;
3339 struct amdgpu_crtc *amdgpu_crtc; 3324 struct amdgpu_crtc *amdgpu_crtc;
@@ -3342,33 +3327,15 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
3342 crtc_id = (entry->src_id - 8) >> 1; 3327 crtc_id = (entry->src_id - 8) >> 1;
3343 amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; 3328 amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3344 3329
3345 /* ack the interrupt */ 3330 if (crtc_id >= adev->mode_info.num_crtc) {
3346 switch(crtc_id){ 3331 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3347 case AMDGPU_PAGEFLIP_IRQ_D1: 3332 return -EINVAL;
3348 reg_block = CRTC0_REGISTER_OFFSET;
3349 break;
3350 case AMDGPU_PAGEFLIP_IRQ_D2:
3351 reg_block = CRTC1_REGISTER_OFFSET;
3352 break;
3353 case AMDGPU_PAGEFLIP_IRQ_D3:
3354 reg_block = CRTC2_REGISTER_OFFSET;
3355 break;
3356 case AMDGPU_PAGEFLIP_IRQ_D4:
3357 reg_block = CRTC3_REGISTER_OFFSET;
3358 break;
3359 case AMDGPU_PAGEFLIP_IRQ_D5:
3360 reg_block = CRTC4_REGISTER_OFFSET;
3361 break;
3362 case AMDGPU_PAGEFLIP_IRQ_D6:
3363 reg_block = CRTC5_REGISTER_OFFSET;
3364 break;
3365 default:
3366 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3367 return -EINVAL;
3368 } 3333 }
3369 3334
3370 if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) 3335 if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3371 WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); 3336 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3337 WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3338 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3372 3339
3373 /* IRQ could occur when in initial stage */ 3340 /* IRQ could occur when in initial stage */
3374 if (amdgpu_crtc == NULL) 3341 if (amdgpu_crtc == NULL)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 7e1cf5e4eebf..5af3721851d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -126,6 +126,13 @@ static const u32 cz_mgcg_cgcg_init[] =
126 mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, 126 mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
127}; 127};
128 128
129static const u32 stoney_golden_settings_a11[] =
130{
131 mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
132 mmFBC_MISC, 0x1f311fff, 0x14302000,
133};
134
135
129static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev) 136static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
130{ 137{
131 switch (adev->asic_type) { 138 switch (adev->asic_type) {
@@ -137,6 +144,11 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
137 cz_golden_settings_a11, 144 cz_golden_settings_a11,
138 (const u32)ARRAY_SIZE(cz_golden_settings_a11)); 145 (const u32)ARRAY_SIZE(cz_golden_settings_a11));
139 break; 146 break;
147 case CHIP_STONEY:
148 amdgpu_program_register_sequence(adev,
149 stoney_golden_settings_a11,
150 (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
151 break;
140 default: 152 default:
141 break; 153 break;
142 } 154 }
@@ -258,46 +270,22 @@ static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
258 * @crtc_id: crtc to cleanup pageflip on 270 * @crtc_id: crtc to cleanup pageflip on
259 * @crtc_base: new address of the crtc (GPU MC address) 271 * @crtc_base: new address of the crtc (GPU MC address)
260 * 272 *
261 * Does the actual pageflip (evergreen+). 273 * Triggers the actual pageflip by updating the primary
262 * During vblank we take the crtc lock and wait for the update_pending 274 * surface base address.
263 * bit to go high, when it does, we release the lock, and allow the
264 * double buffered update to take place.
265 * Returns the current update pending status.
266 */ 275 */
267static void dce_v11_0_page_flip(struct amdgpu_device *adev, 276static void dce_v11_0_page_flip(struct amdgpu_device *adev,
268 int crtc_id, u64 crtc_base) 277 int crtc_id, u64 crtc_base)
269{ 278{
270 struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; 279 struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
271 u32 tmp = RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset);
272 int i;
273
274 /* Lock the graphics update lock */
275 tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
276 WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
277 280
278 /* update the scanout addresses */ 281 /* update the scanout addresses */
279 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
280 upper_32_bits(crtc_base));
281 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
282 lower_32_bits(crtc_base));
283
284 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 282 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
285 upper_32_bits(crtc_base)); 283 upper_32_bits(crtc_base));
284 /* writing to the low address triggers the update */
286 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 285 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
287 lower_32_bits(crtc_base)); 286 lower_32_bits(crtc_base));
288 287 /* post the write */
289 /* Wait for update_pending to go high. */ 288 RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
290 for (i = 0; i < adev->usec_timeout; i++) {
291 if (RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset) &
292 GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK)
293 break;
294 udelay(1);
295 }
296 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
297
298 /* Unlock the lock, so double-buffering can take place inside vblank */
299 tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
300 WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
301} 289}
302 290
303static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, 291static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
@@ -2443,7 +2431,7 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
2443 2431
2444 /* XXX need to determine what plls are available on each DCE11 part */ 2432 /* XXX need to determine what plls are available on each DCE11 part */
2445 pll_in_use = amdgpu_pll_get_use_mask(crtc); 2433 pll_in_use = amdgpu_pll_get_use_mask(crtc);
2446 if (adev->asic_type == CHIP_CARRIZO) { 2434 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) {
2447 if (!(pll_in_use & (1 << ATOM_PPLL1))) 2435 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2448 return ATOM_PPLL1; 2436 return ATOM_PPLL1;
2449 if (!(pll_in_use & (1 << ATOM_PPLL0))) 2437 if (!(pll_in_use & (1 << ATOM_PPLL0)))
@@ -2494,26 +2482,19 @@ static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
2494 struct amdgpu_device *adev = crtc->dev->dev_private; 2482 struct amdgpu_device *adev = crtc->dev->dev_private;
2495 u32 tmp; 2483 u32 tmp;
2496 2484
2485 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2486 upper_32_bits(amdgpu_crtc->cursor_addr));
2487 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2488 lower_32_bits(amdgpu_crtc->cursor_addr));
2489
2497 tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); 2490 tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2498 tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1); 2491 tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
2499 tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2); 2492 tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
2500 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2493 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2501} 2494}
2502 2495
2503static void dce_v11_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, 2496static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
2504 uint64_t gpu_addr) 2497 int x, int y)
2505{
2506 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2507 struct amdgpu_device *adev = crtc->dev->dev_private;
2508
2509 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2510 upper_32_bits(gpu_addr));
2511 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2512 lower_32_bits(gpu_addr));
2513}
2514
2515static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
2516 int x, int y)
2517{ 2498{
2518 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2499 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2519 struct amdgpu_device *adev = crtc->dev->dev_private; 2500 struct amdgpu_device *adev = crtc->dev->dev_private;
@@ -2533,26 +2514,40 @@ static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
2533 y = 0; 2514 y = 0;
2534 } 2515 }
2535 2516
2536 dce_v11_0_lock_cursor(crtc, true);
2537 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2517 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2538 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2518 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2539 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2519 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2540 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); 2520 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2541 dce_v11_0_lock_cursor(crtc, false); 2521
2522 amdgpu_crtc->cursor_x = x;
2523 amdgpu_crtc->cursor_y = y;
2542 2524
2543 return 0; 2525 return 0;
2544} 2526}
2545 2527
2546static int dce_v11_0_crtc_cursor_set(struct drm_crtc *crtc, 2528static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
2547 struct drm_file *file_priv, 2529 int x, int y)
2548 uint32_t handle, 2530{
2549 uint32_t width, 2531 int ret;
2550 uint32_t height) 2532
2533 dce_v11_0_lock_cursor(crtc, true);
2534 ret = dce_v11_0_cursor_move_locked(crtc, x, y);
2535 dce_v11_0_lock_cursor(crtc, false);
2536
2537 return ret;
2538}
2539
2540static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
2541 struct drm_file *file_priv,
2542 uint32_t handle,
2543 uint32_t width,
2544 uint32_t height,
2545 int32_t hot_x,
2546 int32_t hot_y)
2551{ 2547{
2552 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2548 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2553 struct drm_gem_object *obj; 2549 struct drm_gem_object *obj;
2554 struct amdgpu_bo *robj; 2550 struct amdgpu_bo *aobj;
2555 uint64_t gpu_addr;
2556 int ret; 2551 int ret;
2557 2552
2558 if (!handle) { 2553 if (!handle) {
@@ -2574,41 +2569,71 @@ static int dce_v11_0_crtc_cursor_set(struct drm_crtc *crtc,
2574 return -ENOENT; 2569 return -ENOENT;
2575 } 2570 }
2576 2571
2577 robj = gem_to_amdgpu_bo(obj); 2572 aobj = gem_to_amdgpu_bo(obj);
2578 ret = amdgpu_bo_reserve(robj, false); 2573 ret = amdgpu_bo_reserve(aobj, false);
2579 if (unlikely(ret != 0)) 2574 if (ret != 0) {
2580 goto fail; 2575 drm_gem_object_unreference_unlocked(obj);
2581 ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, 2576 return ret;
2582 0, 0, &gpu_addr); 2577 }
2583 amdgpu_bo_unreserve(robj); 2578
2584 if (ret) 2579 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
2585 goto fail; 2580 amdgpu_bo_unreserve(aobj);
2581 if (ret) {
2582 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2583 drm_gem_object_unreference_unlocked(obj);
2584 return ret;
2585 }
2586 2586
2587 amdgpu_crtc->cursor_width = width; 2587 amdgpu_crtc->cursor_width = width;
2588 amdgpu_crtc->cursor_height = height; 2588 amdgpu_crtc->cursor_height = height;
2589 2589
2590 dce_v11_0_lock_cursor(crtc, true); 2590 dce_v11_0_lock_cursor(crtc, true);
2591 dce_v11_0_set_cursor(crtc, obj, gpu_addr); 2591
2592 if (hot_x != amdgpu_crtc->cursor_hot_x ||
2593 hot_y != amdgpu_crtc->cursor_hot_y) {
2594 int x, y;
2595
2596 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2597 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2598
2599 dce_v11_0_cursor_move_locked(crtc, x, y);
2600
2601 amdgpu_crtc->cursor_hot_x = hot_x;
2602 amdgpu_crtc->cursor_hot_y = hot_y;
2603 }
2604
2592 dce_v11_0_show_cursor(crtc); 2605 dce_v11_0_show_cursor(crtc);
2593 dce_v11_0_lock_cursor(crtc, false); 2606 dce_v11_0_lock_cursor(crtc, false);
2594 2607
2595unpin: 2608unpin:
2596 if (amdgpu_crtc->cursor_bo) { 2609 if (amdgpu_crtc->cursor_bo) {
2597 robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2610 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2598 ret = amdgpu_bo_reserve(robj, false); 2611 ret = amdgpu_bo_reserve(aobj, false);
2599 if (likely(ret == 0)) { 2612 if (likely(ret == 0)) {
2600 amdgpu_bo_unpin(robj); 2613 amdgpu_bo_unpin(aobj);
2601 amdgpu_bo_unreserve(robj); 2614 amdgpu_bo_unreserve(aobj);
2602 } 2615 }
2603 drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); 2616 drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
2604 } 2617 }
2605 2618
2606 amdgpu_crtc->cursor_bo = obj; 2619 amdgpu_crtc->cursor_bo = obj;
2607 return 0; 2620 return 0;
2608fail: 2621}
2609 drm_gem_object_unreference_unlocked(obj);
2610 2622
2611 return ret; 2623static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
2624{
2625 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2626
2627 if (amdgpu_crtc->cursor_bo) {
2628 dce_v11_0_lock_cursor(crtc, true);
2629
2630 dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2631 amdgpu_crtc->cursor_y);
2632
2633 dce_v11_0_show_cursor(crtc);
2634
2635 dce_v11_0_lock_cursor(crtc, false);
2636 }
2612} 2637}
2613 2638
2614static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 2639static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
@@ -2636,7 +2661,7 @@ static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
2636} 2661}
2637 2662
2638static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = { 2663static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
2639 .cursor_set = dce_v11_0_crtc_cursor_set, 2664 .cursor_set2 = dce_v11_0_crtc_cursor_set2,
2640 .cursor_move = dce_v11_0_crtc_cursor_move, 2665 .cursor_move = dce_v11_0_crtc_cursor_move,
2641 .gamma_set = dce_v11_0_crtc_gamma_set, 2666 .gamma_set = dce_v11_0_crtc_gamma_set,
2642 .set_config = amdgpu_crtc_set_config, 2667 .set_config = amdgpu_crtc_set_config,
@@ -2770,6 +2795,7 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
2770 dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0); 2795 dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2771 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode); 2796 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2772 amdgpu_atombios_crtc_scaler_setup(crtc); 2797 amdgpu_atombios_crtc_scaler_setup(crtc);
2798 dce_v11_0_cursor_reset(crtc);
2773 /* update the hw version fpr dpm */ 2799 /* update the hw version fpr dpm */
2774 amdgpu_crtc->hw_mode = *adjusted_mode; 2800 amdgpu_crtc->hw_mode = *adjusted_mode;
2775 2801
@@ -2911,6 +2937,11 @@ static int dce_v11_0_early_init(void *handle)
2911 adev->mode_info.num_hpd = 6; 2937 adev->mode_info.num_hpd = 6;
2912 adev->mode_info.num_dig = 9; 2938 adev->mode_info.num_dig = 9;
2913 break; 2939 break;
2940 case CHIP_STONEY:
2941 adev->mode_info.num_crtc = 2;
2942 adev->mode_info.num_hpd = 6;
2943 adev->mode_info.num_dig = 9;
2944 break;
2914 default: 2945 default:
2915 /* FIXME: not supported yet */ 2946 /* FIXME: not supported yet */
2916 return -EINVAL; 2947 return -EINVAL;
@@ -3009,6 +3040,7 @@ static int dce_v11_0_hw_init(void *handle)
3009 dce_v11_0_init_golden_registers(adev); 3040 dce_v11_0_init_golden_registers(adev);
3010 3041
3011 /* init dig PHYs, disp eng pll */ 3042 /* init dig PHYs, disp eng pll */
3043 amdgpu_atombios_crtc_powergate_init(adev);
3012 amdgpu_atombios_encoder_init_dig(adev); 3044 amdgpu_atombios_encoder_init_dig(adev);
3013 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); 3045 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
3014 3046
@@ -3046,25 +3078,18 @@ static int dce_v11_0_suspend(void *handle)
3046 3078
3047 amdgpu_atombios_scratch_regs_save(adev); 3079 amdgpu_atombios_scratch_regs_save(adev);
3048 3080
3049 dce_v11_0_hpd_fini(adev); 3081 return dce_v11_0_hw_fini(handle);
3050
3051 dce_v11_0_pageflip_interrupt_fini(adev);
3052
3053 return 0;
3054} 3082}
3055 3083
3056static int dce_v11_0_resume(void *handle) 3084static int dce_v11_0_resume(void *handle)
3057{ 3085{
3058 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3086 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3087 int ret;
3059 3088
3060 dce_v11_0_init_golden_registers(adev); 3089 ret = dce_v11_0_hw_init(handle);
3061 3090
3062 amdgpu_atombios_scratch_regs_restore(adev); 3091 amdgpu_atombios_scratch_regs_restore(adev);
3063 3092
3064 /* init dig PHYs, disp eng pll */
3065 amdgpu_atombios_crtc_powergate_init(adev);
3066 amdgpu_atombios_encoder_init_dig(adev);
3067 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
3068 /* turn on the BL */ 3093 /* turn on the BL */
3069 if (adev->mode_info.bl_encoder) { 3094 if (adev->mode_info.bl_encoder) {
3070 u8 bl_level = amdgpu_display_backlight_get_level(adev, 3095 u8 bl_level = amdgpu_display_backlight_get_level(adev,
@@ -3073,12 +3098,7 @@ static int dce_v11_0_resume(void *handle)
3073 bl_level); 3098 bl_level);
3074 } 3099 }
3075 3100
3076 /* initialize hpd */ 3101 return ret;
3077 dce_v11_0_hpd_init(adev);
3078
3079 dce_v11_0_pageflip_interrupt_init(adev);
3080
3081 return 0;
3082} 3102}
3083 3103
3084static bool dce_v11_0_is_idle(void *handle) 3104static bool dce_v11_0_is_idle(void *handle)
@@ -3270,37 +3290,20 @@ static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev,
3270 unsigned type, 3290 unsigned type,
3271 enum amdgpu_interrupt_state state) 3291 enum amdgpu_interrupt_state state)
3272{ 3292{
3273 u32 reg, reg_block; 3293 u32 reg;
3274 /* now deal with page flip IRQ */ 3294
3275 switch (type) { 3295 if (type >= adev->mode_info.num_crtc) {
3276 case AMDGPU_PAGEFLIP_IRQ_D1: 3296 DRM_ERROR("invalid pageflip crtc %d\n", type);
3277 reg_block = CRTC0_REGISTER_OFFSET; 3297 return -EINVAL;
3278 break;
3279 case AMDGPU_PAGEFLIP_IRQ_D2:
3280 reg_block = CRTC1_REGISTER_OFFSET;
3281 break;
3282 case AMDGPU_PAGEFLIP_IRQ_D3:
3283 reg_block = CRTC2_REGISTER_OFFSET;
3284 break;
3285 case AMDGPU_PAGEFLIP_IRQ_D4:
3286 reg_block = CRTC3_REGISTER_OFFSET;
3287 break;
3288 case AMDGPU_PAGEFLIP_IRQ_D5:
3289 reg_block = CRTC4_REGISTER_OFFSET;
3290 break;
3291 case AMDGPU_PAGEFLIP_IRQ_D6:
3292 reg_block = CRTC5_REGISTER_OFFSET;
3293 break;
3294 default:
3295 DRM_ERROR("invalid pageflip crtc %d\n", type);
3296 return -EINVAL;
3297 } 3298 }
3298 3299
3299 reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block); 3300 reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3300 if (state == AMDGPU_IRQ_STATE_DISABLE) 3301 if (state == AMDGPU_IRQ_STATE_DISABLE)
3301 WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3302 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3303 reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3302 else 3304 else
3303 WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3305 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3306 reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3304 3307
3305 return 0; 3308 return 0;
3306} 3309}
@@ -3309,7 +3312,6 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
3309 struct amdgpu_irq_src *source, 3312 struct amdgpu_irq_src *source,
3310 struct amdgpu_iv_entry *entry) 3313 struct amdgpu_iv_entry *entry)
3311{ 3314{
3312 int reg_block;
3313 unsigned long flags; 3315 unsigned long flags;
3314 unsigned crtc_id; 3316 unsigned crtc_id;
3315 struct amdgpu_crtc *amdgpu_crtc; 3317 struct amdgpu_crtc *amdgpu_crtc;
@@ -3318,33 +3320,15 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
3318 crtc_id = (entry->src_id - 8) >> 1; 3320 crtc_id = (entry->src_id - 8) >> 1;
3319 amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; 3321 amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3320 3322
3321 /* ack the interrupt */ 3323 if (crtc_id >= adev->mode_info.num_crtc) {
3322 switch(crtc_id){ 3324 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3323 case AMDGPU_PAGEFLIP_IRQ_D1: 3325 return -EINVAL;
3324 reg_block = CRTC0_REGISTER_OFFSET;
3325 break;
3326 case AMDGPU_PAGEFLIP_IRQ_D2:
3327 reg_block = CRTC1_REGISTER_OFFSET;
3328 break;
3329 case AMDGPU_PAGEFLIP_IRQ_D3:
3330 reg_block = CRTC2_REGISTER_OFFSET;
3331 break;
3332 case AMDGPU_PAGEFLIP_IRQ_D4:
3333 reg_block = CRTC3_REGISTER_OFFSET;
3334 break;
3335 case AMDGPU_PAGEFLIP_IRQ_D5:
3336 reg_block = CRTC4_REGISTER_OFFSET;
3337 break;
3338 case AMDGPU_PAGEFLIP_IRQ_D6:
3339 reg_block = CRTC5_REGISTER_OFFSET;
3340 break;
3341 default:
3342 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3343 return -EINVAL;
3344 } 3326 }
3345 3327
3346 if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) 3328 if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3347 WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); 3329 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3330 WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3331 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3348 3332
3349 /* IRQ could occur when in initial stage */ 3333 /* IRQ could occur when in initial stage */
3350 if(amdgpu_crtc == NULL) 3334 if(amdgpu_crtc == NULL)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 34b9c2a9d8d4..4f7b49a6dc50 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -229,46 +229,22 @@ static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
229 * @crtc_id: crtc to cleanup pageflip on 229 * @crtc_id: crtc to cleanup pageflip on
230 * @crtc_base: new address of the crtc (GPU MC address) 230 * @crtc_base: new address of the crtc (GPU MC address)
231 * 231 *
232 * Does the actual pageflip (evergreen+). 232 * Triggers the actual pageflip by updating the primary
233 * During vblank we take the crtc lock and wait for the update_pending 233 * surface base address.
234 * bit to go high, when it does, we release the lock, and allow the
235 * double buffered update to take place.
236 * Returns the current update pending status.
237 */ 234 */
238static void dce_v8_0_page_flip(struct amdgpu_device *adev, 235static void dce_v8_0_page_flip(struct amdgpu_device *adev,
239 int crtc_id, u64 crtc_base) 236 int crtc_id, u64 crtc_base)
240{ 237{
241 struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; 238 struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
242 u32 tmp = RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset);
243 int i;
244
245 /* Lock the graphics update lock */
246 tmp |= GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
247 WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
248
249 /* update the scanout addresses */
250 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
251 upper_32_bits(crtc_base));
252 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
253 (u32)crtc_base);
254 239
240 /* update the primary scanout addresses */
255 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 241 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
256 upper_32_bits(crtc_base)); 242 upper_32_bits(crtc_base));
243 /* writing to the low address triggers the update */
257 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 244 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
258 (u32)crtc_base); 245 lower_32_bits(crtc_base));
259 246 /* post the write */
260 /* Wait for update_pending to go high. */ 247 RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
261 for (i = 0; i < adev->usec_timeout; i++) {
262 if (RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset) &
263 GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK)
264 break;
265 udelay(1);
266 }
267 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
268
269 /* Unlock the lock, so double-buffering can take place inside vblank */
270 tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
271 WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
272} 248}
273 249
274static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, 250static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
@@ -2429,26 +2405,19 @@ static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
2429 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2405 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2430 struct amdgpu_device *adev = crtc->dev->dev_private; 2406 struct amdgpu_device *adev = crtc->dev->dev_private;
2431 2407
2408 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2409 upper_32_bits(amdgpu_crtc->cursor_addr));
2410 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2411 lower_32_bits(amdgpu_crtc->cursor_addr));
2412
2432 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, 2413 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2433 CUR_CONTROL__CURSOR_EN_MASK | 2414 CUR_CONTROL__CURSOR_EN_MASK |
2434 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) | 2415 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2435 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); 2416 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2436} 2417}
2437 2418
2438static void dce_v8_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, 2419static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
2439 uint64_t gpu_addr) 2420 int x, int y)
2440{
2441 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2442 struct amdgpu_device *adev = crtc->dev->dev_private;
2443
2444 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2445 upper_32_bits(gpu_addr));
2446 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2447 gpu_addr & 0xffffffff);
2448}
2449
2450static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
2451 int x, int y)
2452{ 2421{
2453 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2422 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2454 struct amdgpu_device *adev = crtc->dev->dev_private; 2423 struct amdgpu_device *adev = crtc->dev->dev_private;
@@ -2468,26 +2437,40 @@ static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
2468 y = 0; 2437 y = 0;
2469 } 2438 }
2470 2439
2471 dce_v8_0_lock_cursor(crtc, true);
2472 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2440 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2473 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2441 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2474 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2442 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2475 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); 2443 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2476 dce_v8_0_lock_cursor(crtc, false); 2444
2445 amdgpu_crtc->cursor_x = x;
2446 amdgpu_crtc->cursor_y = y;
2477 2447
2478 return 0; 2448 return 0;
2479} 2449}
2480 2450
2481static int dce_v8_0_crtc_cursor_set(struct drm_crtc *crtc, 2451static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
2482 struct drm_file *file_priv, 2452 int x, int y)
2483 uint32_t handle, 2453{
2484 uint32_t width, 2454 int ret;
2485 uint32_t height) 2455
2456 dce_v8_0_lock_cursor(crtc, true);
2457 ret = dce_v8_0_cursor_move_locked(crtc, x, y);
2458 dce_v8_0_lock_cursor(crtc, false);
2459
2460 return ret;
2461}
2462
2463static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
2464 struct drm_file *file_priv,
2465 uint32_t handle,
2466 uint32_t width,
2467 uint32_t height,
2468 int32_t hot_x,
2469 int32_t hot_y)
2486{ 2470{
2487 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2471 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2488 struct drm_gem_object *obj; 2472 struct drm_gem_object *obj;
2489 struct amdgpu_bo *robj; 2473 struct amdgpu_bo *aobj;
2490 uint64_t gpu_addr;
2491 int ret; 2474 int ret;
2492 2475
2493 if (!handle) { 2476 if (!handle) {
@@ -2509,41 +2492,71 @@ static int dce_v8_0_crtc_cursor_set(struct drm_crtc *crtc,
2509 return -ENOENT; 2492 return -ENOENT;
2510 } 2493 }
2511 2494
2512 robj = gem_to_amdgpu_bo(obj); 2495 aobj = gem_to_amdgpu_bo(obj);
2513 ret = amdgpu_bo_reserve(robj, false); 2496 ret = amdgpu_bo_reserve(aobj, false);
2514 if (unlikely(ret != 0)) 2497 if (ret != 0) {
2515 goto fail; 2498 drm_gem_object_unreference_unlocked(obj);
2516 ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, 2499 return ret;
2517 0, 0, &gpu_addr); 2500 }
2518 amdgpu_bo_unreserve(robj); 2501
2519 if (ret) 2502 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
2520 goto fail; 2503 amdgpu_bo_unreserve(aobj);
2504 if (ret) {
2505 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2506 drm_gem_object_unreference_unlocked(obj);
2507 return ret;
2508 }
2521 2509
2522 amdgpu_crtc->cursor_width = width; 2510 amdgpu_crtc->cursor_width = width;
2523 amdgpu_crtc->cursor_height = height; 2511 amdgpu_crtc->cursor_height = height;
2524 2512
2525 dce_v8_0_lock_cursor(crtc, true); 2513 dce_v8_0_lock_cursor(crtc, true);
2526 dce_v8_0_set_cursor(crtc, obj, gpu_addr); 2514
2515 if (hot_x != amdgpu_crtc->cursor_hot_x ||
2516 hot_y != amdgpu_crtc->cursor_hot_y) {
2517 int x, y;
2518
2519 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2520 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2521
2522 dce_v8_0_cursor_move_locked(crtc, x, y);
2523
2524 amdgpu_crtc->cursor_hot_x = hot_x;
2525 amdgpu_crtc->cursor_hot_y = hot_y;
2526 }
2527
2527 dce_v8_0_show_cursor(crtc); 2528 dce_v8_0_show_cursor(crtc);
2528 dce_v8_0_lock_cursor(crtc, false); 2529 dce_v8_0_lock_cursor(crtc, false);
2529 2530
2530unpin: 2531unpin:
2531 if (amdgpu_crtc->cursor_bo) { 2532 if (amdgpu_crtc->cursor_bo) {
2532 robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2533 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2533 ret = amdgpu_bo_reserve(robj, false); 2534 ret = amdgpu_bo_reserve(aobj, false);
2534 if (likely(ret == 0)) { 2535 if (likely(ret == 0)) {
2535 amdgpu_bo_unpin(robj); 2536 amdgpu_bo_unpin(aobj);
2536 amdgpu_bo_unreserve(robj); 2537 amdgpu_bo_unreserve(aobj);
2537 } 2538 }
2538 drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); 2539 drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
2539 } 2540 }
2540 2541
2541 amdgpu_crtc->cursor_bo = obj; 2542 amdgpu_crtc->cursor_bo = obj;
2542 return 0; 2543 return 0;
2543fail: 2544}
2544 drm_gem_object_unreference_unlocked(obj);
2545 2545
2546 return ret; 2546static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2547{
2548 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2549
2550 if (amdgpu_crtc->cursor_bo) {
2551 dce_v8_0_lock_cursor(crtc, true);
2552
2553 dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2554 amdgpu_crtc->cursor_y);
2555
2556 dce_v8_0_show_cursor(crtc);
2557
2558 dce_v8_0_lock_cursor(crtc, false);
2559 }
2547} 2560}
2548 2561
2549static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 2562static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
@@ -2571,7 +2584,7 @@ static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
2571} 2584}
2572 2585
2573static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = { 2586static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
2574 .cursor_set = dce_v8_0_crtc_cursor_set, 2587 .cursor_set2 = dce_v8_0_crtc_cursor_set2,
2575 .cursor_move = dce_v8_0_crtc_cursor_move, 2588 .cursor_move = dce_v8_0_crtc_cursor_move,
2576 .gamma_set = dce_v8_0_crtc_gamma_set, 2589 .gamma_set = dce_v8_0_crtc_gamma_set,
2577 .set_config = amdgpu_crtc_set_config, 2590 .set_config = amdgpu_crtc_set_config,
@@ -2712,6 +2725,7 @@ static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
2712 dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0); 2725 dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2713 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode); 2726 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2714 amdgpu_atombios_crtc_scaler_setup(crtc); 2727 amdgpu_atombios_crtc_scaler_setup(crtc);
2728 dce_v8_0_cursor_reset(crtc);
2715 /* update the hw version fpr dpm */ 2729 /* update the hw version fpr dpm */
2716 amdgpu_crtc->hw_mode = *adjusted_mode; 2730 amdgpu_crtc->hw_mode = *adjusted_mode;
2717 2731
@@ -2979,22 +2993,18 @@ static int dce_v8_0_suspend(void *handle)
2979 2993
2980 amdgpu_atombios_scratch_regs_save(adev); 2994 amdgpu_atombios_scratch_regs_save(adev);
2981 2995
2982 dce_v8_0_hpd_fini(adev); 2996 return dce_v8_0_hw_fini(handle);
2983
2984 dce_v8_0_pageflip_interrupt_fini(adev);
2985
2986 return 0;
2987} 2997}
2988 2998
2989static int dce_v8_0_resume(void *handle) 2999static int dce_v8_0_resume(void *handle)
2990{ 3000{
2991 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3001 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3002 int ret;
3003
3004 ret = dce_v8_0_hw_init(handle);
2992 3005
2993 amdgpu_atombios_scratch_regs_restore(adev); 3006 amdgpu_atombios_scratch_regs_restore(adev);
2994 3007
2995 /* init dig PHYs, disp eng pll */
2996 amdgpu_atombios_encoder_init_dig(adev);
2997 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2998 /* turn on the BL */ 3008 /* turn on the BL */
2999 if (adev->mode_info.bl_encoder) { 3009 if (adev->mode_info.bl_encoder) {
3000 u8 bl_level = amdgpu_display_backlight_get_level(adev, 3010 u8 bl_level = amdgpu_display_backlight_get_level(adev,
@@ -3003,12 +3013,7 @@ static int dce_v8_0_resume(void *handle)
3003 bl_level); 3013 bl_level);
3004 } 3014 }
3005 3015
3006 /* initialize hpd */ 3016 return ret;
3007 dce_v8_0_hpd_init(adev);
3008
3009 dce_v8_0_pageflip_interrupt_init(adev);
3010
3011 return 0;
3012} 3017}
3013 3018
3014static bool dce_v8_0_is_idle(void *handle) 3019static bool dce_v8_0_is_idle(void *handle)
@@ -3301,37 +3306,20 @@ static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
3301 unsigned type, 3306 unsigned type,
3302 enum amdgpu_interrupt_state state) 3307 enum amdgpu_interrupt_state state)
3303{ 3308{
3304 u32 reg, reg_block; 3309 u32 reg;
3305 /* now deal with page flip IRQ */ 3310
3306 switch (type) { 3311 if (type >= adev->mode_info.num_crtc) {
3307 case AMDGPU_PAGEFLIP_IRQ_D1: 3312 DRM_ERROR("invalid pageflip crtc %d\n", type);
3308 reg_block = CRTC0_REGISTER_OFFSET; 3313 return -EINVAL;
3309 break;
3310 case AMDGPU_PAGEFLIP_IRQ_D2:
3311 reg_block = CRTC1_REGISTER_OFFSET;
3312 break;
3313 case AMDGPU_PAGEFLIP_IRQ_D3:
3314 reg_block = CRTC2_REGISTER_OFFSET;
3315 break;
3316 case AMDGPU_PAGEFLIP_IRQ_D4:
3317 reg_block = CRTC3_REGISTER_OFFSET;
3318 break;
3319 case AMDGPU_PAGEFLIP_IRQ_D5:
3320 reg_block = CRTC4_REGISTER_OFFSET;
3321 break;
3322 case AMDGPU_PAGEFLIP_IRQ_D6:
3323 reg_block = CRTC5_REGISTER_OFFSET;
3324 break;
3325 default:
3326 DRM_ERROR("invalid pageflip crtc %d\n", type);
3327 return -EINVAL;
3328 } 3314 }
3329 3315
3330 reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block); 3316 reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3331 if (state == AMDGPU_IRQ_STATE_DISABLE) 3317 if (state == AMDGPU_IRQ_STATE_DISABLE)
3332 WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3318 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3319 reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3333 else 3320 else
3334 WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3321 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3322 reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3335 3323
3336 return 0; 3324 return 0;
3337} 3325}
@@ -3340,7 +3328,6 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
3340 struct amdgpu_irq_src *source, 3328 struct amdgpu_irq_src *source,
3341 struct amdgpu_iv_entry *entry) 3329 struct amdgpu_iv_entry *entry)
3342{ 3330{
3343 int reg_block;
3344 unsigned long flags; 3331 unsigned long flags;
3345 unsigned crtc_id; 3332 unsigned crtc_id;
3346 struct amdgpu_crtc *amdgpu_crtc; 3333 struct amdgpu_crtc *amdgpu_crtc;
@@ -3349,33 +3336,15 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
3349 crtc_id = (entry->src_id - 8) >> 1; 3336 crtc_id = (entry->src_id - 8) >> 1;
3350 amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; 3337 amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3351 3338
3352 /* ack the interrupt */ 3339 if (crtc_id >= adev->mode_info.num_crtc) {
3353 switch(crtc_id){ 3340 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3354 case AMDGPU_PAGEFLIP_IRQ_D1: 3341 return -EINVAL;
3355 reg_block = CRTC0_REGISTER_OFFSET;
3356 break;
3357 case AMDGPU_PAGEFLIP_IRQ_D2:
3358 reg_block = CRTC1_REGISTER_OFFSET;
3359 break;
3360 case AMDGPU_PAGEFLIP_IRQ_D3:
3361 reg_block = CRTC2_REGISTER_OFFSET;
3362 break;
3363 case AMDGPU_PAGEFLIP_IRQ_D4:
3364 reg_block = CRTC3_REGISTER_OFFSET;
3365 break;
3366 case AMDGPU_PAGEFLIP_IRQ_D5:
3367 reg_block = CRTC4_REGISTER_OFFSET;
3368 break;
3369 case AMDGPU_PAGEFLIP_IRQ_D6:
3370 reg_block = CRTC5_REGISTER_OFFSET;
3371 break;
3372 default:
3373 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3374 return -EINVAL;
3375 } 3342 }
3376 3343
3377 if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) 3344 if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3378 WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); 3345 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3346 WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3347 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3379 3348
3380 /* IRQ could occur when in initial stage */ 3349 /* IRQ could occur when in initial stage */
3381 if (amdgpu_crtc == NULL) 3350 if (amdgpu_crtc == NULL)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index e992bf2ff66c..72793f93e2fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -5542,24 +5542,6 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
5542 .set_powergating_state = gfx_v7_0_set_powergating_state, 5542 .set_powergating_state = gfx_v7_0_set_powergating_state,
5543}; 5543};
5544 5544
5545/**
5546 * gfx_v7_0_ring_is_lockup - check if the 3D engine is locked up
5547 *
5548 * @adev: amdgpu_device pointer
5549 * @ring: amdgpu_ring structure holding ring information
5550 *
5551 * Check if the 3D engine is locked up (CIK).
5552 * Returns true if the engine is locked, false if not.
5553 */
5554static bool gfx_v7_0_ring_is_lockup(struct amdgpu_ring *ring)
5555{
5556 if (gfx_v7_0_is_idle(ring->adev)) {
5557 amdgpu_ring_lockup_update(ring);
5558 return false;
5559 }
5560 return amdgpu_ring_test_lockup(ring);
5561}
5562
5563static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { 5545static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
5564 .get_rptr = gfx_v7_0_ring_get_rptr_gfx, 5546 .get_rptr = gfx_v7_0_ring_get_rptr_gfx,
5565 .get_wptr = gfx_v7_0_ring_get_wptr_gfx, 5547 .get_wptr = gfx_v7_0_ring_get_wptr_gfx,
@@ -5573,7 +5555,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
5573 .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush, 5555 .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
5574 .test_ring = gfx_v7_0_ring_test_ring, 5556 .test_ring = gfx_v7_0_ring_test_ring,
5575 .test_ib = gfx_v7_0_ring_test_ib, 5557 .test_ib = gfx_v7_0_ring_test_ib,
5576 .is_lockup = gfx_v7_0_ring_is_lockup,
5577 .insert_nop = amdgpu_ring_insert_nop, 5558 .insert_nop = amdgpu_ring_insert_nop,
5578}; 5559};
5579 5560
@@ -5590,7 +5571,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
5590 .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush, 5571 .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
5591 .test_ring = gfx_v7_0_ring_test_ring, 5572 .test_ring = gfx_v7_0_ring_test_ring,
5592 .test_ib = gfx_v7_0_ring_test_ib, 5573 .test_ib = gfx_v7_0_ring_test_ib,
5593 .is_lockup = gfx_v7_0_ring_is_lockup,
5594 .insert_nop = amdgpu_ring_insert_nop, 5574 .insert_nop = amdgpu_ring_insert_nop,
5595}; 5575};
5596 5576
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index cb4f68f53f24..6776cf756d40 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -73,6 +73,12 @@ MODULE_FIRMWARE("amdgpu/carrizo_mec.bin");
73MODULE_FIRMWARE("amdgpu/carrizo_mec2.bin"); 73MODULE_FIRMWARE("amdgpu/carrizo_mec2.bin");
74MODULE_FIRMWARE("amdgpu/carrizo_rlc.bin"); 74MODULE_FIRMWARE("amdgpu/carrizo_rlc.bin");
75 75
76MODULE_FIRMWARE("amdgpu/stoney_ce.bin");
77MODULE_FIRMWARE("amdgpu/stoney_pfp.bin");
78MODULE_FIRMWARE("amdgpu/stoney_me.bin");
79MODULE_FIRMWARE("amdgpu/stoney_mec.bin");
80MODULE_FIRMWARE("amdgpu/stoney_rlc.bin");
81
76MODULE_FIRMWARE("amdgpu/tonga_ce.bin"); 82MODULE_FIRMWARE("amdgpu/tonga_ce.bin");
77MODULE_FIRMWARE("amdgpu/tonga_pfp.bin"); 83MODULE_FIRMWARE("amdgpu/tonga_pfp.bin");
78MODULE_FIRMWARE("amdgpu/tonga_me.bin"); 84MODULE_FIRMWARE("amdgpu/tonga_me.bin");
@@ -229,11 +235,13 @@ static const u32 fiji_golden_common_all[] =
229 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, 235 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
230 mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a, 236 mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
231 mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e, 237 mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
232 mmGB_ADDR_CONFIG, 0xffffffff, 0x12011003, 238 mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
233 mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, 239 mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
234 mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, 240 mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
235 mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, 241 mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
236 mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF 242 mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
243 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
244 mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
237}; 245};
238 246
239static const u32 golden_settings_fiji_a10[] = 247static const u32 golden_settings_fiji_a10[] =
@@ -241,24 +249,26 @@ static const u32 golden_settings_fiji_a10[] =
241 mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, 249 mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
242 mmDB_DEBUG2, 0xf00fffff, 0x00000400, 250 mmDB_DEBUG2, 0xf00fffff, 0x00000400,
243 mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, 251 mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
244 mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x00000100,
245 mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, 252 mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
253 mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
254 mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
246 mmTA_CNTL_AUX, 0x000f000f, 0x000b0000, 255 mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
247 mmTCC_CTRL, 0x00100000, 0xf30fff7f, 256 mmTCC_CTRL, 0x00100000, 0xf31fff7f,
257 mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
248 mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff, 258 mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
249 mmTCP_CHAN_STEER_HI, 0xffffffff, 0x7d6cf5e4, 259 mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
250 mmTCP_CHAN_STEER_LO, 0xffffffff, 0x3928b1a0,
251}; 260};
252 261
253static const u32 fiji_mgcg_cgcg_init[] = 262static const u32 fiji_mgcg_cgcg_init[] =
254{ 263{
255 mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffc0, 264 mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
256 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, 265 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
257 mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, 266 mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
258 mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100, 267 mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
259 mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, 268 mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
260 mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100, 269 mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
261 mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100, 270 mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
271 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
262 mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, 272 mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
263 mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, 273 mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
264 mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, 274 mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
@@ -286,6 +296,10 @@ static const u32 fiji_mgcg_cgcg_init[] =
286 mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200, 296 mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
287 mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, 297 mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
288 mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c, 298 mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
299 mmPCIE_INDEX, 0xffffffff, 0x0140001c,
300 mmPCIE_DATA, 0x000f0000, 0x00000000,
301 mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
302 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
289 mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, 303 mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
290}; 304};
291 305
@@ -493,6 +507,42 @@ static const u32 cz_mgcg_cgcg_init[] =
493 mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, 507 mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
494}; 508};
495 509
510static const u32 stoney_golden_settings_a11[] =
511{
512 mmDB_DEBUG2, 0xf00fffff, 0x00000400,
513 mmGB_GPU_ID, 0x0000000f, 0x00000000,
514 mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
515 mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
516 mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
517 mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
518 mmTCC_CTRL, 0x00100000, 0xf31fff7f,
519 mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
520 mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f1,
521 mmTCP_CHAN_STEER_LO, 0xffffffff, 0x10101010,
522};
523
524static const u32 stoney_golden_common_all[] =
525{
526 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
527 mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000000,
528 mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
529 mmGB_ADDR_CONFIG, 0xffffffff, 0x12010001,
530 mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
531 mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
532 mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
533 mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
534};
535
536static const u32 stoney_mgcg_cgcg_init[] =
537{
538 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
539 mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
540 mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
541 mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
542 mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
543 mmATC_MISC_CG, 0xffffffff, 0x000c0200,
544};
545
496static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev); 546static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
497static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev); 547static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
498static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev); 548static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
@@ -545,6 +595,17 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
545 cz_golden_common_all, 595 cz_golden_common_all,
546 (const u32)ARRAY_SIZE(cz_golden_common_all)); 596 (const u32)ARRAY_SIZE(cz_golden_common_all));
547 break; 597 break;
598 case CHIP_STONEY:
599 amdgpu_program_register_sequence(adev,
600 stoney_mgcg_cgcg_init,
601 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
602 amdgpu_program_register_sequence(adev,
603 stoney_golden_settings_a11,
604 (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
605 amdgpu_program_register_sequence(adev,
606 stoney_golden_common_all,
607 (const u32)ARRAY_SIZE(stoney_golden_common_all));
608 break;
548 default: 609 default:
549 break; 610 break;
550 } 611 }
@@ -691,6 +752,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
691 case CHIP_FIJI: 752 case CHIP_FIJI:
692 chip_name = "fiji"; 753 chip_name = "fiji";
693 break; 754 break;
755 case CHIP_STONEY:
756 chip_name = "stoney";
757 break;
694 default: 758 default:
695 BUG(); 759 BUG();
696 } 760 }
@@ -748,21 +812,23 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
748 adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); 812 adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
749 adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); 813 adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
750 814
751 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); 815 if (adev->asic_type != CHIP_STONEY) {
752 err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); 816 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
753 if (!err) { 817 err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
754 err = amdgpu_ucode_validate(adev->gfx.mec2_fw); 818 if (!err) {
755 if (err) 819 err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
756 goto out; 820 if (err)
757 cp_hdr = (const struct gfx_firmware_header_v1_0 *) 821 goto out;
758 adev->gfx.mec2_fw->data; 822 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
759 adev->gfx.mec2_fw_version = le32_to_cpu( 823 adev->gfx.mec2_fw->data;
760 cp_hdr->header.ucode_version); 824 adev->gfx.mec2_fw_version =
761 adev->gfx.mec2_feature_version = le32_to_cpu( 825 le32_to_cpu(cp_hdr->header.ucode_version);
762 cp_hdr->ucode_feature_version); 826 adev->gfx.mec2_feature_version =
763 } else { 827 le32_to_cpu(cp_hdr->ucode_feature_version);
764 err = 0; 828 } else {
765 adev->gfx.mec2_fw = NULL; 829 err = 0;
830 adev->gfx.mec2_fw = NULL;
831 }
766 } 832 }
767 833
768 if (adev->firmware.smu_load) { 834 if (adev->firmware.smu_load) {
@@ -903,6 +969,232 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
903 return 0; 969 return 0;
904} 970}
905 971
972static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
973{
974 u32 gb_addr_config;
975 u32 mc_shared_chmap, mc_arb_ramcfg;
976 u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
977 u32 tmp;
978
979 switch (adev->asic_type) {
980 case CHIP_TOPAZ:
981 adev->gfx.config.max_shader_engines = 1;
982 adev->gfx.config.max_tile_pipes = 2;
983 adev->gfx.config.max_cu_per_sh = 6;
984 adev->gfx.config.max_sh_per_se = 1;
985 adev->gfx.config.max_backends_per_se = 2;
986 adev->gfx.config.max_texture_channel_caches = 2;
987 adev->gfx.config.max_gprs = 256;
988 adev->gfx.config.max_gs_threads = 32;
989 adev->gfx.config.max_hw_contexts = 8;
990
991 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
992 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
993 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
994 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
995 gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
996 break;
997 case CHIP_FIJI:
998 adev->gfx.config.max_shader_engines = 4;
999 adev->gfx.config.max_tile_pipes = 16;
1000 adev->gfx.config.max_cu_per_sh = 16;
1001 adev->gfx.config.max_sh_per_se = 1;
1002 adev->gfx.config.max_backends_per_se = 4;
1003 adev->gfx.config.max_texture_channel_caches = 8;
1004 adev->gfx.config.max_gprs = 256;
1005 adev->gfx.config.max_gs_threads = 32;
1006 adev->gfx.config.max_hw_contexts = 8;
1007
1008 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1009 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1010 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1011 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1012 gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1013 break;
1014 case CHIP_TONGA:
1015 adev->gfx.config.max_shader_engines = 4;
1016 adev->gfx.config.max_tile_pipes = 8;
1017 adev->gfx.config.max_cu_per_sh = 8;
1018 adev->gfx.config.max_sh_per_se = 1;
1019 adev->gfx.config.max_backends_per_se = 2;
1020 adev->gfx.config.max_texture_channel_caches = 8;
1021 adev->gfx.config.max_gprs = 256;
1022 adev->gfx.config.max_gs_threads = 32;
1023 adev->gfx.config.max_hw_contexts = 8;
1024
1025 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1026 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1027 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1028 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1029 gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1030 break;
1031 case CHIP_CARRIZO:
1032 adev->gfx.config.max_shader_engines = 1;
1033 adev->gfx.config.max_tile_pipes = 2;
1034 adev->gfx.config.max_sh_per_se = 1;
1035 adev->gfx.config.max_backends_per_se = 2;
1036
1037 switch (adev->pdev->revision) {
1038 case 0xc4:
1039 case 0x84:
1040 case 0xc8:
1041 case 0xcc:
1042 case 0xe1:
1043 case 0xe3:
1044 /* B10 */
1045 adev->gfx.config.max_cu_per_sh = 8;
1046 break;
1047 case 0xc5:
1048 case 0x81:
1049 case 0x85:
1050 case 0xc9:
1051 case 0xcd:
1052 case 0xe2:
1053 case 0xe4:
1054 /* B8 */
1055 adev->gfx.config.max_cu_per_sh = 6;
1056 break;
1057 case 0xc6:
1058 case 0xca:
1059 case 0xce:
1060 case 0x88:
1061 /* B6 */
1062 adev->gfx.config.max_cu_per_sh = 6;
1063 break;
1064 case 0xc7:
1065 case 0x87:
1066 case 0xcb:
1067 case 0xe5:
1068 case 0x89:
1069 default:
1070 /* B4 */
1071 adev->gfx.config.max_cu_per_sh = 4;
1072 break;
1073 }
1074
1075 adev->gfx.config.max_texture_channel_caches = 2;
1076 adev->gfx.config.max_gprs = 256;
1077 adev->gfx.config.max_gs_threads = 32;
1078 adev->gfx.config.max_hw_contexts = 8;
1079
1080 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1081 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1082 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1083 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1084 gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
1085 break;
1086 case CHIP_STONEY:
1087 adev->gfx.config.max_shader_engines = 1;
1088 adev->gfx.config.max_tile_pipes = 2;
1089 adev->gfx.config.max_sh_per_se = 1;
1090 adev->gfx.config.max_backends_per_se = 1;
1091
1092 switch (adev->pdev->revision) {
1093 case 0xc0:
1094 case 0xc1:
1095 case 0xc2:
1096 case 0xc4:
1097 case 0xc8:
1098 case 0xc9:
1099 adev->gfx.config.max_cu_per_sh = 3;
1100 break;
1101 case 0xd0:
1102 case 0xd1:
1103 case 0xd2:
1104 default:
1105 adev->gfx.config.max_cu_per_sh = 2;
1106 break;
1107 }
1108
1109 adev->gfx.config.max_texture_channel_caches = 2;
1110 adev->gfx.config.max_gprs = 256;
1111 adev->gfx.config.max_gs_threads = 16;
1112 adev->gfx.config.max_hw_contexts = 8;
1113
1114 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1115 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1116 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1117 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1118 gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
1119 break;
1120 default:
1121 adev->gfx.config.max_shader_engines = 2;
1122 adev->gfx.config.max_tile_pipes = 4;
1123 adev->gfx.config.max_cu_per_sh = 2;
1124 adev->gfx.config.max_sh_per_se = 1;
1125 adev->gfx.config.max_backends_per_se = 2;
1126 adev->gfx.config.max_texture_channel_caches = 4;
1127 adev->gfx.config.max_gprs = 256;
1128 adev->gfx.config.max_gs_threads = 32;
1129 adev->gfx.config.max_hw_contexts = 8;
1130
1131 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1132 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1133 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1134 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1135 gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1136 break;
1137 }
1138
1139 mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
1140 adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
1141 mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
1142
1143 adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
1144 adev->gfx.config.mem_max_burst_length_bytes = 256;
1145 if (adev->flags & AMD_IS_APU) {
1146 /* Get memory bank mapping mode. */
1147 tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
1148 dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
1149 dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
1150
1151 tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
1152 dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
1153 dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
1154
1155 /* Validate settings in case only one DIMM installed. */
1156 if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
1157 dimm00_addr_map = 0;
1158 if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
1159 dimm01_addr_map = 0;
1160 if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
1161 dimm10_addr_map = 0;
1162 if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
1163 dimm11_addr_map = 0;
1164
1165 /* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
1166 /* If ROW size(DIMM1) != ROW size(DMIMM0), ROW size should be larger one. */
1167 if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
1168 adev->gfx.config.mem_row_size_in_kb = 2;
1169 else
1170 adev->gfx.config.mem_row_size_in_kb = 1;
1171 } else {
1172 tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
1173 adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
1174 if (adev->gfx.config.mem_row_size_in_kb > 4)
1175 adev->gfx.config.mem_row_size_in_kb = 4;
1176 }
1177
1178 adev->gfx.config.shader_engine_tile_size = 32;
1179 adev->gfx.config.num_gpus = 1;
1180 adev->gfx.config.multi_gpu_tile_size = 64;
1181
1182 /* fix up row size */
1183 switch (adev->gfx.config.mem_row_size_in_kb) {
1184 case 1:
1185 default:
1186 gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
1187 break;
1188 case 2:
1189 gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
1190 break;
1191 case 4:
1192 gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
1193 break;
1194 }
1195 adev->gfx.config.gb_addr_config = gb_addr_config;
1196}
1197
906static int gfx_v8_0_sw_init(void *handle) 1198static int gfx_v8_0_sw_init(void *handle)
907{ 1199{
908 int i, r; 1200 int i, r;
@@ -1010,6 +1302,8 @@ static int gfx_v8_0_sw_init(void *handle)
1010 1302
1011 adev->gfx.ce_ram_size = 0x8000; 1303 adev->gfx.ce_ram_size = 0x8000;
1012 1304
1305 gfx_v8_0_gpu_early_init(adev);
1306
1013 return 0; 1307 return 0;
1014} 1308}
1015 1309
@@ -1610,6 +1904,273 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
1610 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); 1904 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
1611 } 1905 }
1612 break; 1906 break;
1907 case CHIP_STONEY:
1908 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
1909 switch (reg_offset) {
1910 case 0:
1911 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1912 PIPE_CONFIG(ADDR_SURF_P2) |
1913 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1914 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1915 break;
1916 case 1:
1917 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1918 PIPE_CONFIG(ADDR_SURF_P2) |
1919 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1920 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1921 break;
1922 case 2:
1923 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1924 PIPE_CONFIG(ADDR_SURF_P2) |
1925 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1926 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1927 break;
1928 case 3:
1929 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1930 PIPE_CONFIG(ADDR_SURF_P2) |
1931 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1932 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1933 break;
1934 case 4:
1935 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1936 PIPE_CONFIG(ADDR_SURF_P2) |
1937 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1938 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1939 break;
1940 case 5:
1941 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1942 PIPE_CONFIG(ADDR_SURF_P2) |
1943 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1944 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1945 break;
1946 case 6:
1947 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1948 PIPE_CONFIG(ADDR_SURF_P2) |
1949 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1950 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1951 break;
1952 case 8:
1953 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1954 PIPE_CONFIG(ADDR_SURF_P2));
1955 break;
1956 case 9:
1957 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1958 PIPE_CONFIG(ADDR_SURF_P2) |
1959 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1960 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1961 break;
1962 case 10:
1963 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1964 PIPE_CONFIG(ADDR_SURF_P2) |
1965 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1966 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1967 break;
1968 case 11:
1969 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1970 PIPE_CONFIG(ADDR_SURF_P2) |
1971 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1972 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1973 break;
1974 case 13:
1975 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1976 PIPE_CONFIG(ADDR_SURF_P2) |
1977 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1978 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1979 break;
1980 case 14:
1981 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1982 PIPE_CONFIG(ADDR_SURF_P2) |
1983 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1984 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1985 break;
1986 case 15:
1987 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1988 PIPE_CONFIG(ADDR_SURF_P2) |
1989 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1990 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1991 break;
1992 case 16:
1993 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1994 PIPE_CONFIG(ADDR_SURF_P2) |
1995 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1996 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1997 break;
1998 case 18:
1999 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2000 PIPE_CONFIG(ADDR_SURF_P2) |
2001 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2002 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2003 break;
2004 case 19:
2005 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2006 PIPE_CONFIG(ADDR_SURF_P2) |
2007 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2008 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2009 break;
2010 case 20:
2011 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2012 PIPE_CONFIG(ADDR_SURF_P2) |
2013 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2014 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2015 break;
2016 case 21:
2017 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2018 PIPE_CONFIG(ADDR_SURF_P2) |
2019 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2020 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2021 break;
2022 case 22:
2023 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2024 PIPE_CONFIG(ADDR_SURF_P2) |
2025 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2026 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2027 break;
2028 case 24:
2029 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2030 PIPE_CONFIG(ADDR_SURF_P2) |
2031 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2032 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2033 break;
2034 case 25:
2035 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2036 PIPE_CONFIG(ADDR_SURF_P2) |
2037 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2038 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2039 break;
2040 case 26:
2041 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2042 PIPE_CONFIG(ADDR_SURF_P2) |
2043 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2044 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2045 break;
2046 case 27:
2047 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2048 PIPE_CONFIG(ADDR_SURF_P2) |
2049 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2050 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2051 break;
2052 case 28:
2053 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2054 PIPE_CONFIG(ADDR_SURF_P2) |
2055 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2056 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2057 break;
2058 case 29:
2059 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2060 PIPE_CONFIG(ADDR_SURF_P2) |
2061 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2062 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2063 break;
2064 case 7:
2065 case 12:
2066 case 17:
2067 case 23:
2068 /* unused idx */
2069 continue;
2070 default:
2071 gb_tile_moden = 0;
2072 break;
2073 };
2074 adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
2075 WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
2076 }
2077 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
2078 switch (reg_offset) {
2079 case 0:
2080 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2081 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2082 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2083 NUM_BANKS(ADDR_SURF_8_BANK));
2084 break;
2085 case 1:
2086 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2087 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2088 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2089 NUM_BANKS(ADDR_SURF_8_BANK));
2090 break;
2091 case 2:
2092 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2093 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2094 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2095 NUM_BANKS(ADDR_SURF_8_BANK));
2096 break;
2097 case 3:
2098 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2099 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2100 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2101 NUM_BANKS(ADDR_SURF_8_BANK));
2102 break;
2103 case 4:
2104 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2105 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2106 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2107 NUM_BANKS(ADDR_SURF_8_BANK));
2108 break;
2109 case 5:
2110 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2111 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2112 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2113 NUM_BANKS(ADDR_SURF_8_BANK));
2114 break;
2115 case 6:
2116 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2117 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2118 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2119 NUM_BANKS(ADDR_SURF_8_BANK));
2120 break;
2121 case 8:
2122 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2123 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2124 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2125 NUM_BANKS(ADDR_SURF_16_BANK));
2126 break;
2127 case 9:
2128 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2129 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2130 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2131 NUM_BANKS(ADDR_SURF_16_BANK));
2132 break;
2133 case 10:
2134 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2135 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2136 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2137 NUM_BANKS(ADDR_SURF_16_BANK));
2138 break;
2139 case 11:
2140 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2141 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2142 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2143 NUM_BANKS(ADDR_SURF_16_BANK));
2144 break;
2145 case 12:
2146 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2147 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2148 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2149 NUM_BANKS(ADDR_SURF_16_BANK));
2150 break;
2151 case 13:
2152 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2153 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2154 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2155 NUM_BANKS(ADDR_SURF_16_BANK));
2156 break;
2157 case 14:
2158 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2159 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2160 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2161 NUM_BANKS(ADDR_SURF_8_BANK));
2162 break;
2163 case 7:
2164 /* unused idx */
2165 continue;
2166 default:
2167 gb_tile_moden = 0;
2168 break;
2169 };
2170 adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
2171 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
2172 }
2173 break;
1613 case CHIP_CARRIZO: 2174 case CHIP_CARRIZO:
1614 default: 2175 default:
1615 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { 2176 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
@@ -2043,203 +2604,23 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
2043 2604
2044static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) 2605static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
2045{ 2606{
2046 u32 gb_addr_config;
2047 u32 mc_shared_chmap, mc_arb_ramcfg;
2048 u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
2049 u32 tmp; 2607 u32 tmp;
2050 int i; 2608 int i;
2051 2609
2052 switch (adev->asic_type) {
2053 case CHIP_TOPAZ:
2054 adev->gfx.config.max_shader_engines = 1;
2055 adev->gfx.config.max_tile_pipes = 2;
2056 adev->gfx.config.max_cu_per_sh = 6;
2057 adev->gfx.config.max_sh_per_se = 1;
2058 adev->gfx.config.max_backends_per_se = 2;
2059 adev->gfx.config.max_texture_channel_caches = 2;
2060 adev->gfx.config.max_gprs = 256;
2061 adev->gfx.config.max_gs_threads = 32;
2062 adev->gfx.config.max_hw_contexts = 8;
2063
2064 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2065 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2066 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2067 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
2068 gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
2069 break;
2070 case CHIP_FIJI:
2071 adev->gfx.config.max_shader_engines = 4;
2072 adev->gfx.config.max_tile_pipes = 16;
2073 adev->gfx.config.max_cu_per_sh = 16;
2074 adev->gfx.config.max_sh_per_se = 1;
2075 adev->gfx.config.max_backends_per_se = 4;
2076 adev->gfx.config.max_texture_channel_caches = 8;
2077 adev->gfx.config.max_gprs = 256;
2078 adev->gfx.config.max_gs_threads = 32;
2079 adev->gfx.config.max_hw_contexts = 8;
2080
2081 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2082 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2083 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2084 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
2085 gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
2086 break;
2087 case CHIP_TONGA:
2088 adev->gfx.config.max_shader_engines = 4;
2089 adev->gfx.config.max_tile_pipes = 8;
2090 adev->gfx.config.max_cu_per_sh = 8;
2091 adev->gfx.config.max_sh_per_se = 1;
2092 adev->gfx.config.max_backends_per_se = 2;
2093 adev->gfx.config.max_texture_channel_caches = 8;
2094 adev->gfx.config.max_gprs = 256;
2095 adev->gfx.config.max_gs_threads = 32;
2096 adev->gfx.config.max_hw_contexts = 8;
2097
2098 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2099 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2100 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2101 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
2102 gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
2103 break;
2104 case CHIP_CARRIZO:
2105 adev->gfx.config.max_shader_engines = 1;
2106 adev->gfx.config.max_tile_pipes = 2;
2107 adev->gfx.config.max_sh_per_se = 1;
2108 adev->gfx.config.max_backends_per_se = 2;
2109
2110 switch (adev->pdev->revision) {
2111 case 0xc4:
2112 case 0x84:
2113 case 0xc8:
2114 case 0xcc:
2115 /* B10 */
2116 adev->gfx.config.max_cu_per_sh = 8;
2117 break;
2118 case 0xc5:
2119 case 0x81:
2120 case 0x85:
2121 case 0xc9:
2122 case 0xcd:
2123 /* B8 */
2124 adev->gfx.config.max_cu_per_sh = 6;
2125 break;
2126 case 0xc6:
2127 case 0xca:
2128 case 0xce:
2129 /* B6 */
2130 adev->gfx.config.max_cu_per_sh = 6;
2131 break;
2132 case 0xc7:
2133 case 0x87:
2134 case 0xcb:
2135 default:
2136 /* B4 */
2137 adev->gfx.config.max_cu_per_sh = 4;
2138 break;
2139 }
2140
2141 adev->gfx.config.max_texture_channel_caches = 2;
2142 adev->gfx.config.max_gprs = 256;
2143 adev->gfx.config.max_gs_threads = 32;
2144 adev->gfx.config.max_hw_contexts = 8;
2145
2146 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2147 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2148 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2149 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
2150 gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
2151 break;
2152 default:
2153 adev->gfx.config.max_shader_engines = 2;
2154 adev->gfx.config.max_tile_pipes = 4;
2155 adev->gfx.config.max_cu_per_sh = 2;
2156 adev->gfx.config.max_sh_per_se = 1;
2157 adev->gfx.config.max_backends_per_se = 2;
2158 adev->gfx.config.max_texture_channel_caches = 4;
2159 adev->gfx.config.max_gprs = 256;
2160 adev->gfx.config.max_gs_threads = 32;
2161 adev->gfx.config.max_hw_contexts = 8;
2162
2163 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2164 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2165 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2166 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
2167 gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
2168 break;
2169 }
2170
2171 tmp = RREG32(mmGRBM_CNTL); 2610 tmp = RREG32(mmGRBM_CNTL);
2172 tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff); 2611 tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff);
2173 WREG32(mmGRBM_CNTL, tmp); 2612 WREG32(mmGRBM_CNTL, tmp);
2174 2613
2175 mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP); 2614 WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
2176 adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); 2615 WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
2177 mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; 2616 WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
2178
2179 adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
2180 adev->gfx.config.mem_max_burst_length_bytes = 256;
2181 if (adev->flags & AMD_IS_APU) {
2182 /* Get memory bank mapping mode. */
2183 tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
2184 dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
2185 dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
2186
2187 tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
2188 dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
2189 dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
2190
2191 /* Validate settings in case only one DIMM installed. */
2192 if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
2193 dimm00_addr_map = 0;
2194 if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
2195 dimm01_addr_map = 0;
2196 if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
2197 dimm10_addr_map = 0;
2198 if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
2199 dimm11_addr_map = 0;
2200
2201 /* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
2202 /* If ROW size(DIMM1) != ROW size(DMIMM0), ROW size should be larger one. */
2203 if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
2204 adev->gfx.config.mem_row_size_in_kb = 2;
2205 else
2206 adev->gfx.config.mem_row_size_in_kb = 1;
2207 } else {
2208 tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
2209 adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
2210 if (adev->gfx.config.mem_row_size_in_kb > 4)
2211 adev->gfx.config.mem_row_size_in_kb = 4;
2212 }
2213
2214 adev->gfx.config.shader_engine_tile_size = 32;
2215 adev->gfx.config.num_gpus = 1;
2216 adev->gfx.config.multi_gpu_tile_size = 64;
2217
2218 /* fix up row size */
2219 switch (adev->gfx.config.mem_row_size_in_kb) {
2220 case 1:
2221 default:
2222 gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
2223 break;
2224 case 2:
2225 gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
2226 break;
2227 case 4:
2228 gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
2229 break;
2230 }
2231 adev->gfx.config.gb_addr_config = gb_addr_config;
2232
2233 WREG32(mmGB_ADDR_CONFIG, gb_addr_config);
2234 WREG32(mmHDP_ADDR_CONFIG, gb_addr_config);
2235 WREG32(mmDMIF_ADDR_CALC, gb_addr_config);
2236 WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, 2617 WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET,
2237 gb_addr_config & 0x70); 2618 adev->gfx.config.gb_addr_config & 0x70);
2238 WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, 2619 WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET,
2239 gb_addr_config & 0x70); 2620 adev->gfx.config.gb_addr_config & 0x70);
2240 WREG32(mmUVD_UDEC_ADDR_CONFIG, gb_addr_config); 2621 WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
2241 WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, gb_addr_config); 2622 WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
2242 WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config); 2623 WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
2243 2624
2244 gfx_v8_0_tiling_mode_table_init(adev); 2625 gfx_v8_0_tiling_mode_table_init(adev);
2245 2626
@@ -2256,13 +2637,13 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
2256 if (i == 0) { 2637 if (i == 0) {
2257 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC); 2638 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC);
2258 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC); 2639 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
2259 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE, 2640 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
2260 SH_MEM_ALIGNMENT_MODE_UNALIGNED); 2641 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2261 WREG32(mmSH_MEM_CONFIG, tmp); 2642 WREG32(mmSH_MEM_CONFIG, tmp);
2262 } else { 2643 } else {
2263 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC); 2644 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC);
2264 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_NC); 2645 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_NC);
2265 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE, 2646 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
2266 SH_MEM_ALIGNMENT_MODE_UNALIGNED); 2647 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2267 WREG32(mmSH_MEM_CONFIG, tmp); 2648 WREG32(mmSH_MEM_CONFIG, tmp);
2268 } 2649 }
@@ -2377,7 +2758,7 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
2377 WREG32(mmRLC_CNTL, tmp); 2758 WREG32(mmRLC_CNTL, tmp);
2378 2759
2379 /* carrizo do enable cp interrupt after cp inited */ 2760 /* carrizo do enable cp interrupt after cp inited */
2380 if (adev->asic_type != CHIP_CARRIZO) 2761 if (!(adev->flags & AMD_IS_APU))
2381 gfx_v8_0_enable_gui_idle_interrupt(adev, true); 2762 gfx_v8_0_enable_gui_idle_interrupt(adev, true);
2382 2763
2383 udelay(50); 2764 udelay(50);
@@ -2599,6 +2980,10 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
2599 amdgpu_ring_write(ring, 0x00000002); 2980 amdgpu_ring_write(ring, 0x00000002);
2600 amdgpu_ring_write(ring, 0x00000000); 2981 amdgpu_ring_write(ring, 0x00000000);
2601 break; 2982 break;
2983 case CHIP_STONEY:
2984 amdgpu_ring_write(ring, 0x00000000);
2985 amdgpu_ring_write(ring, 0x00000000);
2986 break;
2602 default: 2987 default:
2603 BUG(); 2988 BUG();
2604 } 2989 }
@@ -3233,7 +3618,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
3233 /* enable the doorbell if requested */ 3618 /* enable the doorbell if requested */
3234 if (use_doorbell) { 3619 if (use_doorbell) {
3235 if ((adev->asic_type == CHIP_CARRIZO) || 3620 if ((adev->asic_type == CHIP_CARRIZO) ||
3236 (adev->asic_type == CHIP_FIJI)) { 3621 (adev->asic_type == CHIP_FIJI) ||
3622 (adev->asic_type == CHIP_STONEY)) {
3237 WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, 3623 WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
3238 AMDGPU_DOORBELL_KIQ << 2); 3624 AMDGPU_DOORBELL_KIQ << 2);
3239 WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, 3625 WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
@@ -3305,7 +3691,7 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
3305{ 3691{
3306 int r; 3692 int r;
3307 3693
3308 if (adev->asic_type != CHIP_CARRIZO) 3694 if (!(adev->flags & AMD_IS_APU))
3309 gfx_v8_0_enable_gui_idle_interrupt(adev, false); 3695 gfx_v8_0_enable_gui_idle_interrupt(adev, false);
3310 3696
3311 if (!adev->firmware.smu_load) { 3697 if (!adev->firmware.smu_load) {
@@ -4068,15 +4454,6 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4068 } 4454 }
4069} 4455}
4070 4456
4071static bool gfx_v8_0_ring_is_lockup(struct amdgpu_ring *ring)
4072{
4073 if (gfx_v8_0_is_idle(ring->adev)) {
4074 amdgpu_ring_lockup_update(ring);
4075 return false;
4076 }
4077 return amdgpu_ring_test_lockup(ring);
4078}
4079
4080static u32 gfx_v8_0_ring_get_rptr_compute(struct amdgpu_ring *ring) 4457static u32 gfx_v8_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
4081{ 4458{
4082 return ring->adev->wb.wb[ring->rptr_offs]; 4459 return ring->adev->wb.wb[ring->rptr_offs];
@@ -4107,6 +4484,7 @@ static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
4107 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5)); 4484 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
4108 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | 4485 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
4109 EOP_TC_ACTION_EN | 4486 EOP_TC_ACTION_EN |
4487 EOP_TC_WB_ACTION_EN |
4110 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 4488 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
4111 EVENT_INDEX(5))); 4489 EVENT_INDEX(5)));
4112 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); 4490 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
@@ -4357,7 +4735,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
4357 .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush, 4735 .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
4358 .test_ring = gfx_v8_0_ring_test_ring, 4736 .test_ring = gfx_v8_0_ring_test_ring,
4359 .test_ib = gfx_v8_0_ring_test_ib, 4737 .test_ib = gfx_v8_0_ring_test_ib,
4360 .is_lockup = gfx_v8_0_ring_is_lockup,
4361 .insert_nop = amdgpu_ring_insert_nop, 4738 .insert_nop = amdgpu_ring_insert_nop,
4362}; 4739};
4363 4740
@@ -4374,7 +4751,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
4374 .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush, 4751 .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
4375 .test_ring = gfx_v8_0_ring_test_ring, 4752 .test_ring = gfx_v8_0_ring_test_ring,
4376 .test_ib = gfx_v8_0_ring_test_ib, 4753 .test_ib = gfx_v8_0_ring_test_ib,
4377 .is_lockup = gfx_v8_0_ring_is_lockup,
4378 .insert_nop = amdgpu_ring_insert_nop, 4754 .insert_nop = amdgpu_ring_insert_nop,
4379}; 4755};
4380 4756
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index fab5471d25d7..85bbcdc73fff 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -436,6 +436,33 @@ static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev,
436} 436}
437 437
438/** 438/**
439 * gmc_v8_0_set_fault_enable_default - update VM fault handling
440 *
441 * @adev: amdgpu_device pointer
442 * @value: true redirects VM faults to the default page
443 */
444static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev,
445 bool value)
446{
447 u32 tmp;
448
449 tmp = RREG32(mmVM_CONTEXT1_CNTL);
450 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
451 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
452 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
453 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
454 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
455 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
456 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
457 VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
458 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
459 READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
460 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
461 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
462 WREG32(mmVM_CONTEXT1_CNTL, tmp);
463}
464
465/**
439 * gmc_v7_0_gart_enable - gart enable 466 * gmc_v7_0_gart_enable - gart enable
440 * 467 *
441 * @adev: amdgpu_device pointer 468 * @adev: amdgpu_device pointer
@@ -523,15 +550,13 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
523 tmp = RREG32(mmVM_CONTEXT1_CNTL); 550 tmp = RREG32(mmVM_CONTEXT1_CNTL);
524 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); 551 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
525 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1); 552 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
526 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
527 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
528 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
529 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
530 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
531 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
532 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, 553 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
533 amdgpu_vm_block_size - 9); 554 amdgpu_vm_block_size - 9);
534 WREG32(mmVM_CONTEXT1_CNTL, tmp); 555 WREG32(mmVM_CONTEXT1_CNTL, tmp);
556 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
557 gmc_v7_0_set_fault_enable_default(adev, false);
558 else
559 gmc_v7_0_set_fault_enable_default(adev, true);
535 560
536 if (adev->asic_type == CHIP_KAVERI) { 561 if (adev->asic_type == CHIP_KAVERI) {
537 tmp = RREG32(mmCHUB_CONTROL); 562 tmp = RREG32(mmCHUB_CONTROL);
@@ -940,7 +965,7 @@ static int gmc_v7_0_sw_fini(void *handle)
940 965
941 if (adev->vm_manager.enabled) { 966 if (adev->vm_manager.enabled) {
942 for (i = 0; i < AMDGPU_NUM_VM; ++i) 967 for (i = 0; i < AMDGPU_NUM_VM; ++i)
943 amdgpu_fence_unref(&adev->vm_manager.active[i]); 968 fence_put(adev->vm_manager.active[i]);
944 gmc_v7_0_vm_fini(adev); 969 gmc_v7_0_vm_fini(adev);
945 adev->vm_manager.enabled = false; 970 adev->vm_manager.enabled = false;
946 } 971 }
@@ -990,7 +1015,7 @@ static int gmc_v7_0_suspend(void *handle)
990 1015
991 if (adev->vm_manager.enabled) { 1016 if (adev->vm_manager.enabled) {
992 for (i = 0; i < AMDGPU_NUM_VM; ++i) 1017 for (i = 0; i < AMDGPU_NUM_VM; ++i)
993 amdgpu_fence_unref(&adev->vm_manager.active[i]); 1018 fence_put(adev->vm_manager.active[i]);
994 gmc_v7_0_vm_fini(adev); 1019 gmc_v7_0_vm_fini(adev);
995 adev->vm_manager.enabled = false; 1020 adev->vm_manager.enabled = false;
996 } 1021 }
@@ -1268,6 +1293,9 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
1268 if (!addr && !status) 1293 if (!addr && !status)
1269 return 0; 1294 return 0;
1270 1295
1296 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
1297 gmc_v7_0_set_fault_enable_default(adev, false);
1298
1271 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", 1299 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1272 entry->src_id, entry->src_data); 1300 entry->src_id, entry->src_data);
1273 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 1301 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 7bc9e9fcf3d2..1bcc4e74e3b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -93,6 +93,12 @@ static const u32 cz_mgcg_cgcg_init[] =
93 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 93 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
94}; 94};
95 95
96static const u32 stoney_mgcg_cgcg_init[] =
97{
98 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
99};
100
101
96static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) 102static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
97{ 103{
98 switch (adev->asic_type) { 104 switch (adev->asic_type) {
@@ -125,6 +131,11 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
125 cz_mgcg_cgcg_init, 131 cz_mgcg_cgcg_init,
126 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init)); 132 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
127 break; 133 break;
134 case CHIP_STONEY:
135 amdgpu_program_register_sequence(adev,
136 stoney_mgcg_cgcg_init,
137 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
138 break;
128 default: 139 default:
129 break; 140 break;
130 } 141 }
@@ -228,6 +239,7 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
228 chip_name = "fiji"; 239 chip_name = "fiji";
229 break; 240 break;
230 case CHIP_CARRIZO: 241 case CHIP_CARRIZO:
242 case CHIP_STONEY:
231 return 0; 243 return 0;
232 default: BUG(); 244 default: BUG();
233 } 245 }
@@ -550,6 +562,35 @@ static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
550} 562}
551 563
552/** 564/**
565 * gmc_v8_0_set_fault_enable_default - update VM fault handling
566 *
567 * @adev: amdgpu_device pointer
568 * @value: true redirects VM faults to the default page
569 */
570static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
571 bool value)
572{
573 u32 tmp;
574
575 tmp = RREG32(mmVM_CONTEXT1_CNTL);
576 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
577 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
578 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
579 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
580 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
581 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
582 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
583 VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
584 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
585 READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
586 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
587 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
588 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
589 EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
590 WREG32(mmVM_CONTEXT1_CNTL, tmp);
591}
592
593/**
553 * gmc_v8_0_gart_enable - gart enable 594 * gmc_v8_0_gart_enable - gart enable
554 * 595 *
555 * @adev: amdgpu_device pointer 596 * @adev: amdgpu_device pointer
@@ -663,6 +704,10 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
663 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, 704 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
664 amdgpu_vm_block_size - 9); 705 amdgpu_vm_block_size - 9);
665 WREG32(mmVM_CONTEXT1_CNTL, tmp); 706 WREG32(mmVM_CONTEXT1_CNTL, tmp);
707 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
708 gmc_v8_0_set_fault_enable_default(adev, false);
709 else
710 gmc_v8_0_set_fault_enable_default(adev, true);
666 711
667 gmc_v8_0_gart_flush_gpu_tlb(adev, 0); 712 gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
668 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 713 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -939,7 +984,7 @@ static int gmc_v8_0_sw_fini(void *handle)
939 984
940 if (adev->vm_manager.enabled) { 985 if (adev->vm_manager.enabled) {
941 for (i = 0; i < AMDGPU_NUM_VM; ++i) 986 for (i = 0; i < AMDGPU_NUM_VM; ++i)
942 amdgpu_fence_unref(&adev->vm_manager.active[i]); 987 fence_put(adev->vm_manager.active[i]);
943 gmc_v8_0_vm_fini(adev); 988 gmc_v8_0_vm_fini(adev);
944 adev->vm_manager.enabled = false; 989 adev->vm_manager.enabled = false;
945 } 990 }
@@ -991,7 +1036,7 @@ static int gmc_v8_0_suspend(void *handle)
991 1036
992 if (adev->vm_manager.enabled) { 1037 if (adev->vm_manager.enabled) {
993 for (i = 0; i < AMDGPU_NUM_VM; ++i) 1038 for (i = 0; i < AMDGPU_NUM_VM; ++i)
994 amdgpu_fence_unref(&adev->vm_manager.active[i]); 1039 fence_put(adev->vm_manager.active[i]);
995 gmc_v8_0_vm_fini(adev); 1040 gmc_v8_0_vm_fini(adev);
996 adev->vm_manager.enabled = false; 1041 adev->vm_manager.enabled = false;
997 } 1042 }
@@ -1268,6 +1313,9 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
1268 if (!addr && !status) 1313 if (!addr && !status)
1269 return 0; 1314 return 0;
1270 1315
1316 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
1317 gmc_v8_0_set_fault_enable_default(adev, false);
1318
1271 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", 1319 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1272 entry->src_id, entry->src_data); 1320 entry->src_id, entry->src_data);
1273 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 1321 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 14e87234171a..2cf50180cc51 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -118,7 +118,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
118{ 118{
119 const char *chip_name; 119 const char *chip_name;
120 char fw_name[30]; 120 char fw_name[30];
121 int err, i; 121 int err = 0, i;
122 struct amdgpu_firmware_info *info = NULL; 122 struct amdgpu_firmware_info *info = NULL;
123 const struct common_firmware_header *header = NULL; 123 const struct common_firmware_header *header = NULL;
124 const struct sdma_firmware_header_v1_0 *hdr; 124 const struct sdma_firmware_header_v1_0 *hdr;
@@ -132,27 +132,27 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
132 default: BUG(); 132 default: BUG();
133 } 133 }
134 134
135 for (i = 0; i < SDMA_MAX_INSTANCE; i++) { 135 for (i = 0; i < adev->sdma.num_instances; i++) {
136 if (i == 0) 136 if (i == 0)
137 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name); 137 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
138 else 138 else
139 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name); 139 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
140 err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev); 140 err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
141 if (err) 141 if (err)
142 goto out; 142 goto out;
143 err = amdgpu_ucode_validate(adev->sdma[i].fw); 143 err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
144 if (err) 144 if (err)
145 goto out; 145 goto out;
146 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; 146 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
147 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); 147 adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
148 adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); 148 adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
149 if (adev->sdma[i].feature_version >= 20) 149 if (adev->sdma.instance[i].feature_version >= 20)
150 adev->sdma[i].burst_nop = true; 150 adev->sdma.instance[i].burst_nop = true;
151 151
152 if (adev->firmware.smu_load) { 152 if (adev->firmware.smu_load) {
153 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; 153 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
154 info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i; 154 info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
155 info->fw = adev->sdma[i].fw; 155 info->fw = adev->sdma.instance[i].fw;
156 header = (const struct common_firmware_header *)info->fw->data; 156 header = (const struct common_firmware_header *)info->fw->data;
157 adev->firmware.fw_size += 157 adev->firmware.fw_size +=
158 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); 158 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
@@ -164,9 +164,9 @@ out:
164 printk(KERN_ERR 164 printk(KERN_ERR
165 "sdma_v2_4: Failed to load firmware \"%s\"\n", 165 "sdma_v2_4: Failed to load firmware \"%s\"\n",
166 fw_name); 166 fw_name);
167 for (i = 0; i < SDMA_MAX_INSTANCE; i++) { 167 for (i = 0; i < adev->sdma.num_instances; i++) {
168 release_firmware(adev->sdma[i].fw); 168 release_firmware(adev->sdma.instance[i].fw);
169 adev->sdma[i].fw = NULL; 169 adev->sdma.instance[i].fw = NULL;
170 } 170 }
171 } 171 }
172 return err; 172 return err;
@@ -199,7 +199,7 @@ static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
199static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring) 199static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
200{ 200{
201 struct amdgpu_device *adev = ring->adev; 201 struct amdgpu_device *adev = ring->adev;
202 int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1; 202 int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
203 u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2; 203 u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
204 204
205 return wptr; 205 return wptr;
@@ -215,14 +215,14 @@ static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
215static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring) 215static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
216{ 216{
217 struct amdgpu_device *adev = ring->adev; 217 struct amdgpu_device *adev = ring->adev;
218 int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1; 218 int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
219 219
220 WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2); 220 WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
221} 221}
222 222
223static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) 223static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
224{ 224{
225 struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring); 225 struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
226 int i; 226 int i;
227 227
228 for (i = 0; i < count; i++) 228 for (i = 0; i < count; i++)
@@ -284,7 +284,7 @@ static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
284{ 284{
285 u32 ref_and_mask = 0; 285 u32 ref_and_mask = 0;
286 286
287 if (ring == &ring->adev->sdma[0].ring) 287 if (ring == &ring->adev->sdma.instance[0].ring)
288 ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1); 288 ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
289 else 289 else
290 ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1); 290 ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@@ -368,8 +368,8 @@ static bool sdma_v2_4_ring_emit_semaphore(struct amdgpu_ring *ring,
368 */ 368 */
369static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev) 369static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
370{ 370{
371 struct amdgpu_ring *sdma0 = &adev->sdma[0].ring; 371 struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
372 struct amdgpu_ring *sdma1 = &adev->sdma[1].ring; 372 struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
373 u32 rb_cntl, ib_cntl; 373 u32 rb_cntl, ib_cntl;
374 int i; 374 int i;
375 375
@@ -377,7 +377,7 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
377 (adev->mman.buffer_funcs_ring == sdma1)) 377 (adev->mman.buffer_funcs_ring == sdma1))
378 amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); 378 amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
379 379
380 for (i = 0; i < SDMA_MAX_INSTANCE; i++) { 380 for (i = 0; i < adev->sdma.num_instances; i++) {
381 rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); 381 rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
382 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); 382 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
383 WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); 383 WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
@@ -419,7 +419,7 @@ static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
419 sdma_v2_4_rlc_stop(adev); 419 sdma_v2_4_rlc_stop(adev);
420 } 420 }
421 421
422 for (i = 0; i < SDMA_MAX_INSTANCE; i++) { 422 for (i = 0; i < adev->sdma.num_instances; i++) {
423 f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]); 423 f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
424 if (enable) 424 if (enable)
425 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0); 425 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
@@ -445,8 +445,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
445 u32 wb_offset; 445 u32 wb_offset;
446 int i, j, r; 446 int i, j, r;
447 447
448 for (i = 0; i < SDMA_MAX_INSTANCE; i++) { 448 for (i = 0; i < adev->sdma.num_instances; i++) {
449 ring = &adev->sdma[i].ring; 449 ring = &adev->sdma.instance[i].ring;
450 wb_offset = (ring->rptr_offs * 4); 450 wb_offset = (ring->rptr_offs * 4);
451 451
452 mutex_lock(&adev->srbm_mutex); 452 mutex_lock(&adev->srbm_mutex);
@@ -545,29 +545,23 @@ static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
545 const __le32 *fw_data; 545 const __le32 *fw_data;
546 u32 fw_size; 546 u32 fw_size;
547 int i, j; 547 int i, j;
548 bool smc_loads_fw = false; /* XXX fix me */
549
550 if (!adev->sdma[0].fw || !adev->sdma[1].fw)
551 return -EINVAL;
552 548
553 /* halt the MEs */ 549 /* halt the MEs */
554 sdma_v2_4_enable(adev, false); 550 sdma_v2_4_enable(adev, false);
555 551
556 if (smc_loads_fw) { 552 for (i = 0; i < adev->sdma.num_instances; i++) {
557 /* XXX query SMC for fw load complete */ 553 if (!adev->sdma.instance[i].fw)
558 } else { 554 return -EINVAL;
559 for (i = 0; i < SDMA_MAX_INSTANCE; i++) { 555 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
560 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; 556 amdgpu_ucode_print_sdma_hdr(&hdr->header);
561 amdgpu_ucode_print_sdma_hdr(&hdr->header); 557 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
562 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 558 fw_data = (const __le32 *)
563 fw_data = (const __le32 *) 559 (adev->sdma.instance[i].fw->data +
564 (adev->sdma[i].fw->data + 560 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
565 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 561 WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
566 WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); 562 for (j = 0; j < fw_size; j++)
567 for (j = 0; j < fw_size; j++) 563 WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
568 WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++)); 564 WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
569 WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
570 }
571 } 565 }
572 566
573 return 0; 567 return 0;
@@ -894,7 +888,7 @@ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib,
894 */ 888 */
895static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib) 889static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib)
896{ 890{
897 struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring); 891 struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
898 u32 pad_count; 892 u32 pad_count;
899 int i; 893 int i;
900 894
@@ -952,6 +946,8 @@ static int sdma_v2_4_early_init(void *handle)
952{ 946{
953 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 947 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
954 948
949 adev->sdma.num_instances = SDMA_MAX_INSTANCE;
950
955 sdma_v2_4_set_ring_funcs(adev); 951 sdma_v2_4_set_ring_funcs(adev);
956 sdma_v2_4_set_buffer_funcs(adev); 952 sdma_v2_4_set_buffer_funcs(adev);
957 sdma_v2_4_set_vm_pte_funcs(adev); 953 sdma_v2_4_set_vm_pte_funcs(adev);
@@ -963,21 +959,21 @@ static int sdma_v2_4_early_init(void *handle)
963static int sdma_v2_4_sw_init(void *handle) 959static int sdma_v2_4_sw_init(void *handle)
964{ 960{
965 struct amdgpu_ring *ring; 961 struct amdgpu_ring *ring;
966 int r; 962 int r, i;
967 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 963 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
968 964
969 /* SDMA trap event */ 965 /* SDMA trap event */
970 r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq); 966 r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
971 if (r) 967 if (r)
972 return r; 968 return r;
973 969
974 /* SDMA Privileged inst */ 970 /* SDMA Privileged inst */
975 r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq); 971 r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
976 if (r) 972 if (r)
977 return r; 973 return r;
978 974
979 /* SDMA Privileged inst */ 975 /* SDMA Privileged inst */
980 r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq); 976 r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
981 if (r) 977 if (r)
982 return r; 978 return r;
983 979
@@ -987,31 +983,20 @@ static int sdma_v2_4_sw_init(void *handle)
987 return r; 983 return r;
988 } 984 }
989 985
990 ring = &adev->sdma[0].ring; 986 for (i = 0; i < adev->sdma.num_instances; i++) {
991 ring->ring_obj = NULL; 987 ring = &adev->sdma.instance[i].ring;
992 ring->use_doorbell = false; 988 ring->ring_obj = NULL;
993 989 ring->use_doorbell = false;
994 ring = &adev->sdma[1].ring; 990 sprintf(ring->name, "sdma%d", i);
995 ring->ring_obj = NULL; 991 r = amdgpu_ring_init(adev, ring, 256 * 1024,
996 ring->use_doorbell = false; 992 SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
997 993 &adev->sdma.trap_irq,
998 ring = &adev->sdma[0].ring; 994 (i == 0) ?
999 sprintf(ring->name, "sdma0"); 995 AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
1000 r = amdgpu_ring_init(adev, ring, 256 * 1024, 996 AMDGPU_RING_TYPE_SDMA);
1001 SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf, 997 if (r)
1002 &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0, 998 return r;
1003 AMDGPU_RING_TYPE_SDMA); 999 }
1004 if (r)
1005 return r;
1006
1007 ring = &adev->sdma[1].ring;
1008 sprintf(ring->name, "sdma1");
1009 r = amdgpu_ring_init(adev, ring, 256 * 1024,
1010 SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
1011 &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
1012 AMDGPU_RING_TYPE_SDMA);
1013 if (r)
1014 return r;
1015 1000
1016 return r; 1001 return r;
1017} 1002}
@@ -1019,9 +1004,10 @@ static int sdma_v2_4_sw_init(void *handle)
1019static int sdma_v2_4_sw_fini(void *handle) 1004static int sdma_v2_4_sw_fini(void *handle)
1020{ 1005{
1021 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1006 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1007 int i;
1022 1008
1023 amdgpu_ring_fini(&adev->sdma[0].ring); 1009 for (i = 0; i < adev->sdma.num_instances; i++)
1024 amdgpu_ring_fini(&adev->sdma[1].ring); 1010 amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1025 1011
1026 return 0; 1012 return 0;
1027} 1013}
@@ -1100,7 +1086,7 @@ static void sdma_v2_4_print_status(void *handle)
1100 dev_info(adev->dev, "VI SDMA registers\n"); 1086 dev_info(adev->dev, "VI SDMA registers\n");
1101 dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", 1087 dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
1102 RREG32(mmSRBM_STATUS2)); 1088 RREG32(mmSRBM_STATUS2));
1103 for (i = 0; i < SDMA_MAX_INSTANCE; i++) { 1089 for (i = 0; i < adev->sdma.num_instances; i++) {
1104 dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n", 1090 dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n",
1105 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i])); 1091 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
1106 dev_info(adev->dev, " SDMA%d_F32_CNTL=0x%08X\n", 1092 dev_info(adev->dev, " SDMA%d_F32_CNTL=0x%08X\n",
@@ -1243,7 +1229,7 @@ static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
1243 case 0: 1229 case 0:
1244 switch (queue_id) { 1230 switch (queue_id) {
1245 case 0: 1231 case 0:
1246 amdgpu_fence_process(&adev->sdma[0].ring); 1232 amdgpu_fence_process(&adev->sdma.instance[0].ring);
1247 break; 1233 break;
1248 case 1: 1234 case 1:
1249 /* XXX compute */ 1235 /* XXX compute */
@@ -1256,7 +1242,7 @@ static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
1256 case 1: 1242 case 1:
1257 switch (queue_id) { 1243 switch (queue_id) {
1258 case 0: 1244 case 0:
1259 amdgpu_fence_process(&adev->sdma[1].ring); 1245 amdgpu_fence_process(&adev->sdma.instance[1].ring);
1260 break; 1246 break;
1261 case 1: 1247 case 1:
1262 /* XXX compute */ 1248 /* XXX compute */
@@ -1309,24 +1295,6 @@ const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
1309 .set_powergating_state = sdma_v2_4_set_powergating_state, 1295 .set_powergating_state = sdma_v2_4_set_powergating_state,
1310}; 1296};
1311 1297
1312/**
1313 * sdma_v2_4_ring_is_lockup - Check if the DMA engine is locked up
1314 *
1315 * @ring: amdgpu_ring structure holding ring information
1316 *
1317 * Check if the async DMA engine is locked up (VI).
1318 * Returns true if the engine appears to be locked up, false if not.
1319 */
1320static bool sdma_v2_4_ring_is_lockup(struct amdgpu_ring *ring)
1321{
1322
1323 if (sdma_v2_4_is_idle(ring->adev)) {
1324 amdgpu_ring_lockup_update(ring);
1325 return false;
1326 }
1327 return amdgpu_ring_test_lockup(ring);
1328}
1329
1330static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = { 1298static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
1331 .get_rptr = sdma_v2_4_ring_get_rptr, 1299 .get_rptr = sdma_v2_4_ring_get_rptr,
1332 .get_wptr = sdma_v2_4_ring_get_wptr, 1300 .get_wptr = sdma_v2_4_ring_get_wptr,
@@ -1339,14 +1307,15 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
1339 .emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush, 1307 .emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
1340 .test_ring = sdma_v2_4_ring_test_ring, 1308 .test_ring = sdma_v2_4_ring_test_ring,
1341 .test_ib = sdma_v2_4_ring_test_ib, 1309 .test_ib = sdma_v2_4_ring_test_ib,
1342 .is_lockup = sdma_v2_4_ring_is_lockup,
1343 .insert_nop = sdma_v2_4_ring_insert_nop, 1310 .insert_nop = sdma_v2_4_ring_insert_nop,
1344}; 1311};
1345 1312
1346static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev) 1313static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
1347{ 1314{
1348 adev->sdma[0].ring.funcs = &sdma_v2_4_ring_funcs; 1315 int i;
1349 adev->sdma[1].ring.funcs = &sdma_v2_4_ring_funcs; 1316
1317 for (i = 0; i < adev->sdma.num_instances; i++)
1318 adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
1350} 1319}
1351 1320
1352static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = { 1321static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
@@ -1360,9 +1329,9 @@ static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = {
1360 1329
1361static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev) 1330static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
1362{ 1331{
1363 adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; 1332 adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
1364 adev->sdma_trap_irq.funcs = &sdma_v2_4_trap_irq_funcs; 1333 adev->sdma.trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
1365 adev->sdma_illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs; 1334 adev->sdma.illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
1366} 1335}
1367 1336
1368/** 1337/**
@@ -1428,7 +1397,7 @@ static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
1428{ 1397{
1429 if (adev->mman.buffer_funcs == NULL) { 1398 if (adev->mman.buffer_funcs == NULL) {
1430 adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs; 1399 adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
1431 adev->mman.buffer_funcs_ring = &adev->sdma[0].ring; 1400 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
1432 } 1401 }
1433} 1402}
1434 1403
@@ -1443,7 +1412,7 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
1443{ 1412{
1444 if (adev->vm_manager.vm_pte_funcs == NULL) { 1413 if (adev->vm_manager.vm_pte_funcs == NULL) {
1445 adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs; 1414 adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
1446 adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring; 1415 adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
1447 adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; 1416 adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
1448 } 1417 }
1449} 1418}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 9bfe92df15f7..7253132f04b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -55,6 +55,7 @@ MODULE_FIRMWARE("amdgpu/carrizo_sdma.bin");
55MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin"); 55MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin");
56MODULE_FIRMWARE("amdgpu/fiji_sdma.bin"); 56MODULE_FIRMWARE("amdgpu/fiji_sdma.bin");
57MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin"); 57MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin");
58MODULE_FIRMWARE("amdgpu/stoney_sdma.bin");
58 59
59static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = 60static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
60{ 61{
@@ -122,6 +123,19 @@ static const u32 cz_mgcg_cgcg_init[] =
122 mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100 123 mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
123}; 124};
124 125
126static const u32 stoney_golden_settings_a11[] =
127{
128 mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
129 mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
130 mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
131 mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
132};
133
134static const u32 stoney_mgcg_cgcg_init[] =
135{
136 mmSDMA0_CLK_CTRL, 0xffffffff, 0x00000100,
137};
138
125/* 139/*
126 * sDMA - System DMA 140 * sDMA - System DMA
127 * Starting with CIK, the GPU has new asynchronous 141 * Starting with CIK, the GPU has new asynchronous
@@ -166,6 +180,14 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
166 cz_golden_settings_a11, 180 cz_golden_settings_a11,
167 (const u32)ARRAY_SIZE(cz_golden_settings_a11)); 181 (const u32)ARRAY_SIZE(cz_golden_settings_a11));
168 break; 182 break;
183 case CHIP_STONEY:
184 amdgpu_program_register_sequence(adev,
185 stoney_mgcg_cgcg_init,
186 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
187 amdgpu_program_register_sequence(adev,
188 stoney_golden_settings_a11,
189 (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
190 break;
169 default: 191 default:
170 break; 192 break;
171 } 193 }
@@ -184,7 +206,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
184{ 206{
185 const char *chip_name; 207 const char *chip_name;
186 char fw_name[30]; 208 char fw_name[30];
187 int err, i; 209 int err = 0, i;
188 struct amdgpu_firmware_info *info = NULL; 210 struct amdgpu_firmware_info *info = NULL;
189 const struct common_firmware_header *header = NULL; 211 const struct common_firmware_header *header = NULL;
190 const struct sdma_firmware_header_v1_0 *hdr; 212 const struct sdma_firmware_header_v1_0 *hdr;
@@ -201,30 +223,33 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
201 case CHIP_CARRIZO: 223 case CHIP_CARRIZO:
202 chip_name = "carrizo"; 224 chip_name = "carrizo";
203 break; 225 break;
226 case CHIP_STONEY:
227 chip_name = "stoney";
228 break;
204 default: BUG(); 229 default: BUG();
205 } 230 }
206 231
207 for (i = 0; i < SDMA_MAX_INSTANCE; i++) { 232 for (i = 0; i < adev->sdma.num_instances; i++) {
208 if (i == 0) 233 if (i == 0)
209 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name); 234 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
210 else 235 else
211 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name); 236 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
212 err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev); 237 err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
213 if (err) 238 if (err)
214 goto out; 239 goto out;
215 err = amdgpu_ucode_validate(adev->sdma[i].fw); 240 err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
216 if (err) 241 if (err)
217 goto out; 242 goto out;
218 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; 243 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
219 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); 244 adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
220 adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); 245 adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
221 if (adev->sdma[i].feature_version >= 20) 246 if (adev->sdma.instance[i].feature_version >= 20)
222 adev->sdma[i].burst_nop = true; 247 adev->sdma.instance[i].burst_nop = true;
223 248
224 if (adev->firmware.smu_load) { 249 if (adev->firmware.smu_load) {
225 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; 250 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
226 info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i; 251 info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
227 info->fw = adev->sdma[i].fw; 252 info->fw = adev->sdma.instance[i].fw;
228 header = (const struct common_firmware_header *)info->fw->data; 253 header = (const struct common_firmware_header *)info->fw->data;
229 adev->firmware.fw_size += 254 adev->firmware.fw_size +=
230 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); 255 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
@@ -235,9 +260,9 @@ out:
235 printk(KERN_ERR 260 printk(KERN_ERR
236 "sdma_v3_0: Failed to load firmware \"%s\"\n", 261 "sdma_v3_0: Failed to load firmware \"%s\"\n",
237 fw_name); 262 fw_name);
238 for (i = 0; i < SDMA_MAX_INSTANCE; i++) { 263 for (i = 0; i < adev->sdma.num_instances; i++) {
239 release_firmware(adev->sdma[i].fw); 264 release_firmware(adev->sdma.instance[i].fw);
240 adev->sdma[i].fw = NULL; 265 adev->sdma.instance[i].fw = NULL;
241 } 266 }
242 } 267 }
243 return err; 268 return err;
@@ -276,7 +301,7 @@ static uint32_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
276 /* XXX check if swapping is necessary on BE */ 301 /* XXX check if swapping is necessary on BE */
277 wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2; 302 wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
278 } else { 303 } else {
279 int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1; 304 int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
280 305
281 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2; 306 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
282 } 307 }
@@ -300,7 +325,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
300 adev->wb.wb[ring->wptr_offs] = ring->wptr << 2; 325 adev->wb.wb[ring->wptr_offs] = ring->wptr << 2;
301 WDOORBELL32(ring->doorbell_index, ring->wptr << 2); 326 WDOORBELL32(ring->doorbell_index, ring->wptr << 2);
302 } else { 327 } else {
303 int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1; 328 int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
304 329
305 WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2); 330 WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
306 } 331 }
@@ -308,7 +333,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
308 333
309static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) 334static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
310{ 335{
311 struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring); 336 struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
312 int i; 337 int i;
313 338
314 for (i = 0; i < count; i++) 339 for (i = 0; i < count; i++)
@@ -369,7 +394,7 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
369{ 394{
370 u32 ref_and_mask = 0; 395 u32 ref_and_mask = 0;
371 396
372 if (ring == &ring->adev->sdma[0].ring) 397 if (ring == &ring->adev->sdma.instance[0].ring)
373 ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1); 398 ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
374 else 399 else
375 ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1); 400 ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@@ -454,8 +479,8 @@ static bool sdma_v3_0_ring_emit_semaphore(struct amdgpu_ring *ring,
454 */ 479 */
455static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev) 480static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
456{ 481{
457 struct amdgpu_ring *sdma0 = &adev->sdma[0].ring; 482 struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
458 struct amdgpu_ring *sdma1 = &adev->sdma[1].ring; 483 struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
459 u32 rb_cntl, ib_cntl; 484 u32 rb_cntl, ib_cntl;
460 int i; 485 int i;
461 486
@@ -463,7 +488,7 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
463 (adev->mman.buffer_funcs_ring == sdma1)) 488 (adev->mman.buffer_funcs_ring == sdma1))
464 amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); 489 amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
465 490
466 for (i = 0; i < SDMA_MAX_INSTANCE; i++) { 491 for (i = 0; i < adev->sdma.num_instances; i++) {
467 rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); 492 rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
468 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); 493 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
469 WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); 494 WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
@@ -500,7 +525,7 @@ static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
500 u32 f32_cntl; 525 u32 f32_cntl;
501 int i; 526 int i;
502 527
503 for (i = 0; i < SDMA_MAX_INSTANCE; i++) { 528 for (i = 0; i < adev->sdma.num_instances; i++) {
504 f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]); 529 f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
505 if (enable) 530 if (enable)
506 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, 531 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
@@ -530,7 +555,7 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
530 sdma_v3_0_rlc_stop(adev); 555 sdma_v3_0_rlc_stop(adev);
531 } 556 }
532 557
533 for (i = 0; i < SDMA_MAX_INSTANCE; i++) { 558 for (i = 0; i < adev->sdma.num_instances; i++) {
534 f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]); 559 f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
535 if (enable) 560 if (enable)
536 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0); 561 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
@@ -557,8 +582,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
557 u32 doorbell; 582 u32 doorbell;
558 int i, j, r; 583 int i, j, r;
559 584
560 for (i = 0; i < SDMA_MAX_INSTANCE; i++) { 585 for (i = 0; i < adev->sdma.num_instances; i++) {
561 ring = &adev->sdma[i].ring; 586 ring = &adev->sdma.instance[i].ring;
562 wb_offset = (ring->rptr_offs * 4); 587 wb_offset = (ring->rptr_offs * 4);
563 588
564 mutex_lock(&adev->srbm_mutex); 589 mutex_lock(&adev->srbm_mutex);
@@ -669,23 +694,22 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
669 u32 fw_size; 694 u32 fw_size;
670 int i, j; 695 int i, j;
671 696
672 if (!adev->sdma[0].fw || !adev->sdma[1].fw)
673 return -EINVAL;
674
675 /* halt the MEs */ 697 /* halt the MEs */
676 sdma_v3_0_enable(adev, false); 698 sdma_v3_0_enable(adev, false);
677 699
678 for (i = 0; i < SDMA_MAX_INSTANCE; i++) { 700 for (i = 0; i < adev->sdma.num_instances; i++) {
679 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; 701 if (!adev->sdma.instance[i].fw)
702 return -EINVAL;
703 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
680 amdgpu_ucode_print_sdma_hdr(&hdr->header); 704 amdgpu_ucode_print_sdma_hdr(&hdr->header);
681 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 705 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
682 fw_data = (const __le32 *) 706 fw_data = (const __le32 *)
683 (adev->sdma[i].fw->data + 707 (adev->sdma.instance[i].fw->data +
684 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 708 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
685 WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); 709 WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
686 for (j = 0; j < fw_size; j++) 710 for (j = 0; j < fw_size; j++)
687 WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++)); 711 WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
688 WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version); 712 WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
689 } 713 }
690 714
691 return 0; 715 return 0;
@@ -701,21 +725,21 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
701 */ 725 */
702static int sdma_v3_0_start(struct amdgpu_device *adev) 726static int sdma_v3_0_start(struct amdgpu_device *adev)
703{ 727{
704 int r; 728 int r, i;
705 729
706 if (!adev->firmware.smu_load) { 730 if (!adev->firmware.smu_load) {
707 r = sdma_v3_0_load_microcode(adev); 731 r = sdma_v3_0_load_microcode(adev);
708 if (r) 732 if (r)
709 return r; 733 return r;
710 } else { 734 } else {
711 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, 735 for (i = 0; i < adev->sdma.num_instances; i++) {
712 AMDGPU_UCODE_ID_SDMA0); 736 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
713 if (r) 737 (i == 0) ?
714 return -EINVAL; 738 AMDGPU_UCODE_ID_SDMA0 :
715 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, 739 AMDGPU_UCODE_ID_SDMA1);
716 AMDGPU_UCODE_ID_SDMA1); 740 if (r)
717 if (r) 741 return -EINVAL;
718 return -EINVAL; 742 }
719 } 743 }
720 744
721 /* unhalt the MEs */ 745 /* unhalt the MEs */
@@ -1013,7 +1037,7 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib,
1013 */ 1037 */
1014static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib) 1038static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib)
1015{ 1039{
1016 struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring); 1040 struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
1017 u32 pad_count; 1041 u32 pad_count;
1018 int i; 1042 int i;
1019 1043
@@ -1071,6 +1095,15 @@ static int sdma_v3_0_early_init(void *handle)
1071{ 1095{
1072 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1096 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1073 1097
1098 switch (adev->asic_type) {
1099 case CHIP_STONEY:
1100 adev->sdma.num_instances = 1;
1101 break;
1102 default:
1103 adev->sdma.num_instances = SDMA_MAX_INSTANCE;
1104 break;
1105 }
1106
1074 sdma_v3_0_set_ring_funcs(adev); 1107 sdma_v3_0_set_ring_funcs(adev);
1075 sdma_v3_0_set_buffer_funcs(adev); 1108 sdma_v3_0_set_buffer_funcs(adev);
1076 sdma_v3_0_set_vm_pte_funcs(adev); 1109 sdma_v3_0_set_vm_pte_funcs(adev);
@@ -1082,21 +1115,21 @@ static int sdma_v3_0_early_init(void *handle)
1082static int sdma_v3_0_sw_init(void *handle) 1115static int sdma_v3_0_sw_init(void *handle)
1083{ 1116{
1084 struct amdgpu_ring *ring; 1117 struct amdgpu_ring *ring;
1085 int r; 1118 int r, i;
1086 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1119 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1087 1120
1088 /* SDMA trap event */ 1121 /* SDMA trap event */
1089 r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq); 1122 r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
1090 if (r) 1123 if (r)
1091 return r; 1124 return r;
1092 1125
1093 /* SDMA Privileged inst */ 1126 /* SDMA Privileged inst */
1094 r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq); 1127 r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
1095 if (r) 1128 if (r)
1096 return r; 1129 return r;
1097 1130
1098 /* SDMA Privileged inst */ 1131 /* SDMA Privileged inst */
1099 r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq); 1132 r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
1100 if (r) 1133 if (r)
1101 return r; 1134 return r;
1102 1135
@@ -1106,33 +1139,23 @@ static int sdma_v3_0_sw_init(void *handle)
1106 return r; 1139 return r;
1107 } 1140 }
1108 1141
1109 ring = &adev->sdma[0].ring; 1142 for (i = 0; i < adev->sdma.num_instances; i++) {
1110 ring->ring_obj = NULL; 1143 ring = &adev->sdma.instance[i].ring;
1111 ring->use_doorbell = true; 1144 ring->ring_obj = NULL;
1112 ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE0; 1145 ring->use_doorbell = true;
1113 1146 ring->doorbell_index = (i == 0) ?
1114 ring = &adev->sdma[1].ring; 1147 AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
1115 ring->ring_obj = NULL; 1148
1116 ring->use_doorbell = true; 1149 sprintf(ring->name, "sdma%d", i);
1117 ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE1; 1150 r = amdgpu_ring_init(adev, ring, 256 * 1024,
1118 1151 SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
1119 ring = &adev->sdma[0].ring; 1152 &adev->sdma.trap_irq,
1120 sprintf(ring->name, "sdma0"); 1153 (i == 0) ?
1121 r = amdgpu_ring_init(adev, ring, 256 * 1024, 1154 AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
1122 SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf, 1155 AMDGPU_RING_TYPE_SDMA);
1123 &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0, 1156 if (r)
1124 AMDGPU_RING_TYPE_SDMA); 1157 return r;
1125 if (r) 1158 }
1126 return r;
1127
1128 ring = &adev->sdma[1].ring;
1129 sprintf(ring->name, "sdma1");
1130 r = amdgpu_ring_init(adev, ring, 256 * 1024,
1131 SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
1132 &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
1133 AMDGPU_RING_TYPE_SDMA);
1134 if (r)
1135 return r;
1136 1159
1137 return r; 1160 return r;
1138} 1161}
@@ -1140,9 +1163,10 @@ static int sdma_v3_0_sw_init(void *handle)
1140static int sdma_v3_0_sw_fini(void *handle) 1163static int sdma_v3_0_sw_fini(void *handle)
1141{ 1164{
1142 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1165 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1166 int i;
1143 1167
1144 amdgpu_ring_fini(&adev->sdma[0].ring); 1168 for (i = 0; i < adev->sdma.num_instances; i++)
1145 amdgpu_ring_fini(&adev->sdma[1].ring); 1169 amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1146 1170
1147 return 0; 1171 return 0;
1148} 1172}
@@ -1222,7 +1246,7 @@ static void sdma_v3_0_print_status(void *handle)
1222 dev_info(adev->dev, "VI SDMA registers\n"); 1246 dev_info(adev->dev, "VI SDMA registers\n");
1223 dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", 1247 dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
1224 RREG32(mmSRBM_STATUS2)); 1248 RREG32(mmSRBM_STATUS2));
1225 for (i = 0; i < SDMA_MAX_INSTANCE; i++) { 1249 for (i = 0; i < adev->sdma.num_instances; i++) {
1226 dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n", 1250 dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n",
1227 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i])); 1251 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
1228 dev_info(adev->dev, " SDMA%d_F32_CNTL=0x%08X\n", 1252 dev_info(adev->dev, " SDMA%d_F32_CNTL=0x%08X\n",
@@ -1367,7 +1391,7 @@ static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
1367 case 0: 1391 case 0:
1368 switch (queue_id) { 1392 switch (queue_id) {
1369 case 0: 1393 case 0:
1370 amdgpu_fence_process(&adev->sdma[0].ring); 1394 amdgpu_fence_process(&adev->sdma.instance[0].ring);
1371 break; 1395 break;
1372 case 1: 1396 case 1:
1373 /* XXX compute */ 1397 /* XXX compute */
@@ -1380,7 +1404,7 @@ static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
1380 case 1: 1404 case 1:
1381 switch (queue_id) { 1405 switch (queue_id) {
1382 case 0: 1406 case 0:
1383 amdgpu_fence_process(&adev->sdma[1].ring); 1407 amdgpu_fence_process(&adev->sdma.instance[1].ring);
1384 break; 1408 break;
1385 case 1: 1409 case 1:
1386 /* XXX compute */ 1410 /* XXX compute */
@@ -1432,24 +1456,6 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
1432 .set_powergating_state = sdma_v3_0_set_powergating_state, 1456 .set_powergating_state = sdma_v3_0_set_powergating_state,
1433}; 1457};
1434 1458
1435/**
1436 * sdma_v3_0_ring_is_lockup - Check if the DMA engine is locked up
1437 *
1438 * @ring: amdgpu_ring structure holding ring information
1439 *
1440 * Check if the async DMA engine is locked up (VI).
1441 * Returns true if the engine appears to be locked up, false if not.
1442 */
1443static bool sdma_v3_0_ring_is_lockup(struct amdgpu_ring *ring)
1444{
1445
1446 if (sdma_v3_0_is_idle(ring->adev)) {
1447 amdgpu_ring_lockup_update(ring);
1448 return false;
1449 }
1450 return amdgpu_ring_test_lockup(ring);
1451}
1452
1453static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = { 1459static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
1454 .get_rptr = sdma_v3_0_ring_get_rptr, 1460 .get_rptr = sdma_v3_0_ring_get_rptr,
1455 .get_wptr = sdma_v3_0_ring_get_wptr, 1461 .get_wptr = sdma_v3_0_ring_get_wptr,
@@ -1462,14 +1468,15 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
1462 .emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush, 1468 .emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
1463 .test_ring = sdma_v3_0_ring_test_ring, 1469 .test_ring = sdma_v3_0_ring_test_ring,
1464 .test_ib = sdma_v3_0_ring_test_ib, 1470 .test_ib = sdma_v3_0_ring_test_ib,
1465 .is_lockup = sdma_v3_0_ring_is_lockup,
1466 .insert_nop = sdma_v3_0_ring_insert_nop, 1471 .insert_nop = sdma_v3_0_ring_insert_nop,
1467}; 1472};
1468 1473
1469static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev) 1474static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
1470{ 1475{
1471 adev->sdma[0].ring.funcs = &sdma_v3_0_ring_funcs; 1476 int i;
1472 adev->sdma[1].ring.funcs = &sdma_v3_0_ring_funcs; 1477
1478 for (i = 0; i < adev->sdma.num_instances; i++)
1479 adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
1473} 1480}
1474 1481
1475static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = { 1482static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
@@ -1483,9 +1490,9 @@ static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = {
1483 1490
1484static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev) 1491static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
1485{ 1492{
1486 adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; 1493 adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
1487 adev->sdma_trap_irq.funcs = &sdma_v3_0_trap_irq_funcs; 1494 adev->sdma.trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
1488 adev->sdma_illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs; 1495 adev->sdma.illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
1489} 1496}
1490 1497
1491/** 1498/**
@@ -1551,7 +1558,7 @@ static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
1551{ 1558{
1552 if (adev->mman.buffer_funcs == NULL) { 1559 if (adev->mman.buffer_funcs == NULL) {
1553 adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs; 1560 adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
1554 adev->mman.buffer_funcs_ring = &adev->sdma[0].ring; 1561 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
1555 } 1562 }
1556} 1563}
1557 1564
@@ -1566,7 +1573,7 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
1566{ 1573{
1567 if (adev->vm_manager.vm_pte_funcs == NULL) { 1574 if (adev->vm_manager.vm_pte_funcs == NULL) {
1568 adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs; 1575 adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
1569 adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring; 1576 adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
1570 adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; 1577 adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
1571 } 1578 }
1572} 1579}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index ed50dd725788..5e9f73af83a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -885,7 +885,6 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
885 .emit_semaphore = uvd_v4_2_ring_emit_semaphore, 885 .emit_semaphore = uvd_v4_2_ring_emit_semaphore,
886 .test_ring = uvd_v4_2_ring_test_ring, 886 .test_ring = uvd_v4_2_ring_test_ring,
887 .test_ib = uvd_v4_2_ring_test_ib, 887 .test_ib = uvd_v4_2_ring_test_ib,
888 .is_lockup = amdgpu_ring_test_lockup,
889 .insert_nop = amdgpu_ring_insert_nop, 888 .insert_nop = amdgpu_ring_insert_nop,
890}; 889};
891 890
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 9ad8b9906c0b..38864f562981 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -824,7 +824,6 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
824 .emit_semaphore = uvd_v5_0_ring_emit_semaphore, 824 .emit_semaphore = uvd_v5_0_ring_emit_semaphore,
825 .test_ring = uvd_v5_0_ring_test_ring, 825 .test_ring = uvd_v5_0_ring_test_ring,
826 .test_ib = uvd_v5_0_ring_test_ib, 826 .test_ib = uvd_v5_0_ring_test_ib,
827 .is_lockup = amdgpu_ring_test_lockup,
828 .insert_nop = amdgpu_ring_insert_nop, 827 .insert_nop = amdgpu_ring_insert_nop,
829}; 828};
830 829
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 7e9934fa4193..121915bbc3b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -808,7 +808,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
808 .emit_semaphore = uvd_v6_0_ring_emit_semaphore, 808 .emit_semaphore = uvd_v6_0_ring_emit_semaphore,
809 .test_ring = uvd_v6_0_ring_test_ring, 809 .test_ring = uvd_v6_0_ring_test_ring,
810 .test_ib = uvd_v6_0_ring_test_ib, 810 .test_ib = uvd_v6_0_ring_test_ib,
811 .is_lockup = amdgpu_ring_test_lockup,
812 .insert_nop = amdgpu_ring_insert_nop, 811 .insert_nop = amdgpu_ring_insert_nop,
813}; 812};
814 813
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index cd16df543f64..52ac7a8f1e58 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -642,7 +642,6 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
642 .emit_semaphore = amdgpu_vce_ring_emit_semaphore, 642 .emit_semaphore = amdgpu_vce_ring_emit_semaphore,
643 .test_ring = amdgpu_vce_ring_test_ring, 643 .test_ring = amdgpu_vce_ring_test_ring,
644 .test_ib = amdgpu_vce_ring_test_ib, 644 .test_ib = amdgpu_vce_ring_test_ib,
645 .is_lockup = amdgpu_ring_test_lockup,
646 .insert_nop = amdgpu_ring_insert_nop, 645 .insert_nop = amdgpu_ring_insert_nop,
647}; 646};
648 647
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index f0656dfb53f3..6a52db6ad8d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -205,8 +205,9 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
205 u32 tmp; 205 u32 tmp;
206 unsigned ret; 206 unsigned ret;
207 207
208 /* Fiji is single pipe */ 208 /* Fiji, Stoney are single pipe */
209 if (adev->asic_type == CHIP_FIJI) { 209 if ((adev->asic_type == CHIP_FIJI) ||
210 (adev->asic_type == CHIP_STONEY)){
210 ret = AMDGPU_VCE_HARVEST_VCE1; 211 ret = AMDGPU_VCE_HARVEST_VCE1;
211 return ret; 212 return ret;
212 } 213 }
@@ -643,7 +644,6 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
643 .emit_semaphore = amdgpu_vce_ring_emit_semaphore, 644 .emit_semaphore = amdgpu_vce_ring_emit_semaphore,
644 .test_ring = amdgpu_vce_ring_test_ring, 645 .test_ring = amdgpu_vce_ring_test_ring,
645 .test_ib = amdgpu_vce_ring_test_ib, 646 .test_ib = amdgpu_vce_ring_test_ib,
646 .is_lockup = amdgpu_ring_test_lockup,
647 .insert_nop = amdgpu_ring_insert_nop, 647 .insert_nop = amdgpu_ring_insert_nop,
648}; 648};
649 649
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 0bac8702e934..2adc1c855e85 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -232,6 +232,13 @@ static const u32 cz_mgcg_cgcg_init[] =
232 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, 232 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
233}; 233};
234 234
235static const u32 stoney_mgcg_cgcg_init[] =
236{
237 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
238 mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
239 mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
240};
241
235static void vi_init_golden_registers(struct amdgpu_device *adev) 242static void vi_init_golden_registers(struct amdgpu_device *adev)
236{ 243{
237 /* Some of the registers might be dependent on GRBM_GFX_INDEX */ 244 /* Some of the registers might be dependent on GRBM_GFX_INDEX */
@@ -258,6 +265,11 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
258 cz_mgcg_cgcg_init, 265 cz_mgcg_cgcg_init,
259 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init)); 266 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
260 break; 267 break;
268 case CHIP_STONEY:
269 amdgpu_program_register_sequence(adev,
270 stoney_mgcg_cgcg_init,
271 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
272 break;
261 default: 273 default:
262 break; 274 break;
263 } 275 }
@@ -488,6 +500,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
488 case CHIP_FIJI: 500 case CHIP_FIJI:
489 case CHIP_TONGA: 501 case CHIP_TONGA:
490 case CHIP_CARRIZO: 502 case CHIP_CARRIZO:
503 case CHIP_STONEY:
491 asic_register_table = cz_allowed_read_registers; 504 asic_register_table = cz_allowed_read_registers;
492 size = ARRAY_SIZE(cz_allowed_read_registers); 505 size = ARRAY_SIZE(cz_allowed_read_registers);
493 break; 506 break;
@@ -543,8 +556,10 @@ static void vi_print_gpu_status_regs(struct amdgpu_device *adev)
543 RREG32(mmSRBM_STATUS2)); 556 RREG32(mmSRBM_STATUS2));
544 dev_info(adev->dev, " SDMA0_STATUS_REG = 0x%08X\n", 557 dev_info(adev->dev, " SDMA0_STATUS_REG = 0x%08X\n",
545 RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET)); 558 RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
546 dev_info(adev->dev, " SDMA1_STATUS_REG = 0x%08X\n", 559 if (adev->sdma.num_instances > 1) {
547 RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET)); 560 dev_info(adev->dev, " SDMA1_STATUS_REG = 0x%08X\n",
561 RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
562 }
548 dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT)); 563 dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
549 dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n", 564 dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
550 RREG32(mmCP_STALLED_STAT1)); 565 RREG32(mmCP_STALLED_STAT1));
@@ -639,9 +654,11 @@ u32 vi_gpu_check_soft_reset(struct amdgpu_device *adev)
639 reset_mask |= AMDGPU_RESET_DMA; 654 reset_mask |= AMDGPU_RESET_DMA;
640 655
641 /* SDMA1_STATUS_REG */ 656 /* SDMA1_STATUS_REG */
642 tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET); 657 if (adev->sdma.num_instances > 1) {
643 if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) 658 tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
644 reset_mask |= AMDGPU_RESET_DMA1; 659 if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
660 reset_mask |= AMDGPU_RESET_DMA1;
661 }
645#if 0 662#if 0
646 /* VCE_STATUS */ 663 /* VCE_STATUS */
647 if (adev->asic_type != CHIP_TOPAZ) { 664 if (adev->asic_type != CHIP_TOPAZ) {
@@ -1319,6 +1336,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1319 adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks); 1336 adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
1320 break; 1337 break;
1321 case CHIP_CARRIZO: 1338 case CHIP_CARRIZO:
1339 case CHIP_STONEY:
1322 adev->ip_blocks = cz_ip_blocks; 1340 adev->ip_blocks = cz_ip_blocks;
1323 adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks); 1341 adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
1324 break; 1342 break;
@@ -1330,11 +1348,18 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1330 return 0; 1348 return 0;
1331} 1349}
1332 1350
1351#define ATI_REV_ID_FUSE_MACRO__ADDRESS 0xC0014044
1352#define ATI_REV_ID_FUSE_MACRO__SHIFT 9
1353#define ATI_REV_ID_FUSE_MACRO__MASK 0x00001E00
1354
1333static uint32_t vi_get_rev_id(struct amdgpu_device *adev) 1355static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
1334{ 1356{
1335 if (adev->asic_type == CHIP_TOPAZ) 1357 if (adev->asic_type == CHIP_TOPAZ)
1336 return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK) 1358 return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
1337 >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT; 1359 >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
1360 else if (adev->flags & AMD_IS_APU)
1361 return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
1362 >> ATI_REV_ID_FUSE_MACRO__SHIFT;
1338 else 1363 else
1339 return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK) 1364 return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
1340 >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT; 1365 >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
@@ -1388,32 +1413,35 @@ static int vi_common_early_init(void *handle)
1388 adev->cg_flags = 0; 1413 adev->cg_flags = 0;
1389 adev->pg_flags = 0; 1414 adev->pg_flags = 0;
1390 adev->external_rev_id = 0x1; 1415 adev->external_rev_id = 0x1;
1391 if (amdgpu_smc_load_fw && smc_enabled)
1392 adev->firmware.smu_load = true;
1393 break; 1416 break;
1394 case CHIP_FIJI: 1417 case CHIP_FIJI:
1418 adev->has_uvd = true;
1419 adev->cg_flags = 0;
1420 adev->pg_flags = 0;
1421 adev->external_rev_id = adev->rev_id + 0x3c;
1422 break;
1395 case CHIP_TONGA: 1423 case CHIP_TONGA:
1396 adev->has_uvd = true; 1424 adev->has_uvd = true;
1397 adev->cg_flags = 0; 1425 adev->cg_flags = 0;
1398 adev->pg_flags = 0; 1426 adev->pg_flags = 0;
1399 adev->external_rev_id = adev->rev_id + 0x14; 1427 adev->external_rev_id = adev->rev_id + 0x14;
1400 if (amdgpu_smc_load_fw && smc_enabled)
1401 adev->firmware.smu_load = true;
1402 break; 1428 break;
1403 case CHIP_CARRIZO: 1429 case CHIP_CARRIZO:
1430 case CHIP_STONEY:
1404 adev->has_uvd = true; 1431 adev->has_uvd = true;
1405 adev->cg_flags = 0; 1432 adev->cg_flags = 0;
1406 /* Disable UVD pg */ 1433 /* Disable UVD pg */
1407 adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE; 1434 adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
1408 adev->external_rev_id = adev->rev_id + 0x1; 1435 adev->external_rev_id = adev->rev_id + 0x1;
1409 if (amdgpu_smc_load_fw && smc_enabled)
1410 adev->firmware.smu_load = true;
1411 break; 1436 break;
1412 default: 1437 default:
1413 /* FIXME: not supported yet */ 1438 /* FIXME: not supported yet */
1414 return -EINVAL; 1439 return -EINVAL;
1415 } 1440 }
1416 1441
1442 if (amdgpu_smc_load_fw && smc_enabled)
1443 adev->firmware.smu_load = true;
1444
1417 return 0; 1445 return 0;
1418} 1446}
1419 1447
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 68a8eaa1b7d0..fe28fb353fab 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -47,6 +47,7 @@ enum amd_asic_type {
47 CHIP_TONGA, 47 CHIP_TONGA,
48 CHIP_FIJI, 48 CHIP_FIJI,
49 CHIP_CARRIZO, 49 CHIP_CARRIZO,
50 CHIP_STONEY,
50 CHIP_LAST, 51 CHIP_LAST,
51}; 52};
52 53
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_1_d.h
new file mode 100644
index 000000000000..2d672b3d2fed
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_1_d.h
@@ -0,0 +1,2791 @@
1/*
2 * GFX_8_1 Register documentation
3 *
4 * Copyright (C) 2014 Advanced Micro Devices, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
20 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24#ifndef GFX_8_1_D_H
25#define GFX_8_1_D_H
26
27#define mmCB_BLEND_RED 0xa105
28#define mmCB_BLEND_GREEN 0xa106
29#define mmCB_BLEND_BLUE 0xa107
30#define mmCB_BLEND_ALPHA 0xa108
31#define mmCB_DCC_CONTROL 0xa109
32#define mmCB_COLOR_CONTROL 0xa202
33#define mmCB_BLEND0_CONTROL 0xa1e0
34#define mmCB_BLEND1_CONTROL 0xa1e1
35#define mmCB_BLEND2_CONTROL 0xa1e2
36#define mmCB_BLEND3_CONTROL 0xa1e3
37#define mmCB_BLEND4_CONTROL 0xa1e4
38#define mmCB_BLEND5_CONTROL 0xa1e5
39#define mmCB_BLEND6_CONTROL 0xa1e6
40#define mmCB_BLEND7_CONTROL 0xa1e7
41#define mmCB_COLOR0_BASE 0xa318
42#define mmCB_COLOR1_BASE 0xa327
43#define mmCB_COLOR2_BASE 0xa336
44#define mmCB_COLOR3_BASE 0xa345
45#define mmCB_COLOR4_BASE 0xa354
46#define mmCB_COLOR5_BASE 0xa363
47#define mmCB_COLOR6_BASE 0xa372
48#define mmCB_COLOR7_BASE 0xa381
49#define mmCB_COLOR0_PITCH 0xa319
50#define mmCB_COLOR1_PITCH 0xa328
51#define mmCB_COLOR2_PITCH 0xa337
52#define mmCB_COLOR3_PITCH 0xa346
53#define mmCB_COLOR4_PITCH 0xa355
54#define mmCB_COLOR5_PITCH 0xa364
55#define mmCB_COLOR6_PITCH 0xa373
56#define mmCB_COLOR7_PITCH 0xa382
57#define mmCB_COLOR0_SLICE 0xa31a
58#define mmCB_COLOR1_SLICE 0xa329
59#define mmCB_COLOR2_SLICE 0xa338
60#define mmCB_COLOR3_SLICE 0xa347
61#define mmCB_COLOR4_SLICE 0xa356
62#define mmCB_COLOR5_SLICE 0xa365
63#define mmCB_COLOR6_SLICE 0xa374
64#define mmCB_COLOR7_SLICE 0xa383
65#define mmCB_COLOR0_VIEW 0xa31b
66#define mmCB_COLOR1_VIEW 0xa32a
67#define mmCB_COLOR2_VIEW 0xa339
68#define mmCB_COLOR3_VIEW 0xa348
69#define mmCB_COLOR4_VIEW 0xa357
70#define mmCB_COLOR5_VIEW 0xa366
71#define mmCB_COLOR6_VIEW 0xa375
72#define mmCB_COLOR7_VIEW 0xa384
73#define mmCB_COLOR0_INFO 0xa31c
74#define mmCB_COLOR1_INFO 0xa32b
75#define mmCB_COLOR2_INFO 0xa33a
76#define mmCB_COLOR3_INFO 0xa349
77#define mmCB_COLOR4_INFO 0xa358
78#define mmCB_COLOR5_INFO 0xa367
79#define mmCB_COLOR6_INFO 0xa376
80#define mmCB_COLOR7_INFO 0xa385
81#define mmCB_COLOR0_ATTRIB 0xa31d
82#define mmCB_COLOR1_ATTRIB 0xa32c
83#define mmCB_COLOR2_ATTRIB 0xa33b
84#define mmCB_COLOR3_ATTRIB 0xa34a
85#define mmCB_COLOR4_ATTRIB 0xa359
86#define mmCB_COLOR5_ATTRIB 0xa368
87#define mmCB_COLOR6_ATTRIB 0xa377
88#define mmCB_COLOR7_ATTRIB 0xa386
89#define mmCB_COLOR0_DCC_CONTROL 0xa31e
90#define mmCB_COLOR1_DCC_CONTROL 0xa32d
91#define mmCB_COLOR2_DCC_CONTROL 0xa33c
92#define mmCB_COLOR3_DCC_CONTROL 0xa34b
93#define mmCB_COLOR4_DCC_CONTROL 0xa35a
94#define mmCB_COLOR5_DCC_CONTROL 0xa369
95#define mmCB_COLOR6_DCC_CONTROL 0xa378
96#define mmCB_COLOR7_DCC_CONTROL 0xa387
97#define mmCB_COLOR0_CMASK 0xa31f
98#define mmCB_COLOR1_CMASK 0xa32e
99#define mmCB_COLOR2_CMASK 0xa33d
100#define mmCB_COLOR3_CMASK 0xa34c
101#define mmCB_COLOR4_CMASK 0xa35b
102#define mmCB_COLOR5_CMASK 0xa36a
103#define mmCB_COLOR6_CMASK 0xa379
104#define mmCB_COLOR7_CMASK 0xa388
105#define mmCB_COLOR0_CMASK_SLICE 0xa320
106#define mmCB_COLOR1_CMASK_SLICE 0xa32f
107#define mmCB_COLOR2_CMASK_SLICE 0xa33e
108#define mmCB_COLOR3_CMASK_SLICE 0xa34d
109#define mmCB_COLOR4_CMASK_SLICE 0xa35c
110#define mmCB_COLOR5_CMASK_SLICE 0xa36b
111#define mmCB_COLOR6_CMASK_SLICE 0xa37a
112#define mmCB_COLOR7_CMASK_SLICE 0xa389
113#define mmCB_COLOR0_FMASK 0xa321
114#define mmCB_COLOR1_FMASK 0xa330
115#define mmCB_COLOR2_FMASK 0xa33f
116#define mmCB_COLOR3_FMASK 0xa34e
117#define mmCB_COLOR4_FMASK 0xa35d
118#define mmCB_COLOR5_FMASK 0xa36c
119#define mmCB_COLOR6_FMASK 0xa37b
120#define mmCB_COLOR7_FMASK 0xa38a
121#define mmCB_COLOR0_FMASK_SLICE 0xa322
122#define mmCB_COLOR1_FMASK_SLICE 0xa331
123#define mmCB_COLOR2_FMASK_SLICE 0xa340
124#define mmCB_COLOR3_FMASK_SLICE 0xa34f
125#define mmCB_COLOR4_FMASK_SLICE 0xa35e
126#define mmCB_COLOR5_FMASK_SLICE 0xa36d
127#define mmCB_COLOR6_FMASK_SLICE 0xa37c
128#define mmCB_COLOR7_FMASK_SLICE 0xa38b
129#define mmCB_COLOR0_CLEAR_WORD0 0xa323
130#define mmCB_COLOR1_CLEAR_WORD0 0xa332
131#define mmCB_COLOR2_CLEAR_WORD0 0xa341
132#define mmCB_COLOR3_CLEAR_WORD0 0xa350
133#define mmCB_COLOR4_CLEAR_WORD0 0xa35f
134#define mmCB_COLOR5_CLEAR_WORD0 0xa36e
135#define mmCB_COLOR6_CLEAR_WORD0 0xa37d
136#define mmCB_COLOR7_CLEAR_WORD0 0xa38c
137#define mmCB_COLOR0_CLEAR_WORD1 0xa324
138#define mmCB_COLOR1_CLEAR_WORD1 0xa333
139#define mmCB_COLOR2_CLEAR_WORD1 0xa342
140#define mmCB_COLOR3_CLEAR_WORD1 0xa351
141#define mmCB_COLOR4_CLEAR_WORD1 0xa360
142#define mmCB_COLOR5_CLEAR_WORD1 0xa36f
143#define mmCB_COLOR6_CLEAR_WORD1 0xa37e
144#define mmCB_COLOR7_CLEAR_WORD1 0xa38d
145#define mmCB_COLOR0_DCC_BASE 0xa325
146#define mmCB_COLOR1_DCC_BASE 0xa334
147#define mmCB_COLOR2_DCC_BASE 0xa343
148#define mmCB_COLOR3_DCC_BASE 0xa352
149#define mmCB_COLOR4_DCC_BASE 0xa361
150#define mmCB_COLOR5_DCC_BASE 0xa370
151#define mmCB_COLOR6_DCC_BASE 0xa37f
152#define mmCB_COLOR7_DCC_BASE 0xa38e
153#define mmCB_TARGET_MASK 0xa08e
154#define mmCB_SHADER_MASK 0xa08f
155#define mmCB_HW_CONTROL 0x2684
156#define mmCB_HW_CONTROL_1 0x2685
157#define mmCB_HW_CONTROL_2 0x2686
158#define mmCB_HW_CONTROL_3 0x2683
159#define mmCB_DCC_CONFIG 0x2687
160#define mmCB_PERFCOUNTER_FILTER 0xdc00
161#define mmCB_PERFCOUNTER0_SELECT 0xdc01
162#define mmCB_PERFCOUNTER0_SELECT1 0xdc02
163#define mmCB_PERFCOUNTER1_SELECT 0xdc03
164#define mmCB_PERFCOUNTER2_SELECT 0xdc04
165#define mmCB_PERFCOUNTER3_SELECT 0xdc05
166#define mmCB_PERFCOUNTER0_LO 0xd406
167#define mmCB_PERFCOUNTER1_LO 0xd408
168#define mmCB_PERFCOUNTER2_LO 0xd40a
169#define mmCB_PERFCOUNTER3_LO 0xd40c
170#define mmCB_PERFCOUNTER0_HI 0xd407
171#define mmCB_PERFCOUNTER1_HI 0xd409
172#define mmCB_PERFCOUNTER2_HI 0xd40b
173#define mmCB_PERFCOUNTER3_HI 0xd40d
174#define mmCB_CGTT_SCLK_CTRL 0xf0a8
175#define mmCB_DEBUG_BUS_1 0x2699
176#define mmCB_DEBUG_BUS_2 0x269a
177#define mmCB_DEBUG_BUS_3 0x269b
178#define mmCB_DEBUG_BUS_4 0x269c
179#define mmCB_DEBUG_BUS_5 0x269d
180#define mmCB_DEBUG_BUS_6 0x269e
181#define mmCB_DEBUG_BUS_7 0x269f
182#define mmCB_DEBUG_BUS_8 0x26a0
183#define mmCB_DEBUG_BUS_9 0x26a1
184#define mmCB_DEBUG_BUS_10 0x26a2
185#define mmCB_DEBUG_BUS_11 0x26a3
186#define mmCB_DEBUG_BUS_12 0x26a4
187#define mmCB_DEBUG_BUS_13 0x26a5
188#define mmCB_DEBUG_BUS_14 0x26a6
189#define mmCB_DEBUG_BUS_15 0x26a7
190#define mmCB_DEBUG_BUS_16 0x26a8
191#define mmCB_DEBUG_BUS_17 0x26a9
192#define mmCB_DEBUG_BUS_18 0x26aa
193#define mmCB_DEBUG_BUS_19 0x26ab
194#define mmCB_DEBUG_BUS_20 0x26ac
195#define mmCB_DEBUG_BUS_21 0x26ad
196#define mmCB_DEBUG_BUS_22 0x26ae
197#define mmCP_DFY_CNTL 0x3020
198#define mmCP_DFY_STAT 0x3021
199#define mmCP_DFY_ADDR_HI 0x3022
200#define mmCP_DFY_ADDR_LO 0x3023
201#define mmCP_DFY_DATA_0 0x3024
202#define mmCP_DFY_DATA_1 0x3025
203#define mmCP_DFY_DATA_2 0x3026
204#define mmCP_DFY_DATA_3 0x3027
205#define mmCP_DFY_DATA_4 0x3028
206#define mmCP_DFY_DATA_5 0x3029
207#define mmCP_DFY_DATA_6 0x302a
208#define mmCP_DFY_DATA_7 0x302b
209#define mmCP_DFY_DATA_8 0x302c
210#define mmCP_DFY_DATA_9 0x302d
211#define mmCP_DFY_DATA_10 0x302e
212#define mmCP_DFY_DATA_11 0x302f
213#define mmCP_DFY_DATA_12 0x3030
214#define mmCP_DFY_DATA_13 0x3031
215#define mmCP_DFY_DATA_14 0x3032
216#define mmCP_DFY_DATA_15 0x3033
217#define mmCP_DFY_CMD 0x3034
218#define mmCP_CPC_MGCG_SYNC_CNTL 0x3036
219#define mmCP_ATCL1_CNTL 0x303c
220#define mmCP_RB0_BASE 0x3040
221#define mmCP_RB0_BASE_HI 0x30b1
222#define mmCP_RB_BASE 0x3040
223#define mmCP_RB1_BASE 0x3060
224#define mmCP_RB1_BASE_HI 0x30b2
225#define mmCP_RB2_BASE 0x3065
226#define mmCP_RB0_CNTL 0x3041
227#define mmCP_RB_CNTL 0x3041
228#define mmCP_RB1_CNTL 0x3061
229#define mmCP_RB2_CNTL 0x3066
230#define mmCP_RB_RPTR_WR 0x3042
231#define mmCP_RB0_RPTR_ADDR 0x3043
232#define mmCP_RB_RPTR_ADDR 0x3043
233#define mmCP_RB1_RPTR_ADDR 0x3062
234#define mmCP_RB2_RPTR_ADDR 0x3067
235#define mmCP_RB0_RPTR_ADDR_HI 0x3044
236#define mmCP_RB_RPTR_ADDR_HI 0x3044
237#define mmCP_RB1_RPTR_ADDR_HI 0x3063
238#define mmCP_RB2_RPTR_ADDR_HI 0x3068
239#define mmCP_RB0_WPTR 0x3045
240#define mmCP_RB_WPTR 0x3045
241#define mmCP_RB1_WPTR 0x3064
242#define mmCP_RB2_WPTR 0x3069
243#define mmCP_RB_WPTR_POLL_ADDR_LO 0x3046
244#define mmCP_RB_WPTR_POLL_ADDR_HI 0x3047
245#define mmGC_PRIV_MODE 0x3048
246#define mmCP_INT_CNTL 0x3049
247#define mmCP_INT_CNTL_RING0 0x306a
248#define mmCP_INT_CNTL_RING1 0x306b
249#define mmCP_INT_CNTL_RING2 0x306c
250#define mmCP_INT_STATUS 0x304a
251#define mmCP_INT_STATUS_RING0 0x306d
252#define mmCP_INT_STATUS_RING1 0x306e
253#define mmCP_INT_STATUS_RING2 0x306f
254#define mmCP_DEVICE_ID 0x304b
255#define mmCP_RING_PRIORITY_CNTS 0x304c
256#define mmCP_ME0_PIPE_PRIORITY_CNTS 0x304c
257#define mmCP_RING0_PRIORITY 0x304d
258#define mmCP_ME0_PIPE0_PRIORITY 0x304d
259#define mmCP_RING1_PRIORITY 0x304e
260#define mmCP_ME0_PIPE1_PRIORITY 0x304e
261#define mmCP_RING2_PRIORITY 0x304f
262#define mmCP_ME0_PIPE2_PRIORITY 0x304f
263#define mmCP_ENDIAN_SWAP 0x3050
264#define mmCP_RB_VMID 0x3051
265#define mmCP_ME0_PIPE0_VMID 0x3052
266#define mmCP_ME0_PIPE1_VMID 0x3053
267#define mmCP_RB_DOORBELL_CONTROL 0x3059
268#define mmCP_RB_DOORBELL_RANGE_LOWER 0x305a
269#define mmCP_RB_DOORBELL_RANGE_UPPER 0x305b
270#define mmCP_MEC_DOORBELL_RANGE_LOWER 0x305c
271#define mmCP_MEC_DOORBELL_RANGE_UPPER 0x305d
272#define mmCP_PFP_UCODE_ADDR 0xf814
273#define mmCP_PFP_UCODE_DATA 0xf815
274#define mmCP_ME_RAM_RADDR 0xf816
275#define mmCP_ME_RAM_WADDR 0xf816
276#define mmCP_ME_RAM_DATA 0xf817
277#define mmCGTT_CPC_CLK_CTRL 0xf0b2
278#define mmCGTT_CPF_CLK_CTRL 0xf0b1
279#define mmCGTT_CP_CLK_CTRL 0xf0b0
280#define mmCP_CE_UCODE_ADDR 0xf818
281#define mmCP_CE_UCODE_DATA 0xf819
282#define mmCP_MEC_ME1_UCODE_ADDR 0xf81a
283#define mmCP_MEC_ME1_UCODE_DATA 0xf81b
284#define mmCP_MEC_ME2_UCODE_ADDR 0xf81c
285#define mmCP_MEC_ME2_UCODE_DATA 0xf81d
286#define mmCP_MEC1_F32_INT_DIS 0x30bd
287#define mmCP_MEC2_F32_INT_DIS 0x30be
288#define mmCP_PWR_CNTL 0x3078
289#define mmCP_MEM_SLP_CNTL 0x3079
290#define mmCP_ECC_FIRSTOCCURRENCE 0x307a
291#define mmCP_ECC_FIRSTOCCURRENCE_RING0 0x307b
292#define mmCP_ECC_FIRSTOCCURRENCE_RING1 0x307c
293#define mmCP_ECC_FIRSTOCCURRENCE_RING2 0x307d
294#define mmCP_CPF_DEBUG 0x3080
295#define mmCP_PQ_WPTR_POLL_CNTL 0x3083
296#define mmCP_PQ_WPTR_POLL_CNTL1 0x3084
297#define mmCPC_INT_CNTL 0x30b4
298#define mmCP_ME1_PIPE0_INT_CNTL 0x3085
299#define mmCP_ME1_PIPE1_INT_CNTL 0x3086
300#define mmCP_ME1_PIPE2_INT_CNTL 0x3087
301#define mmCP_ME1_PIPE3_INT_CNTL 0x3088
302#define mmCP_ME2_PIPE0_INT_CNTL 0x3089
303#define mmCP_ME2_PIPE1_INT_CNTL 0x308a
304#define mmCP_ME2_PIPE2_INT_CNTL 0x308b
305#define mmCP_ME2_PIPE3_INT_CNTL 0x308c
306#define mmCPC_INT_STATUS 0x30b5
307#define mmCP_ME1_PIPE0_INT_STATUS 0x308d
308#define mmCP_ME1_PIPE1_INT_STATUS 0x308e
309#define mmCP_ME1_PIPE2_INT_STATUS 0x308f
310#define mmCP_ME1_PIPE3_INT_STATUS 0x3090
311#define mmCP_ME2_PIPE0_INT_STATUS 0x3091
312#define mmCP_ME2_PIPE1_INT_STATUS 0x3092
313#define mmCP_ME2_PIPE2_INT_STATUS 0x3093
314#define mmCP_ME2_PIPE3_INT_STATUS 0x3094
315#define mmCP_ME1_INT_STAT_DEBUG 0x3095
316#define mmCP_ME2_INT_STAT_DEBUG 0x3096
317#define mmCP_ME1_PIPE_PRIORITY_CNTS 0x3099
318#define mmCP_ME1_PIPE0_PRIORITY 0x309a
319#define mmCP_ME1_PIPE1_PRIORITY 0x309b
320#define mmCP_ME1_PIPE2_PRIORITY 0x309c
321#define mmCP_ME1_PIPE3_PRIORITY 0x309d
322#define mmCP_ME2_PIPE_PRIORITY_CNTS 0x309e
323#define mmCP_ME2_PIPE0_PRIORITY 0x309f
324#define mmCP_ME2_PIPE1_PRIORITY 0x30a0
325#define mmCP_ME2_PIPE2_PRIORITY 0x30a1
326#define mmCP_ME2_PIPE3_PRIORITY 0x30a2
327#define mmCP_CE_PRGRM_CNTR_START 0x30a3
328#define mmCP_PFP_PRGRM_CNTR_START 0x30a4
329#define mmCP_ME_PRGRM_CNTR_START 0x30a5
330#define mmCP_MEC1_PRGRM_CNTR_START 0x30a6
331#define mmCP_MEC2_PRGRM_CNTR_START 0x30a7
332#define mmCP_CE_INTR_ROUTINE_START 0x30a8
333#define mmCP_PFP_INTR_ROUTINE_START 0x30a9
334#define mmCP_ME_INTR_ROUTINE_START 0x30aa
335#define mmCP_MEC1_INTR_ROUTINE_START 0x30ab
336#define mmCP_MEC2_INTR_ROUTINE_START 0x30ac
337#define mmCP_CONTEXT_CNTL 0x30ad
338#define mmCP_MAX_CONTEXT 0x30ae
339#define mmCP_IQ_WAIT_TIME1 0x30af
340#define mmCP_IQ_WAIT_TIME2 0x30b0
341#define mmCP_VMID_RESET 0x30b3
342#define mmCP_VMID_PREEMPT 0x30b6
343#define mmCP_VMID_STATUS 0x30bf
344#define mmCPC_INT_CNTX_ID 0x30b7
345#define mmCP_PQ_STATUS 0x30b8
346#define mmCP_CPC_IC_BASE_LO 0x30b9
347#define mmCP_CPC_IC_BASE_HI 0x30ba
348#define mmCP_CPC_IC_BASE_CNTL 0x30bb
349#define mmCP_CPC_IC_OP_CNTL 0x30bc
350#define mmCP_CPC_STATUS 0x2084
351#define mmCP_CPC_BUSY_STAT 0x2085
352#define mmCP_CPC_STALLED_STAT1 0x2086
353#define mmCP_CPF_STATUS 0x2087
354#define mmCP_CPF_BUSY_STAT 0x2088
355#define mmCP_CPF_STALLED_STAT1 0x2089
356#define mmCP_CPC_GRBM_FREE_COUNT 0x208b
357#define mmCP_MEC_CNTL 0x208d
358#define mmCP_MEC_ME1_HEADER_DUMP 0x208e
359#define mmCP_MEC_ME2_HEADER_DUMP 0x208f
360#define mmCP_CPC_SCRATCH_INDEX 0x2090
361#define mmCP_CPC_SCRATCH_DATA 0x2091
362#define mmCPG_PERFCOUNTER1_SELECT 0xd800
363#define mmCPG_PERFCOUNTER1_LO 0xd000
364#define mmCPG_PERFCOUNTER1_HI 0xd001
365#define mmCPG_PERFCOUNTER0_SELECT1 0xd801
366#define mmCPG_PERFCOUNTER0_SELECT 0xd802
367#define mmCPG_PERFCOUNTER0_LO 0xd002
368#define mmCPG_PERFCOUNTER0_HI 0xd003
369#define mmCPC_PERFCOUNTER1_SELECT 0xd803
370#define mmCPC_PERFCOUNTER1_LO 0xd004
371#define mmCPC_PERFCOUNTER1_HI 0xd005
372#define mmCPC_PERFCOUNTER0_SELECT1 0xd804
373#define mmCPC_PERFCOUNTER0_SELECT 0xd809
374#define mmCPC_PERFCOUNTER0_LO 0xd006
375#define mmCPC_PERFCOUNTER0_HI 0xd007
376#define mmCPF_PERFCOUNTER1_SELECT 0xd805
377#define mmCPF_PERFCOUNTER1_LO 0xd008
378#define mmCPF_PERFCOUNTER1_HI 0xd009
379#define mmCPF_PERFCOUNTER0_SELECT1 0xd806
380#define mmCPF_PERFCOUNTER0_SELECT 0xd807
381#define mmCPF_PERFCOUNTER0_LO 0xd00a
382#define mmCPF_PERFCOUNTER0_HI 0xd00b
383#define mmCP_CPC_HALT_HYST_COUNT 0x20a7
384#define mmCP_DRAW_OBJECT 0xd810
385#define mmCP_DRAW_OBJECT_COUNTER 0xd811
386#define mmCP_DRAW_WINDOW_MASK_HI 0xd812
387#define mmCP_DRAW_WINDOW_HI 0xd813
388#define mmCP_DRAW_WINDOW_LO 0xd814
389#define mmCP_DRAW_WINDOW_CNTL 0xd815
390#define mmCP_PRT_LOD_STATS_CNTL0 0x20ad
391#define mmCP_PRT_LOD_STATS_CNTL1 0x20ae
392#define mmCP_PRT_LOD_STATS_CNTL2 0x20af
393#define mmCP_CE_COMPARE_COUNT 0x20c0
394#define mmCP_CE_DE_COUNT 0x20c1
395#define mmCP_DE_CE_COUNT 0x20c2
396#define mmCP_DE_LAST_INVAL_COUNT 0x20c3
397#define mmCP_DE_DE_COUNT 0x20c4
398#define mmCP_EOP_DONE_EVENT_CNTL 0xc0d5
399#define mmCP_EOP_DONE_DATA_CNTL 0xc0d6
400#define mmCP_EOP_DONE_CNTX_ID 0xc0d7
401#define mmCP_EOP_DONE_ADDR_LO 0xc000
402#define mmCP_EOP_DONE_ADDR_HI 0xc001
403#define mmCP_EOP_DONE_DATA_LO 0xc002
404#define mmCP_EOP_DONE_DATA_HI 0xc003
405#define mmCP_EOP_LAST_FENCE_LO 0xc004
406#define mmCP_EOP_LAST_FENCE_HI 0xc005
407#define mmCP_STREAM_OUT_ADDR_LO 0xc006
408#define mmCP_STREAM_OUT_ADDR_HI 0xc007
409#define mmCP_NUM_PRIM_WRITTEN_COUNT0_LO 0xc008
410#define mmCP_NUM_PRIM_WRITTEN_COUNT0_HI 0xc009
411#define mmCP_NUM_PRIM_NEEDED_COUNT0_LO 0xc00a
412#define mmCP_NUM_PRIM_NEEDED_COUNT0_HI 0xc00b
413#define mmCP_NUM_PRIM_WRITTEN_COUNT1_LO 0xc00c
414#define mmCP_NUM_PRIM_WRITTEN_COUNT1_HI 0xc00d
415#define mmCP_NUM_PRIM_NEEDED_COUNT1_LO 0xc00e
416#define mmCP_NUM_PRIM_NEEDED_COUNT1_HI 0xc00f
417#define mmCP_NUM_PRIM_WRITTEN_COUNT2_LO 0xc010
418#define mmCP_NUM_PRIM_WRITTEN_COUNT2_HI 0xc011
419#define mmCP_NUM_PRIM_NEEDED_COUNT2_LO 0xc012
420#define mmCP_NUM_PRIM_NEEDED_COUNT2_HI 0xc013
421#define mmCP_NUM_PRIM_WRITTEN_COUNT3_LO 0xc014
422#define mmCP_NUM_PRIM_WRITTEN_COUNT3_HI 0xc015
423#define mmCP_NUM_PRIM_NEEDED_COUNT3_LO 0xc016
424#define mmCP_NUM_PRIM_NEEDED_COUNT3_HI 0xc017
425#define mmCP_PIPE_STATS_ADDR_LO 0xc018
426#define mmCP_PIPE_STATS_ADDR_HI 0xc019
427#define mmCP_VGT_IAVERT_COUNT_LO 0xc01a
428#define mmCP_VGT_IAVERT_COUNT_HI 0xc01b
429#define mmCP_VGT_IAPRIM_COUNT_LO 0xc01c
430#define mmCP_VGT_IAPRIM_COUNT_HI 0xc01d
431#define mmCP_VGT_GSPRIM_COUNT_LO 0xc01e
432#define mmCP_VGT_GSPRIM_COUNT_HI 0xc01f
433#define mmCP_VGT_VSINVOC_COUNT_LO 0xc020
434#define mmCP_VGT_VSINVOC_COUNT_HI 0xc021
435#define mmCP_VGT_GSINVOC_COUNT_LO 0xc022
436#define mmCP_VGT_GSINVOC_COUNT_HI 0xc023
437#define mmCP_VGT_HSINVOC_COUNT_LO 0xc024
438#define mmCP_VGT_HSINVOC_COUNT_HI 0xc025
439#define mmCP_VGT_DSINVOC_COUNT_LO 0xc026
440#define mmCP_VGT_DSINVOC_COUNT_HI 0xc027
441#define mmCP_PA_CINVOC_COUNT_LO 0xc028
442#define mmCP_PA_CINVOC_COUNT_HI 0xc029
443#define mmCP_PA_CPRIM_COUNT_LO 0xc02a
444#define mmCP_PA_CPRIM_COUNT_HI 0xc02b
445#define mmCP_SC_PSINVOC_COUNT0_LO 0xc02c
446#define mmCP_SC_PSINVOC_COUNT0_HI 0xc02d
447#define mmCP_SC_PSINVOC_COUNT1_LO 0xc02e
448#define mmCP_SC_PSINVOC_COUNT1_HI 0xc02f
449#define mmCP_VGT_CSINVOC_COUNT_LO 0xc030
450#define mmCP_VGT_CSINVOC_COUNT_HI 0xc031
451#define mmCP_PIPE_STATS_CONTROL 0xc03d
452#define mmCP_STREAM_OUT_CONTROL 0xc03e
453#define mmCP_STRMOUT_CNTL 0xc03f
454#define mmSCRATCH_REG0 0xc040
455#define mmSCRATCH_REG1 0xc041
456#define mmSCRATCH_REG2 0xc042
457#define mmSCRATCH_REG3 0xc043
458#define mmSCRATCH_REG4 0xc044
459#define mmSCRATCH_REG5 0xc045
460#define mmSCRATCH_REG6 0xc046
461#define mmSCRATCH_REG7 0xc047
462#define mmSCRATCH_UMSK 0xc050
463#define mmSCRATCH_ADDR 0xc051
464#define mmCP_PFP_ATOMIC_PREOP_LO 0xc052
465#define mmCP_PFP_ATOMIC_PREOP_HI 0xc053
466#define mmCP_PFP_GDS_ATOMIC0_PREOP_LO 0xc054
467#define mmCP_PFP_GDS_ATOMIC0_PREOP_HI 0xc055
468#define mmCP_PFP_GDS_ATOMIC1_PREOP_LO 0xc056
469#define mmCP_PFP_GDS_ATOMIC1_PREOP_HI 0xc057
470#define mmCP_APPEND_ADDR_LO 0xc058
471#define mmCP_APPEND_ADDR_HI 0xc059
472#define mmCP_APPEND_DATA 0xc05a
473#define mmCP_APPEND_LAST_CS_FENCE 0xc05b
474#define mmCP_APPEND_LAST_PS_FENCE 0xc05c
475#define mmCP_ATOMIC_PREOP_LO 0xc05d
476#define mmCP_ME_ATOMIC_PREOP_LO 0xc05d
477#define mmCP_ATOMIC_PREOP_HI 0xc05e
478#define mmCP_ME_ATOMIC_PREOP_HI 0xc05e
479#define mmCP_GDS_ATOMIC0_PREOP_LO 0xc05f
480#define mmCP_ME_GDS_ATOMIC0_PREOP_LO 0xc05f
481#define mmCP_GDS_ATOMIC0_PREOP_HI 0xc060
482#define mmCP_ME_GDS_ATOMIC0_PREOP_HI 0xc060
483#define mmCP_GDS_ATOMIC1_PREOP_LO 0xc061
484#define mmCP_ME_GDS_ATOMIC1_PREOP_LO 0xc061
485#define mmCP_GDS_ATOMIC1_PREOP_HI 0xc062
486#define mmCP_ME_GDS_ATOMIC1_PREOP_HI 0xc062
487#define mmCP_ME_MC_WADDR_LO 0xc069
488#define mmCP_ME_MC_WADDR_HI 0xc06a
489#define mmCP_ME_MC_WDATA_LO 0xc06b
490#define mmCP_ME_MC_WDATA_HI 0xc06c
491#define mmCP_ME_MC_RADDR_LO 0xc06d
492#define mmCP_ME_MC_RADDR_HI 0xc06e
493#define mmCP_SEM_WAIT_TIMER 0xc06f
494#define mmCP_SIG_SEM_ADDR_LO 0xc070
495#define mmCP_SIG_SEM_ADDR_HI 0xc071
496#define mmCP_WAIT_SEM_ADDR_LO 0xc075
497#define mmCP_WAIT_SEM_ADDR_HI 0xc076
498#define mmCP_WAIT_REG_MEM_TIMEOUT 0xc074
499#define mmCP_COHER_START_DELAY 0xc07b
500#define mmCP_COHER_CNTL 0xc07c
501#define mmCP_COHER_SIZE 0xc07d
502#define mmCP_COHER_SIZE_HI 0xc08c
503#define mmCP_COHER_BASE 0xc07e
504#define mmCP_COHER_BASE_HI 0xc079
505#define mmCP_COHER_STATUS 0xc07f
506#define mmCOHER_DEST_BASE_0 0xa092
507#define mmCOHER_DEST_BASE_1 0xa093
508#define mmCOHER_DEST_BASE_2 0xa07e
509#define mmCOHER_DEST_BASE_3 0xa07f
510#define mmCOHER_DEST_BASE_HI_0 0xa07a
511#define mmCOHER_DEST_BASE_HI_1 0xa07b
512#define mmCOHER_DEST_BASE_HI_2 0xa07c
513#define mmCOHER_DEST_BASE_HI_3 0xa07d
514#define mmCP_DMA_ME_SRC_ADDR 0xc080
515#define mmCP_DMA_ME_SRC_ADDR_HI 0xc081
516#define mmCP_DMA_ME_DST_ADDR 0xc082
517#define mmCP_DMA_ME_DST_ADDR_HI 0xc083
518#define mmCP_DMA_ME_CONTROL 0xc078
519#define mmCP_DMA_ME_COMMAND 0xc084
520#define mmCP_DMA_PFP_SRC_ADDR 0xc085
521#define mmCP_DMA_PFP_SRC_ADDR_HI 0xc086
522#define mmCP_DMA_PFP_DST_ADDR 0xc087
523#define mmCP_DMA_PFP_DST_ADDR_HI 0xc088
524#define mmCP_DMA_PFP_CONTROL 0xc077
525#define mmCP_DMA_PFP_COMMAND 0xc089
526#define mmCP_DMA_CNTL 0xc08a
527#define mmCP_DMA_READ_TAGS 0xc08b
528#define mmCP_PFP_IB_CONTROL 0xc08d
529#define mmCP_PFP_LOAD_CONTROL 0xc08e
530#define mmCP_SCRATCH_INDEX 0xc08f
531#define mmCP_SCRATCH_DATA 0xc090
532#define mmCP_RB_OFFSET 0xc091
533#define mmCP_IB1_OFFSET 0xc092
534#define mmCP_IB2_OFFSET 0xc093
535#define mmCP_IB1_PREAMBLE_BEGIN 0xc094
536#define mmCP_IB1_PREAMBLE_END 0xc095
537#define mmCP_IB2_PREAMBLE_BEGIN 0xc096
538#define mmCP_IB2_PREAMBLE_END 0xc097
539#define mmCP_CE_IB1_OFFSET 0xc098
540#define mmCP_CE_IB2_OFFSET 0xc099
541#define mmCP_CE_COUNTER 0xc09a
542#define mmCP_CE_RB_OFFSET 0xc09b
543#define mmCP_PFP_COMPLETION_STATUS 0xc0ec
544#define mmCP_CE_COMPLETION_STATUS 0xc0ed
545#define mmCP_PRED_NOT_VISIBLE 0xc0ee
546#define mmCP_PFP_METADATA_BASE_ADDR 0xc0f0
547#define mmCP_PFP_METADATA_BASE_ADDR_HI 0xc0f1
548#define mmCP_CE_METADATA_BASE_ADDR 0xc0f2
549#define mmCP_CE_METADATA_BASE_ADDR_HI 0xc0f3
550#define mmCP_DRAW_INDX_INDR_ADDR 0xc0f4
551#define mmCP_DRAW_INDX_INDR_ADDR_HI 0xc0f5
552#define mmCP_DISPATCH_INDR_ADDR 0xc0f6
553#define mmCP_DISPATCH_INDR_ADDR_HI 0xc0f7
554#define mmCP_INDEX_BASE_ADDR 0xc0f8
555#define mmCP_INDEX_BASE_ADDR_HI 0xc0f9
556#define mmCP_INDEX_TYPE 0xc0fa
557#define mmCP_GDS_BKUP_ADDR 0xc0fb
558#define mmCP_GDS_BKUP_ADDR_HI 0xc0fc
559#define mmCP_SAMPLE_STATUS 0xc0fd
560#define mmCP_STALLED_STAT1 0x219d
561#define mmCP_STALLED_STAT2 0x219e
562#define mmCP_STALLED_STAT3 0x219c
563#define mmCP_BUSY_STAT 0x219f
564#define mmCP_STAT 0x21a0
565#define mmCP_ME_HEADER_DUMP 0x21a1
566#define mmCP_PFP_HEADER_DUMP 0x21a2
567#define mmCP_GRBM_FREE_COUNT 0x21a3
568#define mmCP_CE_HEADER_DUMP 0x21a4
569#define mmCP_CSF_STAT 0x21b4
570#define mmCP_CSF_CNTL 0x21b5
571#define mmCP_ME_CNTL 0x21b6
572#define mmCP_CNTX_STAT 0x21b8
573#define mmCP_ME_PREEMPTION 0x21b9
574#define mmCP_RB0_RPTR 0x21c0
575#define mmCP_RB_RPTR 0x21c0
576#define mmCP_RB1_RPTR 0x21bf
577#define mmCP_RB2_RPTR 0x21be
578#define mmCP_RB_WPTR_DELAY 0x21c1
579#define mmCP_RB_WPTR_POLL_CNTL 0x21c2
580#define mmCP_CE_INIT_BASE_LO 0xc0c3
581#define mmCP_CE_INIT_BASE_HI 0xc0c4
582#define mmCP_CE_INIT_BUFSZ 0xc0c5
583#define mmCP_CE_IB1_BASE_LO 0xc0c6
584#define mmCP_CE_IB1_BASE_HI 0xc0c7
585#define mmCP_CE_IB1_BUFSZ 0xc0c8
586#define mmCP_CE_IB2_BASE_LO 0xc0c9
587#define mmCP_CE_IB2_BASE_HI 0xc0ca
588#define mmCP_CE_IB2_BUFSZ 0xc0cb
589#define mmCP_IB1_BASE_LO 0xc0cc
590#define mmCP_IB1_BASE_HI 0xc0cd
591#define mmCP_IB1_BUFSZ 0xc0ce
592#define mmCP_IB2_BASE_LO 0xc0cf
593#define mmCP_IB2_BASE_HI 0xc0d0
594#define mmCP_IB2_BUFSZ 0xc0d1
595#define mmCP_ST_BASE_LO 0xc0d2
596#define mmCP_ST_BASE_HI 0xc0d3
597#define mmCP_ST_BUFSZ 0xc0d4
598#define mmCP_ROQ_THRESHOLDS 0x21bc
599#define mmCP_MEQ_STQ_THRESHOLD 0x21bd
600#define mmCP_ROQ1_THRESHOLDS 0x21d5
601#define mmCP_ROQ2_THRESHOLDS 0x21d6
602#define mmCP_STQ_THRESHOLDS 0x21d7
603#define mmCP_QUEUE_THRESHOLDS 0x21d8
604#define mmCP_MEQ_THRESHOLDS 0x21d9
605#define mmCP_ROQ_AVAIL 0x21da
606#define mmCP_STQ_AVAIL 0x21db
607#define mmCP_ROQ2_AVAIL 0x21dc
608#define mmCP_MEQ_AVAIL 0x21dd
609#define mmCP_CMD_INDEX 0x21de
610#define mmCP_CMD_DATA 0x21df
611#define mmCP_ROQ_RB_STAT 0x21e0
612#define mmCP_ROQ_IB1_STAT 0x21e1
613#define mmCP_ROQ_IB2_STAT 0x21e2
614#define mmCP_STQ_STAT 0x21e3
615#define mmCP_STQ_WR_STAT 0x21e4
616#define mmCP_MEQ_STAT 0x21e5
617#define mmCP_CEQ1_AVAIL 0x21e6
/*
 * GFX register offsets (dword addresses), auto-generated style:
 *   mm* = memory-mapped register offsets, ix* = indexed/indirect registers.
 * Groups in this span: CP command-processor / HQD hardware-queue descriptors,
 * DB depth block, GB graphics backend tiling, RAS signatures, GRBM graphics
 * register bus manager, PA primitive assembler / SC scan converter,
 * COMPUTE dispatch state, and RLC run-list controller.
 * Values must match the hardware specification exactly; do not reorder or edit.
 */
#define mmCP_CEQ2_AVAIL 0x21e7
#define mmCP_CE_ROQ_RB_STAT 0x21e8
#define mmCP_CE_ROQ_IB1_STAT 0x21e9
#define mmCP_CE_ROQ_IB2_STAT 0x21ea
#define mmCP_INT_STAT_DEBUG 0x21f7
#define mmCP_PERFMON_CNTL 0xd808
#define mmCP_PERFMON_CNTX_CNTL 0xa0d8
#define mmCP_RINGID 0xa0d9
#define mmCP_PIPEID 0xa0d9
#define mmCP_VMID 0xa0da
#define mmCP_HPD_ROQ_OFFSETS 0x3240
#define mmCP_HPD_STATUS0 0x3241
#define mmCP_MQD_BASE_ADDR 0x3245
#define mmCP_MQD_BASE_ADDR_HI 0x3246
#define mmCP_HQD_ACTIVE 0x3247
#define mmCP_HQD_VMID 0x3248
#define mmCP_HQD_PERSISTENT_STATE 0x3249
#define mmCP_HQD_PIPE_PRIORITY 0x324a
#define mmCP_HQD_QUEUE_PRIORITY 0x324b
#define mmCP_HQD_QUANTUM 0x324c
#define mmCP_HQD_PQ_BASE 0x324d
#define mmCP_HQD_PQ_BASE_HI 0x324e
#define mmCP_HQD_PQ_RPTR 0x324f
#define mmCP_HQD_PQ_RPTR_REPORT_ADDR 0x3250
#define mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI 0x3251
#define mmCP_HQD_PQ_WPTR_POLL_ADDR 0x3252
#define mmCP_HQD_PQ_WPTR_POLL_ADDR_HI 0x3253
#define mmCP_HQD_PQ_DOORBELL_CONTROL 0x3254
#define mmCP_HQD_PQ_WPTR 0x3255
#define mmCP_HQD_PQ_CONTROL 0x3256
#define mmCP_HQD_IB_BASE_ADDR 0x3257
#define mmCP_HQD_IB_BASE_ADDR_HI 0x3258
#define mmCP_HQD_IB_RPTR 0x3259
#define mmCP_HQD_IB_CONTROL 0x325a
#define mmCP_HQD_IQ_TIMER 0x325b
#define mmCP_HQD_IQ_RPTR 0x325c
#define mmCP_HQD_DEQUEUE_REQUEST 0x325d
#define mmCP_HQD_DMA_OFFLOAD 0x325e
#define mmCP_HQD_OFFLOAD 0x325e
#define mmCP_HQD_SEMA_CMD 0x325f
#define mmCP_HQD_MSG_TYPE 0x3260
#define mmCP_HQD_ATOMIC0_PREOP_LO 0x3261
#define mmCP_HQD_ATOMIC0_PREOP_HI 0x3262
#define mmCP_HQD_ATOMIC1_PREOP_LO 0x3263
#define mmCP_HQD_ATOMIC1_PREOP_HI 0x3264
#define mmCP_HQD_HQ_SCHEDULER0 0x3265
#define mmCP_HQD_HQ_STATUS0 0x3265
#define mmCP_HQD_HQ_SCHEDULER1 0x3266
#define mmCP_HQD_HQ_CONTROL0 0x3266
#define mmCP_MQD_CONTROL 0x3267
#define mmCP_HQD_HQ_STATUS1 0x3268
#define mmCP_HQD_HQ_CONTROL1 0x3269
#define mmCP_HQD_EOP_BASE_ADDR 0x326a
#define mmCP_HQD_EOP_BASE_ADDR_HI 0x326b
#define mmCP_HQD_EOP_CONTROL 0x326c
#define mmCP_HQD_EOP_RPTR 0x326d
#define mmCP_HQD_EOP_WPTR 0x326e
#define mmCP_HQD_EOP_EVENTS 0x326f
#define mmCP_HQD_CTX_SAVE_BASE_ADDR_LO 0x3270
#define mmCP_HQD_CTX_SAVE_BASE_ADDR_HI 0x3271
#define mmCP_HQD_CTX_SAVE_CONTROL 0x3272
#define mmCP_HQD_CNTL_STACK_OFFSET 0x3273
#define mmCP_HQD_CNTL_STACK_SIZE 0x3274
#define mmCP_HQD_WG_STATE_OFFSET 0x3275
#define mmCP_HQD_CTX_SAVE_SIZE 0x3276
#define mmCP_HQD_GDS_RESOURCE_STATE 0x3277
#define mmCP_HQD_ERROR 0x3278
#define mmCP_HQD_EOP_WPTR_MEM 0x3279
#define mmCP_HQD_EOP_DONES 0x327a
/* DB: depth block */
#define mmDB_Z_READ_BASE 0xa012
#define mmDB_STENCIL_READ_BASE 0xa013
#define mmDB_Z_WRITE_BASE 0xa014
#define mmDB_STENCIL_WRITE_BASE 0xa015
#define mmDB_DEPTH_INFO 0xa00f
#define mmDB_Z_INFO 0xa010
#define mmDB_STENCIL_INFO 0xa011
#define mmDB_DEPTH_SIZE 0xa016
#define mmDB_DEPTH_SLICE 0xa017
#define mmDB_DEPTH_VIEW 0xa002
#define mmDB_RENDER_CONTROL 0xa000
#define mmDB_COUNT_CONTROL 0xa001
#define mmDB_RENDER_OVERRIDE 0xa003
#define mmDB_RENDER_OVERRIDE2 0xa004
#define mmDB_EQAA 0xa201
#define mmDB_SHADER_CONTROL 0xa203
#define mmDB_DEPTH_BOUNDS_MIN 0xa008
#define mmDB_DEPTH_BOUNDS_MAX 0xa009
#define mmDB_STENCIL_CLEAR 0xa00a
#define mmDB_DEPTH_CLEAR 0xa00b
#define mmDB_HTILE_DATA_BASE 0xa005
#define mmDB_HTILE_SURFACE 0xa2af
#define mmDB_PRELOAD_CONTROL 0xa2b2
#define mmDB_STENCILREFMASK 0xa10c
#define mmDB_STENCILREFMASK_BF 0xa10d
#define mmDB_SRESULTS_COMPARE_STATE0 0xa2b0
#define mmDB_SRESULTS_COMPARE_STATE1 0xa2b1
#define mmDB_DEPTH_CONTROL 0xa200
#define mmDB_STENCIL_CONTROL 0xa10b
#define mmDB_ALPHA_TO_MASK 0xa2dc
#define mmDB_PERFCOUNTER0_SELECT 0xdc40
#define mmDB_PERFCOUNTER1_SELECT 0xdc42
#define mmDB_PERFCOUNTER2_SELECT 0xdc44
#define mmDB_PERFCOUNTER3_SELECT 0xdc46
#define mmDB_PERFCOUNTER0_SELECT1 0xdc41
#define mmDB_PERFCOUNTER1_SELECT1 0xdc43
#define mmDB_PERFCOUNTER0_LO 0xd440
#define mmDB_PERFCOUNTER1_LO 0xd442
#define mmDB_PERFCOUNTER2_LO 0xd444
#define mmDB_PERFCOUNTER3_LO 0xd446
#define mmDB_PERFCOUNTER0_HI 0xd441
#define mmDB_PERFCOUNTER1_HI 0xd443
#define mmDB_PERFCOUNTER2_HI 0xd445
#define mmDB_PERFCOUNTER3_HI 0xd447
#define mmDB_DEBUG 0x260c
#define mmDB_DEBUG2 0x260d
#define mmDB_DEBUG3 0x260e
#define mmDB_DEBUG4 0x260f
#define mmDB_CREDIT_LIMIT 0x2614
#define mmDB_WATERMARKS 0x2615
#define mmDB_SUBTILE_CONTROL 0x2616
#define mmDB_FREE_CACHELINES 0x2617
#define mmDB_FIFO_DEPTH1 0x2618
#define mmDB_FIFO_DEPTH2 0x2619
#define mmDB_CGTT_CLK_CTRL_0 0xf0a4
#define mmDB_ZPASS_COUNT_LOW 0xc3fe
#define mmDB_ZPASS_COUNT_HI 0xc3ff
#define mmDB_RING_CONTROL 0x261b
#define mmDB_READ_DEBUG_0 0x2620
#define mmDB_READ_DEBUG_1 0x2621
#define mmDB_READ_DEBUG_2 0x2622
#define mmDB_READ_DEBUG_3 0x2623
#define mmDB_READ_DEBUG_4 0x2624
#define mmDB_READ_DEBUG_5 0x2625
#define mmDB_READ_DEBUG_6 0x2626
#define mmDB_READ_DEBUG_7 0x2627
#define mmDB_READ_DEBUG_8 0x2628
#define mmDB_READ_DEBUG_9 0x2629
#define mmDB_READ_DEBUG_A 0x262a
#define mmDB_READ_DEBUG_B 0x262b
#define mmDB_READ_DEBUG_C 0x262c
#define mmDB_READ_DEBUG_D 0x262d
#define mmDB_READ_DEBUG_E 0x262e
#define mmDB_READ_DEBUG_F 0x262f
#define mmDB_OCCLUSION_COUNT0_LOW 0xc3c0
#define mmDB_OCCLUSION_COUNT0_HI 0xc3c1
#define mmDB_OCCLUSION_COUNT1_LOW 0xc3c2
#define mmDB_OCCLUSION_COUNT1_HI 0xc3c3
#define mmDB_OCCLUSION_COUNT2_LOW 0xc3c4
#define mmDB_OCCLUSION_COUNT2_HI 0xc3c5
#define mmDB_OCCLUSION_COUNT3_LOW 0xc3c6
#define mmDB_OCCLUSION_COUNT3_HI 0xc3c7
/* GB: graphics backend configuration and tiling modes */
#define mmCC_RB_REDUNDANCY 0x263c
#define mmCC_RB_BACKEND_DISABLE 0x263d
#define mmGC_USER_RB_REDUNDANCY 0x26de
#define mmGC_USER_RB_BACKEND_DISABLE 0x26df
#define mmGB_ADDR_CONFIG 0x263e
#define mmGB_BACKEND_MAP 0x263f
#define mmGB_GPU_ID 0x2640
#define mmCC_RB_DAISY_CHAIN 0x2641
#define mmGB_TILE_MODE0 0x2644
#define mmGB_TILE_MODE1 0x2645
#define mmGB_TILE_MODE2 0x2646
#define mmGB_TILE_MODE3 0x2647
#define mmGB_TILE_MODE4 0x2648
#define mmGB_TILE_MODE5 0x2649
#define mmGB_TILE_MODE6 0x264a
#define mmGB_TILE_MODE7 0x264b
#define mmGB_TILE_MODE8 0x264c
#define mmGB_TILE_MODE9 0x264d
#define mmGB_TILE_MODE10 0x264e
#define mmGB_TILE_MODE11 0x264f
#define mmGB_TILE_MODE12 0x2650
#define mmGB_TILE_MODE13 0x2651
#define mmGB_TILE_MODE14 0x2652
#define mmGB_TILE_MODE15 0x2653
#define mmGB_TILE_MODE16 0x2654
#define mmGB_TILE_MODE17 0x2655
#define mmGB_TILE_MODE18 0x2656
#define mmGB_TILE_MODE19 0x2657
#define mmGB_TILE_MODE20 0x2658
#define mmGB_TILE_MODE21 0x2659
#define mmGB_TILE_MODE22 0x265a
#define mmGB_TILE_MODE23 0x265b
#define mmGB_TILE_MODE24 0x265c
#define mmGB_TILE_MODE25 0x265d
#define mmGB_TILE_MODE26 0x265e
#define mmGB_TILE_MODE27 0x265f
#define mmGB_TILE_MODE28 0x2660
#define mmGB_TILE_MODE29 0x2661
#define mmGB_TILE_MODE30 0x2662
#define mmGB_TILE_MODE31 0x2663
#define mmGB_MACROTILE_MODE0 0x2664
#define mmGB_MACROTILE_MODE1 0x2665
#define mmGB_MACROTILE_MODE2 0x2666
#define mmGB_MACROTILE_MODE3 0x2667
#define mmGB_MACROTILE_MODE4 0x2668
#define mmGB_MACROTILE_MODE5 0x2669
#define mmGB_MACROTILE_MODE6 0x266a
#define mmGB_MACROTILE_MODE7 0x266b
#define mmGB_MACROTILE_MODE8 0x266c
#define mmGB_MACROTILE_MODE9 0x266d
#define mmGB_MACROTILE_MODE10 0x266e
#define mmGB_MACROTILE_MODE11 0x266f
#define mmGB_MACROTILE_MODE12 0x2670
#define mmGB_MACROTILE_MODE13 0x2671
#define mmGB_MACROTILE_MODE14 0x2672
#define mmGB_MACROTILE_MODE15 0x2673
#define mmGB_EDC_MODE 0x307e
#define mmCC_GC_EDC_CONFIG 0x3098
/* RAS: per-block signature registers */
#define mmRAS_SIGNATURE_CONTROL 0x3380
#define mmRAS_SIGNATURE_MASK 0x3381
#define mmRAS_SX_SIGNATURE0 0x3382
#define mmRAS_SX_SIGNATURE1 0x3383
#define mmRAS_SX_SIGNATURE2 0x3384
#define mmRAS_SX_SIGNATURE3 0x3385
#define mmRAS_DB_SIGNATURE0 0x338b
#define mmRAS_PA_SIGNATURE0 0x338c
#define mmRAS_VGT_SIGNATURE0 0x338d
#define mmRAS_SC_SIGNATURE0 0x338f
#define mmRAS_SC_SIGNATURE1 0x3390
#define mmRAS_SC_SIGNATURE2 0x3391
#define mmRAS_SC_SIGNATURE3 0x3392
#define mmRAS_SC_SIGNATURE4 0x3393
#define mmRAS_SC_SIGNATURE5 0x3394
#define mmRAS_SC_SIGNATURE6 0x3395
#define mmRAS_SC_SIGNATURE7 0x3396
#define mmRAS_IA_SIGNATURE0 0x3397
#define mmRAS_IA_SIGNATURE1 0x3398
#define mmRAS_SPI_SIGNATURE0 0x3399
#define mmRAS_SPI_SIGNATURE1 0x339a
#define mmRAS_TA_SIGNATURE0 0x339b
#define mmRAS_TD_SIGNATURE0 0x339c
#define mmRAS_CB_SIGNATURE0 0x339d
#define mmRAS_BCI_SIGNATURE0 0x339e
#define mmRAS_BCI_SIGNATURE1 0x339f
#define mmRAS_TA_SIGNATURE1 0x33a0
/* GRBM: graphics register bus manager */
#define mmGRBM_HYP_CAM_INDEX 0xf83e
#define mmGRBM_CAM_INDEX 0xf83e
#define mmGRBM_HYP_CAM_DATA 0xf83f
#define mmGRBM_CAM_DATA 0xf83f
#define mmGRBM_CNTL 0x2000
#define mmGRBM_SKEW_CNTL 0x2001
#define mmGRBM_PWR_CNTL 0x2003
#define mmGRBM_STATUS 0x2004
#define mmGRBM_STATUS2 0x2002
#define mmGRBM_STATUS_SE0 0x2005
#define mmGRBM_STATUS_SE1 0x2006
#define mmGRBM_STATUS_SE2 0x200e
#define mmGRBM_STATUS_SE3 0x200f
#define mmGRBM_SOFT_RESET 0x2008
#define mmGRBM_DEBUG_CNTL 0x2009
#define mmGRBM_DEBUG_DATA 0x200a
#define mmGRBM_CGTT_CLK_CNTL 0x200b
#define mmGRBM_GFX_INDEX 0xc200
#define mmGRBM_GFX_CLKEN_CNTL 0x200c
#define mmGRBM_WAIT_IDLE_CLOCKS 0x200d
#define mmGRBM_DEBUG 0x2014
#define mmGRBM_DEBUG_SNAPSHOT 0x2015
#define mmGRBM_READ_ERROR 0x2016
#define mmGRBM_READ_ERROR2 0x2017
#define mmGRBM_INT_CNTL 0x2018
#define mmGRBM_TRAP_OP 0x2019
#define mmGRBM_TRAP_ADDR 0x201a
#define mmGRBM_TRAP_ADDR_MSK 0x201b
#define mmGRBM_TRAP_WD 0x201c
#define mmGRBM_TRAP_WD_MSK 0x201d
#define mmGRBM_DSM_BYPASS 0x201e
#define mmGRBM_WRITE_ERROR 0x201f
#define mmGRBM_PERFCOUNTER0_SELECT 0xd840
#define mmGRBM_PERFCOUNTER1_SELECT 0xd841
#define mmGRBM_SE0_PERFCOUNTER_SELECT 0xd842
#define mmGRBM_SE1_PERFCOUNTER_SELECT 0xd843
#define mmGRBM_SE2_PERFCOUNTER_SELECT 0xd844
#define mmGRBM_SE3_PERFCOUNTER_SELECT 0xd845
#define mmGRBM_PERFCOUNTER0_LO 0xd040
#define mmGRBM_PERFCOUNTER0_HI 0xd041
#define mmGRBM_PERFCOUNTER1_LO 0xd043
#define mmGRBM_PERFCOUNTER1_HI 0xd044
#define mmGRBM_SE0_PERFCOUNTER_LO 0xd045
#define mmGRBM_SE0_PERFCOUNTER_HI 0xd046
#define mmGRBM_SE1_PERFCOUNTER_LO 0xd047
#define mmGRBM_SE1_PERFCOUNTER_HI 0xd048
#define mmGRBM_SE2_PERFCOUNTER_LO 0xd049
#define mmGRBM_SE2_PERFCOUNTER_HI 0xd04a
#define mmGRBM_SE3_PERFCOUNTER_LO 0xd04b
#define mmGRBM_SE3_PERFCOUNTER_HI 0xd04c
#define mmGRBM_SCRATCH_REG0 0x2040
#define mmGRBM_SCRATCH_REG1 0x2041
#define mmGRBM_SCRATCH_REG2 0x2042
#define mmGRBM_SCRATCH_REG3 0x2043
#define mmGRBM_SCRATCH_REG4 0x2044
#define mmGRBM_SCRATCH_REG5 0x2045
#define mmGRBM_SCRATCH_REG6 0x2046
#define mmGRBM_SCRATCH_REG7 0x2047
#define mmDEBUG_INDEX 0x203c
#define mmDEBUG_DATA 0x203d
#define mmGRBM_NOWHERE 0x203f
/* PA_CL: primitive assembler clipper (16 viewports x scale/offset per axis) */
#define mmPA_CL_VPORT_XSCALE 0xa10f
#define mmPA_CL_VPORT_XOFFSET 0xa110
#define mmPA_CL_VPORT_YSCALE 0xa111
#define mmPA_CL_VPORT_YOFFSET 0xa112
#define mmPA_CL_VPORT_ZSCALE 0xa113
#define mmPA_CL_VPORT_ZOFFSET 0xa114
#define mmPA_CL_VPORT_XSCALE_1 0xa115
#define mmPA_CL_VPORT_XSCALE_2 0xa11b
#define mmPA_CL_VPORT_XSCALE_3 0xa121
#define mmPA_CL_VPORT_XSCALE_4 0xa127
#define mmPA_CL_VPORT_XSCALE_5 0xa12d
#define mmPA_CL_VPORT_XSCALE_6 0xa133
#define mmPA_CL_VPORT_XSCALE_7 0xa139
#define mmPA_CL_VPORT_XSCALE_8 0xa13f
#define mmPA_CL_VPORT_XSCALE_9 0xa145
#define mmPA_CL_VPORT_XSCALE_10 0xa14b
#define mmPA_CL_VPORT_XSCALE_11 0xa151
#define mmPA_CL_VPORT_XSCALE_12 0xa157
#define mmPA_CL_VPORT_XSCALE_13 0xa15d
#define mmPA_CL_VPORT_XSCALE_14 0xa163
#define mmPA_CL_VPORT_XSCALE_15 0xa169
#define mmPA_CL_VPORT_XOFFSET_1 0xa116
#define mmPA_CL_VPORT_XOFFSET_2 0xa11c
#define mmPA_CL_VPORT_XOFFSET_3 0xa122
#define mmPA_CL_VPORT_XOFFSET_4 0xa128
#define mmPA_CL_VPORT_XOFFSET_5 0xa12e
#define mmPA_CL_VPORT_XOFFSET_6 0xa134
#define mmPA_CL_VPORT_XOFFSET_7 0xa13a
#define mmPA_CL_VPORT_XOFFSET_8 0xa140
#define mmPA_CL_VPORT_XOFFSET_9 0xa146
#define mmPA_CL_VPORT_XOFFSET_10 0xa14c
#define mmPA_CL_VPORT_XOFFSET_11 0xa152
#define mmPA_CL_VPORT_XOFFSET_12 0xa158
#define mmPA_CL_VPORT_XOFFSET_13 0xa15e
#define mmPA_CL_VPORT_XOFFSET_14 0xa164
#define mmPA_CL_VPORT_XOFFSET_15 0xa16a
#define mmPA_CL_VPORT_YSCALE_1 0xa117
#define mmPA_CL_VPORT_YSCALE_2 0xa11d
#define mmPA_CL_VPORT_YSCALE_3 0xa123
#define mmPA_CL_VPORT_YSCALE_4 0xa129
#define mmPA_CL_VPORT_YSCALE_5 0xa12f
#define mmPA_CL_VPORT_YSCALE_6 0xa135
#define mmPA_CL_VPORT_YSCALE_7 0xa13b
#define mmPA_CL_VPORT_YSCALE_8 0xa141
#define mmPA_CL_VPORT_YSCALE_9 0xa147
#define mmPA_CL_VPORT_YSCALE_10 0xa14d
#define mmPA_CL_VPORT_YSCALE_11 0xa153
#define mmPA_CL_VPORT_YSCALE_12 0xa159
#define mmPA_CL_VPORT_YSCALE_13 0xa15f
#define mmPA_CL_VPORT_YSCALE_14 0xa165
#define mmPA_CL_VPORT_YSCALE_15 0xa16b
#define mmPA_CL_VPORT_YOFFSET_1 0xa118
#define mmPA_CL_VPORT_YOFFSET_2 0xa11e
#define mmPA_CL_VPORT_YOFFSET_3 0xa124
#define mmPA_CL_VPORT_YOFFSET_4 0xa12a
#define mmPA_CL_VPORT_YOFFSET_5 0xa130
#define mmPA_CL_VPORT_YOFFSET_6 0xa136
#define mmPA_CL_VPORT_YOFFSET_7 0xa13c
#define mmPA_CL_VPORT_YOFFSET_8 0xa142
#define mmPA_CL_VPORT_YOFFSET_9 0xa148
#define mmPA_CL_VPORT_YOFFSET_10 0xa14e
#define mmPA_CL_VPORT_YOFFSET_11 0xa154
#define mmPA_CL_VPORT_YOFFSET_12 0xa15a
#define mmPA_CL_VPORT_YOFFSET_13 0xa160
#define mmPA_CL_VPORT_YOFFSET_14 0xa166
#define mmPA_CL_VPORT_YOFFSET_15 0xa16c
#define mmPA_CL_VPORT_ZSCALE_1 0xa119
#define mmPA_CL_VPORT_ZSCALE_2 0xa11f
#define mmPA_CL_VPORT_ZSCALE_3 0xa125
#define mmPA_CL_VPORT_ZSCALE_4 0xa12b
#define mmPA_CL_VPORT_ZSCALE_5 0xa131
#define mmPA_CL_VPORT_ZSCALE_6 0xa137
#define mmPA_CL_VPORT_ZSCALE_7 0xa13d
#define mmPA_CL_VPORT_ZSCALE_8 0xa143
#define mmPA_CL_VPORT_ZSCALE_9 0xa149
#define mmPA_CL_VPORT_ZSCALE_10 0xa14f
#define mmPA_CL_VPORT_ZSCALE_11 0xa155
#define mmPA_CL_VPORT_ZSCALE_12 0xa15b
#define mmPA_CL_VPORT_ZSCALE_13 0xa161
#define mmPA_CL_VPORT_ZSCALE_14 0xa167
#define mmPA_CL_VPORT_ZSCALE_15 0xa16d
#define mmPA_CL_VPORT_ZOFFSET_1 0xa11a
#define mmPA_CL_VPORT_ZOFFSET_2 0xa120
#define mmPA_CL_VPORT_ZOFFSET_3 0xa126
#define mmPA_CL_VPORT_ZOFFSET_4 0xa12c
#define mmPA_CL_VPORT_ZOFFSET_5 0xa132
#define mmPA_CL_VPORT_ZOFFSET_6 0xa138
#define mmPA_CL_VPORT_ZOFFSET_7 0xa13e
#define mmPA_CL_VPORT_ZOFFSET_8 0xa144
#define mmPA_CL_VPORT_ZOFFSET_9 0xa14a
#define mmPA_CL_VPORT_ZOFFSET_10 0xa150
#define mmPA_CL_VPORT_ZOFFSET_11 0xa156
#define mmPA_CL_VPORT_ZOFFSET_12 0xa15c
#define mmPA_CL_VPORT_ZOFFSET_13 0xa162
#define mmPA_CL_VPORT_ZOFFSET_14 0xa168
#define mmPA_CL_VPORT_ZOFFSET_15 0xa16e
#define mmPA_CL_VTE_CNTL 0xa206
#define mmPA_CL_VS_OUT_CNTL 0xa207
#define mmPA_CL_NANINF_CNTL 0xa208
#define mmPA_CL_CLIP_CNTL 0xa204
#define mmPA_CL_GB_VERT_CLIP_ADJ 0xa2fa
#define mmPA_CL_GB_VERT_DISC_ADJ 0xa2fb
#define mmPA_CL_GB_HORZ_CLIP_ADJ 0xa2fc
#define mmPA_CL_GB_HORZ_DISC_ADJ 0xa2fd
#define mmPA_CL_UCP_0_X 0xa16f
#define mmPA_CL_UCP_0_Y 0xa170
#define mmPA_CL_UCP_0_Z 0xa171
#define mmPA_CL_UCP_0_W 0xa172
#define mmPA_CL_UCP_1_X 0xa173
#define mmPA_CL_UCP_1_Y 0xa174
#define mmPA_CL_UCP_1_Z 0xa175
#define mmPA_CL_UCP_1_W 0xa176
#define mmPA_CL_UCP_2_X 0xa177
#define mmPA_CL_UCP_2_Y 0xa178
#define mmPA_CL_UCP_2_Z 0xa179
#define mmPA_CL_UCP_2_W 0xa17a
#define mmPA_CL_UCP_3_X 0xa17b
#define mmPA_CL_UCP_3_Y 0xa17c
#define mmPA_CL_UCP_3_Z 0xa17d
#define mmPA_CL_UCP_3_W 0xa17e
#define mmPA_CL_UCP_4_X 0xa17f
#define mmPA_CL_UCP_4_Y 0xa180
#define mmPA_CL_UCP_4_Z 0xa181
#define mmPA_CL_UCP_4_W 0xa182
#define mmPA_CL_UCP_5_X 0xa183
#define mmPA_CL_UCP_5_Y 0xa184
#define mmPA_CL_UCP_5_Z 0xa185
#define mmPA_CL_UCP_5_W 0xa186
#define mmPA_CL_POINT_X_RAD 0xa1f5
#define mmPA_CL_POINT_Y_RAD 0xa1f6
#define mmPA_CL_POINT_SIZE 0xa1f7
#define mmPA_CL_POINT_CULL_RAD 0xa1f8
#define mmPA_CL_ENHANCE 0x2285
#define mmPA_CL_RESET_DEBUG 0x2286
/* PA_SU: primitive assembler setup unit */
#define mmPA_SU_VTX_CNTL 0xa2f9
#define mmPA_SU_POINT_SIZE 0xa280
#define mmPA_SU_POINT_MINMAX 0xa281
#define mmPA_SU_LINE_CNTL 0xa282
#define mmPA_SU_LINE_STIPPLE_CNTL 0xa209
#define mmPA_SU_LINE_STIPPLE_SCALE 0xa20a
#define mmPA_SU_PRIM_FILTER_CNTL 0xa20b
#define mmPA_SU_SC_MODE_CNTL 0xa205
#define mmPA_SU_POLY_OFFSET_DB_FMT_CNTL 0xa2de
#define mmPA_SU_POLY_OFFSET_CLAMP 0xa2df
#define mmPA_SU_POLY_OFFSET_FRONT_SCALE 0xa2e0
#define mmPA_SU_POLY_OFFSET_FRONT_OFFSET 0xa2e1
#define mmPA_SU_POLY_OFFSET_BACK_SCALE 0xa2e2
#define mmPA_SU_POLY_OFFSET_BACK_OFFSET 0xa2e3
#define mmPA_SU_HARDWARE_SCREEN_OFFSET 0xa08d
#define mmPA_SU_LINE_STIPPLE_VALUE 0xc280
#define mmPA_SU_PERFCOUNTER0_SELECT 0xd900
#define mmPA_SU_PERFCOUNTER0_SELECT1 0xd901
#define mmPA_SU_PERFCOUNTER1_SELECT 0xd902
#define mmPA_SU_PERFCOUNTER1_SELECT1 0xd903
#define mmPA_SU_PERFCOUNTER2_SELECT 0xd904
#define mmPA_SU_PERFCOUNTER3_SELECT 0xd905
#define mmPA_SU_PERFCOUNTER0_LO 0xd100
#define mmPA_SU_PERFCOUNTER0_HI 0xd101
#define mmPA_SU_PERFCOUNTER1_LO 0xd102
#define mmPA_SU_PERFCOUNTER1_HI 0xd103
#define mmPA_SU_PERFCOUNTER2_LO 0xd104
#define mmPA_SU_PERFCOUNTER2_HI 0xd105
#define mmPA_SU_PERFCOUNTER3_LO 0xd106
#define mmPA_SU_PERFCOUNTER3_HI 0xd107
/* PA_SC: scan converter */
#define mmPA_SC_AA_CONFIG 0xa2f8
#define mmPA_SC_AA_MASK_X0Y0_X1Y0 0xa30e
#define mmPA_SC_AA_MASK_X0Y1_X1Y1 0xa30f
#define mmPA_SC_SHADER_CONTROL 0xa310
#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 0xa2fe
#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1 0xa2ff
#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2 0xa300
#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3 0xa301
#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0 0xa302
#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1 0xa303
#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2 0xa304
#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3 0xa305
#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0 0xa306
#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1 0xa307
#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2 0xa308
#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3 0xa309
#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0 0xa30a
#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1 0xa30b
#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2 0xa30c
#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3 0xa30d
#define mmPA_SC_CENTROID_PRIORITY_0 0xa2f5
#define mmPA_SC_CENTROID_PRIORITY_1 0xa2f6
#define mmPA_SC_CLIPRECT_0_TL 0xa084
#define mmPA_SC_CLIPRECT_0_BR 0xa085
#define mmPA_SC_CLIPRECT_1_TL 0xa086
#define mmPA_SC_CLIPRECT_1_BR 0xa087
#define mmPA_SC_CLIPRECT_2_TL 0xa088
#define mmPA_SC_CLIPRECT_2_BR 0xa089
#define mmPA_SC_CLIPRECT_3_TL 0xa08a
#define mmPA_SC_CLIPRECT_3_BR 0xa08b
#define mmPA_SC_CLIPRECT_RULE 0xa083
#define mmPA_SC_EDGERULE 0xa08c
#define mmPA_SC_LINE_CNTL 0xa2f7
#define mmPA_SC_LINE_STIPPLE 0xa283
#define mmPA_SC_MODE_CNTL_0 0xa292
#define mmPA_SC_MODE_CNTL_1 0xa293
#define mmPA_SC_RASTER_CONFIG 0xa0d4
#define mmPA_SC_RASTER_CONFIG_1 0xa0d5
#define mmPA_SC_SCREEN_EXTENT_CONTROL 0xa0d6
#define mmPA_SC_GENERIC_SCISSOR_TL 0xa090
#define mmPA_SC_GENERIC_SCISSOR_BR 0xa091
#define mmPA_SC_SCREEN_SCISSOR_TL 0xa00c
#define mmPA_SC_SCREEN_SCISSOR_BR 0xa00d
#define mmPA_SC_WINDOW_OFFSET 0xa080
#define mmPA_SC_WINDOW_SCISSOR_TL 0xa081
#define mmPA_SC_WINDOW_SCISSOR_BR 0xa082
#define mmPA_SC_VPORT_SCISSOR_0_TL 0xa094
#define mmPA_SC_VPORT_SCISSOR_1_TL 0xa096
#define mmPA_SC_VPORT_SCISSOR_2_TL 0xa098
#define mmPA_SC_VPORT_SCISSOR_3_TL 0xa09a
#define mmPA_SC_VPORT_SCISSOR_4_TL 0xa09c
#define mmPA_SC_VPORT_SCISSOR_5_TL 0xa09e
#define mmPA_SC_VPORT_SCISSOR_6_TL 0xa0a0
#define mmPA_SC_VPORT_SCISSOR_7_TL 0xa0a2
#define mmPA_SC_VPORT_SCISSOR_8_TL 0xa0a4
#define mmPA_SC_VPORT_SCISSOR_9_TL 0xa0a6
#define mmPA_SC_VPORT_SCISSOR_10_TL 0xa0a8
#define mmPA_SC_VPORT_SCISSOR_11_TL 0xa0aa
#define mmPA_SC_VPORT_SCISSOR_12_TL 0xa0ac
#define mmPA_SC_VPORT_SCISSOR_13_TL 0xa0ae
#define mmPA_SC_VPORT_SCISSOR_14_TL 0xa0b0
#define mmPA_SC_VPORT_SCISSOR_15_TL 0xa0b2
#define mmPA_SC_VPORT_SCISSOR_0_BR 0xa095
#define mmPA_SC_VPORT_SCISSOR_1_BR 0xa097
#define mmPA_SC_VPORT_SCISSOR_2_BR 0xa099
#define mmPA_SC_VPORT_SCISSOR_3_BR 0xa09b
#define mmPA_SC_VPORT_SCISSOR_4_BR 0xa09d
#define mmPA_SC_VPORT_SCISSOR_5_BR 0xa09f
#define mmPA_SC_VPORT_SCISSOR_6_BR 0xa0a1
#define mmPA_SC_VPORT_SCISSOR_7_BR 0xa0a3
#define mmPA_SC_VPORT_SCISSOR_8_BR 0xa0a5
#define mmPA_SC_VPORT_SCISSOR_9_BR 0xa0a7
#define mmPA_SC_VPORT_SCISSOR_10_BR 0xa0a9
#define mmPA_SC_VPORT_SCISSOR_11_BR 0xa0ab
#define mmPA_SC_VPORT_SCISSOR_12_BR 0xa0ad
#define mmPA_SC_VPORT_SCISSOR_13_BR 0xa0af
#define mmPA_SC_VPORT_SCISSOR_14_BR 0xa0b1
#define mmPA_SC_VPORT_SCISSOR_15_BR 0xa0b3
#define mmPA_SC_VPORT_ZMIN_0 0xa0b4
#define mmPA_SC_VPORT_ZMIN_1 0xa0b6
#define mmPA_SC_VPORT_ZMIN_2 0xa0b8
#define mmPA_SC_VPORT_ZMIN_3 0xa0ba
#define mmPA_SC_VPORT_ZMIN_4 0xa0bc
#define mmPA_SC_VPORT_ZMIN_5 0xa0be
#define mmPA_SC_VPORT_ZMIN_6 0xa0c0
#define mmPA_SC_VPORT_ZMIN_7 0xa0c2
#define mmPA_SC_VPORT_ZMIN_8 0xa0c4
#define mmPA_SC_VPORT_ZMIN_9 0xa0c6
#define mmPA_SC_VPORT_ZMIN_10 0xa0c8
#define mmPA_SC_VPORT_ZMIN_11 0xa0ca
#define mmPA_SC_VPORT_ZMIN_12 0xa0cc
#define mmPA_SC_VPORT_ZMIN_13 0xa0ce
#define mmPA_SC_VPORT_ZMIN_14 0xa0d0
#define mmPA_SC_VPORT_ZMIN_15 0xa0d2
#define mmPA_SC_VPORT_ZMAX_0 0xa0b5
#define mmPA_SC_VPORT_ZMAX_1 0xa0b7
#define mmPA_SC_VPORT_ZMAX_2 0xa0b9
#define mmPA_SC_VPORT_ZMAX_3 0xa0bb
#define mmPA_SC_VPORT_ZMAX_4 0xa0bd
#define mmPA_SC_VPORT_ZMAX_5 0xa0bf
#define mmPA_SC_VPORT_ZMAX_6 0xa0c1
#define mmPA_SC_VPORT_ZMAX_7 0xa0c3
#define mmPA_SC_VPORT_ZMAX_8 0xa0c5
#define mmPA_SC_VPORT_ZMAX_9 0xa0c7
#define mmPA_SC_VPORT_ZMAX_10 0xa0c9
#define mmPA_SC_VPORT_ZMAX_11 0xa0cb
#define mmPA_SC_VPORT_ZMAX_12 0xa0cd
#define mmPA_SC_VPORT_ZMAX_13 0xa0cf
#define mmPA_SC_VPORT_ZMAX_14 0xa0d1
#define mmPA_SC_VPORT_ZMAX_15 0xa0d3
#define mmPA_SC_ENHANCE 0x22fc
#define mmPA_SC_ENHANCE_1 0x22fd
#define mmPA_SC_DSM_CNTL 0x22fe
#define mmPA_SC_FIFO_SIZE 0x22f3
#define mmPA_SC_IF_FIFO_SIZE 0x22f5
#define mmPA_SC_FORCE_EOV_MAX_CNTS 0x22c9
#define mmPA_SC_LINE_STIPPLE_STATE 0xc281
#define mmPA_SC_SCREEN_EXTENT_MIN_0 0xc284
#define mmPA_SC_SCREEN_EXTENT_MAX_0 0xc285
#define mmPA_SC_SCREEN_EXTENT_MIN_1 0xc286
#define mmPA_SC_SCREEN_EXTENT_MAX_1 0xc28b
#define mmPA_SC_PERFCOUNTER0_SELECT 0xd940
#define mmPA_SC_PERFCOUNTER0_SELECT1 0xd941
#define mmPA_SC_PERFCOUNTER1_SELECT 0xd942
#define mmPA_SC_PERFCOUNTER2_SELECT 0xd943
#define mmPA_SC_PERFCOUNTER3_SELECT 0xd944
#define mmPA_SC_PERFCOUNTER4_SELECT 0xd945
#define mmPA_SC_PERFCOUNTER5_SELECT 0xd946
#define mmPA_SC_PERFCOUNTER6_SELECT 0xd947
#define mmPA_SC_PERFCOUNTER7_SELECT 0xd948
#define mmPA_SC_PERFCOUNTER0_LO 0xd140
#define mmPA_SC_PERFCOUNTER0_HI 0xd141
#define mmPA_SC_PERFCOUNTER1_LO 0xd142
#define mmPA_SC_PERFCOUNTER1_HI 0xd143
#define mmPA_SC_PERFCOUNTER2_LO 0xd144
#define mmPA_SC_PERFCOUNTER2_HI 0xd145
#define mmPA_SC_PERFCOUNTER3_LO 0xd146
#define mmPA_SC_PERFCOUNTER3_HI 0xd147
#define mmPA_SC_PERFCOUNTER4_LO 0xd148
#define mmPA_SC_PERFCOUNTER4_HI 0xd149
#define mmPA_SC_PERFCOUNTER5_LO 0xd14a
#define mmPA_SC_PERFCOUNTER5_HI 0xd14b
#define mmPA_SC_PERFCOUNTER6_LO 0xd14c
#define mmPA_SC_PERFCOUNTER6_HI 0xd14d
#define mmPA_SC_PERFCOUNTER7_LO 0xd14e
#define mmPA_SC_PERFCOUNTER7_HI 0xd14f
#define mmPA_SC_P3D_TRAP_SCREEN_HV_EN 0xc2a0
#define mmPA_SC_P3D_TRAP_SCREEN_H 0xc2a1
#define mmPA_SC_P3D_TRAP_SCREEN_V 0xc2a2
#define mmPA_SC_P3D_TRAP_SCREEN_OCCURRENCE 0xc2a3
#define mmPA_SC_P3D_TRAP_SCREEN_COUNT 0xc2a4
#define mmPA_SC_HP3D_TRAP_SCREEN_HV_EN 0xc2a8
#define mmPA_SC_HP3D_TRAP_SCREEN_H 0xc2a9
#define mmPA_SC_HP3D_TRAP_SCREEN_V 0xc2aa
#define mmPA_SC_HP3D_TRAP_SCREEN_OCCURRENCE 0xc2ab
#define mmPA_SC_HP3D_TRAP_SCREEN_COUNT 0xc2ac
#define mmPA_SC_TRAP_SCREEN_HV_EN 0xc2b0
#define mmPA_SC_TRAP_SCREEN_H 0xc2b1
#define mmPA_SC_TRAP_SCREEN_V 0xc2b2
#define mmPA_SC_TRAP_SCREEN_OCCURRENCE 0xc2b3
#define mmPA_SC_TRAP_SCREEN_COUNT 0xc2b4
#define mmPA_SC_P3D_TRAP_SCREEN_HV_LOCK 0x22c0
#define mmPA_SC_HP3D_TRAP_SCREEN_HV_LOCK 0x22c1
#define mmPA_SC_TRAP_SCREEN_HV_LOCK 0x22c2
#define mmPA_CL_CNTL_STATUS 0x2284
#define mmPA_SU_CNTL_STATUS 0x2294
#define mmPA_SC_FIFO_DEPTH_CNTL 0x2295
#define mmCGTT_PA_CLK_CTRL 0xf088
#define mmCGTT_SC_CLK_CTRL 0xf089
#define mmPA_SU_DEBUG_CNTL 0x2280
#define mmPA_SU_DEBUG_DATA 0x2281
#define mmPA_SC_DEBUG_CNTL 0x22f6
#define mmPA_SC_DEBUG_DATA 0x22f7
/* ix*: indirect debug register indices (accessed through DEBUG_INDEX/DATA) */
#define ixCLIPPER_DEBUG_REG00 0x0
#define ixCLIPPER_DEBUG_REG01 0x1
#define ixCLIPPER_DEBUG_REG02 0x2
#define ixCLIPPER_DEBUG_REG03 0x3
#define ixCLIPPER_DEBUG_REG04 0x4
#define ixCLIPPER_DEBUG_REG05 0x5
#define ixCLIPPER_DEBUG_REG06 0x6
#define ixCLIPPER_DEBUG_REG07 0x7
#define ixCLIPPER_DEBUG_REG08 0x8
#define ixCLIPPER_DEBUG_REG09 0x9
#define ixCLIPPER_DEBUG_REG10 0xa
#define ixCLIPPER_DEBUG_REG11 0xb
#define ixCLIPPER_DEBUG_REG12 0xc
#define ixCLIPPER_DEBUG_REG13 0xd
#define ixCLIPPER_DEBUG_REG14 0xe
#define ixCLIPPER_DEBUG_REG15 0xf
#define ixCLIPPER_DEBUG_REG16 0x10
#define ixCLIPPER_DEBUG_REG17 0x11
#define ixCLIPPER_DEBUG_REG18 0x12
#define ixCLIPPER_DEBUG_REG19 0x13
#define ixSXIFCCG_DEBUG_REG0 0x14
#define ixSXIFCCG_DEBUG_REG1 0x15
#define ixSXIFCCG_DEBUG_REG2 0x16
#define ixSXIFCCG_DEBUG_REG3 0x17
#define ixSETUP_DEBUG_REG0 0x18
#define ixSETUP_DEBUG_REG1 0x19
#define ixSETUP_DEBUG_REG2 0x1a
#define ixSETUP_DEBUG_REG3 0x1b
#define ixSETUP_DEBUG_REG4 0x1c
#define ixSETUP_DEBUG_REG5 0x1d
#define ixPA_SC_DEBUG_REG0 0x0
#define ixPA_SC_DEBUG_REG1 0x1
/* COMPUTE: dispatch state */
#define mmCOMPUTE_DISPATCH_INITIATOR 0x2e00
#define mmCOMPUTE_DIM_X 0x2e01
#define mmCOMPUTE_DIM_Y 0x2e02
#define mmCOMPUTE_DIM_Z 0x2e03
#define mmCOMPUTE_START_X 0x2e04
#define mmCOMPUTE_START_Y 0x2e05
#define mmCOMPUTE_START_Z 0x2e06
#define mmCOMPUTE_NUM_THREAD_X 0x2e07
#define mmCOMPUTE_NUM_THREAD_Y 0x2e08
#define mmCOMPUTE_NUM_THREAD_Z 0x2e09
#define mmCOMPUTE_PIPELINESTAT_ENABLE 0x2e0a
#define mmCOMPUTE_PERFCOUNT_ENABLE 0x2e0b
#define mmCOMPUTE_PGM_LO 0x2e0c
#define mmCOMPUTE_PGM_HI 0x2e0d
#define mmCOMPUTE_TBA_LO 0x2e0e
#define mmCOMPUTE_TBA_HI 0x2e0f
#define mmCOMPUTE_TMA_LO 0x2e10
#define mmCOMPUTE_TMA_HI 0x2e11
#define mmCOMPUTE_PGM_RSRC1 0x2e12
#define mmCOMPUTE_PGM_RSRC2 0x2e13
#define mmCOMPUTE_VMID 0x2e14
#define mmCOMPUTE_RESOURCE_LIMITS 0x2e15
#define mmCOMPUTE_STATIC_THREAD_MGMT_SE0 0x2e16
#define mmCOMPUTE_STATIC_THREAD_MGMT_SE1 0x2e17
#define mmCOMPUTE_TMPRING_SIZE 0x2e18
#define mmCOMPUTE_STATIC_THREAD_MGMT_SE2 0x2e19
#define mmCOMPUTE_STATIC_THREAD_MGMT_SE3 0x2e1a
#define mmCOMPUTE_RESTART_X 0x2e1b
#define mmCOMPUTE_RESTART_Y 0x2e1c
#define mmCOMPUTE_RESTART_Z 0x2e1d
#define mmCOMPUTE_THREAD_TRACE_ENABLE 0x2e1e
#define mmCOMPUTE_MISC_RESERVED 0x2e1f
#define mmCOMPUTE_DISPATCH_ID 0x2e20
#define mmCOMPUTE_THREADGROUP_ID 0x2e21
#define mmCOMPUTE_RELAUNCH 0x2e22
#define mmCOMPUTE_WAVE_RESTORE_ADDR_LO 0x2e23
#define mmCOMPUTE_WAVE_RESTORE_ADDR_HI 0x2e24
#define mmCOMPUTE_WAVE_RESTORE_CONTROL 0x2e25
#define mmCOMPUTE_USER_DATA_0 0x2e40
#define mmCOMPUTE_USER_DATA_1 0x2e41
#define mmCOMPUTE_USER_DATA_2 0x2e42
#define mmCOMPUTE_USER_DATA_3 0x2e43
#define mmCOMPUTE_USER_DATA_4 0x2e44
#define mmCOMPUTE_USER_DATA_5 0x2e45
#define mmCOMPUTE_USER_DATA_6 0x2e46
#define mmCOMPUTE_USER_DATA_7 0x2e47
#define mmCOMPUTE_USER_DATA_8 0x2e48
#define mmCOMPUTE_USER_DATA_9 0x2e49
#define mmCOMPUTE_USER_DATA_10 0x2e4a
#define mmCOMPUTE_USER_DATA_11 0x2e4b
#define mmCOMPUTE_USER_DATA_12 0x2e4c
#define mmCOMPUTE_USER_DATA_13 0x2e4d
#define mmCOMPUTE_USER_DATA_14 0x2e4e
#define mmCOMPUTE_USER_DATA_15 0x2e4f
#define mmCOMPUTE_NOWHERE 0x2e7f
#define mmCSPRIV_CONNECT 0x0
#define mmCSPRIV_THREAD_TRACE_TG0 0x1e
#define mmCSPRIV_THREAD_TRACE_TG1 0x1e
#define mmCSPRIV_THREAD_TRACE_TG2 0x1e
#define mmCSPRIV_THREAD_TRACE_TG3 0x1e
#define mmCSPRIV_THREAD_TRACE_EVENT 0x1f
/* RLC: run-list controller */
#define mmRLC_CNTL 0xec00
#define mmRLC_DEBUG_SELECT 0xec01
#define mmRLC_DEBUG 0xec02
#define mmRLC_MC_CNTL 0xec03
#define mmRLC_STAT 0xec04
#define mmRLC_SAFE_MODE 0xec05
#define mmRLC_MEM_SLP_CNTL 0xec06
#define mmSMU_RLC_RESPONSE 0xec07
#define mmRLC_RLCV_SAFE_MODE 0xec08
#define mmRLC_SMU_SAFE_MODE 0xec09
#define mmRLC_RLCV_COMMAND 0xec0a
#define mmRLC_CLK_CNTL 0xec0b
#define mmRLC_PERFMON_CLK_CNTL 0xdcbf
#define mmRLC_PERFMON_CNTL 0xdcc0
#define mmRLC_PERFCOUNTER0_SELECT 0xdcc1
#define mmRLC_PERFCOUNTER1_SELECT 0xdcc2
#define mmRLC_PERFCOUNTER0_LO 0xd480
#define mmRLC_PERFCOUNTER1_LO 0xd482
#define mmRLC_PERFCOUNTER0_HI 0xd481
#define mmRLC_PERFCOUNTER1_HI 0xd483
#define mmCGTT_RLC_CLK_CTRL 0xf0b8
#define mmRLC_LB_CNTL 0xec19
#define mmRLC_LB_CNTR_MAX 0xec12
#define mmRLC_LB_CNTR_INIT 0xec1b
#define mmRLC_LOAD_BALANCE_CNTR 0xec1c
#define mmRLC_JUMP_TABLE_RESTORE 0xec1e
#define mmRLC_PG_DELAY_2 0xec1f
#define mmRLC_GPM_DEBUG_SELECT 0xec20
#define mmRLC_GPM_DEBUG 0xec21
#define mmRLC_GPM_DEBUG_INST_A 0xec22
#define mmRLC_GPM_DEBUG_INST_B 0xec23
#define mmRLC_GPM_DEBUG_INST_ADDR 0xec1d
#define mmRLC_GPM_UCODE_ADDR 0xf83c
#define mmRLC_GPM_UCODE_DATA 0xf83d
#define mmGPU_BIST_CONTROL 0xf835
#define mmRLC_ROM_CNTL 0xf836
#define mmRLC_GPU_CLOCK_COUNT_LSB 0xec24
#define mmRLC_GPU_CLOCK_COUNT_MSB 0xec25
#define mmRLC_CAPTURE_GPU_CLOCK_COUNT 0xec26
#define mmRLC_UCODE_CNTL 0xec27
#define mmRLC_GPM_STAT 0xec40
#define mmRLC_GPU_CLOCK_32_RES_SEL 0xec41
#define mmRLC_GPU_CLOCK_32 0xec42
#define mmRLC_PG_CNTL 0xec43
#define mmRLC_GPM_THREAD_PRIORITY 0xec44
#define mmRLC_GPM_THREAD_ENABLE 0xec45
#define mmRLC_GPM_VMID_THREAD0 0xec46
#define mmRLC_GPM_VMID_THREAD1 0xec47
#define mmRLC_CGTT_MGCG_OVERRIDE 0xec48
#define mmRLC_CGCG_CGLS_CTRL 0xec49
#define mmRLC_CGCG_RAMP_CTRL 0xec4a
#define mmRLC_DYN_PG_STATUS 0xec4b
#define mmRLC_DYN_PG_REQUEST 0xec4c
#define mmRLC_PG_DELAY 0xec4d
#define mmRLC_CU_STATUS 0xec4e
#define mmRLC_LB_INIT_CU_MASK 0xec4f
#define mmRLC_LB_ALWAYS_ACTIVE_CU_MASK 0xec50
#define mmRLC_LB_PARAMS 0xec51
#define mmRLC_THREAD1_DELAY 0xec52
#define mmRLC_PG_ALWAYS_ON_CU_MASK 0xec53
#define mmRLC_MAX_PG_CU 0xec54
#define mmRLC_AUTO_PG_CTRL 0xec55
#define mmRLC_SMU_GRBM_REG_SAVE_CTRL 0xec56
#define mmRLC_SERDES_RD_MASTER_INDEX 0xec59
#define mmRLC_SERDES_RD_DATA_0 0xec5a
#define mmRLC_SERDES_RD_DATA_1 0xec5b
#define mmRLC_SERDES_RD_DATA_2 0xec5c
#define mmRLC_SERDES_WR_CU_MASTER_MASK 0xec5d
#define mmRLC_SERDES_WR_NONCU_MASTER_MASK 0xec5e
#define mmRLC_SERDES_WR_CTRL 0xec5f
#define mmRLC_SERDES_WR_DATA 0xec60
#define mmRLC_SERDES_CU_MASTER_BUSY 0xec61
#define mmRLC_SERDES_NONCU_MASTER_BUSY 0xec62
#define mmRLC_GPM_GENERAL_0 0xec63
#define mmRLC_GPM_GENERAL_1 0xec64
#define mmRLC_GPM_GENERAL_2 0xec65
#define mmRLC_GPM_GENERAL_3 0xec66
#define mmRLC_GPM_GENERAL_4 0xec67
#define mmRLC_GPM_GENERAL_5 0xec68
#define mmRLC_GPM_GENERAL_6 0xec69
#define mmRLC_GPM_GENERAL_7 0xec6a
#define mmRLC_GPM_SCRATCH_ADDR 0xec6c
#define mmRLC_GPM_SCRATCH_DATA 0xec6d
#define mmRLC_STATIC_PG_STATUS 0xec6e
#define mmRLC_GPM_PERF_COUNT_0 0xec6f
#define mmRLC_GPM_PERF_COUNT_1 0xec70
#define mmRLC_GPR_REG1 0xec79
#define mmRLC_GPR_REG2 0xec7a
#define mmRLC_MGCG_CTRL 0xec1a
#define mmRLC_GPM_THREAD_RESET 0xec28
#define mmRLC_SPM_VMID 0xec71
#define mmRLC_SPM_INT_CNTL 0xec72
#define mmRLC_SPM_INT_STATUS 0xec73
#define mmRLC_SPM_DEBUG_SELECT 0xec74
#define mmRLC_SPM_DEBUG 0xec75
#define mmRLC_SMU_MESSAGE 0xec76
#define mmRLC_GPM_LOG_SIZE 0xec77
#define mmRLC_GPM_LOG_CONT 0xec7b
#define mmRLC_PG_DELAY_3 0xec78
#define mmRLC_GPM_INT_DISABLE_TH0 0xec7c
#define mmRLC_GPM_INT_DISABLE_TH1 0xec7d
#define mmRLC_GPM_INT_FORCE_TH0 0xec7e
#define mmRLC_GPM_INT_FORCE_TH1 0xec7f
#define mmRLC_SRM_CNTL 0xec80
#define mmRLC_SRM_DEBUG_SELECT 0xec81
#define mmRLC_SRM_DEBUG 0xec82
#define mmRLC_SRM_ARAM_ADDR 0xec83
#define mmRLC_SRM_ARAM_DATA 0xec84
#define mmRLC_SRM_DRAM_ADDR 0xec85
#define mmRLC_SRM_DRAM_DATA 0xec86
#define mmRLC_SRM_GPM_COMMAND 0xec87
#define mmRLC_SRM_GPM_COMMAND_STATUS 0xec88
#define mmRLC_SRM_RLCV_COMMAND 0xec89
#define mmRLC_SRM_RLCV_COMMAND_STATUS 0xec8a
#define mmRLC_SRM_INDEX_CNTL_ADDR_0 0xec8b
#define mmRLC_SRM_INDEX_CNTL_ADDR_1 0xec8c
#define mmRLC_SRM_INDEX_CNTL_ADDR_2 0xec8d
#define mmRLC_SRM_INDEX_CNTL_ADDR_3 0xec8e
#define mmRLC_SRM_INDEX_CNTL_ADDR_4 0xec8f
#define mmRLC_SRM_INDEX_CNTL_ADDR_5 0xec90
#define mmRLC_SRM_INDEX_CNTL_ADDR_6 0xec91
#define mmRLC_SRM_INDEX_CNTL_ADDR_7 0xec92
#define mmRLC_SRM_INDEX_CNTL_DATA_0 0xec93
#define mmRLC_SRM_INDEX_CNTL_DATA_1 0xec94
#define mmRLC_SRM_INDEX_CNTL_DATA_2 0xec95
#define mmRLC_SRM_INDEX_CNTL_DATA_3 0xec96
#define mmRLC_SRM_INDEX_CNTL_DATA_4 0xec97
#define mmRLC_SRM_INDEX_CNTL_DATA_5 0xec98
#define mmRLC_SRM_INDEX_CNTL_DATA_6 0xec99
#define mmRLC_SRM_INDEX_CNTL_DATA_7 0xec9a
#define mmRLC_SRM_STAT 0xec9b
#define mmRLC_SRM_GPM_ABORT 0xec9c
#define mmRLC_CSIB_ADDR_LO 0xeca2
#define mmRLC_CSIB_ADDR_HI 0xeca3
#define mmRLC_CSIB_LENGTH 0xeca4
#define mmRLC_CP_RESPONSE0 0xeca5
#define mmRLC_CP_RESPONSE1 0xeca6
#define mmRLC_CP_RESPONSE2 0xeca7
#define mmRLC_CP_RESPONSE3 0xeca8
#define mmRLC_SMU_COMMAND 0xeca9
#define mmRLC_CP_SCHEDULERS 0xecaa
#define mmRLC_SMU_ARGUMENT_1 0xecab
#define mmRLC_SMU_ARGUMENT_2 0xecac
#define mmRLC_GPM_GENERAL_8 0xecad
#define mmRLC_GPM_GENERAL_9 0xecae
#define mmRLC_GPM_GENERAL_10 0xecaf
#define mmRLC_GPM_GENERAL_11 0xecb0
#define mmRLC_GPM_GENERAL_12 0xecb1
#define mmRLC_SPM_PERFMON_CNTL 0xdc80
#define mmRLC_SPM_PERFMON_RING_BASE_LO 0xdc81
#define mmRLC_SPM_PERFMON_RING_BASE_HI 0xdc82
#define mmRLC_SPM_PERFMON_RING_SIZE 0xdc83
#define mmRLC_SPM_PERFMON_SEGMENT_SIZE 0xdc84
#define mmRLC_SPM_SE_MUXSEL_ADDR 0xdc85
#define mmRLC_SPM_SE_MUXSEL_DATA 0xdc86
#define mmRLC_SPM_CPG_PERFMON_SAMPLE_DELAY 0xdc87
#define mmRLC_SPM_CPC_PERFMON_SAMPLE_DELAY 0xdc88
#define mmRLC_SPM_CPF_PERFMON_SAMPLE_DELAY 0xdc89
1503#define mmRLC_SPM_CB_PERFMON_SAMPLE_DELAY 0xdc8a
1504#define mmRLC_SPM_DB_PERFMON_SAMPLE_DELAY 0xdc8b
1505#define mmRLC_SPM_PA_PERFMON_SAMPLE_DELAY 0xdc8c
1506#define mmRLC_SPM_GDS_PERFMON_SAMPLE_DELAY 0xdc8d
1507#define mmRLC_SPM_IA_PERFMON_SAMPLE_DELAY 0xdc8e
1508#define mmRLC_SPM_SC_PERFMON_SAMPLE_DELAY 0xdc90
1509#define mmRLC_SPM_TCC_PERFMON_SAMPLE_DELAY 0xdc91
1510#define mmRLC_SPM_TCA_PERFMON_SAMPLE_DELAY 0xdc92
1511#define mmRLC_SPM_TCP_PERFMON_SAMPLE_DELAY 0xdc93
1512#define mmRLC_SPM_TA_PERFMON_SAMPLE_DELAY 0xdc94
1513#define mmRLC_SPM_TD_PERFMON_SAMPLE_DELAY 0xdc95
1514#define mmRLC_SPM_VGT_PERFMON_SAMPLE_DELAY 0xdc96
1515#define mmRLC_SPM_SPI_PERFMON_SAMPLE_DELAY 0xdc97
1516#define mmRLC_SPM_SQG_PERFMON_SAMPLE_DELAY 0xdc98
1517#define mmRLC_SPM_SX_PERFMON_SAMPLE_DELAY 0xdc9a
1518#define mmRLC_SPM_GLOBAL_MUXSEL_ADDR 0xdc9b
1519#define mmRLC_SPM_GLOBAL_MUXSEL_DATA 0xdc9c
1520#define mmRLC_SPM_RING_RDPTR 0xdc9d
1521#define mmRLC_SPM_SEGMENT_THRESHOLD 0xdc9e
1522#define mmRLC_GPU_IOV_VF_ENABLE 0xfb00
1523#define mmRLC_GPU_IOV_RLC_RESPONSE 0xfb4d
1524#define mmRLC_GPU_IOV_ACTIVE_FCN_ID 0xfb40
1525#define mmSPI_PS_INPUT_CNTL_0 0xa191
1526#define mmSPI_PS_INPUT_CNTL_1 0xa192
1527#define mmSPI_PS_INPUT_CNTL_2 0xa193
1528#define mmSPI_PS_INPUT_CNTL_3 0xa194
1529#define mmSPI_PS_INPUT_CNTL_4 0xa195
1530#define mmSPI_PS_INPUT_CNTL_5 0xa196
1531#define mmSPI_PS_INPUT_CNTL_6 0xa197
1532#define mmSPI_PS_INPUT_CNTL_7 0xa198
1533#define mmSPI_PS_INPUT_CNTL_8 0xa199
1534#define mmSPI_PS_INPUT_CNTL_9 0xa19a
1535#define mmSPI_PS_INPUT_CNTL_10 0xa19b
1536#define mmSPI_PS_INPUT_CNTL_11 0xa19c
1537#define mmSPI_PS_INPUT_CNTL_12 0xa19d
1538#define mmSPI_PS_INPUT_CNTL_13 0xa19e
1539#define mmSPI_PS_INPUT_CNTL_14 0xa19f
1540#define mmSPI_PS_INPUT_CNTL_15 0xa1a0
1541#define mmSPI_PS_INPUT_CNTL_16 0xa1a1
1542#define mmSPI_PS_INPUT_CNTL_17 0xa1a2
1543#define mmSPI_PS_INPUT_CNTL_18 0xa1a3
1544#define mmSPI_PS_INPUT_CNTL_19 0xa1a4
1545#define mmSPI_PS_INPUT_CNTL_20 0xa1a5
1546#define mmSPI_PS_INPUT_CNTL_21 0xa1a6
1547#define mmSPI_PS_INPUT_CNTL_22 0xa1a7
1548#define mmSPI_PS_INPUT_CNTL_23 0xa1a8
1549#define mmSPI_PS_INPUT_CNTL_24 0xa1a9
1550#define mmSPI_PS_INPUT_CNTL_25 0xa1aa
1551#define mmSPI_PS_INPUT_CNTL_26 0xa1ab
1552#define mmSPI_PS_INPUT_CNTL_27 0xa1ac
1553#define mmSPI_PS_INPUT_CNTL_28 0xa1ad
1554#define mmSPI_PS_INPUT_CNTL_29 0xa1ae
1555#define mmSPI_PS_INPUT_CNTL_30 0xa1af
1556#define mmSPI_PS_INPUT_CNTL_31 0xa1b0
1557#define mmSPI_VS_OUT_CONFIG 0xa1b1
1558#define mmSPI_PS_INPUT_ENA 0xa1b3
1559#define mmSPI_PS_INPUT_ADDR 0xa1b4
1560#define mmSPI_INTERP_CONTROL_0 0xa1b5
1561#define mmSPI_PS_IN_CONTROL 0xa1b6
1562#define mmSPI_BARYC_CNTL 0xa1b8
1563#define mmSPI_TMPRING_SIZE 0xa1ba
1564#define mmSPI_SHADER_POS_FORMAT 0xa1c3
1565#define mmSPI_SHADER_Z_FORMAT 0xa1c4
1566#define mmSPI_SHADER_COL_FORMAT 0xa1c5
1567#define mmSPI_ARB_PRIORITY 0x31c0
1568#define mmSPI_ARB_CYCLES_0 0x31c1
1569#define mmSPI_ARB_CYCLES_1 0x31c2
1570#define mmSPI_CDBG_SYS_GFX 0x31c3
1571#define mmSPI_CDBG_SYS_HP3D 0x31c4
1572#define mmSPI_CDBG_SYS_CS0 0x31c5
1573#define mmSPI_CDBG_SYS_CS1 0x31c6
1574#define mmSPI_WCL_PIPE_PERCENT_GFX 0x31c7
1575#define mmSPI_WCL_PIPE_PERCENT_HP3D 0x31c8
1576#define mmSPI_WCL_PIPE_PERCENT_CS0 0x31c9
1577#define mmSPI_WCL_PIPE_PERCENT_CS1 0x31ca
1578#define mmSPI_WCL_PIPE_PERCENT_CS2 0x31cb
1579#define mmSPI_WCL_PIPE_PERCENT_CS3 0x31cc
1580#define mmSPI_WCL_PIPE_PERCENT_CS4 0x31cd
1581#define mmSPI_WCL_PIPE_PERCENT_CS5 0x31ce
1582#define mmSPI_WCL_PIPE_PERCENT_CS6 0x31cf
1583#define mmSPI_WCL_PIPE_PERCENT_CS7 0x31d0
1584#define mmSPI_GDBG_WAVE_CNTL 0x31d1
1585#define mmSPI_GDBG_TRAP_CONFIG 0x31d2
1586#define mmSPI_GDBG_TRAP_MASK 0x31d3
1587#define mmSPI_GDBG_TBA_LO 0x31d4
1588#define mmSPI_GDBG_TBA_HI 0x31d5
1589#define mmSPI_GDBG_TMA_LO 0x31d6
1590#define mmSPI_GDBG_TMA_HI 0x31d7
1591#define mmSPI_GDBG_TRAP_DATA0 0x31d8
1592#define mmSPI_GDBG_TRAP_DATA1 0x31d9
1593#define mmSPI_RESET_DEBUG 0x31da
1594#define mmSPI_COMPUTE_QUEUE_RESET 0x31db
1595#define mmSPI_RESOURCE_RESERVE_CU_0 0x31dc
1596#define mmSPI_RESOURCE_RESERVE_CU_1 0x31dd
1597#define mmSPI_RESOURCE_RESERVE_CU_2 0x31de
1598#define mmSPI_RESOURCE_RESERVE_CU_3 0x31df
1599#define mmSPI_RESOURCE_RESERVE_CU_4 0x31e0
1600#define mmSPI_RESOURCE_RESERVE_CU_5 0x31e1
1601#define mmSPI_RESOURCE_RESERVE_CU_6 0x31e2
1602#define mmSPI_RESOURCE_RESERVE_CU_7 0x31e3
1603#define mmSPI_RESOURCE_RESERVE_CU_8 0x31e4
1604#define mmSPI_RESOURCE_RESERVE_CU_9 0x31e5
1605#define mmSPI_RESOURCE_RESERVE_CU_10 0x31f0
1606#define mmSPI_RESOURCE_RESERVE_CU_11 0x31f1
1607#define mmSPI_RESOURCE_RESERVE_CU_12 0x31f4
1608#define mmSPI_RESOURCE_RESERVE_CU_13 0x31f5
1609#define mmSPI_RESOURCE_RESERVE_CU_14 0x31f6
1610#define mmSPI_RESOURCE_RESERVE_CU_15 0x31f7
1611#define mmSPI_RESOURCE_RESERVE_EN_CU_0 0x31e6
1612#define mmSPI_RESOURCE_RESERVE_EN_CU_1 0x31e7
1613#define mmSPI_RESOURCE_RESERVE_EN_CU_2 0x31e8
1614#define mmSPI_RESOURCE_RESERVE_EN_CU_3 0x31e9
1615#define mmSPI_RESOURCE_RESERVE_EN_CU_4 0x31ea
1616#define mmSPI_RESOURCE_RESERVE_EN_CU_5 0x31eb
1617#define mmSPI_RESOURCE_RESERVE_EN_CU_6 0x31ec
1618#define mmSPI_RESOURCE_RESERVE_EN_CU_7 0x31ed
1619#define mmSPI_RESOURCE_RESERVE_EN_CU_8 0x31ee
1620#define mmSPI_RESOURCE_RESERVE_EN_CU_9 0x31ef
1621#define mmSPI_RESOURCE_RESERVE_EN_CU_10 0x31f2
1622#define mmSPI_RESOURCE_RESERVE_EN_CU_11 0x31f3
1623#define mmSPI_RESOURCE_RESERVE_EN_CU_12 0x31f8
1624#define mmSPI_RESOURCE_RESERVE_EN_CU_13 0x31f9
1625#define mmSPI_RESOURCE_RESERVE_EN_CU_14 0x31fa
1626#define mmSPI_RESOURCE_RESERVE_EN_CU_15 0x31fb
1627#define mmSPI_COMPUTE_WF_CTX_SAVE 0x31fc
1628#define mmSPI_PS_MAX_WAVE_ID 0x243a
1629#define mmSPI_START_PHASE 0x243b
1630#define mmSPI_GFX_CNTL 0x243c
1631#define mmSPI_CONFIG_CNTL 0x2440
1632#define mmSPI_DEBUG_CNTL 0x2441
1633#define mmSPI_DEBUG_READ 0x2442
1634#define mmSPI_DSM_CNTL 0x2443
1635#define mmSPI_EDC_CNT 0x2444
1636#define mmSPI_PERFCOUNTER0_SELECT 0xd980
1637#define mmSPI_PERFCOUNTER1_SELECT 0xd981
1638#define mmSPI_PERFCOUNTER2_SELECT 0xd982
1639#define mmSPI_PERFCOUNTER3_SELECT 0xd983
1640#define mmSPI_PERFCOUNTER0_SELECT1 0xd984
1641#define mmSPI_PERFCOUNTER1_SELECT1 0xd985
1642#define mmSPI_PERFCOUNTER2_SELECT1 0xd986
1643#define mmSPI_PERFCOUNTER3_SELECT1 0xd987
1644#define mmSPI_PERFCOUNTER4_SELECT 0xd988
1645#define mmSPI_PERFCOUNTER5_SELECT 0xd989
1646#define mmSPI_PERFCOUNTER_BINS 0xd98a
1647#define mmSPI_PERFCOUNTER0_HI 0xd180
1648#define mmSPI_PERFCOUNTER0_LO 0xd181
1649#define mmSPI_PERFCOUNTER1_HI 0xd182
1650#define mmSPI_PERFCOUNTER1_LO 0xd183
1651#define mmSPI_PERFCOUNTER2_HI 0xd184
1652#define mmSPI_PERFCOUNTER2_LO 0xd185
1653#define mmSPI_PERFCOUNTER3_HI 0xd186
1654#define mmSPI_PERFCOUNTER3_LO 0xd187
1655#define mmSPI_PERFCOUNTER4_HI 0xd188
1656#define mmSPI_PERFCOUNTER4_LO 0xd189
1657#define mmSPI_PERFCOUNTER5_HI 0xd18a
1658#define mmSPI_PERFCOUNTER5_LO 0xd18b
1659#define mmSPI_CONFIG_CNTL_1 0x244f
1660#define mmSPI_DEBUG_BUSY 0x2450
1661#define mmSPI_CONFIG_CNTL_2 0x2451
1662#define mmCGTS_SM_CTRL_REG 0xf000
1663#define mmCGTS_RD_CTRL_REG 0xf001
1664#define mmCGTS_RD_REG 0xf002
1665#define mmCGTS_TCC_DISABLE 0xf003
1666#define mmCGTS_USER_TCC_DISABLE 0xf004
1667#define mmCGTS_CU0_SP0_CTRL_REG 0xf008
1668#define mmCGTS_CU0_LDS_SQ_CTRL_REG 0xf009
1669#define mmCGTS_CU0_TA_SQC_CTRL_REG 0xf00a
1670#define mmCGTS_CU0_SP1_CTRL_REG 0xf00b
1671#define mmCGTS_CU0_TD_TCP_CTRL_REG 0xf00c
1672#define mmCGTS_CU1_SP0_CTRL_REG 0xf00d
1673#define mmCGTS_CU1_LDS_SQ_CTRL_REG 0xf00e
1674#define mmCGTS_CU1_TA_CTRL_REG 0xf00f
1675#define mmCGTS_CU1_SP1_CTRL_REG 0xf010
1676#define mmCGTS_CU1_TD_TCP_CTRL_REG 0xf011
1677#define mmCGTS_CU2_SP0_CTRL_REG 0xf012
1678#define mmCGTS_CU2_LDS_SQ_CTRL_REG 0xf013
1679#define mmCGTS_CU2_TA_CTRL_REG 0xf014
1680#define mmCGTS_CU2_SP1_CTRL_REG 0xf015
1681#define mmCGTS_CU2_TD_TCP_CTRL_REG 0xf016
1682#define mmCGTS_CU3_SP0_CTRL_REG 0xf017
1683#define mmCGTS_CU3_LDS_SQ_CTRL_REG 0xf018
1684#define mmCGTS_CU3_TA_CTRL_REG 0xf019
1685#define mmCGTS_CU3_SP1_CTRL_REG 0xf01a
1686#define mmCGTS_CU3_TD_TCP_CTRL_REG 0xf01b
1687#define mmCGTS_CU4_SP0_CTRL_REG 0xf01c
1688#define mmCGTS_CU4_LDS_SQ_CTRL_REG 0xf01d
1689#define mmCGTS_CU4_TA_SQC_CTRL_REG 0xf01e
1690#define mmCGTS_CU4_SP1_CTRL_REG 0xf01f
1691#define mmCGTS_CU4_TD_TCP_CTRL_REG 0xf020
1692#define mmCGTS_CU5_SP0_CTRL_REG 0xf021
1693#define mmCGTS_CU5_LDS_SQ_CTRL_REG 0xf022
1694#define mmCGTS_CU5_TA_CTRL_REG 0xf023
1695#define mmCGTS_CU5_SP1_CTRL_REG 0xf024
1696#define mmCGTS_CU5_TD_TCP_CTRL_REG 0xf025
1697#define mmCGTS_CU6_SP0_CTRL_REG 0xf026
1698#define mmCGTS_CU6_LDS_SQ_CTRL_REG 0xf027
1699#define mmCGTS_CU6_TA_CTRL_REG 0xf028
1700#define mmCGTS_CU6_SP1_CTRL_REG 0xf029
1701#define mmCGTS_CU6_TD_TCP_CTRL_REG 0xf02a
1702#define mmCGTS_CU7_SP0_CTRL_REG 0xf02b
1703#define mmCGTS_CU7_LDS_SQ_CTRL_REG 0xf02c
1704#define mmCGTS_CU7_TA_CTRL_REG 0xf02d
1705#define mmCGTS_CU7_SP1_CTRL_REG 0xf02e
1706#define mmCGTS_CU7_TD_TCP_CTRL_REG 0xf02f
1707#define mmCGTS_CU8_SP0_CTRL_REG 0xf030
1708#define mmCGTS_CU8_LDS_SQ_CTRL_REG 0xf031
1709#define mmCGTS_CU8_TA_SQC_CTRL_REG 0xf032
1710#define mmCGTS_CU8_SP1_CTRL_REG 0xf033
1711#define mmCGTS_CU8_TD_TCP_CTRL_REG 0xf034
1712#define mmCGTS_CU9_SP0_CTRL_REG 0xf035
1713#define mmCGTS_CU9_LDS_SQ_CTRL_REG 0xf036
1714#define mmCGTS_CU9_TA_CTRL_REG 0xf037
1715#define mmCGTS_CU9_SP1_CTRL_REG 0xf038
1716#define mmCGTS_CU9_TD_TCP_CTRL_REG 0xf039
1717#define mmCGTS_CU10_SP0_CTRL_REG 0xf03a
1718#define mmCGTS_CU10_LDS_SQ_CTRL_REG 0xf03b
1719#define mmCGTS_CU10_TA_CTRL_REG 0xf03c
1720#define mmCGTS_CU10_SP1_CTRL_REG 0xf03d
1721#define mmCGTS_CU10_TD_TCP_CTRL_REG 0xf03e
1722#define mmCGTS_CU11_SP0_CTRL_REG 0xf03f
1723#define mmCGTS_CU11_LDS_SQ_CTRL_REG 0xf040
1724#define mmCGTS_CU11_TA_CTRL_REG 0xf041
1725#define mmCGTS_CU11_SP1_CTRL_REG 0xf042
1726#define mmCGTS_CU11_TD_TCP_CTRL_REG 0xf043
1727#define mmCGTS_CU12_SP0_CTRL_REG 0xf044
1728#define mmCGTS_CU12_LDS_SQ_CTRL_REG 0xf045
1729#define mmCGTS_CU12_TA_SQC_CTRL_REG 0xf046
1730#define mmCGTS_CU12_SP1_CTRL_REG 0xf047
1731#define mmCGTS_CU12_TD_TCP_CTRL_REG 0xf048
1732#define mmCGTS_CU13_SP0_CTRL_REG 0xf049
1733#define mmCGTS_CU13_LDS_SQ_CTRL_REG 0xf04a
1734#define mmCGTS_CU13_TA_CTRL_REG 0xf04b
1735#define mmCGTS_CU13_SP1_CTRL_REG 0xf04c
1736#define mmCGTS_CU13_TD_TCP_CTRL_REG 0xf04d
1737#define mmCGTS_CU14_SP0_CTRL_REG 0xf04e
1738#define mmCGTS_CU14_LDS_SQ_CTRL_REG 0xf04f
1739#define mmCGTS_CU14_TA_CTRL_REG 0xf050
1740#define mmCGTS_CU14_SP1_CTRL_REG 0xf051
1741#define mmCGTS_CU14_TD_TCP_CTRL_REG 0xf052
1742#define mmCGTS_CU15_SP0_CTRL_REG 0xf053
1743#define mmCGTS_CU15_LDS_SQ_CTRL_REG 0xf054
1744#define mmCGTS_CU15_TA_CTRL_REG 0xf055
1745#define mmCGTS_CU15_SP1_CTRL_REG 0xf056
1746#define mmCGTS_CU15_TD_TCP_CTRL_REG 0xf057
1747#define mmCGTT_SPI_CLK_CTRL 0xf080
1748#define mmCGTT_PC_CLK_CTRL 0xf081
1749#define mmCGTT_BCI_CLK_CTRL 0xf082
1750#define mmSPI_WF_LIFETIME_CNTL 0x24aa
1751#define mmSPI_WF_LIFETIME_LIMIT_0 0x24ab
1752#define mmSPI_WF_LIFETIME_LIMIT_1 0x24ac
1753#define mmSPI_WF_LIFETIME_LIMIT_2 0x24ad
1754#define mmSPI_WF_LIFETIME_LIMIT_3 0x24ae
1755#define mmSPI_WF_LIFETIME_LIMIT_4 0x24af
1756#define mmSPI_WF_LIFETIME_LIMIT_5 0x24b0
1757#define mmSPI_WF_LIFETIME_LIMIT_6 0x24b1
1758#define mmSPI_WF_LIFETIME_LIMIT_7 0x24b2
1759#define mmSPI_WF_LIFETIME_LIMIT_8 0x24b3
1760#define mmSPI_WF_LIFETIME_LIMIT_9 0x24b4
1761#define mmSPI_WF_LIFETIME_STATUS_0 0x24b5
1762#define mmSPI_WF_LIFETIME_STATUS_1 0x24b6
1763#define mmSPI_WF_LIFETIME_STATUS_2 0x24b7
1764#define mmSPI_WF_LIFETIME_STATUS_3 0x24b8
1765#define mmSPI_WF_LIFETIME_STATUS_4 0x24b9
1766#define mmSPI_WF_LIFETIME_STATUS_5 0x24ba
1767#define mmSPI_WF_LIFETIME_STATUS_6 0x24bb
1768#define mmSPI_WF_LIFETIME_STATUS_7 0x24bc
1769#define mmSPI_WF_LIFETIME_STATUS_8 0x24bd
1770#define mmSPI_WF_LIFETIME_STATUS_9 0x24be
1771#define mmSPI_WF_LIFETIME_STATUS_10 0x24bf
1772#define mmSPI_WF_LIFETIME_STATUS_11 0x24c0
1773#define mmSPI_WF_LIFETIME_STATUS_12 0x24c1
1774#define mmSPI_WF_LIFETIME_STATUS_13 0x24c2
1775#define mmSPI_WF_LIFETIME_STATUS_14 0x24c3
1776#define mmSPI_WF_LIFETIME_STATUS_15 0x24c4
1777#define mmSPI_WF_LIFETIME_STATUS_16 0x24c5
1778#define mmSPI_WF_LIFETIME_STATUS_17 0x24c6
1779#define mmSPI_WF_LIFETIME_STATUS_18 0x24c7
1780#define mmSPI_WF_LIFETIME_STATUS_19 0x24c8
1781#define mmSPI_WF_LIFETIME_STATUS_20 0x24c9
1782#define mmSPI_WF_LIFETIME_DEBUG 0x24ca
1783#define mmSPI_SLAVE_DEBUG_BUSY 0x24d3
1784#define mmSPI_LB_CTR_CTRL 0x24d4
1785#define mmSPI_LB_CU_MASK 0x24d5
1786#define mmSPI_LB_DATA_REG 0x24d6
1787#define mmSPI_PG_ENABLE_STATIC_CU_MASK 0x24d7
1788#define mmSPI_GDS_CREDITS 0x24d8
1789#define mmSPI_SX_EXPORT_BUFFER_SIZES 0x24d9
1790#define mmSPI_SX_SCOREBOARD_BUFFER_SIZES 0x24da
1791#define mmSPI_CSQ_WF_ACTIVE_STATUS 0x24db
1792#define mmSPI_CSQ_WF_ACTIVE_COUNT_0 0x24dc
1793#define mmSPI_CSQ_WF_ACTIVE_COUNT_1 0x24dd
1794#define mmSPI_CSQ_WF_ACTIVE_COUNT_2 0x24de
1795#define mmSPI_CSQ_WF_ACTIVE_COUNT_3 0x24df
1796#define mmSPI_CSQ_WF_ACTIVE_COUNT_4 0x24e0
1797#define mmSPI_CSQ_WF_ACTIVE_COUNT_5 0x24e1
1798#define mmSPI_CSQ_WF_ACTIVE_COUNT_6 0x24e2
1799#define mmSPI_CSQ_WF_ACTIVE_COUNT_7 0x24e3
1800#define mmBCI_DEBUG_READ 0x24eb
1801#define mmSPI_P0_TRAP_SCREEN_PSBA_LO 0x24ec
1802#define mmSPI_P0_TRAP_SCREEN_PSBA_HI 0x24ed
1803#define mmSPI_P0_TRAP_SCREEN_PSMA_LO 0x24ee
1804#define mmSPI_P0_TRAP_SCREEN_PSMA_HI 0x24ef
1805#define mmSPI_P0_TRAP_SCREEN_GPR_MIN 0x24f0
1806#define mmSPI_P1_TRAP_SCREEN_PSBA_LO 0x24f1
1807#define mmSPI_P1_TRAP_SCREEN_PSBA_HI 0x24f2
1808#define mmSPI_P1_TRAP_SCREEN_PSMA_LO 0x24f3
1809#define mmSPI_P1_TRAP_SCREEN_PSMA_HI 0x24f4
1810#define mmSPI_P1_TRAP_SCREEN_GPR_MIN 0x24f5
1811#define mmSPI_SHADER_TBA_LO_PS 0x2c00
1812#define mmSPI_SHADER_TBA_HI_PS 0x2c01
1813#define mmSPI_SHADER_TMA_LO_PS 0x2c02
1814#define mmSPI_SHADER_TMA_HI_PS 0x2c03
1815#define mmSPI_SHADER_PGM_LO_PS 0x2c08
1816#define mmSPI_SHADER_PGM_HI_PS 0x2c09
1817#define mmSPI_SHADER_PGM_RSRC1_PS 0x2c0a
1818#define mmSPI_SHADER_PGM_RSRC2_PS 0x2c0b
1819#define mmSPI_SHADER_PGM_RSRC3_PS 0x2c07
1820#define mmSPI_SHADER_USER_DATA_PS_0 0x2c0c
1821#define mmSPI_SHADER_USER_DATA_PS_1 0x2c0d
1822#define mmSPI_SHADER_USER_DATA_PS_2 0x2c0e
1823#define mmSPI_SHADER_USER_DATA_PS_3 0x2c0f
1824#define mmSPI_SHADER_USER_DATA_PS_4 0x2c10
1825#define mmSPI_SHADER_USER_DATA_PS_5 0x2c11
1826#define mmSPI_SHADER_USER_DATA_PS_6 0x2c12
1827#define mmSPI_SHADER_USER_DATA_PS_7 0x2c13
1828#define mmSPI_SHADER_USER_DATA_PS_8 0x2c14
1829#define mmSPI_SHADER_USER_DATA_PS_9 0x2c15
1830#define mmSPI_SHADER_USER_DATA_PS_10 0x2c16
1831#define mmSPI_SHADER_USER_DATA_PS_11 0x2c17
1832#define mmSPI_SHADER_USER_DATA_PS_12 0x2c18
1833#define mmSPI_SHADER_USER_DATA_PS_13 0x2c19
1834#define mmSPI_SHADER_USER_DATA_PS_14 0x2c1a
1835#define mmSPI_SHADER_USER_DATA_PS_15 0x2c1b
1836#define mmSPI_SHADER_TBA_LO_VS 0x2c40
1837#define mmSPI_SHADER_TBA_HI_VS 0x2c41
1838#define mmSPI_SHADER_TMA_LO_VS 0x2c42
1839#define mmSPI_SHADER_TMA_HI_VS 0x2c43
1840#define mmSPI_SHADER_PGM_LO_VS 0x2c48
1841#define mmSPI_SHADER_PGM_HI_VS 0x2c49
1842#define mmSPI_SHADER_PGM_RSRC1_VS 0x2c4a
1843#define mmSPI_SHADER_PGM_RSRC2_VS 0x2c4b
1844#define mmSPI_SHADER_PGM_RSRC3_VS 0x2c46
1845#define mmSPI_SHADER_LATE_ALLOC_VS 0x2c47
1846#define mmSPI_SHADER_USER_DATA_VS_0 0x2c4c
1847#define mmSPI_SHADER_USER_DATA_VS_1 0x2c4d
1848#define mmSPI_SHADER_USER_DATA_VS_2 0x2c4e
1849#define mmSPI_SHADER_USER_DATA_VS_3 0x2c4f
1850#define mmSPI_SHADER_USER_DATA_VS_4 0x2c50
1851#define mmSPI_SHADER_USER_DATA_VS_5 0x2c51
1852#define mmSPI_SHADER_USER_DATA_VS_6 0x2c52
1853#define mmSPI_SHADER_USER_DATA_VS_7 0x2c53
1854#define mmSPI_SHADER_USER_DATA_VS_8 0x2c54
1855#define mmSPI_SHADER_USER_DATA_VS_9 0x2c55
1856#define mmSPI_SHADER_USER_DATA_VS_10 0x2c56
1857#define mmSPI_SHADER_USER_DATA_VS_11 0x2c57
1858#define mmSPI_SHADER_USER_DATA_VS_12 0x2c58
1859#define mmSPI_SHADER_USER_DATA_VS_13 0x2c59
1860#define mmSPI_SHADER_USER_DATA_VS_14 0x2c5a
1861#define mmSPI_SHADER_USER_DATA_VS_15 0x2c5b
1862#define mmSPI_SHADER_PGM_RSRC2_ES_VS 0x2c7c
1863#define mmSPI_SHADER_PGM_RSRC2_LS_VS 0x2c7d
1864#define mmSPI_SHADER_TBA_LO_GS 0x2c80
1865#define mmSPI_SHADER_TBA_HI_GS 0x2c81
1866#define mmSPI_SHADER_TMA_LO_GS 0x2c82
1867#define mmSPI_SHADER_TMA_HI_GS 0x2c83
1868#define mmSPI_SHADER_PGM_LO_GS 0x2c88
1869#define mmSPI_SHADER_PGM_HI_GS 0x2c89
1870#define mmSPI_SHADER_PGM_RSRC1_GS 0x2c8a
1871#define mmSPI_SHADER_PGM_RSRC2_GS 0x2c8b
1872#define mmSPI_SHADER_PGM_RSRC3_GS 0x2c87
1873#define mmSPI_SHADER_USER_DATA_GS_0 0x2c8c
1874#define mmSPI_SHADER_USER_DATA_GS_1 0x2c8d
1875#define mmSPI_SHADER_USER_DATA_GS_2 0x2c8e
1876#define mmSPI_SHADER_USER_DATA_GS_3 0x2c8f
1877#define mmSPI_SHADER_USER_DATA_GS_4 0x2c90
1878#define mmSPI_SHADER_USER_DATA_GS_5 0x2c91
1879#define mmSPI_SHADER_USER_DATA_GS_6 0x2c92
1880#define mmSPI_SHADER_USER_DATA_GS_7 0x2c93
1881#define mmSPI_SHADER_USER_DATA_GS_8 0x2c94
1882#define mmSPI_SHADER_USER_DATA_GS_9 0x2c95
1883#define mmSPI_SHADER_USER_DATA_GS_10 0x2c96
1884#define mmSPI_SHADER_USER_DATA_GS_11 0x2c97
1885#define mmSPI_SHADER_USER_DATA_GS_12 0x2c98
1886#define mmSPI_SHADER_USER_DATA_GS_13 0x2c99
1887#define mmSPI_SHADER_USER_DATA_GS_14 0x2c9a
1888#define mmSPI_SHADER_USER_DATA_GS_15 0x2c9b
1889#define mmSPI_SHADER_PGM_RSRC2_ES_GS 0x2cbc
1890#define mmSPI_SHADER_TBA_LO_ES 0x2cc0
1891#define mmSPI_SHADER_TBA_HI_ES 0x2cc1
1892#define mmSPI_SHADER_TMA_LO_ES 0x2cc2
1893#define mmSPI_SHADER_TMA_HI_ES 0x2cc3
1894#define mmSPI_SHADER_PGM_LO_ES 0x2cc8
1895#define mmSPI_SHADER_PGM_HI_ES 0x2cc9
1896#define mmSPI_SHADER_PGM_RSRC1_ES 0x2cca
1897#define mmSPI_SHADER_PGM_RSRC2_ES 0x2ccb
1898#define mmSPI_SHADER_PGM_RSRC3_ES 0x2cc7
1899#define mmSPI_SHADER_USER_DATA_ES_0 0x2ccc
1900#define mmSPI_SHADER_USER_DATA_ES_1 0x2ccd
1901#define mmSPI_SHADER_USER_DATA_ES_2 0x2cce
1902#define mmSPI_SHADER_USER_DATA_ES_3 0x2ccf
1903#define mmSPI_SHADER_USER_DATA_ES_4 0x2cd0
1904#define mmSPI_SHADER_USER_DATA_ES_5 0x2cd1
1905#define mmSPI_SHADER_USER_DATA_ES_6 0x2cd2
1906#define mmSPI_SHADER_USER_DATA_ES_7 0x2cd3
1907#define mmSPI_SHADER_USER_DATA_ES_8 0x2cd4
1908#define mmSPI_SHADER_USER_DATA_ES_9 0x2cd5
1909#define mmSPI_SHADER_USER_DATA_ES_10 0x2cd6
1910#define mmSPI_SHADER_USER_DATA_ES_11 0x2cd7
1911#define mmSPI_SHADER_USER_DATA_ES_12 0x2cd8
1912#define mmSPI_SHADER_USER_DATA_ES_13 0x2cd9
1913#define mmSPI_SHADER_USER_DATA_ES_14 0x2cda
1914#define mmSPI_SHADER_USER_DATA_ES_15 0x2cdb
1915#define mmSPI_SHADER_PGM_RSRC2_LS_ES 0x2cfd
1916#define mmSPI_SHADER_TBA_LO_HS 0x2d00
1917#define mmSPI_SHADER_TBA_HI_HS 0x2d01
1918#define mmSPI_SHADER_TMA_LO_HS 0x2d02
1919#define mmSPI_SHADER_TMA_HI_HS 0x2d03
1920#define mmSPI_SHADER_PGM_LO_HS 0x2d08
1921#define mmSPI_SHADER_PGM_HI_HS 0x2d09
1922#define mmSPI_SHADER_PGM_RSRC1_HS 0x2d0a
1923#define mmSPI_SHADER_PGM_RSRC2_HS 0x2d0b
1924#define mmSPI_SHADER_PGM_RSRC3_HS 0x2d07
1925#define mmSPI_SHADER_USER_DATA_HS_0 0x2d0c
1926#define mmSPI_SHADER_USER_DATA_HS_1 0x2d0d
1927#define mmSPI_SHADER_USER_DATA_HS_2 0x2d0e
1928#define mmSPI_SHADER_USER_DATA_HS_3 0x2d0f
1929#define mmSPI_SHADER_USER_DATA_HS_4 0x2d10
1930#define mmSPI_SHADER_USER_DATA_HS_5 0x2d11
1931#define mmSPI_SHADER_USER_DATA_HS_6 0x2d12
1932#define mmSPI_SHADER_USER_DATA_HS_7 0x2d13
1933#define mmSPI_SHADER_USER_DATA_HS_8 0x2d14
1934#define mmSPI_SHADER_USER_DATA_HS_9 0x2d15
1935#define mmSPI_SHADER_USER_DATA_HS_10 0x2d16
1936#define mmSPI_SHADER_USER_DATA_HS_11 0x2d17
1937#define mmSPI_SHADER_USER_DATA_HS_12 0x2d18
1938#define mmSPI_SHADER_USER_DATA_HS_13 0x2d19
1939#define mmSPI_SHADER_USER_DATA_HS_14 0x2d1a
1940#define mmSPI_SHADER_USER_DATA_HS_15 0x2d1b
1941#define mmSPI_SHADER_PGM_RSRC2_LS_HS 0x2d3d
1942#define mmSPI_SHADER_TBA_LO_LS 0x2d40
1943#define mmSPI_SHADER_TBA_HI_LS 0x2d41
1944#define mmSPI_SHADER_TMA_LO_LS 0x2d42
1945#define mmSPI_SHADER_TMA_HI_LS 0x2d43
1946#define mmSPI_SHADER_PGM_LO_LS 0x2d48
1947#define mmSPI_SHADER_PGM_HI_LS 0x2d49
1948#define mmSPI_SHADER_PGM_RSRC1_LS 0x2d4a
1949#define mmSPI_SHADER_PGM_RSRC2_LS 0x2d4b
1950#define mmSPI_SHADER_PGM_RSRC3_LS 0x2d47
1951#define mmSPI_SHADER_USER_DATA_LS_0 0x2d4c
1952#define mmSPI_SHADER_USER_DATA_LS_1 0x2d4d
1953#define mmSPI_SHADER_USER_DATA_LS_2 0x2d4e
1954#define mmSPI_SHADER_USER_DATA_LS_3 0x2d4f
1955#define mmSPI_SHADER_USER_DATA_LS_4 0x2d50
1956#define mmSPI_SHADER_USER_DATA_LS_5 0x2d51
1957#define mmSPI_SHADER_USER_DATA_LS_6 0x2d52
1958#define mmSPI_SHADER_USER_DATA_LS_7 0x2d53
1959#define mmSPI_SHADER_USER_DATA_LS_8 0x2d54
1960#define mmSPI_SHADER_USER_DATA_LS_9 0x2d55
1961#define mmSPI_SHADER_USER_DATA_LS_10 0x2d56
1962#define mmSPI_SHADER_USER_DATA_LS_11 0x2d57
1963#define mmSPI_SHADER_USER_DATA_LS_12 0x2d58
1964#define mmSPI_SHADER_USER_DATA_LS_13 0x2d59
1965#define mmSPI_SHADER_USER_DATA_LS_14 0x2d5a
1966#define mmSPI_SHADER_USER_DATA_LS_15 0x2d5b
1967#define mmSQ_CONFIG 0x2300
1968#define mmSQC_CONFIG 0x2301
1969#define mmSQC_CACHES 0xc348
1970#define mmSQC_WRITEBACK 0xc349
1971#define mmSQC_DSM_CNTL 0x230f
1972#define mmSQ_RANDOM_WAVE_PRI 0x2303
1973#define mmSQ_REG_CREDITS 0x2304
1974#define mmSQ_FIFO_SIZES 0x2305
1975#define mmSQ_DSM_CNTL 0x2306
1976#define mmCC_GC_SHADER_RATE_CONFIG 0x2312
1977#define mmGC_USER_SHADER_RATE_CONFIG 0x2313
1978#define mmSQ_INTERRUPT_AUTO_MASK 0x2314
1979#define mmSQ_INTERRUPT_MSG_CTRL 0x2315
1980#define mmSQ_PERFCOUNTER_CTRL 0xd9e0
1981#define mmSQ_PERFCOUNTER_MASK 0xd9e1
1982#define mmSQ_PERFCOUNTER_CTRL2 0xd9e2
1983#define mmCC_SQC_BANK_DISABLE 0x2307
1984#define mmUSER_SQC_BANK_DISABLE 0x2308
1985#define mmSQ_PERFCOUNTER0_LO 0xd1c0
1986#define mmSQ_PERFCOUNTER1_LO 0xd1c2
1987#define mmSQ_PERFCOUNTER2_LO 0xd1c4
1988#define mmSQ_PERFCOUNTER3_LO 0xd1c6
1989#define mmSQ_PERFCOUNTER4_LO 0xd1c8
1990#define mmSQ_PERFCOUNTER5_LO 0xd1ca
1991#define mmSQ_PERFCOUNTER6_LO 0xd1cc
1992#define mmSQ_PERFCOUNTER7_LO 0xd1ce
1993#define mmSQ_PERFCOUNTER8_LO 0xd1d0
1994#define mmSQ_PERFCOUNTER9_LO 0xd1d2
1995#define mmSQ_PERFCOUNTER10_LO 0xd1d4
1996#define mmSQ_PERFCOUNTER11_LO 0xd1d6
1997#define mmSQ_PERFCOUNTER12_LO 0xd1d8
1998#define mmSQ_PERFCOUNTER13_LO 0xd1da
1999#define mmSQ_PERFCOUNTER14_LO 0xd1dc
2000#define mmSQ_PERFCOUNTER15_LO 0xd1de
2001#define mmSQ_PERFCOUNTER0_HI 0xd1c1
2002#define mmSQ_PERFCOUNTER1_HI 0xd1c3
2003#define mmSQ_PERFCOUNTER2_HI 0xd1c5
2004#define mmSQ_PERFCOUNTER3_HI 0xd1c7
2005#define mmSQ_PERFCOUNTER4_HI 0xd1c9
2006#define mmSQ_PERFCOUNTER5_HI 0xd1cb
2007#define mmSQ_PERFCOUNTER6_HI 0xd1cd
2008#define mmSQ_PERFCOUNTER7_HI 0xd1cf
2009#define mmSQ_PERFCOUNTER8_HI 0xd1d1
2010#define mmSQ_PERFCOUNTER9_HI 0xd1d3
2011#define mmSQ_PERFCOUNTER10_HI 0xd1d5
2012#define mmSQ_PERFCOUNTER11_HI 0xd1d7
2013#define mmSQ_PERFCOUNTER12_HI 0xd1d9
2014#define mmSQ_PERFCOUNTER13_HI 0xd1db
2015#define mmSQ_PERFCOUNTER14_HI 0xd1dd
2016#define mmSQ_PERFCOUNTER15_HI 0xd1df
2017#define mmSQ_PERFCOUNTER0_SELECT 0xd9c0
2018#define mmSQ_PERFCOUNTER1_SELECT 0xd9c1
2019#define mmSQ_PERFCOUNTER2_SELECT 0xd9c2
2020#define mmSQ_PERFCOUNTER3_SELECT 0xd9c3
2021#define mmSQ_PERFCOUNTER4_SELECT 0xd9c4
2022#define mmSQ_PERFCOUNTER5_SELECT 0xd9c5
2023#define mmSQ_PERFCOUNTER6_SELECT 0xd9c6
2024#define mmSQ_PERFCOUNTER7_SELECT 0xd9c7
2025#define mmSQ_PERFCOUNTER8_SELECT 0xd9c8
2026#define mmSQ_PERFCOUNTER9_SELECT 0xd9c9
2027#define mmSQ_PERFCOUNTER10_SELECT 0xd9ca
2028#define mmSQ_PERFCOUNTER11_SELECT 0xd9cb
2029#define mmSQ_PERFCOUNTER12_SELECT 0xd9cc
2030#define mmSQ_PERFCOUNTER13_SELECT 0xd9cd
2031#define mmSQ_PERFCOUNTER14_SELECT 0xd9ce
2032#define mmSQ_PERFCOUNTER15_SELECT 0xd9cf
2033#define mmCGTT_SQ_CLK_CTRL 0xf08c
2034#define mmCGTT_SQG_CLK_CTRL 0xf08d
2035#define mmSQ_ALU_CLK_CTRL 0xf08e
2036#define mmSQ_TEX_CLK_CTRL 0xf08f
2037#define mmSQ_LDS_CLK_CTRL 0xf090
2038#define mmSQ_POWER_THROTTLE 0xf091
2039#define mmSQ_POWER_THROTTLE2 0xf092
2040#define mmSQ_TIME_HI 0x237c
2041#define mmSQ_TIME_LO 0x237d
2042#define mmSQ_THREAD_TRACE_BASE 0xc330
2043#define mmSQ_THREAD_TRACE_BASE2 0xc337
2044#define mmSQ_THREAD_TRACE_SIZE 0xc331
2045#define mmSQ_THREAD_TRACE_MASK 0xc332
2046#define mmSQ_THREAD_TRACE_USERDATA_0 0xc340
2047#define mmSQ_THREAD_TRACE_USERDATA_1 0xc341
2048#define mmSQ_THREAD_TRACE_USERDATA_2 0xc342
2049#define mmSQ_THREAD_TRACE_USERDATA_3 0xc343
2050#define mmSQ_THREAD_TRACE_MODE 0xc336
2051#define mmSQ_THREAD_TRACE_CTRL 0xc335
2052#define mmSQ_THREAD_TRACE_TOKEN_MASK 0xc333
2053#define mmSQ_THREAD_TRACE_TOKEN_MASK2 0xc338
2054#define mmSQ_THREAD_TRACE_PERF_MASK 0xc334
2055#define mmSQ_THREAD_TRACE_WPTR 0xc339
2056#define mmSQ_THREAD_TRACE_STATUS 0xc33a
2057#define mmSQ_THREAD_TRACE_CNTR 0x2390
2058#define mmSQ_THREAD_TRACE_HIWATER 0xc33b
2059#define mmSQ_LB_CTR_CTRL 0x2398
2060#define mmSQ_LB_DATA_ALU_CYCLES 0x2399
2061#define mmSQ_LB_DATA_TEX_CYCLES 0x239a
2062#define mmSQ_LB_DATA_ALU_STALLS 0x239b
2063#define mmSQ_LB_DATA_TEX_STALLS 0x239c
2064#define mmSQC_EDC_CNT 0x23a0
2065#define mmSQ_EDC_SEC_CNT 0x23a1
2066#define mmSQ_EDC_DED_CNT 0x23a2
2067#define mmSQ_EDC_INFO 0x23a3
2068#define mmSQ_BUF_RSRC_WORD0 0x23c0
2069#define mmSQ_BUF_RSRC_WORD1 0x23c1
2070#define mmSQ_BUF_RSRC_WORD2 0x23c2
2071#define mmSQ_BUF_RSRC_WORD3 0x23c3
2072#define mmSQ_IMG_RSRC_WORD0 0x23c4
2073#define mmSQ_IMG_RSRC_WORD1 0x23c5
2074#define mmSQ_IMG_RSRC_WORD2 0x23c6
2075#define mmSQ_IMG_RSRC_WORD3 0x23c7
2076#define mmSQ_IMG_RSRC_WORD4 0x23c8
2077#define mmSQ_IMG_RSRC_WORD5 0x23c9
2078#define mmSQ_IMG_RSRC_WORD6 0x23ca
2079#define mmSQ_IMG_RSRC_WORD7 0x23cb
2080#define mmSQ_IMG_SAMP_WORD0 0x23cc
2081#define mmSQ_IMG_SAMP_WORD1 0x23cd
2082#define mmSQ_IMG_SAMP_WORD2 0x23ce
2083#define mmSQ_IMG_SAMP_WORD3 0x23cf
2084#define mmSQ_FLAT_SCRATCH_WORD0 0x23d0
2085#define mmSQ_FLAT_SCRATCH_WORD1 0x23d1
2086#define mmSQ_M0_GPR_IDX_WORD 0x23d2
2087#define mmSQ_IND_INDEX 0x2378
2088#define mmSQ_CMD 0x237b
2089#define mmSQ_IND_DATA 0x2379
2090#define mmSQ_REG_TIMESTAMP 0x2374
2091#define mmSQ_CMD_TIMESTAMP 0x2375
2092#define mmSQ_HV_VMID_CTRL 0xf840
2093#define ixSQ_WAVE_INST_DW0 0x1a
2094#define ixSQ_WAVE_INST_DW1 0x1b
2095#define ixSQ_WAVE_PC_LO 0x18
2096#define ixSQ_WAVE_PC_HI 0x19
2097#define ixSQ_WAVE_IB_DBG0 0x1c
2098#define ixSQ_WAVE_IB_DBG1 0x1d
2099#define ixSQ_WAVE_EXEC_LO 0x27e
2100#define ixSQ_WAVE_EXEC_HI 0x27f
2101#define ixSQ_WAVE_STATUS 0x12
2102#define ixSQ_WAVE_MODE 0x11
2103#define ixSQ_WAVE_TRAPSTS 0x13
2104#define ixSQ_WAVE_HW_ID 0x14
2105#define ixSQ_WAVE_GPR_ALLOC 0x15
2106#define ixSQ_WAVE_LDS_ALLOC 0x16
2107#define ixSQ_WAVE_IB_STS 0x17
2108#define ixSQ_WAVE_M0 0x27c
2109#define ixSQ_WAVE_TBA_LO 0x26c
2110#define ixSQ_WAVE_TBA_HI 0x26d
2111#define ixSQ_WAVE_TMA_LO 0x26e
2112#define ixSQ_WAVE_TMA_HI 0x26f
2113#define ixSQ_WAVE_TTMP0 0x270
2114#define ixSQ_WAVE_TTMP1 0x271
2115#define ixSQ_WAVE_TTMP2 0x272
2116#define ixSQ_WAVE_TTMP3 0x273
2117#define ixSQ_WAVE_TTMP4 0x274
2118#define ixSQ_WAVE_TTMP5 0x275
2119#define ixSQ_WAVE_TTMP6 0x276
2120#define ixSQ_WAVE_TTMP7 0x277
2121#define ixSQ_WAVE_TTMP8 0x278
2122#define ixSQ_WAVE_TTMP9 0x279
2123#define ixSQ_WAVE_TTMP10 0x27a
2124#define ixSQ_WAVE_TTMP11 0x27b
2125#define mmSQ_DEBUG_STS_GLOBAL 0x2309
2126#define mmSQ_DEBUG_STS_GLOBAL2 0x2310
2127#define mmSQ_DEBUG_STS_GLOBAL3 0x2311
2128#define ixSQ_DEBUG_STS_LOCAL 0x8
2129#define ixSQ_DEBUG_CTRL_LOCAL 0x9
2130#define mmSH_MEM_BASES 0x230a
2131#define mmSH_MEM_APE1_BASE 0x230b
2132#define mmSH_MEM_APE1_LIMIT 0x230c
2133#define mmSH_MEM_CONFIG 0x230d
2134#define mmSQ_THREAD_TRACE_WORD_CMN 0x23b0
2135#define mmSQ_THREAD_TRACE_WORD_INST 0x23b0
2136#define mmSQ_THREAD_TRACE_WORD_INST_PC_1_OF_2 0x23b0
2137#define mmSQ_THREAD_TRACE_WORD_INST_PC_2_OF_2 0x23b1
2138#define mmSQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2 0x23b0
2139#define mmSQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2 0x23b1
2140#define mmSQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2 0x23b0
2141#define mmSQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2 0x23b1
2142#define mmSQ_THREAD_TRACE_WORD_WAVE 0x23b0
2143#define mmSQ_THREAD_TRACE_WORD_MISC 0x23b0
2144#define mmSQ_THREAD_TRACE_WORD_WAVE_START 0x23b0
2145#define mmSQ_THREAD_TRACE_WORD_REG_1_OF_2 0x23b0
2146#define mmSQ_THREAD_TRACE_WORD_REG_2_OF_2 0x23b0
2147#define mmSQ_THREAD_TRACE_WORD_REG_CS_1_OF_2 0x23b0
2148#define mmSQ_THREAD_TRACE_WORD_REG_CS_2_OF_2 0x23b0
2149#define mmSQ_THREAD_TRACE_WORD_EVENT 0x23b0
2150#define mmSQ_THREAD_TRACE_WORD_ISSUE 0x23b0
2151#define mmSQ_THREAD_TRACE_WORD_PERF_1_OF_2 0x23b0
2152#define mmSQ_THREAD_TRACE_WORD_PERF_2_OF_2 0x23b1
2153#define mmSQ_WREXEC_EXEC_LO 0x23b1
2154#define mmSQ_WREXEC_EXEC_HI 0x23b1
2155#define mmSQC_GATCL1_CNTL 0x23b2
2156#define mmSQC_ATC_EDC_GATCL1_CNT 0x23b3
2157#define ixSQ_INTERRUPT_WORD_CMN 0x20c0
2158#define ixSQ_INTERRUPT_WORD_AUTO 0x20c0
2159#define ixSQ_INTERRUPT_WORD_WAVE 0x20c0
2160#define mmSQ_SOP2 0x237f
2161#define mmSQ_VOP1 0x237f
2162#define mmSQ_MTBUF_1 0x237f
2163#define mmSQ_EXP_1 0x237f
2164#define mmSQ_MUBUF_1 0x237f
2165#define mmSQ_SMEM_1 0x237f
2166#define mmSQ_INST 0x237f
2167#define mmSQ_EXP_0 0x237f
2168#define mmSQ_MUBUF_0 0x237f
2169#define mmSQ_VOP_SDWA 0x237f
2170#define mmSQ_VOP3_0 0x237f
2171#define mmSQ_VOP2 0x237f
2172#define mmSQ_MTBUF_0 0x237f
2173#define mmSQ_SOPP 0x237f
2174#define mmSQ_FLAT_0 0x237f
2175#define mmSQ_VOP3_0_SDST_ENC 0x237f
2176#define mmSQ_MIMG_1 0x237f
2177#define mmSQ_SOP1 0x237f
2178#define mmSQ_SOPC 0x237f
2179#define mmSQ_FLAT_1 0x237f
2180#define mmSQ_DS_1 0x237f
2181#define mmSQ_VOP3_1 0x237f
2182#define mmSQ_SMEM_0 0x237f
2183#define mmSQ_MIMG_0 0x237f
2184#define mmSQ_SOPK 0x237f
2185#define mmSQ_DS_0 0x237f
2186#define mmSQ_VOP_DPP 0x237f
2187#define mmSQ_VOPC 0x237f
2188#define mmSQ_VINTRP 0x237f
2189#define mmCGTT_SX_CLK_CTRL0 0xf094
2190#define mmCGTT_SX_CLK_CTRL1 0xf095
2191#define mmCGTT_SX_CLK_CTRL2 0xf096
2192#define mmCGTT_SX_CLK_CTRL3 0xf097
2193#define mmCGTT_SX_CLK_CTRL4 0xf098
2194#define mmSX_DEBUG_BUSY 0x2414
2195#define mmSX_DEBUG_BUSY_2 0x2415
2196#define mmSX_DEBUG_BUSY_3 0x2416
2197#define mmSX_DEBUG_BUSY_4 0x2417
2198#define mmSX_DEBUG_1 0x2418
2199#define mmSX_PERFCOUNTER0_SELECT 0xda40
2200#define mmSX_PERFCOUNTER1_SELECT 0xda41
2201#define mmSX_PERFCOUNTER2_SELECT 0xda42
2202#define mmSX_PERFCOUNTER3_SELECT 0xda43
2203#define mmSX_PERFCOUNTER0_SELECT1 0xda44
2204#define mmSX_PERFCOUNTER1_SELECT1 0xda45
2205#define mmSX_PERFCOUNTER0_LO 0xd240
2206#define mmSX_PERFCOUNTER0_HI 0xd241
2207#define mmSX_PERFCOUNTER1_LO 0xd242
2208#define mmSX_PERFCOUNTER1_HI 0xd243
2209#define mmSX_PERFCOUNTER2_LO 0xd244
2210#define mmSX_PERFCOUNTER2_HI 0xd245
2211#define mmSX_PERFCOUNTER3_LO 0xd246
2212#define mmSX_PERFCOUNTER3_HI 0xd247
2213#define mmSX_PS_DOWNCONVERT 0xa1d5
2214#define mmSX_BLEND_OPT_EPSILON 0xa1d6
2215#define mmSX_BLEND_OPT_CONTROL 0xa1d7
2216#define mmSX_MRT0_BLEND_OPT 0xa1d8
2217#define mmSX_MRT1_BLEND_OPT 0xa1d9
2218#define mmSX_MRT2_BLEND_OPT 0xa1da
2219#define mmSX_MRT3_BLEND_OPT 0xa1db
2220#define mmSX_MRT4_BLEND_OPT 0xa1dc
2221#define mmSX_MRT5_BLEND_OPT 0xa1dd
2222#define mmSX_MRT6_BLEND_OPT 0xa1de
2223#define mmSX_MRT7_BLEND_OPT 0xa1df
2224#define mmTCC_CTRL 0x2b80
2225#define mmTCC_EDC_CNT 0x2b82
2226#define mmTCC_REDUNDANCY 0x2b83
2227#define mmTCC_EXE_DISABLE 0x2b84
2228#define mmTCC_DSM_CNTL 0x2b85
2229#define mmTCC_CGTT_SCLK_CTRL 0xf0ac
2230#define mmTCA_CGTT_SCLK_CTRL 0xf0ad
2231#define mmTCC_PERFCOUNTER0_SELECT 0xdb80
2232#define mmTCC_PERFCOUNTER1_SELECT 0xdb82
2233#define mmTCC_PERFCOUNTER0_SELECT1 0xdb81
2234#define mmTCC_PERFCOUNTER1_SELECT1 0xdb83
2235#define mmTCC_PERFCOUNTER2_SELECT 0xdb84
2236#define mmTCC_PERFCOUNTER3_SELECT 0xdb85
2237#define mmTCC_PERFCOUNTER0_LO 0xd380
2238#define mmTCC_PERFCOUNTER1_LO 0xd382
2239#define mmTCC_PERFCOUNTER2_LO 0xd384
2240#define mmTCC_PERFCOUNTER3_LO 0xd386
2241#define mmTCC_PERFCOUNTER0_HI 0xd381
2242#define mmTCC_PERFCOUNTER1_HI 0xd383
2243#define mmTCC_PERFCOUNTER2_HI 0xd385
2244#define mmTCC_PERFCOUNTER3_HI 0xd387
2245#define mmTCA_CTRL 0x2bc0
2246#define mmTCA_PERFCOUNTER0_SELECT 0xdb90
2247#define mmTCA_PERFCOUNTER1_SELECT 0xdb92
2248#define mmTCA_PERFCOUNTER0_SELECT1 0xdb91
2249#define mmTCA_PERFCOUNTER1_SELECT1 0xdb93
2250#define mmTCA_PERFCOUNTER2_SELECT 0xdb94
2251#define mmTCA_PERFCOUNTER3_SELECT 0xdb95
2252#define mmTCA_PERFCOUNTER0_LO 0xd390
2253#define mmTCA_PERFCOUNTER1_LO 0xd392
2254#define mmTCA_PERFCOUNTER2_LO 0xd394
2255#define mmTCA_PERFCOUNTER3_LO 0xd396
2256#define mmTCA_PERFCOUNTER0_HI 0xd391
2257#define mmTCA_PERFCOUNTER1_HI 0xd393
2258#define mmTCA_PERFCOUNTER2_HI 0xd395
2259#define mmTCA_PERFCOUNTER3_HI 0xd397
2260#define mmTA_BC_BASE_ADDR 0xa020
2261#define mmTA_BC_BASE_ADDR_HI 0xa021
2262#define mmTD_CNTL 0x2525
2263#define mmTD_STATUS 0x2526
2264#define mmTD_DEBUG_INDEX 0x2528
2265#define mmTD_DEBUG_DATA 0x2529
2266#define mmTD_DSM_CNTL 0x252f
2267#define mmTD_PERFCOUNTER0_SELECT 0xdb00
2268#define mmTD_PERFCOUNTER1_SELECT 0xdb02
2269#define mmTD_PERFCOUNTER0_SELECT1 0xdb01
2270#define mmTD_PERFCOUNTER0_LO 0xd300
2271#define mmTD_PERFCOUNTER1_LO 0xd302
2272#define mmTD_PERFCOUNTER0_HI 0xd301
2273#define mmTD_PERFCOUNTER1_HI 0xd303
2274#define mmTD_SCRATCH 0x2533
2275#define mmTA_CNTL 0x2541
2276#define mmTA_CNTL_AUX 0x2542
2277#define mmTA_RESERVED_010C 0x2543
2278#define mmTA_CS_BC_BASE_ADDR 0xc380
2279#define mmTA_CS_BC_BASE_ADDR_HI 0xc381
2280#define mmTA_STATUS 0x2548
2281#define mmTA_DEBUG_INDEX 0x254c
2282#define mmTA_DEBUG_DATA 0x254d
2283#define mmTA_PERFCOUNTER0_SELECT 0xdac0
2284#define mmTA_PERFCOUNTER1_SELECT 0xdac2
2285#define mmTA_PERFCOUNTER0_SELECT1 0xdac1
2286#define mmTA_PERFCOUNTER0_LO 0xd2c0
2287#define mmTA_PERFCOUNTER1_LO 0xd2c2
2288#define mmTA_PERFCOUNTER0_HI 0xd2c1
2289#define mmTA_PERFCOUNTER1_HI 0xd2c3
2290#define mmTA_SCRATCH 0x2564
2291#define mmSH_HIDDEN_PRIVATE_BASE_VMID 0x2580
2292#define mmSH_STATIC_MEM_CONFIG 0x2581
2293#define mmTCP_INVALIDATE 0x2b00
2294#define mmTCP_STATUS 0x2b01
2295#define mmTCP_CNTL 0x2b02
2296#define mmTCP_CHAN_STEER_LO 0x2b03
2297#define mmTCP_CHAN_STEER_HI 0x2b04
2298#define mmTCP_ADDR_CONFIG 0x2b05
2299#define mmTCP_CREDIT 0x2b06
2300#define mmTCP_PERFCOUNTER0_SELECT 0xdb40
2301#define mmTCP_PERFCOUNTER1_SELECT 0xdb42
2302#define mmTCP_PERFCOUNTER0_SELECT1 0xdb41
2303#define mmTCP_PERFCOUNTER1_SELECT1 0xdb43
2304#define mmTCP_PERFCOUNTER2_SELECT 0xdb44
2305#define mmTCP_PERFCOUNTER3_SELECT 0xdb45
2306#define mmTCP_PERFCOUNTER0_LO 0xd340
2307#define mmTCP_PERFCOUNTER1_LO 0xd342
2308#define mmTCP_PERFCOUNTER2_LO 0xd344
2309#define mmTCP_PERFCOUNTER3_LO 0xd346
2310#define mmTCP_PERFCOUNTER0_HI 0xd341
2311#define mmTCP_PERFCOUNTER1_HI 0xd343
2312#define mmTCP_PERFCOUNTER2_HI 0xd345
2313#define mmTCP_PERFCOUNTER3_HI 0xd347
2314#define mmTCP_BUFFER_ADDR_HASH_CNTL 0x2b16
2315#define mmTCP_EDC_CNT 0x2b17
2316#define mmTC_CFG_L1_LOAD_POLICY0 0x2b1a
2317#define mmTC_CFG_L1_LOAD_POLICY1 0x2b1b
2318#define mmTC_CFG_L1_STORE_POLICY 0x2b1c
2319#define mmTC_CFG_L2_LOAD_POLICY0 0x2b1d
2320#define mmTC_CFG_L2_LOAD_POLICY1 0x2b1e
2321#define mmTC_CFG_L2_STORE_POLICY0 0x2b1f
2322#define mmTC_CFG_L2_STORE_POLICY1 0x2b20
2323#define mmTC_CFG_L2_ATOMIC_POLICY 0x2b21
2324#define mmTC_CFG_L1_VOLATILE 0x2b22
2325#define mmTC_CFG_L2_VOLATILE 0x2b23
2326#define mmTCP_WATCH0_ADDR_H 0x32a0
2327#define mmTCP_WATCH1_ADDR_H 0x32a3
2328#define mmTCP_WATCH2_ADDR_H 0x32a6
2329#define mmTCP_WATCH3_ADDR_H 0x32a9
2330#define mmTCP_WATCH0_ADDR_L 0x32a1
2331#define mmTCP_WATCH1_ADDR_L 0x32a4
2332#define mmTCP_WATCH2_ADDR_L 0x32a7
2333#define mmTCP_WATCH3_ADDR_L 0x32aa
2334#define mmTCP_WATCH0_CNTL 0x32a2
2335#define mmTCP_WATCH1_CNTL 0x32a5
2336#define mmTCP_WATCH2_CNTL 0x32a8
2337#define mmTCP_WATCH3_CNTL 0x32ab
2338#define mmTCP_GATCL1_CNTL 0x32b0
2339#define mmTCP_ATC_EDC_GATCL1_CNT 0x32b1
2340#define mmTCP_GATCL1_DSM_CNTL 0x32b2
2341#define mmTCP_DSM_CNTL 0x32b3
2342#define mmTCP_CNTL2 0x32b4
2343#define mmTD_CGTT_CTRL 0xf09c
2344#define mmTA_CGTT_CTRL 0xf09d
2345#define mmCGTT_TCP_CLK_CTRL 0xf09e
2346#define mmCGTT_TCI_CLK_CTRL 0xf09f
2347#define mmTCI_STATUS 0x2b61
2348#define mmTCI_CNTL_1 0x2b62
2349#define mmTCI_CNTL_2 0x2b63
2350#define mmGDS_CONFIG 0x25c0
2351#define mmGDS_CNTL_STATUS 0x25c1
2352#define mmGDS_ENHANCE2 0x25c2
2353#define mmGDS_PROTECTION_FAULT 0x25c3
2354#define mmGDS_VM_PROTECTION_FAULT 0x25c4
2355#define mmGDS_EDC_CNT 0x25c5
2356#define mmGDS_EDC_GRBM_CNT 0x25c6
2357#define mmGDS_EDC_OA_DED 0x25c7
2358#define mmGDS_DEBUG_CNTL 0x25c8
2359#define mmGDS_DEBUG_DATA 0x25c9
2360#define mmGDS_DSM_CNTL 0x25ca
2361#define mmCGTT_GDS_CLK_CTRL 0xf0a0
2362#define mmGDS_RD_ADDR 0xc400
2363#define mmGDS_RD_DATA 0xc401
2364#define mmGDS_RD_BURST_ADDR 0xc402
2365#define mmGDS_RD_BURST_COUNT 0xc403
2366#define mmGDS_RD_BURST_DATA 0xc404
2367#define mmGDS_WR_ADDR 0xc405
2368#define mmGDS_WR_DATA 0xc406
2369#define mmGDS_WR_BURST_ADDR 0xc407
2370#define mmGDS_WR_BURST_DATA 0xc408
2371#define mmGDS_WRITE_COMPLETE 0xc409
2372#define mmGDS_ATOM_CNTL 0xc40a
2373#define mmGDS_ATOM_COMPLETE 0xc40b
2374#define mmGDS_ATOM_BASE 0xc40c
2375#define mmGDS_ATOM_SIZE 0xc40d
2376#define mmGDS_ATOM_OFFSET0 0xc40e
2377#define mmGDS_ATOM_OFFSET1 0xc40f
2378#define mmGDS_ATOM_DST 0xc410
2379#define mmGDS_ATOM_OP 0xc411
2380#define mmGDS_ATOM_SRC0 0xc412
2381#define mmGDS_ATOM_SRC0_U 0xc413
2382#define mmGDS_ATOM_SRC1 0xc414
2383#define mmGDS_ATOM_SRC1_U 0xc415
2384#define mmGDS_ATOM_READ0 0xc416
2385#define mmGDS_ATOM_READ0_U 0xc417
2386#define mmGDS_ATOM_READ1 0xc418
2387#define mmGDS_ATOM_READ1_U 0xc419
2388#define mmGDS_GWS_RESOURCE_CNTL 0xc41a
2389#define mmGDS_GWS_RESOURCE 0xc41b
2390#define mmGDS_GWS_RESOURCE_CNT 0xc41c
2391#define mmGDS_OA_CNTL 0xc41d
2392#define mmGDS_OA_COUNTER 0xc41e
2393#define mmGDS_OA_ADDRESS 0xc41f
2394#define mmGDS_OA_INCDEC 0xc420
2395#define mmGDS_OA_RING_SIZE 0xc421
2396#define ixGDS_DEBUG_REG0 0x0
2397#define ixGDS_DEBUG_REG1 0x1
2398#define ixGDS_DEBUG_REG2 0x2
2399#define ixGDS_DEBUG_REG3 0x3
2400#define ixGDS_DEBUG_REG4 0x4
2401#define ixGDS_DEBUG_REG5 0x5
2402#define ixGDS_DEBUG_REG6 0x6
2403#define mmGDS_PERFCOUNTER0_SELECT 0xda80
2404#define mmGDS_PERFCOUNTER1_SELECT 0xda81
2405#define mmGDS_PERFCOUNTER2_SELECT 0xda82
2406#define mmGDS_PERFCOUNTER3_SELECT 0xda83
2407#define mmGDS_PERFCOUNTER0_LO 0xd280
2408#define mmGDS_PERFCOUNTER1_LO 0xd282
2409#define mmGDS_PERFCOUNTER2_LO 0xd284
2410#define mmGDS_PERFCOUNTER3_LO 0xd286
2411#define mmGDS_PERFCOUNTER0_HI 0xd281
2412#define mmGDS_PERFCOUNTER1_HI 0xd283
2413#define mmGDS_PERFCOUNTER2_HI 0xd285
2414#define mmGDS_PERFCOUNTER3_HI 0xd287
2415#define mmGDS_PERFCOUNTER0_SELECT1 0xda84
2416#define mmGDS_VMID0_BASE 0x3300
2417#define mmGDS_VMID1_BASE 0x3302
2418#define mmGDS_VMID2_BASE 0x3304
2419#define mmGDS_VMID3_BASE 0x3306
2420#define mmGDS_VMID4_BASE 0x3308
2421#define mmGDS_VMID5_BASE 0x330a
2422#define mmGDS_VMID6_BASE 0x330c
2423#define mmGDS_VMID7_BASE 0x330e
2424#define mmGDS_VMID8_BASE 0x3310
2425#define mmGDS_VMID9_BASE 0x3312
2426#define mmGDS_VMID10_BASE 0x3314
2427#define mmGDS_VMID11_BASE 0x3316
2428#define mmGDS_VMID12_BASE 0x3318
2429#define mmGDS_VMID13_BASE 0x331a
2430#define mmGDS_VMID14_BASE 0x331c
2431#define mmGDS_VMID15_BASE 0x331e
2432#define mmGDS_VMID0_SIZE 0x3301
2433#define mmGDS_VMID1_SIZE 0x3303
2434#define mmGDS_VMID2_SIZE 0x3305
2435#define mmGDS_VMID3_SIZE 0x3307
2436#define mmGDS_VMID4_SIZE 0x3309
2437#define mmGDS_VMID5_SIZE 0x330b
2438#define mmGDS_VMID6_SIZE 0x330d
2439#define mmGDS_VMID7_SIZE 0x330f
2440#define mmGDS_VMID8_SIZE 0x3311
2441#define mmGDS_VMID9_SIZE 0x3313
2442#define mmGDS_VMID10_SIZE 0x3315
2443#define mmGDS_VMID11_SIZE 0x3317
2444#define mmGDS_VMID12_SIZE 0x3319
2445#define mmGDS_VMID13_SIZE 0x331b
2446#define mmGDS_VMID14_SIZE 0x331d
2447#define mmGDS_VMID15_SIZE 0x331f
2448#define mmGDS_GWS_VMID0 0x3320
2449#define mmGDS_GWS_VMID1 0x3321
2450#define mmGDS_GWS_VMID2 0x3322
2451#define mmGDS_GWS_VMID3 0x3323
2452#define mmGDS_GWS_VMID4 0x3324
2453#define mmGDS_GWS_VMID5 0x3325
2454#define mmGDS_GWS_VMID6 0x3326
2455#define mmGDS_GWS_VMID7 0x3327
2456#define mmGDS_GWS_VMID8 0x3328
2457#define mmGDS_GWS_VMID9 0x3329
2458#define mmGDS_GWS_VMID10 0x332a
2459#define mmGDS_GWS_VMID11 0x332b
2460#define mmGDS_GWS_VMID12 0x332c
2461#define mmGDS_GWS_VMID13 0x332d
2462#define mmGDS_GWS_VMID14 0x332e
2463#define mmGDS_GWS_VMID15 0x332f
2464#define mmGDS_OA_VMID0 0x3330
2465#define mmGDS_OA_VMID1 0x3331
2466#define mmGDS_OA_VMID2 0x3332
2467#define mmGDS_OA_VMID3 0x3333
2468#define mmGDS_OA_VMID4 0x3334
2469#define mmGDS_OA_VMID5 0x3335
2470#define mmGDS_OA_VMID6 0x3336
2471#define mmGDS_OA_VMID7 0x3337
2472#define mmGDS_OA_VMID8 0x3338
2473#define mmGDS_OA_VMID9 0x3339
2474#define mmGDS_OA_VMID10 0x333a
2475#define mmGDS_OA_VMID11 0x333b
2476#define mmGDS_OA_VMID12 0x333c
2477#define mmGDS_OA_VMID13 0x333d
2478#define mmGDS_OA_VMID14 0x333e
2479#define mmGDS_OA_VMID15 0x333f
2480#define mmGDS_GWS_RESET0 0x3344
2481#define mmGDS_GWS_RESET1 0x3345
2482#define mmGDS_GWS_RESOURCE_RESET 0x3346
2483#define mmGDS_COMPUTE_MAX_WAVE_ID 0x3348
2484#define mmGDS_OA_RESET_MASK 0x3349
2485#define mmGDS_OA_RESET 0x334a
2486#define mmGDS_ENHANCE 0x334b
2487#define mmGDS_OA_CGPG_RESTORE 0x334c
2488#define mmGDS_CS_CTXSW_STATUS 0x334d
2489#define mmGDS_CS_CTXSW_CNT0 0x334e
2490#define mmGDS_CS_CTXSW_CNT1 0x334f
2491#define mmGDS_CS_CTXSW_CNT2 0x3350
2492#define mmGDS_CS_CTXSW_CNT3 0x3351
2493#define mmGDS_GFX_CTXSW_STATUS 0x3352
2494#define mmGDS_VS_CTXSW_CNT0 0x3353
2495#define mmGDS_VS_CTXSW_CNT1 0x3354
2496#define mmGDS_VS_CTXSW_CNT2 0x3355
2497#define mmGDS_VS_CTXSW_CNT3 0x3356
2498#define mmGDS_PS0_CTXSW_CNT0 0x3357
2499#define mmGDS_PS1_CTXSW_CNT0 0x335b
2500#define mmGDS_PS2_CTXSW_CNT0 0x335f
2501#define mmGDS_PS3_CTXSW_CNT0 0x3363
2502#define mmGDS_PS4_CTXSW_CNT0 0x3367
2503#define mmGDS_PS5_CTXSW_CNT0 0x336b
2504#define mmGDS_PS6_CTXSW_CNT0 0x336f
2505#define mmGDS_PS7_CTXSW_CNT0 0x3373
2506#define mmGDS_PS0_CTXSW_CNT1 0x3358
2507#define mmGDS_PS1_CTXSW_CNT1 0x335c
2508#define mmGDS_PS2_CTXSW_CNT1 0x3360
2509#define mmGDS_PS3_CTXSW_CNT1 0x3364
2510#define mmGDS_PS4_CTXSW_CNT1 0x3368
2511#define mmGDS_PS5_CTXSW_CNT1 0x336c
2512#define mmGDS_PS6_CTXSW_CNT1 0x3370
2513#define mmGDS_PS7_CTXSW_CNT1 0x3374
2514#define mmGDS_PS0_CTXSW_CNT2 0x3359
2515#define mmGDS_PS1_CTXSW_CNT2 0x335d
2516#define mmGDS_PS2_CTXSW_CNT2 0x3361
2517#define mmGDS_PS3_CTXSW_CNT2 0x3365
2518#define mmGDS_PS4_CTXSW_CNT2 0x3369
2519#define mmGDS_PS5_CTXSW_CNT2 0x336d
2520#define mmGDS_PS6_CTXSW_CNT2 0x3371
2521#define mmGDS_PS7_CTXSW_CNT2 0x3375
2522#define mmGDS_PS0_CTXSW_CNT3 0x335a
2523#define mmGDS_PS1_CTXSW_CNT3 0x335e
2524#define mmGDS_PS2_CTXSW_CNT3 0x3362
2525#define mmGDS_PS3_CTXSW_CNT3 0x3366
2526#define mmGDS_PS4_CTXSW_CNT3 0x336a
2527#define mmGDS_PS5_CTXSW_CNT3 0x336e
2528#define mmGDS_PS6_CTXSW_CNT3 0x3372
2529#define mmGDS_PS7_CTXSW_CNT3 0x3376
2530#define mmCS_COPY_STATE 0xa1f3
2531#define mmGFX_COPY_STATE 0xa1f4
2532#define mmVGT_DRAW_INITIATOR 0xa1fc
2533#define mmVGT_EVENT_INITIATOR 0xa2a4
2534#define mmVGT_EVENT_ADDRESS_REG 0xa1fe
2535#define mmVGT_DMA_BASE_HI 0xa1f9
2536#define mmVGT_DMA_BASE 0xa1fa
2537#define mmVGT_DMA_INDEX_TYPE 0xa29f
2538#define mmVGT_DMA_NUM_INSTANCES 0xa2a2
2539#define mmIA_ENHANCE 0xa29c
2540#define mmVGT_DMA_SIZE 0xa29d
2541#define mmVGT_DMA_MAX_SIZE 0xa29e
2542#define mmVGT_DMA_PRIMITIVE_TYPE 0x2271
2543#define mmVGT_DMA_CONTROL 0x2272
2544#define mmVGT_IMMED_DATA 0xa1fd
2545#define mmVGT_INDEX_TYPE 0xc243
2546#define mmVGT_NUM_INDICES 0xc24c
2547#define mmVGT_NUM_INSTANCES 0xc24d
2548#define mmVGT_PRIMITIVE_TYPE 0xc242
2549#define mmVGT_PRIMITIVEID_EN 0xa2a1
2550#define mmVGT_PRIMITIVEID_RESET 0xa2a3
2551#define mmVGT_VTX_CNT_EN 0xa2ae
2552#define mmVGT_REUSE_OFF 0xa2ad
2553#define mmVGT_INSTANCE_STEP_RATE_0 0xa2a8
2554#define mmVGT_INSTANCE_STEP_RATE_1 0xa2a9
2555#define mmVGT_MAX_VTX_INDX 0xa100
2556#define mmVGT_MIN_VTX_INDX 0xa101
2557#define mmVGT_INDX_OFFSET 0xa102
2558#define mmVGT_VERTEX_REUSE_BLOCK_CNTL 0xa316
2559#define mmVGT_OUT_DEALLOC_CNTL 0xa317
2560#define mmVGT_MULTI_PRIM_IB_RESET_INDX 0xa103
2561#define mmVGT_MULTI_PRIM_IB_RESET_EN 0xa2a5
2562#define mmVGT_ENHANCE 0xa294
2563#define mmVGT_OUTPUT_PATH_CNTL 0xa284
2564#define mmVGT_HOS_CNTL 0xa285
2565#define mmVGT_HOS_MAX_TESS_LEVEL 0xa286
2566#define mmVGT_HOS_MIN_TESS_LEVEL 0xa287
2567#define mmVGT_HOS_REUSE_DEPTH 0xa288
2568#define mmVGT_GROUP_PRIM_TYPE 0xa289
2569#define mmVGT_GROUP_FIRST_DECR 0xa28a
2570#define mmVGT_GROUP_DECR 0xa28b
2571#define mmVGT_GROUP_VECT_0_CNTL 0xa28c
2572#define mmVGT_GROUP_VECT_1_CNTL 0xa28d
2573#define mmVGT_GROUP_VECT_0_FMT_CNTL 0xa28e
2574#define mmVGT_GROUP_VECT_1_FMT_CNTL 0xa28f
2575#define mmVGT_VTX_VECT_EJECT_REG 0x222c
2576#define mmVGT_DMA_DATA_FIFO_DEPTH 0x222d
2577#define mmVGT_DMA_REQ_FIFO_DEPTH 0x222e
2578#define mmVGT_DRAW_INIT_FIFO_DEPTH 0x222f
2579#define mmVGT_LAST_COPY_STATE 0x2230
2580#define mmCC_GC_SHADER_ARRAY_CONFIG 0x226f
2581#define mmGC_USER_SHADER_ARRAY_CONFIG 0x2270
2582#define mmVGT_GS_MODE 0xa290
2583#define mmVGT_GS_ONCHIP_CNTL 0xa291
2584#define mmVGT_GS_OUT_PRIM_TYPE 0xa29b
2585#define mmVGT_CACHE_INVALIDATION 0x2231
2586#define mmVGT_RESET_DEBUG 0x2232
2587#define mmVGT_STRMOUT_DELAY 0x2233
2588#define mmVGT_FIFO_DEPTHS 0x2234
2589#define mmVGT_GS_PER_ES 0xa295
2590#define mmVGT_ES_PER_GS 0xa296
2591#define mmVGT_GS_PER_VS 0xa297
2592#define mmVGT_GS_VERTEX_REUSE 0x2235
2593#define mmVGT_MC_LAT_CNTL 0x2236
2594#define mmIA_CNTL_STATUS 0x2237
2595#define mmVGT_STRMOUT_CONFIG 0xa2e5
2596#define mmVGT_STRMOUT_BUFFER_SIZE_0 0xa2b4
2597#define mmVGT_STRMOUT_BUFFER_SIZE_1 0xa2b8
2598#define mmVGT_STRMOUT_BUFFER_SIZE_2 0xa2bc
2599#define mmVGT_STRMOUT_BUFFER_SIZE_3 0xa2c0
2600#define mmVGT_STRMOUT_BUFFER_OFFSET_0 0xa2b7
2601#define mmVGT_STRMOUT_BUFFER_OFFSET_1 0xa2bb
2602#define mmVGT_STRMOUT_BUFFER_OFFSET_2 0xa2bf
2603#define mmVGT_STRMOUT_BUFFER_OFFSET_3 0xa2c3
2604#define mmVGT_STRMOUT_VTX_STRIDE_0 0xa2b5
2605#define mmVGT_STRMOUT_VTX_STRIDE_1 0xa2b9
2606#define mmVGT_STRMOUT_VTX_STRIDE_2 0xa2bd
2607#define mmVGT_STRMOUT_VTX_STRIDE_3 0xa2c1
2608#define mmVGT_STRMOUT_BUFFER_CONFIG 0xa2e6
2609#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_0 0xc244
2610#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_1 0xc245
2611#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_2 0xc246
2612#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_3 0xc247
2613#define mmVGT_STRMOUT_DRAW_OPAQUE_OFFSET 0xa2ca
2614#define mmVGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE 0xa2cb
2615#define mmVGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE 0xa2cc
2616#define mmVGT_GS_MAX_VERT_OUT 0xa2ce
2617#define mmVGT_SHADER_STAGES_EN 0xa2d5
2618#define mmVGT_DISPATCH_DRAW_INDEX 0xa2dd
2619#define mmVGT_LS_HS_CONFIG 0xa2d6
2620#define mmVGT_DMA_LS_HS_CONFIG 0x2273
2621#define mmVGT_TF_PARAM 0xa2db
2622#define mmVGT_TESS_DISTRIBUTION 0xa2d4
2623#define mmVGT_TF_RING_SIZE 0xc24e
2624#define mmVGT_SYS_CONFIG 0x2263
2625#define mmVGT_HS_OFFCHIP_PARAM 0xc24f
2626#define mmVGT_TF_MEMORY_BASE 0xc250
2627#define mmVGT_GS_INSTANCE_CNT 0xa2e4
2628#define mmIA_MULTI_VGT_PARAM 0xa2aa
2629#define mmVGT_VS_MAX_WAVE_ID 0x2268
2630#define mmVGT_ESGS_RING_SIZE 0xc240
2631#define mmVGT_GSVS_RING_SIZE 0xc241
2632#define mmVGT_GSVS_RING_OFFSET_1 0xa298
2633#define mmVGT_GSVS_RING_OFFSET_2 0xa299
2634#define mmVGT_GSVS_RING_OFFSET_3 0xa29a
2635#define mmVGT_ESGS_RING_ITEMSIZE 0xa2ab
2636#define mmVGT_GSVS_RING_ITEMSIZE 0xa2ac
2637#define mmVGT_GS_VERT_ITEMSIZE 0xa2d7
2638#define mmVGT_GS_VERT_ITEMSIZE_1 0xa2d8
2639#define mmVGT_GS_VERT_ITEMSIZE_2 0xa2d9
2640#define mmVGT_GS_VERT_ITEMSIZE_3 0xa2da
2641#define mmWD_CNTL_STATUS 0x223f
2642#define mmWD_ENHANCE 0xa2a0
2643#define mmGFX_PIPE_CONTROL 0x226d
2644#define mmCGTT_VGT_CLK_CTRL 0xf084
2645#define mmCGTT_IA_CLK_CTRL 0xf085
2646#define mmCGTT_WD_CLK_CTRL 0xf086
2647#define mmVGT_DEBUG_CNTL 0x2238
2648#define mmVGT_DEBUG_DATA 0x2239
2649#define mmIA_DEBUG_CNTL 0x223a
2650#define mmIA_DEBUG_DATA 0x223b
2651#define mmVGT_CNTL_STATUS 0x223c
2652#define mmWD_DEBUG_CNTL 0x223d
2653#define mmWD_DEBUG_DATA 0x223e
2654#define mmWD_QOS 0x2242
2655#define mmCC_GC_PRIM_CONFIG 0x2240
2656#define mmGC_USER_PRIM_CONFIG 0x2241
2657#define ixWD_DEBUG_REG0 0x0
2658#define ixWD_DEBUG_REG1 0x1
2659#define ixWD_DEBUG_REG2 0x2
2660#define ixWD_DEBUG_REG3 0x3
2661#define ixWD_DEBUG_REG4 0x4
2662#define ixWD_DEBUG_REG5 0x5
2663#define ixWD_DEBUG_REG6 0x6
2664#define ixWD_DEBUG_REG7 0x7
2665#define ixWD_DEBUG_REG8 0x8
2666#define ixWD_DEBUG_REG9 0x9
2667#define ixWD_DEBUG_REG10 0xa
2668#define ixIA_DEBUG_REG0 0x0
2669#define ixIA_DEBUG_REG1 0x1
2670#define ixIA_DEBUG_REG2 0x2
2671#define ixIA_DEBUG_REG3 0x3
2672#define ixIA_DEBUG_REG4 0x4
2673#define ixIA_DEBUG_REG5 0x5
2674#define ixIA_DEBUG_REG6 0x6
2675#define ixIA_DEBUG_REG7 0x7
2676#define ixIA_DEBUG_REG8 0x8
2677#define ixIA_DEBUG_REG9 0x9
2678#define ixVGT_DEBUG_REG0 0x0
2679#define ixVGT_DEBUG_REG1 0x1
2680#define ixVGT_DEBUG_REG2 0x1e
2681#define ixVGT_DEBUG_REG3 0x1f
2682#define ixVGT_DEBUG_REG4 0x20
2683#define ixVGT_DEBUG_REG5 0x21
2684#define ixVGT_DEBUG_REG6 0x22
2685#define ixVGT_DEBUG_REG7 0x23
2686#define ixVGT_DEBUG_REG8 0x8
2687#define ixVGT_DEBUG_REG9 0x9
2688#define ixVGT_DEBUG_REG10 0xa
2689#define ixVGT_DEBUG_REG11 0xb
2690#define ixVGT_DEBUG_REG12 0xc
2691#define ixVGT_DEBUG_REG13 0xd
2692#define ixVGT_DEBUG_REG14 0xe
2693#define ixVGT_DEBUG_REG15 0xf
2694#define ixVGT_DEBUG_REG16 0x10
2695#define ixVGT_DEBUG_REG17 0x11
2696#define ixVGT_DEBUG_REG18 0x7
2697#define ixVGT_DEBUG_REG19 0x13
2698#define ixVGT_DEBUG_REG20 0x14
2699#define ixVGT_DEBUG_REG21 0x15
2700#define ixVGT_DEBUG_REG22 0x16
2701#define ixVGT_DEBUG_REG23 0x17
2702#define ixVGT_DEBUG_REG24 0x18
2703#define ixVGT_DEBUG_REG25 0x19
2704#define ixVGT_DEBUG_REG26 0x24
2705#define ixVGT_DEBUG_REG27 0x1b
2706#define ixVGT_DEBUG_REG28 0x1c
2707#define ixVGT_DEBUG_REG29 0x1d
2708#define ixVGT_DEBUG_REG31 0x26
2709#define ixVGT_DEBUG_REG32 0x27
2710#define ixVGT_DEBUG_REG33 0x28
2711#define ixVGT_DEBUG_REG34 0x29
2712#define ixVGT_DEBUG_REG36 0x2b
2713#define mmVGT_PERFCOUNTER_SEID_MASK 0xd894
2714#define mmVGT_PERFCOUNTER0_SELECT 0xd88c
2715#define mmVGT_PERFCOUNTER1_SELECT 0xd88d
2716#define mmVGT_PERFCOUNTER2_SELECT 0xd88e
2717#define mmVGT_PERFCOUNTER3_SELECT 0xd88f
2718#define mmVGT_PERFCOUNTER0_SELECT1 0xd890
2719#define mmVGT_PERFCOUNTER1_SELECT1 0xd891
2720#define mmVGT_PERFCOUNTER0_LO 0xd090
2721#define mmVGT_PERFCOUNTER1_LO 0xd092
2722#define mmVGT_PERFCOUNTER2_LO 0xd094
2723#define mmVGT_PERFCOUNTER3_LO 0xd096
2724#define mmVGT_PERFCOUNTER0_HI 0xd091
2725#define mmVGT_PERFCOUNTER1_HI 0xd093
2726#define mmVGT_PERFCOUNTER2_HI 0xd095
2727#define mmVGT_PERFCOUNTER3_HI 0xd097
2728#define mmIA_PERFCOUNTER0_SELECT 0xd884
2729#define mmIA_PERFCOUNTER1_SELECT 0xd885
2730#define mmIA_PERFCOUNTER2_SELECT 0xd886
2731#define mmIA_PERFCOUNTER3_SELECT 0xd887
2732#define mmIA_PERFCOUNTER0_SELECT1 0xd888
2733#define mmIA_PERFCOUNTER0_LO 0xd088
2734#define mmIA_PERFCOUNTER1_LO 0xd08a
2735#define mmIA_PERFCOUNTER2_LO 0xd08c
2736#define mmIA_PERFCOUNTER3_LO 0xd08e
2737#define mmIA_PERFCOUNTER0_HI 0xd089
2738#define mmIA_PERFCOUNTER1_HI 0xd08b
2739#define mmIA_PERFCOUNTER2_HI 0xd08d
2740#define mmIA_PERFCOUNTER3_HI 0xd08f
2741#define mmWD_PERFCOUNTER0_SELECT 0xd880
2742#define mmWD_PERFCOUNTER1_SELECT 0xd881
2743#define mmWD_PERFCOUNTER2_SELECT 0xd882
2744#define mmWD_PERFCOUNTER3_SELECT 0xd883
2745#define mmWD_PERFCOUNTER0_LO 0xd080
2746#define mmWD_PERFCOUNTER1_LO 0xd082
2747#define mmWD_PERFCOUNTER2_LO 0xd084
2748#define mmWD_PERFCOUNTER3_LO 0xd086
2749#define mmWD_PERFCOUNTER0_HI 0xd081
2750#define mmWD_PERFCOUNTER1_HI 0xd083
2751#define mmWD_PERFCOUNTER2_HI 0xd085
2752#define mmWD_PERFCOUNTER3_HI 0xd087
2753#define mmDIDT_IND_INDEX 0x3280
2754#define mmDIDT_IND_DATA 0x3281
2755#define ixDIDT_SQ_CTRL0 0x0
2756#define ixDIDT_SQ_CTRL1 0x1
2757#define ixDIDT_SQ_CTRL2 0x2
2758#define ixDIDT_SQ_CTRL_OCP 0x3
2759#define ixDIDT_SQ_WEIGHT0_3 0x10
2760#define ixDIDT_SQ_WEIGHT4_7 0x11
2761#define ixDIDT_SQ_WEIGHT8_11 0x12
2762#define ixDIDT_DB_CTRL0 0x20
2763#define ixDIDT_DB_CTRL1 0x21
2764#define ixDIDT_DB_CTRL2 0x22
2765#define ixDIDT_DB_CTRL_OCP 0x23
2766#define ixDIDT_DB_WEIGHT0_3 0x30
2767#define ixDIDT_DB_WEIGHT4_7 0x31
2768#define ixDIDT_DB_WEIGHT8_11 0x32
2769#define ixDIDT_TD_CTRL0 0x40
2770#define ixDIDT_TD_CTRL1 0x41
2771#define ixDIDT_TD_CTRL2 0x42
2772#define ixDIDT_TD_CTRL_OCP 0x43
2773#define ixDIDT_TD_WEIGHT0_3 0x50
2774#define ixDIDT_TD_WEIGHT4_7 0x51
2775#define ixDIDT_TD_WEIGHT8_11 0x52
2776#define ixDIDT_TCP_CTRL0 0x60
2777#define ixDIDT_TCP_CTRL1 0x61
2778#define ixDIDT_TCP_CTRL2 0x62
2779#define ixDIDT_TCP_CTRL_OCP 0x63
2780#define ixDIDT_TCP_WEIGHT0_3 0x70
2781#define ixDIDT_TCP_WEIGHT4_7 0x71
2782#define ixDIDT_TCP_WEIGHT8_11 0x72
2783#define ixDIDT_DBR_CTRL0 0x80
2784#define ixDIDT_DBR_CTRL1 0x81
2785#define ixDIDT_DBR_CTRL2 0x82
2786#define ixDIDT_DBR_CTRL_OCP 0x83
2787#define ixDIDT_DBR_WEIGHT0_3 0x90
2788#define ixDIDT_DBR_WEIGHT4_7 0x91
2789#define ixDIDT_DBR_WEIGHT8_11 0x92
2790
2791#endif /* GFX_8_1_D_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_1_enum.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_1_enum.h
new file mode 100644
index 000000000000..f9022097fbe9
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_1_enum.h
@@ -0,0 +1,6808 @@
1/*
2 * GFX_8_1 Register documentation
3 *
4 * Copyright (C) 2014 Advanced Micro Devices, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
20 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24#ifndef GFX_8_1_ENUM_H
25#define GFX_8_1_ENUM_H
26
/*
 * SurfaceNumber -- channel numeric-format selector.
 * Enumerator values are fixed by the GFX 8.1 register documentation;
 * do not reorder or renumber.
 */
typedef enum SurfaceNumber {
	NUMBER_UNORM		= 0x0,
	NUMBER_SNORM		= 0x1,
	NUMBER_USCALED		= 0x2,
	NUMBER_SSCALED		= 0x3,
	NUMBER_UINT		= 0x4,
	NUMBER_SINT		= 0x5,
	NUMBER_SRGB		= 0x6,
	NUMBER_FLOAT		= 0x7,
} SurfaceNumber;
/*
 * SurfaceSwap -- surface component-swap selector.
 * Hardware-defined encodings (GFX 8.1); values must not change.
 */
typedef enum SurfaceSwap {
	SWAP_STD	= 0x0,
	SWAP_ALT	= 0x1,
	SWAP_STD_REV	= 0x2,
	SWAP_ALT_REV	= 0x3,
} SurfaceSwap;
/*
 * CBMode -- color-block operating mode selector.
 * Values are taken from the GFX 8.1 register documentation and are
 * contiguous from 0x0; keep the ordering intact.
 */
typedef enum CBMode {
	CB_DISABLE			= 0x0,
	CB_NORMAL			= 0x1,
	CB_ELIMINATE_FAST_CLEAR		= 0x2,
	CB_RESOLVE			= 0x3,
	CB_DECOMPRESS			= 0x4,
	CB_FMASK_DECOMPRESS		= 0x5,
	CB_DCC_DECOMPRESS		= 0x6,
} CBMode;
/*
 * RoundMode -- rounding behaviour selector (round-half vs truncate).
 * Encodings are fixed by the GFX 8.1 register documentation.
 */
typedef enum RoundMode {
	ROUND_BY_HALF	= 0x0,
	ROUND_TRUNCATE	= 0x1,
} RoundMode;
/*
 * SourceFormat -- export source-format selector (channel count and
 * bits-per-channel, per the enumerator names).
 * Hardware-defined encodings (GFX 8.1); do not renumber.
 */
typedef enum SourceFormat {
	EXPORT_4C_32BPC		= 0x0,
	EXPORT_4C_16BPC		= 0x1,
	EXPORT_2C_32BPC_GR	= 0x2,
	EXPORT_2C_32BPC_AR	= 0x3,
} SourceFormat;
/*
 * BlendOp -- blend-factor selector.
 * Values 0x0..0x14 are contiguous and fixed by the GFX 8.1 register
 * documentation; keep the ordering and encodings intact.
 */
typedef enum BlendOp {
	BLEND_ZERO				= 0x0,
	BLEND_ONE				= 0x1,
	BLEND_SRC_COLOR				= 0x2,
	BLEND_ONE_MINUS_SRC_COLOR		= 0x3,
	BLEND_SRC_ALPHA				= 0x4,
	BLEND_ONE_MINUS_SRC_ALPHA		= 0x5,
	BLEND_DST_ALPHA				= 0x6,
	BLEND_ONE_MINUS_DST_ALPHA		= 0x7,
	BLEND_DST_COLOR				= 0x8,
	BLEND_ONE_MINUS_DST_COLOR		= 0x9,
	BLEND_SRC_ALPHA_SATURATE		= 0xa,
	BLEND_BOTH_SRC_ALPHA			= 0xb,
	BLEND_BOTH_INV_SRC_ALPHA		= 0xc,
	BLEND_CONSTANT_COLOR			= 0xd,
	BLEND_ONE_MINUS_CONSTANT_COLOR		= 0xe,
	BLEND_SRC1_COLOR			= 0xf,
	BLEND_INV_SRC1_COLOR			= 0x10,
	BLEND_SRC1_ALPHA			= 0x11,
	BLEND_INV_SRC1_ALPHA			= 0x12,
	BLEND_CONSTANT_ALPHA			= 0x13,
	BLEND_ONE_MINUS_CONSTANT_ALPHA		= 0x14,
} BlendOp;
/*
 * CombFunc -- blend combine-function selector (add, subtract, min, max,
 * reverse subtract, per the enumerator names).
 * Hardware-defined encodings (GFX 8.1); do not renumber.
 */
typedef enum CombFunc {
	COMB_DST_PLUS_SRC	= 0x0,
	COMB_SRC_MINUS_DST	= 0x1,
	COMB_MIN_DST_SRC	= 0x2,
	COMB_MAX_DST_SRC	= 0x3,
	COMB_DST_MINUS_SRC	= 0x4,
} CombFunc;
/*
 * BlendOpt -- blend-optimization force/override selector.
 * Values are fixed by the GFX 8.1 register documentation; keep the
 * contiguous 0x0..0x7 encoding intact.
 */
typedef enum BlendOpt {
	FORCE_OPT_AUTO				= 0x0,
	FORCE_OPT_DISABLE			= 0x1,
	FORCE_OPT_ENABLE_IF_SRC_A_0		= 0x2,
	FORCE_OPT_ENABLE_IF_SRC_RGB_0		= 0x3,
	FORCE_OPT_ENABLE_IF_SRC_ARGB_0		= 0x4,
	FORCE_OPT_ENABLE_IF_SRC_A_1		= 0x5,
	FORCE_OPT_ENABLE_IF_SRC_RGB_1		= 0x6,
	FORCE_OPT_ENABLE_IF_SRC_ARGB_1		= 0x7,
} BlendOpt;
/*
 * CmaskCode -- CMASK encoding values.
 * All sixteen 4-bit codes (0x0..0xf) are enumerated; encodings are fixed
 * by the GFX 8.1 register documentation.
 */
typedef enum CmaskCode {
	CMASK_CLR00_F0	= 0x0,
	CMASK_CLR00_F1	= 0x1,
	CMASK_CLR00_F2	= 0x2,
	CMASK_CLR00_FX	= 0x3,
	CMASK_CLR01_F0	= 0x4,
	CMASK_CLR01_F1	= 0x5,
	CMASK_CLR01_F2	= 0x6,
	CMASK_CLR01_FX	= 0x7,
	CMASK_CLR10_F0	= 0x8,
	CMASK_CLR10_F1	= 0x9,
	CMASK_CLR10_F2	= 0xa,
	CMASK_CLR10_FX	= 0xb,
	CMASK_CLR11_F0	= 0xc,
	CMASK_CLR11_F1	= 0xd,
	CMASK_CLR11_F2	= 0xe,
	CMASK_CLR11_FX	= 0xf,
} CmaskCode;
/*
 * CmaskAddr -- CMASK addressing-mode selector (tiled, linear, or
 * compatible, per the enumerator names).
 * Hardware-defined encodings (GFX 8.1); do not renumber.
 */
typedef enum CmaskAddr {
	CMASK_ADDR_TILED	= 0x0,
	CMASK_ADDR_LINEAR	= 0x1,
	CMASK_ADDR_COMPATIBLE	= 0x2,
} CmaskAddr;
125typedef enum CBPerfSel {
126 CB_PERF_SEL_NONE = 0x0,
127 CB_PERF_SEL_BUSY = 0x1,
128 CB_PERF_SEL_CORE_SCLK_VLD = 0x2,
129 CB_PERF_SEL_REG_SCLK0_VLD = 0x3,
130 CB_PERF_SEL_REG_SCLK1_VLD = 0x4,
131 CB_PERF_SEL_DRAWN_QUAD = 0x5,
132 CB_PERF_SEL_DRAWN_PIXEL = 0x6,
133 CB_PERF_SEL_DRAWN_QUAD_FRAGMENT = 0x7,
134 CB_PERF_SEL_DRAWN_TILE = 0x8,
135 CB_PERF_SEL_DB_CB_TILE_VALID_READY = 0x9,
136 CB_PERF_SEL_DB_CB_TILE_VALID_READYB = 0xa,
137 CB_PERF_SEL_DB_CB_TILE_VALIDB_READY = 0xb,
138 CB_PERF_SEL_DB_CB_TILE_VALIDB_READYB = 0xc,
139 CB_PERF_SEL_CM_FC_TILE_VALID_READY = 0xd,
140 CB_PERF_SEL_CM_FC_TILE_VALID_READYB = 0xe,
141 CB_PERF_SEL_CM_FC_TILE_VALIDB_READY = 0xf,
142 CB_PERF_SEL_CM_FC_TILE_VALIDB_READYB = 0x10,
143 CB_PERF_SEL_MERGE_TILE_ONLY_VALID_READY = 0x11,
144 CB_PERF_SEL_MERGE_TILE_ONLY_VALID_READYB = 0x12,
145 CB_PERF_SEL_DB_CB_LQUAD_VALID_READY = 0x13,
146 CB_PERF_SEL_DB_CB_LQUAD_VALID_READYB = 0x14,
147 CB_PERF_SEL_DB_CB_LQUAD_VALIDB_READY = 0x15,
148 CB_PERF_SEL_DB_CB_LQUAD_VALIDB_READYB = 0x16,
149 CB_PERF_SEL_LQUAD_NO_TILE = 0x17,
150 CB_PERF_SEL_LQUAD_FORMAT_IS_EXPORT_32_R = 0x18,
151 CB_PERF_SEL_LQUAD_FORMAT_IS_EXPORT_32_AR = 0x19,
152 CB_PERF_SEL_LQUAD_FORMAT_IS_EXPORT_32_GR = 0x1a,
153 CB_PERF_SEL_LQUAD_FORMAT_IS_EXPORT_32_ABGR = 0x1b,
154 CB_PERF_SEL_LQUAD_FORMAT_IS_EXPORT_FP16_ABGR = 0x1c,
155 CB_PERF_SEL_LQUAD_FORMAT_IS_EXPORT_SIGNED16_ABGR = 0x1d,
156 CB_PERF_SEL_LQUAD_FORMAT_IS_EXPORT_UNSIGNED16_ABGR= 0x1e,
157 CB_PERF_SEL_QUAD_KILLED_BY_EXTRA_PIXEL_EXPORT = 0x1f,
158 CB_PERF_SEL_QUAD_KILLED_BY_COLOR_INVALID = 0x20,
159 CB_PERF_SEL_QUAD_KILLED_BY_NULL_TARGET_SHADER_MASK= 0x21,
160 CB_PERF_SEL_QUAD_KILLED_BY_NULL_SAMPLE_MASK = 0x22,
161 CB_PERF_SEL_QUAD_KILLED_BY_DISCARD_PIXEL = 0x23,
162 CB_PERF_SEL_FC_CLEAR_QUAD_VALID_READY = 0x24,
163 CB_PERF_SEL_FC_CLEAR_QUAD_VALID_READYB = 0x25,
164 CB_PERF_SEL_FC_CLEAR_QUAD_VALIDB_READY = 0x26,
165 CB_PERF_SEL_FC_CLEAR_QUAD_VALIDB_READYB = 0x27,
166 CB_PERF_SEL_FOP_IN_VALID_READY = 0x28,
167 CB_PERF_SEL_FOP_IN_VALID_READYB = 0x29,
168 CB_PERF_SEL_FOP_IN_VALIDB_READY = 0x2a,
169 CB_PERF_SEL_FOP_IN_VALIDB_READYB = 0x2b,
170 CB_PERF_SEL_FC_CC_QUADFRAG_VALID_READY = 0x2c,
171 CB_PERF_SEL_FC_CC_QUADFRAG_VALID_READYB = 0x2d,
172 CB_PERF_SEL_FC_CC_QUADFRAG_VALIDB_READY = 0x2e,
173 CB_PERF_SEL_FC_CC_QUADFRAG_VALIDB_READYB = 0x2f,
174 CB_PERF_SEL_CC_IB_SR_FRAG_VALID_READY = 0x30,
175 CB_PERF_SEL_CC_IB_SR_FRAG_VALID_READYB = 0x31,
176 CB_PERF_SEL_CC_IB_SR_FRAG_VALIDB_READY = 0x32,
177 CB_PERF_SEL_CC_IB_SR_FRAG_VALIDB_READYB = 0x33,
178 CB_PERF_SEL_CC_IB_TB_FRAG_VALID_READY = 0x34,
179 CB_PERF_SEL_CC_IB_TB_FRAG_VALID_READYB = 0x35,
180 CB_PERF_SEL_CC_IB_TB_FRAG_VALIDB_READY = 0x36,
181 CB_PERF_SEL_CC_IB_TB_FRAG_VALIDB_READYB = 0x37,
182 CB_PERF_SEL_CC_RB_BC_EVENFRAG_VALID_READY = 0x38,
183 CB_PERF_SEL_CC_RB_BC_EVENFRAG_VALID_READYB = 0x39,
184 CB_PERF_SEL_CC_RB_BC_EVENFRAG_VALIDB_READY = 0x3a,
185 CB_PERF_SEL_CC_RB_BC_EVENFRAG_VALIDB_READYB = 0x3b,
186 CB_PERF_SEL_CC_RB_BC_ODDFRAG_VALID_READY = 0x3c,
187 CB_PERF_SEL_CC_RB_BC_ODDFRAG_VALID_READYB = 0x3d,
188 CB_PERF_SEL_CC_RB_BC_ODDFRAG_VALIDB_READY = 0x3e,
189 CB_PERF_SEL_CC_RB_BC_ODDFRAG_VALIDB_READYB = 0x3f,
190 CB_PERF_SEL_CC_BC_CS_FRAG_VALID = 0x40,
191 CB_PERF_SEL_CM_CACHE_HIT = 0x41,
192 CB_PERF_SEL_CM_CACHE_TAG_MISS = 0x42,
193 CB_PERF_SEL_CM_CACHE_SECTOR_MISS = 0x43,
194 CB_PERF_SEL_CM_CACHE_REEVICTION_STALL = 0x44,
195 CB_PERF_SEL_CM_CACHE_EVICT_NONZERO_INFLIGHT_STALL= 0x45,
196 CB_PERF_SEL_CM_CACHE_REPLACE_PENDING_EVICT_STALL = 0x46,
197 CB_PERF_SEL_CM_CACHE_INFLIGHT_COUNTER_MAXIMUM_STALL= 0x47,
198 CB_PERF_SEL_CM_CACHE_READ_OUTPUT_STALL = 0x48,
199 CB_PERF_SEL_CM_CACHE_WRITE_OUTPUT_STALL = 0x49,
200 CB_PERF_SEL_CM_CACHE_ACK_OUTPUT_STALL = 0x4a,
201 CB_PERF_SEL_CM_CACHE_STALL = 0x4b,
202 CB_PERF_SEL_CM_CACHE_FLUSH = 0x4c,
203 CB_PERF_SEL_CM_CACHE_TAGS_FLUSHED = 0x4d,
204 CB_PERF_SEL_CM_CACHE_SECTORS_FLUSHED = 0x4e,
205 CB_PERF_SEL_CM_CACHE_DIRTY_SECTORS_FLUSHED = 0x4f,
206 CB_PERF_SEL_FC_CACHE_HIT = 0x50,
207 CB_PERF_SEL_FC_CACHE_TAG_MISS = 0x51,
208 CB_PERF_SEL_FC_CACHE_SECTOR_MISS = 0x52,
209 CB_PERF_SEL_FC_CACHE_REEVICTION_STALL = 0x53,
210 CB_PERF_SEL_FC_CACHE_EVICT_NONZERO_INFLIGHT_STALL= 0x54,
211 CB_PERF_SEL_FC_CACHE_REPLACE_PENDING_EVICT_STALL = 0x55,
212 CB_PERF_SEL_FC_CACHE_INFLIGHT_COUNTER_MAXIMUM_STALL= 0x56,
213 CB_PERF_SEL_FC_CACHE_READ_OUTPUT_STALL = 0x57,
214 CB_PERF_SEL_FC_CACHE_WRITE_OUTPUT_STALL = 0x58,
215 CB_PERF_SEL_FC_CACHE_ACK_OUTPUT_STALL = 0x59,
216 CB_PERF_SEL_FC_CACHE_STALL = 0x5a,
217 CB_PERF_SEL_FC_CACHE_FLUSH = 0x5b,
218 CB_PERF_SEL_FC_CACHE_TAGS_FLUSHED = 0x5c,
219 CB_PERF_SEL_FC_CACHE_SECTORS_FLUSHED = 0x5d,
220 CB_PERF_SEL_FC_CACHE_DIRTY_SECTORS_FLUSHED = 0x5e,
221 CB_PERF_SEL_CC_CACHE_HIT = 0x5f,
222 CB_PERF_SEL_CC_CACHE_TAG_MISS = 0x60,
223 CB_PERF_SEL_CC_CACHE_SECTOR_MISS = 0x61,
224 CB_PERF_SEL_CC_CACHE_REEVICTION_STALL = 0x62,
225 CB_PERF_SEL_CC_CACHE_EVICT_NONZERO_INFLIGHT_STALL= 0x63,
226 CB_PERF_SEL_CC_CACHE_REPLACE_PENDING_EVICT_STALL = 0x64,
227 CB_PERF_SEL_CC_CACHE_INFLIGHT_COUNTER_MAXIMUM_STALL= 0x65,
228 CB_PERF_SEL_CC_CACHE_READ_OUTPUT_STALL = 0x66,
229 CB_PERF_SEL_CC_CACHE_WRITE_OUTPUT_STALL = 0x67,
230 CB_PERF_SEL_CC_CACHE_ACK_OUTPUT_STALL = 0x68,
231 CB_PERF_SEL_CC_CACHE_STALL = 0x69,
232 CB_PERF_SEL_CC_CACHE_FLUSH = 0x6a,
233 CB_PERF_SEL_CC_CACHE_TAGS_FLUSHED = 0x6b,
234 CB_PERF_SEL_CC_CACHE_SECTORS_FLUSHED = 0x6c,
235 CB_PERF_SEL_CC_CACHE_DIRTY_SECTORS_FLUSHED = 0x6d,
236 CB_PERF_SEL_CC_CACHE_WA_TO_RMW_CONVERSION = 0x6e,
237 CB_PERF_SEL_CC_CACHE_READS_SAVED_DUE_TO_DCC = 0x6f,
238 CB_PERF_SEL_CB_TAP_WRREQ_VALID_READY = 0x70,
239 CB_PERF_SEL_CB_TAP_WRREQ_VALID_READYB = 0x71,
240 CB_PERF_SEL_CB_TAP_WRREQ_VALIDB_READY = 0x72,
241 CB_PERF_SEL_CB_TAP_WRREQ_VALIDB_READYB = 0x73,
242 CB_PERF_SEL_CM_MC_WRITE_REQUEST = 0x74,
243 CB_PERF_SEL_FC_MC_WRITE_REQUEST = 0x75,
244 CB_PERF_SEL_CC_MC_WRITE_REQUEST = 0x76,
245 CB_PERF_SEL_CM_MC_WRITE_REQUESTS_IN_FLIGHT = 0x77,
246 CB_PERF_SEL_FC_MC_WRITE_REQUESTS_IN_FLIGHT = 0x78,
247 CB_PERF_SEL_CC_MC_WRITE_REQUESTS_IN_FLIGHT = 0x79,
248 CB_PERF_SEL_CB_TAP_RDREQ_VALID_READY = 0x7a,
249 CB_PERF_SEL_CB_TAP_RDREQ_VALID_READYB = 0x7b,
250 CB_PERF_SEL_CB_TAP_RDREQ_VALIDB_READY = 0x7c,
251 CB_PERF_SEL_CB_TAP_RDREQ_VALIDB_READYB = 0x7d,
252 CB_PERF_SEL_CM_MC_READ_REQUEST = 0x7e,
253 CB_PERF_SEL_FC_MC_READ_REQUEST = 0x7f,
254 CB_PERF_SEL_CC_MC_READ_REQUEST = 0x80,
255 CB_PERF_SEL_CM_MC_READ_REQUESTS_IN_FLIGHT = 0x81,
256 CB_PERF_SEL_FC_MC_READ_REQUESTS_IN_FLIGHT = 0x82,
257 CB_PERF_SEL_CC_MC_READ_REQUESTS_IN_FLIGHT = 0x83,
258 CB_PERF_SEL_CM_TQ_FULL = 0x84,
259 CB_PERF_SEL_CM_TQ_FIFO_TILE_RESIDENCY_STALL = 0x85,
260 CB_PERF_SEL_FC_QUAD_RDLAT_FIFO_FULL = 0x86,
261 CB_PERF_SEL_FC_TILE_RDLAT_FIFO_FULL = 0x87,
262 CB_PERF_SEL_FC_RDLAT_FIFO_QUAD_RESIDENCY_STALL = 0x88,
263 CB_PERF_SEL_FOP_FMASK_RAW_STALL = 0x89,
264 CB_PERF_SEL_FOP_FMASK_BYPASS_STALL = 0x8a,
265 CB_PERF_SEL_CC_SF_FULL = 0x8b,
266 CB_PERF_SEL_CC_RB_FULL = 0x8c,
267 CB_PERF_SEL_CC_EVENFIFO_QUAD_RESIDENCY_STALL = 0x8d,
268 CB_PERF_SEL_CC_ODDFIFO_QUAD_RESIDENCY_STALL = 0x8e,
269 CB_PERF_SEL_BLENDER_RAW_HAZARD_STALL = 0x8f,
270 CB_PERF_SEL_EVENT = 0x90,
271 CB_PERF_SEL_EVENT_CACHE_FLUSH_TS = 0x91,
272 CB_PERF_SEL_EVENT_CONTEXT_DONE = 0x92,
273 CB_PERF_SEL_EVENT_CACHE_FLUSH = 0x93,
274 CB_PERF_SEL_EVENT_CACHE_FLUSH_AND_INV_TS_EVENT = 0x94,
275 CB_PERF_SEL_EVENT_CACHE_FLUSH_AND_INV_EVENT = 0x95,
276 CB_PERF_SEL_EVENT_FLUSH_AND_INV_CB_DATA_TS = 0x96,
277 CB_PERF_SEL_EVENT_FLUSH_AND_INV_CB_META = 0x97,
278 CB_PERF_SEL_CC_SURFACE_SYNC = 0x98,
279 CB_PERF_SEL_CMASK_READ_DATA_0xC = 0x99,
280 CB_PERF_SEL_CMASK_READ_DATA_0xD = 0x9a,
281 CB_PERF_SEL_CMASK_READ_DATA_0xE = 0x9b,
282 CB_PERF_SEL_CMASK_READ_DATA_0xF = 0x9c,
283 CB_PERF_SEL_CMASK_WRITE_DATA_0xC = 0x9d,
284 CB_PERF_SEL_CMASK_WRITE_DATA_0xD = 0x9e,
285 CB_PERF_SEL_CMASK_WRITE_DATA_0xE = 0x9f,
286 CB_PERF_SEL_CMASK_WRITE_DATA_0xF = 0xa0,
287 CB_PERF_SEL_TWO_PROBE_QUAD_FRAGMENT = 0xa1,
288 CB_PERF_SEL_EXPORT_32_ABGR_QUAD_FRAGMENT = 0xa2,
289 CB_PERF_SEL_DUAL_SOURCE_COLOR_QUAD_FRAGMENT = 0xa3,
290 CB_PERF_SEL_QUAD_HAS_1_FRAGMENT_BEFORE_UPDATE = 0xa4,
291 CB_PERF_SEL_QUAD_HAS_2_FRAGMENTS_BEFORE_UPDATE = 0xa5,
292 CB_PERF_SEL_QUAD_HAS_3_FRAGMENTS_BEFORE_UPDATE = 0xa6,
293 CB_PERF_SEL_QUAD_HAS_4_FRAGMENTS_BEFORE_UPDATE = 0xa7,
294 CB_PERF_SEL_QUAD_HAS_5_FRAGMENTS_BEFORE_UPDATE = 0xa8,
295 CB_PERF_SEL_QUAD_HAS_6_FRAGMENTS_BEFORE_UPDATE = 0xa9,
296 CB_PERF_SEL_QUAD_HAS_7_FRAGMENTS_BEFORE_UPDATE = 0xaa,
297 CB_PERF_SEL_QUAD_HAS_8_FRAGMENTS_BEFORE_UPDATE = 0xab,
298 CB_PERF_SEL_QUAD_HAS_1_FRAGMENT_AFTER_UPDATE = 0xac,
299 CB_PERF_SEL_QUAD_HAS_2_FRAGMENTS_AFTER_UPDATE = 0xad,
300 CB_PERF_SEL_QUAD_HAS_3_FRAGMENTS_AFTER_UPDATE = 0xae,
301 CB_PERF_SEL_QUAD_HAS_4_FRAGMENTS_AFTER_UPDATE = 0xaf,
302 CB_PERF_SEL_QUAD_HAS_5_FRAGMENTS_AFTER_UPDATE = 0xb0,
303 CB_PERF_SEL_QUAD_HAS_6_FRAGMENTS_AFTER_UPDATE = 0xb1,
304 CB_PERF_SEL_QUAD_HAS_7_FRAGMENTS_AFTER_UPDATE = 0xb2,
305 CB_PERF_SEL_QUAD_HAS_8_FRAGMENTS_AFTER_UPDATE = 0xb3,
306 CB_PERF_SEL_QUAD_ADDED_1_FRAGMENT = 0xb4,
307 CB_PERF_SEL_QUAD_ADDED_2_FRAGMENTS = 0xb5,
308 CB_PERF_SEL_QUAD_ADDED_3_FRAGMENTS = 0xb6,
309 CB_PERF_SEL_QUAD_ADDED_4_FRAGMENTS = 0xb7,
310 CB_PERF_SEL_QUAD_ADDED_5_FRAGMENTS = 0xb8,
311 CB_PERF_SEL_QUAD_ADDED_6_FRAGMENTS = 0xb9,
312 CB_PERF_SEL_QUAD_ADDED_7_FRAGMENTS = 0xba,
313 CB_PERF_SEL_QUAD_REMOVED_1_FRAGMENT = 0xbb,
314 CB_PERF_SEL_QUAD_REMOVED_2_FRAGMENTS = 0xbc,
315 CB_PERF_SEL_QUAD_REMOVED_3_FRAGMENTS = 0xbd,
316 CB_PERF_SEL_QUAD_REMOVED_4_FRAGMENTS = 0xbe,
317 CB_PERF_SEL_QUAD_REMOVED_5_FRAGMENTS = 0xbf,
318 CB_PERF_SEL_QUAD_REMOVED_6_FRAGMENTS = 0xc0,
319 CB_PERF_SEL_QUAD_REMOVED_7_FRAGMENTS = 0xc1,
320 CB_PERF_SEL_QUAD_READS_FRAGMENT_0 = 0xc2,
321 CB_PERF_SEL_QUAD_READS_FRAGMENT_1 = 0xc3,
322 CB_PERF_SEL_QUAD_READS_FRAGMENT_2 = 0xc4,
323 CB_PERF_SEL_QUAD_READS_FRAGMENT_3 = 0xc5,
324 CB_PERF_SEL_QUAD_READS_FRAGMENT_4 = 0xc6,
325 CB_PERF_SEL_QUAD_READS_FRAGMENT_5 = 0xc7,
326 CB_PERF_SEL_QUAD_READS_FRAGMENT_6 = 0xc8,
327 CB_PERF_SEL_QUAD_READS_FRAGMENT_7 = 0xc9,
328 CB_PERF_SEL_QUAD_WRITES_FRAGMENT_0 = 0xca,
329 CB_PERF_SEL_QUAD_WRITES_FRAGMENT_1 = 0xcb,
330 CB_PERF_SEL_QUAD_WRITES_FRAGMENT_2 = 0xcc,
331 CB_PERF_SEL_QUAD_WRITES_FRAGMENT_3 = 0xcd,
332 CB_PERF_SEL_QUAD_WRITES_FRAGMENT_4 = 0xce,
333 CB_PERF_SEL_QUAD_WRITES_FRAGMENT_5 = 0xcf,
334 CB_PERF_SEL_QUAD_WRITES_FRAGMENT_6 = 0xd0,
335 CB_PERF_SEL_QUAD_WRITES_FRAGMENT_7 = 0xd1,
336 CB_PERF_SEL_QUAD_BLEND_OPT_DONT_READ_DST = 0xd2,
337 CB_PERF_SEL_QUAD_BLEND_OPT_BLEND_BYPASS = 0xd3,
338 CB_PERF_SEL_QUAD_BLEND_OPT_DISCARD_PIXELS = 0xd4,
339 CB_PERF_SEL_QUAD_DST_READ_COULD_HAVE_BEEN_OPTIMIZED= 0xd5,
340 CB_PERF_SEL_QUAD_BLENDING_COULD_HAVE_BEEN_BYPASSED= 0xd6,
341 CB_PERF_SEL_QUAD_COULD_HAVE_BEEN_DISCARDED = 0xd7,
342 CB_PERF_SEL_BLEND_OPT_PIXELS_RESULT_EQ_DEST = 0xd8,
343 CB_PERF_SEL_DRAWN_BUSY = 0xd9,
344 CB_PERF_SEL_TILE_TO_CMR_REGION_BUSY = 0xda,
345 CB_PERF_SEL_CMR_TO_FCR_REGION_BUSY = 0xdb,
346 CB_PERF_SEL_FCR_TO_CCR_REGION_BUSY = 0xdc,
347 CB_PERF_SEL_CCR_TO_CCW_REGION_BUSY = 0xdd,
348 CB_PERF_SEL_FC_PF_SLOW_MODE_QUAD_EMPTY_HALF_DROPPED= 0xde,
349 CB_PERF_SEL_FC_SEQUENCER_CLEAR = 0xdf,
350 CB_PERF_SEL_FC_SEQUENCER_ELIMINATE_FAST_CLEAR = 0xe0,
351 CB_PERF_SEL_FC_SEQUENCER_FMASK_DECOMPRESS = 0xe1,
352 CB_PERF_SEL_FC_SEQUENCER_FMASK_COMPRESSION_DISABLE= 0xe2,
353 CB_PERF_SEL_FC_KEYID_RDLAT_FIFO_FULL = 0xe3,
354 CB_PERF_SEL_FC_DOC_IS_STALLED = 0xe4,
355 CB_PERF_SEL_FC_DOC_MRTS_NOT_COMBINED = 0xe5,
356 CB_PERF_SEL_FC_DOC_MRTS_COMBINED = 0xe6,
357 CB_PERF_SEL_FC_DOC_QTILE_CAM_MISS = 0xe7,
358 CB_PERF_SEL_FC_DOC_QTILE_CAM_HIT = 0xe8,
359 CB_PERF_SEL_FC_DOC_CLINE_CAM_MISS = 0xe9,
360 CB_PERF_SEL_FC_DOC_CLINE_CAM_HIT = 0xea,
361 CB_PERF_SEL_FC_DOC_QUAD_PTR_FIFO_IS_FULL = 0xeb,
362 CB_PERF_SEL_FC_DOC_OVERWROTE_1_SECTOR = 0xec,
363 CB_PERF_SEL_FC_DOC_OVERWROTE_2_SECTORS = 0xed,
364 CB_PERF_SEL_FC_DOC_OVERWROTE_3_SECTORS = 0xee,
365 CB_PERF_SEL_FC_DOC_OVERWROTE_4_SECTORS = 0xef,
366 CB_PERF_SEL_FC_DOC_TOTAL_OVERWRITTEN_SECTORS = 0xf0,
367 CB_PERF_SEL_FC_DCC_CACHE_HIT = 0xf1,
368 CB_PERF_SEL_FC_DCC_CACHE_TAG_MISS = 0xf2,
369 CB_PERF_SEL_FC_DCC_CACHE_SECTOR_MISS = 0xf3,
370 CB_PERF_SEL_FC_DCC_CACHE_REEVICTION_STALL = 0xf4,
371 CB_PERF_SEL_FC_DCC_CACHE_EVICT_NONZERO_INFLIGHT_STALL= 0xf5,
372 CB_PERF_SEL_FC_DCC_CACHE_REPLACE_PENDING_EVICT_STALL= 0xf6,
373 CB_PERF_SEL_FC_DCC_CACHE_INFLIGHT_COUNTER_MAXIMUM_STALL= 0xf7,
374 CB_PERF_SEL_FC_DCC_CACHE_READ_OUTPUT_STALL = 0xf8,
375 CB_PERF_SEL_FC_DCC_CACHE_WRITE_OUTPUT_STALL = 0xf9,
376 CB_PERF_SEL_FC_DCC_CACHE_ACK_OUTPUT_STALL = 0xfa,
377 CB_PERF_SEL_FC_DCC_CACHE_STALL = 0xfb,
378 CB_PERF_SEL_FC_DCC_CACHE_FLUSH = 0xfc,
379 CB_PERF_SEL_FC_DCC_CACHE_TAGS_FLUSHED = 0xfd,
380 CB_PERF_SEL_FC_DCC_CACHE_SECTORS_FLUSHED = 0xfe,
381 CB_PERF_SEL_FC_DCC_CACHE_DIRTY_SECTORS_FLUSHED = 0xff,
382 CB_PERF_SEL_CC_DCC_BEYOND_TILE_SPLIT = 0x100,
383 CB_PERF_SEL_FC_MC_DCC_WRITE_REQUEST = 0x101,
384 CB_PERF_SEL_FC_MC_DCC_WRITE_REQUESTS_IN_FLIGHT = 0x102,
385 CB_PERF_SEL_FC_MC_DCC_READ_REQUEST = 0x103,
386 CB_PERF_SEL_FC_MC_DCC_READ_REQUESTS_IN_FLIGHT = 0x104,
387 CB_PERF_SEL_CC_DCC_RDREQ_STALL = 0x105,
388 CB_PERF_SEL_CC_DCC_DECOMPRESS_TIDS_IN = 0x106,
389 CB_PERF_SEL_CC_DCC_DECOMPRESS_TIDS_OUT = 0x107,
390 CB_PERF_SEL_CC_DCC_COMPRESS_TIDS_IN = 0x108,
391 CB_PERF_SEL_CC_DCC_COMPRESS_TIDS_OUT = 0x109,
392 CB_PERF_SEL_FC_DCC_KEY_VALUE__CLEAR = 0x10a,
393 CB_PERF_SEL_CC_DCC_KEY_VALUE__4_BLOCKS__2TO1 = 0x10b,
394 CB_PERF_SEL_CC_DCC_KEY_VALUE__3BLOCKS_2TO1__1BLOCK_2TO2= 0x10c,
395 CB_PERF_SEL_CC_DCC_KEY_VALUE__2BLOCKS_2TO1__1BLOCK_2TO2__1BLOCK_2TO1= 0x10d,
396 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__1BLOCK_2TO2__2BLOCKS_2TO1= 0x10e,
397 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO2__3BLOCKS_2TO1= 0x10f,
398 CB_PERF_SEL_CC_DCC_KEY_VALUE__2BLOCKS_2TO1__2BLOCKS_2TO2= 0x110,
399 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__2BLOCKS_2TO2__1BLOCK_2TO1= 0x111,
400 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__1BLOCK_2TO2__1BLOCK_2TO1__1BLOCK_2TO2= 0x112,
401 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO2__1BLOCK_2TO1__1BLOCK_2TO2__1BLOCK_2TO1= 0x113,
402 CB_PERF_SEL_CC_DCC_KEY_VALUE__2BLOCKS_2TO2__2BLOCKS_2TO1= 0x114,
403 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO2__2BLOCKS_2TO1__1BLOCK_2TO2= 0x115,
404 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__3BLOCKS_2TO2= 0x116,
405 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO2__1BLOCK_2TO1__2BLOCKS_2TO2= 0x117,
406 CB_PERF_SEL_CC_DCC_KEY_VALUE__2BLOCKS_2TO2__1BLOCK_2TO1__1BLOCK_2TO2= 0x118,
407 CB_PERF_SEL_CC_DCC_KEY_VALUE__3BLOCKS_2TO2__1BLOCK_2TO1= 0x119,
408 CB_PERF_SEL_CC_DCC_KEY_VALUE__2BLOCKS_4TO1 = 0x11a,
409 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO1__1BLOCK_4TO2= 0x11b,
410 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO1__1BLOCK_4TO3= 0x11c,
411 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO1__1BLOCK_4TO4= 0x11d,
412 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO2__1BLOCK_4TO1= 0x11e,
413 CB_PERF_SEL_CC_DCC_KEY_VALUE__2BLOCKS_4TO2 = 0x11f,
414 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO2__1BLOCK_4TO3= 0x120,
415 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO2__1BLOCK_4TO4= 0x121,
416 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO3__1BLOCK_4TO1= 0x122,
417 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO3__1BLOCK_4TO2= 0x123,
418 CB_PERF_SEL_CC_DCC_KEY_VALUE__2BLOCKS_4TO3 = 0x124,
419 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO3__1BLOCK_4TO4= 0x125,
420 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO4__1BLOCK_4TO1= 0x126,
421 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO4__1BLOCK_4TO2= 0x127,
422 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO4__1BLOCK_4TO3= 0x128,
423 CB_PERF_SEL_CC_DCC_KEY_VALUE__2BLOCKS_2TO1__1BLOCK_4TO1= 0x129,
424 CB_PERF_SEL_CC_DCC_KEY_VALUE__2BLOCKS_2TO1__1BLOCK_4TO2= 0x12a,
425 CB_PERF_SEL_CC_DCC_KEY_VALUE__2BLOCKS_2TO1__1BLOCK_4TO3= 0x12b,
426 CB_PERF_SEL_CC_DCC_KEY_VALUE__2BLOCKS_2TO1__1BLOCK_4TO4= 0x12c,
427 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__1BLOCK_2TO2__1BLOCK_4TO1= 0x12d,
428 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__1BLOCK_2TO2__1BLOCK_4TO2= 0x12e,
429 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__1BLOCK_2TO2__1BLOCK_4TO3= 0x12f,
430 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__1BLOCK_2TO2__1BLOCK_4TO4= 0x130,
431 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO2__1BLOCK_2TO1__1BLOCK_4TO1= 0x131,
432 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO2__1BLOCK_2TO1__1BLOCK_4TO2= 0x132,
433 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO2__1BLOCK_2TO1__1BLOCK_4TO3= 0x133,
434 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO2__1BLOCK_2TO1__1BLOCK_4TO4= 0x134,
435 CB_PERF_SEL_CC_DCC_KEY_VALUE__2BLOCKS_2TO2__1BLOCK_4TO1= 0x135,
436 CB_PERF_SEL_CC_DCC_KEY_VALUE__2BLOCKS_2TO2__1BLOCK_4TO2= 0x136,
437 CB_PERF_SEL_CC_DCC_KEY_VALUE__2BLOCKS_2TO2__1BLOCK_4TO3= 0x137,
438 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__1BLOCK_4TO1__1BLOCK_2TO1= 0x138,
439 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__1BLOCK_4TO2__1BLOCK_2TO1= 0x139,
440 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__1BLOCK_4TO3__1BLOCK_2TO1= 0x13a,
441 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__1BLOCK_4TO4__1BLOCK_2TO1= 0x13b,
442 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO2__1BLOCK_4TO1__1BLOCK_2TO1= 0x13c,
443 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO2__1BLOCK_4TO2__1BLOCK_2TO1= 0x13d,
444 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO2__1BLOCK_4TO3__1BLOCK_2TO1= 0x13e,
445 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO2__1BLOCK_4TO4__1BLOCK_2TO1= 0x13f,
446 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__1BLOCK_4TO1__1BLOCK_2TO2= 0x140,
447 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__1BLOCK_4TO2__1BLOCK_2TO2= 0x141,
448 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__1BLOCK_4TO3__1BLOCK_2TO2= 0x142,
449 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__1BLOCK_4TO4__1BLOCK_2TO2= 0x143,
450 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO2__1BLOCK_4TO1__1BLOCK_2TO2= 0x144,
451 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO2__1BLOCK_4TO2__1BLOCK_2TO2= 0x145,
452 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO2__1BLOCK_4TO3__1BLOCK_2TO2= 0x146,
453 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO1__2BLOCKS_2TO1= 0x147,
454 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO2__2BLOCKS_2TO1= 0x148,
455 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO3__2BLOCKS_2TO1= 0x149,
456 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO4__2BLOCKS_2TO1= 0x14a,
457 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO1__2BLOCKS_2TO2= 0x14b,
458 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO2__2BLOCKS_2TO2= 0x14c,
459 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO3__2BLOCKS_2TO2= 0x14d,
460 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO1__1BLOCK_2TO1__1BLOCK_2TO2= 0x14e,
461 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO2__1BLOCK_2TO1__1BLOCK_2TO2= 0x14f,
462 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO3__1BLOCK_2TO1__1BLOCK_2TO2= 0x150,
463 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO4__1BLOCK_2TO1__1BLOCK_2TO2= 0x151,
464 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO1__1BLOCK_2TO2__1BLOCK_2TO1= 0x152,
465 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO2__1BLOCK_2TO2__1BLOCK_2TO1= 0x153,
466 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO3__1BLOCK_2TO2__1BLOCK_2TO1= 0x154,
467 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_4TO4__1BLOCK_2TO2__1BLOCK_2TO1= 0x155,
468 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__1BLOCK_6TO1= 0x156,
469 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__1BLOCK_6TO2= 0x157,
470 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__1BLOCK_6TO3= 0x158,
471 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__1BLOCK_6TO4= 0x159,
472 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__1BLOCK_6TO5= 0x15a,
473 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__1BLOCK_6TO6= 0x15b,
474 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__INV0 = 0x15c,
475 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO1__INV1 = 0x15d,
476 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO2__1BLOCK_6TO1= 0x15e,
477 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO2__1BLOCK_6TO2= 0x15f,
478 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO2__1BLOCK_6TO3= 0x160,
479 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO2__1BLOCK_6TO4= 0x161,
480 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO2__1BLOCK_6TO5= 0x162,
481 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO2__INV0 = 0x163,
482 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_2TO2__INV1 = 0x164,
483 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_6TO1__1BLOCK_2TO1= 0x165,
484 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_6TO2__1BLOCK_2TO1= 0x166,
485 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_6TO3__1BLOCK_2TO1= 0x167,
486 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_6TO4__1BLOCK_2TO1= 0x168,
487 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_6TO5__1BLOCK_2TO1= 0x169,
488 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_6TO6__1BLOCK_2TO1= 0x16a,
489 CB_PERF_SEL_CC_DCC_KEY_VALUE__INV0__1BLOCK_2TO1 = 0x16b,
490 CB_PERF_SEL_CC_DCC_KEY_VALUE__INV1__1BLOCK_2TO1 = 0x16c,
491 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_6TO1__1BLOCK_2TO2= 0x16d,
492 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_6TO2__1BLOCK_2TO2= 0x16e,
493 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_6TO3__1BLOCK_2TO2= 0x16f,
494 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_6TO4__1BLOCK_2TO2= 0x170,
495 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_6TO5__1BLOCK_2TO2= 0x171,
496 CB_PERF_SEL_CC_DCC_KEY_VALUE__INV0__1BLOCK_2TO2 = 0x172,
497 CB_PERF_SEL_CC_DCC_KEY_VALUE__INV1__1BLOCK_2TO2 = 0x173,
498 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_8TO1 = 0x174,
499 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_8TO2 = 0x175,
500 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_8TO3 = 0x176,
501 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_8TO4 = 0x177,
502 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_8TO5 = 0x178,
503 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_8TO6 = 0x179,
504 CB_PERF_SEL_CC_DCC_KEY_VALUE__1BLOCK_8TO7 = 0x17a,
505 CB_PERF_SEL_CC_DCC_KEY_VALUE__UNCOMPRESSED = 0x17b,
506 CB_PERF_SEL_CC_DCC_COMPRESS_RATIO_2TO1 = 0x17c,
507 CB_PERF_SEL_CC_DCC_COMPRESS_RATIO_4TO1 = 0x17d,
508 CB_PERF_SEL_CC_DCC_COMPRESS_RATIO_4TO2 = 0x17e,
509 CB_PERF_SEL_CC_DCC_COMPRESS_RATIO_4TO3 = 0x17f,
510 CB_PERF_SEL_CC_DCC_COMPRESS_RATIO_6TO1 = 0x180,
511 CB_PERF_SEL_CC_DCC_COMPRESS_RATIO_6TO2 = 0x181,
512 CB_PERF_SEL_CC_DCC_COMPRESS_RATIO_6TO3 = 0x182,
513 CB_PERF_SEL_CC_DCC_COMPRESS_RATIO_6TO4 = 0x183,
514 CB_PERF_SEL_CC_DCC_COMPRESS_RATIO_6TO5 = 0x184,
515 CB_PERF_SEL_CC_DCC_COMPRESS_RATIO_8TO1 = 0x185,
516 CB_PERF_SEL_CC_DCC_COMPRESS_RATIO_8TO2 = 0x186,
517 CB_PERF_SEL_CC_DCC_COMPRESS_RATIO_8TO3 = 0x187,
518 CB_PERF_SEL_CC_DCC_COMPRESS_RATIO_8TO4 = 0x188,
519 CB_PERF_SEL_CC_DCC_COMPRESS_RATIO_8TO5 = 0x189,
520 CB_PERF_SEL_CC_DCC_COMPRESS_RATIO_8TO6 = 0x18a,
521 CB_PERF_SEL_CC_DCC_COMPRESS_RATIO_8TO7 = 0x18b,
522 CB_PERF_SEL_RBP_EXPORT_8PIX_LIT_BOTH = 0x18c,
523 CB_PERF_SEL_RBP_EXPORT_8PIX_LIT_LEFT = 0x18d,
524 CB_PERF_SEL_RBP_EXPORT_8PIX_LIT_RIGHT = 0x18e,
525 CB_PERF_SEL_RBP_SPLIT_MICROTILE = 0x18f,
526 CB_PERF_SEL_RBP_SPLIT_AA_SAMPLE_MASK = 0x190,
527 CB_PERF_SEL_RBP_SPLIT_PARTIAL_TARGET_MASK = 0x191,
528 CB_PERF_SEL_RBP_SPLIT_LINEAR_ADDRESSING = 0x192,
529 CB_PERF_SEL_RBP_SPLIT_AA_NO_FMASK_COMPRESS = 0x193,
530 CB_PERF_SEL_RBP_INSERT_MISSING_LAST_QUAD = 0x194,
531} CBPerfSel;
/*
 * CB perfcounter op filter select. Values are hardware-defined register
 * encodings — do not renumber. (Fused scrape line numbers removed so the
 * header is valid C again.)
 */
typedef enum CBPerfOpFilterSel {
	CB_PERF_OP_FILTER_SEL_WRITE_ONLY                 = 0x0,
	CB_PERF_OP_FILTER_SEL_NEEDS_DESTINATION          = 0x1,
	CB_PERF_OP_FILTER_SEL_RESOLVE                    = 0x2,
	CB_PERF_OP_FILTER_SEL_DECOMPRESS                 = 0x3,
	CB_PERF_OP_FILTER_SEL_FMASK_DECOMPRESS           = 0x4,
	CB_PERF_OP_FILTER_SEL_ELIMINATE_FAST_CLEAR       = 0x5,
} CBPerfOpFilterSel;
/* CB perfcounter clear/non-clear filter select (hardware-defined values). */
typedef enum CBPerfClearFilterSel {
	CB_PERF_CLEAR_FILTER_SEL_NONCLEAR                = 0x0,
	CB_PERF_CLEAR_FILTER_SEL_CLEAR                   = 0x1,
} CBPerfClearFilterSel;
/* Command-processor ring identifier (hardware-defined values). */
typedef enum CP_RING_ID {
	RINGID0                                          = 0x0,
	RINGID1                                          = 0x1,
	RINGID2                                          = 0x2,
	RINGID3                                          = 0x3,
} CP_RING_ID;
/* Command-processor pipe identifier (hardware-defined values). */
typedef enum CP_PIPE_ID {
	PIPE_ID0                                         = 0x0,
	PIPE_ID1                                         = 0x1,
	PIPE_ID2                                         = 0x2,
	PIPE_ID3                                         = 0x3,
} CP_PIPE_ID;
/* Command-processor micro-engine (ME) identifier (hardware-defined values). */
typedef enum CP_ME_ID {
	ME_ID0                                           = 0x0,
	ME_ID1                                           = 0x1,
	ME_ID2                                           = 0x2,
	ME_ID3                                           = 0x3,
} CP_ME_ID;
/*
 * Streaming perfmon (SPM) state machine encodings. Note the members are
 * prefixed STRM_ while the type is SPM_ — this matches the upstream header
 * and must not be "fixed" (callers use these exact names).
 */
typedef enum SPM_PERFMON_STATE {
	STRM_PERFMON_STATE_DISABLE_AND_RESET             = 0x0,
	STRM_PERFMON_STATE_START_COUNTING                = 0x1,
	STRM_PERFMON_STATE_STOP_COUNTING                 = 0x2,
	STRM_PERFMON_STATE_RESERVED_3                    = 0x3,
	STRM_PERFMON_STATE_DISABLE_AND_RESET_PHANTOM     = 0x4,
	STRM_PERFMON_STATE_COUNT_AND_DUMP_PHANTOM        = 0x5,
} SPM_PERFMON_STATE;
/* CP perfmon state machine encodings (hardware-defined values). */
typedef enum CP_PERFMON_STATE {
	CP_PERFMON_STATE_DISABLE_AND_RESET               = 0x0,
	CP_PERFMON_STATE_START_COUNTING                  = 0x1,
	CP_PERFMON_STATE_STOP_COUNTING                   = 0x2,
	CP_PERFMON_STATE_RESERVED_3                      = 0x3,
	CP_PERFMON_STATE_DISABLE_AND_RESET_PHANTOM       = 0x4,
	CP_PERFMON_STATE_COUNT_AND_DUMP_PHANTOM          = 0x5,
} CP_PERFMON_STATE;
/* CP perfmon enable-mode encodings (hardware-defined values). */
typedef enum CP_PERFMON_ENABLE_MODE {
	CP_PERFMON_ENABLE_MODE_ALWAYS_COUNT              = 0x0,
	CP_PERFMON_ENABLE_MODE_RESERVED_1                = 0x1,
	CP_PERFMON_ENABLE_MODE_COUNT_CONTEXT_TRUE        = 0x2,
	CP_PERFMON_ENABLE_MODE_COUNT_CONTEXT_FALSE       = 0x3,
} CP_PERFMON_ENABLE_MODE;
/*
 * CPG (CP graphics) perfcounter event selects. Values are hardware-defined
 * register encodings — do not renumber or rename.
 */
typedef enum CPG_PERFCOUNT_SEL {
	CPG_PERF_SEL_ALWAYS_COUNT                        = 0x0,
	CPG_PERF_SEL_RBIU_FIFO_FULL                      = 0x1,
	CPG_PERF_SEL_CSF_RTS_BUT_MIU_NOT_RTR             = 0x2,
	CPG_PERF_SEL_CSF_ST_BASE_SIZE_FIFO_FULL          = 0x3,
	CPG_PERF_SEL_CP_GRBM_DWORDS_SENT                 = 0x4,
	CPG_PERF_SEL_ME_PARSER_BUSY                      = 0x5,
	CPG_PERF_SEL_COUNT_TYPE0_PACKETS                 = 0x6,
	CPG_PERF_SEL_COUNT_TYPE3_PACKETS                 = 0x7,
	CPG_PERF_SEL_CSF_FETCHING_CMD_BUFFERS            = 0x8,
	CPG_PERF_SEL_CP_GRBM_OUT_OF_CREDITS              = 0x9,
	CPG_PERF_SEL_CP_PFP_GRBM_OUT_OF_CREDITS          = 0xa,
	CPG_PERF_SEL_CP_GDS_GRBM_OUT_OF_CREDITS          = 0xb,
	CPG_PERF_SEL_RCIU_STALLED_ON_ME_READ             = 0xc,
	CPG_PERF_SEL_RCIU_STALLED_ON_DMA_READ            = 0xd,
	CPG_PERF_SEL_SSU_STALLED_ON_ACTIVE_CNTX          = 0xe,
	CPG_PERF_SEL_SSU_STALLED_ON_CLEAN_SIGNALS        = 0xf,
	CPG_PERF_SEL_QU_STALLED_ON_EOP_DONE_PULSE        = 0x10,
	CPG_PERF_SEL_QU_STALLED_ON_EOP_DONE_WR_CONFIRM   = 0x11,
	CPG_PERF_SEL_PFP_STALLED_ON_CSF_READY            = 0x12,
	CPG_PERF_SEL_PFP_STALLED_ON_MEQ_READY            = 0x13,
	CPG_PERF_SEL_PFP_STALLED_ON_RCIU_READY           = 0x14,
	CPG_PERF_SEL_PFP_STALLED_FOR_DATA_FROM_ROQ       = 0x15,
	CPG_PERF_SEL_ME_STALLED_FOR_DATA_FROM_PFP        = 0x16,
	CPG_PERF_SEL_ME_STALLED_FOR_DATA_FROM_STQ        = 0x17,
	CPG_PERF_SEL_ME_STALLED_ON_NO_AVAIL_GFX_CNTX     = 0x18,
	CPG_PERF_SEL_ME_STALLED_WRITING_TO_RCIU          = 0x19,
	CPG_PERF_SEL_ME_STALLED_WRITING_CONSTANTS        = 0x1a,
	CPG_PERF_SEL_ME_STALLED_ON_PARTIAL_FLUSH         = 0x1b,
	CPG_PERF_SEL_ME_WAIT_ON_CE_COUNTER               = 0x1c,
	CPG_PERF_SEL_ME_WAIT_ON_AVAIL_BUFFER             = 0x1d,
	CPG_PERF_SEL_SEMAPHORE_BUSY_POLLING_FOR_PASS     = 0x1e,
	CPG_PERF_SEL_LOAD_STALLED_ON_SET_COHERENCY       = 0x1f,
	CPG_PERF_SEL_DYNAMIC_CLK_VALID                   = 0x20,
	CPG_PERF_SEL_REGISTER_CLK_VALID                  = 0x21,
	CPG_PERF_SEL_MIU_WRITE_REQUEST_SENT              = 0x22,
	CPG_PERF_SEL_MIU_READ_REQUEST_SENT               = 0x23,
	CPG_PERF_SEL_CE_STALL_RAM_DUMP                   = 0x24,
	CPG_PERF_SEL_CE_STALL_RAM_WRITE                  = 0x25,
	CPG_PERF_SEL_CE_STALL_ON_INC_FIFO                = 0x26,
	CPG_PERF_SEL_CE_STALL_ON_WR_RAM_FIFO             = 0x27,
	CPG_PERF_SEL_CE_STALL_ON_DATA_FROM_MIU           = 0x28,
	CPG_PERF_SEL_CE_STALL_ON_DATA_FROM_ROQ           = 0x29,
	CPG_PERF_SEL_CE_STALL_ON_CE_BUFFER_FLAG          = 0x2a,
	CPG_PERF_SEL_CE_STALL_ON_DE_COUNTER              = 0x2b,
	CPG_PERF_SEL_TCIU_STALL_WAIT_ON_FREE             = 0x2c,
	CPG_PERF_SEL_TCIU_STALL_WAIT_ON_TAGS             = 0x2d,
	CPG_PERF_SEL_ATCL2IU_STALL_WAIT_ON_FREE          = 0x2e,
	CPG_PERF_SEL_ATCL2IU_STALL_WAIT_ON_TAGS          = 0x2f,
	CPG_PERF_SEL_ATCL1_STALL_ON_TRANSLATION          = 0x30,
} CPG_PERFCOUNT_SEL;
/*
 * CPF (CP fetcher) perfcounter event selects. Values are hardware-defined.
 * "FECTHINC" below is a typo in the upstream/generated header — kept as-is
 * because the identifier is part of the public interface.
 */
typedef enum CPF_PERFCOUNT_SEL {
	CPF_PERF_SEL_ALWAYS_COUNT                        = 0x0,
	CPF_PERF_SEL_MIU_STALLED_WAITING_RDREQ_FREE      = 0x1,
	CPF_PERF_SEL_TCIU_STALLED_WAITING_ON_FREE        = 0x2,
	CPF_PERF_SEL_TCIU_STALLED_WAITING_ON_TAGS        = 0x3,
	CPF_PERF_SEL_CSF_BUSY_FOR_FETCHING_RING          = 0x4,
	CPF_PERF_SEL_CSF_BUSY_FOR_FETCHING_IB1           = 0x5,
	CPF_PERF_SEL_CSF_BUSY_FOR_FETCHING_IB2           = 0x6,
	CPF_PERF_SEL_CSF_BUSY_FOR_FECTHINC_STATE         = 0x7,
	CPF_PERF_SEL_MIU_BUSY_FOR_OUTSTANDING_TAGS       = 0x8,
	CPF_PERF_SEL_CSF_RTS_MIU_NOT_RTR                 = 0x9,
	CPF_PERF_SEL_CSF_STATE_FIFO_NOT_RTR              = 0xa,
	CPF_PERF_SEL_CSF_FETCHING_CMD_BUFFERS            = 0xb,
	CPF_PERF_SEL_GRBM_DWORDS_SENT                    = 0xc,
	CPF_PERF_SEL_DYNAMIC_CLOCK_VALID                 = 0xd,
	CPF_PERF_SEL_REGISTER_CLOCK_VALID                = 0xe,
	CPF_PERF_SEL_MIU_WRITE_REQUEST_SEND              = 0xf,
	CPF_PERF_SEL_MIU_READ_REQUEST_SEND               = 0x10,
	CPF_PERF_SEL_ATCL2IU_STALL_WAIT_ON_FREE          = 0x11,
	CPF_PERF_SEL_ATCL2IU_STALL_WAIT_ON_TAGS          = 0x12,
	CPF_PERF_SEL_ATCL1_STALL_ON_TRANSLATION          = 0x13,
} CPF_PERFCOUNT_SEL;
/*
 * CPC (CP compute) perfcounter event selects. Values are hardware-defined
 * register encodings — do not renumber.
 */
typedef enum CPC_PERFCOUNT_SEL {
	CPC_PERF_SEL_ALWAYS_COUNT                        = 0x0,
	CPC_PERF_SEL_RCIU_STALL_WAIT_ON_FREE             = 0x1,
	CPC_PERF_SEL_RCIU_STALL_PRIV_VIOLATION           = 0x2,
	CPC_PERF_SEL_MIU_STALL_ON_RDREQ_FREE             = 0x3,
	CPC_PERF_SEL_MIU_STALL_ON_WRREQ_FREE             = 0x4,
	CPC_PERF_SEL_TCIU_STALL_WAIT_ON_FREE             = 0x5,
	CPC_PERF_SEL_ME1_STALL_WAIT_ON_RCIU_READY        = 0x6,
	CPC_PERF_SEL_ME1_STALL_WAIT_ON_RCIU_READY_PERF   = 0x7,
	CPC_PERF_SEL_ME1_STALL_WAIT_ON_RCIU_READ         = 0x8,
	CPC_PERF_SEL_ME1_STALL_WAIT_ON_MIU_READ          = 0x9,
	CPC_PERF_SEL_ME1_STALL_WAIT_ON_MIU_WRITE         = 0xa,
	CPC_PERF_SEL_ME1_STALL_ON_DATA_FROM_ROQ          = 0xb,
	CPC_PERF_SEL_ME1_STALL_ON_DATA_FROM_ROQ_PERF     = 0xc,
	CPC_PERF_SEL_ME1_BUSY_FOR_PACKET_DECODE          = 0xd,
	CPC_PERF_SEL_ME2_STALL_WAIT_ON_RCIU_READY        = 0xe,
	CPC_PERF_SEL_ME2_STALL_WAIT_ON_RCIU_READY_PERF   = 0xf,
	CPC_PERF_SEL_ME2_STALL_WAIT_ON_RCIU_READ         = 0x10,
	CPC_PERF_SEL_ME2_STALL_WAIT_ON_MIU_READ          = 0x11,
	CPC_PERF_SEL_ME2_STALL_WAIT_ON_MIU_WRITE         = 0x12,
	CPC_PERF_SEL_ME2_STALL_ON_DATA_FROM_ROQ          = 0x13,
	CPC_PERF_SEL_ME2_STALL_ON_DATA_FROM_ROQ_PERF     = 0x14,
	CPC_PERF_SEL_ME2_BUSY_FOR_PACKET_DECODE          = 0x15,
	CPC_PERF_SEL_ATCL2IU_STALL_WAIT_ON_FREE          = 0x16,
	CPC_PERF_SEL_ATCL2IU_STALL_WAIT_ON_TAGS          = 0x17,
	CPC_PERF_SEL_ATCL1_STALL_ON_TRANSLATION          = 0x18,
} CPC_PERFCOUNT_SEL;
/* CP alpha tag-RAM select (hardware-defined values). */
typedef enum CP_ALPHA_TAG_RAM_SEL {
	CPG_TAG_RAM                                      = 0x0,
	CPC_TAG_RAM                                      = 0x1,
	CPF_TAG_RAM                                      = 0x2,
	RSV_TAG_RAM                                      = 0x3,
} CP_ALPHA_TAG_RAM_SEL;
/*
 * Miscellaneous hardware-defined constants:
 *  - SEM_*: semaphore completion status codes.
 *  - IQ_*: interrupt-queue action / interrupt-type codes.
 *  - VMID_SZ: VMID field width (presumably in bits — TODO confirm against users).
 *  - *_SPACE_START/END: inclusive dword-offset ranges of the register
 *    address spaces (config, user-config, persistent, context).
 * All values are fixed by the hardware/firmware interface — do not change.
 */
#define SEM_ECC_ERROR                             0x0
#define SEM_RESERVED                              0x1
#define SEM_FAILED                                0x2
#define SEM_PASSED                                0x3
#define IQ_QUEUE_SLEEP                            0x0
#define IQ_OFFLOAD_RETRY                          0x1
#define IQ_SCH_WAVE_MSG                           0x2
#define IQ_SEM_REARM                              0x3
#define IQ_DEQUEUE_RETRY                          0x4
#define IQ_INTR_TYPE_PQ                           0x0
#define IQ_INTR_TYPE_IB                           0x1
#define IQ_INTR_TYPE_MQD                          0x2
#define VMID_SZ                                   0x4
#define CONFIG_SPACE_START                        0x2000
#define CONFIG_SPACE_END                          0x9fff
#define CONFIG_SPACE1_START                       0x2000
#define CONFIG_SPACE1_END                         0x2bff
#define CONFIG_SPACE2_START                       0x3000
#define CONFIG_SPACE2_END                         0x9fff
#define UCONFIG_SPACE_START                       0xc000
#define UCONFIG_SPACE_END                         0xffff
#define PERSISTENT_SPACE_START                    0x2c00
#define PERSISTENT_SPACE_END                      0x2fff
#define CONTEXT_SPACE_START                       0xa000
#define CONTEXT_SPACE_END                         0xbfff
/* Generic force-control encoding (off / force-enable / force-disable). */
typedef enum ForceControl {
	FORCE_OFF                                        = 0x0,
	FORCE_ENABLE                                     = 0x1,
	FORCE_DISABLE                                    = 0x2,
	FORCE_RESERVED                                   = 0x3,
} ForceControl;
/* Z sample position select: pixel center vs. centroid. */
typedef enum ZSamplePosition {
	Z_SAMPLE_CENTER                                  = 0x0,
	Z_SAMPLE_CENTROID                                = 0x1,
} ZSamplePosition;
/* Depth-test ordering relative to the pixel shader (hardware-defined). */
typedef enum ZOrder {
	LATE_Z                                           = 0x0,
	EARLY_Z_THEN_LATE_Z                              = 0x1,
	RE_Z                                             = 0x2,
	EARLY_Z_THEN_RE_Z                                = 0x3,
} ZOrder;
/* Z-pass counting control: disabled, count samples, or count pixels. */
typedef enum ZpassControl {
	ZPASS_DISABLE                                    = 0x0,
	ZPASS_SAMPLES                                    = 0x1,
	ZPASS_PIXELS                                     = 0x2,
} ZpassControl;
/* Force a specific Z mode regardless of automatic selection. */
typedef enum ZModeForce {
	NO_FORCE                                         = 0x0,
	FORCE_EARLY_Z                                    = 0x1,
	FORCE_LATE_Z                                     = 0x2,
	FORCE_RE_Z                                       = 0x3,
} ZModeForce;
/* Z summarizer min/max limit forcing (hardware-defined values). */
typedef enum ZLimitSumm {
	FORCE_SUMM_OFF                                   = 0x0,
	FORCE_SUMM_MINZ                                  = 0x1,
	FORCE_SUMM_MAXZ                                  = 0x2,
	FORCE_SUMM_BOTH                                  = 0x3,
} ZLimitSumm;
/* Fragment depth-compare function (standard never..always ordering). */
typedef enum CompareFrag {
	FRAG_NEVER                                       = 0x0,
	FRAG_LESS                                        = 0x1,
	FRAG_EQUAL                                       = 0x2,
	FRAG_LEQUAL                                      = 0x3,
	FRAG_GREATER                                     = 0x4,
	FRAG_NOTEQUAL                                    = 0x5,
	FRAG_GEQUAL                                      = 0x6,
	FRAG_ALWAYS                                      = 0x7,
} CompareFrag;
/* Stencil operation select (hardware-defined register encodings). */
typedef enum StencilOp {
	STENCIL_KEEP                                     = 0x0,
	STENCIL_ZERO                                     = 0x1,
	STENCIL_ONES                                     = 0x2,
	STENCIL_REPLACE_TEST                             = 0x3,
	STENCIL_REPLACE_OP                               = 0x4,
	STENCIL_ADD_CLAMP                                = 0x5,
	STENCIL_SUB_CLAMP                                = 0x6,
	STENCIL_INVERT                                   = 0x7,
	STENCIL_ADD_WRAP                                 = 0x8,
	STENCIL_SUB_WRAP                                 = 0x9,
	STENCIL_AND                                      = 0xa,
	STENCIL_OR                                       = 0xb,
	STENCIL_XOR                                      = 0xc,
	STENCIL_NAND                                     = 0xd,
	STENCIL_NOR                                      = 0xe,
	STENCIL_XNOR                                     = 0xf,
} StencilOp;
/* Conservative Z export hint from the pixel shader (hardware-defined). */
typedef enum ConservativeZExport {
	EXPORT_ANY_Z                                     = 0x0,
	EXPORT_LESS_THAN_Z                               = 0x1,
	EXPORT_GREATER_THAN_Z                            = 0x2,
	EXPORT_RESERVED                                  = 0x3,
} ConservativeZExport;
/* DB PSL control encodings (hardware-defined values). */
typedef enum DbPSLControl {
	PSLC_AUTO                                        = 0x0,
	PSLC_ON_HANG_ONLY                                = 0x1,
	PSLC_ASAP                                        = 0x2,
	PSLC_COUNTDOWN                                   = 0x3,
} DbPSLControl;
788typedef enum PerfCounter_Vals {
789 DB_PERF_SEL_SC_DB_tile_sends = 0x0,
790 DB_PERF_SEL_SC_DB_tile_busy = 0x1,
791 DB_PERF_SEL_SC_DB_tile_stalls = 0x2,
792 DB_PERF_SEL_SC_DB_tile_events = 0x3,
793 DB_PERF_SEL_SC_DB_tile_tiles = 0x4,
794 DB_PERF_SEL_SC_DB_tile_covered = 0x5,
795 DB_PERF_SEL_hiz_tc_read_starved = 0x6,
796 DB_PERF_SEL_hiz_tc_write_stall = 0x7,
797 DB_PERF_SEL_hiz_qtiles_culled = 0x8,
798 DB_PERF_SEL_his_qtiles_culled = 0x9,
799 DB_PERF_SEL_DB_SC_tile_sends = 0xa,
800 DB_PERF_SEL_DB_SC_tile_busy = 0xb,
801 DB_PERF_SEL_DB_SC_tile_stalls = 0xc,
802 DB_PERF_SEL_DB_SC_tile_df_stalls = 0xd,
803 DB_PERF_SEL_DB_SC_tile_tiles = 0xe,
804 DB_PERF_SEL_DB_SC_tile_culled = 0xf,
805 DB_PERF_SEL_DB_SC_tile_hier_kill = 0x10,
806 DB_PERF_SEL_DB_SC_tile_fast_ops = 0x11,
807 DB_PERF_SEL_DB_SC_tile_no_ops = 0x12,
808 DB_PERF_SEL_DB_SC_tile_tile_rate = 0x13,
809 DB_PERF_SEL_DB_SC_tile_ssaa_kill = 0x14,
810 DB_PERF_SEL_DB_SC_tile_fast_z_ops = 0x15,
811 DB_PERF_SEL_DB_SC_tile_fast_stencil_ops = 0x16,
812 DB_PERF_SEL_SC_DB_quad_sends = 0x17,
813 DB_PERF_SEL_SC_DB_quad_busy = 0x18,
814 DB_PERF_SEL_SC_DB_quad_squads = 0x19,
815 DB_PERF_SEL_SC_DB_quad_tiles = 0x1a,
816 DB_PERF_SEL_SC_DB_quad_pixels = 0x1b,
817 DB_PERF_SEL_SC_DB_quad_killed_tiles = 0x1c,
818 DB_PERF_SEL_DB_SC_quad_sends = 0x1d,
819 DB_PERF_SEL_DB_SC_quad_busy = 0x1e,
820 DB_PERF_SEL_DB_SC_quad_stalls = 0x1f,
821 DB_PERF_SEL_DB_SC_quad_tiles = 0x20,
822 DB_PERF_SEL_DB_SC_quad_lit_quad = 0x21,
823 DB_PERF_SEL_DB_CB_tile_sends = 0x22,
824 DB_PERF_SEL_DB_CB_tile_busy = 0x23,
825 DB_PERF_SEL_DB_CB_tile_stalls = 0x24,
826 DB_PERF_SEL_SX_DB_quad_sends = 0x25,
827 DB_PERF_SEL_SX_DB_quad_busy = 0x26,
828 DB_PERF_SEL_SX_DB_quad_stalls = 0x27,
829 DB_PERF_SEL_SX_DB_quad_quads = 0x28,
830 DB_PERF_SEL_SX_DB_quad_pixels = 0x29,
831 DB_PERF_SEL_SX_DB_quad_exports = 0x2a,
832 DB_PERF_SEL_SH_quads_outstanding_sum = 0x2b,
833 DB_PERF_SEL_DB_CB_lquad_sends = 0x2c,
834 DB_PERF_SEL_DB_CB_lquad_busy = 0x2d,
835 DB_PERF_SEL_DB_CB_lquad_stalls = 0x2e,
836 DB_PERF_SEL_DB_CB_lquad_quads = 0x2f,
837 DB_PERF_SEL_tile_rd_sends = 0x30,
838 DB_PERF_SEL_mi_tile_rd_outstanding_sum = 0x31,
839 DB_PERF_SEL_quad_rd_sends = 0x32,
840 DB_PERF_SEL_quad_rd_busy = 0x33,
841 DB_PERF_SEL_quad_rd_mi_stall = 0x34,
842 DB_PERF_SEL_quad_rd_rw_collision = 0x35,
843 DB_PERF_SEL_quad_rd_tag_stall = 0x36,
844 DB_PERF_SEL_quad_rd_32byte_reqs = 0x37,
845 DB_PERF_SEL_quad_rd_panic = 0x38,
846 DB_PERF_SEL_mi_quad_rd_outstanding_sum = 0x39,
847 DB_PERF_SEL_quad_rdret_sends = 0x3a,
848 DB_PERF_SEL_quad_rdret_busy = 0x3b,
849 DB_PERF_SEL_tile_wr_sends = 0x3c,
850 DB_PERF_SEL_tile_wr_acks = 0x3d,
851 DB_PERF_SEL_mi_tile_wr_outstanding_sum = 0x3e,
852 DB_PERF_SEL_quad_wr_sends = 0x3f,
853 DB_PERF_SEL_quad_wr_busy = 0x40,
854 DB_PERF_SEL_quad_wr_mi_stall = 0x41,
855 DB_PERF_SEL_quad_wr_coherency_stall = 0x42,
856 DB_PERF_SEL_quad_wr_acks = 0x43,
857 DB_PERF_SEL_mi_quad_wr_outstanding_sum = 0x44,
858 DB_PERF_SEL_Tile_Cache_misses = 0x45,
859 DB_PERF_SEL_Tile_Cache_hits = 0x46,
860 DB_PERF_SEL_Tile_Cache_flushes = 0x47,
861 DB_PERF_SEL_Tile_Cache_surface_stall = 0x48,
862 DB_PERF_SEL_Tile_Cache_starves = 0x49,
863 DB_PERF_SEL_Tile_Cache_mem_return_starve = 0x4a,
864 DB_PERF_SEL_tcp_dispatcher_reads = 0x4b,
865 DB_PERF_SEL_tcp_prefetcher_reads = 0x4c,
866 DB_PERF_SEL_tcp_preloader_reads = 0x4d,
867 DB_PERF_SEL_tcp_dispatcher_flushes = 0x4e,
868 DB_PERF_SEL_tcp_prefetcher_flushes = 0x4f,
869 DB_PERF_SEL_tcp_preloader_flushes = 0x50,
870 DB_PERF_SEL_Depth_Tile_Cache_sends = 0x51,
871 DB_PERF_SEL_Depth_Tile_Cache_busy = 0x52,
872 DB_PERF_SEL_Depth_Tile_Cache_starves = 0x53,
873 DB_PERF_SEL_Depth_Tile_Cache_dtile_locked = 0x54,
874 DB_PERF_SEL_Depth_Tile_Cache_alloc_stall = 0x55,
875 DB_PERF_SEL_Depth_Tile_Cache_misses = 0x56,
876 DB_PERF_SEL_Depth_Tile_Cache_hits = 0x57,
877 DB_PERF_SEL_Depth_Tile_Cache_flushes = 0x58,
878 DB_PERF_SEL_Depth_Tile_Cache_noop_tile = 0x59,
879 DB_PERF_SEL_Depth_Tile_Cache_detailed_noop = 0x5a,
880 DB_PERF_SEL_Depth_Tile_Cache_event = 0x5b,
881 DB_PERF_SEL_Depth_Tile_Cache_tile_frees = 0x5c,
882 DB_PERF_SEL_Depth_Tile_Cache_data_frees = 0x5d,
883 DB_PERF_SEL_Depth_Tile_Cache_mem_return_starve = 0x5e,
884 DB_PERF_SEL_Stencil_Cache_misses = 0x5f,
885 DB_PERF_SEL_Stencil_Cache_hits = 0x60,
886 DB_PERF_SEL_Stencil_Cache_flushes = 0x61,
887 DB_PERF_SEL_Stencil_Cache_starves = 0x62,
888 DB_PERF_SEL_Stencil_Cache_frees = 0x63,
889 DB_PERF_SEL_Z_Cache_separate_Z_misses = 0x64,
890 DB_PERF_SEL_Z_Cache_separate_Z_hits = 0x65,
891 DB_PERF_SEL_Z_Cache_separate_Z_flushes = 0x66,
892 DB_PERF_SEL_Z_Cache_separate_Z_starves = 0x67,
893 DB_PERF_SEL_Z_Cache_pmask_misses = 0x68,
894 DB_PERF_SEL_Z_Cache_pmask_hits = 0x69,
895 DB_PERF_SEL_Z_Cache_pmask_flushes = 0x6a,
896 DB_PERF_SEL_Z_Cache_pmask_starves = 0x6b,
897 DB_PERF_SEL_Z_Cache_frees = 0x6c,
898 DB_PERF_SEL_Plane_Cache_misses = 0x6d,
899 DB_PERF_SEL_Plane_Cache_hits = 0x6e,
900 DB_PERF_SEL_Plane_Cache_flushes = 0x6f,
901 DB_PERF_SEL_Plane_Cache_starves = 0x70,
902 DB_PERF_SEL_Plane_Cache_frees = 0x71,
903 DB_PERF_SEL_flush_expanded_stencil = 0x72,
904 DB_PERF_SEL_flush_compressed_stencil = 0x73,
905 DB_PERF_SEL_flush_single_stencil = 0x74,
906 DB_PERF_SEL_planes_flushed = 0x75,
907 DB_PERF_SEL_flush_1plane = 0x76,
908 DB_PERF_SEL_flush_2plane = 0x77,
909 DB_PERF_SEL_flush_3plane = 0x78,
910 DB_PERF_SEL_flush_4plane = 0x79,
911 DB_PERF_SEL_flush_5plane = 0x7a,
912 DB_PERF_SEL_flush_6plane = 0x7b,
913 DB_PERF_SEL_flush_7plane = 0x7c,
914 DB_PERF_SEL_flush_8plane = 0x7d,
915 DB_PERF_SEL_flush_9plane = 0x7e,
916 DB_PERF_SEL_flush_10plane = 0x7f,
917 DB_PERF_SEL_flush_11plane = 0x80,
918 DB_PERF_SEL_flush_12plane = 0x81,
919 DB_PERF_SEL_flush_13plane = 0x82,
920 DB_PERF_SEL_flush_14plane = 0x83,
921 DB_PERF_SEL_flush_15plane = 0x84,
922 DB_PERF_SEL_flush_16plane = 0x85,
923 DB_PERF_SEL_flush_expanded_z = 0x86,
924 DB_PERF_SEL_earlyZ_waiting_for_postZ_done = 0x87,
925 DB_PERF_SEL_reZ_waiting_for_postZ_done = 0x88,
926 DB_PERF_SEL_dk_tile_sends = 0x89,
927 DB_PERF_SEL_dk_tile_busy = 0x8a,
928 DB_PERF_SEL_dk_tile_quad_starves = 0x8b,
929 DB_PERF_SEL_dk_tile_stalls = 0x8c,
930 DB_PERF_SEL_dk_squad_sends = 0x8d,
931 DB_PERF_SEL_dk_squad_busy = 0x8e,
932 DB_PERF_SEL_dk_squad_stalls = 0x8f,
933 DB_PERF_SEL_Op_Pipe_Busy = 0x90,
934 DB_PERF_SEL_Op_Pipe_MC_Read_stall = 0x91,
935 DB_PERF_SEL_qc_busy = 0x92,
936 DB_PERF_SEL_qc_xfc = 0x93,
937 DB_PERF_SEL_qc_conflicts = 0x94,
938 DB_PERF_SEL_qc_full_stall = 0x95,
939 DB_PERF_SEL_qc_in_preZ_tile_stalls_postZ = 0x96,
940 DB_PERF_SEL_qc_in_postZ_tile_stalls_preZ = 0x97,
941 DB_PERF_SEL_tsc_insert_summarize_stall = 0x98,
942 DB_PERF_SEL_tl_busy = 0x99,
943 DB_PERF_SEL_tl_dtc_read_starved = 0x9a,
944 DB_PERF_SEL_tl_z_fetch_stall = 0x9b,
945 DB_PERF_SEL_tl_stencil_stall = 0x9c,
946 DB_PERF_SEL_tl_z_decompress_stall = 0x9d,
947 DB_PERF_SEL_tl_stencil_locked_stall = 0x9e,
948 DB_PERF_SEL_tl_events = 0x9f,
949 DB_PERF_SEL_tl_summarize_squads = 0xa0,
950 DB_PERF_SEL_tl_flush_expand_squads = 0xa1,
951 DB_PERF_SEL_tl_expand_squads = 0xa2,
952 DB_PERF_SEL_tl_preZ_squads = 0xa3,
953 DB_PERF_SEL_tl_postZ_squads = 0xa4,
954 DB_PERF_SEL_tl_preZ_noop_squads = 0xa5,
955 DB_PERF_SEL_tl_postZ_noop_squads = 0xa6,
956 DB_PERF_SEL_tl_tile_ops = 0xa7,
957 DB_PERF_SEL_tl_in_xfc = 0xa8,
958 DB_PERF_SEL_tl_in_single_stencil_expand_stall = 0xa9,
959 DB_PERF_SEL_tl_in_fast_z_stall = 0xaa,
960 DB_PERF_SEL_tl_out_xfc = 0xab,
961 DB_PERF_SEL_tl_out_squads = 0xac,
962 DB_PERF_SEL_zf_plane_multicycle = 0xad,
963 DB_PERF_SEL_PostZ_Samples_passing_Z = 0xae,
964 DB_PERF_SEL_PostZ_Samples_failing_Z = 0xaf,
965 DB_PERF_SEL_PostZ_Samples_failing_S = 0xb0,
966 DB_PERF_SEL_PreZ_Samples_passing_Z = 0xb1,
967 DB_PERF_SEL_PreZ_Samples_failing_Z = 0xb2,
968 DB_PERF_SEL_PreZ_Samples_failing_S = 0xb3,
969 DB_PERF_SEL_ts_tc_update_stall = 0xb4,
970 DB_PERF_SEL_sc_kick_start = 0xb5,
971 DB_PERF_SEL_sc_kick_end = 0xb6,
972 DB_PERF_SEL_clock_reg_active = 0xb7,
973 DB_PERF_SEL_clock_main_active = 0xb8,
974 DB_PERF_SEL_clock_mem_export_active = 0xb9,
975 DB_PERF_SEL_esr_ps_out_busy = 0xba,
976 DB_PERF_SEL_esr_ps_lqf_busy = 0xbb,
977 DB_PERF_SEL_esr_ps_lqf_stall = 0xbc,
978 DB_PERF_SEL_etr_out_send = 0xbd,
979 DB_PERF_SEL_etr_out_busy = 0xbe,
980 DB_PERF_SEL_etr_out_ltile_probe_fifo_full_stall = 0xbf,
981 DB_PERF_SEL_etr_out_cb_tile_stall = 0xc0,
982 DB_PERF_SEL_etr_out_esr_stall = 0xc1,
983 DB_PERF_SEL_esr_ps_sqq_busy = 0xc2,
984 DB_PERF_SEL_esr_ps_sqq_stall = 0xc3,
985 DB_PERF_SEL_esr_eot_fwd_busy = 0xc4,
986 DB_PERF_SEL_esr_eot_fwd_holding_squad = 0xc5,
987 DB_PERF_SEL_esr_eot_fwd_forward = 0xc6,
988 DB_PERF_SEL_esr_sqq_zi_busy = 0xc7,
989 DB_PERF_SEL_esr_sqq_zi_stall = 0xc8,
990 DB_PERF_SEL_postzl_sq_pt_busy = 0xc9,
991 DB_PERF_SEL_postzl_sq_pt_stall = 0xca,
992 DB_PERF_SEL_postzl_se_busy = 0xcb,
993 DB_PERF_SEL_postzl_se_stall = 0xcc,
994 DB_PERF_SEL_postzl_partial_launch = 0xcd,
995 DB_PERF_SEL_postzl_full_launch = 0xce,
996 DB_PERF_SEL_postzl_partial_waiting = 0xcf,
997 DB_PERF_SEL_postzl_tile_mem_stall = 0xd0,
998 DB_PERF_SEL_postzl_tile_init_stall = 0xd1,
999 DB_PEFF_SEL_prezl_tile_mem_stall = 0xd2,
1000 DB_PERF_SEL_prezl_tile_init_stall = 0xd3,
1001 DB_PERF_SEL_dtt_sm_clash_stall = 0xd4,
1002 DB_PERF_SEL_dtt_sm_slot_stall = 0xd5,
1003 DB_PERF_SEL_dtt_sm_miss_stall = 0xd6,
1004 DB_PERF_SEL_mi_rdreq_busy = 0xd7,
1005 DB_PERF_SEL_mi_rdreq_stall = 0xd8,
1006 DB_PERF_SEL_mi_wrreq_busy = 0xd9,
1007 DB_PERF_SEL_mi_wrreq_stall = 0xda,
1008 DB_PERF_SEL_recomp_tile_to_1zplane_no_fastop = 0xdb,
1009 DB_PERF_SEL_dkg_tile_rate_tile = 0xdc,
1010 DB_PERF_SEL_prezl_src_in_sends = 0xdd,
1011 DB_PERF_SEL_prezl_src_in_stall = 0xde,
1012 DB_PERF_SEL_prezl_src_in_squads = 0xdf,
1013 DB_PERF_SEL_prezl_src_in_squads_unrolled = 0xe0,
1014 DB_PERF_SEL_prezl_src_in_tile_rate = 0xe1,
1015 DB_PERF_SEL_prezl_src_in_tile_rate_unrolled = 0xe2,
1016 DB_PERF_SEL_prezl_src_out_stall = 0xe3,
1017 DB_PERF_SEL_postzl_src_in_sends = 0xe4,
1018 DB_PERF_SEL_postzl_src_in_stall = 0xe5,
1019 DB_PERF_SEL_postzl_src_in_squads = 0xe6,
1020 DB_PERF_SEL_postzl_src_in_squads_unrolled = 0xe7,
1021 DB_PERF_SEL_postzl_src_in_tile_rate = 0xe8,
1022 DB_PERF_SEL_postzl_src_in_tile_rate_unrolled = 0xe9,
1023 DB_PERF_SEL_postzl_src_out_stall = 0xea,
1024 DB_PERF_SEL_esr_ps_src_in_sends = 0xeb,
1025 DB_PERF_SEL_esr_ps_src_in_stall = 0xec,
1026 DB_PERF_SEL_esr_ps_src_in_squads = 0xed,
1027 DB_PERF_SEL_esr_ps_src_in_squads_unrolled = 0xee,
1028 DB_PERF_SEL_esr_ps_src_in_tile_rate = 0xef,
1029 DB_PERF_SEL_esr_ps_src_in_tile_rate_unrolled = 0xf0,
1030 DB_PERF_SEL_esr_ps_src_in_tile_rate_unrolled_to_pixel_rate= 0xf1,
1031 DB_PERF_SEL_esr_ps_src_out_stall = 0xf2,
1032 DB_PERF_SEL_depth_bounds_qtiles_culled = 0xf3,
1033 DB_PERF_SEL_PreZ_Samples_failing_DB = 0xf4,
1034 DB_PERF_SEL_PostZ_Samples_failing_DB = 0xf5,
1035 DB_PERF_SEL_flush_compressed = 0xf6,
1036 DB_PERF_SEL_flush_plane_le4 = 0xf7,
1037 DB_PERF_SEL_tiles_z_fully_summarized = 0xf8,
1038 DB_PERF_SEL_tiles_stencil_fully_summarized = 0xf9,
1039 DB_PERF_SEL_tiles_z_clear_on_expclear = 0xfa,
1040 DB_PERF_SEL_tiles_s_clear_on_expclear = 0xfb,
1041 DB_PERF_SEL_tiles_decomp_on_expclear = 0xfc,
1042 DB_PERF_SEL_tiles_compressed_to_decompressed = 0xfd,
1043 DB_PERF_SEL_Op_Pipe_Prez_Busy = 0xfe,
1044 DB_PERF_SEL_Op_Pipe_Postz_Busy = 0xff,
1045 DB_PERF_SEL_di_dt_stall = 0x100,
1046 DB_PERF_SEL_DB_SC_quad_double_quad = 0x101,
1047 DB_PERF_SEL_SX_DB_quad_export_quads = 0x102,
1048 DB_PERF_SEL_SX_DB_quad_double_format = 0x103,
1049 DB_PERF_SEL_SX_DB_quad_fast_format = 0x104,
1050 DB_PERF_SEL_SX_DB_quad_slow_format = 0x105,
1051 DB_PERF_SEL_DB_CB_lquad_export_quads = 0x106,
1052 DB_PERF_SEL_DB_CB_lquad_double_format = 0x107,
1053 DB_PERF_SEL_DB_CB_lquad_fast_format = 0x108,
1054 DB_PERF_SEL_DB_CB_lquad_slow_format = 0x109,
1055} PerfCounter_Vals;
/* Counter ring selection — controls whether a counter accumulates split
 * across rings or into a single ring. Values are hardware field encodings;
 * do not renumber (auto-generated register header). */
1056typedef enum RingCounterControl {
1057 COUNTER_RING_SPLIT = 0x0,
1058 COUNTER_RING_0 = 0x1,
1059 COUNTER_RING_1 = 0x2,
1060} RingCounterControl;
/* Pixel-pipe statistics counter identifiers: four occlusion counters plus
 * screen min/max extent pairs. Values are hardware field encodings —
 * NOTE(review): exact counter semantics per the hardware spec; not visible
 * here. */
1061typedef enum PixelPipeCounterId {
1062 PIXEL_PIPE_OCCLUSION_COUNT_0 = 0x0,
1063 PIXEL_PIPE_OCCLUSION_COUNT_1 = 0x1,
1064 PIXEL_PIPE_OCCLUSION_COUNT_2 = 0x2,
1065 PIXEL_PIPE_OCCLUSION_COUNT_3 = 0x3,
1066 PIXEL_PIPE_SCREEN_MIN_EXTENTS_0 = 0x4,
1067 PIXEL_PIPE_SCREEN_MAX_EXTENTS_0 = 0x5,
1068 PIXEL_PIPE_SCREEN_MIN_EXTENTS_1 = 0x6,
1069 PIXEL_PIPE_SCREEN_MAX_EXTENTS_1 = 0x7,
1070} PixelPipeCounterId;
/* Pixel-pipe counter stride selection (32/64/128/256 bits). Encoded value n
 * selects a stride of 32 << n bits. */
1071typedef enum PixelPipeStride {
1072 PIXEL_PIPE_STRIDE_32_BITS = 0x0,
1073 PIXEL_PIPE_STRIDE_64_BITS = 0x1,
1074 PIXEL_PIPE_STRIDE_128_BITS = 0x2,
1075 PIXEL_PIPE_STRIDE_256_BITS = 0x3,
1076} PixelPipeStride;
/* EDC double-error-detected (DED) handling mode: log only, halt, or
 * interrupt-then-halt. Values are hardware field encodings. */
1077typedef enum GB_EDC_DED_MODE {
1078 GB_EDC_DED_MODE_LOG = 0x0,
1079 GB_EDC_DED_MODE_HALT = 0x1,
1080 GB_EDC_DED_MODE_INT_HALT = 0x2,
1081} GB_EDC_DED_MODE;
/* Number of entries in the tiling-config table (32) and the macrotile-mode
 * table (16) — presumably the GB_TILE_MODE*/GB_MACROTILE_MODE* register
 * arrays; confirm against the register database. */
1082#define GB_TILING_CONFIG_TABLE_SIZE 0x20
1083#define GB_TILING_CONFIG_MACROTABLE_SIZE 0x10
/* GRBM (graphics register bus manager) performance counter selects.
 * Mostly per-engine busy/clean status selects; RESERVED_* entries keep the
 * hardware encoding dense. Values must not be renumbered (auto-generated
 * register header). */
1084typedef enum GRBM_PERF_SEL {
1085 GRBM_PERF_SEL_COUNT = 0x0,
1086 GRBM_PERF_SEL_USER_DEFINED = 0x1,
1087 GRBM_PERF_SEL_GUI_ACTIVE = 0x2,
1088 GRBM_PERF_SEL_CP_BUSY = 0x3,
1089 GRBM_PERF_SEL_CP_COHER_BUSY = 0x4,
1090 GRBM_PERF_SEL_CP_DMA_BUSY = 0x5,
1091 GRBM_PERF_SEL_CB_BUSY = 0x6,
1092 GRBM_PERF_SEL_DB_BUSY = 0x7,
1093 GRBM_PERF_SEL_PA_BUSY = 0x8,
1094 GRBM_PERF_SEL_SC_BUSY = 0x9,
1095 GRBM_PERF_SEL_RESERVED_6 = 0xa,
1096 GRBM_PERF_SEL_SPI_BUSY = 0xb,
1097 GRBM_PERF_SEL_SX_BUSY = 0xc,
1098 GRBM_PERF_SEL_TA_BUSY = 0xd,
1099 GRBM_PERF_SEL_CB_CLEAN = 0xe,
1100 GRBM_PERF_SEL_DB_CLEAN = 0xf,
1101 GRBM_PERF_SEL_RESERVED_5 = 0x10,
1102 GRBM_PERF_SEL_VGT_BUSY = 0x11,
1103 GRBM_PERF_SEL_RESERVED_4 = 0x12,
1104 GRBM_PERF_SEL_RESERVED_3 = 0x13,
1105 GRBM_PERF_SEL_RESERVED_2 = 0x14,
1106 GRBM_PERF_SEL_RESERVED_1 = 0x15,
1107 GRBM_PERF_SEL_RESERVED_0 = 0x16,
1108 GRBM_PERF_SEL_IA_BUSY = 0x17,
1109 GRBM_PERF_SEL_IA_NO_DMA_BUSY = 0x18,
1110 GRBM_PERF_SEL_GDS_BUSY = 0x19,
1111 GRBM_PERF_SEL_BCI_BUSY = 0x1a,
1112 GRBM_PERF_SEL_RLC_BUSY = 0x1b,
1113 GRBM_PERF_SEL_TC_BUSY = 0x1c,
1114 GRBM_PERF_SEL_CPG_BUSY = 0x1d,
1115 GRBM_PERF_SEL_CPC_BUSY = 0x1e,
1116 GRBM_PERF_SEL_CPF_BUSY = 0x1f,
1117 GRBM_PERF_SEL_WD_BUSY = 0x20,
1118 GRBM_PERF_SEL_WD_NO_DMA_BUSY = 0x21,
1119} GRBM_PERF_SEL;
/* Per-shader-engine GRBM performance counter selects for SE0.
 * The SE0..SE3 enums are identical except for the engine index. */
1120typedef enum GRBM_SE0_PERF_SEL {
1121 GRBM_SE0_PERF_SEL_COUNT = 0x0,
1122 GRBM_SE0_PERF_SEL_USER_DEFINED = 0x1,
1123 GRBM_SE0_PERF_SEL_CB_BUSY = 0x2,
1124 GRBM_SE0_PERF_SEL_DB_BUSY = 0x3,
1125 GRBM_SE0_PERF_SEL_SC_BUSY = 0x4,
1126 GRBM_SE0_PERF_SEL_RESERVED_1 = 0x5,
1127 GRBM_SE0_PERF_SEL_SPI_BUSY = 0x6,
1128 GRBM_SE0_PERF_SEL_SX_BUSY = 0x7,
1129 GRBM_SE0_PERF_SEL_TA_BUSY = 0x8,
1130 GRBM_SE0_PERF_SEL_CB_CLEAN = 0x9,
1131 GRBM_SE0_PERF_SEL_DB_CLEAN = 0xa,
1132 GRBM_SE0_PERF_SEL_RESERVED_0 = 0xb,
1133 GRBM_SE0_PERF_SEL_PA_BUSY = 0xc,
1134 GRBM_SE0_PERF_SEL_VGT_BUSY = 0xd,
1135 GRBM_SE0_PERF_SEL_BCI_BUSY = 0xe,
1136} GRBM_SE0_PERF_SEL;
/* Per-shader-engine GRBM performance counter selects for SE1
 * (same encoding as the SE0 variant). */
1137typedef enum GRBM_SE1_PERF_SEL {
1138 GRBM_SE1_PERF_SEL_COUNT = 0x0,
1139 GRBM_SE1_PERF_SEL_USER_DEFINED = 0x1,
1140 GRBM_SE1_PERF_SEL_CB_BUSY = 0x2,
1141 GRBM_SE1_PERF_SEL_DB_BUSY = 0x3,
1142 GRBM_SE1_PERF_SEL_SC_BUSY = 0x4,
1143 GRBM_SE1_PERF_SEL_RESERVED_1 = 0x5,
1144 GRBM_SE1_PERF_SEL_SPI_BUSY = 0x6,
1145 GRBM_SE1_PERF_SEL_SX_BUSY = 0x7,
1146 GRBM_SE1_PERF_SEL_TA_BUSY = 0x8,
1147 GRBM_SE1_PERF_SEL_CB_CLEAN = 0x9,
1148 GRBM_SE1_PERF_SEL_DB_CLEAN = 0xa,
1149 GRBM_SE1_PERF_SEL_RESERVED_0 = 0xb,
1150 GRBM_SE1_PERF_SEL_PA_BUSY = 0xc,
1151 GRBM_SE1_PERF_SEL_VGT_BUSY = 0xd,
1152 GRBM_SE1_PERF_SEL_BCI_BUSY = 0xe,
1153} GRBM_SE1_PERF_SEL;
/* Per-shader-engine GRBM performance counter selects for SE2
 * (same encoding as the SE0 variant). */
1154typedef enum GRBM_SE2_PERF_SEL {
1155 GRBM_SE2_PERF_SEL_COUNT = 0x0,
1156 GRBM_SE2_PERF_SEL_USER_DEFINED = 0x1,
1157 GRBM_SE2_PERF_SEL_CB_BUSY = 0x2,
1158 GRBM_SE2_PERF_SEL_DB_BUSY = 0x3,
1159 GRBM_SE2_PERF_SEL_SC_BUSY = 0x4,
1160 GRBM_SE2_PERF_SEL_RESERVED_1 = 0x5,
1161 GRBM_SE2_PERF_SEL_SPI_BUSY = 0x6,
1162 GRBM_SE2_PERF_SEL_SX_BUSY = 0x7,
1163 GRBM_SE2_PERF_SEL_TA_BUSY = 0x8,
1164 GRBM_SE2_PERF_SEL_CB_CLEAN = 0x9,
1165 GRBM_SE2_PERF_SEL_DB_CLEAN = 0xa,
1166 GRBM_SE2_PERF_SEL_RESERVED_0 = 0xb,
1167 GRBM_SE2_PERF_SEL_PA_BUSY = 0xc,
1168 GRBM_SE2_PERF_SEL_VGT_BUSY = 0xd,
1169 GRBM_SE2_PERF_SEL_BCI_BUSY = 0xe,
1170} GRBM_SE2_PERF_SEL;
/* Per-shader-engine GRBM performance counter selects for SE3
 * (same encoding as the SE0 variant). */
1171typedef enum GRBM_SE3_PERF_SEL {
1172 GRBM_SE3_PERF_SEL_COUNT = 0x0,
1173 GRBM_SE3_PERF_SEL_USER_DEFINED = 0x1,
1174 GRBM_SE3_PERF_SEL_CB_BUSY = 0x2,
1175 GRBM_SE3_PERF_SEL_DB_BUSY = 0x3,
1176 GRBM_SE3_PERF_SEL_SC_BUSY = 0x4,
1177 GRBM_SE3_PERF_SEL_RESERVED_1 = 0x5,
1178 GRBM_SE3_PERF_SEL_SPI_BUSY = 0x6,
1179 GRBM_SE3_PERF_SEL_SX_BUSY = 0x7,
1180 GRBM_SE3_PERF_SEL_TA_BUSY = 0x8,
1181 GRBM_SE3_PERF_SEL_CB_CLEAN = 0x9,
1182 GRBM_SE3_PERF_SEL_DB_CLEAN = 0xa,
1183 GRBM_SE3_PERF_SEL_RESERVED_0 = 0xb,
1184 GRBM_SE3_PERF_SEL_PA_BUSY = 0xc,
1185 GRBM_SE3_PERF_SEL_VGT_BUSY = 0xd,
1186 GRBM_SE3_PERF_SEL_BCI_BUSY = 0xe,
1187} GRBM_SE3_PERF_SEL;
/* PA-SU (primitive assembly / setup unit) performance counter selects.
 * Prefix key (inferred from names — confirm against the hardware spec):
 * PASX = PA/SX interface, CLPR/CLSM/CLIPGA = clipper stages, SU = setup.
 * Values are hardware select encodings; never renumber. */
1188typedef enum SU_PERFCNT_SEL {
1189 PERF_PAPC_PASX_REQ = 0x0,
1190 PERF_PAPC_PASX_DISABLE_PIPE = 0x1,
1191 PERF_PAPC_PASX_FIRST_VECTOR = 0x2,
1192 PERF_PAPC_PASX_SECOND_VECTOR = 0x3,
1193 PERF_PAPC_PASX_FIRST_DEAD = 0x4,
1194 PERF_PAPC_PASX_SECOND_DEAD = 0x5,
1195 PERF_PAPC_PASX_VTX_KILL_DISCARD = 0x6,
1196 PERF_PAPC_PASX_VTX_NAN_DISCARD = 0x7,
1197 PERF_PAPC_PA_INPUT_PRIM = 0x8,
1198 PERF_PAPC_PA_INPUT_NULL_PRIM = 0x9,
1199 PERF_PAPC_PA_INPUT_EVENT_FLAG = 0xa,
1200 PERF_PAPC_PA_INPUT_FIRST_PRIM_SLOT = 0xb,
1201 PERF_PAPC_PA_INPUT_END_OF_PACKET = 0xc,
1202 PERF_PAPC_PA_INPUT_EXTENDED_EVENT = 0xd,
/* Clipper primitive culling / clipping selects. */
1203 PERF_PAPC_CLPR_CULL_PRIM = 0xe,
1204 PERF_PAPC_CLPR_VVUCP_CULL_PRIM = 0xf,
1205 PERF_PAPC_CLPR_VV_CULL_PRIM = 0x10,
1206 PERF_PAPC_CLPR_UCP_CULL_PRIM = 0x11,
1207 PERF_PAPC_CLPR_VTX_KILL_CULL_PRIM = 0x12,
1208 PERF_PAPC_CLPR_VTX_NAN_CULL_PRIM = 0x13,
1209 PERF_PAPC_CLPR_CULL_TO_NULL_PRIM = 0x14,
1210 PERF_PAPC_CLPR_VVUCP_CLIP_PRIM = 0x15,
1211 PERF_PAPC_CLPR_VV_CLIP_PRIM = 0x16,
1212 PERF_PAPC_CLPR_UCP_CLIP_PRIM = 0x17,
1213 PERF_PAPC_CLPR_POINT_CLIP_CANDIDATE = 0x18,
1214 PERF_PAPC_CLPR_CLIP_PLANE_CNT_1 = 0x19,
1215 PERF_PAPC_CLPR_CLIP_PLANE_CNT_2 = 0x1a,
1216 PERF_PAPC_CLPR_CLIP_PLANE_CNT_3 = 0x1b,
1217 PERF_PAPC_CLPR_CLIP_PLANE_CNT_4 = 0x1c,
1218 PERF_PAPC_CLPR_CLIP_PLANE_CNT_5_8 = 0x1d,
1219 PERF_PAPC_CLPR_CLIP_PLANE_CNT_9_12 = 0x1e,
1220 PERF_PAPC_CLPR_CLIP_PLANE_NEAR = 0x1f,
1221 PERF_PAPC_CLPR_CLIP_PLANE_FAR = 0x20,
1222 PERF_PAPC_CLPR_CLIP_PLANE_LEFT = 0x21,
1223 PERF_PAPC_CLPR_CLIP_PLANE_RIGHT = 0x22,
1224 PERF_PAPC_CLPR_CLIP_PLANE_TOP = 0x23,
1225 PERF_PAPC_CLPR_CLIP_PLANE_BOTTOM = 0x24,
1226 PERF_PAPC_CLPR_GSC_KILL_CULL_PRIM = 0x25,
1227 PERF_PAPC_CLPR_RASTER_KILL_CULL_PRIM = 0x26,
1228 PERF_PAPC_CLSM_NULL_PRIM = 0x27,
1229 PERF_PAPC_CLSM_TOTALLY_VISIBLE_PRIM = 0x28,
1230 PERF_PAPC_CLSM_CULL_TO_NULL_PRIM = 0x29,
1231 PERF_PAPC_CLSM_OUT_PRIM_CNT_1 = 0x2a,
1232 PERF_PAPC_CLSM_OUT_PRIM_CNT_2 = 0x2b,
1233 PERF_PAPC_CLSM_OUT_PRIM_CNT_3 = 0x2c,
1234 PERF_PAPC_CLSM_OUT_PRIM_CNT_4 = 0x2d,
1235 PERF_PAPC_CLSM_OUT_PRIM_CNT_5_8 = 0x2e,
1236 PERF_PAPC_CLSM_OUT_PRIM_CNT_9_13 = 0x2f,
1237 PERF_PAPC_CLIPGA_VTE_KILL_PRIM = 0x30,
/* Setup-unit input/output and face-cull selects. */
1238 PERF_PAPC_SU_INPUT_PRIM = 0x31,
1239 PERF_PAPC_SU_INPUT_CLIP_PRIM = 0x32,
1240 PERF_PAPC_SU_INPUT_NULL_PRIM = 0x33,
1241 PERF_PAPC_SU_INPUT_PRIM_DUAL = 0x34,
1242 PERF_PAPC_SU_INPUT_CLIP_PRIM_DUAL = 0x35,
1243 PERF_PAPC_SU_ZERO_AREA_CULL_PRIM = 0x36,
1244 PERF_PAPC_SU_BACK_FACE_CULL_PRIM = 0x37,
1245 PERF_PAPC_SU_FRONT_FACE_CULL_PRIM = 0x38,
1246 PERF_PAPC_SU_POLYMODE_FACE_CULL = 0x39,
1247 PERF_PAPC_SU_POLYMODE_BACK_CULL = 0x3a,
1248 PERF_PAPC_SU_POLYMODE_FRONT_CULL = 0x3b,
1249 PERF_PAPC_SU_POLYMODE_INVALID_FILL = 0x3c,
1250 PERF_PAPC_SU_OUTPUT_PRIM = 0x3d,
1251 PERF_PAPC_SU_OUTPUT_CLIP_PRIM = 0x3e,
1252 PERF_PAPC_SU_OUTPUT_NULL_PRIM = 0x3f,
1253 PERF_PAPC_SU_OUTPUT_EVENT_FLAG = 0x40,
1254 PERF_PAPC_SU_OUTPUT_FIRST_PRIM_SLOT = 0x41,
1255 PERF_PAPC_SU_OUTPUT_END_OF_PACKET = 0x42,
1256 PERF_PAPC_SU_OUTPUT_POLYMODE_FACE = 0x43,
1257 PERF_PAPC_SU_OUTPUT_POLYMODE_BACK = 0x44,
1258 PERF_PAPC_SU_OUTPUT_POLYMODE_FRONT = 0x45,
1259 PERF_PAPC_SU_OUT_CLIP_POLYMODE_FACE = 0x46,
1260 PERF_PAPC_SU_OUT_CLIP_POLYMODE_BACK = 0x47,
1261 PERF_PAPC_SU_OUT_CLIP_POLYMODE_FRONT = 0x48,
1262 PERF_PAPC_SU_OUTPUT_PRIM_DUAL = 0x49,
1263 PERF_PAPC_SU_OUTPUT_CLIP_PRIM_DUAL = 0x4a,
1264 PERF_PAPC_SU_OUTPUT_POLYMODE_DUAL = 0x4b,
1265 PERF_PAPC_SU_OUTPUT_CLIP_POLYMODE_DUAL = 0x4c,
/* Pipeline idle/busy/stall selects for the various PA sub-blocks. */
1266 PERF_PAPC_PASX_REQ_IDLE = 0x4d,
1267 PERF_PAPC_PASX_REQ_BUSY = 0x4e,
1268 PERF_PAPC_PASX_REQ_STALLED = 0x4f,
1269 PERF_PAPC_PASX_REC_IDLE = 0x50,
1270 PERF_PAPC_PASX_REC_BUSY = 0x51,
1271 PERF_PAPC_PASX_REC_STARVED_SX = 0x52,
1272 PERF_PAPC_PASX_REC_STALLED = 0x53,
1273 PERF_PAPC_PASX_REC_STALLED_POS_MEM = 0x54,
1274 PERF_PAPC_PASX_REC_STALLED_CCGSM_IN = 0x55,
1275 PERF_PAPC_CCGSM_IDLE = 0x56,
1276 PERF_PAPC_CCGSM_BUSY = 0x57,
1277 PERF_PAPC_CCGSM_STALLED = 0x58,
1278 PERF_PAPC_CLPRIM_IDLE = 0x59,
1279 PERF_PAPC_CLPRIM_BUSY = 0x5a,
1280 PERF_PAPC_CLPRIM_STALLED = 0x5b,
1281 PERF_PAPC_CLPRIM_STARVED_CCGSM = 0x5c,
1282 PERF_PAPC_CLIPSM_IDLE = 0x5d,
1283 PERF_PAPC_CLIPSM_BUSY = 0x5e,
1284 PERF_PAPC_CLIPSM_WAIT_CLIP_VERT_ENGH = 0x5f,
1285 PERF_PAPC_CLIPSM_WAIT_HIGH_PRI_SEQ = 0x60,
1286 PERF_PAPC_CLIPSM_WAIT_CLIPGA = 0x61,
1287 PERF_PAPC_CLIPSM_WAIT_AVAIL_VTE_CLIP = 0x62,
1288 PERF_PAPC_CLIPSM_WAIT_CLIP_OUTSM = 0x63,
1289 PERF_PAPC_CLIPGA_IDLE = 0x64,
1290 PERF_PAPC_CLIPGA_BUSY = 0x65,
1291 PERF_PAPC_CLIPGA_STARVED_VTE_CLIP = 0x66,
1292 PERF_PAPC_CLIPGA_STALLED = 0x67,
1293 PERF_PAPC_CLIP_IDLE = 0x68,
1294 PERF_PAPC_CLIP_BUSY = 0x69,
1295 PERF_PAPC_SU_IDLE = 0x6a,
1296 PERF_PAPC_SU_BUSY = 0x6b,
1297 PERF_PAPC_SU_STARVED_CLIP = 0x6c,
1298 PERF_PAPC_SU_STALLED_SC = 0x6d,
1299 PERF_PAPC_CL_DYN_SCLK_VLD = 0x6e,
1300 PERF_PAPC_SU_DYN_SCLK_VLD = 0x6f,
1301 PERF_PAPC_PA_REG_SCLK_VLD = 0x70,
/* Multi-shader-engine (SE0..SE3) variants. */
1302 PERF_PAPC_SU_MULTI_GPU_PRIM_FILTER_CULL = 0x71,
1303 PERF_PAPC_PASX_SE0_REQ = 0x72,
1304 PERF_PAPC_PASX_SE1_REQ = 0x73,
1305 PERF_PAPC_PASX_SE0_FIRST_VECTOR = 0x74,
1306 PERF_PAPC_PASX_SE0_SECOND_VECTOR = 0x75,
1307 PERF_PAPC_PASX_SE1_FIRST_VECTOR = 0x76,
1308 PERF_PAPC_PASX_SE1_SECOND_VECTOR = 0x77,
1309 PERF_PAPC_SU_SE0_PRIM_FILTER_CULL = 0x78,
1310 PERF_PAPC_SU_SE1_PRIM_FILTER_CULL = 0x79,
1311 PERF_PAPC_SU_SE01_PRIM_FILTER_CULL = 0x7a,
1312 PERF_PAPC_SU_SE0_OUTPUT_PRIM = 0x7b,
1313 PERF_PAPC_SU_SE1_OUTPUT_PRIM = 0x7c,
1314 PERF_PAPC_SU_SE01_OUTPUT_PRIM = 0x7d,
1315 PERF_PAPC_SU_SE0_OUTPUT_NULL_PRIM = 0x7e,
1316 PERF_PAPC_SU_SE1_OUTPUT_NULL_PRIM = 0x7f,
1317 PERF_PAPC_SU_SE01_OUTPUT_NULL_PRIM = 0x80,
1318 PERF_PAPC_SU_SE0_OUTPUT_FIRST_PRIM_SLOT = 0x81,
1319 PERF_PAPC_SU_SE1_OUTPUT_FIRST_PRIM_SLOT = 0x82,
1320 PERF_PAPC_SU_SE0_STALLED_SC = 0x83,
1321 PERF_PAPC_SU_SE1_STALLED_SC = 0x84,
1322 PERF_PAPC_SU_SE01_STALLED_SC = 0x85,
1323 PERF_PAPC_CLSM_CLIPPING_PRIM = 0x86,
1324 PERF_PAPC_SU_CULLED_PRIM = 0x87,
1325 PERF_PAPC_SU_OUTPUT_EOPG = 0x88,
1326 PERF_PAPC_SU_SE2_PRIM_FILTER_CULL = 0x89,
1327 PERF_PAPC_SU_SE3_PRIM_FILTER_CULL = 0x8a,
1328 PERF_PAPC_SU_SE2_OUTPUT_PRIM = 0x8b,
1329 PERF_PAPC_SU_SE3_OUTPUT_PRIM = 0x8c,
1330 PERF_PAPC_SU_SE2_OUTPUT_NULL_PRIM = 0x8d,
1331 PERF_PAPC_SU_SE3_OUTPUT_NULL_PRIM = 0x8e,
1332 PERF_PAPC_SU_SE0_OUTPUT_END_OF_PACKET = 0x8f,
1333 PERF_PAPC_SU_SE1_OUTPUT_END_OF_PACKET = 0x90,
1334 PERF_PAPC_SU_SE2_OUTPUT_END_OF_PACKET = 0x91,
1335 PERF_PAPC_SU_SE3_OUTPUT_END_OF_PACKET = 0x92,
1336 PERF_PAPC_SU_SE0_OUTPUT_EOPG = 0x93,
1337 PERF_PAPC_SU_SE1_OUTPUT_EOPG = 0x94,
1338 PERF_PAPC_SU_SE2_OUTPUT_EOPG = 0x95,
1339 PERF_PAPC_SU_SE3_OUTPUT_EOPG = 0x96,
1340 PERF_PAPC_SU_SE2_STALLED_SC = 0x97,
1341 PERF_PAPC_SU_SE3_STALLED_SC = 0x98,
1342} SU_PERFCNT_SEL;
/* PA-SC (scan converter) performance counter selects.
 * Name patterns (inferred from the identifiers — confirm against the
 * hardware spec): *_Hn entries are histogram buckets (H0..H16), QZn/Pn are
 * per-quad-pipe / per-packer instances, *_WE/*_RD are FIFO write/read
 * enables. Values are hardware select encodings; never renumber. */
1343typedef enum SC_PERFCNT_SEL {
1344 SC_SRPS_WINDOW_VALID = 0x0,
1345 SC_PSSW_WINDOW_VALID = 0x1,
1346 SC_TPQZ_WINDOW_VALID = 0x2,
1347 SC_QZQP_WINDOW_VALID = 0x3,
1348 SC_TRPK_WINDOW_VALID = 0x4,
1349 SC_SRPS_WINDOW_VALID_BUSY = 0x5,
1350 SC_PSSW_WINDOW_VALID_BUSY = 0x6,
1351 SC_TPQZ_WINDOW_VALID_BUSY = 0x7,
1352 SC_QZQP_WINDOW_VALID_BUSY = 0x8,
1353 SC_TRPK_WINDOW_VALID_BUSY = 0x9,
1354 SC_STARVED_BY_PA = 0xa,
1355 SC_STALLED_BY_PRIMFIFO = 0xb,
1356 SC_STALLED_BY_DB_TILE = 0xc,
1357 SC_STARVED_BY_DB_TILE = 0xd,
1358 SC_STALLED_BY_TILEORDERFIFO = 0xe,
1359 SC_STALLED_BY_TILEFIFO = 0xf,
1360 SC_STALLED_BY_DB_QUAD = 0x10,
1361 SC_STARVED_BY_DB_QUAD = 0x11,
1362 SC_STALLED_BY_QUADFIFO = 0x12,
1363 SC_STALLED_BY_BCI = 0x13,
1364 SC_STALLED_BY_SPI = 0x14,
1365 SC_SCISSOR_DISCARD = 0x15,
1366 SC_BB_DISCARD = 0x16,
/* Supertile/tile-per-primitive histogram buckets. */
1367 SC_SUPERTILE_COUNT = 0x17,
1368 SC_SUPERTILE_PER_PRIM_H0 = 0x18,
1369 SC_SUPERTILE_PER_PRIM_H1 = 0x19,
1370 SC_SUPERTILE_PER_PRIM_H2 = 0x1a,
1371 SC_SUPERTILE_PER_PRIM_H3 = 0x1b,
1372 SC_SUPERTILE_PER_PRIM_H4 = 0x1c,
1373 SC_SUPERTILE_PER_PRIM_H5 = 0x1d,
1374 SC_SUPERTILE_PER_PRIM_H6 = 0x1e,
1375 SC_SUPERTILE_PER_PRIM_H7 = 0x1f,
1376 SC_SUPERTILE_PER_PRIM_H8 = 0x20,
1377 SC_SUPERTILE_PER_PRIM_H9 = 0x21,
1378 SC_SUPERTILE_PER_PRIM_H10 = 0x22,
1379 SC_SUPERTILE_PER_PRIM_H11 = 0x23,
1380 SC_SUPERTILE_PER_PRIM_H12 = 0x24,
1381 SC_SUPERTILE_PER_PRIM_H13 = 0x25,
1382 SC_SUPERTILE_PER_PRIM_H14 = 0x26,
1383 SC_SUPERTILE_PER_PRIM_H15 = 0x27,
1384 SC_SUPERTILE_PER_PRIM_H16 = 0x28,
1385 SC_TILE_PER_PRIM_H0 = 0x29,
1386 SC_TILE_PER_PRIM_H1 = 0x2a,
1387 SC_TILE_PER_PRIM_H2 = 0x2b,
1388 SC_TILE_PER_PRIM_H3 = 0x2c,
1389 SC_TILE_PER_PRIM_H4 = 0x2d,
1390 SC_TILE_PER_PRIM_H5 = 0x2e,
1391 SC_TILE_PER_PRIM_H6 = 0x2f,
1392 SC_TILE_PER_PRIM_H7 = 0x30,
1393 SC_TILE_PER_PRIM_H8 = 0x31,
1394 SC_TILE_PER_PRIM_H9 = 0x32,
1395 SC_TILE_PER_PRIM_H10 = 0x33,
1396 SC_TILE_PER_PRIM_H11 = 0x34,
1397 SC_TILE_PER_PRIM_H12 = 0x35,
1398 SC_TILE_PER_PRIM_H13 = 0x36,
1399 SC_TILE_PER_PRIM_H14 = 0x37,
1400 SC_TILE_PER_PRIM_H15 = 0x38,
1401 SC_TILE_PER_PRIM_H16 = 0x39,
1402 SC_TILE_PER_SUPERTILE_H0 = 0x3a,
1403 SC_TILE_PER_SUPERTILE_H1 = 0x3b,
1404 SC_TILE_PER_SUPERTILE_H2 = 0x3c,
1405 SC_TILE_PER_SUPERTILE_H3 = 0x3d,
1406 SC_TILE_PER_SUPERTILE_H4 = 0x3e,
1407 SC_TILE_PER_SUPERTILE_H5 = 0x3f,
1408 SC_TILE_PER_SUPERTILE_H6 = 0x40,
1409 SC_TILE_PER_SUPERTILE_H7 = 0x41,
1410 SC_TILE_PER_SUPERTILE_H8 = 0x42,
1411 SC_TILE_PER_SUPERTILE_H9 = 0x43,
1412 SC_TILE_PER_SUPERTILE_H10 = 0x44,
1413 SC_TILE_PER_SUPERTILE_H11 = 0x45,
1414 SC_TILE_PER_SUPERTILE_H12 = 0x46,
1415 SC_TILE_PER_SUPERTILE_H13 = 0x47,
1416 SC_TILE_PER_SUPERTILE_H14 = 0x48,
1417 SC_TILE_PER_SUPERTILE_H15 = 0x49,
1418 SC_TILE_PER_SUPERTILE_H16 = 0x4a,
1419 SC_TILE_PICKED_H1 = 0x4b,
1420 SC_TILE_PICKED_H2 = 0x4c,
1421 SC_TILE_PICKED_H3 = 0x4d,
1422 SC_TILE_PICKED_H4 = 0x4e,
/* Per-quad-pipe (QZ0..QZ3) tile and quad counters. */
1423 SC_QZ0_MULTI_GPU_TILE_DISCARD = 0x4f,
1424 SC_QZ1_MULTI_GPU_TILE_DISCARD = 0x50,
1425 SC_QZ2_MULTI_GPU_TILE_DISCARD = 0x51,
1426 SC_QZ3_MULTI_GPU_TILE_DISCARD = 0x52,
1427 SC_QZ0_TILE_COUNT = 0x53,
1428 SC_QZ1_TILE_COUNT = 0x54,
1429 SC_QZ2_TILE_COUNT = 0x55,
1430 SC_QZ3_TILE_COUNT = 0x56,
1431 SC_QZ0_TILE_COVERED_COUNT = 0x57,
1432 SC_QZ1_TILE_COVERED_COUNT = 0x58,
1433 SC_QZ2_TILE_COVERED_COUNT = 0x59,
1434 SC_QZ3_TILE_COVERED_COUNT = 0x5a,
1435 SC_QZ0_TILE_NOT_COVERED_COUNT = 0x5b,
1436 SC_QZ1_TILE_NOT_COVERED_COUNT = 0x5c,
1437 SC_QZ2_TILE_NOT_COVERED_COUNT = 0x5d,
1438 SC_QZ3_TILE_NOT_COVERED_COUNT = 0x5e,
1439 SC_QZ0_QUAD_PER_TILE_H0 = 0x5f,
1440 SC_QZ0_QUAD_PER_TILE_H1 = 0x60,
1441 SC_QZ0_QUAD_PER_TILE_H2 = 0x61,
1442 SC_QZ0_QUAD_PER_TILE_H3 = 0x62,
1443 SC_QZ0_QUAD_PER_TILE_H4 = 0x63,
1444 SC_QZ0_QUAD_PER_TILE_H5 = 0x64,
1445 SC_QZ0_QUAD_PER_TILE_H6 = 0x65,
1446 SC_QZ0_QUAD_PER_TILE_H7 = 0x66,
1447 SC_QZ0_QUAD_PER_TILE_H8 = 0x67,
1448 SC_QZ0_QUAD_PER_TILE_H9 = 0x68,
1449 SC_QZ0_QUAD_PER_TILE_H10 = 0x69,
1450 SC_QZ0_QUAD_PER_TILE_H11 = 0x6a,
1451 SC_QZ0_QUAD_PER_TILE_H12 = 0x6b,
1452 SC_QZ0_QUAD_PER_TILE_H13 = 0x6c,
1453 SC_QZ0_QUAD_PER_TILE_H14 = 0x6d,
1454 SC_QZ0_QUAD_PER_TILE_H15 = 0x6e,
1455 SC_QZ0_QUAD_PER_TILE_H16 = 0x6f,
1456 SC_QZ1_QUAD_PER_TILE_H0 = 0x70,
1457 SC_QZ1_QUAD_PER_TILE_H1 = 0x71,
1458 SC_QZ1_QUAD_PER_TILE_H2 = 0x72,
1459 SC_QZ1_QUAD_PER_TILE_H3 = 0x73,
1460 SC_QZ1_QUAD_PER_TILE_H4 = 0x74,
1461 SC_QZ1_QUAD_PER_TILE_H5 = 0x75,
1462 SC_QZ1_QUAD_PER_TILE_H6 = 0x76,
1463 SC_QZ1_QUAD_PER_TILE_H7 = 0x77,
1464 SC_QZ1_QUAD_PER_TILE_H8 = 0x78,
1465 SC_QZ1_QUAD_PER_TILE_H9 = 0x79,
1466 SC_QZ1_QUAD_PER_TILE_H10 = 0x7a,
1467 SC_QZ1_QUAD_PER_TILE_H11 = 0x7b,
1468 SC_QZ1_QUAD_PER_TILE_H12 = 0x7c,
1469 SC_QZ1_QUAD_PER_TILE_H13 = 0x7d,
1470 SC_QZ1_QUAD_PER_TILE_H14 = 0x7e,
1471 SC_QZ1_QUAD_PER_TILE_H15 = 0x7f,
1472 SC_QZ1_QUAD_PER_TILE_H16 = 0x80,
1473 SC_QZ2_QUAD_PER_TILE_H0 = 0x81,
1474 SC_QZ2_QUAD_PER_TILE_H1 = 0x82,
1475 SC_QZ2_QUAD_PER_TILE_H2 = 0x83,
1476 SC_QZ2_QUAD_PER_TILE_H3 = 0x84,
1477 SC_QZ2_QUAD_PER_TILE_H4 = 0x85,
1478 SC_QZ2_QUAD_PER_TILE_H5 = 0x86,
1479 SC_QZ2_QUAD_PER_TILE_H6 = 0x87,
1480 SC_QZ2_QUAD_PER_TILE_H7 = 0x88,
1481 SC_QZ2_QUAD_PER_TILE_H8 = 0x89,
1482 SC_QZ2_QUAD_PER_TILE_H9 = 0x8a,
1483 SC_QZ2_QUAD_PER_TILE_H10 = 0x8b,
1484 SC_QZ2_QUAD_PER_TILE_H11 = 0x8c,
1485 SC_QZ2_QUAD_PER_TILE_H12 = 0x8d,
1486 SC_QZ2_QUAD_PER_TILE_H13 = 0x8e,
1487 SC_QZ2_QUAD_PER_TILE_H14 = 0x8f,
1488 SC_QZ2_QUAD_PER_TILE_H15 = 0x90,
1489 SC_QZ2_QUAD_PER_TILE_H16 = 0x91,
1490 SC_QZ3_QUAD_PER_TILE_H0 = 0x92,
1491 SC_QZ3_QUAD_PER_TILE_H1 = 0x93,
1492 SC_QZ3_QUAD_PER_TILE_H2 = 0x94,
1493 SC_QZ3_QUAD_PER_TILE_H3 = 0x95,
1494 SC_QZ3_QUAD_PER_TILE_H4 = 0x96,
1495 SC_QZ3_QUAD_PER_TILE_H5 = 0x97,
1496 SC_QZ3_QUAD_PER_TILE_H6 = 0x98,
1497 SC_QZ3_QUAD_PER_TILE_H7 = 0x99,
1498 SC_QZ3_QUAD_PER_TILE_H8 = 0x9a,
1499 SC_QZ3_QUAD_PER_TILE_H9 = 0x9b,
1500 SC_QZ3_QUAD_PER_TILE_H10 = 0x9c,
1501 SC_QZ3_QUAD_PER_TILE_H11 = 0x9d,
1502 SC_QZ3_QUAD_PER_TILE_H12 = 0x9e,
1503 SC_QZ3_QUAD_PER_TILE_H13 = 0x9f,
1504 SC_QZ3_QUAD_PER_TILE_H14 = 0xa0,
1505 SC_QZ3_QUAD_PER_TILE_H15 = 0xa1,
1506 SC_QZ3_QUAD_PER_TILE_H16 = 0xa2,
1507 SC_QZ0_QUAD_COUNT = 0xa3,
1508 SC_QZ1_QUAD_COUNT = 0xa4,
1509 SC_QZ2_QUAD_COUNT = 0xa5,
1510 SC_QZ3_QUAD_COUNT = 0xa6,
/* Per-packer (P0..P3) HiZ tile/quad counters and histograms. */
1511 SC_P0_HIZ_TILE_COUNT = 0xa7,
1512 SC_P1_HIZ_TILE_COUNT = 0xa8,
1513 SC_P2_HIZ_TILE_COUNT = 0xa9,
1514 SC_P3_HIZ_TILE_COUNT = 0xaa,
1515 SC_P0_HIZ_QUAD_PER_TILE_H0 = 0xab,
1516 SC_P0_HIZ_QUAD_PER_TILE_H1 = 0xac,
1517 SC_P0_HIZ_QUAD_PER_TILE_H2 = 0xad,
1518 SC_P0_HIZ_QUAD_PER_TILE_H3 = 0xae,
1519 SC_P0_HIZ_QUAD_PER_TILE_H4 = 0xaf,
1520 SC_P0_HIZ_QUAD_PER_TILE_H5 = 0xb0,
1521 SC_P0_HIZ_QUAD_PER_TILE_H6 = 0xb1,
1522 SC_P0_HIZ_QUAD_PER_TILE_H7 = 0xb2,
1523 SC_P0_HIZ_QUAD_PER_TILE_H8 = 0xb3,
1524 SC_P0_HIZ_QUAD_PER_TILE_H9 = 0xb4,
1525 SC_P0_HIZ_QUAD_PER_TILE_H10 = 0xb5,
1526 SC_P0_HIZ_QUAD_PER_TILE_H11 = 0xb6,
1527 SC_P0_HIZ_QUAD_PER_TILE_H12 = 0xb7,
1528 SC_P0_HIZ_QUAD_PER_TILE_H13 = 0xb8,
1529 SC_P0_HIZ_QUAD_PER_TILE_H14 = 0xb9,
1530 SC_P0_HIZ_QUAD_PER_TILE_H15 = 0xba,
1531 SC_P0_HIZ_QUAD_PER_TILE_H16 = 0xbb,
1532 SC_P1_HIZ_QUAD_PER_TILE_H0 = 0xbc,
1533 SC_P1_HIZ_QUAD_PER_TILE_H1 = 0xbd,
1534 SC_P1_HIZ_QUAD_PER_TILE_H2 = 0xbe,
1535 SC_P1_HIZ_QUAD_PER_TILE_H3 = 0xbf,
1536 SC_P1_HIZ_QUAD_PER_TILE_H4 = 0xc0,
1537 SC_P1_HIZ_QUAD_PER_TILE_H5 = 0xc1,
1538 SC_P1_HIZ_QUAD_PER_TILE_H6 = 0xc2,
1539 SC_P1_HIZ_QUAD_PER_TILE_H7 = 0xc3,
1540 SC_P1_HIZ_QUAD_PER_TILE_H8 = 0xc4,
1541 SC_P1_HIZ_QUAD_PER_TILE_H9 = 0xc5,
1542 SC_P1_HIZ_QUAD_PER_TILE_H10 = 0xc6,
1543 SC_P1_HIZ_QUAD_PER_TILE_H11 = 0xc7,
1544 SC_P1_HIZ_QUAD_PER_TILE_H12 = 0xc8,
1545 SC_P1_HIZ_QUAD_PER_TILE_H13 = 0xc9,
1546 SC_P1_HIZ_QUAD_PER_TILE_H14 = 0xca,
1547 SC_P1_HIZ_QUAD_PER_TILE_H15 = 0xcb,
1548 SC_P1_HIZ_QUAD_PER_TILE_H16 = 0xcc,
1549 SC_P2_HIZ_QUAD_PER_TILE_H0 = 0xcd,
1550 SC_P2_HIZ_QUAD_PER_TILE_H1 = 0xce,
1551 SC_P2_HIZ_QUAD_PER_TILE_H2 = 0xcf,
1552 SC_P2_HIZ_QUAD_PER_TILE_H3 = 0xd0,
1553 SC_P2_HIZ_QUAD_PER_TILE_H4 = 0xd1,
1554 SC_P2_HIZ_QUAD_PER_TILE_H5 = 0xd2,
1555 SC_P2_HIZ_QUAD_PER_TILE_H6 = 0xd3,
1556 SC_P2_HIZ_QUAD_PER_TILE_H7 = 0xd4,
1557 SC_P2_HIZ_QUAD_PER_TILE_H8 = 0xd5,
1558 SC_P2_HIZ_QUAD_PER_TILE_H9 = 0xd6,
1559 SC_P2_HIZ_QUAD_PER_TILE_H10 = 0xd7,
1560 SC_P2_HIZ_QUAD_PER_TILE_H11 = 0xd8,
1561 SC_P2_HIZ_QUAD_PER_TILE_H12 = 0xd9,
1562 SC_P2_HIZ_QUAD_PER_TILE_H13 = 0xda,
1563 SC_P2_HIZ_QUAD_PER_TILE_H14 = 0xdb,
1564 SC_P2_HIZ_QUAD_PER_TILE_H15 = 0xdc,
1565 SC_P2_HIZ_QUAD_PER_TILE_H16 = 0xdd,
1566 SC_P3_HIZ_QUAD_PER_TILE_H0 = 0xde,
1567 SC_P3_HIZ_QUAD_PER_TILE_H1 = 0xdf,
1568 SC_P3_HIZ_QUAD_PER_TILE_H2 = 0xe0,
1569 SC_P3_HIZ_QUAD_PER_TILE_H3 = 0xe1,
1570 SC_P3_HIZ_QUAD_PER_TILE_H4 = 0xe2,
1571 SC_P3_HIZ_QUAD_PER_TILE_H5 = 0xe3,
1572 SC_P3_HIZ_QUAD_PER_TILE_H6 = 0xe4,
1573 SC_P3_HIZ_QUAD_PER_TILE_H7 = 0xe5,
1574 SC_P3_HIZ_QUAD_PER_TILE_H8 = 0xe6,
1575 SC_P3_HIZ_QUAD_PER_TILE_H9 = 0xe7,
1576 SC_P3_HIZ_QUAD_PER_TILE_H10 = 0xe8,
1577 SC_P3_HIZ_QUAD_PER_TILE_H11 = 0xe9,
1578 SC_P3_HIZ_QUAD_PER_TILE_H12 = 0xea,
1579 SC_P3_HIZ_QUAD_PER_TILE_H13 = 0xeb,
1580 SC_P3_HIZ_QUAD_PER_TILE_H14 = 0xec,
1581 SC_P3_HIZ_QUAD_PER_TILE_H15 = 0xed,
1582 SC_P3_HIZ_QUAD_PER_TILE_H16 = 0xee,
1583 SC_P0_HIZ_QUAD_COUNT = 0xef,
1584 SC_P1_HIZ_QUAD_COUNT = 0xf0,
1585 SC_P2_HIZ_QUAD_COUNT = 0xf1,
1586 SC_P3_HIZ_QUAD_COUNT = 0xf2,
/* Per-packer detail-quad coverage counters (1..4 live pixels per quad). */
1587 SC_P0_DETAIL_QUAD_COUNT = 0xf3,
1588 SC_P1_DETAIL_QUAD_COUNT = 0xf4,
1589 SC_P2_DETAIL_QUAD_COUNT = 0xf5,
1590 SC_P3_DETAIL_QUAD_COUNT = 0xf6,
1591 SC_P0_DETAIL_QUAD_WITH_1_PIX = 0xf7,
1592 SC_P0_DETAIL_QUAD_WITH_2_PIX = 0xf8,
1593 SC_P0_DETAIL_QUAD_WITH_3_PIX = 0xf9,
1594 SC_P0_DETAIL_QUAD_WITH_4_PIX = 0xfa,
1595 SC_P1_DETAIL_QUAD_WITH_1_PIX = 0xfb,
1596 SC_P1_DETAIL_QUAD_WITH_2_PIX = 0xfc,
1597 SC_P1_DETAIL_QUAD_WITH_3_PIX = 0xfd,
1598 SC_P1_DETAIL_QUAD_WITH_4_PIX = 0xfe,
1599 SC_P2_DETAIL_QUAD_WITH_1_PIX = 0xff,
1600 SC_P2_DETAIL_QUAD_WITH_2_PIX = 0x100,
1601 SC_P2_DETAIL_QUAD_WITH_3_PIX = 0x101,
1602 SC_P2_DETAIL_QUAD_WITH_4_PIX = 0x102,
1603 SC_P3_DETAIL_QUAD_WITH_1_PIX = 0x103,
1604 SC_P3_DETAIL_QUAD_WITH_2_PIX = 0x104,
1605 SC_P3_DETAIL_QUAD_WITH_3_PIX = 0x105,
1606 SC_P3_DETAIL_QUAD_WITH_4_PIX = 0x106,
1607 SC_EARLYZ_QUAD_COUNT = 0x107,
1608 SC_EARLYZ_QUAD_WITH_1_PIX = 0x108,
1609 SC_EARLYZ_QUAD_WITH_2_PIX = 0x109,
1610 SC_EARLYZ_QUAD_WITH_3_PIX = 0x10a,
1611 SC_EARLYZ_QUAD_WITH_4_PIX = 0x10b,
1612 SC_PKR_QUAD_PER_ROW_H1 = 0x10c,
1613 SC_PKR_QUAD_PER_ROW_H2 = 0x10d,
1614 SC_PKR_4X2_QUAD_SPLIT = 0x10e,
1615 SC_PKR_4X2_FILL_QUAD = 0x10f,
1616 SC_PKR_END_OF_VECTOR = 0x110,
1617 SC_PKR_CONTROL_XFER = 0x111,
1618 SC_PKR_DBHANG_FORCE_EOV = 0x112,
1619 SC_REG_SCLK_BUSY = 0x113,
1620 SC_GRP0_DYN_SCLK_BUSY = 0x114,
1621 SC_GRP1_DYN_SCLK_BUSY = 0x115,
1622 SC_GRP2_DYN_SCLK_BUSY = 0x116,
1623 SC_GRP3_DYN_SCLK_BUSY = 0x117,
1624 SC_GRP4_DYN_SCLK_BUSY = 0x118,
/* PA->SC interface FIFO activity (per PA instance 0..3). */
1625 SC_PA0_SC_DATA_FIFO_RD = 0x119,
1626 SC_PA0_SC_DATA_FIFO_WE = 0x11a,
1627 SC_PA1_SC_DATA_FIFO_RD = 0x11b,
1628 SC_PA1_SC_DATA_FIFO_WE = 0x11c,
1629 SC_PS_ARB_XFC_ALL_EVENT_OR_PRIM_CYCLES = 0x11d,
1630 SC_PS_ARB_XFC_ONLY_PRIM_CYCLES = 0x11e,
1631 SC_PS_ARB_XFC_ONLY_ONE_INC_PER_PRIM = 0x11f,
1632 SC_PS_ARB_STALLED_FROM_BELOW = 0x120,
1633 SC_PS_ARB_STARVED_FROM_ABOVE = 0x121,
1634 SC_PS_ARB_SC_BUSY = 0x122,
1635 SC_PS_ARB_PA_SC_BUSY = 0x123,
1636 SC_PA2_SC_DATA_FIFO_RD = 0x124,
1637 SC_PA2_SC_DATA_FIFO_WE = 0x125,
1638 SC_PA3_SC_DATA_FIFO_RD = 0x126,
1639 SC_PA3_SC_DATA_FIFO_WE = 0x127,
1640 SC_PA_SC_DEALLOC_0_0_WE = 0x128,
1641 SC_PA_SC_DEALLOC_0_1_WE = 0x129,
1642 SC_PA_SC_DEALLOC_1_0_WE = 0x12a,
1643 SC_PA_SC_DEALLOC_1_1_WE = 0x12b,
1644 SC_PA_SC_DEALLOC_2_0_WE = 0x12c,
1645 SC_PA_SC_DEALLOC_2_1_WE = 0x12d,
1646 SC_PA_SC_DEALLOC_3_0_WE = 0x12e,
1647 SC_PA_SC_DEALLOC_3_1_WE = 0x12f,
1648 SC_PA0_SC_EOP_WE = 0x130,
1649 SC_PA0_SC_EOPG_WE = 0x131,
1650 SC_PA0_SC_EVENT_WE = 0x132,
1651 SC_PA1_SC_EOP_WE = 0x133,
1652 SC_PA1_SC_EOPG_WE = 0x134,
1653 SC_PA1_SC_EVENT_WE = 0x135,
1654 SC_PA2_SC_EOP_WE = 0x136,
1655 SC_PA2_SC_EOPG_WE = 0x137,
1656 SC_PA2_SC_EVENT_WE = 0x138,
1657 SC_PA3_SC_EOP_WE = 0x139,
1658 SC_PA3_SC_EOPG_WE = 0x13a,
1659 SC_PA3_SC_EVENT_WE = 0x13b,
1660 SC_PS_ARB_OOO_THRESHOLD_SWITCH_TO_DESIRED_FIFO = 0x13c,
1661 SC_PS_ARB_OOO_FIFO_EMPTY_SWITCH = 0x13d,
1662 SC_PS_ARB_NULL_PRIM_BUBBLE_POP = 0x13e,
1663 SC_PS_ARB_EOP_POP_SYNC_POP = 0x13f,
1664 SC_PS_ARB_EVENT_SYNC_POP = 0x140,
1665 SC_SC_PS_ENG_MULTICYCLE_BUBBLE = 0x141,
1666 SC_PA0_SC_FPOV_WE = 0x142,
1667 SC_PA1_SC_FPOV_WE = 0x143,
1668 SC_PA2_SC_FPOV_WE = 0x144,
1669 SC_PA3_SC_FPOV_WE = 0x145,
1670 SC_PA0_SC_LPOV_WE = 0x146,
1671 SC_PA1_SC_LPOV_WE = 0x147,
1672 SC_PA2_SC_LPOV_WE = 0x148,
1673 SC_PA3_SC_LPOV_WE = 0x149,
/* SC->SPI interface selects. */
1674 SC_SC_SPI_DEALLOC_0_0 = 0x14a,
1675 SC_SC_SPI_DEALLOC_0_1 = 0x14b,
1676 SC_SC_SPI_DEALLOC_0_2 = 0x14c,
1677 SC_SC_SPI_DEALLOC_1_0 = 0x14d,
1678 SC_SC_SPI_DEALLOC_1_1 = 0x14e,
1679 SC_SC_SPI_DEALLOC_1_2 = 0x14f,
1680 SC_SC_SPI_DEALLOC_2_0 = 0x150,
1681 SC_SC_SPI_DEALLOC_2_1 = 0x151,
1682 SC_SC_SPI_DEALLOC_2_2 = 0x152,
1683 SC_SC_SPI_DEALLOC_3_0 = 0x153,
1684 SC_SC_SPI_DEALLOC_3_1 = 0x154,
1685 SC_SC_SPI_DEALLOC_3_2 = 0x155,
1686 SC_SC_SPI_FPOV_0 = 0x156,
1687 SC_SC_SPI_FPOV_1 = 0x157,
1688 SC_SC_SPI_FPOV_2 = 0x158,
1689 SC_SC_SPI_FPOV_3 = 0x159,
1690 SC_SC_SPI_EVENT = 0x15a,
1691 SC_PS_TS_EVENT_FIFO_PUSH = 0x15b,
1692 SC_PS_TS_EVENT_FIFO_POP = 0x15c,
1693 SC_PS_CTX_DONE_FIFO_PUSH = 0x15d,
1694 SC_PS_CTX_DONE_FIFO_POP = 0x15e,
1695 SC_MULTICYCLE_BUBBLE_FREEZE = 0x15f,
1696 SC_EOP_SYNC_WINDOW = 0x160,
1697 SC_PA0_SC_NULL_WE = 0x161,
1698 SC_PA0_SC_NULL_DEALLOC_WE = 0x162,
1699 SC_PA0_SC_DATA_FIFO_EOPG_RD = 0x163,
1700 SC_PA0_SC_DATA_FIFO_EOP_RD = 0x164,
1701 SC_PA0_SC_DEALLOC_0_RD = 0x165,
1702 SC_PA0_SC_DEALLOC_1_RD = 0x166,
1703 SC_PA1_SC_DATA_FIFO_EOPG_RD = 0x167,
1704 SC_PA1_SC_DATA_FIFO_EOP_RD = 0x168,
1705 SC_PA1_SC_DEALLOC_0_RD = 0x169,
1706 SC_PA1_SC_DEALLOC_1_RD = 0x16a,
1707 SC_PA1_SC_NULL_WE = 0x16b,
1708 SC_PA1_SC_NULL_DEALLOC_WE = 0x16c,
1709 SC_PA2_SC_DATA_FIFO_EOPG_RD = 0x16d,
1710 SC_PA2_SC_DATA_FIFO_EOP_RD = 0x16e,
1711 SC_PA2_SC_DEALLOC_0_RD = 0x16f,
1712 SC_PA2_SC_DEALLOC_1_RD = 0x170,
1713 SC_PA2_SC_NULL_WE = 0x171,
1714 SC_PA2_SC_NULL_DEALLOC_WE = 0x172,
1715 SC_PA3_SC_DATA_FIFO_EOPG_RD = 0x173,
1716 SC_PA3_SC_DATA_FIFO_EOP_RD = 0x174,
1717 SC_PA3_SC_DEALLOC_0_RD = 0x175,
1718 SC_PA3_SC_DEALLOC_1_RD = 0x176,
1719 SC_PA3_SC_NULL_WE = 0x177,
1720 SC_PA3_SC_NULL_DEALLOC_WE = 0x178,
1721 SC_PS_PA0_SC_FIFO_EMPTY = 0x179,
1722 SC_PS_PA0_SC_FIFO_FULL = 0x17a,
1723 SC_PA0_PS_DATA_SEND = 0x17b,
1724 SC_PS_PA1_SC_FIFO_EMPTY = 0x17c,
1725 SC_PS_PA1_SC_FIFO_FULL = 0x17d,
1726 SC_PA1_PS_DATA_SEND = 0x17e,
1727 SC_PS_PA2_SC_FIFO_EMPTY = 0x17f,
1728 SC_PS_PA2_SC_FIFO_FULL = 0x180,
1729 SC_PA2_PS_DATA_SEND = 0x181,
1730 SC_PS_PA3_SC_FIFO_EMPTY = 0x182,
1731 SC_PS_PA3_SC_FIFO_FULL = 0x183,
1732 SC_PA3_PS_DATA_SEND = 0x184,
1733 SC_BUSY_PROCESSING_MULTICYCLE_PRIM = 0x185,
1734 SC_BUSY_CNT_NOT_ZERO = 0x186,
1735 SC_BM_BUSY = 0x187,
1736 SC_BACKEND_BUSY = 0x188,
1737 SC_SCF_SCB_INTERFACE_BUSY = 0x189,
1738 SC_SCB_BUSY = 0x18a,
1739 SC_STARVED_BY_PA_WITH_UNSELECTED_PA_NOT_EMPTY = 0x18b,
1740 SC_STARVED_BY_PA_WITH_UNSELECTED_PA_FULL = 0x18c,
1741} SC_PERFCNT_SEL;
/* Shader-engine-pair X-select tile width for RASTER_CONFIG; value n selects an
 * 8 << n pixel wide tile (hardware register field values — do not renumber). */
typedef enum SePairXsel {
	RASTER_CONFIG_SE_PAIR_XSEL_8_WIDE_TILE	= 0x0,
	RASTER_CONFIG_SE_PAIR_XSEL_16_WIDE_TILE	= 0x1,
	RASTER_CONFIG_SE_PAIR_XSEL_32_WIDE_TILE	= 0x2,
	RASTER_CONFIG_SE_PAIR_XSEL_64_WIDE_TILE	= 0x3,
} SePairXsel;
/* Shader-engine-pair Y-select tile height for RASTER_CONFIG; value n selects an
 * 8 << n pixel tall tile (hardware register field values — do not renumber). */
typedef enum SePairYsel {
	RASTER_CONFIG_SE_PAIR_YSEL_8_WIDE_TILE	= 0x0,
	RASTER_CONFIG_SE_PAIR_YSEL_16_WIDE_TILE	= 0x1,
	RASTER_CONFIG_SE_PAIR_YSEL_32_WIDE_TILE	= 0x2,
	RASTER_CONFIG_SE_PAIR_YSEL_64_WIDE_TILE	= 0x3,
} SePairYsel;
/* Shader-engine-pair mapping mode for RASTER_CONFIG (hardware field values). */
typedef enum SePairMap {
	RASTER_CONFIG_SE_PAIR_MAP_0	= 0x0,
	RASTER_CONFIG_SE_PAIR_MAP_1	= 0x1,
	RASTER_CONFIG_SE_PAIR_MAP_2	= 0x2,
	RASTER_CONFIG_SE_PAIR_MAP_3	= 0x3,
} SePairMap;
/* Shader-engine X-select tile width for RASTER_CONFIG (hardware field values). */
typedef enum SeXsel {
	RASTER_CONFIG_SE_XSEL_8_WIDE_TILE	= 0x0,
	RASTER_CONFIG_SE_XSEL_16_WIDE_TILE	= 0x1,
	RASTER_CONFIG_SE_XSEL_32_WIDE_TILE	= 0x2,
	RASTER_CONFIG_SE_XSEL_64_WIDE_TILE	= 0x3,
} SeXsel;
/* Shader-engine Y-select tile height for RASTER_CONFIG (hardware field values). */
typedef enum SeYsel {
	RASTER_CONFIG_SE_YSEL_8_WIDE_TILE	= 0x0,
	RASTER_CONFIG_SE_YSEL_16_WIDE_TILE	= 0x1,
	RASTER_CONFIG_SE_YSEL_32_WIDE_TILE	= 0x2,
	RASTER_CONFIG_SE_YSEL_64_WIDE_TILE	= 0x3,
} SeYsel;
/* Shader-engine mapping mode for RASTER_CONFIG (hardware field values). */
typedef enum SeMap {
	RASTER_CONFIG_SE_MAP_0	= 0x0,
	RASTER_CONFIG_SE_MAP_1	= 0x1,
	RASTER_CONFIG_SE_MAP_2	= 0x2,
	RASTER_CONFIG_SE_MAP_3	= 0x3,
} SeMap;
/* Scan-converter X-select tile width for RASTER_CONFIG (hardware field values). */
typedef enum ScXsel {
	RASTER_CONFIG_SC_XSEL_8_WIDE_TILE	= 0x0,
	RASTER_CONFIG_SC_XSEL_16_WIDE_TILE	= 0x1,
	RASTER_CONFIG_SC_XSEL_32_WIDE_TILE	= 0x2,
	RASTER_CONFIG_SC_XSEL_64_WIDE_TILE	= 0x3,
} ScXsel;
/* Scan-converter Y-select tile height for RASTER_CONFIG (hardware field values). */
typedef enum ScYsel {
	RASTER_CONFIG_SC_YSEL_8_WIDE_TILE	= 0x0,
	RASTER_CONFIG_SC_YSEL_16_WIDE_TILE	= 0x1,
	RASTER_CONFIG_SC_YSEL_32_WIDE_TILE	= 0x2,
	RASTER_CONFIG_SC_YSEL_64_WIDE_TILE	= 0x3,
} ScYsel;
/* Scan-converter mapping mode for RASTER_CONFIG (hardware field values). */
typedef enum ScMap {
	RASTER_CONFIG_SC_MAP_0	= 0x0,
	RASTER_CONFIG_SC_MAP_1	= 0x1,
	RASTER_CONFIG_SC_MAP_2	= 0x2,
	RASTER_CONFIG_SC_MAP_3	= 0x3,
} ScMap;
/* Packer secondary X-select for RASTER_CONFIG (hardware field values). */
typedef enum PkrXsel2 {
	RASTER_CONFIG_PKR_XSEL2_0	= 0x0,
	RASTER_CONFIG_PKR_XSEL2_1	= 0x1,
	RASTER_CONFIG_PKR_XSEL2_2	= 0x2,
	RASTER_CONFIG_PKR_XSEL2_3	= 0x3,
} PkrXsel2;
/* Packer X-select for RASTER_CONFIG (hardware field values). */
typedef enum PkrXsel {
	RASTER_CONFIG_PKR_XSEL_0	= 0x0,
	RASTER_CONFIG_PKR_XSEL_1	= 0x1,
	RASTER_CONFIG_PKR_XSEL_2	= 0x2,
	RASTER_CONFIG_PKR_XSEL_3	= 0x3,
} PkrXsel;
/* Packer Y-select for RASTER_CONFIG (hardware field values). */
typedef enum PkrYsel {
	RASTER_CONFIG_PKR_YSEL_0	= 0x0,
	RASTER_CONFIG_PKR_YSEL_1	= 0x1,
	RASTER_CONFIG_PKR_YSEL_2	= 0x2,
	RASTER_CONFIG_PKR_YSEL_3	= 0x3,
} PkrYsel;
/* Packer mapping mode for RASTER_CONFIG (hardware field values). */
typedef enum PkrMap {
	RASTER_CONFIG_PKR_MAP_0	= 0x0,
	RASTER_CONFIG_PKR_MAP_1	= 0x1,
	RASTER_CONFIG_PKR_MAP_2	= 0x2,
	RASTER_CONFIG_PKR_MAP_3	= 0x3,
} PkrMap;
/* Render-backend X-select for RASTER_CONFIG (hardware field values). */
typedef enum RbXsel {
	RASTER_CONFIG_RB_XSEL_0	= 0x0,
	RASTER_CONFIG_RB_XSEL_1	= 0x1,
} RbXsel;
/* Render-backend Y-select for RASTER_CONFIG (hardware field values). */
typedef enum RbYsel {
	RASTER_CONFIG_RB_YSEL_0	= 0x0,
	RASTER_CONFIG_RB_YSEL_1	= 0x1,
} RbYsel;
/* Render-backend secondary X-select for RASTER_CONFIG (hardware field values). */
typedef enum RbXsel2 {
	RASTER_CONFIG_RB_XSEL2_0	= 0x0,
	RASTER_CONFIG_RB_XSEL2_1	= 0x1,
	RASTER_CONFIG_RB_XSEL2_2	= 0x2,
	RASTER_CONFIG_RB_XSEL2_3	= 0x3,
} RbXsel2;
/* Render-backend mapping mode for RASTER_CONFIG (hardware field values). */
typedef enum RbMap {
	RASTER_CONFIG_RB_MAP_0	= 0x0,
	RASTER_CONFIG_RB_MAP_1	= 0x1,
	RASTER_CONFIG_RB_MAP_2	= 0x2,
	RASTER_CONFIG_RB_MAP_3	= 0x3,
} RbMap;
/* Compute-shader data packet type tag (2-bit field, see CSDATA_TYPE_WIDTH). */
typedef enum CSDATA_TYPE {
	CSDATA_TYPE_TG		= 0x0,	/* thread-group data */
	CSDATA_TYPE_STATE	= 0x1,	/* state update */
	CSDATA_TYPE_EVENT	= 0x2,	/* event */
	CSDATA_TYPE_PRIVATE	= 0x3,	/* private data */
} CSDATA_TYPE;
/* Bit widths of the CSDATA packet fields: 2-bit type tag, 7-bit address,
 * 32-bit data word. Values are hardware-defined — do not change. */
#define CSDATA_TYPE_WIDTH 0x2
#define CSDATA_ADDR_WIDTH 0x7
#define CSDATA_DATA_WIDTH 0x20
/* SPI pixel-shader sample interpolation control (hardware field values). */
typedef enum SPI_SAMPLE_CNTL {
	CENTROIDS_ONLY		= 0x0,
	CENTERS_ONLY		= 0x1,
	CENTROIDS_AND_CENTERS	= 0x2,
	UNDEF			= 0x3,
} SPI_SAMPLE_CNTL;
/* SPI fog computation mode (hardware field values). */
typedef enum SPI_FOG_MODE {
	SPI_FOG_NONE	= 0x0,
	SPI_FOG_EXP	= 0x1,
	SPI_FOG_EXP2	= 0x2,
	SPI_FOG_LINEAR	= 0x3,
} SPI_FOG_MODE;
/* SPI point-sprite texture-coordinate override selection per component:
 * constant 0, constant 1, sprite S/T coordinate, or no override. */
typedef enum SPI_PNT_SPRITE_OVERRIDE {
	SPI_PNT_SPRITE_SEL_0	= 0x0,
	SPI_PNT_SPRITE_SEL_1	= 0x1,
	SPI_PNT_SPRITE_SEL_S	= 0x2,
	SPI_PNT_SPRITE_SEL_T	= 0x3,
	SPI_PNT_SPRITE_SEL_NONE	= 0x4,
} SPI_PNT_SPRITE_OVERRIDE;
/* SPI performance-counter event selects. One value per countable event;
 * values are hardware-defined and must not be renumbered. Grouped by the
 * shader stage or SPI sub-unit the event belongs to. */
typedef enum SPI_PERFCNT_SEL {
	/* Vertex shader (VS) stage events. */
	SPI_PERF_VS_WINDOW_VALID	= 0x0,
	SPI_PERF_VS_BUSY		= 0x1,
	SPI_PERF_VS_FIRST_WAVE		= 0x2,
	SPI_PERF_VS_LAST_WAVE		= 0x3,
	SPI_PERF_VS_LSHS_DEALLOC	= 0x4,
	SPI_PERF_VS_PC_STALL		= 0x5,
	SPI_PERF_VS_POS0_STALL		= 0x6,
	SPI_PERF_VS_POS1_STALL		= 0x7,
	SPI_PERF_VS_CRAWLER_STALL	= 0x8,
	SPI_PERF_VS_EVENT_WAVE		= 0x9,
	SPI_PERF_VS_WAVE		= 0xa,
	SPI_PERF_VS_PERS_UPD_FULL0	= 0xb,
	SPI_PERF_VS_PERS_UPD_FULL1	= 0xc,
	SPI_PERF_VS_LATE_ALLOC_FULL	= 0xd,
	SPI_PERF_VS_FIRST_SUBGRP	= 0xe,
	SPI_PERF_VS_LAST_SUBGRP		= 0xf,
	/* Geometry shader (GS) stage events. */
	SPI_PERF_GS_WINDOW_VALID	= 0x10,
	SPI_PERF_GS_BUSY		= 0x11,
	SPI_PERF_GS_CRAWLER_STALL	= 0x12,
	SPI_PERF_GS_EVENT_WAVE		= 0x13,
	SPI_PERF_GS_WAVE		= 0x14,
	SPI_PERF_GS_PERS_UPD_FULL0	= 0x15,
	SPI_PERF_GS_PERS_UPD_FULL1	= 0x16,
	SPI_PERF_GS_FIRST_SUBGRP	= 0x17,
	SPI_PERF_GS_LAST_SUBGRP		= 0x18,
	/* Export shader (ES) stage events. */
	SPI_PERF_ES_WINDOW_VALID	= 0x19,
	SPI_PERF_ES_BUSY		= 0x1a,
	SPI_PERF_ES_CRAWLER_STALL	= 0x1b,
	SPI_PERF_ES_FIRST_WAVE		= 0x1c,
	SPI_PERF_ES_LAST_WAVE		= 0x1d,
	SPI_PERF_ES_LSHS_DEALLOC	= 0x1e,
	SPI_PERF_ES_EVENT_WAVE		= 0x1f,
	SPI_PERF_ES_WAVE		= 0x20,
	SPI_PERF_ES_PERS_UPD_FULL0	= 0x21,
	SPI_PERF_ES_PERS_UPD_FULL1	= 0x22,
	SPI_PERF_ES_FIRST_SUBGRP	= 0x23,
	SPI_PERF_ES_LAST_SUBGRP		= 0x24,
	/* Hull shader (HS) stage events. */
	SPI_PERF_HS_WINDOW_VALID	= 0x25,
	SPI_PERF_HS_BUSY		= 0x26,
	SPI_PERF_HS_CRAWLER_STALL	= 0x27,
	SPI_PERF_HS_FIRST_WAVE		= 0x28,
	SPI_PERF_HS_LAST_WAVE		= 0x29,
	SPI_PERF_HS_LSHS_DEALLOC	= 0x2a,
	SPI_PERF_HS_EVENT_WAVE		= 0x2b,
	SPI_PERF_HS_WAVE		= 0x2c,
	SPI_PERF_HS_PERS_UPD_FULL0	= 0x2d,
	SPI_PERF_HS_PERS_UPD_FULL1	= 0x2e,
	/* Local shader (LS) stage events. */
	SPI_PERF_LS_WINDOW_VALID	= 0x2f,
	SPI_PERF_LS_BUSY		= 0x30,
	SPI_PERF_LS_CRAWLER_STALL	= 0x31,
	SPI_PERF_LS_FIRST_WAVE		= 0x32,
	SPI_PERF_LS_LAST_WAVE		= 0x33,
	SPI_PERF_OFFCHIP_LDS_STALL_LS	= 0x34,
	SPI_PERF_LS_EVENT_WAVE		= 0x35,
	SPI_PERF_LS_WAVE		= 0x36,
	SPI_PERF_LS_PERS_UPD_FULL0	= 0x37,
	SPI_PERF_LS_PERS_UPD_FULL1	= 0x38,
	/* Compute shader, graphics queue (CSG) events. */
	SPI_PERF_CSG_WINDOW_VALID	= 0x39,
	SPI_PERF_CSG_BUSY		= 0x3a,
	SPI_PERF_CSG_NUM_THREADGROUPS	= 0x3b,
	SPI_PERF_CSG_CRAWLER_STALL	= 0x3c,
	SPI_PERF_CSG_EVENT_WAVE		= 0x3d,
	SPI_PERF_CSG_WAVE		= 0x3e,
	/* Compute shader, non-graphics queue (CSN) events. */
	SPI_PERF_CSN_WINDOW_VALID	= 0x3f,
	SPI_PERF_CSN_BUSY		= 0x40,
	SPI_PERF_CSN_NUM_THREADGROUPS	= 0x41,
	SPI_PERF_CSN_CRAWLER_STALL	= 0x42,
	SPI_PERF_CSN_EVENT_WAVE		= 0x43,
	SPI_PERF_CSN_WAVE		= 0x44,
	/* Pixel shader control (PS_CTL) events. */
	SPI_PERF_PS_CTL_WINDOW_VALID	= 0x45,
	SPI_PERF_PS_CTL_BUSY		= 0x46,
	SPI_PERF_PS_CTL_ACTIVE		= 0x47,
	SPI_PERF_PS_CTL_DEALLOC_BIN0	= 0x48,
	SPI_PERF_PS_CTL_FPOS_BIN1_STALL	= 0x49,
	SPI_PERF_PS_CTL_EVENT_WAVE	= 0x4a,
	SPI_PERF_PS_CTL_WAVE		= 0x4b,
	SPI_PERF_PS_CTL_OPT_WAVE	= 0x4c,
	SPI_PERF_PS_CTL_PASS_BIN0	= 0x4d,
	SPI_PERF_PS_CTL_PASS_BIN1	= 0x4e,
	SPI_PERF_PS_CTL_FPOS_BIN2	= 0x4f,
	SPI_PERF_PS_CTL_PRIM_BIN0	= 0x50,
	SPI_PERF_PS_CTL_PRIM_BIN1	= 0x51,
	SPI_PERF_PS_CTL_CNF_BIN2	= 0x52,
	SPI_PERF_PS_CTL_CNF_BIN3	= 0x53,
	SPI_PERF_PS_CTL_CRAWLER_STALL	= 0x54,
	SPI_PERF_PS_CTL_LDS_RES_FULL	= 0x55,
	SPI_PERF_PS_PERS_UPD_FULL0	= 0x56,
	SPI_PERF_PS_PERS_UPD_FULL1	= 0x57,
	/* Pixel allocation and depth-buffer stall events. */
	SPI_PERF_PIX_ALLOC_PEND_CNT	= 0x58,
	SPI_PERF_PIX_ALLOC_SCB_STALL	= 0x59,
	SPI_PERF_PIX_ALLOC_DB0_STALL	= 0x5a,
	SPI_PERF_PIX_ALLOC_DB1_STALL	= 0x5b,
	SPI_PERF_PIX_ALLOC_DB2_STALL	= 0x5c,
	SPI_PERF_PIX_ALLOC_DB3_STALL	= 0x5d,
	SPI_PERF_LDS0_PC_VALID		= 0x5e,
	SPI_PERF_LDS1_PC_VALID		= 0x5f,
	/* Resource allocator (RA) events. */
	SPI_PERF_RA_PIPE_REQ_BIN2	= 0x60,
	SPI_PERF_RA_TASK_REQ_BIN3	= 0x61,
	SPI_PERF_RA_WR_CTL_FULL		= 0x62,
	SPI_PERF_RA_REQ_NO_ALLOC	= 0x63,
	SPI_PERF_RA_REQ_NO_ALLOC_PS	= 0x64,
	SPI_PERF_RA_REQ_NO_ALLOC_VS	= 0x65,
	SPI_PERF_RA_REQ_NO_ALLOC_GS	= 0x66,
	SPI_PERF_RA_REQ_NO_ALLOC_ES	= 0x67,
	SPI_PERF_RA_REQ_NO_ALLOC_HS	= 0x68,
	SPI_PERF_RA_REQ_NO_ALLOC_LS	= 0x69,
	SPI_PERF_RA_REQ_NO_ALLOC_CSG	= 0x6a,
	SPI_PERF_RA_REQ_NO_ALLOC_CSN	= 0x6b,
	SPI_PERF_RA_RES_STALL_PS	= 0x6c,
	SPI_PERF_RA_RES_STALL_VS	= 0x6d,
	SPI_PERF_RA_RES_STALL_GS	= 0x6e,
	SPI_PERF_RA_RES_STALL_ES	= 0x6f,
	SPI_PERF_RA_RES_STALL_HS	= 0x70,
	SPI_PERF_RA_RES_STALL_LS	= 0x71,
	SPI_PERF_RA_RES_STALL_CSG	= 0x72,
	SPI_PERF_RA_RES_STALL_CSN	= 0x73,
	SPI_PERF_RA_TMP_STALL_PS	= 0x74,
	SPI_PERF_RA_TMP_STALL_VS	= 0x75,
	SPI_PERF_RA_TMP_STALL_GS	= 0x76,
	SPI_PERF_RA_TMP_STALL_ES	= 0x77,
	SPI_PERF_RA_TMP_STALL_HS	= 0x78,
	SPI_PERF_RA_TMP_STALL_LS	= 0x79,
	SPI_PERF_RA_TMP_STALL_CSG	= 0x7a,
	SPI_PERF_RA_TMP_STALL_CSN	= 0x7b,
	SPI_PERF_RA_WAVE_SIMD_FULL_PS	= 0x7c,
	SPI_PERF_RA_WAVE_SIMD_FULL_VS	= 0x7d,
	SPI_PERF_RA_WAVE_SIMD_FULL_GS	= 0x7e,
	SPI_PERF_RA_WAVE_SIMD_FULL_ES	= 0x7f,
	SPI_PERF_RA_WAVE_SIMD_FULL_HS	= 0x80,
	SPI_PERF_RA_WAVE_SIMD_FULL_LS	= 0x81,
	SPI_PERF_RA_WAVE_SIMD_FULL_CSG	= 0x82,
	SPI_PERF_RA_WAVE_SIMD_FULL_CSN	= 0x83,
	SPI_PERF_RA_VGPR_SIMD_FULL_PS	= 0x84,
	SPI_PERF_RA_VGPR_SIMD_FULL_VS	= 0x85,
	SPI_PERF_RA_VGPR_SIMD_FULL_GS	= 0x86,
	SPI_PERF_RA_VGPR_SIMD_FULL_ES	= 0x87,
	SPI_PERF_RA_VGPR_SIMD_FULL_HS	= 0x88,
	SPI_PERF_RA_VGPR_SIMD_FULL_LS	= 0x89,
	SPI_PERF_RA_VGPR_SIMD_FULL_CSG	= 0x8a,
	SPI_PERF_RA_VGPR_SIMD_FULL_CSN	= 0x8b,
	SPI_PERF_RA_SGPR_SIMD_FULL_PS	= 0x8c,
	SPI_PERF_RA_SGPR_SIMD_FULL_VS	= 0x8d,
	SPI_PERF_RA_SGPR_SIMD_FULL_GS	= 0x8e,
	SPI_PERF_RA_SGPR_SIMD_FULL_ES	= 0x8f,
	SPI_PERF_RA_SGPR_SIMD_FULL_HS	= 0x90,
	SPI_PERF_RA_SGPR_SIMD_FULL_LS	= 0x91,
	SPI_PERF_RA_SGPR_SIMD_FULL_CSG	= 0x92,
	SPI_PERF_RA_SGPR_SIMD_FULL_CSN	= 0x93,
	SPI_PERF_RA_LDS_CU_FULL_PS	= 0x94,
	SPI_PERF_RA_LDS_CU_FULL_LS	= 0x95,
	SPI_PERF_RA_LDS_CU_FULL_ES	= 0x96,
	SPI_PERF_RA_LDS_CU_FULL_CSG	= 0x97,
	SPI_PERF_RA_LDS_CU_FULL_CSN	= 0x98,
	SPI_PERF_RA_BAR_CU_FULL_HS	= 0x99,
	SPI_PERF_RA_BAR_CU_FULL_CSG	= 0x9a,
	SPI_PERF_RA_BAR_CU_FULL_CSN	= 0x9b,
	SPI_PERF_RA_BULKY_CU_FULL_CSG	= 0x9c,
	SPI_PERF_RA_BULKY_CU_FULL_CSN	= 0x9d,
	SPI_PERF_RA_TGLIM_CU_FULL_CSG	= 0x9e,
	SPI_PERF_RA_TGLIM_CU_FULL_CSN	= 0x9f,
	SPI_PERF_RA_WVLIM_STALL_PS	= 0xa0,
	SPI_PERF_RA_WVLIM_STALL_VS	= 0xa1,
	SPI_PERF_RA_WVLIM_STALL_GS	= 0xa2,
	SPI_PERF_RA_WVLIM_STALL_ES	= 0xa3,
	SPI_PERF_RA_WVLIM_STALL_HS	= 0xa4,
	SPI_PERF_RA_WVLIM_STALL_LS	= 0xa5,
	SPI_PERF_RA_WVLIM_STALL_CSG	= 0xa6,
	SPI_PERF_RA_WVLIM_STALL_CSN	= 0xa7,
	SPI_PERF_RA_PS_LOCK_NA		= 0xa8,
	SPI_PERF_RA_VS_LOCK		= 0xa9,
	SPI_PERF_RA_GS_LOCK		= 0xaa,
	SPI_PERF_RA_ES_LOCK		= 0xab,
	SPI_PERF_RA_HS_LOCK		= 0xac,
	SPI_PERF_RA_LS_LOCK		= 0xad,
	SPI_PERF_RA_CSG_LOCK		= 0xae,
	SPI_PERF_RA_CSN_LOCK		= 0xaf,
	SPI_PERF_RA_RSV_UPD		= 0xb0,
	/* Export arbiter events. */
	SPI_PERF_EXP_ARB_COL_CNT	= 0xb1,
	SPI_PERF_EXP_ARB_PAR_CNT	= 0xb2,
	SPI_PERF_EXP_ARB_POS_CNT	= 0xb3,
	SPI_PERF_EXP_ARB_GDS_CNT	= 0xb4,
	/* Clock-gating events. */
	SPI_PERF_CLKGATE_BUSY_STALL	= 0xb5,
	SPI_PERF_CLKGATE_ACTIVE_STALL	= 0xb6,
	SPI_PERF_CLKGATE_ALL_CLOCKS_ON	= 0xb7,
	SPI_PERF_CLKGATE_CGTT_DYN_ON	= 0xb8,
	SPI_PERF_CLKGATE_CGTT_REG_ON	= 0xb9,
	/* Export / FIFO / allocation counters. */
	SPI_PERF_NUM_VS_POS_EXPORTS	= 0xba,
	SPI_PERF_NUM_VS_PARAM_EXPORTS	= 0xbb,
	SPI_PERF_NUM_PS_COL_EXPORTS	= 0xbc,
	SPI_PERF_ES_GRP_FIFO_FULL	= 0xbd,
	SPI_PERF_GS_GRP_FIFO_FULL	= 0xbe,
	SPI_PERF_HS_GRP_FIFO_FULL	= 0xbf,
	SPI_PERF_LS_GRP_FIFO_FULL	= 0xc0,
	SPI_PERF_VS_ALLOC_CNT		= 0xc1,
	SPI_PERF_VS_LATE_ALLOC_ACCUM	= 0xc2,
	SPI_PERF_PC_ALLOC_CNT		= 0xc3,
	SPI_PERF_PC_ALLOC_ACCUM		= 0xc4,
} SPI_PERFCNT_SEL;
/* SPI shader export format: number of components written, with a compressed
 * four-component variant (hardware field values). */
typedef enum SPI_SHADER_FORMAT {
	SPI_SHADER_NONE		= 0x0,
	SPI_SHADER_1COMP	= 0x1,
	SPI_SHADER_2COMP	= 0x2,
	SPI_SHADER_4COMPRESS	= 0x3,
	SPI_SHADER_4COMP	= 0x4,
} SPI_SHADER_FORMAT;
/* SPI shader export data format for color/attribute exports
 * (hardware field values). */
typedef enum SPI_SHADER_EX_FORMAT {
	SPI_SHADER_ZERO		= 0x0,
	SPI_SHADER_32_R		= 0x1,
	SPI_SHADER_32_GR	= 0x2,
	SPI_SHADER_32_AR	= 0x3,
	SPI_SHADER_FP16_ABGR	= 0x4,
	SPI_SHADER_UNORM16_ABGR	= 0x5,
	SPI_SHADER_SNORM16_ABGR	= 0x6,
	SPI_SHADER_UINT16_ABGR	= 0x7,
	SPI_SHADER_SINT16_ABGR	= 0x8,
	SPI_SHADER_32_ABGR	= 0x9,
} SPI_SHADER_EX_FORMAT;
/* Clock-gating state-machine mode (hardware field values). */
typedef enum CLKGATE_SM_MODE {
	ON_SEQ			= 0x0,
	OFF_SEQ			= 0x1,
	PROG_SEQ		= 0x2,
	READ_SEQ		= 0x3,
	SM_MODE_RESERVED	= 0x4,
} CLKGATE_SM_MODE;
/* Clock-gating base multiplier selection (hardware field values). */
typedef enum CLKGATE_BASE_MODE {
	MULT_8	= 0x0,
	MULT_16	= 0x1,
} CLKGATE_BASE_MODE;
/* Sampler address clamp/wrap mode for texture coordinates
 * (hardware field values). */
typedef enum SQ_TEX_CLAMP {
	SQ_TEX_WRAP			= 0x0,
	SQ_TEX_MIRROR			= 0x1,
	SQ_TEX_CLAMP_LAST_TEXEL		= 0x2,
	SQ_TEX_MIRROR_ONCE_LAST_TEXEL	= 0x3,
	SQ_TEX_CLAMP_HALF_BORDER	= 0x4,
	SQ_TEX_MIRROR_ONCE_HALF_BORDER	= 0x5,
	SQ_TEX_CLAMP_BORDER		= 0x6,
	SQ_TEX_MIRROR_ONCE_BORDER	= 0x7,
} SQ_TEX_CLAMP;
/* Sampler XY (magnification/minification) filter mode
 * (hardware field values). */
typedef enum SQ_TEX_XY_FILTER {
	SQ_TEX_XY_FILTER_POINT		= 0x0,
	SQ_TEX_XY_FILTER_BILINEAR	= 0x1,
	SQ_TEX_XY_FILTER_ANISO_POINT	= 0x2,
	SQ_TEX_XY_FILTER_ANISO_BILINEAR	= 0x3,
} SQ_TEX_XY_FILTER;
/* Sampler filter mode along the Z axis of volume textures
 * (hardware field values). */
typedef enum SQ_TEX_Z_FILTER {
	SQ_TEX_Z_FILTER_NONE	= 0x0,
	SQ_TEX_Z_FILTER_POINT	= 0x1,
	SQ_TEX_Z_FILTER_LINEAR	= 0x2,
} SQ_TEX_Z_FILTER;
/* Sampler mipmap filter mode (hardware field values). */
typedef enum SQ_TEX_MIP_FILTER {
	SQ_TEX_MIP_FILTER_NONE			= 0x0,
	SQ_TEX_MIP_FILTER_POINT			= 0x1,
	SQ_TEX_MIP_FILTER_LINEAR		= 0x2,
	SQ_TEX_MIP_FILTER_POINT_ANISO_ADJ	= 0x3,
} SQ_TEX_MIP_FILTER;
/* Maximum anisotropic filtering ratio; value n selects 1 << n samples
 * (hardware field values). */
typedef enum SQ_TEX_ANISO_RATIO {
	SQ_TEX_ANISO_RATIO_1	= 0x0,
	SQ_TEX_ANISO_RATIO_2	= 0x1,
	SQ_TEX_ANISO_RATIO_4	= 0x2,
	SQ_TEX_ANISO_RATIO_8	= 0x3,
	SQ_TEX_ANISO_RATIO_16	= 0x4,
} SQ_TEX_ANISO_RATIO;
/* Depth-compare function for shadow sampling (hardware field values). */
typedef enum SQ_TEX_DEPTH_COMPARE {
	SQ_TEX_DEPTH_COMPARE_NEVER		= 0x0,
	SQ_TEX_DEPTH_COMPARE_LESS		= 0x1,
	SQ_TEX_DEPTH_COMPARE_EQUAL		= 0x2,
	SQ_TEX_DEPTH_COMPARE_LESSEQUAL		= 0x3,
	SQ_TEX_DEPTH_COMPARE_GREATER		= 0x4,
	SQ_TEX_DEPTH_COMPARE_NOTEQUAL		= 0x5,
	SQ_TEX_DEPTH_COMPARE_GREATEREQUAL	= 0x6,
	SQ_TEX_DEPTH_COMPARE_ALWAYS		= 0x7,
} SQ_TEX_DEPTH_COMPARE;
/* Sampler border color: fixed transparent black / opaque black / opaque
 * white, or taken from a register (hardware field values). */
typedef enum SQ_TEX_BORDER_COLOR {
	SQ_TEX_BORDER_COLOR_TRANS_BLACK		= 0x0,
	SQ_TEX_BORDER_COLOR_OPAQUE_BLACK	= 0x1,
	SQ_TEX_BORDER_COLOR_OPAQUE_WHITE	= 0x2,
	SQ_TEX_BORDER_COLOR_REGISTER		= 0x3,
} SQ_TEX_BORDER_COLOR;
/* Buffer resource descriptor type field; only value 0 is a valid buffer,
 * the rest are reserved (hardware field values). */
typedef enum SQ_RSRC_BUF_TYPE {
	SQ_RSRC_BUF		= 0x0,
	SQ_RSRC_BUF_RSVD_1	= 0x1,
	SQ_RSRC_BUF_RSVD_2	= 0x2,
	SQ_RSRC_BUF_RSVD_3	= 0x3,
} SQ_RSRC_BUF_TYPE;
/* Image resource descriptor type field: values 0-7 are reserved, 8-15
 * select the image dimensionality/layout (hardware field values). */
typedef enum SQ_RSRC_IMG_TYPE {
	SQ_RSRC_IMG_RSVD_0		= 0x0,
	SQ_RSRC_IMG_RSVD_1		= 0x1,
	SQ_RSRC_IMG_RSVD_2		= 0x2,
	SQ_RSRC_IMG_RSVD_3		= 0x3,
	SQ_RSRC_IMG_RSVD_4		= 0x4,
	SQ_RSRC_IMG_RSVD_5		= 0x5,
	SQ_RSRC_IMG_RSVD_6		= 0x6,
	SQ_RSRC_IMG_RSVD_7		= 0x7,
	SQ_RSRC_IMG_1D			= 0x8,
	SQ_RSRC_IMG_2D			= 0x9,
	SQ_RSRC_IMG_3D			= 0xa,
	SQ_RSRC_IMG_CUBE		= 0xb,
	SQ_RSRC_IMG_1D_ARRAY		= 0xc,
	SQ_RSRC_IMG_2D_ARRAY		= 0xd,
	SQ_RSRC_IMG_2D_MSAA		= 0xe,
	SQ_RSRC_IMG_2D_MSAA_ARRAY	= 0xf,
} SQ_RSRC_IMG_TYPE;
/* Flat-address resource descriptor type field; only value 1 is valid,
 * the rest are reserved (hardware field values). */
typedef enum SQ_RSRC_FLAT_TYPE {
	SQ_RSRC_FLAT_RSVD_0	= 0x0,
	SQ_RSRC_FLAT		= 0x1,
	SQ_RSRC_FLAT_RSVD_2	= 0x2,
	SQ_RSRC_FLAT_RSVD_3	= 0x3,
} SQ_RSRC_FLAT_TYPE;
/* Image filter reduction mode: blend (average), min, or max
 * (hardware field values). */
typedef enum SQ_IMG_FILTER_TYPE {
	SQ_IMG_FILTER_MODE_BLEND	= 0x0,
	SQ_IMG_FILTER_MODE_MIN		= 0x1,
	SQ_IMG_FILTER_MODE_MAX		= 0x2,
} SQ_IMG_FILTER_TYPE;
/* Component swizzle select for resource descriptors: constant 0/1 or
 * one of the X/Y/Z/W source components (hardware field values). */
typedef enum SQ_SEL_XYZW01 {
	SQ_SEL_0		= 0x0,
	SQ_SEL_1		= 0x1,
	SQ_SEL_RESERVED_0	= 0x2,
	SQ_SEL_RESERVED_1	= 0x3,
	SQ_SEL_X		= 0x4,
	SQ_SEL_Y		= 0x5,
	SQ_SEL_Z		= 0x6,
	SQ_SEL_W		= 0x7,
} SQ_SEL_XYZW01;
/* Shader stage a wavefront belongs to (hardware field values). */
typedef enum SQ_WAVE_TYPE {
	SQ_WAVE_TYPE_PS		= 0x0,	/* pixel shader */
	SQ_WAVE_TYPE_VS		= 0x1,	/* vertex shader */
	SQ_WAVE_TYPE_GS		= 0x2,	/* geometry shader */
	SQ_WAVE_TYPE_ES		= 0x3,	/* export shader */
	SQ_WAVE_TYPE_HS		= 0x4,	/* hull shader */
	SQ_WAVE_TYPE_LS		= 0x5,	/* local shader */
	SQ_WAVE_TYPE_CS		= 0x6,	/* compute shader */
	SQ_WAVE_TYPE_PS1	= 0x7,
} SQ_WAVE_TYPE;
/* Token types emitted into the SQ thread-trace stream
 * (hardware field values). */
typedef enum SQ_THREAD_TRACE_TOKEN_TYPE {
	SQ_THREAD_TRACE_TOKEN_MISC		= 0x0,
	SQ_THREAD_TRACE_TOKEN_TIMESTAMP		= 0x1,
	SQ_THREAD_TRACE_TOKEN_REG		= 0x2,
	SQ_THREAD_TRACE_TOKEN_WAVE_START	= 0x3,
	SQ_THREAD_TRACE_TOKEN_WAVE_ALLOC	= 0x4,
	SQ_THREAD_TRACE_TOKEN_REG_CSPRIV	= 0x5,
	SQ_THREAD_TRACE_TOKEN_WAVE_END		= 0x6,
	SQ_THREAD_TRACE_TOKEN_EVENT		= 0x7,
	SQ_THREAD_TRACE_TOKEN_EVENT_CS		= 0x8,
	SQ_THREAD_TRACE_TOKEN_EVENT_GFX1	= 0x9,
	SQ_THREAD_TRACE_TOKEN_INST		= 0xa,
	SQ_THREAD_TRACE_TOKEN_INST_PC		= 0xb,
	SQ_THREAD_TRACE_TOKEN_INST_USERDATA	= 0xc,
	SQ_THREAD_TRACE_TOKEN_ISSUE		= 0xd,
	SQ_THREAD_TRACE_TOKEN_PERF		= 0xe,
	SQ_THREAD_TRACE_TOKEN_REG_CS		= 0xf,
} SQ_THREAD_TRACE_TOKEN_TYPE;
/* Sub-types of the MISC thread-trace token (hardware field values). */
typedef enum SQ_THREAD_TRACE_MISC_TOKEN_TYPE {
	SQ_THREAD_TRACE_MISC_TOKEN_TIME			= 0x0,
	SQ_THREAD_TRACE_MISC_TOKEN_TIME_RESET		= 0x1,
	SQ_THREAD_TRACE_MISC_TOKEN_PACKET_LOST		= 0x2,
	SQ_THREAD_TRACE_MISC_TOKEN_SURF_SYNC		= 0x3,
	SQ_THREAD_TRACE_MISC_TOKEN_TTRACE_STALL_BEGIN	= 0x4,
	SQ_THREAD_TRACE_MISC_TOKEN_TTRACE_STALL_END	= 0x5,
	SQ_THREAD_TRACE_MISC_TOKEN_SAVECTX		= 0x6,
	SQ_THREAD_TRACE_MISC_TOKEN_SHOOT_DOWN		= 0x7,
} SQ_THREAD_TRACE_MISC_TOKEN_TYPE;
/* Instruction classification recorded in thread-trace INST tokens
 * (hardware field values). */
typedef enum SQ_THREAD_TRACE_INST_TYPE {
	SQ_THREAD_TRACE_INST_TYPE_SMEM_RD		= 0x0,
	SQ_THREAD_TRACE_INST_TYPE_SALU_32		= 0x1,
	SQ_THREAD_TRACE_INST_TYPE_VMEM_RD		= 0x2,
	SQ_THREAD_TRACE_INST_TYPE_VMEM_WR		= 0x3,
	SQ_THREAD_TRACE_INST_TYPE_FLAT_WR		= 0x4,
	SQ_THREAD_TRACE_INST_TYPE_VALU_32		= 0x5,
	SQ_THREAD_TRACE_INST_TYPE_LDS			= 0x6,
	SQ_THREAD_TRACE_INST_TYPE_PC			= 0x7,
	SQ_THREAD_TRACE_INST_TYPE_EXPREQ_GDS		= 0x8,
	SQ_THREAD_TRACE_INST_TYPE_EXPREQ_GFX		= 0x9,
	SQ_THREAD_TRACE_INST_TYPE_EXPGNT_PAR_COL	= 0xa,
	SQ_THREAD_TRACE_INST_TYPE_EXPGNT_POS_GDS	= 0xb,
	SQ_THREAD_TRACE_INST_TYPE_JUMP			= 0xc,
	SQ_THREAD_TRACE_INST_TYPE_NEXT			= 0xd,
	SQ_THREAD_TRACE_INST_TYPE_FLAT_RD		= 0xe,
	SQ_THREAD_TRACE_INST_TYPE_OTHER_MSG		= 0xf,
	SQ_THREAD_TRACE_INST_TYPE_SMEM_WR		= 0x10,
	SQ_THREAD_TRACE_INST_TYPE_SALU_64		= 0x11,
	SQ_THREAD_TRACE_INST_TYPE_VALU_64		= 0x12,
	SQ_THREAD_TRACE_INST_TYPE_SMEM_RD_REPLAY	= 0x13,
	SQ_THREAD_TRACE_INST_TYPE_SMEM_WR_REPLAY	= 0x14,
	SQ_THREAD_TRACE_INST_TYPE_VMEM_RD_REPLAY	= 0x15,
	SQ_THREAD_TRACE_INST_TYPE_VMEM_WR_REPLAY	= 0x16,
	SQ_THREAD_TRACE_INST_TYPE_FLAT_RD_REPLAY	= 0x17,
	SQ_THREAD_TRACE_INST_TYPE_FLAT_WR_REPLAY	= 0x18,
} SQ_THREAD_TRACE_INST_TYPE;
/* Register category recorded in thread-trace REG tokens
 * (hardware field values). */
typedef enum SQ_THREAD_TRACE_REG_TYPE {
	SQ_THREAD_TRACE_REG_TYPE_EVENT		= 0x0,
	SQ_THREAD_TRACE_REG_TYPE_DRAW		= 0x1,
	SQ_THREAD_TRACE_REG_TYPE_DISPATCH	= 0x2,
	SQ_THREAD_TRACE_REG_TYPE_USERDATA	= 0x3,
	SQ_THREAD_TRACE_REG_TYPE_MARKER		= 0x4,
	SQ_THREAD_TRACE_REG_TYPE_GFXDEC		= 0x5,
	SQ_THREAD_TRACE_REG_TYPE_SHDEC		= 0x6,
	SQ_THREAD_TRACE_REG_TYPE_OTHER		= 0x7,
} SQ_THREAD_TRACE_REG_TYPE;
/* Register access direction in thread-trace REG tokens
 * (hardware field values). */
typedef enum SQ_THREAD_TRACE_REG_OP {
	SQ_THREAD_TRACE_REG_OP_READ	= 0x0,
	SQ_THREAD_TRACE_REG_OP_WRITE	= 0x1,
} SQ_THREAD_TRACE_REG_OP;
/* Thread-trace enable/disable selector (hardware field values). */
typedef enum SQ_THREAD_TRACE_MODE_SEL {
	SQ_THREAD_TRACE_MODE_OFF	= 0x0,
	SQ_THREAD_TRACE_MODE_ON		= 0x1,
} SQ_THREAD_TRACE_MODE_SEL;
/* Thread-trace capture scope selection (hardware field values). */
typedef enum SQ_THREAD_TRACE_CAPTURE_MODE {
	SQ_THREAD_TRACE_CAPTURE_MODE_ALL		= 0x0,
	SQ_THREAD_TRACE_CAPTURE_MODE_SELECT		= 0x1,
	SQ_THREAD_TRACE_CAPTURE_MODE_SELECT_DETAIL	= 0x2,
} SQ_THREAD_TRACE_CAPTURE_MODE;
/* Which VM IDs are included in thread-trace capture
 * (hardware field values). */
typedef enum SQ_THREAD_TRACE_VM_ID_MASK {
	SQ_THREAD_TRACE_VM_ID_MASK_SINGLE		= 0x0,
	SQ_THREAD_TRACE_VM_ID_MASK_ALL			= 0x1,
	SQ_THREAD_TRACE_VM_ID_MASK_SINGLE_DETAIL	= 0x2,
} SQ_THREAD_TRACE_VM_ID_MASK;
/* Which wavefronts are traced: none or all (hardware field values). */
typedef enum SQ_THREAD_TRACE_WAVE_MASK {
	SQ_THREAD_TRACE_WAVE_MASK_NONE	= 0x0,
	SQ_THREAD_TRACE_WAVE_MASK_ALL	= 0x1,
} SQ_THREAD_TRACE_WAVE_MASK;
/* Issue-slot state recorded in thread-trace ISSUE tokens
 * (hardware field values). */
typedef enum SQ_THREAD_TRACE_ISSUE {
	SQ_THREAD_TRACE_ISSUE_NULL	= 0x0,
	SQ_THREAD_TRACE_ISSUE_STALL	= 0x1,
	SQ_THREAD_TRACE_ISSUE_INST	= 0x2,
	SQ_THREAD_TRACE_ISSUE_IMMED	= 0x3,
} SQ_THREAD_TRACE_ISSUE;
/* Filter for which issue events are written to the thread-trace stream
 * (hardware field values). */
typedef enum SQ_THREAD_TRACE_ISSUE_MASK {
	SQ_THREAD_TRACE_ISSUE_MASK_ALL			= 0x0,
	SQ_THREAD_TRACE_ISSUE_MASK_STALLED		= 0x1,
	SQ_THREAD_TRACE_ISSUE_MASK_STALLED_AND_IMMED	= 0x2,
	SQ_THREAD_TRACE_ISSUE_MASK_IMMED		= 0x3,
} SQ_THREAD_TRACE_ISSUE_MASK;
2301typedef enum SQ_PERF_SEL {
2302 SQ_PERF_SEL_NONE = 0x0,
2303 SQ_PERF_SEL_ACCUM_PREV = 0x1,
2304 SQ_PERF_SEL_CYCLES = 0x2,
2305 SQ_PERF_SEL_BUSY_CYCLES = 0x3,
2306 SQ_PERF_SEL_WAVES = 0x4,
2307 SQ_PERF_SEL_LEVEL_WAVES = 0x5,
2308 SQ_PERF_SEL_WAVES_EQ_64 = 0x6,
2309 SQ_PERF_SEL_WAVES_LT_64 = 0x7,
2310 SQ_PERF_SEL_WAVES_LT_48 = 0x8,
2311 SQ_PERF_SEL_WAVES_LT_32 = 0x9,
2312 SQ_PERF_SEL_WAVES_LT_16 = 0xa,
2313 SQ_PERF_SEL_WAVES_CU = 0xb,
2314 SQ_PERF_SEL_LEVEL_WAVES_CU = 0xc,
2315 SQ_PERF_SEL_BUSY_CU_CYCLES = 0xd,
2316 SQ_PERF_SEL_ITEMS = 0xe,
2317 SQ_PERF_SEL_QUADS = 0xf,
2318 SQ_PERF_SEL_EVENTS = 0x10,
2319 SQ_PERF_SEL_SURF_SYNCS = 0x11,
2320 SQ_PERF_SEL_TTRACE_REQS = 0x12,
2321 SQ_PERF_SEL_TTRACE_INFLIGHT_REQS = 0x13,
2322 SQ_PERF_SEL_TTRACE_STALL = 0x14,
2323 SQ_PERF_SEL_MSG_CNTR = 0x15,
2324 SQ_PERF_SEL_MSG_PERF = 0x16,
2325 SQ_PERF_SEL_MSG_GSCNT = 0x17,
2326 SQ_PERF_SEL_MSG_INTERRUPT = 0x18,
2327 SQ_PERF_SEL_INSTS = 0x19,
2328 SQ_PERF_SEL_INSTS_VALU = 0x1a,
2329 SQ_PERF_SEL_INSTS_VMEM_WR = 0x1b,
2330 SQ_PERF_SEL_INSTS_VMEM_RD = 0x1c,
2331 SQ_PERF_SEL_INSTS_VMEM = 0x1d,
2332 SQ_PERF_SEL_INSTS_SALU = 0x1e,
2333 SQ_PERF_SEL_INSTS_SMEM = 0x1f,
2334 SQ_PERF_SEL_INSTS_FLAT = 0x20,
2335 SQ_PERF_SEL_INSTS_FLAT_LDS_ONLY = 0x21,
2336 SQ_PERF_SEL_INSTS_LDS = 0x22,
2337 SQ_PERF_SEL_INSTS_GDS = 0x23,
2338 SQ_PERF_SEL_INSTS_EXP = 0x24,
2339 SQ_PERF_SEL_INSTS_EXP_GDS = 0x25,
2340 SQ_PERF_SEL_INSTS_BRANCH = 0x26,
2341 SQ_PERF_SEL_INSTS_SENDMSG = 0x27,
2342 SQ_PERF_SEL_INSTS_VSKIPPED = 0x28,
2343 SQ_PERF_SEL_INST_LEVEL_VMEM = 0x29,
2344 SQ_PERF_SEL_INST_LEVEL_SMEM = 0x2a,
2345 SQ_PERF_SEL_INST_LEVEL_LDS = 0x2b,
2346 SQ_PERF_SEL_INST_LEVEL_GDS = 0x2c,
2347 SQ_PERF_SEL_INST_LEVEL_EXP = 0x2d,
2348 SQ_PERF_SEL_WAVE_CYCLES = 0x2e,
2349 SQ_PERF_SEL_WAVE_READY = 0x2f,
2350 SQ_PERF_SEL_WAIT_CNT_VM = 0x30,
2351 SQ_PERF_SEL_WAIT_CNT_LGKM = 0x31,
2352 SQ_PERF_SEL_WAIT_CNT_EXP = 0x32,
2353 SQ_PERF_SEL_WAIT_CNT_ANY = 0x33,
2354 SQ_PERF_SEL_WAIT_BARRIER = 0x34,
2355 SQ_PERF_SEL_WAIT_EXP_ALLOC = 0x35,
2356 SQ_PERF_SEL_WAIT_SLEEP = 0x36,
2357 SQ_PERF_SEL_WAIT_OTHER = 0x37,
2358 SQ_PERF_SEL_WAIT_ANY = 0x38,
2359 SQ_PERF_SEL_WAIT_TTRACE = 0x39,
2360 SQ_PERF_SEL_WAIT_IFETCH = 0x3a,
2361 SQ_PERF_SEL_WAIT_INST_VMEM = 0x3b,
2362 SQ_PERF_SEL_WAIT_INST_SCA = 0x3c,
2363 SQ_PERF_SEL_WAIT_INST_LDS = 0x3d,
2364 SQ_PERF_SEL_WAIT_INST_VALU = 0x3e,
2365 SQ_PERF_SEL_WAIT_INST_EXP_GDS = 0x3f,
2366 SQ_PERF_SEL_WAIT_INST_MISC = 0x40,
2367 SQ_PERF_SEL_WAIT_INST_FLAT = 0x41,
2368 SQ_PERF_SEL_ACTIVE_INST_ANY = 0x42,
2369 SQ_PERF_SEL_ACTIVE_INST_VMEM = 0x43,
2370 SQ_PERF_SEL_ACTIVE_INST_LDS = 0x44,
2371 SQ_PERF_SEL_ACTIVE_INST_VALU = 0x45,
2372 SQ_PERF_SEL_ACTIVE_INST_SCA = 0x46,
2373 SQ_PERF_SEL_ACTIVE_INST_EXP_GDS = 0x47,
2374 SQ_PERF_SEL_ACTIVE_INST_MISC = 0x48,
2375 SQ_PERF_SEL_ACTIVE_INST_FLAT = 0x49,
2376 SQ_PERF_SEL_INST_CYCLES_VMEM_WR = 0x4a,
2377 SQ_PERF_SEL_INST_CYCLES_VMEM_RD = 0x4b,
2378 SQ_PERF_SEL_INST_CYCLES_VMEM_ADDR = 0x4c,
2379 SQ_PERF_SEL_INST_CYCLES_VMEM_DATA = 0x4d,
2380 SQ_PERF_SEL_INST_CYCLES_VMEM_CMD = 0x4e,
2381 SQ_PERF_SEL_INST_CYCLES_VMEM = 0x4f,
2382 SQ_PERF_SEL_INST_CYCLES_LDS = 0x50,
2383 SQ_PERF_SEL_INST_CYCLES_VALU = 0x51,
2384 SQ_PERF_SEL_INST_CYCLES_EXP = 0x52,
2385 SQ_PERF_SEL_INST_CYCLES_GDS = 0x53,
2386 SQ_PERF_SEL_INST_CYCLES_SCA = 0x54,
2387 SQ_PERF_SEL_INST_CYCLES_SMEM = 0x55,
2388 SQ_PERF_SEL_INST_CYCLES_SALU = 0x56,
2389 SQ_PERF_SEL_INST_CYCLES_EXP_GDS = 0x57,
2390 SQ_PERF_SEL_INST_CYCLES_MISC = 0x58,
2391 SQ_PERF_SEL_THREAD_CYCLES_VALU = 0x59,
2392 SQ_PERF_SEL_THREAD_CYCLES_VALU_MAX = 0x5a,
2393 SQ_PERF_SEL_IFETCH = 0x5b,
2394 SQ_PERF_SEL_IFETCH_LEVEL = 0x5c,
2395 SQ_PERF_SEL_CBRANCH_FORK = 0x5d,
2396 SQ_PERF_SEL_CBRANCH_FORK_SPLIT = 0x5e,
2397 SQ_PERF_SEL_VALU_LDS_DIRECT_RD = 0x5f,
2398 SQ_PERF_SEL_VALU_LDS_INTERP_OP = 0x60,
2399 SQ_PERF_SEL_LDS_BANK_CONFLICT = 0x61,
2400 SQ_PERF_SEL_LDS_ADDR_CONFLICT = 0x62,
2401 SQ_PERF_SEL_LDS_UNALIGNED_STALL = 0x63,
2402 SQ_PERF_SEL_LDS_MEM_VIOLATIONS = 0x64,
2403 SQ_PERF_SEL_LDS_ATOMIC_RETURN = 0x65,
2404 SQ_PERF_SEL_LDS_IDX_ACTIVE = 0x66,
2405 SQ_PERF_SEL_VALU_DEP_STALL = 0x67,
2406 SQ_PERF_SEL_VALU_STARVE = 0x68,
2407 SQ_PERF_SEL_EXP_REQ_FIFO_FULL = 0x69,
2408 SQ_PERF_SEL_LDS_BACK2BACK_STALL = 0x6a,
2409 SQ_PERF_SEL_LDS_DATA_FIFO_FULL = 0x6b,
2410 SQ_PERF_SEL_LDS_CMD_FIFO_FULL = 0x6c,
2411 SQ_PERF_SEL_VMEM_BACK2BACK_STALL = 0x6d,
2412 SQ_PERF_SEL_VMEM_TA_ADDR_FIFO_FULL = 0x6e,
2413 SQ_PERF_SEL_VMEM_TA_CMD_FIFO_FULL = 0x6f,
2414 SQ_PERF_SEL_VMEM_EX_DATA_REG_BUSY = 0x70,
2415 SQ_PERF_SEL_VMEM_WR_BACK2BACK_STALL = 0x71,
2416 SQ_PERF_SEL_VMEM_WR_TA_DATA_FIFO_FULL = 0x72,
2417 SQ_PERF_SEL_VALU_SRC_C_CONFLICT = 0x73,
2418 SQ_PERF_SEL_VMEM_RD_SRC_CD_CONFLICT = 0x74,
2419 SQ_PERF_SEL_VMEM_WR_SRC_CD_CONFLICT = 0x75,
2420 SQ_PERF_SEL_FLAT_SRC_CD_CONFLICT = 0x76,
2421 SQ_PERF_SEL_LDS_SRC_CD_CONFLICT = 0x77,
2422 SQ_PERF_SEL_SRC_CD_BUSY = 0x78,
2423 SQ_PERF_SEL_PT_POWER_STALL = 0x79,
2424 SQ_PERF_SEL_USER0 = 0x7a,
2425 SQ_PERF_SEL_USER1 = 0x7b,
2426 SQ_PERF_SEL_USER2 = 0x7c,
2427 SQ_PERF_SEL_USER3 = 0x7d,
2428 SQ_PERF_SEL_USER4 = 0x7e,
2429 SQ_PERF_SEL_USER5 = 0x7f,
2430 SQ_PERF_SEL_USER6 = 0x80,
2431 SQ_PERF_SEL_USER7 = 0x81,
2432 SQ_PERF_SEL_USER8 = 0x82,
2433 SQ_PERF_SEL_USER9 = 0x83,
2434 SQ_PERF_SEL_USER10 = 0x84,
2435 SQ_PERF_SEL_USER11 = 0x85,
2436 SQ_PERF_SEL_USER12 = 0x86,
2437 SQ_PERF_SEL_USER13 = 0x87,
2438 SQ_PERF_SEL_USER14 = 0x88,
2439 SQ_PERF_SEL_USER15 = 0x89,
2440 SQ_PERF_SEL_USER_LEVEL0 = 0x8a,
2441 SQ_PERF_SEL_USER_LEVEL1 = 0x8b,
2442 SQ_PERF_SEL_USER_LEVEL2 = 0x8c,
2443 SQ_PERF_SEL_USER_LEVEL3 = 0x8d,
2444 SQ_PERF_SEL_USER_LEVEL4 = 0x8e,
2445 SQ_PERF_SEL_USER_LEVEL5 = 0x8f,
2446 SQ_PERF_SEL_USER_LEVEL6 = 0x90,
2447 SQ_PERF_SEL_USER_LEVEL7 = 0x91,
2448 SQ_PERF_SEL_USER_LEVEL8 = 0x92,
2449 SQ_PERF_SEL_USER_LEVEL9 = 0x93,
2450 SQ_PERF_SEL_USER_LEVEL10 = 0x94,
2451 SQ_PERF_SEL_USER_LEVEL11 = 0x95,
2452 SQ_PERF_SEL_USER_LEVEL12 = 0x96,
2453 SQ_PERF_SEL_USER_LEVEL13 = 0x97,
2454 SQ_PERF_SEL_USER_LEVEL14 = 0x98,
2455 SQ_PERF_SEL_USER_LEVEL15 = 0x99,
2456 SQ_PERF_SEL_POWER_VALU = 0x9a,
2457 SQ_PERF_SEL_POWER_VALU0 = 0x9b,
2458 SQ_PERF_SEL_POWER_VALU1 = 0x9c,
2459 SQ_PERF_SEL_POWER_VALU2 = 0x9d,
2460 SQ_PERF_SEL_POWER_GPR_RD = 0x9e,
2461 SQ_PERF_SEL_POWER_GPR_WR = 0x9f,
2462 SQ_PERF_SEL_POWER_LDS_BUSY = 0xa0,
2463 SQ_PERF_SEL_POWER_ALU_BUSY = 0xa1,
2464 SQ_PERF_SEL_POWER_TEX_BUSY = 0xa2,
2465 SQ_PERF_SEL_ACCUM_PREV_HIRES = 0xa3,
2466 SQ_PERF_SEL_WAVES_RESTORED = 0xa4,
2467 SQ_PERF_SEL_WAVES_SAVED = 0xa5,
2468 SQ_PERF_SEL_DUMMY_LAST = 0xa7,
2469 SQC_PERF_SEL_ICACHE_INPUT_VALID_READY = 0xa8,
2470 SQC_PERF_SEL_ICACHE_INPUT_VALID_READYB = 0xa9,
2471 SQC_PERF_SEL_ICACHE_INPUT_VALIDB = 0xaa,
2472 SQC_PERF_SEL_DCACHE_INPUT_VALID_READY = 0xab,
2473 SQC_PERF_SEL_DCACHE_INPUT_VALID_READYB = 0xac,
2474 SQC_PERF_SEL_DCACHE_INPUT_VALIDB = 0xad,
2475 SQC_PERF_SEL_TC_REQ = 0xae,
2476 SQC_PERF_SEL_TC_INST_REQ = 0xaf,
2477 SQC_PERF_SEL_TC_DATA_READ_REQ = 0xb0,
2478 SQC_PERF_SEL_TC_DATA_WRITE_REQ = 0xb1,
2479 SQC_PERF_SEL_TC_DATA_ATOMIC_REQ = 0xb2,
2480 SQC_PERF_SEL_TC_STALL = 0xb3,
2481 SQC_PERF_SEL_TC_STARVE = 0xb4,
2482 SQC_PERF_SEL_ICACHE_BUSY_CYCLES = 0xb5,
2483 SQC_PERF_SEL_ICACHE_REQ = 0xb6,
2484 SQC_PERF_SEL_ICACHE_HITS = 0xb7,
2485 SQC_PERF_SEL_ICACHE_MISSES = 0xb8,
2486 SQC_PERF_SEL_ICACHE_MISSES_DUPLICATE = 0xb9,
2487 SQC_PERF_SEL_ICACHE_INVAL_INST = 0xba,
2488 SQC_PERF_SEL_ICACHE_INVAL_ASYNC = 0xbb,
2489 SQC_PERF_SEL_ICACHE_INPUT_STALL_ARB_NO_GRANT = 0xbc,
2490 SQC_PERF_SEL_ICACHE_INPUT_STALL_BANK_READYB = 0xbd,
2491 SQC_PERF_SEL_ICACHE_CACHE_STALLED = 0xbe,
2492 SQC_PERF_SEL_ICACHE_CACHE_STALL_INFLIGHT_NONZERO = 0xbf,
2493 SQC_PERF_SEL_ICACHE_CACHE_STALL_INFLIGHT_MAX = 0xc0,
2494 SQC_PERF_SEL_ICACHE_CACHE_STALL_OUTPUT = 0xc1,
2495 SQC_PERF_SEL_ICACHE_CACHE_STALL_OUTPUT_MISS_FIFO = 0xc2,
2496 SQC_PERF_SEL_ICACHE_CACHE_STALL_OUTPUT_HIT_FIFO = 0xc3,
2497 SQC_PERF_SEL_ICACHE_CACHE_STALL_OUTPUT_TC_IF = 0xc4,
2498 SQC_PERF_SEL_ICACHE_STALL_OUTXBAR_ARB_NO_GRANT = 0xc5,
2499 SQC_PERF_SEL_DCACHE_BUSY_CYCLES = 0xc6,
2500 SQC_PERF_SEL_DCACHE_REQ = 0xc7,
2501 SQC_PERF_SEL_DCACHE_HITS = 0xc8,
2502 SQC_PERF_SEL_DCACHE_MISSES = 0xc9,
2503 SQC_PERF_SEL_DCACHE_MISSES_DUPLICATE = 0xca,
2504 SQC_PERF_SEL_DCACHE_HIT_LRU_READ = 0xcb,
2505 SQC_PERF_SEL_DCACHE_MISS_EVICT_READ = 0xcc,
2506 SQC_PERF_SEL_DCACHE_WC_LRU_WRITE = 0xcd,
2507 SQC_PERF_SEL_DCACHE_WT_EVICT_WRITE = 0xce,
2508 SQC_PERF_SEL_DCACHE_ATOMIC = 0xcf,
2509 SQC_PERF_SEL_DCACHE_VOLATILE = 0xd0,
2510 SQC_PERF_SEL_DCACHE_INVAL_INST = 0xd1,
2511 SQC_PERF_SEL_DCACHE_INVAL_ASYNC = 0xd2,
2512 SQC_PERF_SEL_DCACHE_INVAL_VOLATILE_INST = 0xd3,
2513 SQC_PERF_SEL_DCACHE_INVAL_VOLATILE_ASYNC = 0xd4,
2514 SQC_PERF_SEL_DCACHE_WB_INST = 0xd5,
2515 SQC_PERF_SEL_DCACHE_WB_ASYNC = 0xd6,
2516 SQC_PERF_SEL_DCACHE_WB_VOLATILE_INST = 0xd7,
2517 SQC_PERF_SEL_DCACHE_WB_VOLATILE_ASYNC = 0xd8,
2518 SQC_PERF_SEL_DCACHE_INPUT_STALL_ARB_NO_GRANT = 0xd9,
2519 SQC_PERF_SEL_DCACHE_INPUT_STALL_BANK_READYB = 0xda,
2520 SQC_PERF_SEL_DCACHE_CACHE_STALLED = 0xdb,
2521 SQC_PERF_SEL_DCACHE_CACHE_STALL_INFLIGHT_MAX = 0xdc,
2522 SQC_PERF_SEL_DCACHE_CACHE_STALL_OUTPUT = 0xdd,
2523 SQC_PERF_SEL_DCACHE_CACHE_STALL_EVICT = 0xde,
2524 SQC_PERF_SEL_DCACHE_CACHE_STALL_UNORDERED = 0xdf,
2525 SQC_PERF_SEL_DCACHE_CACHE_STALL_ALLOC_UNAVAILABLE= 0xe0,
2526 SQC_PERF_SEL_DCACHE_CACHE_STALL_FORCE_EVICT = 0xe1,
2527 SQC_PERF_SEL_DCACHE_CACHE_STALL_MULTI_FLUSH = 0xe2,
2528 SQC_PERF_SEL_DCACHE_CACHE_STALL_FLUSH_DONE = 0xe3,
2529 SQC_PERF_SEL_DCACHE_CACHE_STALL_OUTPUT_MISS_FIFO = 0xe4,
2530 SQC_PERF_SEL_DCACHE_CACHE_STALL_OUTPUT_HIT_FIFO = 0xe5,
2531 SQC_PERF_SEL_DCACHE_CACHE_STALL_OUTPUT_TC_IF = 0xe6,
2532 SQC_PERF_SEL_DCACHE_STALL_OUTXBAR_ARB_NO_GRANT = 0xe7,
2533 SQC_PERF_SEL_DCACHE_REQ_READ_1 = 0xe8,
2534 SQC_PERF_SEL_DCACHE_REQ_READ_2 = 0xe9,
2535 SQC_PERF_SEL_DCACHE_REQ_READ_4 = 0xea,
2536 SQC_PERF_SEL_DCACHE_REQ_READ_8 = 0xeb,
2537 SQC_PERF_SEL_DCACHE_REQ_READ_16 = 0xec,
2538 SQC_PERF_SEL_DCACHE_REQ_TIME = 0xed,
2539 SQC_PERF_SEL_DCACHE_REQ_WRITE_1 = 0xee,
2540 SQC_PERF_SEL_DCACHE_REQ_WRITE_2 = 0xef,
2541 SQC_PERF_SEL_DCACHE_REQ_WRITE_4 = 0xf0,
2542 SQC_PERF_SEL_DCACHE_REQ_ATC_PROBE = 0xf1,
2543 SQC_PERF_SEL_SQ_DCACHE_REQS = 0xf2,
2544 SQC_PERF_SEL_DCACHE_FLAT_REQ = 0xf3,
2545 SQC_PERF_SEL_DCACHE_NONFLAT_REQ = 0xf4,
2546 SQC_PERF_SEL_ICACHE_INFLIGHT_LEVEL = 0xf5,
2547 SQC_PERF_SEL_DCACHE_INFLIGHT_LEVEL = 0xf6,
2548 SQC_PERF_SEL_TC_INFLIGHT_LEVEL = 0xf7,
2549 SQC_PERF_SEL_ICACHE_TC_INFLIGHT_LEVEL = 0xf8,
2550 SQC_PERF_SEL_DCACHE_TC_INFLIGHT_LEVEL = 0xf9,
2551 SQC_PERF_SEL_ICACHE_GATCL1_TRANSLATION_MISS = 0xfa,
2552 SQC_PERF_SEL_ICACHE_GATCL1_PERMISSION_MISS = 0xfb,
2553 SQC_PERF_SEL_ICACHE_GATCL1_REQUEST = 0xfc,
2554 SQC_PERF_SEL_ICACHE_GATCL1_STALL_INFLIGHT_MAX = 0xfd,
2555 SQC_PERF_SEL_ICACHE_GATCL1_STALL_LRU_INFLIGHT = 0xfe,
2556 SQC_PERF_SEL_ICACHE_GATCL1_LFIFO_FULL = 0xff,
2557 SQC_PERF_SEL_ICACHE_GATCL1_STALL_LFIFO_NOT_RES = 0x100,
2558 SQC_PERF_SEL_ICACHE_GATCL1_STALL_ATCL2_REQ_OUT_OF_CREDITS= 0x101,
2559 SQC_PERF_SEL_ICACHE_GATCL1_ATCL2_INFLIGHT = 0x102,
2560 SQC_PERF_SEL_ICACHE_GATCL1_STALL_MISSFIFO_FULL = 0x103,
2561 SQC_PERF_SEL_DCACHE_GATCL1_TRANSLATION_MISS = 0x104,
2562 SQC_PERF_SEL_DCACHE_GATCL1_PERMISSION_MISS = 0x105,
2563 SQC_PERF_SEL_DCACHE_GATCL1_REQUEST = 0x106,
2564 SQC_PERF_SEL_DCACHE_GATCL1_STALL_INFLIGHT_MAX = 0x107,
2565 SQC_PERF_SEL_DCACHE_GATCL1_STALL_LRU_INFLIGHT = 0x108,
2566 SQC_PERF_SEL_DCACHE_GATCL1_LFIFO_FULL = 0x109,
2567 SQC_PERF_SEL_DCACHE_GATCL1_STALL_LFIFO_NOT_RES = 0x10a,
2568 SQC_PERF_SEL_DCACHE_GATCL1_STALL_ATCL2_REQ_OUT_OF_CREDITS= 0x10b,
2569 SQC_PERF_SEL_DCACHE_GATCL1_ATCL2_INFLIGHT = 0x10c,
2570 SQC_PERF_SEL_DCACHE_GATCL1_STALL_MISSFIFO_FULL = 0x10d,
2571 SQC_PERF_SEL_DCACHE_GATCL1_STALL_MULTI_MISS = 0x10e,
2572 SQC_PERF_SEL_DCACHE_GATCL1_HIT_FIFO_FULL = 0x10f,
2573 SQC_PERF_SEL_DUMMY_LAST = 0x110,
2574 SQ_PERF_SEL_INSTS_SMEM_NORM = 0x111,
2575 SQ_PERF_SEL_ATC_INSTS_VMEM = 0x112,
2576 SQ_PERF_SEL_ATC_INST_LEVEL_VMEM = 0x113,
2577 SQ_PERF_SEL_ATC_XNACK_FIRST = 0x114,
2578 SQ_PERF_SEL_ATC_XNACK_ALL = 0x115,
2579 SQ_PERF_SEL_ATC_XNACK_FIFO_FULL = 0x116,
2580 SQ_PERF_SEL_ATC_INSTS_SMEM = 0x117,
2581 SQ_PERF_SEL_ATC_INST_LEVEL_SMEM = 0x118,
2582 SQ_PERF_SEL_IFETCH_XNACK = 0x119,
2583 SQ_PERF_SEL_TLB_SHOOTDOWN = 0x11a,
2584 SQ_PERF_SEL_TLB_SHOOTDOWN_CYCLES = 0x11b,
2585 SQ_PERF_SEL_INSTS_VMEM_WR_REPLAY = 0x11c,
2586 SQ_PERF_SEL_INSTS_VMEM_RD_REPLAY = 0x11d,
2587 SQ_PERF_SEL_INSTS_VMEM_REPLAY = 0x11e,
2588 SQ_PERF_SEL_INSTS_SMEM_REPLAY = 0x11f,
2589 SQ_PERF_SEL_INSTS_SMEM_NORM_REPLAY = 0x120,
2590 SQ_PERF_SEL_INSTS_FLAT_REPLAY = 0x121,
2591 SQ_PERF_SEL_ATC_INSTS_VMEM_REPLAY = 0x122,
2592 SQ_PERF_SEL_ATC_INSTS_SMEM_REPLAY = 0x123,
2593 SQ_PERF_SEL_DUMMY_LAST1 = 0x12a,
2594} SQ_PERF_SEL;
/* Activity source selected by the SQ CAC (power/current accounting) logic. */
typedef enum SQ_CAC_POWER_SEL {
	SQ_CAC_POWER_VALU = 0x0,
	SQ_CAC_POWER_VALU0 = 0x1,
	SQ_CAC_POWER_VALU1 = 0x2,
	SQ_CAC_POWER_VALU2 = 0x3,
	SQ_CAC_POWER_GPR_RD = 0x4,
	SQ_CAC_POWER_GPR_WR = 0x5,
	SQ_CAC_POWER_LDS_BUSY = 0x6,
	SQ_CAC_POWER_ALU_BUSY = 0x7,
	SQ_CAC_POWER_TEX_BUSY = 0x8,
} SQ_CAC_POWER_SEL;
/* Command encodings for SQ indirect (per-wave debug/control) commands. */
typedef enum SQ_IND_CMD_CMD {
	SQ_IND_CMD_CMD_NULL = 0x0,
	SQ_IND_CMD_CMD_SETHALT = 0x1,
	SQ_IND_CMD_CMD_SAVECTX = 0x2,
	SQ_IND_CMD_CMD_KILL = 0x3,
	SQ_IND_CMD_CMD_DEBUG = 0x4,
	SQ_IND_CMD_CMD_TRAP = 0x5,
	SQ_IND_CMD_CMD_SET_SPI_PRIO = 0x6,
} SQ_IND_CMD_CMD;
/* Targeting mode for SQ indirect commands: one wave or a broadcast scope. */
typedef enum SQ_IND_CMD_MODE {
	SQ_IND_CMD_MODE_SINGLE = 0x0,
	SQ_IND_CMD_MODE_BROADCAST = 0x1,
	SQ_IND_CMD_MODE_BROADCAST_QUEUE = 0x2,
	SQ_IND_CMD_MODE_BROADCAST_PIPE = 0x3,
	SQ_IND_CMD_MODE_BROADCAST_ME = 0x4,
} SQ_IND_CMD_MODE;
/* Memory that sourced an EDC (error detection/correction) event. */
typedef enum SQ_EDC_INFO_SOURCE {
	SQ_EDC_INFO_SOURCE_INVALID = 0x0,
	SQ_EDC_INFO_SOURCE_INST = 0x1,
	SQ_EDC_INFO_SOURCE_SGPR = 0x2,
	SQ_EDC_INFO_SOURCE_VGPR = 0x3,
	SQ_EDC_INFO_SOURCE_LDS = 0x4,
	SQ_EDC_INFO_SOURCE_GDS = 0x5,
	SQ_EDC_INFO_SOURCE_TA = 0x6,
} SQ_EDC_INFO_SOURCE;
/* Floating-point rounding modes (IEEE-754 style selection). */
typedef enum SQ_ROUND_MODE {
	SQ_ROUND_NEAREST_EVEN = 0x0,
	SQ_ROUND_PLUS_INFINITY = 0x1,
	SQ_ROUND_MINUS_INFINITY = 0x2,
	SQ_ROUND_TO_ZERO = 0x3,
} SQ_ROUND_MODE;
/* Encoding tag carried in an SQ interrupt word. */
typedef enum SQ_INTERRUPT_WORD_ENCODING {
	SQ_INTERRUPT_WORD_ENCODING_AUTO = 0x0,
	SQ_INTERRUPT_WORD_ENCODING_INST = 0x1,
	SQ_INTERRUPT_WORD_ENCODING_ERROR = 0x2,
} SQ_INTERRUPT_WORD_ENCODING;
/*
 * Export RAT (random-access target) instruction opcodes.
 * 0x20+ variants return the pre-op value (RTN); note the gaps in the
 * numbering (e.g. 0x17-0x1f, 0x21) — those encodings are unused.
 */
typedef enum ENUM_SQ_EXPORT_RAT_INST {
	SQ_EXPORT_RAT_INST_NOP = 0x0,
	SQ_EXPORT_RAT_INST_STORE_TYPED = 0x1,
	SQ_EXPORT_RAT_INST_STORE_RAW = 0x2,
	SQ_EXPORT_RAT_INST_STORE_RAW_FDENORM = 0x3,
	SQ_EXPORT_RAT_INST_CMPXCHG_INT = 0x4,
	SQ_EXPORT_RAT_INST_CMPXCHG_FLT = 0x5,
	SQ_EXPORT_RAT_INST_CMPXCHG_FDENORM = 0x6,
	SQ_EXPORT_RAT_INST_ADD = 0x7,
	SQ_EXPORT_RAT_INST_SUB = 0x8,
	SQ_EXPORT_RAT_INST_RSUB = 0x9,
	SQ_EXPORT_RAT_INST_MIN_INT = 0xa,
	SQ_EXPORT_RAT_INST_MIN_UINT = 0xb,
	SQ_EXPORT_RAT_INST_MAX_INT = 0xc,
	SQ_EXPORT_RAT_INST_MAX_UINT = 0xd,
	SQ_EXPORT_RAT_INST_AND = 0xe,
	SQ_EXPORT_RAT_INST_OR = 0xf,
	SQ_EXPORT_RAT_INST_XOR = 0x10,
	SQ_EXPORT_RAT_INST_MSKOR = 0x11,
	SQ_EXPORT_RAT_INST_INC_UINT = 0x12,
	SQ_EXPORT_RAT_INST_DEC_UINT = 0x13,
	SQ_EXPORT_RAT_INST_STORE_DWORD = 0x14,
	SQ_EXPORT_RAT_INST_STORE_SHORT = 0x15,
	SQ_EXPORT_RAT_INST_STORE_BYTE = 0x16,
	SQ_EXPORT_RAT_INST_NOP_RTN = 0x20,
	SQ_EXPORT_RAT_INST_XCHG_RTN = 0x22,
	SQ_EXPORT_RAT_INST_XCHG_FDENORM_RTN = 0x23,
	SQ_EXPORT_RAT_INST_CMPXCHG_INT_RTN = 0x24,
	SQ_EXPORT_RAT_INST_CMPXCHG_FLT_RTN = 0x25,
	SQ_EXPORT_RAT_INST_CMPXCHG_FDENORM_RTN = 0x26,
	SQ_EXPORT_RAT_INST_ADD_RTN = 0x27,
	SQ_EXPORT_RAT_INST_SUB_RTN = 0x28,
	SQ_EXPORT_RAT_INST_RSUB_RTN = 0x29,
	SQ_EXPORT_RAT_INST_MIN_INT_RTN = 0x2a,
	SQ_EXPORT_RAT_INST_MIN_UINT_RTN = 0x2b,
	SQ_EXPORT_RAT_INST_MAX_INT_RTN = 0x2c,
	SQ_EXPORT_RAT_INST_MAX_UINT_RTN = 0x2d,
	SQ_EXPORT_RAT_INST_AND_RTN = 0x2e,
	SQ_EXPORT_RAT_INST_OR_RTN = 0x2f,
	SQ_EXPORT_RAT_INST_XOR_RTN = 0x30,
	SQ_EXPORT_RAT_INST_MSKOR_RTN = 0x31,
	SQ_EXPORT_RAT_INST_INC_UINT_RTN = 0x32,
	SQ_EXPORT_RAT_INST_DEC_UINT_RTN = 0x33,
} ENUM_SQ_EXPORT_RAT_INST;
/* Instruction-buffer (IBUF) state machine states. */
typedef enum SQ_IBUF_ST {
	SQ_IBUF_IB_IDLE = 0x0,
	SQ_IBUF_IB_INI_WAIT_GNT = 0x1,
	SQ_IBUF_IB_INI_WAIT_DRET = 0x2,
	SQ_IBUF_IB_LE_4DW = 0x3,
	SQ_IBUF_IB_WAIT_DRET = 0x4,
	SQ_IBUF_IB_EMPTY_WAIT_DRET = 0x5,
	SQ_IBUF_IB_DRET = 0x6,
	SQ_IBUF_IB_EMPTY_WAIT_GNT = 0x7,
} SQ_IBUF_ST;
/* Per-wave instruction stream state machine states. */
typedef enum SQ_INST_STR_ST {
	SQ_INST_STR_IB_WAVE_NORML = 0x0,
	SQ_INST_STR_IB_WAVE2ID_NORMAL_INST_AV = 0x1,
	SQ_INST_STR_IB_WAVE_INTERNAL_INST_AV = 0x2,
	SQ_INST_STR_IB_WAVE_INST_SKIP_AV = 0x3,
	SQ_INST_STR_IB_WAVE_SETVSKIP_ST0 = 0x4,
	SQ_INST_STR_IB_WAVE_SETVSKIP_ST1 = 0x5,
	SQ_INST_STR_IB_WAVE_NOP_SLEEP_WAIT = 0x6,
	SQ_INST_STR_IB_WAVE_PC_FROM_SGPR_MSG_WAIT = 0x7,
} SQ_INST_STR_ST;
/* ECC status of a wave's instruction buffer. */
typedef enum SQ_WAVE_IB_ECC_ST {
	SQ_WAVE_IB_ECC_CLEAN = 0x0,
	SQ_WAVE_IB_ECC_ERR_CONTINUE = 0x1,
	SQ_WAVE_IB_ECC_ERR_HALT = 0x2,
	SQ_WAVE_IB_ECC_WITH_ERR_MSG = 0x3,
} SQ_WAVE_IB_ECC_ST;
/* Shader memory addressing modes (GPUVM vs. HSA flat, 32/64-bit). */
typedef enum SH_MEM_ADDRESS_MODE {
	SH_MEM_ADDRESS_MODE_GPUVM64 = 0x0,
	SH_MEM_ADDRESS_MODE_GPUVM32 = 0x1,
	SH_MEM_ADDRESS_MODE_HSA64 = 0x2,
	SH_MEM_ADDRESS_MODE_HSA32 = 0x3,
} SH_MEM_ADDRESS_MODE;
/* Shader memory access alignment enforcement modes. */
typedef enum SH_MEM_ALIGNMENT_MODE {
	SH_MEM_ALIGNMENT_MODE_DWORD = 0x0,
	SH_MEM_ALIGNMENT_MODE_DWORD_STRICT = 0x1,
	SH_MEM_ALIGNMENT_MODE_STRICT = 0x2,
	SH_MEM_ALIGNMENT_MODE_UNALIGNED = 0x3,
} SH_MEM_ALIGNMENT_MODE;
/* Prefix codes marking wave-start counts in thread-trace output. */
typedef enum SQ_THREAD_TRACE_WAVE_START_COUNT_PREFIX {
	SQ_THREAD_TRACE_WAVE_START_COUNT_PREFIX_WREXEC = 0x18,
	SQ_THREAD_TRACE_WAVE_START_COUNT_PREFIX_RESTORE = 0x19,
} SQ_THREAD_TRACE_WAVE_START_COUNT_PREFIX;
/* SQ indexed-register layout, decode address ranges, program limits, and
 * exception / privileged instruction-ID constants. */
#define SQ_WAVE_TYPE_PS0 0x0
#define SQIND_GLOBAL_REGS_OFFSET 0x0
#define SQIND_GLOBAL_REGS_SIZE 0x8
#define SQIND_LOCAL_REGS_OFFSET 0x8
#define SQIND_LOCAL_REGS_SIZE 0x8
#define SQIND_WAVE_HWREGS_OFFSET 0x10
#define SQIND_WAVE_HWREGS_SIZE 0x1f0
#define SQIND_WAVE_SGPRS_OFFSET 0x200
#define SQIND_WAVE_SGPRS_SIZE 0x200
#define SQ_GFXDEC_BEGIN 0xa000
#define SQ_GFXDEC_END 0xc000
#define SQ_GFXDEC_STATE_ID_SHIFT 0xa
#define SQDEC_BEGIN 0x2300
#define SQDEC_END 0x23ff
#define SQPERFSDEC_BEGIN 0xd9c0
#define SQPERFSDEC_END 0xda40
#define SQPERFDDEC_BEGIN 0xd1c0
#define SQPERFDDEC_END 0xd240
#define SQGFXUDEC_BEGIN 0xc330
#define SQGFXUDEC_END 0xc380
#define SQPWRDEC_BEGIN 0xf08c
#define SQPWRDEC_END 0xf094
#define SQ_DISPATCHER_GFX_MIN 0x10
#define SQ_DISPATCHER_GFX_CNT_PER_RING 0x8
#define SQ_MAX_PGM_SGPRS 0x68
#define SQ_MAX_PGM_VGPRS 0x100
#define SQ_THREAD_TRACE_TIME_UNIT 0x4
#define SQ_EX_MODE_EXCP_VALU_BASE 0x0
#define SQ_EX_MODE_EXCP_VALU_SIZE 0x7
#define SQ_EX_MODE_EXCP_INVALID 0x0
#define SQ_EX_MODE_EXCP_INPUT_DENORM 0x1
#define SQ_EX_MODE_EXCP_DIV0 0x2
#define SQ_EX_MODE_EXCP_OVERFLOW 0x3
#define SQ_EX_MODE_EXCP_UNDERFLOW 0x4
#define SQ_EX_MODE_EXCP_INEXACT 0x5
#define SQ_EX_MODE_EXCP_INT_DIV0 0x6
#define SQ_EX_MODE_EXCP_ADDR_WATCH 0x7
#define SQ_EX_MODE_EXCP_MEM_VIOL 0x8
/* Instruction IDs at/above INST_ID_PRIV_START are privileged/internal. */
#define INST_ID_PRIV_START 0x80000000
#define INST_ID_ECC_INTERRUPT_MSG 0xfffffff0
#define INST_ID_TTRACE_NEW_PC_MSG 0xfffffff1
#define INST_ID_HW_TRAP 0xfffffff2
#define INST_ID_KILL_SEQ 0xfffffff3
#define INST_ID_SPI_WREXEC 0xfffffff4
#define INST_ID_HOST_REG_TRAP_MSG 0xfffffffe
/* Instruction encoding signatures: for each format, BITS is the fixed
 * pattern, MASK selects the signature bits, FIELD is the pattern shifted
 * down to the field position ((word & MASK) identifies the format). */
#define SQ_ENC_SOP1_BITS 0xbe800000
#define SQ_ENC_SOP1_MASK 0xff800000
#define SQ_ENC_SOP1_FIELD 0x17d
#define SQ_ENC_SOPC_BITS 0xbf000000
#define SQ_ENC_SOPC_MASK 0xff800000
#define SQ_ENC_SOPC_FIELD 0x17e
#define SQ_ENC_SOPP_BITS 0xbf800000
#define SQ_ENC_SOPP_MASK 0xff800000
#define SQ_ENC_SOPP_FIELD 0x17f
#define SQ_ENC_SOPK_BITS 0xb0000000
#define SQ_ENC_SOPK_MASK 0xf0000000
#define SQ_ENC_SOPK_FIELD 0xb
#define SQ_ENC_SOP2_BITS 0x80000000
#define SQ_ENC_SOP2_MASK 0xc0000000
#define SQ_ENC_SOP2_FIELD 0x2
#define SQ_ENC_SMEM_BITS 0xc0000000
#define SQ_ENC_SMEM_MASK 0xfc000000
#define SQ_ENC_SMEM_FIELD 0x30
#define SQ_ENC_VOP1_BITS 0x7e000000
#define SQ_ENC_VOP1_MASK 0xfe000000
#define SQ_ENC_VOP1_FIELD 0x3f
#define SQ_ENC_VOPC_BITS 0x7c000000
#define SQ_ENC_VOPC_MASK 0xfe000000
#define SQ_ENC_VOPC_FIELD 0x3e
#define SQ_ENC_VOP2_BITS 0x0
#define SQ_ENC_VOP2_MASK 0x80000000
#define SQ_ENC_VOP2_FIELD 0x0
#define SQ_ENC_VINTRP_BITS 0xd4000000
#define SQ_ENC_VINTRP_MASK 0xfc000000
#define SQ_ENC_VINTRP_FIELD 0x35
#define SQ_ENC_VOP3_BITS 0xd0000000
#define SQ_ENC_VOP3_MASK 0xfc000000
#define SQ_ENC_VOP3_FIELD 0x34
#define SQ_ENC_DS_BITS 0xd8000000
#define SQ_ENC_DS_MASK 0xfc000000
#define SQ_ENC_DS_FIELD 0x36
#define SQ_ENC_MUBUF_BITS 0xe0000000
#define SQ_ENC_MUBUF_MASK 0xfc000000
#define SQ_ENC_MUBUF_FIELD 0x38
#define SQ_ENC_MTBUF_BITS 0xe8000000
#define SQ_ENC_MTBUF_MASK 0xfc000000
#define SQ_ENC_MTBUF_FIELD 0x3a
#define SQ_ENC_MIMG_BITS 0xf0000000
#define SQ_ENC_MIMG_MASK 0xfc000000
#define SQ_ENC_MIMG_FIELD 0x3c
#define SQ_ENC_EXP_BITS 0xc4000000
#define SQ_ENC_EXP_MASK 0xfc000000
#define SQ_ENC_EXP_FIELD 0x31
#define SQ_ENC_FLAT_BITS 0xdc000000
#define SQ_ENC_FLAT_MASK 0xfc000000
#define SQ_ENC_FLAT_FIELD 0x37
/* ISA layout constants: opcode-table offsets/counts, waitcnt and hwreg
 * bit-field positions, sendmsg field layout, register-file sizes. */
#define SQ_V_OP3_INTRP_OFFSET 0x274
#define SQ_WAITCNT_VM_SHIFT 0x0
#define SQ_SENDMSG_STREAMID_SIZE 0x2
#define SQ_V_OPC_COUNT 0x100
#define SQ_V_OP3_INTRP_COUNT 0xc
#define SQ_XLATE_VOP3_TO_VOP2_OFFSET 0x100
#define SQ_HWREG_OFFSET_SIZE 0x5
#define SQ_HWREG_OFFSET_SHIFT 0x6
#define SQ_V_OP3_3IN_OFFSET 0x1c0
#define SQ_NUM_ATTR 0x21
#define SQ_NUM_VGPR 0x100
#define SQ_XLATE_VOP3_TO_VINTRP_COUNT 0x4
#define SQ_SENDMSG_MSG_SIZE 0x4
#define SQ_NUM_TTMP 0xc
#define SQ_HWREG_ID_SIZE 0x6
#define SQ_SENDMSG_GSOP_SIZE 0x2
#define SQ_NUM_SGPR 0x66
#define SQ_EXP_NUM_MRT 0x8
#define SQ_SENDMSG_SYSTEM_SIZE 0x3
#define SQ_WAITCNT_LGKM_SHIFT 0x8
#define SQ_XLATE_VOP3_TO_VOP2_COUNT 0x40
#define SQ_V_OP3_3IN_COUNT 0xb0
#define SQ_V_INTRP_COUNT 0x4
#define SQ_WAITCNT_EXP_SIZE 0x3
#define SQ_SENDMSG_SYSTEM_SHIFT 0x4
#define SQ_EXP_NUM_GDS 0x5
#define SQ_HWREG_SIZE_SHIFT 0xb
#define SQ_XLATE_VOP3_TO_VOPC_OFFSET 0x0
#define SQ_V_OP3_2IN_COUNT 0x80
#define SQ_XLATE_VOP3_TO_VINTRP_OFFSET 0x270
#define SQ_SENDMSG_MSG_SHIFT 0x0
#define SQ_WAITCNT_EXP_SHIFT 0x4
#define SQ_WAITCNT_VM_SIZE 0x4
#define SQ_XLATE_VOP3_TO_VOP1_OFFSET 0x140
#define SQ_SENDMSG_GSOP_SHIFT 0x4
#define SQ_XLATE_VOP3_TO_VOP1_COUNT 0x80
#define SQ_SRC_VGPR_BIT 0x100
#define SQ_V_OP2_COUNT 0x40
#define SQ_EXP_NUM_PARAM 0x20
#define SQ_V_OP1_COUNT 0x80
#define SQ_SENDMSG_STREAMID_SHIFT 0x8
#define SQ_V_OP3_2IN_OFFSET 0x280
#define SQ_WAITCNT_LGKM_SIZE 0x4
#define SQ_XLATE_VOP3_TO_VOPC_COUNT 0x100
#define SQ_EXP_NUM_POS 0x4
#define SQ_HWREG_SIZE_SIZE 0x5
#define SQ_HWREG_ID_SHIFT 0x0
/* SOP1 (scalar, one source) opcodes; SQ_ATTR0 is the first interp attribute. */
#define SQ_S_MOV_B32 0x0
#define SQ_S_MOV_B64 0x1
#define SQ_S_CMOV_B32 0x2
#define SQ_S_CMOV_B64 0x3
#define SQ_S_NOT_B32 0x4
#define SQ_S_NOT_B64 0x5
#define SQ_S_WQM_B32 0x6
#define SQ_S_WQM_B64 0x7
#define SQ_S_BREV_B32 0x8
#define SQ_S_BREV_B64 0x9
#define SQ_S_BCNT0_I32_B32 0xa
#define SQ_S_BCNT0_I32_B64 0xb
#define SQ_S_BCNT1_I32_B32 0xc
#define SQ_S_BCNT1_I32_B64 0xd
#define SQ_S_FF0_I32_B32 0xe
#define SQ_S_FF0_I32_B64 0xf
#define SQ_S_FF1_I32_B32 0x10
#define SQ_S_FF1_I32_B64 0x11
#define SQ_S_FLBIT_I32_B32 0x12
#define SQ_S_FLBIT_I32_B64 0x13
#define SQ_S_FLBIT_I32 0x14
#define SQ_S_FLBIT_I32_I64 0x15
#define SQ_S_SEXT_I32_I8 0x16
#define SQ_S_SEXT_I32_I16 0x17
#define SQ_S_BITSET0_B32 0x18
#define SQ_S_BITSET0_B64 0x19
#define SQ_S_BITSET1_B32 0x1a
#define SQ_S_BITSET1_B64 0x1b
#define SQ_S_GETPC_B64 0x1c
#define SQ_S_SETPC_B64 0x1d
#define SQ_S_SWAPPC_B64 0x1e
#define SQ_S_RFE_B64 0x1f
#define SQ_S_AND_SAVEEXEC_B64 0x20
#define SQ_S_OR_SAVEEXEC_B64 0x21
#define SQ_S_XOR_SAVEEXEC_B64 0x22
#define SQ_S_ANDN2_SAVEEXEC_B64 0x23
#define SQ_S_ORN2_SAVEEXEC_B64 0x24
#define SQ_S_NAND_SAVEEXEC_B64 0x25
#define SQ_S_NOR_SAVEEXEC_B64 0x26
#define SQ_S_XNOR_SAVEEXEC_B64 0x27
#define SQ_S_QUADMASK_B32 0x28
#define SQ_S_QUADMASK_B64 0x29
#define SQ_S_MOVRELS_B32 0x2a
#define SQ_S_MOVRELS_B64 0x2b
#define SQ_S_MOVRELD_B32 0x2c
#define SQ_S_MOVRELD_B64 0x2d
#define SQ_S_CBRANCH_JOIN 0x2e
#define SQ_S_MOV_REGRD_B32 0x2f
#define SQ_S_ABS_I32 0x30
#define SQ_S_MOV_FED_B32 0x31
#define SQ_S_SET_GPR_IDX_IDX 0x32
#define SQ_ATTR0 0x0
/* SOPK (scalar with 16-bit immediate) opcodes. */
#define SQ_S_MOVK_I32 0x0
#define SQ_S_CMOVK_I32 0x1
#define SQ_S_CMPK_EQ_I32 0x2
#define SQ_S_CMPK_LG_I32 0x3
#define SQ_S_CMPK_GT_I32 0x4
#define SQ_S_CMPK_GE_I32 0x5
#define SQ_S_CMPK_LT_I32 0x6
#define SQ_S_CMPK_LE_I32 0x7
#define SQ_S_CMPK_EQ_U32 0x8
#define SQ_S_CMPK_LG_U32 0x9
#define SQ_S_CMPK_GT_U32 0xa
#define SQ_S_CMPK_GE_U32 0xb
#define SQ_S_CMPK_LT_U32 0xc
#define SQ_S_CMPK_LE_U32 0xd
#define SQ_S_ADDK_I32 0xe
#define SQ_S_MULK_I32 0xf
#define SQ_S_CBRANCH_I_FORK 0x10
#define SQ_S_GETREG_B32 0x11
#define SQ_S_SETREG_B32 0x12
#define SQ_S_GETREG_REGRD_B32 0x13
#define SQ_S_SETREG_IMM32_B32 0x14
/* Trap base/memory address SGPR slots, trap-temp registers (TTMP0-11),
 * export targets, and small count encodings. */
#define SQ_TBA_LO 0x6c
#define SQ_TBA_HI 0x6d
#define SQ_TMA_LO 0x6e
#define SQ_TMA_HI 0x6f
#define SQ_TTMP0 0x70
#define SQ_TTMP1 0x71
#define SQ_TTMP2 0x72
#define SQ_TTMP3 0x73
#define SQ_TTMP4 0x74
#define SQ_TTMP5 0x75
#define SQ_TTMP6 0x76
#define SQ_TTMP7 0x77
#define SQ_TTMP8 0x78
#define SQ_TTMP9 0x79
#define SQ_TTMP10 0x7a
#define SQ_TTMP11 0x7b
#define SQ_VGPR0 0x0
#define SQ_EXP 0x0
#define SQ_EXP_MRT0 0x0
#define SQ_EXP_MRTZ 0x8
#define SQ_EXP_NULL 0x9
#define SQ_EXP_POS0 0xc
#define SQ_EXP_PARAM0 0x20
#define SQ_CNT1 0x0
#define SQ_CNT2 0x1
#define SQ_CNT3 0x2
#define SQ_CNT4 0x3
/* SMEM (scalar memory) opcodes: loads, stores, cache ops, scalar atomics. */
#define SQ_S_LOAD_DWORD 0x0
#define SQ_S_LOAD_DWORDX2 0x1
#define SQ_S_LOAD_DWORDX4 0x2
#define SQ_S_LOAD_DWORDX8 0x3
#define SQ_S_LOAD_DWORDX16 0x4
#define SQ_S_BUFFER_LOAD_DWORD 0x8
#define SQ_S_BUFFER_LOAD_DWORDX2 0x9
#define SQ_S_BUFFER_LOAD_DWORDX4 0xa
#define SQ_S_BUFFER_LOAD_DWORDX8 0xb
#define SQ_S_BUFFER_LOAD_DWORDX16 0xc
#define SQ_S_STORE_DWORD 0x10
#define SQ_S_STORE_DWORDX2 0x11
#define SQ_S_STORE_DWORDX4 0x12
#define SQ_S_BUFFER_STORE_DWORD 0x18
#define SQ_S_BUFFER_STORE_DWORDX2 0x19
#define SQ_S_BUFFER_STORE_DWORDX4 0x1a
#define SQ_S_DCACHE_INV 0x20
#define SQ_S_DCACHE_WB 0x21
#define SQ_S_DCACHE_INV_VOL 0x22
#define SQ_S_DCACHE_WB_VOL 0x23
#define SQ_S_MEMTIME 0x24
#define SQ_S_MEMREALTIME 0x25
#define SQ_S_ATC_PROBE 0x26
#define SQ_S_ATC_PROBE_BUFFER 0x27
#define SQ_S_BUFFER_ATOMIC_SWAP 0x40
#define SQ_S_BUFFER_ATOMIC_CMPSWAP 0x41
#define SQ_S_BUFFER_ATOMIC_ADD 0x42
#define SQ_S_BUFFER_ATOMIC_SUB 0x43
#define SQ_S_BUFFER_ATOMIC_SMIN 0x44
#define SQ_S_BUFFER_ATOMIC_UMIN 0x45
#define SQ_S_BUFFER_ATOMIC_SMAX 0x46
#define SQ_S_BUFFER_ATOMIC_UMAX 0x47
#define SQ_S_BUFFER_ATOMIC_AND 0x48
#define SQ_S_BUFFER_ATOMIC_OR 0x49
#define SQ_S_BUFFER_ATOMIC_XOR 0x4a
#define SQ_S_BUFFER_ATOMIC_INC 0x4b
#define SQ_S_BUFFER_ATOMIC_DEC 0x4c
#define SQ_S_BUFFER_ATOMIC_SWAP_X2 0x60
#define SQ_S_BUFFER_ATOMIC_CMPSWAP_X2 0x61
#define SQ_S_BUFFER_ATOMIC_ADD_X2 0x62
#define SQ_S_BUFFER_ATOMIC_SUB_X2 0x63
#define SQ_S_BUFFER_ATOMIC_SMIN_X2 0x64
#define SQ_S_BUFFER_ATOMIC_UMIN_X2 0x65
#define SQ_S_BUFFER_ATOMIC_SMAX_X2 0x66
#define SQ_S_BUFFER_ATOMIC_UMAX_X2 0x67
#define SQ_S_BUFFER_ATOMIC_AND_X2 0x68
#define SQ_S_BUFFER_ATOMIC_OR_X2 0x69
#define SQ_S_BUFFER_ATOMIC_XOR_X2 0x6a
#define SQ_S_BUFFER_ATOMIC_INC_X2 0x6b
#define SQ_S_BUFFER_ATOMIC_DEC_X2 0x6c
/* VOPC floating-point compare: base condition codes (SQ_F..SQ_TRU),
 * CLASS ops, then CMP/CMPX blocks for F16 (0x20/0x30), F32 (0x40/0x50)
 * and F64 (0x60/0x70). CMPX variants also write EXEC. */
#define SQ_F 0x0
#define SQ_LT 0x1
#define SQ_EQ 0x2
#define SQ_LE 0x3
#define SQ_GT 0x4
#define SQ_LG 0x5
#define SQ_GE 0x6
#define SQ_O 0x7
#define SQ_U 0x8
#define SQ_NGE 0x9
#define SQ_NLG 0xa
#define SQ_NGT 0xb
#define SQ_NLE 0xc
#define SQ_NEQ 0xd
#define SQ_NLT 0xe
#define SQ_TRU 0xf
#define SQ_V_CMP_CLASS_F32 0x10
#define SQ_V_CMPX_CLASS_F32 0x11
#define SQ_V_CMP_CLASS_F64 0x12
#define SQ_V_CMPX_CLASS_F64 0x13
#define SQ_V_CMP_CLASS_F16 0x14
#define SQ_V_CMPX_CLASS_F16 0x15
#define SQ_V_CMP_F_F16 0x20
#define SQ_V_CMP_LT_F16 0x21
#define SQ_V_CMP_EQ_F16 0x22
#define SQ_V_CMP_LE_F16 0x23
#define SQ_V_CMP_GT_F16 0x24
#define SQ_V_CMP_LG_F16 0x25
#define SQ_V_CMP_GE_F16 0x26
#define SQ_V_CMP_O_F16 0x27
#define SQ_V_CMP_U_F16 0x28
#define SQ_V_CMP_NGE_F16 0x29
#define SQ_V_CMP_NLG_F16 0x2a
#define SQ_V_CMP_NGT_F16 0x2b
#define SQ_V_CMP_NLE_F16 0x2c
#define SQ_V_CMP_NEQ_F16 0x2d
#define SQ_V_CMP_NLT_F16 0x2e
#define SQ_V_CMP_TRU_F16 0x2f
#define SQ_V_CMPX_F_F16 0x30
#define SQ_V_CMPX_LT_F16 0x31
#define SQ_V_CMPX_EQ_F16 0x32
#define SQ_V_CMPX_LE_F16 0x33
#define SQ_V_CMPX_GT_F16 0x34
#define SQ_V_CMPX_LG_F16 0x35
#define SQ_V_CMPX_GE_F16 0x36
#define SQ_V_CMPX_O_F16 0x37
#define SQ_V_CMPX_U_F16 0x38
#define SQ_V_CMPX_NGE_F16 0x39
#define SQ_V_CMPX_NLG_F16 0x3a
#define SQ_V_CMPX_NGT_F16 0x3b
#define SQ_V_CMPX_NLE_F16 0x3c
#define SQ_V_CMPX_NEQ_F16 0x3d
#define SQ_V_CMPX_NLT_F16 0x3e
#define SQ_V_CMPX_TRU_F16 0x3f
#define SQ_V_CMP_F_F32 0x40
#define SQ_V_CMP_LT_F32 0x41
#define SQ_V_CMP_EQ_F32 0x42
#define SQ_V_CMP_LE_F32 0x43
#define SQ_V_CMP_GT_F32 0x44
#define SQ_V_CMP_LG_F32 0x45
#define SQ_V_CMP_GE_F32 0x46
#define SQ_V_CMP_O_F32 0x47
#define SQ_V_CMP_U_F32 0x48
#define SQ_V_CMP_NGE_F32 0x49
#define SQ_V_CMP_NLG_F32 0x4a
#define SQ_V_CMP_NGT_F32 0x4b
#define SQ_V_CMP_NLE_F32 0x4c
#define SQ_V_CMP_NEQ_F32 0x4d
#define SQ_V_CMP_NLT_F32 0x4e
#define SQ_V_CMP_TRU_F32 0x4f
#define SQ_V_CMPX_F_F32 0x50
#define SQ_V_CMPX_LT_F32 0x51
#define SQ_V_CMPX_EQ_F32 0x52
#define SQ_V_CMPX_LE_F32 0x53
#define SQ_V_CMPX_GT_F32 0x54
#define SQ_V_CMPX_LG_F32 0x55
#define SQ_V_CMPX_GE_F32 0x56
#define SQ_V_CMPX_O_F32 0x57
#define SQ_V_CMPX_U_F32 0x58
#define SQ_V_CMPX_NGE_F32 0x59
#define SQ_V_CMPX_NLG_F32 0x5a
#define SQ_V_CMPX_NGT_F32 0x5b
#define SQ_V_CMPX_NLE_F32 0x5c
#define SQ_V_CMPX_NEQ_F32 0x5d
#define SQ_V_CMPX_NLT_F32 0x5e
#define SQ_V_CMPX_TRU_F32 0x5f
#define SQ_V_CMP_F_F64 0x60
#define SQ_V_CMP_LT_F64 0x61
#define SQ_V_CMP_EQ_F64 0x62
#define SQ_V_CMP_LE_F64 0x63
#define SQ_V_CMP_GT_F64 0x64
#define SQ_V_CMP_LG_F64 0x65
#define SQ_V_CMP_GE_F64 0x66
#define SQ_V_CMP_O_F64 0x67
#define SQ_V_CMP_U_F64 0x68
#define SQ_V_CMP_NGE_F64 0x69
#define SQ_V_CMP_NLG_F64 0x6a
#define SQ_V_CMP_NGT_F64 0x6b
#define SQ_V_CMP_NLE_F64 0x6c
#define SQ_V_CMP_NEQ_F64 0x6d
#define SQ_V_CMP_NLT_F64 0x6e
#define SQ_V_CMP_TRU_F64 0x6f
#define SQ_V_CMPX_F_F64 0x70
#define SQ_V_CMPX_LT_F64 0x71
#define SQ_V_CMPX_EQ_F64 0x72
#define SQ_V_CMPX_LE_F64 0x73
#define SQ_V_CMPX_GT_F64 0x74
#define SQ_V_CMPX_LG_F64 0x75
#define SQ_V_CMPX_GE_F64 0x76
#define SQ_V_CMPX_O_F64 0x77
#define SQ_V_CMPX_U_F64 0x78
#define SQ_V_CMPX_NGE_F64 0x79
#define SQ_V_CMPX_NLG_F64 0x7a
#define SQ_V_CMPX_NGT_F64 0x7b
#define SQ_V_CMPX_NLE_F64 0x7c
#define SQ_V_CMPX_NEQ_F64 0x7d
#define SQ_V_CMPX_NLT_F64 0x7e
#define SQ_V_CMPX_TRU_F64 0x7f
/* VOPC integer compare: CMP/CMPX blocks for I16/U16 (0xa0-0xbf),
 * I32/U32 (0xc0-0xdf) and I64/U64 (0xe0-0xff). */
#define SQ_V_CMP_F_I16 0xa0
#define SQ_V_CMP_LT_I16 0xa1
#define SQ_V_CMP_EQ_I16 0xa2
#define SQ_V_CMP_LE_I16 0xa3
#define SQ_V_CMP_GT_I16 0xa4
#define SQ_V_CMP_NE_I16 0xa5
#define SQ_V_CMP_GE_I16 0xa6
#define SQ_V_CMP_T_I16 0xa7
#define SQ_V_CMP_F_U16 0xa8
#define SQ_V_CMP_LT_U16 0xa9
#define SQ_V_CMP_EQ_U16 0xaa
#define SQ_V_CMP_LE_U16 0xab
#define SQ_V_CMP_GT_U16 0xac
#define SQ_V_CMP_NE_U16 0xad
#define SQ_V_CMP_GE_U16 0xae
#define SQ_V_CMP_T_U16 0xaf
#define SQ_V_CMPX_F_I16 0xb0
#define SQ_V_CMPX_LT_I16 0xb1
#define SQ_V_CMPX_EQ_I16 0xb2
#define SQ_V_CMPX_LE_I16 0xb3
#define SQ_V_CMPX_GT_I16 0xb4
#define SQ_V_CMPX_NE_I16 0xb5
#define SQ_V_CMPX_GE_I16 0xb6
#define SQ_V_CMPX_T_I16 0xb7
#define SQ_V_CMPX_F_U16 0xb8
#define SQ_V_CMPX_LT_U16 0xb9
#define SQ_V_CMPX_EQ_U16 0xba
#define SQ_V_CMPX_LE_U16 0xbb
#define SQ_V_CMPX_GT_U16 0xbc
#define SQ_V_CMPX_NE_U16 0xbd
#define SQ_V_CMPX_GE_U16 0xbe
#define SQ_V_CMPX_T_U16 0xbf
#define SQ_V_CMP_F_I32 0xc0
#define SQ_V_CMP_LT_I32 0xc1
#define SQ_V_CMP_EQ_I32 0xc2
#define SQ_V_CMP_LE_I32 0xc3
#define SQ_V_CMP_GT_I32 0xc4
#define SQ_V_CMP_NE_I32 0xc5
#define SQ_V_CMP_GE_I32 0xc6
#define SQ_V_CMP_T_I32 0xc7
#define SQ_V_CMP_F_U32 0xc8
#define SQ_V_CMP_LT_U32 0xc9
#define SQ_V_CMP_EQ_U32 0xca
#define SQ_V_CMP_LE_U32 0xcb
#define SQ_V_CMP_GT_U32 0xcc
#define SQ_V_CMP_NE_U32 0xcd
#define SQ_V_CMP_GE_U32 0xce
#define SQ_V_CMP_T_U32 0xcf
#define SQ_V_CMPX_F_I32 0xd0
#define SQ_V_CMPX_LT_I32 0xd1
#define SQ_V_CMPX_EQ_I32 0xd2
#define SQ_V_CMPX_LE_I32 0xd3
#define SQ_V_CMPX_GT_I32 0xd4
#define SQ_V_CMPX_NE_I32 0xd5
#define SQ_V_CMPX_GE_I32 0xd6
#define SQ_V_CMPX_T_I32 0xd7
#define SQ_V_CMPX_F_U32 0xd8
#define SQ_V_CMPX_LT_U32 0xd9
#define SQ_V_CMPX_EQ_U32 0xda
#define SQ_V_CMPX_LE_U32 0xdb
#define SQ_V_CMPX_GT_U32 0xdc
#define SQ_V_CMPX_NE_U32 0xdd
#define SQ_V_CMPX_GE_U32 0xde
#define SQ_V_CMPX_T_U32 0xdf
#define SQ_V_CMP_F_I64 0xe0
#define SQ_V_CMP_LT_I64 0xe1
#define SQ_V_CMP_EQ_I64 0xe2
#define SQ_V_CMP_LE_I64 0xe3
#define SQ_V_CMP_GT_I64 0xe4
#define SQ_V_CMP_NE_I64 0xe5
#define SQ_V_CMP_GE_I64 0xe6
#define SQ_V_CMP_T_I64 0xe7
#define SQ_V_CMP_F_U64 0xe8
#define SQ_V_CMP_LT_U64 0xe9
#define SQ_V_CMP_EQ_U64 0xea
#define SQ_V_CMP_LE_U64 0xeb
#define SQ_V_CMP_GT_U64 0xec
#define SQ_V_CMP_NE_U64 0xed
#define SQ_V_CMP_GE_U64 0xee
#define SQ_V_CMP_T_U64 0xef
#define SQ_V_CMPX_F_I64 0xf0
#define SQ_V_CMPX_LT_I64 0xf1
#define SQ_V_CMPX_EQ_I64 0xf2
#define SQ_V_CMPX_LE_I64 0xf3
#define SQ_V_CMPX_GT_I64 0xf4
#define SQ_V_CMPX_NE_I64 0xf5
#define SQ_V_CMPX_GE_I64 0xf6
#define SQ_V_CMPX_T_I64 0xf7
#define SQ_V_CMPX_F_U64 0xf8
#define SQ_V_CMPX_LT_U64 0xf9
#define SQ_V_CMPX_EQ_U64 0xfa
#define SQ_V_CMPX_LE_U64 0xfb
#define SQ_V_CMPX_GT_U64 0xfc
#define SQ_V_CMPX_NE_U64 0xfd
#define SQ_V_CMPX_GE_U64 0xfe
#define SQ_V_CMPX_T_U64 0xff
/* Labels (SQ_L1..SQ_L15), SGPR base, SDWA unused-slot handling, SDWA
 * integer compare conditions, and inline-constant source operand codes.
 * NOTE: SQ_F/SQ_LT/SQ_EQ/SQ_LE/SQ_GT/SQ_GE repeat earlier identical
 * definitions (benign under C11 6.10.3); SQ_NE and SQ_T are new here. */
#define SQ_L1 0x1
#define SQ_L2 0x2
#define SQ_L3 0x3
#define SQ_L4 0x4
#define SQ_L5 0x5
#define SQ_L6 0x6
#define SQ_L7 0x7
#define SQ_L8 0x8
#define SQ_L9 0x9
#define SQ_L10 0xa
#define SQ_L11 0xb
#define SQ_L12 0xc
#define SQ_L13 0xd
#define SQ_L14 0xe
#define SQ_L15 0xf
#define SQ_SGPR0 0x0
#define SQ_SDWA_UNUSED_PAD 0x0
#define SQ_SDWA_UNUSED_SEXT 0x1
#define SQ_SDWA_UNUSED_PRESERVE 0x2
#define SQ_F 0x0
#define SQ_LT 0x1
#define SQ_EQ 0x2
#define SQ_LE 0x3
#define SQ_GT 0x4
#define SQ_NE 0x5
#define SQ_GE 0x6
#define SQ_T 0x7
#define SQ_SRC_64_INT 0xc0
#define SQ_SRC_M_1_INT 0xc1
#define SQ_SRC_M_2_INT 0xc2
#define SQ_SRC_M_3_INT 0xc3
#define SQ_SRC_M_4_INT 0xc4
#define SQ_SRC_M_5_INT 0xc5
#define SQ_SRC_M_6_INT 0xc6
#define SQ_SRC_M_7_INT 0xc7
#define SQ_SRC_M_8_INT 0xc8
#define SQ_SRC_M_9_INT 0xc9
#define SQ_SRC_M_10_INT 0xca
#define SQ_SRC_M_11_INT 0xcb
#define SQ_SRC_M_12_INT 0xcc
#define SQ_SRC_M_13_INT 0xcd
#define SQ_SRC_M_14_INT 0xce
#define SQ_SRC_M_15_INT 0xcf
#define SQ_SRC_M_16_INT 0xd0
#define SQ_SRC_0_5 0xf0
#define SQ_SRC_M_0_5 0xf1
#define SQ_SRC_1 0xf2
#define SQ_SRC_M_1 0xf3
#define SQ_SRC_2 0xf4
#define SQ_SRC_M_2 0xf5
#define SQ_SRC_4 0xf6
#define SQ_SRC_M_4 0xf7
#define SQ_SRC_INV_2PI 0xf8
#define SQ_SRC_0 0x80
#define SQ_SRC_1_INT 0x81
#define SQ_SRC_2_INT 0x82
#define SQ_SRC_3_INT 0x83
#define SQ_SRC_4_INT 0x84
#define SQ_SRC_5_INT 0x85
#define SQ_SRC_6_INT 0x86
#define SQ_SRC_7_INT 0x87
#define SQ_SRC_8_INT 0x88
#define SQ_SRC_9_INT 0x89
#define SQ_SRC_10_INT 0x8a
#define SQ_SRC_11_INT 0x8b
#define SQ_SRC_12_INT 0x8c
#define SQ_SRC_13_INT 0x8d
#define SQ_SRC_14_INT 0x8e
#define SQ_SRC_15_INT 0x8f
#define SQ_SRC_16_INT 0x90
#define SQ_SRC_17_INT 0x91
#define SQ_SRC_18_INT 0x92
#define SQ_SRC_19_INT 0x93
#define SQ_SRC_20_INT 0x94
#define SQ_SRC_21_INT 0x95
#define SQ_SRC_22_INT 0x96
#define SQ_SRC_23_INT 0x97
#define SQ_SRC_24_INT 0x98
#define SQ_SRC_25_INT 0x99
#define SQ_SRC_26_INT 0x9a
#define SQ_SRC_27_INT 0x9b
#define SQ_SRC_28_INT 0x9c
#define SQ_SRC_29_INT 0x9d
#define SQ_SRC_30_INT 0x9e
#define SQ_SRC_31_INT 0x9f
#define SQ_SRC_32_INT 0xa0
#define SQ_SRC_33_INT 0xa1
#define SQ_SRC_34_INT 0xa2
#define SQ_SRC_35_INT 0xa3
#define SQ_SRC_36_INT 0xa4
#define SQ_SRC_37_INT 0xa5
#define SQ_SRC_38_INT 0xa6
#define SQ_SRC_39_INT 0xa7
#define SQ_SRC_40_INT 0xa8
#define SQ_SRC_41_INT 0xa9
#define SQ_SRC_42_INT 0xaa
#define SQ_SRC_43_INT 0xab
#define SQ_SRC_44_INT 0xac
#define SQ_SRC_45_INT 0xad
#define SQ_SRC_46_INT 0xae
#define SQ_SRC_47_INT 0xaf
#define SQ_SRC_48_INT 0xb0
#define SQ_SRC_49_INT 0xb1
#define SQ_SRC_50_INT 0xb2
#define SQ_SRC_51_INT 0xb3
#define SQ_SRC_52_INT 0xb4
#define SQ_SRC_53_INT 0xb5
#define SQ_SRC_54_INT 0xb6
#define SQ_SRC_55_INT 0xb7
#define SQ_SRC_56_INT 0xb8
#define SQ_SRC_57_INT 0xb9
#define SQ_SRC_58_INT 0xba
#define SQ_SRC_59_INT 0xbb
#define SQ_SRC_60_INT 0xbc
#define SQ_SRC_61_INT 0xbd
#define SQ_SRC_62_INT 0xbe
#define SQ_SRC_63_INT 0xbf
/* DS (LDS/GDS data share) opcodes: 32-bit ops, RTN variants (0x20+),
 * 64-bit ops (0x40+), SRC2 forms (0x80+), and GWS/append/consume. */
#define SQ_DS_ADD_U32 0x0
#define SQ_DS_SUB_U32 0x1
#define SQ_DS_RSUB_U32 0x2
#define SQ_DS_INC_U32 0x3
#define SQ_DS_DEC_U32 0x4
#define SQ_DS_MIN_I32 0x5
#define SQ_DS_MAX_I32 0x6
#define SQ_DS_MIN_U32 0x7
#define SQ_DS_MAX_U32 0x8
#define SQ_DS_AND_B32 0x9
#define SQ_DS_OR_B32 0xa
#define SQ_DS_XOR_B32 0xb
#define SQ_DS_MSKOR_B32 0xc
#define SQ_DS_WRITE_B32 0xd
#define SQ_DS_WRITE2_B32 0xe
#define SQ_DS_WRITE2ST64_B32 0xf
#define SQ_DS_CMPST_B32 0x10
#define SQ_DS_CMPST_F32 0x11
#define SQ_DS_MIN_F32 0x12
#define SQ_DS_MAX_F32 0x13
#define SQ_DS_NOP 0x14
#define SQ_DS_ADD_F32 0x15
#define SQ_DS_WRITE_B8 0x1e
#define SQ_DS_WRITE_B16 0x1f
#define SQ_DS_ADD_RTN_U32 0x20
#define SQ_DS_SUB_RTN_U32 0x21
#define SQ_DS_RSUB_RTN_U32 0x22
#define SQ_DS_INC_RTN_U32 0x23
#define SQ_DS_DEC_RTN_U32 0x24
#define SQ_DS_MIN_RTN_I32 0x25
#define SQ_DS_MAX_RTN_I32 0x26
#define SQ_DS_MIN_RTN_U32 0x27
#define SQ_DS_MAX_RTN_U32 0x28
#define SQ_DS_AND_RTN_B32 0x29
#define SQ_DS_OR_RTN_B32 0x2a
#define SQ_DS_XOR_RTN_B32 0x2b
#define SQ_DS_MSKOR_RTN_B32 0x2c
#define SQ_DS_WRXCHG_RTN_B32 0x2d
#define SQ_DS_WRXCHG2_RTN_B32 0x2e
#define SQ_DS_WRXCHG2ST64_RTN_B32 0x2f
#define SQ_DS_CMPST_RTN_B32 0x30
#define SQ_DS_CMPST_RTN_F32 0x31
#define SQ_DS_MIN_RTN_F32 0x32
#define SQ_DS_MAX_RTN_F32 0x33
#define SQ_DS_WRAP_RTN_B32 0x34
#define SQ_DS_ADD_RTN_F32 0x35
#define SQ_DS_READ_B32 0x36
#define SQ_DS_READ2_B32 0x37
#define SQ_DS_READ2ST64_B32 0x38
#define SQ_DS_READ_I8 0x39
#define SQ_DS_READ_U8 0x3a
#define SQ_DS_READ_I16 0x3b
#define SQ_DS_READ_U16 0x3c
#define SQ_DS_SWIZZLE_B32 0x3d
#define SQ_DS_PERMUTE_B32 0x3e
#define SQ_DS_BPERMUTE_B32 0x3f
#define SQ_DS_ADD_U64 0x40
#define SQ_DS_SUB_U64 0x41
#define SQ_DS_RSUB_U64 0x42
#define SQ_DS_INC_U64 0x43
#define SQ_DS_DEC_U64 0x44
#define SQ_DS_MIN_I64 0x45
#define SQ_DS_MAX_I64 0x46
#define SQ_DS_MIN_U64 0x47
#define SQ_DS_MAX_U64 0x48
#define SQ_DS_AND_B64 0x49
#define SQ_DS_OR_B64 0x4a
#define SQ_DS_XOR_B64 0x4b
#define SQ_DS_MSKOR_B64 0x4c
#define SQ_DS_WRITE_B64 0x4d
#define SQ_DS_WRITE2_B64 0x4e
#define SQ_DS_WRITE2ST64_B64 0x4f
#define SQ_DS_CMPST_B64 0x50
#define SQ_DS_CMPST_F64 0x51
#define SQ_DS_MIN_F64 0x52
#define SQ_DS_MAX_F64 0x53
#define SQ_DS_ADD_RTN_U64 0x60
#define SQ_DS_SUB_RTN_U64 0x61
#define SQ_DS_RSUB_RTN_U64 0x62
#define SQ_DS_INC_RTN_U64 0x63
#define SQ_DS_DEC_RTN_U64 0x64
#define SQ_DS_MIN_RTN_I64 0x65
#define SQ_DS_MAX_RTN_I64 0x66
#define SQ_DS_MIN_RTN_U64 0x67
#define SQ_DS_MAX_RTN_U64 0x68
#define SQ_DS_AND_RTN_B64 0x69
#define SQ_DS_OR_RTN_B64 0x6a
#define SQ_DS_XOR_RTN_B64 0x6b
#define SQ_DS_MSKOR_RTN_B64 0x6c
#define SQ_DS_WRXCHG_RTN_B64 0x6d
#define SQ_DS_WRXCHG2_RTN_B64 0x6e
#define SQ_DS_WRXCHG2ST64_RTN_B64 0x6f
#define SQ_DS_CMPST_RTN_B64 0x70
#define SQ_DS_CMPST_RTN_F64 0x71
#define SQ_DS_MIN_RTN_F64 0x72
#define SQ_DS_MAX_RTN_F64 0x73
#define SQ_DS_READ_B64 0x76
#define SQ_DS_READ2_B64 0x77
#define SQ_DS_READ2ST64_B64 0x78
#define SQ_DS_CONDXCHG32_RTN_B64 0x7e
#define SQ_DS_ADD_SRC2_U32 0x80
#define SQ_DS_SUB_SRC2_U32 0x81
#define SQ_DS_RSUB_SRC2_U32 0x82
#define SQ_DS_INC_SRC2_U32 0x83
#define SQ_DS_DEC_SRC2_U32 0x84
#define SQ_DS_MIN_SRC2_I32 0x85
#define SQ_DS_MAX_SRC2_I32 0x86
#define SQ_DS_MIN_SRC2_U32 0x87
#define SQ_DS_MAX_SRC2_U32 0x88
#define SQ_DS_AND_SRC2_B32 0x89
#define SQ_DS_OR_SRC2_B32 0x8a
#define SQ_DS_XOR_SRC2_B32 0x8b
#define SQ_DS_WRITE_SRC2_B32 0x8d
#define SQ_DS_MIN_SRC2_F32 0x92
#define SQ_DS_MAX_SRC2_F32 0x93
#define SQ_DS_ADD_SRC2_F32 0x95
#define SQ_DS_GWS_SEMA_RELEASE_ALL 0x98
#define SQ_DS_GWS_INIT 0x99
#define SQ_DS_GWS_SEMA_V 0x9a
#define SQ_DS_GWS_SEMA_BR 0x9b
#define SQ_DS_GWS_SEMA_P 0x9c
#define SQ_DS_GWS_BARRIER 0x9d
#define SQ_DS_CONSUME 0xbd
#define SQ_DS_APPEND 0xbe
#define SQ_DS_ORDERED_COUNT 0xbf
#define SQ_DS_ADD_SRC2_U64 0xc0
#define SQ_DS_SUB_SRC2_U64 0xc1
#define SQ_DS_RSUB_SRC2_U64 0xc2
#define SQ_DS_INC_SRC2_U64 0xc3
#define SQ_DS_DEC_SRC2_U64 0xc4
#define SQ_DS_MIN_SRC2_I64 0xc5
#define SQ_DS_MAX_SRC2_I64 0xc6
#define SQ_DS_MIN_SRC2_U64 0xc7
#define SQ_DS_MAX_SRC2_U64 0xc8
#define SQ_DS_AND_SRC2_B64 0xc9
#define SQ_DS_OR_SRC2_B64 0xca
#define SQ_DS_XOR_SRC2_B64 0xcb
#define SQ_DS_WRITE_SRC2_B64 0xcd
#define SQ_DS_MIN_SRC2_F64 0xd2
#define SQ_DS_MAX_SRC2_F64 0xd3
#define SQ_DS_WRITE_B96 0xde
#define SQ_DS_WRITE_B128 0xdf
#define SQ_DS_CONDXCHG32_RTN_B128 0xfd
#define SQ_DS_READ_B96 0xfe
#define SQ_DS_READ_B128 0xff
3497#define SQ_BUFFER_LOAD_FORMAT_X 0x0
3498#define SQ_BUFFER_LOAD_FORMAT_XY 0x1
3499#define SQ_BUFFER_LOAD_FORMAT_XYZ 0x2
3500#define SQ_BUFFER_LOAD_FORMAT_XYZW 0x3
3501#define SQ_BUFFER_STORE_FORMAT_X 0x4
3502#define SQ_BUFFER_STORE_FORMAT_XY 0x5
3503#define SQ_BUFFER_STORE_FORMAT_XYZ 0x6
3504#define SQ_BUFFER_STORE_FORMAT_XYZW 0x7
3505#define SQ_BUFFER_LOAD_FORMAT_D16_X 0x8
3506#define SQ_BUFFER_LOAD_FORMAT_D16_XY 0x9
3507#define SQ_BUFFER_LOAD_FORMAT_D16_XYZ 0xa
3508#define SQ_BUFFER_LOAD_FORMAT_D16_XYZW 0xb
3509#define SQ_BUFFER_STORE_FORMAT_D16_X 0xc
3510#define SQ_BUFFER_STORE_FORMAT_D16_XY 0xd
3511#define SQ_BUFFER_STORE_FORMAT_D16_XYZ 0xe
3512#define SQ_BUFFER_STORE_FORMAT_D16_XYZW 0xf
3513#define SQ_BUFFER_LOAD_UBYTE 0x10
3514#define SQ_BUFFER_LOAD_SBYTE 0x11
3515#define SQ_BUFFER_LOAD_USHORT 0x12
3516#define SQ_BUFFER_LOAD_SSHORT 0x13
3517#define SQ_BUFFER_LOAD_DWORD 0x14
3518#define SQ_BUFFER_LOAD_DWORDX2 0x15
3519#define SQ_BUFFER_LOAD_DWORDX3 0x16
3520#define SQ_BUFFER_LOAD_DWORDX4 0x17
3521#define SQ_BUFFER_STORE_BYTE 0x18
3522#define SQ_BUFFER_STORE_SHORT 0x1a
3523#define SQ_BUFFER_STORE_DWORD 0x1c
3524#define SQ_BUFFER_STORE_DWORDX2 0x1d
3525#define SQ_BUFFER_STORE_DWORDX3 0x1e
3526#define SQ_BUFFER_STORE_DWORDX4 0x1f
3527#define SQ_BUFFER_STORE_LDS_DWORD 0x3d
3528#define SQ_BUFFER_WBINVL1 0x3e
3529#define SQ_BUFFER_WBINVL1_VOL 0x3f
3530#define SQ_BUFFER_ATOMIC_SWAP 0x40
3531#define SQ_BUFFER_ATOMIC_CMPSWAP 0x41
3532#define SQ_BUFFER_ATOMIC_ADD 0x42
3533#define SQ_BUFFER_ATOMIC_SUB 0x43
3534#define SQ_BUFFER_ATOMIC_SMIN 0x44
3535#define SQ_BUFFER_ATOMIC_UMIN 0x45
3536#define SQ_BUFFER_ATOMIC_SMAX 0x46
3537#define SQ_BUFFER_ATOMIC_UMAX 0x47
3538#define SQ_BUFFER_ATOMIC_AND 0x48
3539#define SQ_BUFFER_ATOMIC_OR 0x49
3540#define SQ_BUFFER_ATOMIC_XOR 0x4a
3541#define SQ_BUFFER_ATOMIC_INC 0x4b
3542#define SQ_BUFFER_ATOMIC_DEC 0x4c
3543#define SQ_BUFFER_ATOMIC_SWAP_X2 0x60
3544#define SQ_BUFFER_ATOMIC_CMPSWAP_X2 0x61
3545#define SQ_BUFFER_ATOMIC_ADD_X2 0x62
3546#define SQ_BUFFER_ATOMIC_SUB_X2 0x63
3547#define SQ_BUFFER_ATOMIC_SMIN_X2 0x64
3548#define SQ_BUFFER_ATOMIC_UMIN_X2 0x65
3549#define SQ_BUFFER_ATOMIC_SMAX_X2 0x66
3550#define SQ_BUFFER_ATOMIC_UMAX_X2 0x67
3551#define SQ_BUFFER_ATOMIC_AND_X2 0x68
3552#define SQ_BUFFER_ATOMIC_OR_X2 0x69
3553#define SQ_BUFFER_ATOMIC_XOR_X2 0x6a
3554#define SQ_BUFFER_ATOMIC_INC_X2 0x6b
3555#define SQ_BUFFER_ATOMIC_DEC_X2 0x6c
3556#define SQ_EXEC_LO 0x7e
3557#define SQ_EXEC_HI 0x7f
3558#define SQ_SRC_SCC 0xfd
3559#define SQ_OMOD_OFF 0x0
3560#define SQ_OMOD_M2 0x1
3561#define SQ_OMOD_M4 0x2
3562#define SQ_OMOD_D2 0x3
3563#define SQ_DPP_QUAD_PERM 0x0
3564#define SQ_DPP_ROW_SL1 0x101
3565#define SQ_DPP_ROW_SL2 0x102
3566#define SQ_DPP_ROW_SL3 0x103
3567#define SQ_DPP_ROW_SL4 0x104
3568#define SQ_DPP_ROW_SL5 0x105
3569#define SQ_DPP_ROW_SL6 0x106
3570#define SQ_DPP_ROW_SL7 0x107
3571#define SQ_DPP_ROW_SL8 0x108
3572#define SQ_DPP_ROW_SL9 0x109
3573#define SQ_DPP_ROW_SL10 0x10a
3574#define SQ_DPP_ROW_SL11 0x10b
3575#define SQ_DPP_ROW_SL12 0x10c
3576#define SQ_DPP_ROW_SL13 0x10d
3577#define SQ_DPP_ROW_SL14 0x10e
3578#define SQ_DPP_ROW_SL15 0x10f
3579#define SQ_DPP_ROW_SR1 0x111
3580#define SQ_DPP_ROW_SR2 0x112
3581#define SQ_DPP_ROW_SR3 0x113
3582#define SQ_DPP_ROW_SR4 0x114
3583#define SQ_DPP_ROW_SR5 0x115
3584#define SQ_DPP_ROW_SR6 0x116
3585#define SQ_DPP_ROW_SR7 0x117
3586#define SQ_DPP_ROW_SR8 0x118
3587#define SQ_DPP_ROW_SR9 0x119
3588#define SQ_DPP_ROW_SR10 0x11a
3589#define SQ_DPP_ROW_SR11 0x11b
3590#define SQ_DPP_ROW_SR12 0x11c
3591#define SQ_DPP_ROW_SR13 0x11d
3592#define SQ_DPP_ROW_SR14 0x11e
3593#define SQ_DPP_ROW_SR15 0x11f
3594#define SQ_DPP_ROW_RR1 0x121
3595#define SQ_DPP_ROW_RR2 0x122
3596#define SQ_DPP_ROW_RR3 0x123
3597#define SQ_DPP_ROW_RR4 0x124
3598#define SQ_DPP_ROW_RR5 0x125
3599#define SQ_DPP_ROW_RR6 0x126
3600#define SQ_DPP_ROW_RR7 0x127
3601#define SQ_DPP_ROW_RR8 0x128
3602#define SQ_DPP_ROW_RR9 0x129
3603#define SQ_DPP_ROW_RR10 0x12a
3604#define SQ_DPP_ROW_RR11 0x12b
3605#define SQ_DPP_ROW_RR12 0x12c
3606#define SQ_DPP_ROW_RR13 0x12d
3607#define SQ_DPP_ROW_RR14 0x12e
3608#define SQ_DPP_ROW_RR15 0x12f
3609#define SQ_DPP_WF_SL1 0x130
3610#define SQ_DPP_WF_RL1 0x134
3611#define SQ_DPP_WF_SR1 0x138
3612#define SQ_DPP_WF_RR1 0x13c
3613#define SQ_DPP_ROW_MIRROR 0x140
3614#define SQ_DPP_ROW_HALF_MIRROR 0x141
3615#define SQ_DPP_ROW_BCAST15 0x142
3616#define SQ_DPP_ROW_BCAST31 0x143
3617#define SQ_EXP_GDS0 0x18
3618#define SQ_GS_OP_NOP 0x0
3619#define SQ_GS_OP_CUT 0x1
3620#define SQ_GS_OP_EMIT 0x2
3621#define SQ_GS_OP_EMIT_CUT 0x3
3622#define SQ_IMAGE_LOAD 0x0
3623#define SQ_IMAGE_LOAD_MIP 0x1
3624#define SQ_IMAGE_LOAD_PCK 0x2
3625#define SQ_IMAGE_LOAD_PCK_SGN 0x3
3626#define SQ_IMAGE_LOAD_MIP_PCK 0x4
3627#define SQ_IMAGE_LOAD_MIP_PCK_SGN 0x5
3628#define SQ_IMAGE_STORE 0x8
3629#define SQ_IMAGE_STORE_MIP 0x9
3630#define SQ_IMAGE_STORE_PCK 0xa
3631#define SQ_IMAGE_STORE_MIP_PCK 0xb
3632#define SQ_IMAGE_GET_RESINFO 0xe
3633#define SQ_IMAGE_ATOMIC_SWAP 0x10
3634#define SQ_IMAGE_ATOMIC_CMPSWAP 0x11
3635#define SQ_IMAGE_ATOMIC_ADD 0x12
3636#define SQ_IMAGE_ATOMIC_SUB 0x13
3637#define SQ_IMAGE_ATOMIC_SMIN 0x14
3638#define SQ_IMAGE_ATOMIC_UMIN 0x15
3639#define SQ_IMAGE_ATOMIC_SMAX 0x16
3640#define SQ_IMAGE_ATOMIC_UMAX 0x17
3641#define SQ_IMAGE_ATOMIC_AND 0x18
3642#define SQ_IMAGE_ATOMIC_OR 0x19
3643#define SQ_IMAGE_ATOMIC_XOR 0x1a
3644#define SQ_IMAGE_ATOMIC_INC 0x1b
3645#define SQ_IMAGE_ATOMIC_DEC 0x1c
3646#define SQ_IMAGE_SAMPLE 0x20
3647#define SQ_IMAGE_SAMPLE_CL 0x21
3648#define SQ_IMAGE_SAMPLE_D 0x22
3649#define SQ_IMAGE_SAMPLE_D_CL 0x23
3650#define SQ_IMAGE_SAMPLE_L 0x24
3651#define SQ_IMAGE_SAMPLE_B 0x25
3652#define SQ_IMAGE_SAMPLE_B_CL 0x26
3653#define SQ_IMAGE_SAMPLE_LZ 0x27
3654#define SQ_IMAGE_SAMPLE_C 0x28
3655#define SQ_IMAGE_SAMPLE_C_CL 0x29
3656#define SQ_IMAGE_SAMPLE_C_D 0x2a
3657#define SQ_IMAGE_SAMPLE_C_D_CL 0x2b
3658#define SQ_IMAGE_SAMPLE_C_L 0x2c
3659#define SQ_IMAGE_SAMPLE_C_B 0x2d
3660#define SQ_IMAGE_SAMPLE_C_B_CL 0x2e
3661#define SQ_IMAGE_SAMPLE_C_LZ 0x2f
3662#define SQ_IMAGE_SAMPLE_O 0x30
3663#define SQ_IMAGE_SAMPLE_CL_O 0x31
3664#define SQ_IMAGE_SAMPLE_D_O 0x32
3665#define SQ_IMAGE_SAMPLE_D_CL_O 0x33
3666#define SQ_IMAGE_SAMPLE_L_O 0x34
3667#define SQ_IMAGE_SAMPLE_B_O 0x35
3668#define SQ_IMAGE_SAMPLE_B_CL_O 0x36
3669#define SQ_IMAGE_SAMPLE_LZ_O 0x37
3670#define SQ_IMAGE_SAMPLE_C_O 0x38
3671#define SQ_IMAGE_SAMPLE_C_CL_O 0x39
3672#define SQ_IMAGE_SAMPLE_C_D_O 0x3a
3673#define SQ_IMAGE_SAMPLE_C_D_CL_O 0x3b
3674#define SQ_IMAGE_SAMPLE_C_L_O 0x3c
3675#define SQ_IMAGE_SAMPLE_C_B_O 0x3d
3676#define SQ_IMAGE_SAMPLE_C_B_CL_O 0x3e
3677#define SQ_IMAGE_SAMPLE_C_LZ_O 0x3f
3678#define SQ_IMAGE_GATHER4 0x40
3679#define SQ_IMAGE_GATHER4_CL 0x41
3680#define SQ_IMAGE_GATHER4_L 0x44
3681#define SQ_IMAGE_GATHER4_B 0x45
3682#define SQ_IMAGE_GATHER4_B_CL 0x46
3683#define SQ_IMAGE_GATHER4_LZ 0x47
3684#define SQ_IMAGE_GATHER4_C 0x48
3685#define SQ_IMAGE_GATHER4_C_CL 0x49
3686#define SQ_IMAGE_GATHER4_C_L 0x4c
3687#define SQ_IMAGE_GATHER4_C_B 0x4d
3688#define SQ_IMAGE_GATHER4_C_B_CL 0x4e
3689#define SQ_IMAGE_GATHER4_C_LZ 0x4f
3690#define SQ_IMAGE_GATHER4_O 0x50
3691#define SQ_IMAGE_GATHER4_CL_O 0x51
3692#define SQ_IMAGE_GATHER4_L_O 0x54
3693#define SQ_IMAGE_GATHER4_B_O 0x55
3694#define SQ_IMAGE_GATHER4_B_CL_O 0x56
3695#define SQ_IMAGE_GATHER4_LZ_O 0x57
3696#define SQ_IMAGE_GATHER4_C_O 0x58
3697#define SQ_IMAGE_GATHER4_C_CL_O 0x59
3698#define SQ_IMAGE_GATHER4_C_L_O 0x5c
3699#define SQ_IMAGE_GATHER4_C_B_O 0x5d
3700#define SQ_IMAGE_GATHER4_C_B_CL_O 0x5e
3701#define SQ_IMAGE_GATHER4_C_LZ_O 0x5f
3702#define SQ_IMAGE_GET_LOD 0x60
3703#define SQ_IMAGE_SAMPLE_CD 0x68
3704#define SQ_IMAGE_SAMPLE_CD_CL 0x69
3705#define SQ_IMAGE_SAMPLE_C_CD 0x6a
3706#define SQ_IMAGE_SAMPLE_C_CD_CL 0x6b
3707#define SQ_IMAGE_SAMPLE_CD_O 0x6c
3708#define SQ_IMAGE_SAMPLE_CD_CL_O 0x6d
3709#define SQ_IMAGE_SAMPLE_C_CD_O 0x6e
3710#define SQ_IMAGE_SAMPLE_C_CD_CL_O 0x6f
3711#define SQ_IMAGE_RSRC256 0x7e
3712#define SQ_IMAGE_SAMPLER 0x7f
3713#define SQ_SRC_VCCZ 0xfb
3714#define SQ_SRC_VGPR0 0x100
3715#define SQ_SDWA_BYTE_0 0x0
3716#define SQ_SDWA_BYTE_1 0x1
3717#define SQ_SDWA_BYTE_2 0x2
3718#define SQ_SDWA_BYTE_3 0x3
3719#define SQ_SDWA_WORD_0 0x4
3720#define SQ_SDWA_WORD_1 0x5
3721#define SQ_SDWA_DWORD 0x6
3722#define SQ_XNACK_MASK_LO 0x68
3723#define SQ_XNACK_MASK_HI 0x69
3724#define SQ_TBUFFER_LOAD_FORMAT_X 0x0
3725#define SQ_TBUFFER_LOAD_FORMAT_XY 0x1
3726#define SQ_TBUFFER_LOAD_FORMAT_XYZ 0x2
3727#define SQ_TBUFFER_LOAD_FORMAT_XYZW 0x3
3728#define SQ_TBUFFER_STORE_FORMAT_X 0x4
3729#define SQ_TBUFFER_STORE_FORMAT_XY 0x5
3730#define SQ_TBUFFER_STORE_FORMAT_XYZ 0x6
3731#define SQ_TBUFFER_STORE_FORMAT_XYZW 0x7
3732#define SQ_TBUFFER_LOAD_FORMAT_D16_X 0x8
3733#define SQ_TBUFFER_LOAD_FORMAT_D16_XY 0x9
3734#define SQ_TBUFFER_LOAD_FORMAT_D16_XYZ 0xa
3735#define SQ_TBUFFER_LOAD_FORMAT_D16_XYZW 0xb
3736#define SQ_TBUFFER_STORE_FORMAT_D16_X 0xc
3737#define SQ_TBUFFER_STORE_FORMAT_D16_XY 0xd
3738#define SQ_TBUFFER_STORE_FORMAT_D16_XYZ 0xe
3739#define SQ_TBUFFER_STORE_FORMAT_D16_XYZW 0xf
3740#define SQ_CHAN_X 0x0
3741#define SQ_CHAN_Y 0x1
3742#define SQ_CHAN_Z 0x2
3743#define SQ_CHAN_W 0x3
3744#define SQ_V_NOP 0x0
3745#define SQ_V_MOV_B32 0x1
3746#define SQ_V_READFIRSTLANE_B32 0x2
3747#define SQ_V_CVT_I32_F64 0x3
3748#define SQ_V_CVT_F64_I32 0x4
3749#define SQ_V_CVT_F32_I32 0x5
3750#define SQ_V_CVT_F32_U32 0x6
3751#define SQ_V_CVT_U32_F32 0x7
3752#define SQ_V_CVT_I32_F32 0x8
3753#define SQ_V_MOV_FED_B32 0x9
3754#define SQ_V_CVT_F16_F32 0xa
3755#define SQ_V_CVT_F32_F16 0xb
3756#define SQ_V_CVT_RPI_I32_F32 0xc
3757#define SQ_V_CVT_FLR_I32_F32 0xd
3758#define SQ_V_CVT_OFF_F32_I4 0xe
3759#define SQ_V_CVT_F32_F64 0xf
3760#define SQ_V_CVT_F64_F32 0x10
3761#define SQ_V_CVT_F32_UBYTE0 0x11
3762#define SQ_V_CVT_F32_UBYTE1 0x12
3763#define SQ_V_CVT_F32_UBYTE2 0x13
3764#define SQ_V_CVT_F32_UBYTE3 0x14
3765#define SQ_V_CVT_U32_F64 0x15
3766#define SQ_V_CVT_F64_U32 0x16
3767#define SQ_V_TRUNC_F64 0x17
3768#define SQ_V_CEIL_F64 0x18
3769#define SQ_V_RNDNE_F64 0x19
3770#define SQ_V_FLOOR_F64 0x1a
3771#define SQ_V_FRACT_F32 0x1b
3772#define SQ_V_TRUNC_F32 0x1c
3773#define SQ_V_CEIL_F32 0x1d
3774#define SQ_V_RNDNE_F32 0x1e
3775#define SQ_V_FLOOR_F32 0x1f
3776#define SQ_V_EXP_F32 0x20
3777#define SQ_V_LOG_F32 0x21
3778#define SQ_V_RCP_F32 0x22
3779#define SQ_V_RCP_IFLAG_F32 0x23
3780#define SQ_V_RSQ_F32 0x24
3781#define SQ_V_RCP_F64 0x25
3782#define SQ_V_RSQ_F64 0x26
3783#define SQ_V_SQRT_F32 0x27
3784#define SQ_V_SQRT_F64 0x28
3785#define SQ_V_SIN_F32 0x29
3786#define SQ_V_COS_F32 0x2a
3787#define SQ_V_NOT_B32 0x2b
3788#define SQ_V_BFREV_B32 0x2c
3789#define SQ_V_FFBH_U32 0x2d
3790#define SQ_V_FFBL_B32 0x2e
3791#define SQ_V_FFBH_I32 0x2f
3792#define SQ_V_FREXP_EXP_I32_F64 0x30
3793#define SQ_V_FREXP_MANT_F64 0x31
3794#define SQ_V_FRACT_F64 0x32
3795#define SQ_V_FREXP_EXP_I32_F32 0x33
3796#define SQ_V_FREXP_MANT_F32 0x34
3797#define SQ_V_CLREXCP 0x35
3798#define SQ_V_MOVRELD_B32 0x36
3799#define SQ_V_MOVRELS_B32 0x37
3800#define SQ_V_MOVRELSD_B32 0x38
3801#define SQ_V_CVT_F16_U16 0x39
3802#define SQ_V_CVT_F16_I16 0x3a
3803#define SQ_V_CVT_U16_F16 0x3b
3804#define SQ_V_CVT_I16_F16 0x3c
3805#define SQ_V_RCP_F16 0x3d
3806#define SQ_V_SQRT_F16 0x3e
3807#define SQ_V_RSQ_F16 0x3f
3808#define SQ_V_LOG_F16 0x40
3809#define SQ_V_EXP_F16 0x41
3810#define SQ_V_FREXP_MANT_F16 0x42
3811#define SQ_V_FREXP_EXP_I16_F16 0x43
3812#define SQ_V_FLOOR_F16 0x44
3813#define SQ_V_CEIL_F16 0x45
3814#define SQ_V_TRUNC_F16 0x46
3815#define SQ_V_RNDNE_F16 0x47
3816#define SQ_V_FRACT_F16 0x48
3817#define SQ_V_SIN_F16 0x49
3818#define SQ_V_COS_F16 0x4a
3819#define SQ_V_EXP_LEGACY_F32 0x4b
3820#define SQ_V_LOG_LEGACY_F32 0x4c
3821#define SQ_V_CVT_NORM_I16_F16 0x4d
3822#define SQ_V_CVT_NORM_U16_F16 0x4e
3823#define SQ_SRC_SDWA 0xf9
3824#define SQ_V_OPC_OFFSET 0x0
3825#define SQ_V_OP2_OFFSET 0x100
3826#define SQ_V_OP1_OFFSET 0x140
3827#define SQ_V_INTRP_OFFSET 0x270
3828#define SQ_V_INTERP_P1_F32 0x0
3829#define SQ_V_INTERP_P2_F32 0x1
3830#define SQ_V_INTERP_MOV_F32 0x2
3831#define SQ_S_NOP 0x0
3832#define SQ_S_ENDPGM 0x1
3833#define SQ_S_BRANCH 0x2
3834#define SQ_S_WAKEUP 0x3
3835#define SQ_S_CBRANCH_SCC0 0x4
3836#define SQ_S_CBRANCH_SCC1 0x5
3837#define SQ_S_CBRANCH_VCCZ 0x6
3838#define SQ_S_CBRANCH_VCCNZ 0x7
3839#define SQ_S_CBRANCH_EXECZ 0x8
3840#define SQ_S_CBRANCH_EXECNZ 0x9
3841#define SQ_S_BARRIER 0xa
3842#define SQ_S_SETKILL 0xb
3843#define SQ_S_WAITCNT 0xc
3844#define SQ_S_SETHALT 0xd
3845#define SQ_S_SLEEP 0xe
3846#define SQ_S_SETPRIO 0xf
3847#define SQ_S_SENDMSG 0x10
3848#define SQ_S_SENDMSGHALT 0x11
3849#define SQ_S_TRAP 0x12
3850#define SQ_S_ICACHE_INV 0x13
3851#define SQ_S_INCPERFLEVEL 0x14
3852#define SQ_S_DECPERFLEVEL 0x15
3853#define SQ_S_TTRACEDATA 0x16
3854#define SQ_S_CBRANCH_CDBGSYS 0x17
3855#define SQ_S_CBRANCH_CDBGUSER 0x18
3856#define SQ_S_CBRANCH_CDBGSYS_OR_USER 0x19
3857#define SQ_S_CBRANCH_CDBGSYS_AND_USER 0x1a
3858#define SQ_S_ENDPGM_SAVED 0x1b
3859#define SQ_S_SET_GPR_IDX_OFF 0x1c
3860#define SQ_S_SET_GPR_IDX_MODE 0x1d
3861#define SQ_SRC_DPP 0xfa
3862#define SQ_SRC_LITERAL 0xff
3863#define SQ_VCC_LO 0x6a
3864#define SQ_VCC_HI 0x6b
3865#define SQ_PARAM_P10 0x0
3866#define SQ_PARAM_P20 0x1
3867#define SQ_PARAM_P0 0x2
3868#define SQ_SRC_LDS_DIRECT 0xfe
3869#define SQ_V_CNDMASK_B32 0x0
3870#define SQ_V_ADD_F32 0x1
3871#define SQ_V_SUB_F32 0x2
3872#define SQ_V_SUBREV_F32 0x3
3873#define SQ_V_MUL_LEGACY_F32 0x4
3874#define SQ_V_MUL_F32 0x5
3875#define SQ_V_MUL_I32_I24 0x6
3876#define SQ_V_MUL_HI_I32_I24 0x7
3877#define SQ_V_MUL_U32_U24 0x8
3878#define SQ_V_MUL_HI_U32_U24 0x9
3879#define SQ_V_MIN_F32 0xa
3880#define SQ_V_MAX_F32 0xb
3881#define SQ_V_MIN_I32 0xc
3882#define SQ_V_MAX_I32 0xd
3883#define SQ_V_MIN_U32 0xe
3884#define SQ_V_MAX_U32 0xf
3885#define SQ_V_LSHRREV_B32 0x10
3886#define SQ_V_ASHRREV_I32 0x11
3887#define SQ_V_LSHLREV_B32 0x12
3888#define SQ_V_AND_B32 0x13
3889#define SQ_V_OR_B32 0x14
3890#define SQ_V_XOR_B32 0x15
3891#define SQ_V_MAC_F32 0x16
3892#define SQ_V_MADMK_F32 0x17
3893#define SQ_V_MADAK_F32 0x18
3894#define SQ_V_ADD_U32 0x19
3895#define SQ_V_SUB_U32 0x1a
3896#define SQ_V_SUBREV_U32 0x1b
3897#define SQ_V_ADDC_U32 0x1c
3898#define SQ_V_SUBB_U32 0x1d
3899#define SQ_V_SUBBREV_U32 0x1e
3900#define SQ_V_ADD_F16 0x1f
3901#define SQ_V_SUB_F16 0x20
3902#define SQ_V_SUBREV_F16 0x21
3903#define SQ_V_MUL_F16 0x22
3904#define SQ_V_MAC_F16 0x23
3905#define SQ_V_MADMK_F16 0x24
3906#define SQ_V_MADAK_F16 0x25
3907#define SQ_V_ADD_U16 0x26
3908#define SQ_V_SUB_U16 0x27
3909#define SQ_V_SUBREV_U16 0x28
3910#define SQ_V_MUL_LO_U16 0x29
3911#define SQ_V_LSHLREV_B16 0x2a
3912#define SQ_V_LSHRREV_B16 0x2b
3913#define SQ_V_ASHRREV_I16 0x2c
3914#define SQ_V_MAX_F16 0x2d
3915#define SQ_V_MIN_F16 0x2e
3916#define SQ_V_MAX_U16 0x2f
3917#define SQ_V_MAX_I16 0x30
3918#define SQ_V_MIN_U16 0x31
3919#define SQ_V_MIN_I16 0x32
3920#define SQ_V_LDEXP_F16 0x33
3921#define SQ_FLAT_LOAD_UBYTE 0x10
3922#define SQ_FLAT_LOAD_SBYTE 0x11
3923#define SQ_FLAT_LOAD_USHORT 0x12
3924#define SQ_FLAT_LOAD_SSHORT 0x13
3925#define SQ_FLAT_LOAD_DWORD 0x14
3926#define SQ_FLAT_LOAD_DWORDX2 0x15
3927#define SQ_FLAT_LOAD_DWORDX3 0x16
3928#define SQ_FLAT_LOAD_DWORDX4 0x17
3929#define SQ_FLAT_STORE_BYTE 0x18
3930#define SQ_FLAT_STORE_SHORT 0x1a
3931#define SQ_FLAT_STORE_DWORD 0x1c
3932#define SQ_FLAT_STORE_DWORDX2 0x1d
3933#define SQ_FLAT_STORE_DWORDX3 0x1e
3934#define SQ_FLAT_STORE_DWORDX4 0x1f
3935#define SQ_FLAT_ATOMIC_SWAP 0x40
3936#define SQ_FLAT_ATOMIC_CMPSWAP 0x41
3937#define SQ_FLAT_ATOMIC_ADD 0x42
3938#define SQ_FLAT_ATOMIC_SUB 0x43
3939#define SQ_FLAT_ATOMIC_SMIN 0x44
3940#define SQ_FLAT_ATOMIC_UMIN 0x45
3941#define SQ_FLAT_ATOMIC_SMAX 0x46
3942#define SQ_FLAT_ATOMIC_UMAX 0x47
3943#define SQ_FLAT_ATOMIC_AND 0x48
3944#define SQ_FLAT_ATOMIC_OR 0x49
3945#define SQ_FLAT_ATOMIC_XOR 0x4a
3946#define SQ_FLAT_ATOMIC_INC 0x4b
3947#define SQ_FLAT_ATOMIC_DEC 0x4c
3948#define SQ_FLAT_ATOMIC_SWAP_X2 0x60
3949#define SQ_FLAT_ATOMIC_CMPSWAP_X2 0x61
3950#define SQ_FLAT_ATOMIC_ADD_X2 0x62
3951#define SQ_FLAT_ATOMIC_SUB_X2 0x63
3952#define SQ_FLAT_ATOMIC_SMIN_X2 0x64
3953#define SQ_FLAT_ATOMIC_UMIN_X2 0x65
3954#define SQ_FLAT_ATOMIC_SMAX_X2 0x66
3955#define SQ_FLAT_ATOMIC_UMAX_X2 0x67
3956#define SQ_FLAT_ATOMIC_AND_X2 0x68
3957#define SQ_FLAT_ATOMIC_OR_X2 0x69
3958#define SQ_FLAT_ATOMIC_XOR_X2 0x6a
3959#define SQ_FLAT_ATOMIC_INC_X2 0x6b
3960#define SQ_FLAT_ATOMIC_DEC_X2 0x6c
3961#define SQ_S_CMP_EQ_I32 0x0
3962#define SQ_S_CMP_LG_I32 0x1
3963#define SQ_S_CMP_GT_I32 0x2
3964#define SQ_S_CMP_GE_I32 0x3
3965#define SQ_S_CMP_LT_I32 0x4
3966#define SQ_S_CMP_LE_I32 0x5
3967#define SQ_S_CMP_EQ_U32 0x6
3968#define SQ_S_CMP_LG_U32 0x7
3969#define SQ_S_CMP_GT_U32 0x8
3970#define SQ_S_CMP_GE_U32 0x9
3971#define SQ_S_CMP_LT_U32 0xa
3972#define SQ_S_CMP_LE_U32 0xb
3973#define SQ_S_BITCMP0_B32 0xc
3974#define SQ_S_BITCMP1_B32 0xd
3975#define SQ_S_BITCMP0_B64 0xe
3976#define SQ_S_BITCMP1_B64 0xf
3977#define SQ_S_SETVSKIP 0x10
3978#define SQ_S_SET_GPR_IDX_ON 0x11
3979#define SQ_S_CMP_EQ_U64 0x12
3980#define SQ_S_CMP_LG_U64 0x13
3981#define SQ_M0 0x7c
3982#define SQ_V_MAD_LEGACY_F32 0x1c0
3983#define SQ_V_MAD_F32 0x1c1
3984#define SQ_V_MAD_I32_I24 0x1c2
3985#define SQ_V_MAD_U32_U24 0x1c3
3986#define SQ_V_CUBEID_F32 0x1c4
3987#define SQ_V_CUBESC_F32 0x1c5
3988#define SQ_V_CUBETC_F32 0x1c6
3989#define SQ_V_CUBEMA_F32 0x1c7
3990#define SQ_V_BFE_U32 0x1c8
3991#define SQ_V_BFE_I32 0x1c9
3992#define SQ_V_BFI_B32 0x1ca
3993#define SQ_V_FMA_F32 0x1cb
3994#define SQ_V_FMA_F64 0x1cc
3995#define SQ_V_LERP_U8 0x1cd
3996#define SQ_V_ALIGNBIT_B32 0x1ce
3997#define SQ_V_ALIGNBYTE_B32 0x1cf
3998#define SQ_V_MIN3_F32 0x1d0
3999#define SQ_V_MIN3_I32 0x1d1
4000#define SQ_V_MIN3_U32 0x1d2
4001#define SQ_V_MAX3_F32 0x1d3
4002#define SQ_V_MAX3_I32 0x1d4
4003#define SQ_V_MAX3_U32 0x1d5
4004#define SQ_V_MED3_F32 0x1d6
4005#define SQ_V_MED3_I32 0x1d7
4006#define SQ_V_MED3_U32 0x1d8
4007#define SQ_V_SAD_U8 0x1d9
4008#define SQ_V_SAD_HI_U8 0x1da
4009#define SQ_V_SAD_U16 0x1db
4010#define SQ_V_SAD_U32 0x1dc
4011#define SQ_V_CVT_PK_U8_F32 0x1dd
4012#define SQ_V_DIV_FIXUP_F32 0x1de
4013#define SQ_V_DIV_FIXUP_F64 0x1df
4014#define SQ_V_DIV_SCALE_F32 0x1e0
4015#define SQ_V_DIV_SCALE_F64 0x1e1
4016#define SQ_V_DIV_FMAS_F32 0x1e2
4017#define SQ_V_DIV_FMAS_F64 0x1e3
4018#define SQ_V_MSAD_U8 0x1e4
4019#define SQ_V_QSAD_PK_U16_U8 0x1e5
4020#define SQ_V_MQSAD_PK_U16_U8 0x1e6
4021#define SQ_V_MQSAD_U32_U8 0x1e7
4022#define SQ_V_MAD_U64_U32 0x1e8
4023#define SQ_V_MAD_I64_I32 0x1e9
4024#define SQ_V_MAD_F16 0x1ea
4025#define SQ_V_MAD_U16 0x1eb
4026#define SQ_V_MAD_I16 0x1ec
4027#define SQ_V_PERM_B32 0x1ed
4028#define SQ_V_FMA_F16 0x1ee
4029#define SQ_V_DIV_FIXUP_F16 0x1ef
4030#define SQ_V_CVT_PKACCUM_U8_F32 0x1f0
4031#define SQ_V_INTERP_P1LL_F16 0x274
4032#define SQ_V_INTERP_P1LV_F16 0x275
4033#define SQ_V_INTERP_P2_F16 0x276
4034#define SQ_V_ADD_F64 0x280
4035#define SQ_V_MUL_F64 0x281
4036#define SQ_V_MIN_F64 0x282
4037#define SQ_V_MAX_F64 0x283
4038#define SQ_V_LDEXP_F64 0x284
4039#define SQ_V_MUL_LO_U32 0x285
4040#define SQ_V_MUL_HI_U32 0x286
4041#define SQ_V_MUL_HI_I32 0x287
4042#define SQ_V_LDEXP_F32 0x288
4043#define SQ_V_READLANE_B32 0x289
4044#define SQ_V_WRITELANE_B32 0x28a
4045#define SQ_V_BCNT_U32_B32 0x28b
4046#define SQ_V_MBCNT_LO_U32_B32 0x28c
4047#define SQ_V_MBCNT_HI_U32_B32 0x28d
4048#define SQ_V_MAC_LEGACY_F32 0x28e
4049#define SQ_V_LSHLREV_B64 0x28f
4050#define SQ_V_LSHRREV_B64 0x290
4051#define SQ_V_ASHRREV_I64 0x291
4052#define SQ_V_TRIG_PREOP_F64 0x292
4053#define SQ_V_BFM_B32 0x293
4054#define SQ_V_CVT_PKNORM_I16_F32 0x294
4055#define SQ_V_CVT_PKNORM_U16_F32 0x295
4056#define SQ_V_CVT_PKRTZ_F16_F32 0x296
4057#define SQ_V_CVT_PK_U16_U32 0x297
4058#define SQ_V_CVT_PK_I16_I32 0x298
4059#define SQ_V_CVT_PKNORM_I16_F16 0x299
4060#define SQ_V_CVT_PKNORM_U16_F16 0x29a
4061#define SQ_VCC_ALL 0x0
4062#define SQ_SRC_EXECZ 0xfc
4063#define SQ_FLAT_SCRATCH_LO 0x66
4064#define SQ_FLAT_SCRATCH_HI 0x67
4065#define SQ_SYSMSG_OP_ECC_ERR_INTERRUPT 0x1
4066#define SQ_SYSMSG_OP_REG_RD 0x2
4067#define SQ_SYSMSG_OP_HOST_TRAP_ACK 0x3
4068#define SQ_SYSMSG_OP_TTRACE_PC 0x4
4069#define SQ_HW_REG_MODE 0x1
4070#define SQ_HW_REG_STATUS 0x2
4071#define SQ_HW_REG_TRAPSTS 0x3
4072#define SQ_HW_REG_HW_ID 0x4
4073#define SQ_HW_REG_GPR_ALLOC 0x5
4074#define SQ_HW_REG_LDS_ALLOC 0x6
4075#define SQ_HW_REG_IB_STS 0x7
4076#define SQ_HW_REG_PC_LO 0x8
4077#define SQ_HW_REG_PC_HI 0x9
4078#define SQ_HW_REG_INST_DW0 0xa
4079#define SQ_HW_REG_INST_DW1 0xb
4080#define SQ_HW_REG_IB_DBG0 0xc
4081#define SQ_HW_REG_IB_DBG1 0xd
4082#define SQ_DPP_BOUND_OFF 0x0
4083#define SQ_DPP_BOUND_ZERO 0x1
4084#define SQ_R1 0x1
4085#define SQ_R2 0x2
4086#define SQ_R3 0x3
4087#define SQ_R4 0x4
4088#define SQ_R5 0x5
4089#define SQ_R6 0x6
4090#define SQ_R7 0x7
4091#define SQ_R8 0x8
4092#define SQ_R9 0x9
4093#define SQ_R10 0xa
4094#define SQ_R11 0xb
4095#define SQ_R12 0xc
4096#define SQ_R13 0xd
4097#define SQ_R14 0xe
4098#define SQ_R15 0xf
4099#define SQ_S_ADD_U32 0x0
4100#define SQ_S_SUB_U32 0x1
4101#define SQ_S_ADD_I32 0x2
4102#define SQ_S_SUB_I32 0x3
4103#define SQ_S_ADDC_U32 0x4
4104#define SQ_S_SUBB_U32 0x5
4105#define SQ_S_MIN_I32 0x6
4106#define SQ_S_MIN_U32 0x7
4107#define SQ_S_MAX_I32 0x8
4108#define SQ_S_MAX_U32 0x9
4109#define SQ_S_CSELECT_B32 0xa
4110#define SQ_S_CSELECT_B64 0xb
4111#define SQ_S_AND_B32 0xc
4112#define SQ_S_AND_B64 0xd
4113#define SQ_S_OR_B32 0xe
4114#define SQ_S_OR_B64 0xf
4115#define SQ_S_XOR_B32 0x10
4116#define SQ_S_XOR_B64 0x11
4117#define SQ_S_ANDN2_B32 0x12
4118#define SQ_S_ANDN2_B64 0x13
4119#define SQ_S_ORN2_B32 0x14
4120#define SQ_S_ORN2_B64 0x15
4121#define SQ_S_NAND_B32 0x16
4122#define SQ_S_NAND_B64 0x17
4123#define SQ_S_NOR_B32 0x18
4124#define SQ_S_NOR_B64 0x19
4125#define SQ_S_XNOR_B32 0x1a
4126#define SQ_S_XNOR_B64 0x1b
4127#define SQ_S_LSHL_B32 0x1c
4128#define SQ_S_LSHL_B64 0x1d
4129#define SQ_S_LSHR_B32 0x1e
4130#define SQ_S_LSHR_B64 0x1f
4131#define SQ_S_ASHR_I32 0x20
4132#define SQ_S_ASHR_I64 0x21
4133#define SQ_S_BFM_B32 0x22
4134#define SQ_S_BFM_B64 0x23
4135#define SQ_S_MUL_I32 0x24
4136#define SQ_S_BFE_U32 0x25
4137#define SQ_S_BFE_I32 0x26
4138#define SQ_S_BFE_U64 0x27
4139#define SQ_S_BFE_I64 0x28
4140#define SQ_S_CBRANCH_G_FORK 0x29
4141#define SQ_S_ABSDIFF_I32 0x2a
4142#define SQ_S_RFE_RESTORE_B64 0x2b
4143#define SQ_MSG_INTERRUPT 0x1
4144#define SQ_MSG_GS 0x2
4145#define SQ_MSG_GS_DONE 0x3
4146#define SQ_MSG_SAVEWAVE 0x4
4147#define SQ_MSG_SYSMSG 0xf
/* SX (shader export) blend-optimization selectors. */
typedef enum SX_BLEND_OPT {
 BLEND_OPT_PRESERVE_NONE_IGNORE_ALL = 0x0,
 BLEND_OPT_PRESERVE_ALL_IGNORE_NONE = 0x1,
 BLEND_OPT_PRESERVE_C1_IGNORE_C0 = 0x2,
 BLEND_OPT_PRESERVE_C0_IGNORE_C1 = 0x3,
 BLEND_OPT_PRESERVE_A1_IGNORE_A0 = 0x4,
 BLEND_OPT_PRESERVE_A0_IGNORE_A1 = 0x5,
 BLEND_OPT_PRESERVE_NONE_IGNORE_A0 = 0x6,
 BLEND_OPT_PRESERVE_NONE_IGNORE_NONE = 0x7,
} SX_BLEND_OPT;
/* SX blend combiner function selectors. */
typedef enum SX_OPT_COMB_FCN {
 OPT_COMB_NONE = 0x0,
 OPT_COMB_ADD = 0x1,
 OPT_COMB_SUBTRACT = 0x2,
 OPT_COMB_MIN = 0x3,
 OPT_COMB_MAX = 0x4,
 OPT_COMB_REVSUBTRACT = 0x5,
 OPT_COMB_BLEND_DISABLED = 0x6,
 OPT_COMB_SAFE_ADD = 0x7,
} SX_OPT_COMB_FCN;
/* Render-target export down-conversion formats. */
typedef enum SX_DOWNCONVERT_FORMAT {
 SX_RT_EXPORT_NO_CONVERSION = 0x0,
 SX_RT_EXPORT_32_R = 0x1,
 SX_RT_EXPORT_32_A = 0x2,
 SX_RT_EXPORT_10_11_11 = 0x3,
 SX_RT_EXPORT_2_10_10_10 = 0x4,
 SX_RT_EXPORT_8_8_8_8 = 0x5,
 SX_RT_EXPORT_5_6_5 = 0x6,
 SX_RT_EXPORT_1_5_5_5 = 0x7,
 SX_RT_EXPORT_4_4_4_4 = 0x8,
 SX_RT_EXPORT_16_16_GR = 0x9,
 SX_RT_EXPORT_16_16_AR = 0xa,
} SX_DOWNCONVERT_FORMAT;
/* Texture sampler border-color source. */
typedef enum TEX_BORDER_COLOR_TYPE {
 TEX_BorderColor_TransparentBlack = 0x0,
 TEX_BorderColor_OpaqueBlack = 0x1,
 TEX_BorderColor_OpaqueWhite = 0x2,
 TEX_BorderColor_Register = 0x3,
} TEX_BORDER_COLOR_TYPE;
/* Chroma-key behavior. */
typedef enum TEX_CHROMA_KEY {
 TEX_ChromaKey_Disabled = 0x0,
 TEX_ChromaKey_Kill = 0x1,
 TEX_ChromaKey_Blend = 0x2,
 TEX_ChromaKey_RESERVED_3 = 0x3,
} TEX_CHROMA_KEY;
/* Texture-coordinate clamp/wrap modes. */
typedef enum TEX_CLAMP {
 TEX_Clamp_Repeat = 0x0,
 TEX_Clamp_Mirror = 0x1,
 TEX_Clamp_ClampToLast = 0x2,
 TEX_Clamp_MirrorOnceToLast = 0x3,
 TEX_Clamp_ClampHalfToBorder = 0x4,
 TEX_Clamp_MirrorOnceHalfToBorder = 0x5,
 TEX_Clamp_ClampToBorder = 0x6,
 TEX_Clamp_MirrorOnceToBorder = 0x7,
} TEX_CLAMP;
/* Normalized vs. unnormalized texture coordinates. */
typedef enum TEX_COORD_TYPE {
 TEX_CoordType_Unnormalized = 0x0,
 TEX_CoordType_Normalized = 0x1,
} TEX_COORD_TYPE;
/* Depth-compare functions for shadow sampling. */
typedef enum TEX_DEPTH_COMPARE_FUNCTION {
 TEX_DepthCompareFunction_Never = 0x0,
 TEX_DepthCompareFunction_Less = 0x1,
 TEX_DepthCompareFunction_Equal = 0x2,
 TEX_DepthCompareFunction_LessEqual = 0x3,
 TEX_DepthCompareFunction_Greater = 0x4,
 TEX_DepthCompareFunction_NotEqual = 0x5,
 TEX_DepthCompareFunction_GreaterEqual = 0x6,
 TEX_DepthCompareFunction_Always = 0x7,
} TEX_DEPTH_COMPARE_FUNCTION;
/* Texture resource dimensionality. */
typedef enum TEX_DIM {
 TEX_Dim_1D = 0x0,
 TEX_Dim_2D = 0x1,
 TEX_Dim_3D = 0x2,
 TEX_Dim_CubeMap = 0x3,
 TEX_Dim_1DArray = 0x4,
 TEX_Dim_2DArray = 0x5,
 TEX_Dim_2D_MSAA = 0x6,
 TEX_Dim_2DArray_MSAA = 0x7,
} TEX_DIM;
/* Component signedness of a texture format. */
typedef enum TEX_FORMAT_COMP {
 TEX_FormatComp_Unsigned = 0x0,
 TEX_FormatComp_Signed = 0x1,
 TEX_FormatComp_UnsignedBiased = 0x2,
 TEX_FormatComp_RESERVED_3 = 0x3,
} TEX_FORMAT_COMP;
/* Maximum anisotropic filtering ratio. */
typedef enum TEX_MAX_ANISO_RATIO {
 TEX_MaxAnisoRatio_1to1 = 0x0,
 TEX_MaxAnisoRatio_2to1 = 0x1,
 TEX_MaxAnisoRatio_4to1 = 0x2,
 TEX_MaxAnisoRatio_8to1 = 0x3,
 TEX_MaxAnisoRatio_16to1 = 0x4,
 TEX_MaxAnisoRatio_RESERVED_5 = 0x5,
 TEX_MaxAnisoRatio_RESERVED_6 = 0x6,
 TEX_MaxAnisoRatio_RESERVED_7 = 0x7,
} TEX_MAX_ANISO_RATIO;
/* Mip-level filtering mode. */
typedef enum TEX_MIP_FILTER {
 TEX_MipFilter_None = 0x0,
 TEX_MipFilter_Point = 0x1,
 TEX_MipFilter_Linear = 0x2,
 TEX_MipFilter_Point_Aniso_Adj = 0x3,
} TEX_MIP_FILTER;
/* Memory-request burst size for texture fetches. */
typedef enum TEX_REQUEST_SIZE {
 TEX_RequestSize_32B = 0x0,
 TEX_RequestSize_64B = 0x1,
 TEX_RequestSize_128B = 0x2,
 TEX_RequestSize_2X64B = 0x3,
} TEX_REQUEST_SIZE;
/* Sampler-descriptor validity flag. */
typedef enum TEX_SAMPLER_TYPE {
 TEX_SamplerType_Invalid = 0x0,
 TEX_SamplerType_Valid = 0x1,
} TEX_SAMPLER_TYPE;
/* XY (in-plane) filtering mode. */
typedef enum TEX_XY_FILTER {
 TEX_XYFilter_Point = 0x0,
 TEX_XYFilter_Linear = 0x1,
 TEX_XYFilter_AnisoPoint = 0x2,
 TEX_XYFilter_AnisoLinear = 0x3,
} TEX_XY_FILTER;
/* Z (volume-depth) filtering mode. */
typedef enum TEX_Z_FILTER {
 TEX_ZFilter_None = 0x0,
 TEX_ZFilter_Point = 0x1,
 TEX_ZFilter_Linear = 0x2,
 TEX_ZFilter_RESERVED_3 = 0x3,
} TEX_Z_FILTER;
/* Out-of-range vertex-fetch clamp behavior. */
typedef enum VTX_CLAMP {
 VTX_Clamp_ClampToZero = 0x0,
 VTX_Clamp_ClampToNAN = 0x1,
} VTX_CLAMP;
/* Vertex-fetch addressing mode. */
typedef enum VTX_FETCH_TYPE {
 VTX_FetchType_VertexData = 0x0,
 VTX_FetchType_InstanceData = 0x1,
 VTX_FetchType_NoIndexOffset = 0x2,
 VTX_FetchType_RESERVED_3 = 0x3,
} VTX_FETCH_TYPE;
/* Signedness applied to all fetched components. */
typedef enum VTX_FORMAT_COMP_ALL {
 VTX_FormatCompAll_Unsigned = 0x0,
 VTX_FormatCompAll_Signed = 0x1,
} VTX_FORMAT_COMP_ALL;
/* Memory-request size for vertex fetches. */
typedef enum VTX_MEM_REQUEST_SIZE {
 VTX_MemRequestSize_32B = 0x0,
 VTX_MemRequestSize_64B = 0x1,
} VTX_MEM_REQUEST_SIZE;
4289typedef enum TVX_DATA_FORMAT {
4290 TVX_FMT_INVALID = 0x0,
4291 TVX_FMT_8 = 0x1,
4292 TVX_FMT_4_4 = 0x2,
4293 TVX_FMT_3_3_2 = 0x3,
4294 TVX_FMT_RESERVED_4 = 0x4,
4295 TVX_FMT_16 = 0x5,
4296 TVX_FMT_16_FLOAT = 0x6,
4297 TVX_FMT_8_8 = 0x7,
4298 TVX_FMT_5_6_5 = 0x8,
4299 TVX_FMT_6_5_5 = 0x9,
4300 TVX_FMT_1_5_5_5 = 0xa,
4301 TVX_FMT_4_4_4_4 = 0xb,
4302 TVX_FMT_5_5_5_1 = 0xc,
4303 TVX_FMT_32 = 0xd,
4304 TVX_FMT_32_FLOAT = 0xe,
4305 TVX_FMT_16_16 = 0xf,
4306 TVX_FMT_16_16_FLOAT = 0x10,
4307 TVX_FMT_8_24 = 0x11,
4308 TVX_FMT_8_24_FLOAT = 0x12,
4309 TVX_FMT_24_8 = 0x13,
4310 TVX_FMT_24_8_FLOAT = 0x14,
4311 TVX_FMT_10_11_11 = 0x15,
4312 TVX_FMT_10_11_11_FLOAT = 0x16,
4313 TVX_FMT_11_11_10 = 0x17,
4314 TVX_FMT_11_11_10_FLOAT = 0x18,
4315 TVX_FMT_2_10_10_10 = 0x19,
4316 TVX_FMT_8_8_8_8 = 0x1a,
4317 TVX_FMT_10_10_10_2 = 0x1b,
4318 TVX_FMT_X24_8_32_FLOAT = 0x1c,
4319 TVX_FMT_32_32 = 0x1d,
4320 TVX_FMT_32_32_FLOAT = 0x1e,
4321 TVX_FMT_16_16_16_16 = 0x1f,
4322 TVX_FMT_16_16_16_16_FLOAT = 0x20,
4323 TVX_FMT_RESERVED_33 = 0x21,
4324 TVX_FMT_32_32_32_32 = 0x22,
4325 TVX_FMT_32_32_32_32_FLOAT = 0x23,
4326 TVX_FMT_RESERVED_36 = 0x24,
4327 TVX_FMT_1 = 0x25,
4328 TVX_FMT_1_REVERSED = 0x26,
4329 TVX_FMT_GB_GR = 0x27,
4330 TVX_FMT_BG_RG = 0x28,
4331 TVX_FMT_32_AS_8 = 0x29,
4332 TVX_FMT_32_AS_8_8 = 0x2a,
4333 TVX_FMT_5_9_9_9_SHAREDEXP = 0x2b,
4334 TVX_FMT_8_8_8 = 0x2c,
4335 TVX_FMT_16_16_16 = 0x2d,
4336 TVX_FMT_16_16_16_FLOAT = 0x2e,
4337 TVX_FMT_32_32_32 = 0x2f,
4338 TVX_FMT_32_32_32_FLOAT = 0x30,
4339 TVX_FMT_BC1 = 0x31,
4340 TVX_FMT_BC2 = 0x32,
4341 TVX_FMT_BC3 = 0x33,
4342 TVX_FMT_BC4 = 0x34,
4343 TVX_FMT_BC5 = 0x35,
4344 TVX_FMT_APC0 = 0x36,
4345 TVX_FMT_APC1 = 0x37,
4346 TVX_FMT_APC2 = 0x38,
4347 TVX_FMT_APC3 = 0x39,
4348 TVX_FMT_APC4 = 0x3a,
4349 TVX_FMT_APC5 = 0x3b,
4350 TVX_FMT_APC6 = 0x3c,
4351 TVX_FMT_APC7 = 0x3d,
4352 TVX_FMT_CTX1 = 0x3e,
4353 TVX_FMT_RESERVED_63 = 0x3f,
4354} TVX_DATA_FORMAT;
4355typedef enum TVX_DST_SEL {
4356 TVX_DstSel_X = 0x0,
4357 TVX_DstSel_Y = 0x1,
4358 TVX_DstSel_Z = 0x2,
4359 TVX_DstSel_W = 0x3,
4360 TVX_DstSel_0f = 0x4,
4361 TVX_DstSel_1f = 0x5,
4362 TVX_DstSel_RESERVED_6 = 0x6,
4363 TVX_DstSel_Mask = 0x7,
4364} TVX_DST_SEL;
4365typedef enum TVX_ENDIAN_SWAP {
4366 TVX_EndianSwap_None = 0x0,
4367 TVX_EndianSwap_8in16 = 0x1,
4368 TVX_EndianSwap_8in32 = 0x2,
4369 TVX_EndianSwap_8in64 = 0x3,
4370} TVX_ENDIAN_SWAP;
/*
 * TVX_INST - texture/vertex fetch instruction opcodes.
 * 0x0-0xf cover vertex fetch, loads, resource queries, and gradient
 * setup; 0x10-0x1f are the sample/gather family (suffixes: _L = explicit
 * LOD, _LB = LOD bias, _LZ = LOD zero, _G = gradients, _C = shadow
 * compare, _O = offsets).
 */
typedef enum TVX_INST {
	TVX_Inst_NormalVertexFetch = 0x0,
	TVX_Inst_SemanticVertexFetch = 0x1,
	TVX_Inst_RESERVED_2 = 0x2,
	TVX_Inst_LD = 0x3,
	TVX_Inst_GetTextureResInfo = 0x4,
	TVX_Inst_GetNumberOfSamples = 0x5,
	TVX_Inst_GetLOD = 0x6,
	TVX_Inst_GetGradientsH = 0x7,
	TVX_Inst_GetGradientsV = 0x8,
	TVX_Inst_SetTextureOffsets = 0x9,
	TVX_Inst_KeepGradients = 0xa,
	TVX_Inst_SetGradientsH = 0xb,
	TVX_Inst_SetGradientsV = 0xc,
	TVX_Inst_Pass = 0xd,
	TVX_Inst_GetBufferResInfo = 0xe,
	TVX_Inst_RESERVED_15 = 0xf,
	TVX_Inst_Sample = 0x10,
	TVX_Inst_Sample_L = 0x11,
	TVX_Inst_Sample_LB = 0x12,
	TVX_Inst_Sample_LZ = 0x13,
	TVX_Inst_Sample_G = 0x14,
	TVX_Inst_Gather4 = 0x15,
	TVX_Inst_Sample_G_LB = 0x16,
	TVX_Inst_Gather4_O = 0x17,
	TVX_Inst_Sample_C = 0x18,
	TVX_Inst_Sample_C_L = 0x19,
	TVX_Inst_Sample_C_LB = 0x1a,
	TVX_Inst_Sample_C_LZ = 0x1b,
	TVX_Inst_Sample_C_G = 0x1c,
	TVX_Inst_Gather4_C = 0x1d,
	TVX_Inst_Sample_C_G_LB = 0x1e,
	TVX_Inst_Gather4_C_O = 0x1f,
} TVX_INST;
/*
 * TVX_NUM_FORMAT_ALL - numeric interpretation of fetched texel/vertex
 * data: normalized, raw integer, or scaled.
 */
typedef enum TVX_NUM_FORMAT_ALL {
	TVX_NumFormatAll_Norm = 0x0,
	TVX_NumFormatAll_Int = 0x1,
	TVX_NumFormatAll_Scaled = 0x2,
	TVX_NumFormatAll_RESERVED_3 = 0x3,
} TVX_NUM_FORMAT_ALL;
/*
 * TVX_SRC_SEL - source component selector: one of the four data
 * channels, or the constants 0.0f / 1.0f.
 */
typedef enum TVX_SRC_SEL {
	TVX_SrcSel_X = 0x0,
	TVX_SrcSel_Y = 0x1,
	TVX_SrcSel_Z = 0x2,
	TVX_SrcSel_W = 0x3,
	TVX_SrcSel_0f = 0x4,	/* constant 0.0f */
	TVX_SrcSel_1f = 0x5,	/* constant 1.0f */
} TVX_SRC_SEL;
/* TVX_SRF_MODE_ALL - signed-range fetch mode selector (two encodings). */
typedef enum TVX_SRF_MODE_ALL {
	TVX_SRFModeAll_ZCMO = 0x0,
	TVX_SRFModeAll_NZ = 0x1,
} TVX_SRF_MODE_ALL;
/*
 * TVX_TYPE - resource descriptor validity/kind: invalid vs. valid,
 * texture resource vs. vertex buffer.
 */
typedef enum TVX_TYPE {
	TVX_Type_InvalidTextureResource = 0x0,
	TVX_Type_InvalidVertexBuffer = 0x1,
	TVX_Type_ValidTextureResource = 0x2,
	TVX_Type_ValidVertexBuffer = 0x3,
} TVX_TYPE;
/*
 * TC_OP_MASKS - bit masks for decoding a TC_OP opcode (see TC_OP):
 * bit 0x8 marks the flush-denorm variants, bit 0x20 the 64-bit
 * variants, and bit 0x40 the no-return (non-RTN) variants.
 *
 * TC_OP_MASK_FLUSH_DENROM misspells "DENORM"; the identifier is kept
 * as-is for source compatibility with existing users, and a correctly
 * spelled alias with the same value is provided alongside it.
 */
typedef enum TC_OP_MASKS {
	TC_OP_MASK_FLUSH_DENROM = 0x8,	/* historical typo, kept for compat */
	TC_OP_MASK_FLUSH_DENORM = 0x8,	/* preferred, correctly spelled alias */
	TC_OP_MASK_64 = 0x20,
	TC_OP_MASK_NO_RTN = 0x40,
} TC_OP_MASKS;
/*
 * TC_OP - texture cache operation opcodes.
 * The encoding is four banks of 32 (consistent with TC_OP_MASKS):
 *   0x00-0x1f: 32-bit atomics with return (RTN)
 *   0x20-0x3f: 64-bit atomics with return
 *   0x40-0x5f: 32-bit atomics without return
 *   0x60-0x7f: 64-bit atomics without return
 * Within each bank, +0x8 offsets select the flush-denorm variants.
 * Cache-maintenance ops (WBINVL*, WBL2/INVL2) occupy slots within
 * the banks.
 */
typedef enum TC_OP {
	/* 0x00-0x1f: 32-bit, with return */
	TC_OP_READ = 0x0,
	TC_OP_ATOMIC_FCMPSWAP_RTN_32 = 0x1,
	TC_OP_ATOMIC_FMIN_RTN_32 = 0x2,
	TC_OP_ATOMIC_FMAX_RTN_32 = 0x3,
	TC_OP_RESERVED_FOP_RTN_32_0 = 0x4,
	TC_OP_RESERVED_FOP_RTN_32_1 = 0x5,
	TC_OP_RESERVED_FOP_RTN_32_2 = 0x6,
	TC_OP_ATOMIC_SWAP_RTN_32 = 0x7,
	TC_OP_ATOMIC_CMPSWAP_RTN_32 = 0x8,
	TC_OP_ATOMIC_FCMPSWAP_FLUSH_DENORM_RTN_32 = 0x9,
	TC_OP_ATOMIC_FMIN_FLUSH_DENORM_RTN_32 = 0xa,
	TC_OP_ATOMIC_FMAX_FLUSH_DENORM_RTN_32 = 0xb,
	TC_OP_RESERVED_FOP_FLUSH_DENORM_RTN_32_0 = 0xc,
	TC_OP_RESERVED_FOP_FLUSH_DENORM_RTN_32_1 = 0xd,
	TC_OP_RESERVED_FOP_FLUSH_DENORM_RTN_32_2 = 0xe,
	TC_OP_ATOMIC_ADD_RTN_32 = 0xf,
	TC_OP_ATOMIC_SUB_RTN_32 = 0x10,
	TC_OP_ATOMIC_SMIN_RTN_32 = 0x11,
	TC_OP_ATOMIC_UMIN_RTN_32 = 0x12,
	TC_OP_ATOMIC_SMAX_RTN_32 = 0x13,
	TC_OP_ATOMIC_UMAX_RTN_32 = 0x14,
	TC_OP_ATOMIC_AND_RTN_32 = 0x15,
	TC_OP_ATOMIC_OR_RTN_32 = 0x16,
	TC_OP_ATOMIC_XOR_RTN_32 = 0x17,
	TC_OP_ATOMIC_INC_RTN_32 = 0x18,
	TC_OP_ATOMIC_DEC_RTN_32 = 0x19,
	TC_OP_WBINVL1_VOL = 0x1a,
	TC_OP_WBINVL1_SD = 0x1b,
	TC_OP_RESERVED_NON_FLOAT_RTN_32_0 = 0x1c,
	TC_OP_RESERVED_NON_FLOAT_RTN_32_1 = 0x1d,
	TC_OP_RESERVED_NON_FLOAT_RTN_32_2 = 0x1e,
	TC_OP_RESERVED_NON_FLOAT_RTN_32_3 = 0x1f,
	/* 0x20-0x3f: 64-bit, with return */
	TC_OP_WRITE = 0x20,
	TC_OP_ATOMIC_FCMPSWAP_RTN_64 = 0x21,
	TC_OP_ATOMIC_FMIN_RTN_64 = 0x22,
	TC_OP_ATOMIC_FMAX_RTN_64 = 0x23,
	TC_OP_RESERVED_FOP_RTN_64_0 = 0x24,
	TC_OP_RESERVED_FOP_RTN_64_1 = 0x25,
	TC_OP_RESERVED_FOP_RTN_64_2 = 0x26,
	TC_OP_ATOMIC_SWAP_RTN_64 = 0x27,
	TC_OP_ATOMIC_CMPSWAP_RTN_64 = 0x28,
	TC_OP_ATOMIC_FCMPSWAP_FLUSH_DENORM_RTN_64 = 0x29,
	TC_OP_ATOMIC_FMIN_FLUSH_DENORM_RTN_64 = 0x2a,
	TC_OP_ATOMIC_FMAX_FLUSH_DENORM_RTN_64 = 0x2b,
	TC_OP_WBINVL2_SD = 0x2c,
	TC_OP_RESERVED_FOP_FLUSH_DENORM_RTN_64_0 = 0x2d,
	TC_OP_RESERVED_FOP_FLUSH_DENORM_RTN_64_1 = 0x2e,
	TC_OP_ATOMIC_ADD_RTN_64 = 0x2f,
	TC_OP_ATOMIC_SUB_RTN_64 = 0x30,
	TC_OP_ATOMIC_SMIN_RTN_64 = 0x31,
	TC_OP_ATOMIC_UMIN_RTN_64 = 0x32,
	TC_OP_ATOMIC_SMAX_RTN_64 = 0x33,
	TC_OP_ATOMIC_UMAX_RTN_64 = 0x34,
	TC_OP_ATOMIC_AND_RTN_64 = 0x35,
	TC_OP_ATOMIC_OR_RTN_64 = 0x36,
	TC_OP_ATOMIC_XOR_RTN_64 = 0x37,
	TC_OP_ATOMIC_INC_RTN_64 = 0x38,
	TC_OP_ATOMIC_DEC_RTN_64 = 0x39,
	TC_OP_WBL2_NC = 0x3a,
	TC_OP_RESERVED_NON_FLOAT_RTN_64_0 = 0x3b,
	TC_OP_RESERVED_NON_FLOAT_RTN_64_1 = 0x3c,
	TC_OP_RESERVED_NON_FLOAT_RTN_64_2 = 0x3d,
	TC_OP_RESERVED_NON_FLOAT_RTN_64_3 = 0x3e,
	TC_OP_RESERVED_NON_FLOAT_RTN_64_4 = 0x3f,
	/* 0x40-0x5f: 32-bit, no return */
	TC_OP_WBINVL1 = 0x40,
	TC_OP_ATOMIC_FCMPSWAP_32 = 0x41,
	TC_OP_ATOMIC_FMIN_32 = 0x42,
	TC_OP_ATOMIC_FMAX_32 = 0x43,
	TC_OP_RESERVED_FOP_32_0 = 0x44,
	TC_OP_RESERVED_FOP_32_1 = 0x45,
	TC_OP_RESERVED_FOP_32_2 = 0x46,
	TC_OP_ATOMIC_SWAP_32 = 0x47,
	TC_OP_ATOMIC_CMPSWAP_32 = 0x48,
	TC_OP_ATOMIC_FCMPSWAP_FLUSH_DENORM_32 = 0x49,
	TC_OP_ATOMIC_FMIN_FLUSH_DENORM_32 = 0x4a,
	TC_OP_ATOMIC_FMAX_FLUSH_DENORM_32 = 0x4b,
	TC_OP_RESERVED_FOP_FLUSH_DENORM_32_0 = 0x4c,
	TC_OP_RESERVED_FOP_FLUSH_DENORM_32_1 = 0x4d,
	TC_OP_RESERVED_FOP_FLUSH_DENORM_32_2 = 0x4e,
	TC_OP_ATOMIC_ADD_32 = 0x4f,
	TC_OP_ATOMIC_SUB_32 = 0x50,
	TC_OP_ATOMIC_SMIN_32 = 0x51,
	TC_OP_ATOMIC_UMIN_32 = 0x52,
	TC_OP_ATOMIC_SMAX_32 = 0x53,
	TC_OP_ATOMIC_UMAX_32 = 0x54,
	TC_OP_ATOMIC_AND_32 = 0x55,
	TC_OP_ATOMIC_OR_32 = 0x56,
	TC_OP_ATOMIC_XOR_32 = 0x57,
	TC_OP_ATOMIC_INC_32 = 0x58,
	TC_OP_ATOMIC_DEC_32 = 0x59,
	TC_OP_INVL2_NC = 0x5a,
	TC_OP_RESERVED_NON_FLOAT_32_0 = 0x5b,
	TC_OP_RESERVED_NON_FLOAT_32_1 = 0x5c,
	TC_OP_RESERVED_NON_FLOAT_32_2 = 0x5d,
	TC_OP_RESERVED_NON_FLOAT_32_3 = 0x5e,
	TC_OP_RESERVED_NON_FLOAT_32_4 = 0x5f,
	/* 0x60-0x7f: 64-bit, no return */
	TC_OP_WBINVL2 = 0x60,
	TC_OP_ATOMIC_FCMPSWAP_64 = 0x61,
	TC_OP_ATOMIC_FMIN_64 = 0x62,
	TC_OP_ATOMIC_FMAX_64 = 0x63,
	TC_OP_RESERVED_FOP_64_0 = 0x64,
	TC_OP_RESERVED_FOP_64_1 = 0x65,
	TC_OP_RESERVED_FOP_64_2 = 0x66,
	TC_OP_ATOMIC_SWAP_64 = 0x67,
	TC_OP_ATOMIC_CMPSWAP_64 = 0x68,
	TC_OP_ATOMIC_FCMPSWAP_FLUSH_DENORM_64 = 0x69,
	TC_OP_ATOMIC_FMIN_FLUSH_DENORM_64 = 0x6a,
	TC_OP_ATOMIC_FMAX_FLUSH_DENORM_64 = 0x6b,
	TC_OP_RESERVED_FOP_FLUSH_DENORM_64_0 = 0x6c,
	TC_OP_RESERVED_FOP_FLUSH_DENORM_64_1 = 0x6d,
	TC_OP_RESERVED_FOP_FLUSH_DENORM_64_2 = 0x6e,
	TC_OP_ATOMIC_ADD_64 = 0x6f,
	TC_OP_ATOMIC_SUB_64 = 0x70,
	TC_OP_ATOMIC_SMIN_64 = 0x71,
	TC_OP_ATOMIC_UMIN_64 = 0x72,
	TC_OP_ATOMIC_SMAX_64 = 0x73,
	TC_OP_ATOMIC_UMAX_64 = 0x74,
	TC_OP_ATOMIC_AND_64 = 0x75,
	TC_OP_ATOMIC_OR_64 = 0x76,
	TC_OP_ATOMIC_XOR_64 = 0x77,
	TC_OP_ATOMIC_INC_64 = 0x78,
	TC_OP_ATOMIC_DEC_64 = 0x79,
	TC_OP_WBINVL2_NC = 0x7a,
	TC_OP_RESERVED_NON_FLOAT_64_0 = 0x7b,
	TC_OP_RESERVED_NON_FLOAT_64_1 = 0x7c,
	TC_OP_RESERVED_NON_FLOAT_64_2 = 0x7d,
	TC_OP_RESERVED_NON_FLOAT_64_3 = 0x7e,
	TC_OP_RESERVED_NON_FLOAT_64_4 = 0x7f,
} TC_OP;
/* TC_CHUB_REQ_CREDITS_ENUM - number of TC-to-CHUB request credits. */
typedef enum TC_CHUB_REQ_CREDITS_ENUM {
	TC_CHUB_REQ_CREDITS = 0x10,
} TC_CHUB_REQ_CREDITS_ENUM;
/* CHUB_TC_RET_CREDITS_ENUM - number of CHUB-to-TC return credits. */
typedef enum CHUB_TC_RET_CREDITS_ENUM {
	CHUB_TC_RET_CREDITS = 0x20,
} CHUB_TC_RET_CREDITS_ENUM;
/*
 * TC_NACKS - negative-acknowledge codes returned by the texture cache:
 * no fault, page fault, protection fault, or data error.
 */
typedef enum TC_NACKS {
	TC_NACK_NO_FAULT = 0x0,
	TC_NACK_PAGE_FAULT = 0x1,
	TC_NACK_PROTECTION_FAULT = 0x2,
	TC_NACK_DATA_ERROR = 0x3,
} TC_NACKS;
/*
 * TCC_PERF_SEL - performance counter event selects for the TCC (L2
 * texture cache) block. 0x00-0x6b are cache/memory-controller events;
 * 0x80-0xff select per-client request counters (client N at 0x80 + N).
 */
typedef enum TCC_PERF_SEL {
	TCC_PERF_SEL_NONE = 0x0,
	TCC_PERF_SEL_CYCLE = 0x1,
	TCC_PERF_SEL_BUSY = 0x2,
	TCC_PERF_SEL_REQ = 0x3,
	TCC_PERF_SEL_STREAMING_REQ = 0x4,
	TCC_PERF_SEL_EXE_REQ = 0x5,
	TCC_PERF_SEL_COMPRESSED_REQ = 0x6,
	TCC_PERF_SEL_COMPRESSED_0_REQ = 0x7,
	TCC_PERF_SEL_METADATA_REQ = 0x8,
	TCC_PERF_SEL_NC_VIRTUAL_REQ = 0x9,
	TCC_PERF_SEL_NC_PHYSICAL_REQ = 0xa,
	TCC_PERF_SEL_UC_VIRTUAL_REQ = 0xb,
	TCC_PERF_SEL_UC_PHYSICAL_REQ = 0xc,
	TCC_PERF_SEL_CC_PHYSICAL_REQ = 0xd,
	TCC_PERF_SEL_PROBE = 0xe,
	TCC_PERF_SEL_READ = 0xf,
	TCC_PERF_SEL_WRITE = 0x10,
	TCC_PERF_SEL_ATOMIC = 0x11,
	TCC_PERF_SEL_HIT = 0x12,
	TCC_PERF_SEL_MISS = 0x13,
	TCC_PERF_SEL_DEWRITE_ALLOCATE_HIT = 0x14,
	TCC_PERF_SEL_FULLY_WRITTEN_HIT = 0x15,
	TCC_PERF_SEL_WRITEBACK = 0x16,
	TCC_PERF_SEL_LATENCY_FIFO_FULL = 0x17,
	TCC_PERF_SEL_SRC_FIFO_FULL = 0x18,
	TCC_PERF_SEL_HOLE_FIFO_FULL = 0x19,
	TCC_PERF_SEL_MC_WRREQ = 0x1a,
	TCC_PERF_SEL_MC_WRREQ_UNCACHED = 0x1b,
	TCC_PERF_SEL_MC_WRREQ_STALL = 0x1c,
	TCC_PERF_SEL_MC_WRREQ_CREDIT_STALL = 0x1d,
	TCC_PERF_SEL_MC_WRREQ_MC_HALT_STALL = 0x1e,
	TCC_PERF_SEL_TOO_MANY_MC_WRREQS_STALL = 0x1f,
	TCC_PERF_SEL_MC_WRREQ_LEVEL = 0x20,
	TCC_PERF_SEL_MC_ATOMIC = 0x21,
	TCC_PERF_SEL_MC_ATOMIC_LEVEL = 0x22,
	TCC_PERF_SEL_MC_RDREQ = 0x23,
	TCC_PERF_SEL_MC_RDREQ_UNCACHED = 0x24,
	TCC_PERF_SEL_MC_RDREQ_MDC = 0x25,
	TCC_PERF_SEL_MC_RDREQ_COMPRESSED = 0x26,
	TCC_PERF_SEL_MC_RDREQ_CREDIT_STALL = 0x27,
	TCC_PERF_SEL_MC_RDREQ_MC_HALT_STALL = 0x28,
	TCC_PERF_SEL_MC_RDREQ_LEVEL = 0x29,
	TCC_PERF_SEL_TAG_STALL = 0x2a,
	TCC_PERF_SEL_TAG_WRITEBACK_FIFO_FULL_STALL = 0x2b,
	TCC_PERF_SEL_TAG_MISS_NOTHING_REPLACEABLE_STALL = 0x2c,
	TCC_PERF_SEL_TAG_UNCACHED_WRITE_ATOMIC_FIFO_FULL_STALL = 0x2d,
	TCC_PERF_SEL_TAG_NO_UNCACHED_WRITE_ATOMIC_ENTRIES_STALL = 0x2e,
	TCC_PERF_SEL_TAG_PROBE_STALL = 0x2f,
	TCC_PERF_SEL_TAG_PROBE_FILTER_STALL = 0x30,
	TCC_PERF_SEL_READ_RETURN_TIMEOUT = 0x31,
	TCC_PERF_SEL_WRITEBACK_READ_TIMEOUT = 0x32,
	TCC_PERF_SEL_READ_RETURN_FULL_BUBBLE = 0x33,
	TCC_PERF_SEL_BUBBLE = 0x34,
	TCC_PERF_SEL_RETURN_ACK = 0x35,
	TCC_PERF_SEL_RETURN_DATA = 0x36,
	TCC_PERF_SEL_RETURN_HOLE = 0x37,
	TCC_PERF_SEL_RETURN_ACK_HOLE = 0x38,
	TCC_PERF_SEL_IB_REQ = 0x39,
	TCC_PERF_SEL_IB_STALL = 0x3a,
	TCC_PERF_SEL_IB_TAG_STALL = 0x3b,
	TCC_PERF_SEL_IB_MDC_STALL = 0x3c,
	TCC_PERF_SEL_TCA_LEVEL = 0x3d,
	TCC_PERF_SEL_HOLE_LEVEL = 0x3e,
	TCC_PERF_SEL_MC_RDRET_NACK = 0x3f,
	TCC_PERF_SEL_MC_WRRET_NACK = 0x40,
	TCC_PERF_SEL_NORMAL_WRITEBACK = 0x41,
	TCC_PERF_SEL_TC_OP_WBL2_NC_WRITEBACK = 0x42,
	TCC_PERF_SEL_TC_OP_WBINVL2_WRITEBACK = 0x43,
	TCC_PERF_SEL_TC_OP_WBINVL2_NC_WRITEBACK = 0x44,
	TCC_PERF_SEL_TC_OP_WBINVL2_SD_WRITEBACK = 0x45,
	TCC_PERF_SEL_ALL_TC_OP_WB_WRITEBACK = 0x46,
	TCC_PERF_SEL_NORMAL_EVICT = 0x47,
	TCC_PERF_SEL_TC_OP_WBL2_NC_EVICT = 0x48,
	TCC_PERF_SEL_TC_OP_INVL2_NC_EVICT = 0x49,
	TCC_PERF_SEL_TC_OP_WBINVL2_EVICT = 0x4a,
	TCC_PERF_SEL_TC_OP_WBINVL2_NC_EVICT = 0x4b,
	TCC_PERF_SEL_TC_OP_WBINVL2_SD_EVICT = 0x4c,
	TCC_PERF_SEL_ALL_TC_OP_INV_EVICT = 0x4d,
	TCC_PERF_SEL_PROBE_EVICT = 0x4e,
	TCC_PERF_SEL_TC_OP_WBL2_NC_CYCLE = 0x4f,
	TCC_PERF_SEL_TC_OP_INVL2_NC_CYCLE = 0x50,
	TCC_PERF_SEL_TC_OP_WBINVL2_CYCLE = 0x51,
	TCC_PERF_SEL_TC_OP_WBINVL2_NC_CYCLE = 0x52,
	TCC_PERF_SEL_TC_OP_WBINVL2_SD_CYCLE = 0x53,
	TCC_PERF_SEL_ALL_TC_OP_WB_OR_INV_CYCLE = 0x54,
	TCC_PERF_SEL_TC_OP_WBL2_NC_START = 0x55,
	TCC_PERF_SEL_TC_OP_INVL2_NC_START = 0x56,
	TCC_PERF_SEL_TC_OP_WBINVL2_START = 0x57,
	TCC_PERF_SEL_TC_OP_WBINVL2_NC_START = 0x58,
	TCC_PERF_SEL_TC_OP_WBINVL2_SD_START = 0x59,
	TCC_PERF_SEL_ALL_TC_OP_WB_OR_INV_START = 0x5a,
	TCC_PERF_SEL_TC_OP_WBL2_NC_FINISH = 0x5b,
	TCC_PERF_SEL_TC_OP_INVL2_NC_FINISH = 0x5c,
	TCC_PERF_SEL_TC_OP_WBINVL2_FINISH = 0x5d,
	TCC_PERF_SEL_TC_OP_WBINVL2_NC_FINISH = 0x5e,
	TCC_PERF_SEL_TC_OP_WBINVL2_SD_FINISH = 0x5f,
	TCC_PERF_SEL_ALL_TC_OP_WB_OR_INV_FINISH = 0x60,
	TCC_PERF_SEL_MDC_REQ = 0x61,
	TCC_PERF_SEL_MDC_LEVEL = 0x62,
	TCC_PERF_SEL_MDC_TAG_HIT = 0x63,
	TCC_PERF_SEL_MDC_SECTOR_HIT = 0x64,
	TCC_PERF_SEL_MDC_SECTOR_MISS = 0x65,
	TCC_PERF_SEL_MDC_TAG_STALL = 0x66,
	TCC_PERF_SEL_MDC_TAG_REPLACEMENT_LINE_IN_USE_STALL = 0x67,
	TCC_PERF_SEL_MDC_TAG_DESECTORIZATION_FIFO_FULL_STALL = 0x68,
	TCC_PERF_SEL_MDC_TAG_WAITING_FOR_INVALIDATE_COMPLETION_STALL = 0x69,
	TCC_PERF_SEL_PROBE_FILTER_DISABLE_TRANSITION = 0x6a,
	TCC_PERF_SEL_PROBE_FILTER_DISABLED = 0x6b,
	/* per-client request counters: client N at 0x80 + N */
	TCC_PERF_SEL_CLIENT0_REQ = 0x80,
	TCC_PERF_SEL_CLIENT1_REQ = 0x81,
	TCC_PERF_SEL_CLIENT2_REQ = 0x82,
	TCC_PERF_SEL_CLIENT3_REQ = 0x83,
	TCC_PERF_SEL_CLIENT4_REQ = 0x84,
	TCC_PERF_SEL_CLIENT5_REQ = 0x85,
	TCC_PERF_SEL_CLIENT6_REQ = 0x86,
	TCC_PERF_SEL_CLIENT7_REQ = 0x87,
	TCC_PERF_SEL_CLIENT8_REQ = 0x88,
	TCC_PERF_SEL_CLIENT9_REQ = 0x89,
	TCC_PERF_SEL_CLIENT10_REQ = 0x8a,
	TCC_PERF_SEL_CLIENT11_REQ = 0x8b,
	TCC_PERF_SEL_CLIENT12_REQ = 0x8c,
	TCC_PERF_SEL_CLIENT13_REQ = 0x8d,
	TCC_PERF_SEL_CLIENT14_REQ = 0x8e,
	TCC_PERF_SEL_CLIENT15_REQ = 0x8f,
	TCC_PERF_SEL_CLIENT16_REQ = 0x90,
	TCC_PERF_SEL_CLIENT17_REQ = 0x91,
	TCC_PERF_SEL_CLIENT18_REQ = 0x92,
	TCC_PERF_SEL_CLIENT19_REQ = 0x93,
	TCC_PERF_SEL_CLIENT20_REQ = 0x94,
	TCC_PERF_SEL_CLIENT21_REQ = 0x95,
	TCC_PERF_SEL_CLIENT22_REQ = 0x96,
	TCC_PERF_SEL_CLIENT23_REQ = 0x97,
	TCC_PERF_SEL_CLIENT24_REQ = 0x98,
	TCC_PERF_SEL_CLIENT25_REQ = 0x99,
	TCC_PERF_SEL_CLIENT26_REQ = 0x9a,
	TCC_PERF_SEL_CLIENT27_REQ = 0x9b,
	TCC_PERF_SEL_CLIENT28_REQ = 0x9c,
	TCC_PERF_SEL_CLIENT29_REQ = 0x9d,
	TCC_PERF_SEL_CLIENT30_REQ = 0x9e,
	TCC_PERF_SEL_CLIENT31_REQ = 0x9f,
	TCC_PERF_SEL_CLIENT32_REQ = 0xa0,
	TCC_PERF_SEL_CLIENT33_REQ = 0xa1,
	TCC_PERF_SEL_CLIENT34_REQ = 0xa2,
	TCC_PERF_SEL_CLIENT35_REQ = 0xa3,
	TCC_PERF_SEL_CLIENT36_REQ = 0xa4,
	TCC_PERF_SEL_CLIENT37_REQ = 0xa5,
	TCC_PERF_SEL_CLIENT38_REQ = 0xa6,
	TCC_PERF_SEL_CLIENT39_REQ = 0xa7,
	TCC_PERF_SEL_CLIENT40_REQ = 0xa8,
	TCC_PERF_SEL_CLIENT41_REQ = 0xa9,
	TCC_PERF_SEL_CLIENT42_REQ = 0xaa,
	TCC_PERF_SEL_CLIENT43_REQ = 0xab,
	TCC_PERF_SEL_CLIENT44_REQ = 0xac,
	TCC_PERF_SEL_CLIENT45_REQ = 0xad,
	TCC_PERF_SEL_CLIENT46_REQ = 0xae,
	TCC_PERF_SEL_CLIENT47_REQ = 0xaf,
	TCC_PERF_SEL_CLIENT48_REQ = 0xb0,
	TCC_PERF_SEL_CLIENT49_REQ = 0xb1,
	TCC_PERF_SEL_CLIENT50_REQ = 0xb2,
	TCC_PERF_SEL_CLIENT51_REQ = 0xb3,
	TCC_PERF_SEL_CLIENT52_REQ = 0xb4,
	TCC_PERF_SEL_CLIENT53_REQ = 0xb5,
	TCC_PERF_SEL_CLIENT54_REQ = 0xb6,
	TCC_PERF_SEL_CLIENT55_REQ = 0xb7,
	TCC_PERF_SEL_CLIENT56_REQ = 0xb8,
	TCC_PERF_SEL_CLIENT57_REQ = 0xb9,
	TCC_PERF_SEL_CLIENT58_REQ = 0xba,
	TCC_PERF_SEL_CLIENT59_REQ = 0xbb,
	TCC_PERF_SEL_CLIENT60_REQ = 0xbc,
	TCC_PERF_SEL_CLIENT61_REQ = 0xbd,
	TCC_PERF_SEL_CLIENT62_REQ = 0xbe,
	TCC_PERF_SEL_CLIENT63_REQ = 0xbf,
	TCC_PERF_SEL_CLIENT64_REQ = 0xc0,
	TCC_PERF_SEL_CLIENT65_REQ = 0xc1,
	TCC_PERF_SEL_CLIENT66_REQ = 0xc2,
	TCC_PERF_SEL_CLIENT67_REQ = 0xc3,
	TCC_PERF_SEL_CLIENT68_REQ = 0xc4,
	TCC_PERF_SEL_CLIENT69_REQ = 0xc5,
	TCC_PERF_SEL_CLIENT70_REQ = 0xc6,
	TCC_PERF_SEL_CLIENT71_REQ = 0xc7,
	TCC_PERF_SEL_CLIENT72_REQ = 0xc8,
	TCC_PERF_SEL_CLIENT73_REQ = 0xc9,
	TCC_PERF_SEL_CLIENT74_REQ = 0xca,
	TCC_PERF_SEL_CLIENT75_REQ = 0xcb,
	TCC_PERF_SEL_CLIENT76_REQ = 0xcc,
	TCC_PERF_SEL_CLIENT77_REQ = 0xcd,
	TCC_PERF_SEL_CLIENT78_REQ = 0xce,
	TCC_PERF_SEL_CLIENT79_REQ = 0xcf,
	TCC_PERF_SEL_CLIENT80_REQ = 0xd0,
	TCC_PERF_SEL_CLIENT81_REQ = 0xd1,
	TCC_PERF_SEL_CLIENT82_REQ = 0xd2,
	TCC_PERF_SEL_CLIENT83_REQ = 0xd3,
	TCC_PERF_SEL_CLIENT84_REQ = 0xd4,
	TCC_PERF_SEL_CLIENT85_REQ = 0xd5,
	TCC_PERF_SEL_CLIENT86_REQ = 0xd6,
	TCC_PERF_SEL_CLIENT87_REQ = 0xd7,
	TCC_PERF_SEL_CLIENT88_REQ = 0xd8,
	TCC_PERF_SEL_CLIENT89_REQ = 0xd9,
	TCC_PERF_SEL_CLIENT90_REQ = 0xda,
	TCC_PERF_SEL_CLIENT91_REQ = 0xdb,
	TCC_PERF_SEL_CLIENT92_REQ = 0xdc,
	TCC_PERF_SEL_CLIENT93_REQ = 0xdd,
	TCC_PERF_SEL_CLIENT94_REQ = 0xde,
	TCC_PERF_SEL_CLIENT95_REQ = 0xdf,
	TCC_PERF_SEL_CLIENT96_REQ = 0xe0,
	TCC_PERF_SEL_CLIENT97_REQ = 0xe1,
	TCC_PERF_SEL_CLIENT98_REQ = 0xe2,
	TCC_PERF_SEL_CLIENT99_REQ = 0xe3,
	TCC_PERF_SEL_CLIENT100_REQ = 0xe4,
	TCC_PERF_SEL_CLIENT101_REQ = 0xe5,
	TCC_PERF_SEL_CLIENT102_REQ = 0xe6,
	TCC_PERF_SEL_CLIENT103_REQ = 0xe7,
	TCC_PERF_SEL_CLIENT104_REQ = 0xe8,
	TCC_PERF_SEL_CLIENT105_REQ = 0xe9,
	TCC_PERF_SEL_CLIENT106_REQ = 0xea,
	TCC_PERF_SEL_CLIENT107_REQ = 0xeb,
	TCC_PERF_SEL_CLIENT108_REQ = 0xec,
	TCC_PERF_SEL_CLIENT109_REQ = 0xed,
	TCC_PERF_SEL_CLIENT110_REQ = 0xee,
	TCC_PERF_SEL_CLIENT111_REQ = 0xef,
	TCC_PERF_SEL_CLIENT112_REQ = 0xf0,
	TCC_PERF_SEL_CLIENT113_REQ = 0xf1,
	TCC_PERF_SEL_CLIENT114_REQ = 0xf2,
	TCC_PERF_SEL_CLIENT115_REQ = 0xf3,
	TCC_PERF_SEL_CLIENT116_REQ = 0xf4,
	TCC_PERF_SEL_CLIENT117_REQ = 0xf5,
	TCC_PERF_SEL_CLIENT118_REQ = 0xf6,
	TCC_PERF_SEL_CLIENT119_REQ = 0xf7,
	TCC_PERF_SEL_CLIENT120_REQ = 0xf8,
	TCC_PERF_SEL_CLIENT121_REQ = 0xf9,
	TCC_PERF_SEL_CLIENT122_REQ = 0xfa,
	TCC_PERF_SEL_CLIENT123_REQ = 0xfb,
	TCC_PERF_SEL_CLIENT124_REQ = 0xfc,
	TCC_PERF_SEL_CLIENT125_REQ = 0xfd,
	TCC_PERF_SEL_CLIENT126_REQ = 0xfe,
	TCC_PERF_SEL_CLIENT127_REQ = 0xff,
} TCC_PERF_SEL;
/*
 * TCA_PERF_SEL - performance counter event selects for the TCA
 * (texture cache arbiter) block; most events are replicated per
 * attached TCC instance (TCC0-TCC7).
 */
typedef enum TCA_PERF_SEL {
	TCA_PERF_SEL_NONE = 0x0,
	TCA_PERF_SEL_CYCLE = 0x1,
	TCA_PERF_SEL_BUSY = 0x2,
	TCA_PERF_SEL_FORCED_HOLE_TCC0 = 0x3,
	TCA_PERF_SEL_FORCED_HOLE_TCC1 = 0x4,
	TCA_PERF_SEL_FORCED_HOLE_TCC2 = 0x5,
	TCA_PERF_SEL_FORCED_HOLE_TCC3 = 0x6,
	TCA_PERF_SEL_FORCED_HOLE_TCC4 = 0x7,
	TCA_PERF_SEL_FORCED_HOLE_TCC5 = 0x8,
	TCA_PERF_SEL_FORCED_HOLE_TCC6 = 0x9,
	TCA_PERF_SEL_FORCED_HOLE_TCC7 = 0xa,
	TCA_PERF_SEL_REQ_TCC0 = 0xb,
	TCA_PERF_SEL_REQ_TCC1 = 0xc,
	TCA_PERF_SEL_REQ_TCC2 = 0xd,
	TCA_PERF_SEL_REQ_TCC3 = 0xe,
	TCA_PERF_SEL_REQ_TCC4 = 0xf,
	TCA_PERF_SEL_REQ_TCC5 = 0x10,
	TCA_PERF_SEL_REQ_TCC6 = 0x11,
	TCA_PERF_SEL_REQ_TCC7 = 0x12,
	TCA_PERF_SEL_CROSSBAR_DOUBLE_ARB_TCC0 = 0x13,
	TCA_PERF_SEL_CROSSBAR_DOUBLE_ARB_TCC1 = 0x14,
	TCA_PERF_SEL_CROSSBAR_DOUBLE_ARB_TCC2 = 0x15,
	TCA_PERF_SEL_CROSSBAR_DOUBLE_ARB_TCC3 = 0x16,
	TCA_PERF_SEL_CROSSBAR_DOUBLE_ARB_TCC4 = 0x17,
	TCA_PERF_SEL_CROSSBAR_DOUBLE_ARB_TCC5 = 0x18,
	TCA_PERF_SEL_CROSSBAR_DOUBLE_ARB_TCC6 = 0x19,
	TCA_PERF_SEL_CROSSBAR_DOUBLE_ARB_TCC7 = 0x1a,
	TCA_PERF_SEL_CROSSBAR_STALL_TCC0 = 0x1b,
	TCA_PERF_SEL_CROSSBAR_STALL_TCC1 = 0x1c,
	TCA_PERF_SEL_CROSSBAR_STALL_TCC2 = 0x1d,
	TCA_PERF_SEL_CROSSBAR_STALL_TCC3 = 0x1e,
	TCA_PERF_SEL_CROSSBAR_STALL_TCC4 = 0x1f,
	TCA_PERF_SEL_CROSSBAR_STALL_TCC5 = 0x20,
	TCA_PERF_SEL_CROSSBAR_STALL_TCC6 = 0x21,
	TCA_PERF_SEL_CROSSBAR_STALL_TCC7 = 0x22,
} TCA_PERF_SEL;
/*
 * TA_TC_ADDR_MODES - addressing modes for TA-to-TC requests: default,
 * one of four compressed-component modes, unaligned, or border color.
 */
typedef enum TA_TC_ADDR_MODES {
	TA_TC_ADDR_MODE_DEFAULT = 0x0,
	TA_TC_ADDR_MODE_COMP0 = 0x1,
	TA_TC_ADDR_MODE_COMP1 = 0x2,
	TA_TC_ADDR_MODE_COMP2 = 0x3,
	TA_TC_ADDR_MODE_COMP3 = 0x4,
	TA_TC_ADDR_MODE_UNALIGNED = 0x5,
	TA_TC_ADDR_MODE_BORDER_COLOR = 0x6,
} TA_TC_ADDR_MODES;
/*
 * TA_PERFCOUNT_SEL - performance counter event selects for the TA
 * (texture addresser) block: pipeline-busy indicators, wavefront
 * classification, stall sources, filtering cycle counts, and clock
 * gating / XNACK events.
 */
typedef enum TA_PERFCOUNT_SEL {
	TA_PERF_SEL_NULL = 0x0,
	TA_PERF_SEL_sh_fifo_busy = 0x1,
	TA_PERF_SEL_sh_fifo_cmd_busy = 0x2,
	TA_PERF_SEL_sh_fifo_addr_busy = 0x3,
	TA_PERF_SEL_sh_fifo_data_busy = 0x4,
	TA_PERF_SEL_sh_fifo_data_sfifo_busy = 0x5,
	TA_PERF_SEL_sh_fifo_data_tfifo_busy = 0x6,
	TA_PERF_SEL_gradient_busy = 0x7,
	TA_PERF_SEL_gradient_fifo_busy = 0x8,
	TA_PERF_SEL_lod_busy = 0x9,
	TA_PERF_SEL_lod_fifo_busy = 0xa,
	TA_PERF_SEL_addresser_busy = 0xb,
	TA_PERF_SEL_addresser_fifo_busy = 0xc,
	TA_PERF_SEL_aligner_busy = 0xd,
	TA_PERF_SEL_write_path_busy = 0xe,
	TA_PERF_SEL_ta_busy = 0xf,
	TA_PERF_SEL_sq_ta_cmd_cycles = 0x10,
	TA_PERF_SEL_sp_ta_addr_cycles = 0x11,
	TA_PERF_SEL_sp_ta_data_cycles = 0x12,
	TA_PERF_SEL_ta_fa_data_state_cycles = 0x13,
	TA_PERF_SEL_sh_fifo_addr_waiting_on_cmd_cycles = 0x14,
	TA_PERF_SEL_sh_fifo_cmd_waiting_on_addr_cycles = 0x15,
	TA_PERF_SEL_sh_fifo_addr_starved_while_busy_cycles = 0x16,
	TA_PERF_SEL_sh_fifo_cmd_starved_while_busy_cycles = 0x17,
	TA_PERF_SEL_sh_fifo_data_waiting_on_data_state_cycles = 0x18,
	TA_PERF_SEL_sh_fifo_data_state_waiting_on_data_cycles = 0x19,
	TA_PERF_SEL_sh_fifo_data_starved_while_busy_cycles = 0x1a,
	TA_PERF_SEL_sh_fifo_data_state_starved_while_busy_cycles = 0x1b,
	TA_PERF_SEL_RESERVED_28 = 0x1c,
	TA_PERF_SEL_RESERVED_29 = 0x1d,
	TA_PERF_SEL_sh_fifo_addr_cycles = 0x1e,
	TA_PERF_SEL_sh_fifo_data_cycles = 0x1f,
	TA_PERF_SEL_total_wavefronts = 0x20,
	TA_PERF_SEL_gradient_cycles = 0x21,
	TA_PERF_SEL_walker_cycles = 0x22,
	TA_PERF_SEL_aligner_cycles = 0x23,
	TA_PERF_SEL_image_wavefronts = 0x24,
	TA_PERF_SEL_image_read_wavefronts = 0x25,
	TA_PERF_SEL_image_write_wavefronts = 0x26,
	TA_PERF_SEL_image_atomic_wavefronts = 0x27,
	TA_PERF_SEL_image_total_cycles = 0x28,
	TA_PERF_SEL_RESERVED_41 = 0x29,
	TA_PERF_SEL_RESERVED_42 = 0x2a,
	TA_PERF_SEL_RESERVED_43 = 0x2b,
	TA_PERF_SEL_buffer_wavefronts = 0x2c,
	TA_PERF_SEL_buffer_read_wavefronts = 0x2d,
	TA_PERF_SEL_buffer_write_wavefronts = 0x2e,
	TA_PERF_SEL_buffer_atomic_wavefronts = 0x2f,
	TA_PERF_SEL_buffer_coalescable_wavefronts = 0x30,
	TA_PERF_SEL_buffer_total_cycles = 0x31,
	TA_PERF_SEL_buffer_coalescable_addr_multicycled_cycles = 0x32,
	TA_PERF_SEL_buffer_coalescable_clamp_16kdword_multicycled_cycles = 0x33,
	TA_PERF_SEL_buffer_coalesced_read_cycles = 0x34,
	TA_PERF_SEL_buffer_coalesced_write_cycles = 0x35,
	TA_PERF_SEL_addr_stalled_by_tc_cycles = 0x36,
	TA_PERF_SEL_addr_stalled_by_td_cycles = 0x37,
	TA_PERF_SEL_data_stalled_by_tc_cycles = 0x38,
	TA_PERF_SEL_addresser_stalled_by_aligner_only_cycles = 0x39,
	TA_PERF_SEL_addresser_stalled_cycles = 0x3a,
	TA_PERF_SEL_aniso_stalled_by_addresser_only_cycles = 0x3b,
	TA_PERF_SEL_aniso_stalled_cycles = 0x3c,
	TA_PERF_SEL_deriv_stalled_by_aniso_only_cycles = 0x3d,
	TA_PERF_SEL_deriv_stalled_cycles = 0x3e,
	TA_PERF_SEL_aniso_gt1_cycle_quads = 0x3f,
	TA_PERF_SEL_color_1_cycle_pixels = 0x40,
	TA_PERF_SEL_color_2_cycle_pixels = 0x41,
	TA_PERF_SEL_color_3_cycle_pixels = 0x42,
	TA_PERF_SEL_color_4_cycle_pixels = 0x43,
	TA_PERF_SEL_mip_1_cycle_pixels = 0x44,
	TA_PERF_SEL_mip_2_cycle_pixels = 0x45,
	TA_PERF_SEL_vol_1_cycle_pixels = 0x46,
	TA_PERF_SEL_vol_2_cycle_pixels = 0x47,
	TA_PERF_SEL_bilin_point_1_cycle_pixels = 0x48,
	TA_PERF_SEL_mipmap_lod_0_samples = 0x49,
	TA_PERF_SEL_mipmap_lod_1_samples = 0x4a,
	TA_PERF_SEL_mipmap_lod_2_samples = 0x4b,
	TA_PERF_SEL_mipmap_lod_3_samples = 0x4c,
	TA_PERF_SEL_mipmap_lod_4_samples = 0x4d,
	TA_PERF_SEL_mipmap_lod_5_samples = 0x4e,
	TA_PERF_SEL_mipmap_lod_6_samples = 0x4f,
	TA_PERF_SEL_mipmap_lod_7_samples = 0x50,
	TA_PERF_SEL_mipmap_lod_8_samples = 0x51,
	TA_PERF_SEL_mipmap_lod_9_samples = 0x52,
	TA_PERF_SEL_mipmap_lod_10_samples = 0x53,
	TA_PERF_SEL_mipmap_lod_11_samples = 0x54,
	TA_PERF_SEL_mipmap_lod_12_samples = 0x55,
	TA_PERF_SEL_mipmap_lod_13_samples = 0x56,
	TA_PERF_SEL_mipmap_lod_14_samples = 0x57,
	TA_PERF_SEL_mipmap_invalid_samples = 0x58,
	TA_PERF_SEL_aniso_1_cycle_quads = 0x59,
	TA_PERF_SEL_aniso_2_cycle_quads = 0x5a,
	TA_PERF_SEL_aniso_4_cycle_quads = 0x5b,
	TA_PERF_SEL_aniso_6_cycle_quads = 0x5c,
	TA_PERF_SEL_aniso_8_cycle_quads = 0x5d,
	TA_PERF_SEL_aniso_10_cycle_quads = 0x5e,
	TA_PERF_SEL_aniso_12_cycle_quads = 0x5f,
	TA_PERF_SEL_aniso_14_cycle_quads = 0x60,
	TA_PERF_SEL_aniso_16_cycle_quads = 0x61,
	TA_PERF_SEL_write_path_input_cycles = 0x62,
	TA_PERF_SEL_write_path_output_cycles = 0x63,
	TA_PERF_SEL_flat_wavefronts = 0x64,
	TA_PERF_SEL_flat_read_wavefronts = 0x65,
	TA_PERF_SEL_flat_write_wavefronts = 0x66,
	TA_PERF_SEL_flat_atomic_wavefronts = 0x67,
	TA_PERF_SEL_flat_coalesceable_wavefronts = 0x68,
	TA_PERF_SEL_reg_sclk_vld = 0x69,
	TA_PERF_SEL_local_cg_dyn_sclk_grp0_en = 0x6a,
	TA_PERF_SEL_local_cg_dyn_sclk_grp1_en = 0x6b,
	TA_PERF_SEL_local_cg_dyn_sclk_grp1_mems_en = 0x6c,
	TA_PERF_SEL_local_cg_dyn_sclk_grp4_en = 0x6d,
	TA_PERF_SEL_local_cg_dyn_sclk_grp5_en = 0x6e,
	TA_PERF_SEL_xnack_on_phase0 = 0x6f,
	TA_PERF_SEL_xnack_on_phase1 = 0x70,
	TA_PERF_SEL_xnack_on_phase2 = 0x71,
	TA_PERF_SEL_xnack_on_phase3 = 0x72,
	TA_PERF_SEL_first_xnack_on_phase0 = 0x73,
	TA_PERF_SEL_first_xnack_on_phase1 = 0x74,
	TA_PERF_SEL_first_xnack_on_phase2 = 0x75,
	TA_PERF_SEL_first_xnack_on_phase3 = 0x76,
} TA_PERFCOUNT_SEL;
/*
 * TD_PERFCOUNT_SEL - performance counter event selects for the TD
 * (texture data) block: busy/stall indicators, wavefront type counts,
 * filtering and border-color events, and output phase statistics.
 */
typedef enum TD_PERFCOUNT_SEL {
	TD_PERF_SEL_none = 0x0,
	TD_PERF_SEL_td_busy = 0x1,
	TD_PERF_SEL_input_busy = 0x2,
	TD_PERF_SEL_output_busy = 0x3,
	TD_PERF_SEL_lerp_busy = 0x4,
	TD_PERF_SEL_reg_sclk_vld = 0x5,
	TD_PERF_SEL_local_cg_dyn_sclk_grp0_en = 0x6,
	TD_PERF_SEL_local_cg_dyn_sclk_grp1_en = 0x7,
	TD_PERF_SEL_local_cg_dyn_sclk_grp4_en = 0x8,
	TD_PERF_SEL_local_cg_dyn_sclk_grp5_en = 0x9,
	TD_PERF_SEL_tc_td_fifo_full = 0xa,
	TD_PERF_SEL_constant_state_full = 0xb,
	TD_PERF_SEL_sample_state_full = 0xc,
	TD_PERF_SEL_output_fifo_full = 0xd,
	TD_PERF_SEL_RESERVED_14 = 0xe,
	TD_PERF_SEL_tc_stall = 0xf,
	TD_PERF_SEL_pc_stall = 0x10,
	TD_PERF_SEL_gds_stall = 0x11,
	TD_PERF_SEL_RESERVED_18 = 0x12,
	TD_PERF_SEL_RESERVED_19 = 0x13,
	TD_PERF_SEL_gather4_wavefront = 0x14,
	TD_PERF_SEL_sample_c_wavefront = 0x15,
	TD_PERF_SEL_load_wavefront = 0x16,
	TD_PERF_SEL_atomic_wavefront = 0x17,
	TD_PERF_SEL_store_wavefront = 0x18,
	TD_PERF_SEL_ldfptr_wavefront = 0x19,
	TD_PERF_SEL_RESERVED_26 = 0x1a,
	TD_PERF_SEL_RESERVED_27 = 0x1b,
	TD_PERF_SEL_d16_en_wavefront = 0x1c,
	TD_PERF_SEL_bicubic_filter_wavefront = 0x1d,
	TD_PERF_SEL_bypass_filter_wavefront = 0x1e,
	TD_PERF_SEL_min_max_filter_wavefront = 0x1f,
	TD_PERF_SEL_coalescable_wavefront = 0x20,
	TD_PERF_SEL_coalesced_phase = 0x21,
	TD_PERF_SEL_four_phase_wavefront = 0x22,
	TD_PERF_SEL_eight_phase_wavefront = 0x23,
	TD_PERF_SEL_sixteen_phase_wavefront = 0x24,
	TD_PERF_SEL_four_phase_forward_wavefront = 0x25,
	TD_PERF_SEL_write_ack_wavefront = 0x26,
	TD_PERF_SEL_RESERVED_39 = 0x27,
	TD_PERF_SEL_user_defined_border = 0x28,
	TD_PERF_SEL_white_border = 0x29,
	TD_PERF_SEL_opaque_black_border = 0x2a,
	TD_PERF_SEL_RESERVED_43 = 0x2b,
	TD_PERF_SEL_RESERVED_44 = 0x2c,
	TD_PERF_SEL_nack = 0x2d,
	TD_PERF_SEL_td_sp_traffic = 0x2e,
	TD_PERF_SEL_consume_gds_traffic = 0x2f,
	TD_PERF_SEL_addresscmd_poison = 0x30,
	TD_PERF_SEL_data_poison = 0x31,
	TD_PERF_SEL_start_cycle_0 = 0x32,
	TD_PERF_SEL_start_cycle_1 = 0x33,
	TD_PERF_SEL_start_cycle_2 = 0x34,
	TD_PERF_SEL_start_cycle_3 = 0x35,
	TD_PERF_SEL_null_cycle_output = 0x36,
	TD_PERF_SEL_d16_data_packed = 0x37,
} TD_PERFCOUNT_SEL;
5039typedef enum TCP_PERFCOUNT_SELECT {
5040 TCP_PERF_SEL_TA_TCP_ADDR_STARVE_CYCLES = 0x0,
5041 TCP_PERF_SEL_TA_TCP_DATA_STARVE_CYCLES = 0x1,
5042 TCP_PERF_SEL_TCP_TA_ADDR_STALL_CYCLES = 0x2,
5043 TCP_PERF_SEL_TCP_TA_DATA_STALL_CYCLES = 0x3,
5044 TCP_PERF_SEL_TD_TCP_STALL_CYCLES = 0x4,
5045 TCP_PERF_SEL_TCR_TCP_STALL_CYCLES = 0x5,
5046 TCP_PERF_SEL_LOD_STALL_CYCLES = 0x6,
5047 TCP_PERF_SEL_READ_TAGCONFLICT_STALL_CYCLES = 0x7,
5048 TCP_PERF_SEL_WRITE_TAGCONFLICT_STALL_CYCLES = 0x8,
5049 TCP_PERF_SEL_ATOMIC_TAGCONFLICT_STALL_CYCLES = 0x9,
5050 TCP_PERF_SEL_ALLOC_STALL_CYCLES = 0xa,
5051 TCP_PERF_SEL_LFIFO_STALL_CYCLES = 0xb,
5052 TCP_PERF_SEL_RFIFO_STALL_CYCLES = 0xc,
5053 TCP_PERF_SEL_TCR_RDRET_STALL = 0xd,
5054 TCP_PERF_SEL_WRITE_CONFLICT_STALL = 0xe,
5055 TCP_PERF_SEL_HOLE_READ_STALL = 0xf,
5056 TCP_PERF_SEL_READCONFLICT_STALL_CYCLES = 0x10,
5057 TCP_PERF_SEL_PENDING_STALL_CYCLES = 0x11,
5058 TCP_PERF_SEL_READFIFO_STALL_CYCLES = 0x12,
5059 TCP_PERF_SEL_TCP_LATENCY = 0x13,
5060 TCP_PERF_SEL_TCC_READ_REQ_LATENCY = 0x14,
5061 TCP_PERF_SEL_TCC_WRITE_REQ_LATENCY = 0x15,
5062 TCP_PERF_SEL_TCC_WRITE_REQ_HOLE_LATENCY = 0x16,
5063 TCP_PERF_SEL_TCC_READ_REQ = 0x17,
5064 TCP_PERF_SEL_TCC_WRITE_REQ = 0x18,
5065 TCP_PERF_SEL_TCC_ATOMIC_WITH_RET_REQ = 0x19,
5066 TCP_PERF_SEL_TCC_ATOMIC_WITHOUT_RET_REQ = 0x1a,
5067 TCP_PERF_SEL_TOTAL_LOCAL_READ = 0x1b,
5068 TCP_PERF_SEL_TOTAL_GLOBAL_READ = 0x1c,
5069 TCP_PERF_SEL_TOTAL_LOCAL_WRITE = 0x1d,
5070 TCP_PERF_SEL_TOTAL_GLOBAL_WRITE = 0x1e,
5071 TCP_PERF_SEL_TOTAL_ATOMIC_WITH_RET = 0x1f,
5072 TCP_PERF_SEL_TOTAL_ATOMIC_WITHOUT_RET = 0x20,
5073 TCP_PERF_SEL_TOTAL_WBINVL1 = 0x21,
5074 TCP_PERF_SEL_IMG_READ_FMT_1 = 0x22,
5075 TCP_PERF_SEL_IMG_READ_FMT_8 = 0x23,
5076 TCP_PERF_SEL_IMG_READ_FMT_16 = 0x24,
5077 TCP_PERF_SEL_IMG_READ_FMT_32 = 0x25,
5078 TCP_PERF_SEL_IMG_READ_FMT_32_AS_8 = 0x26,
5079 TCP_PERF_SEL_IMG_READ_FMT_32_AS_16 = 0x27,
5080 TCP_PERF_SEL_IMG_READ_FMT_32_AS_128 = 0x28,
5081 TCP_PERF_SEL_IMG_READ_FMT_64_2_CYCLE = 0x29,
5082 TCP_PERF_SEL_IMG_READ_FMT_64_1_CYCLE = 0x2a,
5083 TCP_PERF_SEL_IMG_READ_FMT_96 = 0x2b,
5084 TCP_PERF_SEL_IMG_READ_FMT_128_4_CYCLE = 0x2c,
5085 TCP_PERF_SEL_IMG_READ_FMT_128_1_CYCLE = 0x2d,
5086 TCP_PERF_SEL_IMG_READ_FMT_BC1 = 0x2e,
5087 TCP_PERF_SEL_IMG_READ_FMT_BC2 = 0x2f,
5088 TCP_PERF_SEL_IMG_READ_FMT_BC3 = 0x30,
5089 TCP_PERF_SEL_IMG_READ_FMT_BC4 = 0x31,
5090 TCP_PERF_SEL_IMG_READ_FMT_BC5 = 0x32,
5091 TCP_PERF_SEL_IMG_READ_FMT_BC6 = 0x33,
5092 TCP_PERF_SEL_IMG_READ_FMT_BC7 = 0x34,
5093 TCP_PERF_SEL_IMG_READ_FMT_I8 = 0x35,
5094 TCP_PERF_SEL_IMG_READ_FMT_I16 = 0x36,
5095 TCP_PERF_SEL_IMG_READ_FMT_I32 = 0x37,
5096 TCP_PERF_SEL_IMG_READ_FMT_I32_AS_8 = 0x38,
5097 TCP_PERF_SEL_IMG_READ_FMT_I32_AS_16 = 0x39,
5098 TCP_PERF_SEL_IMG_READ_FMT_D8 = 0x3a,
5099 TCP_PERF_SEL_IMG_READ_FMT_D16 = 0x3b,
5100 TCP_PERF_SEL_IMG_READ_FMT_D32 = 0x3c,
5101 TCP_PERF_SEL_IMG_WRITE_FMT_8 = 0x3d,
5102 TCP_PERF_SEL_IMG_WRITE_FMT_16 = 0x3e,
5103 TCP_PERF_SEL_IMG_WRITE_FMT_32 = 0x3f,
5104 TCP_PERF_SEL_IMG_WRITE_FMT_64 = 0x40,
5105 TCP_PERF_SEL_IMG_WRITE_FMT_128 = 0x41,
5106 TCP_PERF_SEL_IMG_WRITE_FMT_D8 = 0x42,
5107 TCP_PERF_SEL_IMG_WRITE_FMT_D16 = 0x43,
5108 TCP_PERF_SEL_IMG_WRITE_FMT_D32 = 0x44,
5109 TCP_PERF_SEL_IMG_ATOMIC_WITH_RET_FMT_32 = 0x45,
5110 TCP_PERF_SEL_IMG_ATOMIC_WITHOUT_RET_FMT_32 = 0x46,
5111 TCP_PERF_SEL_IMG_ATOMIC_WITH_RET_FMT_64 = 0x47,
5112 TCP_PERF_SEL_IMG_ATOMIC_WITHOUT_RET_FMT_64 = 0x48,
5113 TCP_PERF_SEL_BUF_READ_FMT_8 = 0x49,
5114 TCP_PERF_SEL_BUF_READ_FMT_16 = 0x4a,
5115 TCP_PERF_SEL_BUF_READ_FMT_32 = 0x4b,
5116 TCP_PERF_SEL_BUF_WRITE_FMT_8 = 0x4c,
5117 TCP_PERF_SEL_BUF_WRITE_FMT_16 = 0x4d,
5118 TCP_PERF_SEL_BUF_WRITE_FMT_32 = 0x4e,
5119 TCP_PERF_SEL_BUF_ATOMIC_WITH_RET_FMT_32 = 0x4f,
5120 TCP_PERF_SEL_BUF_ATOMIC_WITHOUT_RET_FMT_32 = 0x50,
5121 TCP_PERF_SEL_BUF_ATOMIC_WITH_RET_FMT_64 = 0x51,
5122 TCP_PERF_SEL_BUF_ATOMIC_WITHOUT_RET_FMT_64 = 0x52,
5123 TCP_PERF_SEL_ARR_LINEAR_GENERAL = 0x53,
5124 TCP_PERF_SEL_ARR_LINEAR_ALIGNED = 0x54,
5125 TCP_PERF_SEL_ARR_1D_THIN1 = 0x55,
5126 TCP_PERF_SEL_ARR_1D_THICK = 0x56,
5127 TCP_PERF_SEL_ARR_2D_THIN1 = 0x57,
5128 TCP_PERF_SEL_ARR_2D_THICK = 0x58,
5129 TCP_PERF_SEL_ARR_2D_XTHICK = 0x59,
5130 TCP_PERF_SEL_ARR_3D_THIN1 = 0x5a,
5131 TCP_PERF_SEL_ARR_3D_THICK = 0x5b,
5132 TCP_PERF_SEL_ARR_3D_XTHICK = 0x5c,
5133 TCP_PERF_SEL_DIM_1D = 0x5d,
5134 TCP_PERF_SEL_DIM_2D = 0x5e,
5135 TCP_PERF_SEL_DIM_3D = 0x5f,
5136 TCP_PERF_SEL_DIM_1D_ARRAY = 0x60,
5137 TCP_PERF_SEL_DIM_2D_ARRAY = 0x61,
5138 TCP_PERF_SEL_DIM_2D_MSAA = 0x62,
5139 TCP_PERF_SEL_DIM_2D_ARRAY_MSAA = 0x63,
5140 TCP_PERF_SEL_DIM_CUBE_ARRAY = 0x64,
5141 TCP_PERF_SEL_CP_TCP_INVALIDATE = 0x65,
5142 TCP_PERF_SEL_TA_TCP_STATE_READ = 0x66,
5143 TCP_PERF_SEL_TAGRAM0_REQ = 0x67,
5144 TCP_PERF_SEL_TAGRAM1_REQ = 0x68,
5145 TCP_PERF_SEL_TAGRAM2_REQ = 0x69,
5146 TCP_PERF_SEL_TAGRAM3_REQ = 0x6a,
5147 TCP_PERF_SEL_GATE_EN1 = 0x6b,
5148 TCP_PERF_SEL_GATE_EN2 = 0x6c,
5149 TCP_PERF_SEL_CORE_REG_SCLK_VLD = 0x6d,
5150 TCP_PERF_SEL_TCC_REQ = 0x6e,
5151 TCP_PERF_SEL_TCC_NON_READ_REQ = 0x6f,
5152 TCP_PERF_SEL_TCC_BYPASS_READ_REQ = 0x70,
5153 TCP_PERF_SEL_TCC_MISS_EVICT_READ_REQ = 0x71,
5154 TCP_PERF_SEL_TCC_VOLATILE_READ_REQ = 0x72,
5155 TCP_PERF_SEL_TCC_VOLATILE_BYPASS_READ_REQ = 0x73,
5156 TCP_PERF_SEL_TCC_VOLATILE_MISS_EVICT_READ_REQ = 0x74,
5157 TCP_PERF_SEL_TCC_BYPASS_WRITE_REQ = 0x75,
5158 TCP_PERF_SEL_TCC_MISS_EVICT_WRITE_REQ = 0x76,
5159 TCP_PERF_SEL_TCC_VOLATILE_BYPASS_WRITE_REQ = 0x77,
5160 TCP_PERF_SEL_TCC_VOLATILE_WRITE_REQ = 0x78,
5161 TCP_PERF_SEL_TCC_VOLATILE_MISS_EVICT_WRITE_REQ = 0x79,
5162 TCP_PERF_SEL_TCC_BYPASS_ATOMIC_REQ = 0x7a,
5163 TCP_PERF_SEL_TCC_ATOMIC_REQ = 0x7b,
5164 TCP_PERF_SEL_TCC_VOLATILE_ATOMIC_REQ = 0x7c,
5165 TCP_PERF_SEL_TCC_DATA_BUS_BUSY = 0x7d,
5166 TCP_PERF_SEL_TOTAL_ACCESSES = 0x7e,
5167 TCP_PERF_SEL_TOTAL_READ = 0x7f,
5168 TCP_PERF_SEL_TOTAL_HIT_LRU_READ = 0x80,
5169 TCP_PERF_SEL_TOTAL_HIT_EVICT_READ = 0x81,
5170 TCP_PERF_SEL_TOTAL_MISS_LRU_READ = 0x82,
5171 TCP_PERF_SEL_TOTAL_MISS_EVICT_READ = 0x83,
5172 TCP_PERF_SEL_TOTAL_NON_READ = 0x84,
5173 TCP_PERF_SEL_TOTAL_WRITE = 0x85,
5174 TCP_PERF_SEL_TOTAL_MISS_LRU_WRITE = 0x86,
5175 TCP_PERF_SEL_TOTAL_MISS_EVICT_WRITE = 0x87,
5176 TCP_PERF_SEL_TOTAL_WBINVL1_VOL = 0x88,
5177 TCP_PERF_SEL_TOTAL_WRITEBACK_INVALIDATES = 0x89,
5178 TCP_PERF_SEL_DISPLAY_MICROTILING = 0x8a,
5179 TCP_PERF_SEL_THIN_MICROTILING = 0x8b,
5180 TCP_PERF_SEL_DEPTH_MICROTILING = 0x8c,
5181 TCP_PERF_SEL_ARR_PRT_THIN1 = 0x8d,
5182 TCP_PERF_SEL_ARR_PRT_2D_THIN1 = 0x8e,
5183 TCP_PERF_SEL_ARR_PRT_3D_THIN1 = 0x8f,
5184 TCP_PERF_SEL_ARR_PRT_THICK = 0x90,
5185 TCP_PERF_SEL_ARR_PRT_2D_THICK = 0x91,
5186 TCP_PERF_SEL_ARR_PRT_3D_THICK = 0x92,
5187 TCP_PERF_SEL_CP_TCP_INVALIDATE_VOL = 0x93,
5188 TCP_PERF_SEL_SQ_TCP_INVALIDATE_VOL = 0x94,
5189 TCP_PERF_SEL_UNALIGNED = 0x95,
5190 TCP_PERF_SEL_ROTATED_MICROTILING = 0x96,
5191 TCP_PERF_SEL_THICK_MICROTILING = 0x97,
5192 TCP_PERF_SEL_ATC = 0x98,
5193 TCP_PERF_SEL_POWER_STALL = 0x99,
5194 TCP_PERF_SEL_RESERVED_154 = 0x9a,
5195 TCP_PERF_SEL_TCC_LRU_REQ = 0x9b,
5196 TCP_PERF_SEL_TCC_STREAM_REQ = 0x9c,
5197 TCP_PERF_SEL_TCC_NC_READ_REQ = 0x9d,
5198 TCP_PERF_SEL_TCC_NC_WRITE_REQ = 0x9e,
5199 TCP_PERF_SEL_TCC_NC_ATOMIC_REQ = 0x9f,
5200 TCP_PERF_SEL_TCC_UC_READ_REQ = 0xa0,
5201 TCP_PERF_SEL_TCC_UC_WRITE_REQ = 0xa1,
5202 TCP_PERF_SEL_TCC_UC_ATOMIC_REQ = 0xa2,
5203 TCP_PERF_SEL_TCC_CC_READ_REQ = 0xa3,
5204 TCP_PERF_SEL_TCC_CC_WRITE_REQ = 0xa4,
5205 TCP_PERF_SEL_TCC_CC_ATOMIC_REQ = 0xa5,
5206 TCP_PERF_SEL_TCC_DCC_REQ = 0xa6,
5207 TCP_PERF_SEL_TCC_PHYSICAL_REQ = 0xa7,
5208 TCP_PERF_SEL_UNORDERED_MTYPE_STALL = 0xa8,
5209 TCP_PERF_SEL_VOLATILE = 0xa9,
5210 TCP_PERF_SEL_TC_TA_XNACK_STALL = 0xaa,
5211 TCP_PERF_SEL_ATCL1_SERIALIZATION_STALL = 0xab,
5212 TCP_PERF_SEL_SHOOTDOWN = 0xac,
5213 TCP_PERF_SEL_GATCL1_TRANSLATION_MISS = 0xad,
5214 TCP_PERF_SEL_GATCL1_PERMISSION_MISS = 0xae,
5215 TCP_PERF_SEL_GATCL1_REQUEST = 0xaf,
5216 TCP_PERF_SEL_GATCL1_STALL_INFLIGHT_MAX = 0xb0,
5217 TCP_PERF_SEL_GATCL1_STALL_LRU_INFLIGHT = 0xb1,
5218 TCP_PERF_SEL_GATCL1_LFIFO_FULL = 0xb2,
5219 TCP_PERF_SEL_GATCL1_STALL_LFIFO_NOT_RES = 0xb3,
5220 TCP_PERF_SEL_GATCL1_STALL_ATCL2_REQ_OUT_OF_CREDITS= 0xb4,
5221 TCP_PERF_SEL_GATCL1_ATCL2_INFLIGHT = 0xb5,
5222 TCP_PERF_SEL_GATCL1_STALL_MISSFIFO_FULL = 0xb6,
5223 TCP_PERF_SEL_IMG_READ_FMT_ETC2_RGB = 0xb7,
5224 TCP_PERF_SEL_IMG_READ_FMT_ETC2_RGBA = 0xb8,
5225 TCP_PERF_SEL_IMG_READ_FMT_ETC2_RGBA1 = 0xb9,
5226 TCP_PERF_SEL_IMG_READ_FMT_ETC2_R = 0xba,
5227 TCP_PERF_SEL_IMG_READ_FMT_ETC2_RG = 0xbb,
5228 TCP_PERF_SEL_IMG_READ_FMT_8_AS_32 = 0xbc,
5229 TCP_PERF_SEL_IMG_READ_FMT_8_AS_64 = 0xbd,
5230 TCP_PERF_SEL_IMG_READ_FMT_16_AS_64 = 0xbe,
5231 TCP_PERF_SEL_IMG_READ_FMT_16_AS_128 = 0xbf,
5232 TCP_PERF_SEL_IMG_WRITE_FMT_8_AS_32 = 0xc0,
5233 TCP_PERF_SEL_IMG_WRITE_FMT_8_AS_64 = 0xc1,
5234 TCP_PERF_SEL_IMG_WRITE_FMT_16_AS_64 = 0xc2,
5235 TCP_PERF_SEL_IMG_WRITE_FMT_16_AS_128 = 0xc3,
5236} TCP_PERFCOUNT_SELECT;
/* Cache read allocation policies for the TCP block (hardware register encodings). */
typedef enum TCP_CACHE_POLICIES {
 TCP_CACHE_POLICY_MISS_LRU = 0x0,
 TCP_CACHE_POLICY_MISS_EVICT = 0x1,
 TCP_CACHE_POLICY_HIT_LRU = 0x2,
 TCP_CACHE_POLICY_HIT_EVICT = 0x3,
} TCP_CACHE_POLICIES;
/* Cache store (write-through) policies for the TCP block. */
typedef enum TCP_CACHE_STORE_POLICIES {
 TCP_CACHE_STORE_POLICY_WT_LRU = 0x0,
 TCP_CACHE_STORE_POLICY_WT_EVICT = 0x1,
} TCP_CACHE_STORE_POLICIES;
/* Access types a TCP address watch point can trigger on. */
typedef enum TCP_WATCH_MODES {
 TCP_WATCH_MODE_READ = 0x0,
 TCP_WATCH_MODE_NONREAD = 0x1,
 TCP_WATCH_MODE_ATOMIC = 0x2,
 TCP_WATCH_MODE_ALL = 0x3,
} TCP_WATCH_MODES;
/* TCP DSM (debug state machine) data-select encodings: off, one of two selects, or both. */
typedef enum TCP_DSM_DATA_SEL {
 TCP_DSM_DISABLE = 0x0,
 TCP_DSM_SEL0 = 0x1,
 TCP_DSM_SEL1 = 0x2,
 TCP_DSM_SEL_BOTH = 0x3,
} TCP_DSM_DATA_SEL;
/* Single-write enable bit for the TCP DSM (only the enabled encoding is defined). */
typedef enum TCP_DSM_SINGLE_WRITE {
 TCP_DSM_SINGLE_WRITE_EN = 0x1,
} TCP_DSM_SINGLE_WRITE;
/* Primitive types emitted on the VGT output path (points, lines, tris, rects, tessellator outputs). */
typedef enum VGT_OUT_PRIM_TYPE {
 VGT_OUT_POINT = 0x0,
 VGT_OUT_LINE = 0x1,
 VGT_OUT_TRI = 0x2,
 VGT_OUT_RECT_V0 = 0x3,
 VGT_OUT_RECT_V1 = 0x4,
 VGT_OUT_RECT_V2 = 0x5,
 VGT_OUT_RECT_V3 = 0x6,
 VGT_OUT_RESERVED = 0x7,
 VGT_TE_QUAD = 0x8,
 VGT_TE_PRIM_INDEX_LINE = 0x9,
 VGT_TE_PRIM_INDEX_TRI = 0xa,
 VGT_TE_PRIM_INDEX_QUAD = 0xb,
 VGT_OUT_LINE_ADJ = 0xc,
 VGT_OUT_TRI_ADJ = 0xd,
 VGT_OUT_PATCH = 0xe,
} VGT_OUT_PRIM_TYPE;
/* Draw-initiator primitive topology encodings (DI_PT_*) consumed by the VGT. */
typedef enum VGT_DI_PRIM_TYPE {
 DI_PT_NONE = 0x0,
 DI_PT_POINTLIST = 0x1,
 DI_PT_LINELIST = 0x2,
 DI_PT_LINESTRIP = 0x3,
 DI_PT_TRILIST = 0x4,
 DI_PT_TRIFAN = 0x5,
 DI_PT_TRISTRIP = 0x6,
 DI_PT_UNUSED_0 = 0x7,
 DI_PT_UNUSED_1 = 0x8,
 DI_PT_PATCH = 0x9,
 DI_PT_LINELIST_ADJ = 0xa,
 DI_PT_LINESTRIP_ADJ = 0xb,
 DI_PT_TRILIST_ADJ = 0xc,
 DI_PT_TRISTRIP_ADJ = 0xd,
 DI_PT_UNUSED_3 = 0xe,
 DI_PT_UNUSED_4 = 0xf,
 DI_PT_TRI_WITH_WFLAGS = 0x10,
 DI_PT_RECTLIST = 0x11,
 DI_PT_LINELOOP = 0x12,
 DI_PT_QUADLIST = 0x13,
 DI_PT_QUADSTRIP = 0x14,
 DI_PT_POLYGON = 0x15,
 DI_PT_2D_COPY_RECT_LIST_V0 = 0x16,
 DI_PT_2D_COPY_RECT_LIST_V1 = 0x17,
 DI_PT_2D_COPY_RECT_LIST_V2 = 0x18,
 DI_PT_2D_COPY_RECT_LIST_V3 = 0x19,
 DI_PT_2D_FILL_RECT_LIST = 0x1a,
 DI_PT_2D_LINE_STRIP = 0x1b,
 DI_PT_2D_TRI_STRIP = 0x1c,
} VGT_DI_PRIM_TYPE;
/* Draw-initiator index source: DMA fetch, immediate data, or auto-generated indices. */
typedef enum VGT_DI_SOURCE_SELECT {
 DI_SRC_SEL_DMA = 0x0,
 DI_SRC_SEL_IMMEDIATE = 0x1,
 DI_SRC_SEL_AUTO_INDEX = 0x2,
 DI_SRC_SEL_RESERVED = 0x3,
} VGT_DI_SOURCE_SELECT;
/* Draw-initiator major mode select (two hardware modes). */
typedef enum VGT_DI_MAJOR_MODE_SELECT {
 DI_MAJOR_MODE_0 = 0x0,
 DI_MAJOR_MODE_1 = 0x1,
} VGT_DI_MAJOR_MODE_SELECT;
/* Draw-initiator vertex index width (note: 16-bit is encoding 0, 8-bit is 2). */
typedef enum VGT_DI_INDEX_SIZE {
 DI_INDEX_SIZE_16_BIT = 0x0,
 DI_INDEX_SIZE_32_BIT = 0x1,
 DI_INDEX_SIZE_8_BIT = 0x2,
} VGT_DI_INDEX_SIZE;
/* Pipeline event encodings written via EVENT_WRITE-style packets (flushes,
 * timestamps, perf-counter and thread-trace control, etc.). */
typedef enum VGT_EVENT_TYPE {
 Reserved_0x00 = 0x0,
 SAMPLE_STREAMOUTSTATS1 = 0x1,
 SAMPLE_STREAMOUTSTATS2 = 0x2,
 SAMPLE_STREAMOUTSTATS3 = 0x3,
 CACHE_FLUSH_TS = 0x4,
 CONTEXT_DONE = 0x5,
 CACHE_FLUSH = 0x6,
 CS_PARTIAL_FLUSH = 0x7,
 VGT_STREAMOUT_SYNC = 0x8,
 Reserved_0x09 = 0x9,
 VGT_STREAMOUT_RESET = 0xa,
 END_OF_PIPE_INCR_DE = 0xb,
 END_OF_PIPE_IB_END = 0xc,
 RST_PIX_CNT = 0xd,
 Reserved_0x0E = 0xe,
 VS_PARTIAL_FLUSH = 0xf,
 PS_PARTIAL_FLUSH = 0x10,
 FLUSH_HS_OUTPUT = 0x11,
 FLUSH_LS_OUTPUT = 0x12,
 Reserved_0x13 = 0x13,
 CACHE_FLUSH_AND_INV_TS_EVENT = 0x14,
 ZPASS_DONE = 0x15,
 CACHE_FLUSH_AND_INV_EVENT = 0x16,
 PERFCOUNTER_START = 0x17,
 PERFCOUNTER_STOP = 0x18,
 PIPELINESTAT_START = 0x19,
 PIPELINESTAT_STOP = 0x1a,
 PERFCOUNTER_SAMPLE = 0x1b,
 FLUSH_ES_OUTPUT = 0x1c,
 FLUSH_GS_OUTPUT = 0x1d,
 SAMPLE_PIPELINESTAT = 0x1e,
 SO_VGTSTREAMOUT_FLUSH = 0x1f,
 SAMPLE_STREAMOUTSTATS = 0x20,
 RESET_VTX_CNT = 0x21,
 BLOCK_CONTEXT_DONE = 0x22,
 CS_CONTEXT_DONE = 0x23,
 VGT_FLUSH = 0x24,
 TGID_ROLLOVER = 0x25,
 SQ_NON_EVENT = 0x26,
 SC_SEND_DB_VPZ = 0x27,
 BOTTOM_OF_PIPE_TS = 0x28,
 FLUSH_SX_TS = 0x29,
 DB_CACHE_FLUSH_AND_INV = 0x2a,
 FLUSH_AND_INV_DB_DATA_TS = 0x2b,
 FLUSH_AND_INV_DB_META = 0x2c,
 FLUSH_AND_INV_CB_DATA_TS = 0x2d,
 FLUSH_AND_INV_CB_META = 0x2e,
 CS_DONE = 0x2f,
 PS_DONE = 0x30,
 FLUSH_AND_INV_CB_PIXEL_DATA = 0x31,
 SX_CB_RAT_ACK_REQUEST = 0x32,
 THREAD_TRACE_START = 0x33,
 THREAD_TRACE_STOP = 0x34,
 THREAD_TRACE_MARKER = 0x35,
 THREAD_TRACE_FLUSH = 0x36,
 THREAD_TRACE_FINISH = 0x37,
 PIXEL_PIPE_STAT_CONTROL = 0x38,
 PIXEL_PIPE_STAT_DUMP = 0x39,
 PIXEL_PIPE_STAT_RESET = 0x3a,
 CONTEXT_SUSPEND = 0x3b,
 OFFCHIP_HS_DEALLOC = 0x3c,
} VGT_EVENT_TYPE;
/* Byte-swap applied to index data fetched by the VGT DMA engine. */
typedef enum VGT_DMA_SWAP_MODE {
 VGT_DMA_SWAP_NONE = 0x0,
 VGT_DMA_SWAP_16_BIT = 0x1,
 VGT_DMA_SWAP_32_BIT = 0x2,
 VGT_DMA_SWAP_WORD = 0x3,
} VGT_DMA_SWAP_MODE;
/* VGT index element width (same ordering as VGT_DI_INDEX_SIZE: 16, 32, then 8 bit). */
typedef enum VGT_INDEX_TYPE_MODE {
 VGT_INDEX_16 = 0x0,
 VGT_INDEX_32 = 0x1,
 VGT_INDEX_8 = 0x2,
} VGT_INDEX_TYPE_MODE;
/* Source buffer type for VGT DMA index fetches. */
typedef enum VGT_DMA_BUF_TYPE {
 VGT_DMA_BUF_MEM = 0x0,
 VGT_DMA_BUF_RING = 0x1,
 VGT_DMA_BUF_SETUP = 0x2,
 VGT_DMA_PTR_UPDATE = 0x3,
} VGT_DMA_BUF_TYPE;
/* Selects which VGT output path is active (vertex reuse, tessellation, passthrough, GS/HS block). */
typedef enum VGT_OUTPATH_SELECT {
 VGT_OUTPATH_VTX_REUSE = 0x0,
 VGT_OUTPATH_TESS_EN = 0x1,
 VGT_OUTPATH_PASSTHRU = 0x2,
 VGT_OUTPATH_GS_BLOCK = 0x3,
 VGT_OUTPATH_HS_BLOCK = 0x4,
} VGT_OUTPATH_SELECT;
/* Primitive types as seen by the VGT grouper stage (3D, 2D, and index-based variants). */
typedef enum VGT_GRP_PRIM_TYPE {
 VGT_GRP_3D_POINT = 0x0,
 VGT_GRP_3D_LINE = 0x1,
 VGT_GRP_3D_TRI = 0x2,
 VGT_GRP_3D_RECT = 0x3,
 VGT_GRP_3D_QUAD = 0x4,
 VGT_GRP_2D_COPY_RECT_V0 = 0x5,
 VGT_GRP_2D_COPY_RECT_V1 = 0x6,
 VGT_GRP_2D_COPY_RECT_V2 = 0x7,
 VGT_GRP_2D_COPY_RECT_V3 = 0x8,
 VGT_GRP_2D_FILL_RECT = 0x9,
 VGT_GRP_2D_LINE = 0xa,
 VGT_GRP_2D_TRI = 0xb,
 VGT_GRP_PRIM_INDEX_LINE = 0xc,
 VGT_GRP_PRIM_INDEX_TRI = 0xd,
 VGT_GRP_PRIM_INDEX_QUAD = 0xe,
 VGT_GRP_3D_LINE_ADJ = 0xf,
 VGT_GRP_3D_TRI_ADJ = 0x10,
 VGT_GRP_3D_PATCH = 0x11,
} VGT_GRP_PRIM_TYPE;
/* Vertex ordering interpretation for the VGT grouper (list, strip, fan, loop, polygon). */
typedef enum VGT_GRP_PRIM_ORDER {
 VGT_GRP_LIST = 0x0,
 VGT_GRP_STRIP = 0x1,
 VGT_GRP_FAN = 0x2,
 VGT_GRP_LOOP = 0x3,
 VGT_GRP_POLYGON = 0x4,
} VGT_GRP_PRIM_ORDER;
/* Index/data conversion select for grouped vertex data (index, uint, sint, float formats). */
typedef enum VGT_GROUP_CONV_SEL {
 VGT_GRP_INDEX_16 = 0x0,
 VGT_GRP_INDEX_32 = 0x1,
 VGT_GRP_UINT_16 = 0x2,
 VGT_GRP_UINT_32 = 0x3,
 VGT_GRP_SINT_16 = 0x4,
 VGT_GRP_SINT_32 = 0x5,
 VGT_GRP_FLOAT_32 = 0x6,
 VGT_GRP_AUTO_PRIM = 0x7,
 VGT_GRP_FIX_1_23_TO_FLOAT = 0x8,
} VGT_GROUP_CONV_SEL;
/* Geometry-shader operating mode (off, one of the hardware GS scenarios, or sprite expansion). */
typedef enum VGT_GS_MODE_TYPE {
 GS_OFF = 0x0,
 GS_SCENARIO_A = 0x1,
 GS_SCENARIO_B = 0x2,
 GS_SCENARIO_G = 0x3,
 GS_SCENARIO_C = 0x4,
 SPRITE_EN = 0x5,
} VGT_GS_MODE_TYPE;
/* GS cut granularity (1024 down to 128; larger encoding = smaller cut size). */
typedef enum VGT_GS_CUT_MODE {
 GS_CUT_1024 = 0x0,
 GS_CUT_512 = 0x1,
 GS_CUT_256 = 0x2,
 GS_CUT_128 = 0x3,
} VGT_GS_CUT_MODE;
/* Primitive type emitted by the geometry shader. */
typedef enum VGT_GS_OUTPRIM_TYPE {
 POINTLIST = 0x0,
 LINESTRIP = 0x1,
 TRISTRIP = 0x2,
} VGT_GS_OUTPRIM_TYPE;
/* Which caches a VGT cache-invalidate targets (vertex cache, texture cache, or both). */
typedef enum VGT_CACHE_INVALID_MODE {
 VC_ONLY = 0x0,
 TC_ONLY = 0x1,
 VC_AND_TC = 0x2,
} VGT_CACHE_INVALID_MODE;
/* Tessellator domain type. */
typedef enum VGT_TESS_TYPE {
 TESS_ISOLINE = 0x0,
 TESS_TRIANGLE = 0x1,
 TESS_QUAD = 0x2,
} VGT_TESS_TYPE;
/* Tessellation factor partitioning scheme (integer, pow2, fractional odd/even). */
typedef enum VGT_TESS_PARTITION {
 PART_INTEGER = 0x0,
 PART_POW2 = 0x1,
 PART_FRAC_ODD = 0x2,
 PART_FRAC_EVEN = 0x3,
} VGT_TESS_PARTITION;
/* Output topology produced by the tessellator (points, lines, CW/CCW triangles). */
typedef enum VGT_TESS_TOPOLOGY {
 OUTPUT_POINT = 0x0,
 OUTPUT_LINE = 0x1,
 OUTPUT_TRIANGLE_CW = 0x2,
 OUTPUT_TRIANGLE_CCW = 0x3,
} VGT_TESS_TOPOLOGY;
/* Cache policy used for VGT read requests (LRU vs. streaming). */
typedef enum VGT_RDREQ_POLICY {
 VGT_POLICY_LRU = 0x0,
 VGT_POLICY_STREAM = 0x1,
} VGT_RDREQ_POLICY;
/* Tessellation work distribution granularity across shader engines. */
typedef enum VGT_DIST_MODE {
 NO_DIST = 0x0,
 PATCHES = 0x1,
 DONUTS = 0x2,
} VGT_DIST_MODE;
/* LS (local shader) stage enable field of VGT_SHADER_STAGES_EN. */
typedef enum VGT_STAGES_LS_EN {
 LS_STAGE_OFF = 0x0,
 LS_STAGE_ON = 0x1,
 CS_STAGE_ON = 0x2,
 RESERVED_LS = 0x3,
} VGT_STAGES_LS_EN;
/* HS (hull shader) stage enable field. */
typedef enum VGT_STAGES_HS_EN {
 HS_STAGE_OFF = 0x0,
 HS_STAGE_ON = 0x1,
} VGT_STAGES_HS_EN;
/* ES (export shader) stage configuration: off, running the DS, or a real ES. */
typedef enum VGT_STAGES_ES_EN {
 ES_STAGE_OFF = 0x0,
 ES_STAGE_DS = 0x1,
 ES_STAGE_REAL = 0x2,
 RESERVED_ES = 0x3,
} VGT_STAGES_ES_EN;
/* GS (geometry shader) stage enable field. */
typedef enum VGT_STAGES_GS_EN {
 GS_STAGE_OFF = 0x0,
 GS_STAGE_ON = 0x1,
} VGT_STAGES_GS_EN;
/* VS stage configuration: real VS, running the DS, or the GS copy shader. */
typedef enum VGT_STAGES_VS_EN {
 VS_STAGE_REAL = 0x0,
 VS_STAGE_DS = 0x1,
 VS_STAGE_COPY_SHADER = 0x2,
 RESERVED_VS = 0x3,
} VGT_STAGES_VS_EN;
/* Performance-counter event selects for the VGT block (auto-generated; mixed-case
 * names come straight from the hardware event list). */
typedef enum VGT_PERFCOUNT_SELECT {
 vgt_perf_VGT_SPI_ESTHREAD_EVENT_WINDOW_ACTIVE = 0x0,
 vgt_perf_VGT_SPI_ESVERT_VALID = 0x1,
 vgt_perf_VGT_SPI_ESVERT_EOV = 0x2,
 vgt_perf_VGT_SPI_ESVERT_STALLED = 0x3,
 vgt_perf_VGT_SPI_ESVERT_STARVED_BUSY = 0x4,
 vgt_perf_VGT_SPI_ESVERT_STARVED_IDLE = 0x5,
 vgt_perf_VGT_SPI_ESVERT_STATIC = 0x6,
 vgt_perf_VGT_SPI_ESTHREAD_IS_EVENT = 0x7,
 vgt_perf_VGT_SPI_ESTHREAD_SEND = 0x8,
 vgt_perf_VGT_SPI_GSPRIM_VALID = 0x9,
 vgt_perf_VGT_SPI_GSPRIM_EOV = 0xa,
 vgt_perf_VGT_SPI_GSPRIM_CONT = 0xb,
 vgt_perf_VGT_SPI_GSPRIM_STALLED = 0xc,
 vgt_perf_VGT_SPI_GSPRIM_STARVED_BUSY = 0xd,
 vgt_perf_VGT_SPI_GSPRIM_STARVED_IDLE = 0xe,
 vgt_perf_VGT_SPI_GSPRIM_STATIC = 0xf,
 vgt_perf_VGT_SPI_GSTHREAD_EVENT_WINDOW_ACTIVE = 0x10,
 vgt_perf_VGT_SPI_GSTHREAD_IS_EVENT = 0x11,
 vgt_perf_VGT_SPI_GSTHREAD_SEND = 0x12,
 vgt_perf_VGT_SPI_VSTHREAD_EVENT_WINDOW_ACTIVE = 0x13,
 vgt_perf_VGT_SPI_VSVERT_SEND = 0x14,
 vgt_perf_VGT_SPI_VSVERT_EOV = 0x15,
 vgt_perf_VGT_SPI_VSVERT_STALLED = 0x16,
 vgt_perf_VGT_SPI_VSVERT_STARVED_BUSY = 0x17,
 vgt_perf_VGT_SPI_VSVERT_STARVED_IDLE = 0x18,
 vgt_perf_VGT_SPI_VSVERT_STATIC = 0x19,
 vgt_perf_VGT_SPI_VSTHREAD_IS_EVENT = 0x1a,
 vgt_perf_VGT_SPI_VSTHREAD_SEND = 0x1b,
 vgt_perf_VGT_PA_EVENT_WINDOW_ACTIVE = 0x1c,
 vgt_perf_VGT_PA_CLIPV_SEND = 0x1d,
 vgt_perf_VGT_PA_CLIPV_FIRSTVERT = 0x1e,
 vgt_perf_VGT_PA_CLIPV_STALLED = 0x1f,
 vgt_perf_VGT_PA_CLIPV_STARVED_BUSY = 0x20,
 vgt_perf_VGT_PA_CLIPV_STARVED_IDLE = 0x21,
 vgt_perf_VGT_PA_CLIPV_STATIC = 0x22,
 vgt_perf_VGT_PA_CLIPP_SEND = 0x23,
 vgt_perf_VGT_PA_CLIPP_EOP = 0x24,
 vgt_perf_VGT_PA_CLIPP_IS_EVENT = 0x25,
 vgt_perf_VGT_PA_CLIPP_NULL_PRIM = 0x26,
 vgt_perf_VGT_PA_CLIPP_NEW_VTX_VECT = 0x27,
 vgt_perf_VGT_PA_CLIPP_STALLED = 0x28,
 vgt_perf_VGT_PA_CLIPP_STARVED_BUSY = 0x29,
 vgt_perf_VGT_PA_CLIPP_STARVED_IDLE = 0x2a,
 vgt_perf_VGT_PA_CLIPP_STATIC = 0x2b,
 vgt_perf_VGT_PA_CLIPS_SEND = 0x2c,
 vgt_perf_VGT_PA_CLIPS_STALLED = 0x2d,
 vgt_perf_VGT_PA_CLIPS_STARVED_BUSY = 0x2e,
 vgt_perf_VGT_PA_CLIPS_STARVED_IDLE = 0x2f,
 vgt_perf_VGT_PA_CLIPS_STATIC = 0x30,
 vgt_perf_vsvert_ds_send = 0x31,
 vgt_perf_vsvert_api_send = 0x32,
 vgt_perf_hs_tif_stall = 0x33,
 vgt_perf_hs_input_stall = 0x34,
 vgt_perf_hs_interface_stall = 0x35,
 vgt_perf_hs_tfm_stall = 0x36,
 vgt_perf_te11_starved = 0x37,
 vgt_perf_gs_event_stall = 0x38,
 vgt_perf_vgt_pa_clipp_send_not_event = 0x39,
 vgt_perf_vgt_pa_clipp_valid_prim = 0x3a,
 vgt_perf_reused_es_indices = 0x3b,
 vgt_perf_vs_cache_hits = 0x3c,
 vgt_perf_gs_cache_hits = 0x3d,
 vgt_perf_ds_cache_hits = 0x3e,
 vgt_perf_total_cache_hits = 0x3f,
 vgt_perf_vgt_busy = 0x40,
 vgt_perf_vgt_gs_busy = 0x41,
 vgt_perf_esvert_stalled_es_tbl = 0x42,
 vgt_perf_esvert_stalled_gs_tbl = 0x43,
 vgt_perf_esvert_stalled_gs_event = 0x44,
 vgt_perf_esvert_stalled_gsprim = 0x45,
 vgt_perf_gsprim_stalled_es_tbl = 0x46,
 vgt_perf_gsprim_stalled_gs_tbl = 0x47,
 vgt_perf_gsprim_stalled_gs_event = 0x48,
 vgt_perf_gsprim_stalled_esvert = 0x49,
 vgt_perf_esthread_stalled_es_rb_full = 0x4a,
 vgt_perf_esthread_stalled_spi_bp = 0x4b,
 vgt_perf_counters_avail_stalled = 0x4c,
 vgt_perf_gs_rb_space_avail_stalled = 0x4d,
 vgt_perf_gs_issue_rtr_stalled = 0x4e,
 vgt_perf_gsthread_stalled = 0x4f,
 vgt_perf_strmout_stalled = 0x50,
 vgt_perf_wait_for_es_done_stalled = 0x51,
 vgt_perf_cm_stalled_by_gog = 0x52,
 vgt_perf_cm_reading_stalled = 0x53,
 vgt_perf_cm_stalled_by_gsfetch_done = 0x54,
 vgt_perf_gog_vs_tbl_stalled = 0x55,
 vgt_perf_gog_out_indx_stalled = 0x56,
 vgt_perf_gog_out_prim_stalled = 0x57,
 vgt_perf_waveid_stalled = 0x58,
 vgt_perf_gog_busy = 0x59,
 vgt_perf_reused_vs_indices = 0x5a,
 vgt_perf_sclk_reg_vld_event = 0x5b,
 vgt_perf_vs_conflicting_indices = 0x5c,
 vgt_perf_sclk_core_vld_event = 0x5d,
 vgt_perf_hswave_stalled = 0x5e,
 vgt_perf_sclk_gs_vld_event = 0x5f,
 vgt_perf_VGT_SPI_LSVERT_VALID = 0x60,
 vgt_perf_VGT_SPI_LSVERT_EOV = 0x61,
 vgt_perf_VGT_SPI_LSVERT_STALLED = 0x62,
 vgt_perf_VGT_SPI_LSVERT_STARVED_BUSY = 0x63,
 vgt_perf_VGT_SPI_LSVERT_STARVED_IDLE = 0x64,
 vgt_perf_VGT_SPI_LSVERT_STATIC = 0x65,
 vgt_perf_VGT_SPI_LSWAVE_EVENT_WINDOW_ACTIVE = 0x66,
 vgt_perf_VGT_SPI_LSWAVE_IS_EVENT = 0x67,
 vgt_perf_VGT_SPI_LSWAVE_SEND = 0x68,
 vgt_perf_VGT_SPI_HSVERT_VALID = 0x69,
 vgt_perf_VGT_SPI_HSVERT_EOV = 0x6a,
 vgt_perf_VGT_SPI_HSVERT_STALLED = 0x6b,
 vgt_perf_VGT_SPI_HSVERT_STARVED_BUSY = 0x6c,
 vgt_perf_VGT_SPI_HSVERT_STARVED_IDLE = 0x6d,
 vgt_perf_VGT_SPI_HSVERT_STATIC = 0x6e,
 vgt_perf_VGT_SPI_HSWAVE_EVENT_WINDOW_ACTIVE = 0x6f,
 vgt_perf_VGT_SPI_HSWAVE_IS_EVENT = 0x70,
 vgt_perf_VGT_SPI_HSWAVE_SEND = 0x71,
 vgt_perf_ds_prims = 0x72,
 vgt_perf_ls_thread_groups = 0x73,
 vgt_perf_hs_thread_groups = 0x74,
 vgt_perf_es_thread_groups = 0x75,
 vgt_perf_vs_thread_groups = 0x76,
 vgt_perf_ls_done_latency = 0x77,
 vgt_perf_hs_done_latency = 0x78,
 vgt_perf_es_done_latency = 0x79,
 vgt_perf_gs_done_latency = 0x7a,
 vgt_perf_vgt_hs_busy = 0x7b,
 vgt_perf_vgt_te11_busy = 0x7c,
 vgt_perf_ls_flush = 0x7d,
 vgt_perf_hs_flush = 0x7e,
 vgt_perf_es_flush = 0x7f,
 vgt_perf_vgt_pa_clipp_eopg = 0x80,
 vgt_perf_ls_done = 0x81,
 vgt_perf_hs_done = 0x82,
 vgt_perf_es_done = 0x83,
 vgt_perf_gs_done = 0x84,
 vgt_perf_vsfetch_done = 0x85,
 vgt_perf_gs_done_received = 0x86,
 vgt_perf_es_ring_high_water_mark = 0x87,
 vgt_perf_gs_ring_high_water_mark = 0x88,
 vgt_perf_vs_table_high_water_mark = 0x89,
 vgt_perf_hs_tgs_active_high_water_mark = 0x8a,
 vgt_perf_pa_clipp_dealloc = 0x8b,
 vgt_perf_cut_mem_flush_stalled = 0x8c,
 vgt_perf_vsvert_work_received = 0x8d,
 vgt_perf_vgt_pa_clipp_starved_after_work = 0x8e,
 vgt_perf_te11_con_starved_after_work = 0x8f,
 vgt_perf_hs_waiting_on_ls_done_stall = 0x90,
 vgt_spi_vsvert_valid = 0x91,
} VGT_PERFCOUNT_SELECT;
/* Performance-counter event selects for the IA (input assembler) block. */
typedef enum IA_PERFCOUNT_SELECT {
 ia_perf_GRP_INPUT_EVENT_WINDOW_ACTIVE = 0x0,
 ia_perf_dma_data_fifo_full = 0x1,
 ia_perf_RESERVED1 = 0x2,
 ia_perf_RESERVED2 = 0x3,
 ia_perf_RESERVED3 = 0x4,
 ia_perf_RESERVED4 = 0x5,
 ia_perf_RESERVED5 = 0x6,
 ia_perf_MC_LAT_BIN_0 = 0x7,
 ia_perf_MC_LAT_BIN_1 = 0x8,
 ia_perf_MC_LAT_BIN_2 = 0x9,
 ia_perf_MC_LAT_BIN_3 = 0xa,
 ia_perf_MC_LAT_BIN_4 = 0xb,
 ia_perf_MC_LAT_BIN_5 = 0xc,
 ia_perf_MC_LAT_BIN_6 = 0xd,
 ia_perf_MC_LAT_BIN_7 = 0xe,
 ia_perf_ia_busy = 0xf,
 ia_perf_ia_sclk_reg_vld_event = 0x10,
 ia_perf_RESERVED6 = 0x11,
 ia_perf_ia_sclk_core_vld_event = 0x12,
 ia_perf_RESERVED7 = 0x13,
 ia_perf_ia_dma_return = 0x14,
 ia_perf_ia_stalled = 0x15,
 ia_perf_shift_starved_pipe0_event = 0x16,
 ia_perf_shift_starved_pipe1_event = 0x17,
} IA_PERFCOUNT_SELECT;
/* Performance-counter event selects for the WD (work distributor) block. */
typedef enum WD_PERFCOUNT_SELECT {
 wd_perf_RBIU_FIFOS_EVENT_WINDOW_ACTIVE = 0x0,
 wd_perf_RBIU_DR_FIFO_STARVED = 0x1,
 wd_perf_RBIU_DR_FIFO_STALLED = 0x2,
 wd_perf_RBIU_DI_FIFO_STARVED = 0x3,
 wd_perf_RBIU_DI_FIFO_STALLED = 0x4,
 wd_perf_wd_busy = 0x5,
 wd_perf_wd_sclk_reg_vld_event = 0x6,
 wd_perf_wd_sclk_input_vld_event = 0x7,
 wd_perf_wd_sclk_core_vld_event = 0x8,
 wd_perf_wd_stalled = 0x9,
 wd_perf_inside_tf_bin_0 = 0xa,
 wd_perf_inside_tf_bin_1 = 0xb,
 wd_perf_inside_tf_bin_2 = 0xc,
 wd_perf_inside_tf_bin_3 = 0xd,
 wd_perf_inside_tf_bin_4 = 0xe,
 wd_perf_inside_tf_bin_5 = 0xf,
 wd_perf_inside_tf_bin_6 = 0x10,
 wd_perf_inside_tf_bin_7 = 0x11,
 wd_perf_inside_tf_bin_8 = 0x12,
 wd_perf_tfreq_lat_bin_0 = 0x13,
 wd_perf_tfreq_lat_bin_1 = 0x14,
 wd_perf_tfreq_lat_bin_2 = 0x15,
 wd_perf_tfreq_lat_bin_3 = 0x16,
 wd_perf_tfreq_lat_bin_4 = 0x17,
 wd_perf_tfreq_lat_bin_5 = 0x18,
 wd_perf_tfreq_lat_bin_6 = 0x19,
 wd_perf_tfreq_lat_bin_7 = 0x1a,
 wd_starved_on_hs_done = 0x1b,
 wd_perf_se0_hs_done_latency = 0x1c,
 wd_perf_se1_hs_done_latency = 0x1d,
 wd_perf_se2_hs_done_latency = 0x1e,
 wd_perf_se3_hs_done_latency = 0x1f,
 wd_perf_hs_done_se0 = 0x20,
 wd_perf_hs_done_se1 = 0x21,
 wd_perf_hs_done_se2 = 0x22,
 wd_perf_hs_done_se3 = 0x23,
 wd_perf_null_patches = 0x24,
} WD_PERFCOUNT_SELECT;
/* Draw packet types passed from WD to IA. */
typedef enum WD_IA_DRAW_TYPE {
 WD_IA_DRAW_TYPE_DI_MM0 = 0x0,
 WD_IA_DRAW_TYPE_DI_MM1 = 0x1,
 WD_IA_DRAW_TYPE_EVENT_INIT = 0x2,
 WD_IA_DRAW_TYPE_EVENT_ADDR = 0x3,
 WD_IA_DRAW_TYPE_MIN_INDX = 0x4,
 WD_IA_DRAW_TYPE_MAX_INDX = 0x5,
 WD_IA_DRAW_TYPE_INDX_OFF = 0x6,
 WD_IA_DRAW_TYPE_IMM_DATA = 0x7,
} WD_IA_DRAW_TYPE;
/* Index source for WD/IA draws (DMA, immediate, auto-generated, or opaque/stream-out). */
typedef enum WD_IA_DRAW_SOURCE {
 WD_IA_DRAW_SOURCE_DMA = 0x0,
 WD_IA_DRAW_SOURCE_IMMD = 0x1,
 WD_IA_DRAW_SOURCE_AUTO = 0x2,
 WD_IA_DRAW_SOURCE_OPAQ = 0x3,
} WD_IA_DRAW_SOURCE;
/* Width-related constant for GS thread IDs — value from hardware spec; semantics not shown here. */
#define GSTHREADID_SIZE 0x2
/* Debug-bus block identifiers: one ID per observable hardware block instance
 * (per-SE/per-instance variants are enumerated individually). */
typedef enum DebugBlockId {
 DBG_BLOCK_ID_RESERVED = 0x0,
 DBG_BLOCK_ID_DBG = 0x1,
 DBG_BLOCK_ID_VMC = 0x2,
 DBG_BLOCK_ID_PDMA = 0x3,
 DBG_BLOCK_ID_CG = 0x4,
 DBG_BLOCK_ID_SRBM = 0x5,
 DBG_BLOCK_ID_GRBM = 0x6,
 DBG_BLOCK_ID_RLC = 0x7,
 DBG_BLOCK_ID_CSC = 0x8,
 DBG_BLOCK_ID_SEM = 0x9,
 DBG_BLOCK_ID_IH = 0xa,
 DBG_BLOCK_ID_SC = 0xb,
 DBG_BLOCK_ID_SQ = 0xc,
 DBG_BLOCK_ID_UVDU = 0xd,
 DBG_BLOCK_ID_SQA = 0xe,
 DBG_BLOCK_ID_SDMA0 = 0xf,
 DBG_BLOCK_ID_SDMA1 = 0x10,
 DBG_BLOCK_ID_SPIM = 0x11,
 DBG_BLOCK_ID_GDS = 0x12,
 DBG_BLOCK_ID_VC0 = 0x13,
 DBG_BLOCK_ID_VC1 = 0x14,
 DBG_BLOCK_ID_PA0 = 0x15,
 DBG_BLOCK_ID_PA1 = 0x16,
 DBG_BLOCK_ID_CP0 = 0x17,
 DBG_BLOCK_ID_CP1 = 0x18,
 DBG_BLOCK_ID_CP2 = 0x19,
 DBG_BLOCK_ID_XBR = 0x1a,
 DBG_BLOCK_ID_UVDM = 0x1b,
 DBG_BLOCK_ID_VGT0 = 0x1c,
 DBG_BLOCK_ID_VGT1 = 0x1d,
 DBG_BLOCK_ID_IA = 0x1e,
 DBG_BLOCK_ID_SXM0 = 0x1f,
 DBG_BLOCK_ID_SXM1 = 0x20,
 DBG_BLOCK_ID_SCT0 = 0x21,
 DBG_BLOCK_ID_SCT1 = 0x22,
 DBG_BLOCK_ID_SPM0 = 0x23,
 DBG_BLOCK_ID_SPM1 = 0x24,
 DBG_BLOCK_ID_UNUSED0 = 0x25,
 DBG_BLOCK_ID_UNUSED1 = 0x26,
 DBG_BLOCK_ID_TCAA = 0x27,
 DBG_BLOCK_ID_TCAB = 0x28,
 DBG_BLOCK_ID_TCCA = 0x29,
 DBG_BLOCK_ID_TCCB = 0x2a,
 DBG_BLOCK_ID_MCC0 = 0x2b,
 DBG_BLOCK_ID_MCC1 = 0x2c,
 DBG_BLOCK_ID_MCC2 = 0x2d,
 DBG_BLOCK_ID_MCC3 = 0x2e,
 DBG_BLOCK_ID_SXS0 = 0x2f,
 DBG_BLOCK_ID_SXS1 = 0x30,
 DBG_BLOCK_ID_SXS2 = 0x31,
 DBG_BLOCK_ID_SXS3 = 0x32,
 DBG_BLOCK_ID_SXS4 = 0x33,
 DBG_BLOCK_ID_SXS5 = 0x34,
 DBG_BLOCK_ID_SXS6 = 0x35,
 DBG_BLOCK_ID_SXS7 = 0x36,
 DBG_BLOCK_ID_SXS8 = 0x37,
 DBG_BLOCK_ID_SXS9 = 0x38,
 DBG_BLOCK_ID_BCI0 = 0x39,
 DBG_BLOCK_ID_BCI1 = 0x3a,
 DBG_BLOCK_ID_BCI2 = 0x3b,
 DBG_BLOCK_ID_BCI3 = 0x3c,
 DBG_BLOCK_ID_MCB = 0x3d,
 DBG_BLOCK_ID_UNUSED6 = 0x3e,
 DBG_BLOCK_ID_SQA00 = 0x3f,
 DBG_BLOCK_ID_SQA01 = 0x40,
 DBG_BLOCK_ID_SQA02 = 0x41,
 DBG_BLOCK_ID_SQA10 = 0x42,
 DBG_BLOCK_ID_SQA11 = 0x43,
 DBG_BLOCK_ID_SQA12 = 0x44,
 DBG_BLOCK_ID_UNUSED7 = 0x45,
 DBG_BLOCK_ID_UNUSED8 = 0x46,
 DBG_BLOCK_ID_SQB00 = 0x47,
 DBG_BLOCK_ID_SQB01 = 0x48,
 DBG_BLOCK_ID_SQB10 = 0x49,
 DBG_BLOCK_ID_SQB11 = 0x4a,
 DBG_BLOCK_ID_SQ00 = 0x4b,
 DBG_BLOCK_ID_SQ01 = 0x4c,
 DBG_BLOCK_ID_SQ10 = 0x4d,
 DBG_BLOCK_ID_SQ11 = 0x4e,
 DBG_BLOCK_ID_CB00 = 0x4f,
 DBG_BLOCK_ID_CB01 = 0x50,
 DBG_BLOCK_ID_CB02 = 0x51,
 DBG_BLOCK_ID_CB03 = 0x52,
 DBG_BLOCK_ID_CB04 = 0x53,
 DBG_BLOCK_ID_UNUSED9 = 0x54,
 DBG_BLOCK_ID_UNUSED10 = 0x55,
 DBG_BLOCK_ID_UNUSED11 = 0x56,
 DBG_BLOCK_ID_CB10 = 0x57,
 DBG_BLOCK_ID_CB11 = 0x58,
 DBG_BLOCK_ID_CB12 = 0x59,
 DBG_BLOCK_ID_CB13 = 0x5a,
 DBG_BLOCK_ID_CB14 = 0x5b,
 DBG_BLOCK_ID_UNUSED12 = 0x5c,
 DBG_BLOCK_ID_UNUSED13 = 0x5d,
 DBG_BLOCK_ID_UNUSED14 = 0x5e,
 DBG_BLOCK_ID_TCP0 = 0x5f,
 DBG_BLOCK_ID_TCP1 = 0x60,
 DBG_BLOCK_ID_TCP2 = 0x61,
 DBG_BLOCK_ID_TCP3 = 0x62,
 DBG_BLOCK_ID_TCP4 = 0x63,
 DBG_BLOCK_ID_TCP5 = 0x64,
 DBG_BLOCK_ID_TCP6 = 0x65,
 DBG_BLOCK_ID_TCP7 = 0x66,
 DBG_BLOCK_ID_TCP8 = 0x67,
 DBG_BLOCK_ID_TCP9 = 0x68,
 DBG_BLOCK_ID_TCP10 = 0x69,
 DBG_BLOCK_ID_TCP11 = 0x6a,
 DBG_BLOCK_ID_TCP12 = 0x6b,
 DBG_BLOCK_ID_TCP13 = 0x6c,
 DBG_BLOCK_ID_TCP14 = 0x6d,
 DBG_BLOCK_ID_TCP15 = 0x6e,
 DBG_BLOCK_ID_TCP16 = 0x6f,
 DBG_BLOCK_ID_TCP17 = 0x70,
 DBG_BLOCK_ID_TCP18 = 0x71,
 DBG_BLOCK_ID_TCP19 = 0x72,
 DBG_BLOCK_ID_TCP20 = 0x73,
 DBG_BLOCK_ID_TCP21 = 0x74,
 DBG_BLOCK_ID_TCP22 = 0x75,
 DBG_BLOCK_ID_TCP23 = 0x76,
 DBG_BLOCK_ID_TCP_RESERVED0 = 0x77,
 DBG_BLOCK_ID_TCP_RESERVED1 = 0x78,
 DBG_BLOCK_ID_TCP_RESERVED2 = 0x79,
 DBG_BLOCK_ID_TCP_RESERVED3 = 0x7a,
 DBG_BLOCK_ID_TCP_RESERVED4 = 0x7b,
 DBG_BLOCK_ID_TCP_RESERVED5 = 0x7c,
 DBG_BLOCK_ID_TCP_RESERVED6 = 0x7d,
 DBG_BLOCK_ID_TCP_RESERVED7 = 0x7e,
 DBG_BLOCK_ID_DB00 = 0x7f,
 DBG_BLOCK_ID_DB01 = 0x80,
 DBG_BLOCK_ID_DB02 = 0x81,
 DBG_BLOCK_ID_DB03 = 0x82,
 DBG_BLOCK_ID_DB04 = 0x83,
 DBG_BLOCK_ID_UNUSED15 = 0x84,
 DBG_BLOCK_ID_UNUSED16 = 0x85,
 DBG_BLOCK_ID_UNUSED17 = 0x86,
 DBG_BLOCK_ID_DB10 = 0x87,
 DBG_BLOCK_ID_DB11 = 0x88,
 DBG_BLOCK_ID_DB12 = 0x89,
 DBG_BLOCK_ID_DB13 = 0x8a,
 DBG_BLOCK_ID_DB14 = 0x8b,
 DBG_BLOCK_ID_UNUSED18 = 0x8c,
 DBG_BLOCK_ID_UNUSED19 = 0x8d,
 DBG_BLOCK_ID_UNUSED20 = 0x8e,
 DBG_BLOCK_ID_TCC0 = 0x8f,
 DBG_BLOCK_ID_TCC1 = 0x90,
 DBG_BLOCK_ID_TCC2 = 0x91,
 DBG_BLOCK_ID_TCC3 = 0x92,
 DBG_BLOCK_ID_TCC4 = 0x93,
 DBG_BLOCK_ID_TCC5 = 0x94,
 DBG_BLOCK_ID_TCC6 = 0x95,
 DBG_BLOCK_ID_TCC7 = 0x96,
 DBG_BLOCK_ID_SPS00 = 0x97,
 DBG_BLOCK_ID_SPS01 = 0x98,
 DBG_BLOCK_ID_SPS02 = 0x99,
 DBG_BLOCK_ID_SPS10 = 0x9a,
 DBG_BLOCK_ID_SPS11 = 0x9b,
 DBG_BLOCK_ID_SPS12 = 0x9c,
 DBG_BLOCK_ID_UNUSED21 = 0x9d,
 DBG_BLOCK_ID_UNUSED22 = 0x9e,
 DBG_BLOCK_ID_TA00 = 0x9f,
 DBG_BLOCK_ID_TA01 = 0xa0,
 DBG_BLOCK_ID_TA02 = 0xa1,
 DBG_BLOCK_ID_TA03 = 0xa2,
 DBG_BLOCK_ID_TA04 = 0xa3,
 DBG_BLOCK_ID_TA05 = 0xa4,
 DBG_BLOCK_ID_TA06 = 0xa5,
 DBG_BLOCK_ID_TA07 = 0xa6,
 DBG_BLOCK_ID_TA08 = 0xa7,
 DBG_BLOCK_ID_TA09 = 0xa8,
 DBG_BLOCK_ID_TA0A = 0xa9,
 DBG_BLOCK_ID_TA0B = 0xaa,
 DBG_BLOCK_ID_UNUSED23 = 0xab,
 DBG_BLOCK_ID_UNUSED24 = 0xac,
 DBG_BLOCK_ID_UNUSED25 = 0xad,
 DBG_BLOCK_ID_UNUSED26 = 0xae,
 DBG_BLOCK_ID_TA10 = 0xaf,
 DBG_BLOCK_ID_TA11 = 0xb0,
 DBG_BLOCK_ID_TA12 = 0xb1,
 DBG_BLOCK_ID_TA13 = 0xb2,
 DBG_BLOCK_ID_TA14 = 0xb3,
 DBG_BLOCK_ID_TA15 = 0xb4,
 DBG_BLOCK_ID_TA16 = 0xb5,
 DBG_BLOCK_ID_TA17 = 0xb6,
 DBG_BLOCK_ID_TA18 = 0xb7,
 DBG_BLOCK_ID_TA19 = 0xb8,
 DBG_BLOCK_ID_TA1A = 0xb9,
 DBG_BLOCK_ID_TA1B = 0xba,
 DBG_BLOCK_ID_UNUSED27 = 0xbb,
 DBG_BLOCK_ID_UNUSED28 = 0xbc,
 DBG_BLOCK_ID_UNUSED29 = 0xbd,
 DBG_BLOCK_ID_UNUSED30 = 0xbe,
 DBG_BLOCK_ID_TD00 = 0xbf,
 DBG_BLOCK_ID_TD01 = 0xc0,
 DBG_BLOCK_ID_TD02 = 0xc1,
 DBG_BLOCK_ID_TD03 = 0xc2,
 DBG_BLOCK_ID_TD04 = 0xc3,
 DBG_BLOCK_ID_TD05 = 0xc4,
 DBG_BLOCK_ID_TD06 = 0xc5,
 DBG_BLOCK_ID_TD07 = 0xc6,
 DBG_BLOCK_ID_TD08 = 0xc7,
 DBG_BLOCK_ID_TD09 = 0xc8,
 DBG_BLOCK_ID_TD0A = 0xc9,
 DBG_BLOCK_ID_TD0B = 0xca,
 DBG_BLOCK_ID_UNUSED31 = 0xcb,
 DBG_BLOCK_ID_UNUSED32 = 0xcc,
 DBG_BLOCK_ID_UNUSED33 = 0xcd,
 DBG_BLOCK_ID_UNUSED34 = 0xce,
 DBG_BLOCK_ID_TD10 = 0xcf,
 DBG_BLOCK_ID_TD11 = 0xd0,
 DBG_BLOCK_ID_TD12 = 0xd1,
 DBG_BLOCK_ID_TD13 = 0xd2,
 DBG_BLOCK_ID_TD14 = 0xd3,
 DBG_BLOCK_ID_TD15 = 0xd4,
 DBG_BLOCK_ID_TD16 = 0xd5,
 DBG_BLOCK_ID_TD17 = 0xd6,
 DBG_BLOCK_ID_TD18 = 0xd7,
 DBG_BLOCK_ID_TD19 = 0xd8,
 DBG_BLOCK_ID_TD1A = 0xd9,
 DBG_BLOCK_ID_TD1B = 0xda,
 DBG_BLOCK_ID_UNUSED35 = 0xdb,
 DBG_BLOCK_ID_UNUSED36 = 0xdc,
 DBG_BLOCK_ID_UNUSED37 = 0xdd,
 DBG_BLOCK_ID_UNUSED38 = 0xde,
 DBG_BLOCK_ID_LDS00 = 0xdf,
 DBG_BLOCK_ID_LDS01 = 0xe0,
 DBG_BLOCK_ID_LDS02 = 0xe1,
 DBG_BLOCK_ID_LDS03 = 0xe2,
 DBG_BLOCK_ID_LDS04 = 0xe3,
 DBG_BLOCK_ID_LDS05 = 0xe4,
 DBG_BLOCK_ID_LDS06 = 0xe5,
 DBG_BLOCK_ID_LDS07 = 0xe6,
 DBG_BLOCK_ID_LDS08 = 0xe7,
 DBG_BLOCK_ID_LDS09 = 0xe8,
 DBG_BLOCK_ID_LDS0A = 0xe9,
 DBG_BLOCK_ID_LDS0B = 0xea,
 DBG_BLOCK_ID_UNUSED39 = 0xeb,
 DBG_BLOCK_ID_UNUSED40 = 0xec,
 DBG_BLOCK_ID_UNUSED41 = 0xed,
 DBG_BLOCK_ID_UNUSED42 = 0xee,
 DBG_BLOCK_ID_LDS10 = 0xef,
 DBG_BLOCK_ID_LDS11 = 0xf0,
 DBG_BLOCK_ID_LDS12 = 0xf1,
 DBG_BLOCK_ID_LDS13 = 0xf2,
 DBG_BLOCK_ID_LDS14 = 0xf3,
 DBG_BLOCK_ID_LDS15 = 0xf4,
 DBG_BLOCK_ID_LDS16 = 0xf5,
 DBG_BLOCK_ID_LDS17 = 0xf6,
 DBG_BLOCK_ID_LDS18 = 0xf7,
 DBG_BLOCK_ID_LDS19 = 0xf8,
 DBG_BLOCK_ID_LDS1A = 0xf9,
 DBG_BLOCK_ID_LDS1B = 0xfa,
 DBG_BLOCK_ID_UNUSED43 = 0xfb,
 DBG_BLOCK_ID_UNUSED44 = 0xfc,
 DBG_BLOCK_ID_UNUSED45 = 0xfd,
 DBG_BLOCK_ID_UNUSED46 = 0xfe,
} DebugBlockId;
/* Debug-bus block IDs, pair-granularity variant: each value addresses a pair of
 * DebugBlockId entries (encoding is the full ID divided by 2). */
typedef enum DebugBlockId_BY2 {
 DBG_BLOCK_ID_RESERVED_BY2 = 0x0,
 DBG_BLOCK_ID_VMC_BY2 = 0x1,
 DBG_BLOCK_ID_UNUSED0_BY2 = 0x2,
 DBG_BLOCK_ID_GRBM_BY2 = 0x3,
 DBG_BLOCK_ID_CSC_BY2 = 0x4,
 DBG_BLOCK_ID_IH_BY2 = 0x5,
 DBG_BLOCK_ID_SQ_BY2 = 0x6,
 DBG_BLOCK_ID_UVD_BY2 = 0x7,
 DBG_BLOCK_ID_SDMA0_BY2 = 0x8,
 DBG_BLOCK_ID_SPIM_BY2 = 0x9,
 DBG_BLOCK_ID_VC0_BY2 = 0xa,
 DBG_BLOCK_ID_PA_BY2 = 0xb,
 DBG_BLOCK_ID_CP0_BY2 = 0xc,
 DBG_BLOCK_ID_CP2_BY2 = 0xd,
 DBG_BLOCK_ID_PC0_BY2 = 0xe,
 DBG_BLOCK_ID_BCI0_BY2 = 0xf,
 DBG_BLOCK_ID_SXM0_BY2 = 0x10,
 DBG_BLOCK_ID_SCT0_BY2 = 0x11,
 DBG_BLOCK_ID_SPM0_BY2 = 0x12,
 DBG_BLOCK_ID_BCI2_BY2 = 0x13,
 DBG_BLOCK_ID_TCA_BY2 = 0x14,
 DBG_BLOCK_ID_TCCA_BY2 = 0x15,
 DBG_BLOCK_ID_MCC_BY2 = 0x16,
 DBG_BLOCK_ID_MCC2_BY2 = 0x17,
 DBG_BLOCK_ID_MCD_BY2 = 0x18,
 DBG_BLOCK_ID_MCD2_BY2 = 0x19,
 DBG_BLOCK_ID_MCD4_BY2 = 0x1a,
 DBG_BLOCK_ID_MCB_BY2 = 0x1b,
 DBG_BLOCK_ID_SQA_BY2 = 0x1c,
 DBG_BLOCK_ID_SQA02_BY2 = 0x1d,
 DBG_BLOCK_ID_SQA11_BY2 = 0x1e,
 DBG_BLOCK_ID_UNUSED8_BY2 = 0x1f,
 DBG_BLOCK_ID_SQB_BY2 = 0x20,
 DBG_BLOCK_ID_SQB10_BY2 = 0x21,
 DBG_BLOCK_ID_UNUSED10_BY2 = 0x22,
 DBG_BLOCK_ID_UNUSED12_BY2 = 0x23,
 DBG_BLOCK_ID_CB_BY2 = 0x24,
 DBG_BLOCK_ID_CB02_BY2 = 0x25,
 DBG_BLOCK_ID_CB10_BY2 = 0x26,
 DBG_BLOCK_ID_CB12_BY2 = 0x27,
 DBG_BLOCK_ID_SXS_BY2 = 0x28,
 DBG_BLOCK_ID_SXS2_BY2 = 0x29,
 DBG_BLOCK_ID_SXS4_BY2 = 0x2a,
 DBG_BLOCK_ID_SXS6_BY2 = 0x2b,
 DBG_BLOCK_ID_DB_BY2 = 0x2c,
 DBG_BLOCK_ID_DB02_BY2 = 0x2d,
 DBG_BLOCK_ID_DB10_BY2 = 0x2e,
 DBG_BLOCK_ID_DB12_BY2 = 0x2f,
 DBG_BLOCK_ID_TCP_BY2 = 0x30,
 DBG_BLOCK_ID_TCP2_BY2 = 0x31,
 DBG_BLOCK_ID_TCP4_BY2 = 0x32,
 DBG_BLOCK_ID_TCP6_BY2 = 0x33,
 DBG_BLOCK_ID_TCP8_BY2 = 0x34,
 DBG_BLOCK_ID_TCP10_BY2 = 0x35,
 DBG_BLOCK_ID_TCP12_BY2 = 0x36,
 DBG_BLOCK_ID_TCP14_BY2 = 0x37,
 DBG_BLOCK_ID_TCP16_BY2 = 0x38,
 DBG_BLOCK_ID_TCP18_BY2 = 0x39,
 DBG_BLOCK_ID_TCP20_BY2 = 0x3a,
 DBG_BLOCK_ID_TCP22_BY2 = 0x3b,
 DBG_BLOCK_ID_TCP_RESERVED0_BY2 = 0x3c,
 DBG_BLOCK_ID_TCP_RESERVED2_BY2 = 0x3d,
 DBG_BLOCK_ID_TCP_RESERVED4_BY2 = 0x3e,
 DBG_BLOCK_ID_TCP_RESERVED6_BY2 = 0x3f,
 DBG_BLOCK_ID_TCC_BY2 = 0x40,
 DBG_BLOCK_ID_TCC2_BY2 = 0x41,
 DBG_BLOCK_ID_TCC4_BY2 = 0x42,
 DBG_BLOCK_ID_TCC6_BY2 = 0x43,
 DBG_BLOCK_ID_SPS_BY2 = 0x44,
 DBG_BLOCK_ID_SPS02_BY2 = 0x45,
 DBG_BLOCK_ID_SPS11_BY2 = 0x46,
 DBG_BLOCK_ID_UNUSED14_BY2 = 0x47,
 DBG_BLOCK_ID_TA_BY2 = 0x48,
 DBG_BLOCK_ID_TA02_BY2 = 0x49,
 DBG_BLOCK_ID_TA04_BY2 = 0x4a,
 DBG_BLOCK_ID_TA06_BY2 = 0x4b,
 DBG_BLOCK_ID_TA08_BY2 = 0x4c,
 DBG_BLOCK_ID_TA0A_BY2 = 0x4d,
 DBG_BLOCK_ID_UNUSED20_BY2 = 0x4e,
 DBG_BLOCK_ID_UNUSED22_BY2 = 0x4f,
 DBG_BLOCK_ID_TA10_BY2 = 0x50,
 DBG_BLOCK_ID_TA12_BY2 = 0x51,
 DBG_BLOCK_ID_TA14_BY2 = 0x52,
 DBG_BLOCK_ID_TA16_BY2 = 0x53,
 DBG_BLOCK_ID_TA18_BY2 = 0x54,
 DBG_BLOCK_ID_TA1A_BY2 = 0x55,
 DBG_BLOCK_ID_UNUSED24_BY2 = 0x56,
 DBG_BLOCK_ID_UNUSED26_BY2 = 0x57,
 DBG_BLOCK_ID_TD_BY2 = 0x58,
 DBG_BLOCK_ID_TD02_BY2 = 0x59,
 DBG_BLOCK_ID_TD04_BY2 = 0x5a,
 DBG_BLOCK_ID_TD06_BY2 = 0x5b,
 DBG_BLOCK_ID_TD08_BY2 = 0x5c,
 DBG_BLOCK_ID_TD0A_BY2 = 0x5d,
 DBG_BLOCK_ID_UNUSED28_BY2 = 0x5e,
 DBG_BLOCK_ID_UNUSED30_BY2 = 0x5f,
 DBG_BLOCK_ID_TD10_BY2 = 0x60,
 DBG_BLOCK_ID_TD12_BY2 = 0x61,
 DBG_BLOCK_ID_TD14_BY2 = 0x62,
 DBG_BLOCK_ID_TD16_BY2 = 0x63,
 DBG_BLOCK_ID_TD18_BY2 = 0x64,
 DBG_BLOCK_ID_TD1A_BY2 = 0x65,
 DBG_BLOCK_ID_UNUSED32_BY2 = 0x66,
 DBG_BLOCK_ID_UNUSED34_BY2 = 0x67,
 DBG_BLOCK_ID_LDS_BY2 = 0x68,
 DBG_BLOCK_ID_LDS02_BY2 = 0x69,
 DBG_BLOCK_ID_LDS04_BY2 = 0x6a,
 DBG_BLOCK_ID_LDS06_BY2 = 0x6b,
 DBG_BLOCK_ID_LDS08_BY2 = 0x6c,
 DBG_BLOCK_ID_LDS0A_BY2 = 0x6d,
 DBG_BLOCK_ID_UNUSED36_BY2 = 0x6e,
 DBG_BLOCK_ID_UNUSED38_BY2 = 0x6f,
 DBG_BLOCK_ID_LDS10_BY2 = 0x70,
 DBG_BLOCK_ID_LDS12_BY2 = 0x71,
 DBG_BLOCK_ID_LDS14_BY2 = 0x72,
 DBG_BLOCK_ID_LDS16_BY2 = 0x73,
 DBG_BLOCK_ID_LDS18_BY2 = 0x74,
 DBG_BLOCK_ID_LDS1A_BY2 = 0x75,
 DBG_BLOCK_ID_UNUSED40_BY2 = 0x76,
 DBG_BLOCK_ID_UNUSED42_BY2 = 0x77,
} DebugBlockId_BY2;
6135typedef enum DebugBlockId_BY4 {
6136 DBG_BLOCK_ID_RESERVED_BY4 = 0x0,
6137 DBG_BLOCK_ID_UNUSED0_BY4 = 0x1,
6138 DBG_BLOCK_ID_CSC_BY4 = 0x2,
6139 DBG_BLOCK_ID_SQ_BY4 = 0x3,
6140 DBG_BLOCK_ID_SDMA0_BY4 = 0x4,
6141 DBG_BLOCK_ID_VC0_BY4 = 0x5,
6142 DBG_BLOCK_ID_CP0_BY4 = 0x6,
6143 DBG_BLOCK_ID_UNUSED1_BY4 = 0x7,
6144 DBG_BLOCK_ID_SXM0_BY4 = 0x8,
6145 DBG_BLOCK_ID_SPM0_BY4 = 0x9,
6146 DBG_BLOCK_ID_TCAA_BY4 = 0xa,
6147 DBG_BLOCK_ID_MCC_BY4 = 0xb,
6148 DBG_BLOCK_ID_MCD_BY4 = 0xc,
6149 DBG_BLOCK_ID_MCD4_BY4 = 0xd,
6150 DBG_BLOCK_ID_SQA_BY4 = 0xe,
6151 DBG_BLOCK_ID_SQA11_BY4 = 0xf,
6152 DBG_BLOCK_ID_SQB_BY4 = 0x10,
6153 DBG_BLOCK_ID_UNUSED10_BY4 = 0x11,
6154 DBG_BLOCK_ID_CB_BY4 = 0x12,
6155 DBG_BLOCK_ID_CB10_BY4 = 0x13,
6156 DBG_BLOCK_ID_SXS_BY4 = 0x14,
6157 DBG_BLOCK_ID_SXS4_BY4 = 0x15,
6158 DBG_BLOCK_ID_DB_BY4 = 0x16,
6159 DBG_BLOCK_ID_DB10_BY4 = 0x17,
6160 DBG_BLOCK_ID_TCP_BY4 = 0x18,
6161 DBG_BLOCK_ID_TCP4_BY4 = 0x19,
6162 DBG_BLOCK_ID_TCP8_BY4 = 0x1a,
6163 DBG_BLOCK_ID_TCP12_BY4 = 0x1b,
6164 DBG_BLOCK_ID_TCP16_BY4 = 0x1c,
6165 DBG_BLOCK_ID_TCP20_BY4 = 0x1d,
6166 DBG_BLOCK_ID_TCP_RESERVED0_BY4 = 0x1e,
6167 DBG_BLOCK_ID_TCP_RESERVED4_BY4 = 0x1f,
6168 DBG_BLOCK_ID_TCC_BY4 = 0x20,
6169 DBG_BLOCK_ID_TCC4_BY4 = 0x21,
6170 DBG_BLOCK_ID_SPS_BY4 = 0x22,
6171 DBG_BLOCK_ID_SPS11_BY4 = 0x23,
6172 DBG_BLOCK_ID_TA_BY4 = 0x24,
6173 DBG_BLOCK_ID_TA04_BY4 = 0x25,
6174 DBG_BLOCK_ID_TA08_BY4 = 0x26,
6175 DBG_BLOCK_ID_UNUSED20_BY4 = 0x27,
6176 DBG_BLOCK_ID_TA10_BY4 = 0x28,
6177 DBG_BLOCK_ID_TA14_BY4 = 0x29,
6178 DBG_BLOCK_ID_TA18_BY4 = 0x2a,
6179 DBG_BLOCK_ID_UNUSED24_BY4 = 0x2b,
6180 DBG_BLOCK_ID_TD_BY4 = 0x2c,
6181 DBG_BLOCK_ID_TD04_BY4 = 0x2d,
6182 DBG_BLOCK_ID_TD08_BY4 = 0x2e,
6183 DBG_BLOCK_ID_UNUSED28_BY4 = 0x2f,
6184 DBG_BLOCK_ID_TD10_BY4 = 0x30,
6185 DBG_BLOCK_ID_TD14_BY4 = 0x31,
6186 DBG_BLOCK_ID_TD18_BY4 = 0x32,
6187 DBG_BLOCK_ID_UNUSED32_BY4 = 0x33,
6188 DBG_BLOCK_ID_LDS_BY4 = 0x34,
6189 DBG_BLOCK_ID_LDS04_BY4 = 0x35,
6190 DBG_BLOCK_ID_LDS08_BY4 = 0x36,
6191 DBG_BLOCK_ID_UNUSED36_BY4 = 0x37,
6192 DBG_BLOCK_ID_LDS10_BY4 = 0x38,
6193 DBG_BLOCK_ID_LDS14_BY4 = 0x39,
6194 DBG_BLOCK_ID_LDS18_BY4 = 0x3a,
6195 DBG_BLOCK_ID_UNUSED40_BY4 = 0x3b,
6196} DebugBlockId_BY4;
6197typedef enum DebugBlockId_BY8 {
6198 DBG_BLOCK_ID_RESERVED_BY8 = 0x0,
6199 DBG_BLOCK_ID_CSC_BY8 = 0x1,
6200 DBG_BLOCK_ID_SDMA0_BY8 = 0x2,
6201 DBG_BLOCK_ID_CP0_BY8 = 0x3,
6202 DBG_BLOCK_ID_SXM0_BY8 = 0x4,
6203 DBG_BLOCK_ID_TCA_BY8 = 0x5,
6204 DBG_BLOCK_ID_MCD_BY8 = 0x6,
6205 DBG_BLOCK_ID_SQA_BY8 = 0x7,
6206 DBG_BLOCK_ID_SQB_BY8 = 0x8,
6207 DBG_BLOCK_ID_CB_BY8 = 0x9,
6208 DBG_BLOCK_ID_SXS_BY8 = 0xa,
6209 DBG_BLOCK_ID_DB_BY8 = 0xb,
6210 DBG_BLOCK_ID_TCP_BY8 = 0xc,
6211 DBG_BLOCK_ID_TCP8_BY8 = 0xd,
6212 DBG_BLOCK_ID_TCP16_BY8 = 0xe,
6213 DBG_BLOCK_ID_TCP_RESERVED0_BY8 = 0xf,
6214 DBG_BLOCK_ID_TCC_BY8 = 0x10,
6215 DBG_BLOCK_ID_SPS_BY8 = 0x11,
6216 DBG_BLOCK_ID_TA_BY8 = 0x12,
6217 DBG_BLOCK_ID_TA08_BY8 = 0x13,
6218 DBG_BLOCK_ID_TA10_BY8 = 0x14,
6219 DBG_BLOCK_ID_TA18_BY8 = 0x15,
6220 DBG_BLOCK_ID_TD_BY8 = 0x16,
6221 DBG_BLOCK_ID_TD08_BY8 = 0x17,
6222 DBG_BLOCK_ID_TD10_BY8 = 0x18,
6223 DBG_BLOCK_ID_TD18_BY8 = 0x19,
6224 DBG_BLOCK_ID_LDS_BY8 = 0x1a,
6225 DBG_BLOCK_ID_LDS08_BY8 = 0x1b,
6226 DBG_BLOCK_ID_LDS10_BY8 = 0x1c,
6227 DBG_BLOCK_ID_LDS18_BY8 = 0x1d,
6228} DebugBlockId_BY8;
6229typedef enum DebugBlockId_BY16 {
6230 DBG_BLOCK_ID_RESERVED_BY16 = 0x0,
6231 DBG_BLOCK_ID_SDMA0_BY16 = 0x1,
6232 DBG_BLOCK_ID_SXM_BY16 = 0x2,
6233 DBG_BLOCK_ID_MCD_BY16 = 0x3,
6234 DBG_BLOCK_ID_SQB_BY16 = 0x4,
6235 DBG_BLOCK_ID_SXS_BY16 = 0x5,
6236 DBG_BLOCK_ID_TCP_BY16 = 0x6,
6237 DBG_BLOCK_ID_TCP16_BY16 = 0x7,
6238 DBG_BLOCK_ID_TCC_BY16 = 0x8,
6239 DBG_BLOCK_ID_TA_BY16 = 0x9,
6240 DBG_BLOCK_ID_TA10_BY16 = 0xa,
6241 DBG_BLOCK_ID_TD_BY16 = 0xb,
6242 DBG_BLOCK_ID_TD10_BY16 = 0xc,
6243 DBG_BLOCK_ID_LDS_BY16 = 0xd,
6244 DBG_BLOCK_ID_LDS10_BY16 = 0xe,
6245} DebugBlockId_BY16;
6246typedef enum SurfaceEndian {
6247 ENDIAN_NONE = 0x0,
6248 ENDIAN_8IN16 = 0x1,
6249 ENDIAN_8IN32 = 0x2,
6250 ENDIAN_8IN64 = 0x3,
6251} SurfaceEndian;
6252typedef enum ArrayMode {
6253 ARRAY_LINEAR_GENERAL = 0x0,
6254 ARRAY_LINEAR_ALIGNED = 0x1,
6255 ARRAY_1D_TILED_THIN1 = 0x2,
6256 ARRAY_1D_TILED_THICK = 0x3,
6257 ARRAY_2D_TILED_THIN1 = 0x4,
6258 ARRAY_PRT_TILED_THIN1 = 0x5,
6259 ARRAY_PRT_2D_TILED_THIN1 = 0x6,
6260 ARRAY_2D_TILED_THICK = 0x7,
6261 ARRAY_2D_TILED_XTHICK = 0x8,
6262 ARRAY_PRT_TILED_THICK = 0x9,
6263 ARRAY_PRT_2D_TILED_THICK = 0xa,
6264 ARRAY_PRT_3D_TILED_THIN1 = 0xb,
6265 ARRAY_3D_TILED_THIN1 = 0xc,
6266 ARRAY_3D_TILED_THICK = 0xd,
6267 ARRAY_3D_TILED_XTHICK = 0xe,
6268 ARRAY_PRT_3D_TILED_THICK = 0xf,
6269} ArrayMode;
6270typedef enum PipeTiling {
6271 CONFIG_1_PIPE = 0x0,
6272 CONFIG_2_PIPE = 0x1,
6273 CONFIG_4_PIPE = 0x2,
6274 CONFIG_8_PIPE = 0x3,
6275} PipeTiling;
6276typedef enum BankTiling {
6277 CONFIG_4_BANK = 0x0,
6278 CONFIG_8_BANK = 0x1,
6279} BankTiling;
6280typedef enum GroupInterleave {
6281 CONFIG_256B_GROUP = 0x0,
6282 CONFIG_512B_GROUP = 0x1,
6283} GroupInterleave;
6284typedef enum RowTiling {
6285 CONFIG_1KB_ROW = 0x0,
6286 CONFIG_2KB_ROW = 0x1,
6287 CONFIG_4KB_ROW = 0x2,
6288 CONFIG_8KB_ROW = 0x3,
6289 CONFIG_1KB_ROW_OPT = 0x4,
6290 CONFIG_2KB_ROW_OPT = 0x5,
6291 CONFIG_4KB_ROW_OPT = 0x6,
6292 CONFIG_8KB_ROW_OPT = 0x7,
6293} RowTiling;
6294typedef enum BankSwapBytes {
6295 CONFIG_128B_SWAPS = 0x0,
6296 CONFIG_256B_SWAPS = 0x1,
6297 CONFIG_512B_SWAPS = 0x2,
6298 CONFIG_1KB_SWAPS = 0x3,
6299} BankSwapBytes;
6300typedef enum SampleSplitBytes {
6301 CONFIG_1KB_SPLIT = 0x0,
6302 CONFIG_2KB_SPLIT = 0x1,
6303 CONFIG_4KB_SPLIT = 0x2,
6304 CONFIG_8KB_SPLIT = 0x3,
6305} SampleSplitBytes;
6306typedef enum NumPipes {
6307 ADDR_CONFIG_1_PIPE = 0x0,
6308 ADDR_CONFIG_2_PIPE = 0x1,
6309 ADDR_CONFIG_4_PIPE = 0x2,
6310 ADDR_CONFIG_8_PIPE = 0x3,
6311} NumPipes;
6312typedef enum PipeInterleaveSize {
6313 ADDR_CONFIG_PIPE_INTERLEAVE_256B = 0x0,
6314 ADDR_CONFIG_PIPE_INTERLEAVE_512B = 0x1,
6315} PipeInterleaveSize;
6316typedef enum BankInterleaveSize {
6317 ADDR_CONFIG_BANK_INTERLEAVE_1 = 0x0,
6318 ADDR_CONFIG_BANK_INTERLEAVE_2 = 0x1,
6319 ADDR_CONFIG_BANK_INTERLEAVE_4 = 0x2,
6320 ADDR_CONFIG_BANK_INTERLEAVE_8 = 0x3,
6321} BankInterleaveSize;
6322typedef enum NumShaderEngines {
6323 ADDR_CONFIG_1_SHADER_ENGINE = 0x0,
6324 ADDR_CONFIG_2_SHADER_ENGINE = 0x1,
6325} NumShaderEngines;
6326typedef enum ShaderEngineTileSize {
6327 ADDR_CONFIG_SE_TILE_16 = 0x0,
6328 ADDR_CONFIG_SE_TILE_32 = 0x1,
6329} ShaderEngineTileSize;
6330typedef enum NumGPUs {
6331 ADDR_CONFIG_1_GPU = 0x0,
6332 ADDR_CONFIG_2_GPU = 0x1,
6333 ADDR_CONFIG_4_GPU = 0x2,
6334} NumGPUs;
6335typedef enum MultiGPUTileSize {
6336 ADDR_CONFIG_GPU_TILE_16 = 0x0,
6337 ADDR_CONFIG_GPU_TILE_32 = 0x1,
6338 ADDR_CONFIG_GPU_TILE_64 = 0x2,
6339 ADDR_CONFIG_GPU_TILE_128 = 0x3,
6340} MultiGPUTileSize;
6341typedef enum RowSize {
6342 ADDR_CONFIG_1KB_ROW = 0x0,
6343 ADDR_CONFIG_2KB_ROW = 0x1,
6344 ADDR_CONFIG_4KB_ROW = 0x2,
6345} RowSize;
6346typedef enum NumLowerPipes {
6347 ADDR_CONFIG_1_LOWER_PIPES = 0x0,
6348 ADDR_CONFIG_2_LOWER_PIPES = 0x1,
6349} NumLowerPipes;
6350typedef enum ColorTransform {
6351 DCC_CT_AUTO = 0x0,
6352 DCC_CT_NONE = 0x1,
6353 ABGR_TO_A_BG_G_RB = 0x2,
6354 BGRA_TO_BG_G_RB_A = 0x3,
6355} ColorTransform;
6356typedef enum CompareRef {
6357 REF_NEVER = 0x0,
6358 REF_LESS = 0x1,
6359 REF_EQUAL = 0x2,
6360 REF_LEQUAL = 0x3,
6361 REF_GREATER = 0x4,
6362 REF_NOTEQUAL = 0x5,
6363 REF_GEQUAL = 0x6,
6364 REF_ALWAYS = 0x7,
6365} CompareRef;
6366typedef enum ReadSize {
6367 READ_256_BITS = 0x0,
6368 READ_512_BITS = 0x1,
6369} ReadSize;
6370typedef enum DepthFormat {
6371 DEPTH_INVALID = 0x0,
6372 DEPTH_16 = 0x1,
6373 DEPTH_X8_24 = 0x2,
6374 DEPTH_8_24 = 0x3,
6375 DEPTH_X8_24_FLOAT = 0x4,
6376 DEPTH_8_24_FLOAT = 0x5,
6377 DEPTH_32_FLOAT = 0x6,
6378 DEPTH_X24_8_32_FLOAT = 0x7,
6379} DepthFormat;
6380typedef enum ZFormat {
6381 Z_INVALID = 0x0,
6382 Z_16 = 0x1,
6383 Z_24 = 0x2,
6384 Z_32_FLOAT = 0x3,
6385} ZFormat;
6386typedef enum StencilFormat {
6387 STENCIL_INVALID = 0x0,
6388 STENCIL_8 = 0x1,
6389} StencilFormat;
6390typedef enum CmaskMode {
6391 CMASK_CLEAR_NONE = 0x0,
6392 CMASK_CLEAR_ONE = 0x1,
6393 CMASK_CLEAR_ALL = 0x2,
6394 CMASK_ANY_EXPANDED = 0x3,
6395 CMASK_ALPHA0_FRAG1 = 0x4,
6396 CMASK_ALPHA0_FRAG2 = 0x5,
6397 CMASK_ALPHA0_FRAG4 = 0x6,
6398 CMASK_ALPHA0_FRAGS = 0x7,
6399 CMASK_ALPHA1_FRAG1 = 0x8,
6400 CMASK_ALPHA1_FRAG2 = 0x9,
6401 CMASK_ALPHA1_FRAG4 = 0xa,
6402 CMASK_ALPHA1_FRAGS = 0xb,
6403 CMASK_ALPHAX_FRAG1 = 0xc,
6404 CMASK_ALPHAX_FRAG2 = 0xd,
6405 CMASK_ALPHAX_FRAG4 = 0xe,
6406 CMASK_ALPHAX_FRAGS = 0xf,
6407} CmaskMode;
6408typedef enum QuadExportFormat {
6409 EXPORT_UNUSED = 0x0,
6410 EXPORT_32_R = 0x1,
6411 EXPORT_32_GR = 0x2,
6412 EXPORT_32_AR = 0x3,
6413 EXPORT_FP16_ABGR = 0x4,
6414 EXPORT_UNSIGNED16_ABGR = 0x5,
6415 EXPORT_SIGNED16_ABGR = 0x6,
6416 EXPORT_32_ABGR = 0x7,
6417 EXPORT_32BPP_8PIX = 0x8,
6418 EXPORT_16_16_UNSIGNED_8PIX = 0x9,
6419 EXPORT_16_16_SIGNED_8PIX = 0xa,
6420 EXPORT_16_16_FLOAT_8PIX = 0xb,
6421} QuadExportFormat;
6422typedef enum QuadExportFormatOld {
6423 EXPORT_4P_32BPC_ABGR = 0x0,
6424 EXPORT_4P_16BPC_ABGR = 0x1,
6425 EXPORT_4P_32BPC_GR = 0x2,
6426 EXPORT_4P_32BPC_AR = 0x3,
6427 EXPORT_2P_32BPC_ABGR = 0x4,
6428 EXPORT_8P_32BPC_R = 0x5,
6429} QuadExportFormatOld;
6430typedef enum ColorFormat {
6431 COLOR_INVALID = 0x0,
6432 COLOR_8 = 0x1,
6433 COLOR_16 = 0x2,
6434 COLOR_8_8 = 0x3,
6435 COLOR_32 = 0x4,
6436 COLOR_16_16 = 0x5,
6437 COLOR_10_11_11 = 0x6,
6438 COLOR_11_11_10 = 0x7,
6439 COLOR_10_10_10_2 = 0x8,
6440 COLOR_2_10_10_10 = 0x9,
6441 COLOR_8_8_8_8 = 0xa,
6442 COLOR_32_32 = 0xb,
6443 COLOR_16_16_16_16 = 0xc,
6444 COLOR_RESERVED_13 = 0xd,
6445 COLOR_32_32_32_32 = 0xe,
6446 COLOR_RESERVED_15 = 0xf,
6447 COLOR_5_6_5 = 0x10,
6448 COLOR_1_5_5_5 = 0x11,
6449 COLOR_5_5_5_1 = 0x12,
6450 COLOR_4_4_4_4 = 0x13,
6451 COLOR_8_24 = 0x14,
6452 COLOR_24_8 = 0x15,
6453 COLOR_X24_8_32_FLOAT = 0x16,
6454 COLOR_RESERVED_23 = 0x17,
6455 COLOR_RESERVED_24 = 0x18,
6456 COLOR_RESERVED_25 = 0x19,
6457 COLOR_RESERVED_26 = 0x1a,
6458 COLOR_RESERVED_27 = 0x1b,
6459 COLOR_RESERVED_28 = 0x1c,
6460 COLOR_RESERVED_29 = 0x1d,
6461 COLOR_RESERVED_30 = 0x1e,
6462} ColorFormat;
6463typedef enum SurfaceFormat {
6464 FMT_INVALID = 0x0,
6465 FMT_8 = 0x1,
6466 FMT_16 = 0x2,
6467 FMT_8_8 = 0x3,
6468 FMT_32 = 0x4,
6469 FMT_16_16 = 0x5,
6470 FMT_10_11_11 = 0x6,
6471 FMT_11_11_10 = 0x7,
6472 FMT_10_10_10_2 = 0x8,
6473 FMT_2_10_10_10 = 0x9,
6474 FMT_8_8_8_8 = 0xa,
6475 FMT_32_32 = 0xb,
6476 FMT_16_16_16_16 = 0xc,
6477 FMT_32_32_32 = 0xd,
6478 FMT_32_32_32_32 = 0xe,
6479 FMT_RESERVED_4 = 0xf,
6480 FMT_5_6_5 = 0x10,
6481 FMT_1_5_5_5 = 0x11,
6482 FMT_5_5_5_1 = 0x12,
6483 FMT_4_4_4_4 = 0x13,
6484 FMT_8_24 = 0x14,
6485 FMT_24_8 = 0x15,
6486 FMT_X24_8_32_FLOAT = 0x16,
6487 FMT_RESERVED_33 = 0x17,
6488 FMT_11_11_10_FLOAT = 0x18,
6489 FMT_16_FLOAT = 0x19,
6490 FMT_32_FLOAT = 0x1a,
6491 FMT_16_16_FLOAT = 0x1b,
6492 FMT_8_24_FLOAT = 0x1c,
6493 FMT_24_8_FLOAT = 0x1d,
6494 FMT_32_32_FLOAT = 0x1e,
6495 FMT_10_11_11_FLOAT = 0x1f,
6496 FMT_16_16_16_16_FLOAT = 0x20,
6497 FMT_3_3_2 = 0x21,
6498 FMT_6_5_5 = 0x22,
6499 FMT_32_32_32_32_FLOAT = 0x23,
6500 FMT_RESERVED_36 = 0x24,
6501 FMT_1 = 0x25,
6502 FMT_1_REVERSED = 0x26,
6503 FMT_GB_GR = 0x27,
6504 FMT_BG_RG = 0x28,
6505 FMT_32_AS_8 = 0x29,
6506 FMT_32_AS_8_8 = 0x2a,
6507 FMT_5_9_9_9_SHAREDEXP = 0x2b,
6508 FMT_8_8_8 = 0x2c,
6509 FMT_16_16_16 = 0x2d,
6510 FMT_16_16_16_FLOAT = 0x2e,
6511 FMT_4_4 = 0x2f,
6512 FMT_32_32_32_FLOAT = 0x30,
6513 FMT_BC1 = 0x31,
6514 FMT_BC2 = 0x32,
6515 FMT_BC3 = 0x33,
6516 FMT_BC4 = 0x34,
6517 FMT_BC5 = 0x35,
6518 FMT_BC6 = 0x36,
6519 FMT_BC7 = 0x37,
6520 FMT_32_AS_32_32_32_32 = 0x38,
6521 FMT_APC3 = 0x39,
6522 FMT_APC4 = 0x3a,
6523 FMT_APC5 = 0x3b,
6524 FMT_APC6 = 0x3c,
6525 FMT_APC7 = 0x3d,
6526 FMT_CTX1 = 0x3e,
6527 FMT_RESERVED_63 = 0x3f,
6528} SurfaceFormat;
6529typedef enum BUF_DATA_FORMAT {
6530 BUF_DATA_FORMAT_INVALID = 0x0,
6531 BUF_DATA_FORMAT_8 = 0x1,
6532 BUF_DATA_FORMAT_16 = 0x2,
6533 BUF_DATA_FORMAT_8_8 = 0x3,
6534 BUF_DATA_FORMAT_32 = 0x4,
6535 BUF_DATA_FORMAT_16_16 = 0x5,
6536 BUF_DATA_FORMAT_10_11_11 = 0x6,
6537 BUF_DATA_FORMAT_11_11_10 = 0x7,
6538 BUF_DATA_FORMAT_10_10_10_2 = 0x8,
6539 BUF_DATA_FORMAT_2_10_10_10 = 0x9,
6540 BUF_DATA_FORMAT_8_8_8_8 = 0xa,
6541 BUF_DATA_FORMAT_32_32 = 0xb,
6542 BUF_DATA_FORMAT_16_16_16_16 = 0xc,
6543 BUF_DATA_FORMAT_32_32_32 = 0xd,
6544 BUF_DATA_FORMAT_32_32_32_32 = 0xe,
6545 BUF_DATA_FORMAT_RESERVED_15 = 0xf,
6546} BUF_DATA_FORMAT;
6547typedef enum IMG_DATA_FORMAT {
6548 IMG_DATA_FORMAT_INVALID = 0x0,
6549 IMG_DATA_FORMAT_8 = 0x1,
6550 IMG_DATA_FORMAT_16 = 0x2,
6551 IMG_DATA_FORMAT_8_8 = 0x3,
6552 IMG_DATA_FORMAT_32 = 0x4,
6553 IMG_DATA_FORMAT_16_16 = 0x5,
6554 IMG_DATA_FORMAT_10_11_11 = 0x6,
6555 IMG_DATA_FORMAT_11_11_10 = 0x7,
6556 IMG_DATA_FORMAT_10_10_10_2 = 0x8,
6557 IMG_DATA_FORMAT_2_10_10_10 = 0x9,
6558 IMG_DATA_FORMAT_8_8_8_8 = 0xa,
6559 IMG_DATA_FORMAT_32_32 = 0xb,
6560 IMG_DATA_FORMAT_16_16_16_16 = 0xc,
6561 IMG_DATA_FORMAT_32_32_32 = 0xd,
6562 IMG_DATA_FORMAT_32_32_32_32 = 0xe,
6563 IMG_DATA_FORMAT_16_AS_32_32 = 0xf,
6564 IMG_DATA_FORMAT_5_6_5 = 0x10,
6565 IMG_DATA_FORMAT_1_5_5_5 = 0x11,
6566 IMG_DATA_FORMAT_5_5_5_1 = 0x12,
6567 IMG_DATA_FORMAT_4_4_4_4 = 0x13,
6568 IMG_DATA_FORMAT_8_24 = 0x14,
6569 IMG_DATA_FORMAT_24_8 = 0x15,
6570 IMG_DATA_FORMAT_X24_8_32 = 0x16,
6571 IMG_DATA_FORMAT_8_AS_8_8_8_8 = 0x17,
6572 IMG_DATA_FORMAT_ETC2_RGB = 0x18,
6573 IMG_DATA_FORMAT_ETC2_RGBA = 0x19,
6574 IMG_DATA_FORMAT_ETC2_R = 0x1a,
6575 IMG_DATA_FORMAT_ETC2_RG = 0x1b,
6576 IMG_DATA_FORMAT_ETC2_RGBA1 = 0x1c,
6577 IMG_DATA_FORMAT_RESERVED_29 = 0x1d,
6578 IMG_DATA_FORMAT_RESERVED_30 = 0x1e,
6579 IMG_DATA_FORMAT_RESERVED_31 = 0x1f,
6580 IMG_DATA_FORMAT_GB_GR = 0x20,
6581 IMG_DATA_FORMAT_BG_RG = 0x21,
6582 IMG_DATA_FORMAT_5_9_9_9 = 0x22,
6583 IMG_DATA_FORMAT_BC1 = 0x23,
6584 IMG_DATA_FORMAT_BC2 = 0x24,
6585 IMG_DATA_FORMAT_BC3 = 0x25,
6586 IMG_DATA_FORMAT_BC4 = 0x26,
6587 IMG_DATA_FORMAT_BC5 = 0x27,
6588 IMG_DATA_FORMAT_BC6 = 0x28,
6589 IMG_DATA_FORMAT_BC7 = 0x29,
6590 IMG_DATA_FORMAT_16_AS_16_16_16_16 = 0x2a,
6591 IMG_DATA_FORMAT_16_AS_32_32_32_32 = 0x2b,
6592 IMG_DATA_FORMAT_FMASK8_S2_F1 = 0x2c,
6593 IMG_DATA_FORMAT_FMASK8_S4_F1 = 0x2d,
6594 IMG_DATA_FORMAT_FMASK8_S8_F1 = 0x2e,
6595 IMG_DATA_FORMAT_FMASK8_S2_F2 = 0x2f,
6596 IMG_DATA_FORMAT_FMASK8_S4_F2 = 0x30,
6597 IMG_DATA_FORMAT_FMASK8_S4_F4 = 0x31,
6598 IMG_DATA_FORMAT_FMASK16_S16_F1 = 0x32,
6599 IMG_DATA_FORMAT_FMASK16_S8_F2 = 0x33,
6600 IMG_DATA_FORMAT_FMASK32_S16_F2 = 0x34,
6601 IMG_DATA_FORMAT_FMASK32_S8_F4 = 0x35,
6602 IMG_DATA_FORMAT_FMASK32_S8_F8 = 0x36,
6603 IMG_DATA_FORMAT_FMASK64_S16_F4 = 0x37,
6604 IMG_DATA_FORMAT_FMASK64_S16_F8 = 0x38,
6605 IMG_DATA_FORMAT_4_4 = 0x39,
6606 IMG_DATA_FORMAT_6_5_5 = 0x3a,
6607 IMG_DATA_FORMAT_1 = 0x3b,
6608 IMG_DATA_FORMAT_1_REVERSED = 0x3c,
6609 IMG_DATA_FORMAT_8_AS_32 = 0x3d,
6610 IMG_DATA_FORMAT_8_AS_32_32 = 0x3e,
6611 IMG_DATA_FORMAT_32_AS_32_32_32_32 = 0x3f,
6612} IMG_DATA_FORMAT;
6613typedef enum BUF_NUM_FORMAT {
6614 BUF_NUM_FORMAT_UNORM = 0x0,
6615 BUF_NUM_FORMAT_SNORM = 0x1,
6616 BUF_NUM_FORMAT_USCALED = 0x2,
6617 BUF_NUM_FORMAT_SSCALED = 0x3,
6618 BUF_NUM_FORMAT_UINT = 0x4,
6619 BUF_NUM_FORMAT_SINT = 0x5,
6620 BUF_NUM_FORMAT_RESERVED_6 = 0x6,
6621 BUF_NUM_FORMAT_FLOAT = 0x7,
6622} BUF_NUM_FORMAT;
6623typedef enum IMG_NUM_FORMAT {
6624 IMG_NUM_FORMAT_UNORM = 0x0,
6625 IMG_NUM_FORMAT_SNORM = 0x1,
6626 IMG_NUM_FORMAT_USCALED = 0x2,
6627 IMG_NUM_FORMAT_SSCALED = 0x3,
6628 IMG_NUM_FORMAT_UINT = 0x4,
6629 IMG_NUM_FORMAT_SINT = 0x5,
6630 IMG_NUM_FORMAT_RESERVED_6 = 0x6,
6631 IMG_NUM_FORMAT_FLOAT = 0x7,
6632 IMG_NUM_FORMAT_RESERVED_8 = 0x8,
6633 IMG_NUM_FORMAT_SRGB = 0x9,
6634 IMG_NUM_FORMAT_RESERVED_10 = 0xa,
6635 IMG_NUM_FORMAT_RESERVED_11 = 0xb,
6636 IMG_NUM_FORMAT_RESERVED_12 = 0xc,
6637 IMG_NUM_FORMAT_RESERVED_13 = 0xd,
6638 IMG_NUM_FORMAT_RESERVED_14 = 0xe,
6639 IMG_NUM_FORMAT_RESERVED_15 = 0xf,
6640} IMG_NUM_FORMAT;
6641typedef enum TileType {
6642 ARRAY_COLOR_TILE = 0x0,
6643 ARRAY_DEPTH_TILE = 0x1,
6644} TileType;
6645typedef enum NonDispTilingOrder {
6646 ADDR_SURF_MICRO_TILING_DISPLAY = 0x0,
6647 ADDR_SURF_MICRO_TILING_NON_DISPLAY = 0x1,
6648} NonDispTilingOrder;
6649typedef enum MicroTileMode {
6650 ADDR_SURF_DISPLAY_MICRO_TILING = 0x0,
6651 ADDR_SURF_THIN_MICRO_TILING = 0x1,
6652 ADDR_SURF_DEPTH_MICRO_TILING = 0x2,
6653 ADDR_SURF_ROTATED_MICRO_TILING = 0x3,
6654 ADDR_SURF_THICK_MICRO_TILING = 0x4,
6655} MicroTileMode;
6656typedef enum TileSplit {
6657 ADDR_SURF_TILE_SPLIT_64B = 0x0,
6658 ADDR_SURF_TILE_SPLIT_128B = 0x1,
6659 ADDR_SURF_TILE_SPLIT_256B = 0x2,
6660 ADDR_SURF_TILE_SPLIT_512B = 0x3,
6661 ADDR_SURF_TILE_SPLIT_1KB = 0x4,
6662 ADDR_SURF_TILE_SPLIT_2KB = 0x5,
6663 ADDR_SURF_TILE_SPLIT_4KB = 0x6,
6664} TileSplit;
6665typedef enum SampleSplit {
6666 ADDR_SURF_SAMPLE_SPLIT_1 = 0x0,
6667 ADDR_SURF_SAMPLE_SPLIT_2 = 0x1,
6668 ADDR_SURF_SAMPLE_SPLIT_4 = 0x2,
6669 ADDR_SURF_SAMPLE_SPLIT_8 = 0x3,
6670} SampleSplit;
6671typedef enum PipeConfig {
6672 ADDR_SURF_P2 = 0x0,
6673 ADDR_SURF_P2_RESERVED0 = 0x1,
6674 ADDR_SURF_P2_RESERVED1 = 0x2,
6675 ADDR_SURF_P2_RESERVED2 = 0x3,
6676 ADDR_SURF_P4_8x16 = 0x4,
6677 ADDR_SURF_P4_16x16 = 0x5,
6678 ADDR_SURF_P4_16x32 = 0x6,
6679 ADDR_SURF_P4_32x32 = 0x7,
6680 ADDR_SURF_P8_16x16_8x16 = 0x8,
6681 ADDR_SURF_P8_16x32_8x16 = 0x9,
6682 ADDR_SURF_P8_32x32_8x16 = 0xa,
6683 ADDR_SURF_P8_16x32_16x16 = 0xb,
6684 ADDR_SURF_P8_32x32_16x16 = 0xc,
6685 ADDR_SURF_P8_32x32_16x32 = 0xd,
6686 ADDR_SURF_P8_32x64_32x32 = 0xe,
6687 ADDR_SURF_P8_RESERVED0 = 0xf,
6688 ADDR_SURF_P16_32x32_8x16 = 0x10,
6689 ADDR_SURF_P16_32x32_16x16 = 0x11,
6690} PipeConfig;
6691typedef enum NumBanks {
6692 ADDR_SURF_2_BANK = 0x0,
6693 ADDR_SURF_4_BANK = 0x1,
6694 ADDR_SURF_8_BANK = 0x2,
6695 ADDR_SURF_16_BANK = 0x3,
6696} NumBanks;
6697typedef enum BankWidth {
6698 ADDR_SURF_BANK_WIDTH_1 = 0x0,
6699 ADDR_SURF_BANK_WIDTH_2 = 0x1,
6700 ADDR_SURF_BANK_WIDTH_4 = 0x2,
6701 ADDR_SURF_BANK_WIDTH_8 = 0x3,
6702} BankWidth;
6703typedef enum BankHeight {
6704 ADDR_SURF_BANK_HEIGHT_1 = 0x0,
6705 ADDR_SURF_BANK_HEIGHT_2 = 0x1,
6706 ADDR_SURF_BANK_HEIGHT_4 = 0x2,
6707 ADDR_SURF_BANK_HEIGHT_8 = 0x3,
6708} BankHeight;
6709typedef enum BankWidthHeight {
6710 ADDR_SURF_BANK_WH_1 = 0x0,
6711 ADDR_SURF_BANK_WH_2 = 0x1,
6712 ADDR_SURF_BANK_WH_4 = 0x2,
6713 ADDR_SURF_BANK_WH_8 = 0x3,
6714} BankWidthHeight;
6715typedef enum MacroTileAspect {
6716 ADDR_SURF_MACRO_ASPECT_1 = 0x0,
6717 ADDR_SURF_MACRO_ASPECT_2 = 0x1,
6718 ADDR_SURF_MACRO_ASPECT_4 = 0x2,
6719 ADDR_SURF_MACRO_ASPECT_8 = 0x3,
6720} MacroTileAspect;
6721typedef enum GATCL1RequestType {
6722 GATCL1_TYPE_NORMAL = 0x0,
6723 GATCL1_TYPE_SHOOTDOWN = 0x1,
6724 GATCL1_TYPE_BYPASS = 0x2,
6725} GATCL1RequestType;
6726typedef enum TCC_CACHE_POLICIES {
6727 TCC_CACHE_POLICY_LRU = 0x0,
6728 TCC_CACHE_POLICY_STREAM = 0x1,
6729} TCC_CACHE_POLICIES;
6730typedef enum MTYPE {
6731 MTYPE_NC_NV = 0x0,
6732 MTYPE_NC = 0x1,
6733 MTYPE_CC = 0x2,
6734 MTYPE_UC = 0x3,
6735} MTYPE;
6736typedef enum PERFMON_COUNTER_MODE {
6737 PERFMON_COUNTER_MODE_ACCUM = 0x0,
6738 PERFMON_COUNTER_MODE_ACTIVE_CYCLES = 0x1,
6739 PERFMON_COUNTER_MODE_MAX = 0x2,
6740 PERFMON_COUNTER_MODE_DIRTY = 0x3,
6741 PERFMON_COUNTER_MODE_SAMPLE = 0x4,
6742 PERFMON_COUNTER_MODE_CYCLES_SINCE_FIRST_EVENT = 0x5,
6743 PERFMON_COUNTER_MODE_CYCLES_SINCE_LAST_EVENT = 0x6,
6744 PERFMON_COUNTER_MODE_CYCLES_GE_HI = 0x7,
6745 PERFMON_COUNTER_MODE_CYCLES_EQ_HI = 0x8,
6746 PERFMON_COUNTER_MODE_INACTIVE_CYCLES = 0x9,
6747 PERFMON_COUNTER_MODE_RESERVED = 0xf,
6748} PERFMON_COUNTER_MODE;
6749typedef enum PERFMON_SPM_MODE {
6750 PERFMON_SPM_MODE_OFF = 0x0,
6751 PERFMON_SPM_MODE_16BIT_CLAMP = 0x1,
6752 PERFMON_SPM_MODE_16BIT_NO_CLAMP = 0x2,
6753 PERFMON_SPM_MODE_32BIT_CLAMP = 0x3,
6754 PERFMON_SPM_MODE_32BIT_NO_CLAMP = 0x4,
6755 PERFMON_SPM_MODE_RESERVED_5 = 0x5,
6756 PERFMON_SPM_MODE_RESERVED_6 = 0x6,
6757 PERFMON_SPM_MODE_RESERVED_7 = 0x7,
6758 PERFMON_SPM_MODE_TEST_MODE_0 = 0x8,
6759 PERFMON_SPM_MODE_TEST_MODE_1 = 0x9,
6760 PERFMON_SPM_MODE_TEST_MODE_2 = 0xa,
6761} PERFMON_SPM_MODE;
6762typedef enum SurfaceTiling {
6763 ARRAY_LINEAR = 0x0,
6764 ARRAY_TILED = 0x1,
6765} SurfaceTiling;
6766typedef enum SurfaceArray {
6767 ARRAY_1D = 0x0,
6768 ARRAY_2D = 0x1,
6769 ARRAY_3D = 0x2,
6770 ARRAY_3D_SLICE = 0x3,
6771} SurfaceArray;
6772typedef enum ColorArray {
6773 ARRAY_2D_ALT_COLOR = 0x0,
6774 ARRAY_2D_COLOR = 0x1,
6775 ARRAY_3D_SLICE_COLOR = 0x3,
6776} ColorArray;
6777typedef enum DepthArray {
6778 ARRAY_2D_ALT_DEPTH = 0x0,
6779 ARRAY_2D_DEPTH = 0x1,
6780} DepthArray;
6781typedef enum ENUM_NUM_SIMD_PER_CU {
6782 NUM_SIMD_PER_CU = 0x4,
6783} ENUM_NUM_SIMD_PER_CU;
6784typedef enum MEM_PWR_FORCE_CTRL {
6785 NO_FORCE_REQUEST = 0x0,
6786 FORCE_LIGHT_SLEEP_REQUEST = 0x1,
6787 FORCE_DEEP_SLEEP_REQUEST = 0x2,
6788 FORCE_SHUT_DOWN_REQUEST = 0x3,
6789} MEM_PWR_FORCE_CTRL;
6790typedef enum MEM_PWR_FORCE_CTRL2 {
6791 NO_FORCE_REQ = 0x0,
6792 FORCE_LIGHT_SLEEP_REQ = 0x1,
6793} MEM_PWR_FORCE_CTRL2;
6794typedef enum MEM_PWR_DIS_CTRL {
6795 ENABLE_MEM_PWR_CTRL = 0x0,
6796 DISABLE_MEM_PWR_CTRL = 0x1,
6797} MEM_PWR_DIS_CTRL;
6798typedef enum MEM_PWR_SEL_CTRL {
6799 DYNAMIC_SHUT_DOWN_ENABLE = 0x0,
6800 DYNAMIC_DEEP_SLEEP_ENABLE = 0x1,
6801 DYNAMIC_LIGHT_SLEEP_ENABLE = 0x2,
6802} MEM_PWR_SEL_CTRL;
6803typedef enum MEM_PWR_SEL_CTRL2 {
6804 DYNAMIC_DEEP_SLEEP_EN = 0x0,
6805 DYNAMIC_LIGHT_SLEEP_EN = 0x1,
6806} MEM_PWR_SEL_CTRL2;
6807
6808#endif /* GFX_8_1_ENUM_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_1_sh_mask.h
new file mode 100644
index 000000000000..397705a6b3a2
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_1_sh_mask.h
@@ -0,0 +1,21368 @@
1/*
2 * GFX_8_1 Register documentation
3 *
4 * Copyright (C) 2014 Advanced Micro Devices, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
20 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24#ifndef GFX_8_1_SH_MASK_H
25#define GFX_8_1_SH_MASK_H
26
27#define CB_BLEND_RED__BLEND_RED_MASK 0xffffffff
28#define CB_BLEND_RED__BLEND_RED__SHIFT 0x0
29#define CB_BLEND_GREEN__BLEND_GREEN_MASK 0xffffffff
30#define CB_BLEND_GREEN__BLEND_GREEN__SHIFT 0x0
31#define CB_BLEND_BLUE__BLEND_BLUE_MASK 0xffffffff
32#define CB_BLEND_BLUE__BLEND_BLUE__SHIFT 0x0
33#define CB_BLEND_ALPHA__BLEND_ALPHA_MASK 0xffffffff
34#define CB_BLEND_ALPHA__BLEND_ALPHA__SHIFT 0x0
35#define CB_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x1
36#define CB_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
37#define CB_DCC_CONTROL__OVERWRITE_COMBINER_MRT_SHARING_DISABLE_MASK 0x2
38#define CB_DCC_CONTROL__OVERWRITE_COMBINER_MRT_SHARING_DISABLE__SHIFT 0x1
39#define CB_DCC_CONTROL__OVERWRITE_COMBINER_WATERMARK_MASK 0x7c
40#define CB_DCC_CONTROL__OVERWRITE_COMBINER_WATERMARK__SHIFT 0x2
41#define CB_COLOR_CONTROL__DISABLE_DUAL_QUAD_MASK 0x1
42#define CB_COLOR_CONTROL__DISABLE_DUAL_QUAD__SHIFT 0x0
43#define CB_COLOR_CONTROL__DEGAMMA_ENABLE_MASK 0x8
44#define CB_COLOR_CONTROL__DEGAMMA_ENABLE__SHIFT 0x3
45#define CB_COLOR_CONTROL__MODE_MASK 0x70
46#define CB_COLOR_CONTROL__MODE__SHIFT 0x4
47#define CB_COLOR_CONTROL__ROP3_MASK 0xff0000
48#define CB_COLOR_CONTROL__ROP3__SHIFT 0x10
49#define CB_BLEND0_CONTROL__COLOR_SRCBLEND_MASK 0x1f
50#define CB_BLEND0_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
51#define CB_BLEND0_CONTROL__COLOR_COMB_FCN_MASK 0xe0
52#define CB_BLEND0_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
53#define CB_BLEND0_CONTROL__COLOR_DESTBLEND_MASK 0x1f00
54#define CB_BLEND0_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
55#define CB_BLEND0_CONTROL__ALPHA_SRCBLEND_MASK 0x1f0000
56#define CB_BLEND0_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
57#define CB_BLEND0_CONTROL__ALPHA_COMB_FCN_MASK 0xe00000
58#define CB_BLEND0_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
59#define CB_BLEND0_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000
60#define CB_BLEND0_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
61#define CB_BLEND0_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000
62#define CB_BLEND0_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
63#define CB_BLEND0_CONTROL__ENABLE_MASK 0x40000000
64#define CB_BLEND0_CONTROL__ENABLE__SHIFT 0x1e
65#define CB_BLEND0_CONTROL__DISABLE_ROP3_MASK 0x80000000
66#define CB_BLEND0_CONTROL__DISABLE_ROP3__SHIFT 0x1f
67#define CB_BLEND1_CONTROL__COLOR_SRCBLEND_MASK 0x1f
68#define CB_BLEND1_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
69#define CB_BLEND1_CONTROL__COLOR_COMB_FCN_MASK 0xe0
70#define CB_BLEND1_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
71#define CB_BLEND1_CONTROL__COLOR_DESTBLEND_MASK 0x1f00
72#define CB_BLEND1_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
73#define CB_BLEND1_CONTROL__ALPHA_SRCBLEND_MASK 0x1f0000
74#define CB_BLEND1_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
75#define CB_BLEND1_CONTROL__ALPHA_COMB_FCN_MASK 0xe00000
76#define CB_BLEND1_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
77#define CB_BLEND1_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000
78#define CB_BLEND1_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
79#define CB_BLEND1_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000
80#define CB_BLEND1_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
81#define CB_BLEND1_CONTROL__ENABLE_MASK 0x40000000
82#define CB_BLEND1_CONTROL__ENABLE__SHIFT 0x1e
83#define CB_BLEND1_CONTROL__DISABLE_ROP3_MASK 0x80000000
84#define CB_BLEND1_CONTROL__DISABLE_ROP3__SHIFT 0x1f
85#define CB_BLEND2_CONTROL__COLOR_SRCBLEND_MASK 0x1f
86#define CB_BLEND2_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
87#define CB_BLEND2_CONTROL__COLOR_COMB_FCN_MASK 0xe0
88#define CB_BLEND2_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
89#define CB_BLEND2_CONTROL__COLOR_DESTBLEND_MASK 0x1f00
90#define CB_BLEND2_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
91#define CB_BLEND2_CONTROL__ALPHA_SRCBLEND_MASK 0x1f0000
92#define CB_BLEND2_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
93#define CB_BLEND2_CONTROL__ALPHA_COMB_FCN_MASK 0xe00000
94#define CB_BLEND2_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
95#define CB_BLEND2_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000
96#define CB_BLEND2_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
97#define CB_BLEND2_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000
98#define CB_BLEND2_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
99#define CB_BLEND2_CONTROL__ENABLE_MASK 0x40000000
100#define CB_BLEND2_CONTROL__ENABLE__SHIFT 0x1e
101#define CB_BLEND2_CONTROL__DISABLE_ROP3_MASK 0x80000000
102#define CB_BLEND2_CONTROL__DISABLE_ROP3__SHIFT 0x1f
103#define CB_BLEND3_CONTROL__COLOR_SRCBLEND_MASK 0x1f
104#define CB_BLEND3_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
105#define CB_BLEND3_CONTROL__COLOR_COMB_FCN_MASK 0xe0
106#define CB_BLEND3_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
107#define CB_BLEND3_CONTROL__COLOR_DESTBLEND_MASK 0x1f00
108#define CB_BLEND3_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
109#define CB_BLEND3_CONTROL__ALPHA_SRCBLEND_MASK 0x1f0000
110#define CB_BLEND3_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
111#define CB_BLEND3_CONTROL__ALPHA_COMB_FCN_MASK 0xe00000
112#define CB_BLEND3_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
113#define CB_BLEND3_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000
114#define CB_BLEND3_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
115#define CB_BLEND3_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000
116#define CB_BLEND3_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
117#define CB_BLEND3_CONTROL__ENABLE_MASK 0x40000000
118#define CB_BLEND3_CONTROL__ENABLE__SHIFT 0x1e
119#define CB_BLEND3_CONTROL__DISABLE_ROP3_MASK 0x80000000
120#define CB_BLEND3_CONTROL__DISABLE_ROP3__SHIFT 0x1f
121#define CB_BLEND4_CONTROL__COLOR_SRCBLEND_MASK 0x1f
122#define CB_BLEND4_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
123#define CB_BLEND4_CONTROL__COLOR_COMB_FCN_MASK 0xe0
124#define CB_BLEND4_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
/*
 * GFX8 CB (color buffer) register field shift/mask definitions
 * (auto-generated style: <REG>__<FIELD>_MASK / <REG>__<FIELD>__SHIFT).
 * Fix: removed spurious decimal line-number prefixes that had been fused
 * onto every "#define" by a web scrape, which made each line invalid C.
 * All macro names and values are unchanged.
 */
#define CB_BLEND4_CONTROL__COLOR_DESTBLEND_MASK 0x1f00
#define CB_BLEND4_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
#define CB_BLEND4_CONTROL__ALPHA_SRCBLEND_MASK 0x1f0000
#define CB_BLEND4_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
#define CB_BLEND4_CONTROL__ALPHA_COMB_FCN_MASK 0xe00000
#define CB_BLEND4_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
#define CB_BLEND4_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000
#define CB_BLEND4_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
#define CB_BLEND4_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000
#define CB_BLEND4_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
#define CB_BLEND4_CONTROL__ENABLE_MASK 0x40000000
#define CB_BLEND4_CONTROL__ENABLE__SHIFT 0x1e
#define CB_BLEND4_CONTROL__DISABLE_ROP3_MASK 0x80000000
#define CB_BLEND4_CONTROL__DISABLE_ROP3__SHIFT 0x1f
#define CB_BLEND5_CONTROL__COLOR_SRCBLEND_MASK 0x1f
#define CB_BLEND5_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
#define CB_BLEND5_CONTROL__COLOR_COMB_FCN_MASK 0xe0
#define CB_BLEND5_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
#define CB_BLEND5_CONTROL__COLOR_DESTBLEND_MASK 0x1f00
#define CB_BLEND5_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
#define CB_BLEND5_CONTROL__ALPHA_SRCBLEND_MASK 0x1f0000
#define CB_BLEND5_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
#define CB_BLEND5_CONTROL__ALPHA_COMB_FCN_MASK 0xe00000
#define CB_BLEND5_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
#define CB_BLEND5_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000
#define CB_BLEND5_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
#define CB_BLEND5_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000
#define CB_BLEND5_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
#define CB_BLEND5_CONTROL__ENABLE_MASK 0x40000000
#define CB_BLEND5_CONTROL__ENABLE__SHIFT 0x1e
#define CB_BLEND5_CONTROL__DISABLE_ROP3_MASK 0x80000000
#define CB_BLEND5_CONTROL__DISABLE_ROP3__SHIFT 0x1f
#define CB_BLEND6_CONTROL__COLOR_SRCBLEND_MASK 0x1f
#define CB_BLEND6_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
#define CB_BLEND6_CONTROL__COLOR_COMB_FCN_MASK 0xe0
#define CB_BLEND6_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
#define CB_BLEND6_CONTROL__COLOR_DESTBLEND_MASK 0x1f00
#define CB_BLEND6_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
#define CB_BLEND6_CONTROL__ALPHA_SRCBLEND_MASK 0x1f0000
#define CB_BLEND6_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
#define CB_BLEND6_CONTROL__ALPHA_COMB_FCN_MASK 0xe00000
#define CB_BLEND6_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
#define CB_BLEND6_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000
#define CB_BLEND6_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
#define CB_BLEND6_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000
#define CB_BLEND6_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
#define CB_BLEND6_CONTROL__ENABLE_MASK 0x40000000
#define CB_BLEND6_CONTROL__ENABLE__SHIFT 0x1e
#define CB_BLEND6_CONTROL__DISABLE_ROP3_MASK 0x80000000
#define CB_BLEND6_CONTROL__DISABLE_ROP3__SHIFT 0x1f
#define CB_BLEND7_CONTROL__COLOR_SRCBLEND_MASK 0x1f
#define CB_BLEND7_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
#define CB_BLEND7_CONTROL__COLOR_COMB_FCN_MASK 0xe0
#define CB_BLEND7_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
#define CB_BLEND7_CONTROL__COLOR_DESTBLEND_MASK 0x1f00
#define CB_BLEND7_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
#define CB_BLEND7_CONTROL__ALPHA_SRCBLEND_MASK 0x1f0000
#define CB_BLEND7_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
#define CB_BLEND7_CONTROL__ALPHA_COMB_FCN_MASK 0xe00000
#define CB_BLEND7_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
#define CB_BLEND7_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000
#define CB_BLEND7_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
#define CB_BLEND7_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000
#define CB_BLEND7_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
#define CB_BLEND7_CONTROL__ENABLE_MASK 0x40000000
#define CB_BLEND7_CONTROL__ENABLE__SHIFT 0x1e
#define CB_BLEND7_CONTROL__DISABLE_ROP3_MASK 0x80000000
#define CB_BLEND7_CONTROL__DISABLE_ROP3__SHIFT 0x1f
#define CB_COLOR0_BASE__BASE_256B_MASK 0xffffffff
#define CB_COLOR0_BASE__BASE_256B__SHIFT 0x0
#define CB_COLOR1_BASE__BASE_256B_MASK 0xffffffff
#define CB_COLOR1_BASE__BASE_256B__SHIFT 0x0
#define CB_COLOR2_BASE__BASE_256B_MASK 0xffffffff
#define CB_COLOR2_BASE__BASE_256B__SHIFT 0x0
#define CB_COLOR3_BASE__BASE_256B_MASK 0xffffffff
#define CB_COLOR3_BASE__BASE_256B__SHIFT 0x0
#define CB_COLOR4_BASE__BASE_256B_MASK 0xffffffff
#define CB_COLOR4_BASE__BASE_256B__SHIFT 0x0
#define CB_COLOR5_BASE__BASE_256B_MASK 0xffffffff
#define CB_COLOR5_BASE__BASE_256B__SHIFT 0x0
#define CB_COLOR6_BASE__BASE_256B_MASK 0xffffffff
#define CB_COLOR6_BASE__BASE_256B__SHIFT 0x0
#define CB_COLOR7_BASE__BASE_256B_MASK 0xffffffff
#define CB_COLOR7_BASE__BASE_256B__SHIFT 0x0
#define CB_COLOR0_PITCH__TILE_MAX_MASK 0x7ff
#define CB_COLOR0_PITCH__TILE_MAX__SHIFT 0x0
#define CB_COLOR0_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000
#define CB_COLOR0_PITCH__FMASK_TILE_MAX__SHIFT 0x14
#define CB_COLOR1_PITCH__TILE_MAX_MASK 0x7ff
#define CB_COLOR1_PITCH__TILE_MAX__SHIFT 0x0
#define CB_COLOR1_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000
#define CB_COLOR1_PITCH__FMASK_TILE_MAX__SHIFT 0x14
#define CB_COLOR2_PITCH__TILE_MAX_MASK 0x7ff
#define CB_COLOR2_PITCH__TILE_MAX__SHIFT 0x0
#define CB_COLOR2_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000
#define CB_COLOR2_PITCH__FMASK_TILE_MAX__SHIFT 0x14
#define CB_COLOR3_PITCH__TILE_MAX_MASK 0x7ff
#define CB_COLOR3_PITCH__TILE_MAX__SHIFT 0x0
#define CB_COLOR3_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000
#define CB_COLOR3_PITCH__FMASK_TILE_MAX__SHIFT 0x14
#define CB_COLOR4_PITCH__TILE_MAX_MASK 0x7ff
#define CB_COLOR4_PITCH__TILE_MAX__SHIFT 0x0
#define CB_COLOR4_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000
#define CB_COLOR4_PITCH__FMASK_TILE_MAX__SHIFT 0x14
#define CB_COLOR5_PITCH__TILE_MAX_MASK 0x7ff
#define CB_COLOR5_PITCH__TILE_MAX__SHIFT 0x0
#define CB_COLOR5_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000
#define CB_COLOR5_PITCH__FMASK_TILE_MAX__SHIFT 0x14
#define CB_COLOR6_PITCH__TILE_MAX_MASK 0x7ff
#define CB_COLOR6_PITCH__TILE_MAX__SHIFT 0x0
#define CB_COLOR6_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000
#define CB_COLOR6_PITCH__FMASK_TILE_MAX__SHIFT 0x14
#define CB_COLOR7_PITCH__TILE_MAX_MASK 0x7ff
#define CB_COLOR7_PITCH__TILE_MAX__SHIFT 0x0
#define CB_COLOR7_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000
#define CB_COLOR7_PITCH__FMASK_TILE_MAX__SHIFT 0x14
#define CB_COLOR0_SLICE__TILE_MAX_MASK 0x3fffff
#define CB_COLOR0_SLICE__TILE_MAX__SHIFT 0x0
#define CB_COLOR1_SLICE__TILE_MAX_MASK 0x3fffff
#define CB_COLOR1_SLICE__TILE_MAX__SHIFT 0x0
#define CB_COLOR2_SLICE__TILE_MAX_MASK 0x3fffff
#define CB_COLOR2_SLICE__TILE_MAX__SHIFT 0x0
#define CB_COLOR3_SLICE__TILE_MAX_MASK 0x3fffff
#define CB_COLOR3_SLICE__TILE_MAX__SHIFT 0x0
#define CB_COLOR4_SLICE__TILE_MAX_MASK 0x3fffff
#define CB_COLOR4_SLICE__TILE_MAX__SHIFT 0x0
#define CB_COLOR5_SLICE__TILE_MAX_MASK 0x3fffff
#define CB_COLOR5_SLICE__TILE_MAX__SHIFT 0x0
#define CB_COLOR6_SLICE__TILE_MAX_MASK 0x3fffff
#define CB_COLOR6_SLICE__TILE_MAX__SHIFT 0x0
#define CB_COLOR7_SLICE__TILE_MAX_MASK 0x3fffff
#define CB_COLOR7_SLICE__TILE_MAX__SHIFT 0x0
#define CB_COLOR0_VIEW__SLICE_START_MASK 0x7ff
#define CB_COLOR0_VIEW__SLICE_START__SHIFT 0x0
#define CB_COLOR0_VIEW__SLICE_MAX_MASK 0xffe000
#define CB_COLOR0_VIEW__SLICE_MAX__SHIFT 0xd
#define CB_COLOR1_VIEW__SLICE_START_MASK 0x7ff
#define CB_COLOR1_VIEW__SLICE_START__SHIFT 0x0
#define CB_COLOR1_VIEW__SLICE_MAX_MASK 0xffe000
#define CB_COLOR1_VIEW__SLICE_MAX__SHIFT 0xd
#define CB_COLOR2_VIEW__SLICE_START_MASK 0x7ff
#define CB_COLOR2_VIEW__SLICE_START__SHIFT 0x0
#define CB_COLOR2_VIEW__SLICE_MAX_MASK 0xffe000
#define CB_COLOR2_VIEW__SLICE_MAX__SHIFT 0xd
#define CB_COLOR3_VIEW__SLICE_START_MASK 0x7ff
#define CB_COLOR3_VIEW__SLICE_START__SHIFT 0x0
#define CB_COLOR3_VIEW__SLICE_MAX_MASK 0xffe000
#define CB_COLOR3_VIEW__SLICE_MAX__SHIFT 0xd
#define CB_COLOR4_VIEW__SLICE_START_MASK 0x7ff
#define CB_COLOR4_VIEW__SLICE_START__SHIFT 0x0
#define CB_COLOR4_VIEW__SLICE_MAX_MASK 0xffe000
#define CB_COLOR4_VIEW__SLICE_MAX__SHIFT 0xd
#define CB_COLOR5_VIEW__SLICE_START_MASK 0x7ff
#define CB_COLOR5_VIEW__SLICE_START__SHIFT 0x0
#define CB_COLOR5_VIEW__SLICE_MAX_MASK 0xffe000
#define CB_COLOR5_VIEW__SLICE_MAX__SHIFT 0xd
#define CB_COLOR6_VIEW__SLICE_START_MASK 0x7ff
#define CB_COLOR6_VIEW__SLICE_START__SHIFT 0x0
#define CB_COLOR6_VIEW__SLICE_MAX_MASK 0xffe000
#define CB_COLOR6_VIEW__SLICE_MAX__SHIFT 0xd
#define CB_COLOR7_VIEW__SLICE_START_MASK 0x7ff
#define CB_COLOR7_VIEW__SLICE_START__SHIFT 0x0
#define CB_COLOR7_VIEW__SLICE_MAX_MASK 0xffe000
#define CB_COLOR7_VIEW__SLICE_MAX__SHIFT 0xd
#define CB_COLOR0_INFO__ENDIAN_MASK 0x3
#define CB_COLOR0_INFO__ENDIAN__SHIFT 0x0
#define CB_COLOR0_INFO__FORMAT_MASK 0x7c
#define CB_COLOR0_INFO__FORMAT__SHIFT 0x2
#define CB_COLOR0_INFO__LINEAR_GENERAL_MASK 0x80
#define CB_COLOR0_INFO__LINEAR_GENERAL__SHIFT 0x7
#define CB_COLOR0_INFO__NUMBER_TYPE_MASK 0x700
#define CB_COLOR0_INFO__NUMBER_TYPE__SHIFT 0x8
#define CB_COLOR0_INFO__COMP_SWAP_MASK 0x1800
#define CB_COLOR0_INFO__COMP_SWAP__SHIFT 0xb
#define CB_COLOR0_INFO__FAST_CLEAR_MASK 0x2000
#define CB_COLOR0_INFO__FAST_CLEAR__SHIFT 0xd
#define CB_COLOR0_INFO__COMPRESSION_MASK 0x4000
#define CB_COLOR0_INFO__COMPRESSION__SHIFT 0xe
#define CB_COLOR0_INFO__BLEND_CLAMP_MASK 0x8000
#define CB_COLOR0_INFO__BLEND_CLAMP__SHIFT 0xf
#define CB_COLOR0_INFO__BLEND_BYPASS_MASK 0x10000
#define CB_COLOR0_INFO__BLEND_BYPASS__SHIFT 0x10
#define CB_COLOR0_INFO__SIMPLE_FLOAT_MASK 0x20000
#define CB_COLOR0_INFO__SIMPLE_FLOAT__SHIFT 0x11
#define CB_COLOR0_INFO__ROUND_MODE_MASK 0x40000
#define CB_COLOR0_INFO__ROUND_MODE__SHIFT 0x12
#define CB_COLOR0_INFO__CMASK_IS_LINEAR_MASK 0x80000
#define CB_COLOR0_INFO__CMASK_IS_LINEAR__SHIFT 0x13
#define CB_COLOR0_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x700000
#define CB_COLOR0_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
#define CB_COLOR0_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x3800000
#define CB_COLOR0_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
#define CB_COLOR0_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x4000000
#define CB_COLOR0_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
#define CB_COLOR0_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x8000000
#define CB_COLOR0_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
#define CB_COLOR0_INFO__DCC_ENABLE_MASK 0x10000000
#define CB_COLOR0_INFO__DCC_ENABLE__SHIFT 0x1c
#define CB_COLOR0_INFO__CMASK_ADDR_TYPE_MASK 0x60000000
#define CB_COLOR0_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
#define CB_COLOR1_INFO__ENDIAN_MASK 0x3
#define CB_COLOR1_INFO__ENDIAN__SHIFT 0x0
#define CB_COLOR1_INFO__FORMAT_MASK 0x7c
#define CB_COLOR1_INFO__FORMAT__SHIFT 0x2
#define CB_COLOR1_INFO__LINEAR_GENERAL_MASK 0x80
#define CB_COLOR1_INFO__LINEAR_GENERAL__SHIFT 0x7
#define CB_COLOR1_INFO__NUMBER_TYPE_MASK 0x700
#define CB_COLOR1_INFO__NUMBER_TYPE__SHIFT 0x8
#define CB_COLOR1_INFO__COMP_SWAP_MASK 0x1800
#define CB_COLOR1_INFO__COMP_SWAP__SHIFT 0xb
#define CB_COLOR1_INFO__FAST_CLEAR_MASK 0x2000
#define CB_COLOR1_INFO__FAST_CLEAR__SHIFT 0xd
#define CB_COLOR1_INFO__COMPRESSION_MASK 0x4000
#define CB_COLOR1_INFO__COMPRESSION__SHIFT 0xe
#define CB_COLOR1_INFO__BLEND_CLAMP_MASK 0x8000
#define CB_COLOR1_INFO__BLEND_CLAMP__SHIFT 0xf
#define CB_COLOR1_INFO__BLEND_BYPASS_MASK 0x10000
#define CB_COLOR1_INFO__BLEND_BYPASS__SHIFT 0x10
#define CB_COLOR1_INFO__SIMPLE_FLOAT_MASK 0x20000
#define CB_COLOR1_INFO__SIMPLE_FLOAT__SHIFT 0x11
#define CB_COLOR1_INFO__ROUND_MODE_MASK 0x40000
#define CB_COLOR1_INFO__ROUND_MODE__SHIFT 0x12
#define CB_COLOR1_INFO__CMASK_IS_LINEAR_MASK 0x80000
#define CB_COLOR1_INFO__CMASK_IS_LINEAR__SHIFT 0x13
#define CB_COLOR1_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x700000
#define CB_COLOR1_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
#define CB_COLOR1_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x3800000
#define CB_COLOR1_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
#define CB_COLOR1_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x4000000
#define CB_COLOR1_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
#define CB_COLOR1_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x8000000
#define CB_COLOR1_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
#define CB_COLOR1_INFO__DCC_ENABLE_MASK 0x10000000
#define CB_COLOR1_INFO__DCC_ENABLE__SHIFT 0x1c
#define CB_COLOR1_INFO__CMASK_ADDR_TYPE_MASK 0x60000000
#define CB_COLOR1_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
#define CB_COLOR2_INFO__ENDIAN_MASK 0x3
#define CB_COLOR2_INFO__ENDIAN__SHIFT 0x0
#define CB_COLOR2_INFO__FORMAT_MASK 0x7c
#define CB_COLOR2_INFO__FORMAT__SHIFT 0x2
#define CB_COLOR2_INFO__LINEAR_GENERAL_MASK 0x80
#define CB_COLOR2_INFO__LINEAR_GENERAL__SHIFT 0x7
#define CB_COLOR2_INFO__NUMBER_TYPE_MASK 0x700
#define CB_COLOR2_INFO__NUMBER_TYPE__SHIFT 0x8
#define CB_COLOR2_INFO__COMP_SWAP_MASK 0x1800
#define CB_COLOR2_INFO__COMP_SWAP__SHIFT 0xb
#define CB_COLOR2_INFO__FAST_CLEAR_MASK 0x2000
#define CB_COLOR2_INFO__FAST_CLEAR__SHIFT 0xd
#define CB_COLOR2_INFO__COMPRESSION_MASK 0x4000
#define CB_COLOR2_INFO__COMPRESSION__SHIFT 0xe
#define CB_COLOR2_INFO__BLEND_CLAMP_MASK 0x8000
#define CB_COLOR2_INFO__BLEND_CLAMP__SHIFT 0xf
#define CB_COLOR2_INFO__BLEND_BYPASS_MASK 0x10000
#define CB_COLOR2_INFO__BLEND_BYPASS__SHIFT 0x10
#define CB_COLOR2_INFO__SIMPLE_FLOAT_MASK 0x20000
#define CB_COLOR2_INFO__SIMPLE_FLOAT__SHIFT 0x11
#define CB_COLOR2_INFO__ROUND_MODE_MASK 0x40000
#define CB_COLOR2_INFO__ROUND_MODE__SHIFT 0x12
#define CB_COLOR2_INFO__CMASK_IS_LINEAR_MASK 0x80000
#define CB_COLOR2_INFO__CMASK_IS_LINEAR__SHIFT 0x13
#define CB_COLOR2_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x700000
#define CB_COLOR2_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
#define CB_COLOR2_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x3800000
#define CB_COLOR2_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
#define CB_COLOR2_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x4000000
#define CB_COLOR2_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
#define CB_COLOR2_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x8000000
#define CB_COLOR2_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
#define CB_COLOR2_INFO__DCC_ENABLE_MASK 0x10000000
#define CB_COLOR2_INFO__DCC_ENABLE__SHIFT 0x1c
#define CB_COLOR2_INFO__CMASK_ADDR_TYPE_MASK 0x60000000
#define CB_COLOR2_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
#define CB_COLOR3_INFO__ENDIAN_MASK 0x3
#define CB_COLOR3_INFO__ENDIAN__SHIFT 0x0
#define CB_COLOR3_INFO__FORMAT_MASK 0x7c
#define CB_COLOR3_INFO__FORMAT__SHIFT 0x2
#define CB_COLOR3_INFO__LINEAR_GENERAL_MASK 0x80
#define CB_COLOR3_INFO__LINEAR_GENERAL__SHIFT 0x7
#define CB_COLOR3_INFO__NUMBER_TYPE_MASK 0x700
#define CB_COLOR3_INFO__NUMBER_TYPE__SHIFT 0x8
#define CB_COLOR3_INFO__COMP_SWAP_MASK 0x1800
#define CB_COLOR3_INFO__COMP_SWAP__SHIFT 0xb
#define CB_COLOR3_INFO__FAST_CLEAR_MASK 0x2000
#define CB_COLOR3_INFO__FAST_CLEAR__SHIFT 0xd
#define CB_COLOR3_INFO__COMPRESSION_MASK 0x4000
#define CB_COLOR3_INFO__COMPRESSION__SHIFT 0xe
#define CB_COLOR3_INFO__BLEND_CLAMP_MASK 0x8000
#define CB_COLOR3_INFO__BLEND_CLAMP__SHIFT 0xf
#define CB_COLOR3_INFO__BLEND_BYPASS_MASK 0x10000
#define CB_COLOR3_INFO__BLEND_BYPASS__SHIFT 0x10
#define CB_COLOR3_INFO__SIMPLE_FLOAT_MASK 0x20000
#define CB_COLOR3_INFO__SIMPLE_FLOAT__SHIFT 0x11
#define CB_COLOR3_INFO__ROUND_MODE_MASK 0x40000
#define CB_COLOR3_INFO__ROUND_MODE__SHIFT 0x12
#define CB_COLOR3_INFO__CMASK_IS_LINEAR_MASK 0x80000
#define CB_COLOR3_INFO__CMASK_IS_LINEAR__SHIFT 0x13
#define CB_COLOR3_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x700000
#define CB_COLOR3_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
#define CB_COLOR3_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x3800000
#define CB_COLOR3_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
#define CB_COLOR3_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x4000000
#define CB_COLOR3_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
#define CB_COLOR3_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x8000000
#define CB_COLOR3_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
#define CB_COLOR3_INFO__DCC_ENABLE_MASK 0x10000000
#define CB_COLOR3_INFO__DCC_ENABLE__SHIFT 0x1c
#define CB_COLOR3_INFO__CMASK_ADDR_TYPE_MASK 0x60000000
#define CB_COLOR3_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
#define CB_COLOR4_INFO__ENDIAN_MASK 0x3
#define CB_COLOR4_INFO__ENDIAN__SHIFT 0x0
#define CB_COLOR4_INFO__FORMAT_MASK 0x7c
#define CB_COLOR4_INFO__FORMAT__SHIFT 0x2
#define CB_COLOR4_INFO__LINEAR_GENERAL_MASK 0x80
#define CB_COLOR4_INFO__LINEAR_GENERAL__SHIFT 0x7
#define CB_COLOR4_INFO__NUMBER_TYPE_MASK 0x700
#define CB_COLOR4_INFO__NUMBER_TYPE__SHIFT 0x8
#define CB_COLOR4_INFO__COMP_SWAP_MASK 0x1800
#define CB_COLOR4_INFO__COMP_SWAP__SHIFT 0xb
#define CB_COLOR4_INFO__FAST_CLEAR_MASK 0x2000
#define CB_COLOR4_INFO__FAST_CLEAR__SHIFT 0xd
#define CB_COLOR4_INFO__COMPRESSION_MASK 0x4000
#define CB_COLOR4_INFO__COMPRESSION__SHIFT 0xe
#define CB_COLOR4_INFO__BLEND_CLAMP_MASK 0x8000
#define CB_COLOR4_INFO__BLEND_CLAMP__SHIFT 0xf
#define CB_COLOR4_INFO__BLEND_BYPASS_MASK 0x10000
#define CB_COLOR4_INFO__BLEND_BYPASS__SHIFT 0x10
#define CB_COLOR4_INFO__SIMPLE_FLOAT_MASK 0x20000
#define CB_COLOR4_INFO__SIMPLE_FLOAT__SHIFT 0x11
#define CB_COLOR4_INFO__ROUND_MODE_MASK 0x40000
#define CB_COLOR4_INFO__ROUND_MODE__SHIFT 0x12
#define CB_COLOR4_INFO__CMASK_IS_LINEAR_MASK 0x80000
#define CB_COLOR4_INFO__CMASK_IS_LINEAR__SHIFT 0x13
#define CB_COLOR4_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x700000
#define CB_COLOR4_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
#define CB_COLOR4_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x3800000
#define CB_COLOR4_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
#define CB_COLOR4_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x4000000
#define CB_COLOR4_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
#define CB_COLOR4_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x8000000
#define CB_COLOR4_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
#define CB_COLOR4_INFO__DCC_ENABLE_MASK 0x10000000
#define CB_COLOR4_INFO__DCC_ENABLE__SHIFT 0x1c
#define CB_COLOR4_INFO__CMASK_ADDR_TYPE_MASK 0x60000000
#define CB_COLOR4_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
#define CB_COLOR5_INFO__ENDIAN_MASK 0x3
#define CB_COLOR5_INFO__ENDIAN__SHIFT 0x0
#define CB_COLOR5_INFO__FORMAT_MASK 0x7c
#define CB_COLOR5_INFO__FORMAT__SHIFT 0x2
#define CB_COLOR5_INFO__LINEAR_GENERAL_MASK 0x80
#define CB_COLOR5_INFO__LINEAR_GENERAL__SHIFT 0x7
#define CB_COLOR5_INFO__NUMBER_TYPE_MASK 0x700
#define CB_COLOR5_INFO__NUMBER_TYPE__SHIFT 0x8
#define CB_COLOR5_INFO__COMP_SWAP_MASK 0x1800
#define CB_COLOR5_INFO__COMP_SWAP__SHIFT 0xb
#define CB_COLOR5_INFO__FAST_CLEAR_MASK 0x2000
#define CB_COLOR5_INFO__FAST_CLEAR__SHIFT 0xd
#define CB_COLOR5_INFO__COMPRESSION_MASK 0x4000
#define CB_COLOR5_INFO__COMPRESSION__SHIFT 0xe
#define CB_COLOR5_INFO__BLEND_CLAMP_MASK 0x8000
#define CB_COLOR5_INFO__BLEND_CLAMP__SHIFT 0xf
#define CB_COLOR5_INFO__BLEND_BYPASS_MASK 0x10000
#define CB_COLOR5_INFO__BLEND_BYPASS__SHIFT 0x10
#define CB_COLOR5_INFO__SIMPLE_FLOAT_MASK 0x20000
#define CB_COLOR5_INFO__SIMPLE_FLOAT__SHIFT 0x11
#define CB_COLOR5_INFO__ROUND_MODE_MASK 0x40000
#define CB_COLOR5_INFO__ROUND_MODE__SHIFT 0x12
#define CB_COLOR5_INFO__CMASK_IS_LINEAR_MASK 0x80000
#define CB_COLOR5_INFO__CMASK_IS_LINEAR__SHIFT 0x13
#define CB_COLOR5_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x700000
#define CB_COLOR5_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
#define CB_COLOR5_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x3800000
#define CB_COLOR5_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
#define CB_COLOR5_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x4000000
#define CB_COLOR5_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
#define CB_COLOR5_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x8000000
#define CB_COLOR5_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
#define CB_COLOR5_INFO__DCC_ENABLE_MASK 0x10000000
#define CB_COLOR5_INFO__DCC_ENABLE__SHIFT 0x1c
#define CB_COLOR5_INFO__CMASK_ADDR_TYPE_MASK 0x60000000
#define CB_COLOR5_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
#define CB_COLOR6_INFO__ENDIAN_MASK 0x3
#define CB_COLOR6_INFO__ENDIAN__SHIFT 0x0
#define CB_COLOR6_INFO__FORMAT_MASK 0x7c
#define CB_COLOR6_INFO__FORMAT__SHIFT 0x2
#define CB_COLOR6_INFO__LINEAR_GENERAL_MASK 0x80
#define CB_COLOR6_INFO__LINEAR_GENERAL__SHIFT 0x7
#define CB_COLOR6_INFO__NUMBER_TYPE_MASK 0x700
#define CB_COLOR6_INFO__NUMBER_TYPE__SHIFT 0x8
#define CB_COLOR6_INFO__COMP_SWAP_MASK 0x1800
#define CB_COLOR6_INFO__COMP_SWAP__SHIFT 0xb
#define CB_COLOR6_INFO__FAST_CLEAR_MASK 0x2000
#define CB_COLOR6_INFO__FAST_CLEAR__SHIFT 0xd
#define CB_COLOR6_INFO__COMPRESSION_MASK 0x4000
#define CB_COLOR6_INFO__COMPRESSION__SHIFT 0xe
#define CB_COLOR6_INFO__BLEND_CLAMP_MASK 0x8000
#define CB_COLOR6_INFO__BLEND_CLAMP__SHIFT 0xf
#define CB_COLOR6_INFO__BLEND_BYPASS_MASK 0x10000
#define CB_COLOR6_INFO__BLEND_BYPASS__SHIFT 0x10
#define CB_COLOR6_INFO__SIMPLE_FLOAT_MASK 0x20000
#define CB_COLOR6_INFO__SIMPLE_FLOAT__SHIFT 0x11
#define CB_COLOR6_INFO__ROUND_MODE_MASK 0x40000
#define CB_COLOR6_INFO__ROUND_MODE__SHIFT 0x12
#define CB_COLOR6_INFO__CMASK_IS_LINEAR_MASK 0x80000
#define CB_COLOR6_INFO__CMASK_IS_LINEAR__SHIFT 0x13
#define CB_COLOR6_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x700000
#define CB_COLOR6_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
#define CB_COLOR6_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x3800000
#define CB_COLOR6_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
#define CB_COLOR6_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x4000000
#define CB_COLOR6_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
#define CB_COLOR6_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x8000000
#define CB_COLOR6_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
#define CB_COLOR6_INFO__DCC_ENABLE_MASK 0x10000000
#define CB_COLOR6_INFO__DCC_ENABLE__SHIFT 0x1c
#define CB_COLOR6_INFO__CMASK_ADDR_TYPE_MASK 0x60000000
#define CB_COLOR6_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
#define CB_COLOR7_INFO__ENDIAN_MASK 0x3
#define CB_COLOR7_INFO__ENDIAN__SHIFT 0x0
#define CB_COLOR7_INFO__FORMAT_MASK 0x7c
#define CB_COLOR7_INFO__FORMAT__SHIFT 0x2
#define CB_COLOR7_INFO__LINEAR_GENERAL_MASK 0x80
#define CB_COLOR7_INFO__LINEAR_GENERAL__SHIFT 0x7
#define CB_COLOR7_INFO__NUMBER_TYPE_MASK 0x700
#define CB_COLOR7_INFO__NUMBER_TYPE__SHIFT 0x8
#define CB_COLOR7_INFO__COMP_SWAP_MASK 0x1800
#define CB_COLOR7_INFO__COMP_SWAP__SHIFT 0xb
#define CB_COLOR7_INFO__FAST_CLEAR_MASK 0x2000
#define CB_COLOR7_INFO__FAST_CLEAR__SHIFT 0xd
#define CB_COLOR7_INFO__COMPRESSION_MASK 0x4000
#define CB_COLOR7_INFO__COMPRESSION__SHIFT 0xe
#define CB_COLOR7_INFO__BLEND_CLAMP_MASK 0x8000
#define CB_COLOR7_INFO__BLEND_CLAMP__SHIFT 0xf
#define CB_COLOR7_INFO__BLEND_BYPASS_MASK 0x10000
#define CB_COLOR7_INFO__BLEND_BYPASS__SHIFT 0x10
#define CB_COLOR7_INFO__SIMPLE_FLOAT_MASK 0x20000
#define CB_COLOR7_INFO__SIMPLE_FLOAT__SHIFT 0x11
#define CB_COLOR7_INFO__ROUND_MODE_MASK 0x40000
#define CB_COLOR7_INFO__ROUND_MODE__SHIFT 0x12
#define CB_COLOR7_INFO__CMASK_IS_LINEAR_MASK 0x80000
#define CB_COLOR7_INFO__CMASK_IS_LINEAR__SHIFT 0x13
#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x700000
#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x3800000
#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
#define CB_COLOR7_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x4000000
#define CB_COLOR7_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
#define CB_COLOR7_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x8000000
#define CB_COLOR7_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
#define CB_COLOR7_INFO__DCC_ENABLE_MASK 0x10000000
#define CB_COLOR7_INFO__DCC_ENABLE__SHIFT 0x1c
#define CB_COLOR7_INFO__CMASK_ADDR_TYPE_MASK 0x60000000
#define CB_COLOR7_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
#define CB_COLOR0_ATTRIB__TILE_MODE_INDEX_MASK 0x1f
#define CB_COLOR0_ATTRIB__TILE_MODE_INDEX__SHIFT 0x0
#define CB_COLOR0_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x3e0
#define CB_COLOR0_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x5
#define CB_COLOR0_ATTRIB__FMASK_BANK_HEIGHT_MASK 0xc00
#define CB_COLOR0_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0xa
#define CB_COLOR0_ATTRIB__NUM_SAMPLES_MASK 0x7000
#define CB_COLOR0_ATTRIB__NUM_SAMPLES__SHIFT 0xc
#define CB_COLOR0_ATTRIB__NUM_FRAGMENTS_MASK 0x18000
#define CB_COLOR0_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
#define CB_COLOR0_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x20000
#define CB_COLOR0_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
#define CB_COLOR1_ATTRIB__TILE_MODE_INDEX_MASK 0x1f
#define CB_COLOR1_ATTRIB__TILE_MODE_INDEX__SHIFT 0x0
#define CB_COLOR1_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x3e0
#define CB_COLOR1_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x5
#define CB_COLOR1_ATTRIB__FMASK_BANK_HEIGHT_MASK 0xc00
#define CB_COLOR1_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0xa
#define CB_COLOR1_ATTRIB__NUM_SAMPLES_MASK 0x7000
#define CB_COLOR1_ATTRIB__NUM_SAMPLES__SHIFT 0xc
#define CB_COLOR1_ATTRIB__NUM_FRAGMENTS_MASK 0x18000
#define CB_COLOR1_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
#define CB_COLOR1_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x20000
#define CB_COLOR1_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
#define CB_COLOR2_ATTRIB__TILE_MODE_INDEX_MASK 0x1f
#define CB_COLOR2_ATTRIB__TILE_MODE_INDEX__SHIFT 0x0
#define CB_COLOR2_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x3e0
#define CB_COLOR2_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x5
#define CB_COLOR2_ATTRIB__FMASK_BANK_HEIGHT_MASK 0xc00
#define CB_COLOR2_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0xa
#define CB_COLOR2_ATTRIB__NUM_SAMPLES_MASK 0x7000
#define CB_COLOR2_ATTRIB__NUM_SAMPLES__SHIFT 0xc
#define CB_COLOR2_ATTRIB__NUM_FRAGMENTS_MASK 0x18000
#define CB_COLOR2_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
#define CB_COLOR2_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x20000
#define CB_COLOR2_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
#define CB_COLOR3_ATTRIB__TILE_MODE_INDEX_MASK 0x1f
#define CB_COLOR3_ATTRIB__TILE_MODE_INDEX__SHIFT 0x0
#define CB_COLOR3_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x3e0
#define CB_COLOR3_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x5
#define CB_COLOR3_ATTRIB__FMASK_BANK_HEIGHT_MASK 0xc00
#define CB_COLOR3_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0xa
#define CB_COLOR3_ATTRIB__NUM_SAMPLES_MASK 0x7000
#define CB_COLOR3_ATTRIB__NUM_SAMPLES__SHIFT 0xc
#define CB_COLOR3_ATTRIB__NUM_FRAGMENTS_MASK 0x18000
#define CB_COLOR3_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
#define CB_COLOR3_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x20000
#define CB_COLOR3_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
#define CB_COLOR4_ATTRIB__TILE_MODE_INDEX_MASK 0x1f
#define CB_COLOR4_ATTRIB__TILE_MODE_INDEX__SHIFT 0x0
#define CB_COLOR4_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x3e0
#define CB_COLOR4_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x5
#define CB_COLOR4_ATTRIB__FMASK_BANK_HEIGHT_MASK 0xc00
#define CB_COLOR4_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0xa
#define CB_COLOR4_ATTRIB__NUM_SAMPLES_MASK 0x7000
#define CB_COLOR4_ATTRIB__NUM_SAMPLES__SHIFT 0xc
#define CB_COLOR4_ATTRIB__NUM_FRAGMENTS_MASK 0x18000
#define CB_COLOR4_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
#define CB_COLOR4_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x20000
#define CB_COLOR4_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
#define CB_COLOR5_ATTRIB__TILE_MODE_INDEX_MASK 0x1f
#define CB_COLOR5_ATTRIB__TILE_MODE_INDEX__SHIFT 0x0
#define CB_COLOR5_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x3e0
#define CB_COLOR5_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x5
#define CB_COLOR5_ATTRIB__FMASK_BANK_HEIGHT_MASK 0xc00
#define CB_COLOR5_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0xa
#define CB_COLOR5_ATTRIB__NUM_SAMPLES_MASK 0x7000
#define CB_COLOR5_ATTRIB__NUM_SAMPLES__SHIFT 0xc
#define CB_COLOR5_ATTRIB__NUM_FRAGMENTS_MASK 0x18000
#define CB_COLOR5_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
#define CB_COLOR5_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x20000
#define CB_COLOR5_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
#define CB_COLOR6_ATTRIB__TILE_MODE_INDEX_MASK 0x1f
#define CB_COLOR6_ATTRIB__TILE_MODE_INDEX__SHIFT 0x0
#define CB_COLOR6_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x3e0
#define CB_COLOR6_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x5
#define CB_COLOR6_ATTRIB__FMASK_BANK_HEIGHT_MASK 0xc00
#define CB_COLOR6_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0xa
#define CB_COLOR6_ATTRIB__NUM_SAMPLES_MASK 0x7000
#define CB_COLOR6_ATTRIB__NUM_SAMPLES__SHIFT 0xc
#define CB_COLOR6_ATTRIB__NUM_FRAGMENTS_MASK 0x18000
#define CB_COLOR6_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
#define CB_COLOR6_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x20000
#define CB_COLOR6_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
#define CB_COLOR7_ATTRIB__TILE_MODE_INDEX_MASK 0x1f
#define CB_COLOR7_ATTRIB__TILE_MODE_INDEX__SHIFT 0x0
#define CB_COLOR7_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x3e0
#define CB_COLOR7_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x5
#define CB_COLOR7_ATTRIB__FMASK_BANK_HEIGHT_MASK 0xc00
#define CB_COLOR7_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0xa
#define CB_COLOR7_ATTRIB__NUM_SAMPLES_MASK 0x7000
#define CB_COLOR7_ATTRIB__NUM_SAMPLES__SHIFT 0xc
#define CB_COLOR7_ATTRIB__NUM_FRAGMENTS_MASK 0x18000
#define CB_COLOR7_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
#define CB_COLOR7_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x20000
#define CB_COLOR7_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
#define CB_COLOR0_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x1
#define CB_COLOR0_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
#define CB_COLOR0_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x2
#define CB_COLOR0_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
#define CB_COLOR0_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0xc
#define CB_COLOR0_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
#define CB_COLOR0_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x10
#define CB_COLOR0_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
#define CB_COLOR0_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x60
#define CB_COLOR0_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
#define CB_COLOR0_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x180
#define CB_COLOR0_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
#define CB_COLOR0_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x200
#define CB_COLOR0_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
#define CB_COLOR0_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x3c00
#define CB_COLOR0_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
#define CB_COLOR0_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x3c000
#define CB_COLOR0_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
#define CB_COLOR1_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x1
#define CB_COLOR1_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
#define CB_COLOR1_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x2
#define CB_COLOR1_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
#define CB_COLOR1_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0xc
#define CB_COLOR1_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
#define CB_COLOR1_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x10
#define CB_COLOR1_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
#define CB_COLOR1_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x60
#define CB_COLOR1_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
#define CB_COLOR1_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x180
#define CB_COLOR1_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
#define CB_COLOR1_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x200
#define CB_COLOR1_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
#define CB_COLOR1_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x3c00
#define CB_COLOR1_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
#define CB_COLOR1_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x3c000
#define CB_COLOR1_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
#define CB_COLOR2_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x1
#define CB_COLOR2_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
#define CB_COLOR2_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x2
#define CB_COLOR2_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
#define CB_COLOR2_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0xc
#define CB_COLOR2_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
#define CB_COLOR2_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x10
#define CB_COLOR2_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
#define CB_COLOR2_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x60
#define CB_COLOR2_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
#define CB_COLOR2_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x180
#define CB_COLOR2_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
#define CB_COLOR2_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x200
#define CB_COLOR2_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
#define CB_COLOR2_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x3c00
#define CB_COLOR2_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
#define CB_COLOR2_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x3c000
#define CB_COLOR2_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
#define CB_COLOR3_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x1
#define CB_COLOR3_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
#define CB_COLOR3_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x2
#define CB_COLOR3_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
#define CB_COLOR3_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0xc
#define CB_COLOR3_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
#define CB_COLOR3_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x10
#define CB_COLOR3_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
#define CB_COLOR3_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x60
#define CB_COLOR3_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
#define CB_COLOR3_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x180
#define CB_COLOR3_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
#define CB_COLOR3_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x200
#define CB_COLOR3_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
#define CB_COLOR3_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x3c00
#define CB_COLOR3_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
#define CB_COLOR3_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x3c000
#define CB_COLOR3_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
#define CB_COLOR4_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x1
#define CB_COLOR4_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
#define CB_COLOR4_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x2
#define CB_COLOR4_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
#define CB_COLOR4_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0xc
#define CB_COLOR4_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
#define CB_COLOR4_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x10
#define CB_COLOR4_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
#define CB_COLOR4_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x60
#define CB_COLOR4_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
#define CB_COLOR4_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x180
#define CB_COLOR4_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
#define CB_COLOR4_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x200
#define CB_COLOR4_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
#define CB_COLOR4_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x3c00
#define CB_COLOR4_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
#define CB_COLOR4_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x3c000
#define CB_COLOR4_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
#define CB_COLOR5_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x1
#define CB_COLOR5_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
#define CB_COLOR5_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x2
766#define CB_COLOR5_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
767#define CB_COLOR5_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0xc
768#define CB_COLOR5_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
769#define CB_COLOR5_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x10
770#define CB_COLOR5_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
771#define CB_COLOR5_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x60
772#define CB_COLOR5_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
773#define CB_COLOR5_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x180
774#define CB_COLOR5_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
775#define CB_COLOR5_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x200
776#define CB_COLOR5_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
777#define CB_COLOR5_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x3c00
778#define CB_COLOR5_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
779#define CB_COLOR5_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x3c000
780#define CB_COLOR5_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
781#define CB_COLOR6_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x1
782#define CB_COLOR6_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
783#define CB_COLOR6_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x2
784#define CB_COLOR6_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
785#define CB_COLOR6_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0xc
786#define CB_COLOR6_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
787#define CB_COLOR6_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x10
788#define CB_COLOR6_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
789#define CB_COLOR6_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x60
790#define CB_COLOR6_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
791#define CB_COLOR6_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x180
792#define CB_COLOR6_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
793#define CB_COLOR6_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x200
794#define CB_COLOR6_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
795#define CB_COLOR6_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x3c00
796#define CB_COLOR6_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
797#define CB_COLOR6_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x3c000
798#define CB_COLOR6_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
799#define CB_COLOR7_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x1
800#define CB_COLOR7_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
801#define CB_COLOR7_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x2
802#define CB_COLOR7_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
803#define CB_COLOR7_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0xc
804#define CB_COLOR7_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
805#define CB_COLOR7_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x10
806#define CB_COLOR7_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
807#define CB_COLOR7_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x60
808#define CB_COLOR7_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
809#define CB_COLOR7_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x180
810#define CB_COLOR7_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
811#define CB_COLOR7_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x200
812#define CB_COLOR7_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
813#define CB_COLOR7_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x3c00
814#define CB_COLOR7_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
815#define CB_COLOR7_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x3c000
816#define CB_COLOR7_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
817#define CB_COLOR0_CMASK__BASE_256B_MASK 0xffffffff
818#define CB_COLOR0_CMASK__BASE_256B__SHIFT 0x0
819#define CB_COLOR1_CMASK__BASE_256B_MASK 0xffffffff
820#define CB_COLOR1_CMASK__BASE_256B__SHIFT 0x0
821#define CB_COLOR2_CMASK__BASE_256B_MASK 0xffffffff
822#define CB_COLOR2_CMASK__BASE_256B__SHIFT 0x0
823#define CB_COLOR3_CMASK__BASE_256B_MASK 0xffffffff
824#define CB_COLOR3_CMASK__BASE_256B__SHIFT 0x0
825#define CB_COLOR4_CMASK__BASE_256B_MASK 0xffffffff
826#define CB_COLOR4_CMASK__BASE_256B__SHIFT 0x0
827#define CB_COLOR5_CMASK__BASE_256B_MASK 0xffffffff
828#define CB_COLOR5_CMASK__BASE_256B__SHIFT 0x0
829#define CB_COLOR6_CMASK__BASE_256B_MASK 0xffffffff
830#define CB_COLOR6_CMASK__BASE_256B__SHIFT 0x0
831#define CB_COLOR7_CMASK__BASE_256B_MASK 0xffffffff
832#define CB_COLOR7_CMASK__BASE_256B__SHIFT 0x0
833#define CB_COLOR0_CMASK_SLICE__TILE_MAX_MASK 0x3fff
834#define CB_COLOR0_CMASK_SLICE__TILE_MAX__SHIFT 0x0
835#define CB_COLOR1_CMASK_SLICE__TILE_MAX_MASK 0x3fff
836#define CB_COLOR1_CMASK_SLICE__TILE_MAX__SHIFT 0x0
837#define CB_COLOR2_CMASK_SLICE__TILE_MAX_MASK 0x3fff
838#define CB_COLOR2_CMASK_SLICE__TILE_MAX__SHIFT 0x0
839#define CB_COLOR3_CMASK_SLICE__TILE_MAX_MASK 0x3fff
840#define CB_COLOR3_CMASK_SLICE__TILE_MAX__SHIFT 0x0
841#define CB_COLOR4_CMASK_SLICE__TILE_MAX_MASK 0x3fff
842#define CB_COLOR4_CMASK_SLICE__TILE_MAX__SHIFT 0x0
843#define CB_COLOR5_CMASK_SLICE__TILE_MAX_MASK 0x3fff
844#define CB_COLOR5_CMASK_SLICE__TILE_MAX__SHIFT 0x0
845#define CB_COLOR6_CMASK_SLICE__TILE_MAX_MASK 0x3fff
846#define CB_COLOR6_CMASK_SLICE__TILE_MAX__SHIFT 0x0
847#define CB_COLOR7_CMASK_SLICE__TILE_MAX_MASK 0x3fff
848#define CB_COLOR7_CMASK_SLICE__TILE_MAX__SHIFT 0x0
849#define CB_COLOR0_FMASK__BASE_256B_MASK 0xffffffff
850#define CB_COLOR0_FMASK__BASE_256B__SHIFT 0x0
851#define CB_COLOR1_FMASK__BASE_256B_MASK 0xffffffff
852#define CB_COLOR1_FMASK__BASE_256B__SHIFT 0x0
853#define CB_COLOR2_FMASK__BASE_256B_MASK 0xffffffff
854#define CB_COLOR2_FMASK__BASE_256B__SHIFT 0x0
855#define CB_COLOR3_FMASK__BASE_256B_MASK 0xffffffff
856#define CB_COLOR3_FMASK__BASE_256B__SHIFT 0x0
857#define CB_COLOR4_FMASK__BASE_256B_MASK 0xffffffff
858#define CB_COLOR4_FMASK__BASE_256B__SHIFT 0x0
859#define CB_COLOR5_FMASK__BASE_256B_MASK 0xffffffff
860#define CB_COLOR5_FMASK__BASE_256B__SHIFT 0x0
861#define CB_COLOR6_FMASK__BASE_256B_MASK 0xffffffff
862#define CB_COLOR6_FMASK__BASE_256B__SHIFT 0x0
863#define CB_COLOR7_FMASK__BASE_256B_MASK 0xffffffff
864#define CB_COLOR7_FMASK__BASE_256B__SHIFT 0x0
865#define CB_COLOR0_FMASK_SLICE__TILE_MAX_MASK 0x3fffff
866#define CB_COLOR0_FMASK_SLICE__TILE_MAX__SHIFT 0x0
867#define CB_COLOR1_FMASK_SLICE__TILE_MAX_MASK 0x3fffff
868#define CB_COLOR1_FMASK_SLICE__TILE_MAX__SHIFT 0x0
869#define CB_COLOR2_FMASK_SLICE__TILE_MAX_MASK 0x3fffff
870#define CB_COLOR2_FMASK_SLICE__TILE_MAX__SHIFT 0x0
871#define CB_COLOR3_FMASK_SLICE__TILE_MAX_MASK 0x3fffff
872#define CB_COLOR3_FMASK_SLICE__TILE_MAX__SHIFT 0x0
873#define CB_COLOR4_FMASK_SLICE__TILE_MAX_MASK 0x3fffff
874#define CB_COLOR4_FMASK_SLICE__TILE_MAX__SHIFT 0x0
875#define CB_COLOR5_FMASK_SLICE__TILE_MAX_MASK 0x3fffff
876#define CB_COLOR5_FMASK_SLICE__TILE_MAX__SHIFT 0x0
877#define CB_COLOR6_FMASK_SLICE__TILE_MAX_MASK 0x3fffff
878#define CB_COLOR6_FMASK_SLICE__TILE_MAX__SHIFT 0x0
879#define CB_COLOR7_FMASK_SLICE__TILE_MAX_MASK 0x3fffff
880#define CB_COLOR7_FMASK_SLICE__TILE_MAX__SHIFT 0x0
881#define CB_COLOR0_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffff
882#define CB_COLOR0_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
883#define CB_COLOR1_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffff
884#define CB_COLOR1_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
885#define CB_COLOR2_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffff
886#define CB_COLOR2_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
887#define CB_COLOR3_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffff
888#define CB_COLOR3_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
889#define CB_COLOR4_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffff
890#define CB_COLOR4_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
891#define CB_COLOR5_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffff
892#define CB_COLOR5_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
893#define CB_COLOR6_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffff
894#define CB_COLOR6_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
895#define CB_COLOR7_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffff
896#define CB_COLOR7_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
897#define CB_COLOR0_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffff
898#define CB_COLOR0_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
899#define CB_COLOR1_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffff
900#define CB_COLOR1_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
901#define CB_COLOR2_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffff
902#define CB_COLOR2_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
903#define CB_COLOR3_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffff
904#define CB_COLOR3_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
905#define CB_COLOR4_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffff
906#define CB_COLOR4_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
907#define CB_COLOR5_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffff
908#define CB_COLOR5_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
909#define CB_COLOR6_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffff
910#define CB_COLOR6_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
911#define CB_COLOR7_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffff
912#define CB_COLOR7_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
913#define CB_COLOR0_DCC_BASE__BASE_256B_MASK 0xffffffff
914#define CB_COLOR0_DCC_BASE__BASE_256B__SHIFT 0x0
915#define CB_COLOR1_DCC_BASE__BASE_256B_MASK 0xffffffff
916#define CB_COLOR1_DCC_BASE__BASE_256B__SHIFT 0x0
917#define CB_COLOR2_DCC_BASE__BASE_256B_MASK 0xffffffff
918#define CB_COLOR2_DCC_BASE__BASE_256B__SHIFT 0x0
919#define CB_COLOR3_DCC_BASE__BASE_256B_MASK 0xffffffff
920#define CB_COLOR3_DCC_BASE__BASE_256B__SHIFT 0x0
921#define CB_COLOR4_DCC_BASE__BASE_256B_MASK 0xffffffff
922#define CB_COLOR4_DCC_BASE__BASE_256B__SHIFT 0x0
923#define CB_COLOR5_DCC_BASE__BASE_256B_MASK 0xffffffff
924#define CB_COLOR5_DCC_BASE__BASE_256B__SHIFT 0x0
925#define CB_COLOR6_DCC_BASE__BASE_256B_MASK 0xffffffff
926#define CB_COLOR6_DCC_BASE__BASE_256B__SHIFT 0x0
927#define CB_COLOR7_DCC_BASE__BASE_256B_MASK 0xffffffff
928#define CB_COLOR7_DCC_BASE__BASE_256B__SHIFT 0x0
929#define CB_TARGET_MASK__TARGET0_ENABLE_MASK 0xf
930#define CB_TARGET_MASK__TARGET0_ENABLE__SHIFT 0x0
931#define CB_TARGET_MASK__TARGET1_ENABLE_MASK 0xf0
932#define CB_TARGET_MASK__TARGET1_ENABLE__SHIFT 0x4
933#define CB_TARGET_MASK__TARGET2_ENABLE_MASK 0xf00
934#define CB_TARGET_MASK__TARGET2_ENABLE__SHIFT 0x8
935#define CB_TARGET_MASK__TARGET3_ENABLE_MASK 0xf000
936#define CB_TARGET_MASK__TARGET3_ENABLE__SHIFT 0xc
937#define CB_TARGET_MASK__TARGET4_ENABLE_MASK 0xf0000
938#define CB_TARGET_MASK__TARGET4_ENABLE__SHIFT 0x10
939#define CB_TARGET_MASK__TARGET5_ENABLE_MASK 0xf00000
940#define CB_TARGET_MASK__TARGET5_ENABLE__SHIFT 0x14
941#define CB_TARGET_MASK__TARGET6_ENABLE_MASK 0xf000000
942#define CB_TARGET_MASK__TARGET6_ENABLE__SHIFT 0x18
943#define CB_TARGET_MASK__TARGET7_ENABLE_MASK 0xf0000000
944#define CB_TARGET_MASK__TARGET7_ENABLE__SHIFT 0x1c
945#define CB_SHADER_MASK__OUTPUT0_ENABLE_MASK 0xf
946#define CB_SHADER_MASK__OUTPUT0_ENABLE__SHIFT 0x0
947#define CB_SHADER_MASK__OUTPUT1_ENABLE_MASK 0xf0
948#define CB_SHADER_MASK__OUTPUT1_ENABLE__SHIFT 0x4
949#define CB_SHADER_MASK__OUTPUT2_ENABLE_MASK 0xf00
950#define CB_SHADER_MASK__OUTPUT2_ENABLE__SHIFT 0x8
951#define CB_SHADER_MASK__OUTPUT3_ENABLE_MASK 0xf000
952#define CB_SHADER_MASK__OUTPUT3_ENABLE__SHIFT 0xc
953#define CB_SHADER_MASK__OUTPUT4_ENABLE_MASK 0xf0000
954#define CB_SHADER_MASK__OUTPUT4_ENABLE__SHIFT 0x10
955#define CB_SHADER_MASK__OUTPUT5_ENABLE_MASK 0xf00000
956#define CB_SHADER_MASK__OUTPUT5_ENABLE__SHIFT 0x14
957#define CB_SHADER_MASK__OUTPUT6_ENABLE_MASK 0xf000000
958#define CB_SHADER_MASK__OUTPUT6_ENABLE__SHIFT 0x18
959#define CB_SHADER_MASK__OUTPUT7_ENABLE_MASK 0xf0000000
960#define CB_SHADER_MASK__OUTPUT7_ENABLE__SHIFT 0x1c
961#define CB_HW_CONTROL__CM_CACHE_EVICT_POINT_MASK 0xf
962#define CB_HW_CONTROL__CM_CACHE_EVICT_POINT__SHIFT 0x0
963#define CB_HW_CONTROL__FC_CACHE_EVICT_POINT_MASK 0x3c0
964#define CB_HW_CONTROL__FC_CACHE_EVICT_POINT__SHIFT 0x6
965#define CB_HW_CONTROL__CC_CACHE_EVICT_POINT_MASK 0xf000
966#define CB_HW_CONTROL__CC_CACHE_EVICT_POINT__SHIFT 0xc
967#define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE_MASK 0x10000
968#define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE__SHIFT 0x10
969#define CB_HW_CONTROL__DISABLE_INTNORM_LE11BPC_CLAMPING_MASK 0x40000
970#define CB_HW_CONTROL__DISABLE_INTNORM_LE11BPC_CLAMPING__SHIFT 0x12
971#define CB_HW_CONTROL__FORCE_NEEDS_DST_MASK 0x80000
972#define CB_HW_CONTROL__FORCE_NEEDS_DST__SHIFT 0x13
973#define CB_HW_CONTROL__FORCE_ALWAYS_TOGGLE_MASK 0x100000
974#define CB_HW_CONTROL__FORCE_ALWAYS_TOGGLE__SHIFT 0x14
975#define CB_HW_CONTROL__DISABLE_BLEND_OPT_RESULT_EQ_DEST_MASK 0x200000
976#define CB_HW_CONTROL__DISABLE_BLEND_OPT_RESULT_EQ_DEST__SHIFT 0x15
977#define CB_HW_CONTROL__DISABLE_FULL_WRITE_MASK_MASK 0x400000
978#define CB_HW_CONTROL__DISABLE_FULL_WRITE_MASK__SHIFT 0x16
979#define CB_HW_CONTROL__DISABLE_RESOLVE_OPT_FOR_SINGLE_FRAG_MASK 0x800000
980#define CB_HW_CONTROL__DISABLE_RESOLVE_OPT_FOR_SINGLE_FRAG__SHIFT 0x17
981#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DONT_RD_DST_MASK 0x1000000
982#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DONT_RD_DST__SHIFT 0x18
983#define CB_HW_CONTROL__DISABLE_BLEND_OPT_BYPASS_MASK 0x2000000
984#define CB_HW_CONTROL__DISABLE_BLEND_OPT_BYPASS__SHIFT 0x19
985#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DISCARD_PIXEL_MASK 0x4000000
986#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DISCARD_PIXEL__SHIFT 0x1a
987#define CB_HW_CONTROL__DISABLE_BLEND_OPT_WHEN_DISABLED_SRCALPHA_IS_USED_MASK 0x8000000
988#define CB_HW_CONTROL__DISABLE_BLEND_OPT_WHEN_DISABLED_SRCALPHA_IS_USED__SHIFT 0x1b
989#define CB_HW_CONTROL__PRIORITIZE_FC_WR_OVER_FC_RD_ON_CMASK_CONFLICT_MASK 0x10000000
990#define CB_HW_CONTROL__PRIORITIZE_FC_WR_OVER_FC_RD_ON_CMASK_CONFLICT__SHIFT 0x1c
991#define CB_HW_CONTROL__PRIORITIZE_FC_EVICT_OVER_FOP_RD_ON_BANK_CONFLICT_MASK 0x20000000
992#define CB_HW_CONTROL__PRIORITIZE_FC_EVICT_OVER_FOP_RD_ON_BANK_CONFLICT__SHIFT 0x1d
993#define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT_MASK 0x40000000
994#define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT__SHIFT 0x1e
995#define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE_MASK 0x80000000
996#define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE__SHIFT 0x1f
997#define CB_HW_CONTROL_1__CM_CACHE_NUM_TAGS_MASK 0x1f
998#define CB_HW_CONTROL_1__CM_CACHE_NUM_TAGS__SHIFT 0x0
999#define CB_HW_CONTROL_1__FC_CACHE_NUM_TAGS_MASK 0x7e0
1000#define CB_HW_CONTROL_1__FC_CACHE_NUM_TAGS__SHIFT 0x5
1001#define CB_HW_CONTROL_1__CC_CACHE_NUM_TAGS_MASK 0x1f800
1002#define CB_HW_CONTROL_1__CC_CACHE_NUM_TAGS__SHIFT 0xb
1003#define CB_HW_CONTROL_1__CM_TILE_FIFO_DEPTH_MASK 0x3fe0000
1004#define CB_HW_CONTROL_1__CM_TILE_FIFO_DEPTH__SHIFT 0x11
1005#define CB_HW_CONTROL_1__CHICKEN_BITS_MASK 0xfc000000
1006#define CB_HW_CONTROL_1__CHICKEN_BITS__SHIFT 0x1a
1007#define CB_HW_CONTROL_2__CC_EVEN_ODD_FIFO_DEPTH_MASK 0xff
1008#define CB_HW_CONTROL_2__CC_EVEN_ODD_FIFO_DEPTH__SHIFT 0x0
1009#define CB_HW_CONTROL_2__FC_RDLAT_TILE_FIFO_DEPTH_MASK 0x7f00
1010#define CB_HW_CONTROL_2__FC_RDLAT_TILE_FIFO_DEPTH__SHIFT 0x8
1011#define CB_HW_CONTROL_2__FC_RDLAT_QUAD_FIFO_DEPTH_MASK 0x7f8000
1012#define CB_HW_CONTROL_2__FC_RDLAT_QUAD_FIFO_DEPTH__SHIFT 0xf
1013#define CB_HW_CONTROL_2__DRR_ASSUMED_FIFO_DEPTH_DIV8_MASK 0xf000000
1014#define CB_HW_CONTROL_2__DRR_ASSUMED_FIFO_DEPTH_DIV8__SHIFT 0x18
1015#define CB_HW_CONTROL_2__CHICKEN_BITS_MASK 0xf0000000
1016#define CB_HW_CONTROL_2__CHICKEN_BITS__SHIFT 0x1c
1017#define CB_HW_CONTROL_3__DISABLE_SLOW_MODE_EMPTY_HALF_QUAD_KILL_MASK 0x1
1018#define CB_HW_CONTROL_3__DISABLE_SLOW_MODE_EMPTY_HALF_QUAD_KILL__SHIFT 0x0
1019#define CB_HW_CONTROL_3__RAM_ADDRESS_CONFLICTS_DISALLOWED_MASK 0x2
1020#define CB_HW_CONTROL_3__RAM_ADDRESS_CONFLICTS_DISALLOWED__SHIFT 0x1
1021#define CB_HW_CONTROL_3__DISABLE_FAST_CLEAR_FETCH_OPT_MASK 0x4
1022#define CB_HW_CONTROL_3__DISABLE_FAST_CLEAR_FETCH_OPT__SHIFT 0x2
1023#define CB_HW_CONTROL_3__DISABLE_QUAD_MARKER_DROP_STOP_MASK 0x8
1024#define CB_HW_CONTROL_3__DISABLE_QUAD_MARKER_DROP_STOP__SHIFT 0x3
1025#define CB_HW_CONTROL_3__DISABLE_OVERWRITE_COMBINER_CAM_CLR_MASK 0x10
1026#define CB_HW_CONTROL_3__DISABLE_OVERWRITE_COMBINER_CAM_CLR__SHIFT 0x4
1027#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_STATUS_ACCUM_MASK 0x20
1028#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_STATUS_ACCUM__SHIFT 0x5
1029#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_PANIC_GATING_MASK 0x80
1030#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_PANIC_GATING__SHIFT 0x7
1031#define CB_HW_CONTROL_3__DISABLE_OVERWRITE_COMBINER_TARGET_MASK_VALIDATION_MASK 0x100
1032#define CB_HW_CONTROL_3__DISABLE_OVERWRITE_COMBINER_TARGET_MASK_VALIDATION__SHIFT 0x8
1033#define CB_HW_CONTROL_3__SPLIT_ALL_FAST_MODE_TRANSFERS_MASK 0x200
1034#define CB_HW_CONTROL_3__SPLIT_ALL_FAST_MODE_TRANSFERS__SHIFT 0x9
1035#define CB_HW_CONTROL_3__DISABLE_SHADER_BLEND_OPTS_MASK 0x400
1036#define CB_HW_CONTROL_3__DISABLE_SHADER_BLEND_OPTS__SHIFT 0xa
1037#define CB_HW_CONTROL_3__DISABLE_CMASK_LAST_QUAD_INSERTION_MASK 0x800
1038#define CB_HW_CONTROL_3__DISABLE_CMASK_LAST_QUAD_INSERTION__SHIFT 0xb
1039#define CB_HW_CONTROL_3__DISABLE_ROP3_FIXES_OF_BUG_511967_MASK 0x1000
1040#define CB_HW_CONTROL_3__DISABLE_ROP3_FIXES_OF_BUG_511967__SHIFT 0xc
1041#define CB_HW_CONTROL_3__DISABLE_ROP3_FIXES_OF_BUG_520657_MASK 0x2000
1042#define CB_HW_CONTROL_3__DISABLE_ROP3_FIXES_OF_BUG_520657__SHIFT 0xd
1043#define CB_DCC_CONFIG__OVERWRITE_COMBINER_DEPTH_MASK 0x1f
1044#define CB_DCC_CONFIG__OVERWRITE_COMBINER_DEPTH__SHIFT 0x0
1045#define CB_DCC_CONFIG__OVERWRITE_COMBINER_DISABLE_MASK 0x20
1046#define CB_DCC_CONFIG__OVERWRITE_COMBINER_DISABLE__SHIFT 0x5
1047#define CB_DCC_CONFIG__OVERWRITE_COMBINER_CC_POP_DISABLE_MASK 0x40
1048#define CB_DCC_CONFIG__OVERWRITE_COMBINER_CC_POP_DISABLE__SHIFT 0x6
1049#define CB_DCC_CONFIG__FC_RDLAT_KEYID_FIFO_DEPTH_MASK 0xff00
1050#define CB_DCC_CONFIG__FC_RDLAT_KEYID_FIFO_DEPTH__SHIFT 0x8
1051#define CB_DCC_CONFIG__READ_RETURN_SKID_FIFO_DEPTH_MASK 0x7f0000
1052#define CB_DCC_CONFIG__READ_RETURN_SKID_FIFO_DEPTH__SHIFT 0x10
1053#define CB_DCC_CONFIG__DCC_CACHE_EVICT_POINT_MASK 0xf000000
1054#define CB_DCC_CONFIG__DCC_CACHE_EVICT_POINT__SHIFT 0x18
1055#define CB_DCC_CONFIG__DCC_CACHE_NUM_TAGS_MASK 0xf0000000
1056#define CB_DCC_CONFIG__DCC_CACHE_NUM_TAGS__SHIFT 0x1c
1057#define CB_PERFCOUNTER_FILTER__OP_FILTER_ENABLE_MASK 0x1
1058#define CB_PERFCOUNTER_FILTER__OP_FILTER_ENABLE__SHIFT 0x0
1059#define CB_PERFCOUNTER_FILTER__OP_FILTER_SEL_MASK 0xe
1060#define CB_PERFCOUNTER_FILTER__OP_FILTER_SEL__SHIFT 0x1
1061#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_ENABLE_MASK 0x10
1062#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_ENABLE__SHIFT 0x4
1063#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_SEL_MASK 0x3e0
1064#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_SEL__SHIFT 0x5
1065#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_ENABLE_MASK 0x400
1066#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_ENABLE__SHIFT 0xa
1067#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_SEL_MASK 0x800
1068#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_SEL__SHIFT 0xb
1069#define CB_PERFCOUNTER_FILTER__MRT_FILTER_ENABLE_MASK 0x1000
1070#define CB_PERFCOUNTER_FILTER__MRT_FILTER_ENABLE__SHIFT 0xc
1071#define CB_PERFCOUNTER_FILTER__MRT_FILTER_SEL_MASK 0xe000
1072#define CB_PERFCOUNTER_FILTER__MRT_FILTER_SEL__SHIFT 0xd
1073#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_ENABLE_MASK 0x20000
1074#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_ENABLE__SHIFT 0x11
1075#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_SEL_MASK 0x1c0000
1076#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_SEL__SHIFT 0x12
1077#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_ENABLE_MASK 0x200000
1078#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_ENABLE__SHIFT 0x15
1079#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_SEL_MASK 0xc00000
1080#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_SEL__SHIFT 0x16
1081#define CB_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x1ff
1082#define CB_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
1083#define CB_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x7fc00
1084#define CB_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
1085#define CB_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0xf00000
1086#define CB_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
1087#define CB_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0xf000000
1088#define CB_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
1089#define CB_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000
1090#define CB_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
1091#define CB_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x1ff
1092#define CB_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
1093#define CB_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x7fc00
1094#define CB_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
1095#define CB_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xf000000
1096#define CB_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
1097#define CB_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xf0000000
1098#define CB_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
1099#define CB_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x1ff
1100#define CB_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
1101#define CB_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000
1102#define CB_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
1103#define CB_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x1ff
1104#define CB_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
1105#define CB_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xf0000000
1106#define CB_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
1107#define CB_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x1ff
1108#define CB_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
1109#define CB_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xf0000000
1110#define CB_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
1111#define CB_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffff
1112#define CB_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
1113#define CB_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffff
1114#define CB_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
1115#define CB_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffff
1116#define CB_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
1117#define CB_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffff
1118#define CB_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
1119#define CB_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffff
1120#define CB_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
1121#define CB_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffff
1122#define CB_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
1123#define CB_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffff
1124#define CB_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
1125#define CB_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffff
1126#define CB_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
1127#define CB_CGTT_SCLK_CTRL__ON_DELAY_MASK 0xf
1128#define CB_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
1129#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
1130#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
1131#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x1000000
1132#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
1133#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x2000000
1134#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
1135#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x4000000
1136#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
1137#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x8000000
1138#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
1139#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000
1140#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
1141#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000
1142#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
1143#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000
1144#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
1145#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000
1146#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
1147#define CB_DEBUG_BUS_1__CB_BUSY_MASK 0x1
1148#define CB_DEBUG_BUS_1__CB_BUSY__SHIFT 0x0
1149#define CB_DEBUG_BUS_1__DB_CB_TILE_VALID_READY_MASK 0x2
1150#define CB_DEBUG_BUS_1__DB_CB_TILE_VALID_READY__SHIFT 0x1
1151#define CB_DEBUG_BUS_1__DB_CB_TILE_VALID_READYB_MASK 0x4
1152#define CB_DEBUG_BUS_1__DB_CB_TILE_VALID_READYB__SHIFT 0x2
1153#define CB_DEBUG_BUS_1__DB_CB_TILE_VALIDB_READY_MASK 0x8
1154#define CB_DEBUG_BUS_1__DB_CB_TILE_VALIDB_READY__SHIFT 0x3
1155#define CB_DEBUG_BUS_1__DB_CB_TILE_VALIDB_READYB_MASK 0x10
1156#define CB_DEBUG_BUS_1__DB_CB_TILE_VALIDB_READYB__SHIFT 0x4
1157#define CB_DEBUG_BUS_1__DB_CB_LQUAD_VALID_READY_MASK 0x20
1158#define CB_DEBUG_BUS_1__DB_CB_LQUAD_VALID_READY__SHIFT 0x5
1159#define CB_DEBUG_BUS_1__DB_CB_LQUAD_VALID_READYB_MASK 0x40
1160#define CB_DEBUG_BUS_1__DB_CB_LQUAD_VALID_READYB__SHIFT 0x6
1161#define CB_DEBUG_BUS_1__DB_CB_LQUAD_VALIDB_READY_MASK 0x80
1162#define CB_DEBUG_BUS_1__DB_CB_LQUAD_VALIDB_READY__SHIFT 0x7
1163#define CB_DEBUG_BUS_1__DB_CB_LQUAD_VALIDB_READYB_MASK 0x100
1164#define CB_DEBUG_BUS_1__DB_CB_LQUAD_VALIDB_READYB__SHIFT 0x8
1165#define CB_DEBUG_BUS_1__CB_TAP_WRREQ_VALID_READY_MASK 0x200
1166#define CB_DEBUG_BUS_1__CB_TAP_WRREQ_VALID_READY__SHIFT 0x9
1167#define CB_DEBUG_BUS_1__CB_TAP_WRREQ_VALID_READYB_MASK 0x400
1168#define CB_DEBUG_BUS_1__CB_TAP_WRREQ_VALID_READYB__SHIFT 0xa
1169#define CB_DEBUG_BUS_1__CB_TAP_WRREQ_VALIDB_READY_MASK 0x800
1170#define CB_DEBUG_BUS_1__CB_TAP_WRREQ_VALIDB_READY__SHIFT 0xb
1171#define CB_DEBUG_BUS_1__CB_TAP_WRREQ_VALIDB_READYB_MASK 0x1000
1172#define CB_DEBUG_BUS_1__CB_TAP_WRREQ_VALIDB_READYB__SHIFT 0xc
1173#define CB_DEBUG_BUS_1__CB_TAP_RDREQ_VALID_READY_MASK 0x2000
1174#define CB_DEBUG_BUS_1__CB_TAP_RDREQ_VALID_READY__SHIFT 0xd
1175#define CB_DEBUG_BUS_1__CB_TAP_RDREQ_VALID_READYB_MASK 0x4000
1176#define CB_DEBUG_BUS_1__CB_TAP_RDREQ_VALID_READYB__SHIFT 0xe
1177#define CB_DEBUG_BUS_1__CB_TAP_RDREQ_VALIDB_READY_MASK 0x8000
1178#define CB_DEBUG_BUS_1__CB_TAP_RDREQ_VALIDB_READY__SHIFT 0xf
1179#define CB_DEBUG_BUS_1__CB_TAP_RDREQ_VALIDB_READYB_MASK 0x10000
1180#define CB_DEBUG_BUS_1__CB_TAP_RDREQ_VALIDB_READYB__SHIFT 0x10
1181#define CB_DEBUG_BUS_1__CM_FC_TILE_VALID_READY_MASK 0x20000
1182#define CB_DEBUG_BUS_1__CM_FC_TILE_VALID_READY__SHIFT 0x11
1183#define CB_DEBUG_BUS_1__CM_FC_TILE_VALID_READYB_MASK 0x40000
1184#define CB_DEBUG_BUS_1__CM_FC_TILE_VALID_READYB__SHIFT 0x12
1185#define CB_DEBUG_BUS_1__CM_FC_TILE_VALIDB_READY_MASK 0x80000
1186#define CB_DEBUG_BUS_1__CM_FC_TILE_VALIDB_READY__SHIFT 0x13
1187#define CB_DEBUG_BUS_1__CM_FC_TILE_VALIDB_READYB_MASK 0x100000
1188#define CB_DEBUG_BUS_1__CM_FC_TILE_VALIDB_READYB__SHIFT 0x14
1189#define CB_DEBUG_BUS_1__FC_CLEAR_QUAD_VALID_READY_MASK 0x200000
1190#define CB_DEBUG_BUS_1__FC_CLEAR_QUAD_VALID_READY__SHIFT 0x15
1191#define CB_DEBUG_BUS_1__FC_CLEAR_QUAD_VALID_READYB_MASK 0x400000
1192#define CB_DEBUG_BUS_1__FC_CLEAR_QUAD_VALID_READYB__SHIFT 0x16
1193#define CB_DEBUG_BUS_1__FC_CLEAR_QUAD_VALIDB_READY_MASK 0x800000
1194#define CB_DEBUG_BUS_1__FC_CLEAR_QUAD_VALIDB_READY__SHIFT 0x17
1195#define CB_DEBUG_BUS_2__FC_CLEAR_QUAD_VALIDB_READYB_MASK 0x1
1196#define CB_DEBUG_BUS_2__FC_CLEAR_QUAD_VALIDB_READYB__SHIFT 0x0
1197#define CB_DEBUG_BUS_2__FC_QUAD_RESIDENCY_STALL_MASK 0x2
1198#define CB_DEBUG_BUS_2__FC_QUAD_RESIDENCY_STALL__SHIFT 0x1
1199#define CB_DEBUG_BUS_2__FC_CC_QUADFRAG_VALID_READY_MASK 0x4
1200#define CB_DEBUG_BUS_2__FC_CC_QUADFRAG_VALID_READY__SHIFT 0x2
1201#define CB_DEBUG_BUS_2__FC_CC_QUADFRAG_VALID_READYB_MASK 0x8
1202#define CB_DEBUG_BUS_2__FC_CC_QUADFRAG_VALID_READYB__SHIFT 0x3
1203#define CB_DEBUG_BUS_2__FC_CC_QUADFRAG_VALIDB_READY_MASK 0x10
1204#define CB_DEBUG_BUS_2__FC_CC_QUADFRAG_VALIDB_READY__SHIFT 0x4
1205#define CB_DEBUG_BUS_2__FC_CC_QUADFRAG_VALIDB_READYB_MASK 0x20
1206#define CB_DEBUG_BUS_2__FC_CC_QUADFRAG_VALIDB_READYB__SHIFT 0x5
1207#define CB_DEBUG_BUS_2__FOP_IN_VALID_READY_MASK 0x40
1208#define CB_DEBUG_BUS_2__FOP_IN_VALID_READY__SHIFT 0x6
1209#define CB_DEBUG_BUS_2__FOP_IN_VALID_READYB_MASK 0x80
1210#define CB_DEBUG_BUS_2__FOP_IN_VALID_READYB__SHIFT 0x7
1211#define CB_DEBUG_BUS_2__FOP_IN_VALIDB_READY_MASK 0x100
1212#define CB_DEBUG_BUS_2__FOP_IN_VALIDB_READY__SHIFT 0x8
1213#define CB_DEBUG_BUS_2__FOP_IN_VALIDB_READYB_MASK 0x200
1214#define CB_DEBUG_BUS_2__FOP_IN_VALIDB_READYB__SHIFT 0x9
1215#define CB_DEBUG_BUS_2__FOP_FMASK_RAW_STALL_MASK 0x400
1216#define CB_DEBUG_BUS_2__FOP_FMASK_RAW_STALL__SHIFT 0xa
1217#define CB_DEBUG_BUS_2__FOP_FMASK_BYPASS_STALL_MASK 0x800
1218#define CB_DEBUG_BUS_2__FOP_FMASK_BYPASS_STALL__SHIFT 0xb
/*
 * CB (Color Block) debug bus signal definitions, CB_DEBUG_BUS_2 .. CB_DEBUG_BUS_20.
 * Auto-generated register header: for each signal NAME there is a pair
 *   NAME_MASK   - contiguous bitfield mask within the 24-bit debug bus word
 *   NAME__SHIFT - bit position of the field's least-significant bit
 * so the field value is (reg & NAME_MASK) >> NAME__SHIFT. Most fields are
 * single-bit flags; multi-bit fields (e.g. request-in-flight counters,
 * compress-ratio counts) have wider masks. Do not edit by hand.
 */
#define CB_DEBUG_BUS_2__CC_IB_TB_FRAG_VALID_READY_MASK 0x1000
#define CB_DEBUG_BUS_2__CC_IB_TB_FRAG_VALID_READY__SHIFT 0xc
#define CB_DEBUG_BUS_2__CC_IB_TB_FRAG_VALID_READYB_MASK 0x2000
#define CB_DEBUG_BUS_2__CC_IB_TB_FRAG_VALID_READYB__SHIFT 0xd
#define CB_DEBUG_BUS_2__CC_IB_TB_FRAG_VALIDB_READY_MASK 0x4000
#define CB_DEBUG_BUS_2__CC_IB_TB_FRAG_VALIDB_READY__SHIFT 0xe
#define CB_DEBUG_BUS_2__CC_IB_TB_FRAG_VALIDB_READYB_MASK 0x8000
#define CB_DEBUG_BUS_2__CC_IB_TB_FRAG_VALIDB_READYB__SHIFT 0xf
#define CB_DEBUG_BUS_2__CC_IB_SR_FRAG_VALID_READY_MASK 0x10000
#define CB_DEBUG_BUS_2__CC_IB_SR_FRAG_VALID_READY__SHIFT 0x10
#define CB_DEBUG_BUS_2__CC_IB_SR_FRAG_VALID_READYB_MASK 0x20000
#define CB_DEBUG_BUS_2__CC_IB_SR_FRAG_VALID_READYB__SHIFT 0x11
#define CB_DEBUG_BUS_2__CC_IB_SR_FRAG_VALIDB_READY_MASK 0x40000
#define CB_DEBUG_BUS_2__CC_IB_SR_FRAG_VALIDB_READY__SHIFT 0x12
#define CB_DEBUG_BUS_2__CC_IB_SR_FRAG_VALIDB_READYB_MASK 0x80000
#define CB_DEBUG_BUS_2__CC_IB_SR_FRAG_VALIDB_READYB__SHIFT 0x13
#define CB_DEBUG_BUS_2__CC_RB_BC_EVENFRAG_VALID_READY_MASK 0x100000
#define CB_DEBUG_BUS_2__CC_RB_BC_EVENFRAG_VALID_READY__SHIFT 0x14
#define CB_DEBUG_BUS_2__CC_RB_BC_EVENFRAG_VALID_READYB_MASK 0x200000
#define CB_DEBUG_BUS_2__CC_RB_BC_EVENFRAG_VALID_READYB__SHIFT 0x15
#define CB_DEBUG_BUS_2__CC_RB_BC_EVENFRAG_VALIDB_READY_MASK 0x400000
#define CB_DEBUG_BUS_2__CC_RB_BC_EVENFRAG_VALIDB_READY__SHIFT 0x16
#define CB_DEBUG_BUS_2__CC_RB_BC_EVENFRAG_VALIDB_READYB_MASK 0x800000
#define CB_DEBUG_BUS_2__CC_RB_BC_EVENFRAG_VALIDB_READYB__SHIFT 0x17
#define CB_DEBUG_BUS_3__CC_RB_BC_ODDFRAG_VALID_READY_MASK 0x1
#define CB_DEBUG_BUS_3__CC_RB_BC_ODDFRAG_VALID_READY__SHIFT 0x0
#define CB_DEBUG_BUS_3__CC_RB_BC_ODDFRAG_VALID_READYB_MASK 0x2
#define CB_DEBUG_BUS_3__CC_RB_BC_ODDFRAG_VALID_READYB__SHIFT 0x1
#define CB_DEBUG_BUS_3__CC_RB_BC_ODDFRAG_VALIDB_READY_MASK 0x4
#define CB_DEBUG_BUS_3__CC_RB_BC_ODDFRAG_VALIDB_READY__SHIFT 0x2
#define CB_DEBUG_BUS_3__CC_RB_BC_ODDFRAG_VALIDB_READYB_MASK 0x8
#define CB_DEBUG_BUS_3__CC_RB_BC_ODDFRAG_VALIDB_READYB__SHIFT 0x3
#define CB_DEBUG_BUS_3__CC_BC_CS_FRAG_VALID_MASK 0x10
#define CB_DEBUG_BUS_3__CC_BC_CS_FRAG_VALID__SHIFT 0x4
#define CB_DEBUG_BUS_3__CC_SF_FULL_MASK 0x20
#define CB_DEBUG_BUS_3__CC_SF_FULL__SHIFT 0x5
#define CB_DEBUG_BUS_3__CC_RB_FULL_MASK 0x40
#define CB_DEBUG_BUS_3__CC_RB_FULL__SHIFT 0x6
#define CB_DEBUG_BUS_3__CC_EVENFIFO_QUAD_RESIDENCY_STALL_MASK 0x80
#define CB_DEBUG_BUS_3__CC_EVENFIFO_QUAD_RESIDENCY_STALL__SHIFT 0x7
#define CB_DEBUG_BUS_3__CC_ODDFIFO_QUAD_RESIDENCY_STALL_MASK 0x100
#define CB_DEBUG_BUS_3__CC_ODDFIFO_QUAD_RESIDENCY_STALL__SHIFT 0x8
#define CB_DEBUG_BUS_3__CM_TQ_FULL_MASK 0x200
#define CB_DEBUG_BUS_3__CM_TQ_FULL__SHIFT 0x9
#define CB_DEBUG_BUS_3__CM_TILE_RESIDENCY_STALL_MASK 0x400
#define CB_DEBUG_BUS_3__CM_TILE_RESIDENCY_STALL__SHIFT 0xa
#define CB_DEBUG_BUS_3__LQUAD_NO_TILE_MASK 0x800
#define CB_DEBUG_BUS_3__LQUAD_NO_TILE__SHIFT 0xb
#define CB_DEBUG_BUS_3__LQUAD_FORMAT_IS_EXPORT_32_R_MASK 0x1000
#define CB_DEBUG_BUS_3__LQUAD_FORMAT_IS_EXPORT_32_R__SHIFT 0xc
#define CB_DEBUG_BUS_3__LQUAD_FORMAT_IS_EXPORT_32_AR_MASK 0x2000
#define CB_DEBUG_BUS_3__LQUAD_FORMAT_IS_EXPORT_32_AR__SHIFT 0xd
#define CB_DEBUG_BUS_3__LQUAD_FORMAT_IS_EXPORT_32_GR_MASK 0x4000
#define CB_DEBUG_BUS_3__LQUAD_FORMAT_IS_EXPORT_32_GR__SHIFT 0xe
#define CB_DEBUG_BUS_3__LQUAD_FORMAT_IS_EXPORT_32_ABGR_MASK 0x8000
#define CB_DEBUG_BUS_3__LQUAD_FORMAT_IS_EXPORT_32_ABGR__SHIFT 0xf
#define CB_DEBUG_BUS_3__LQUAD_FORMAT_IS_EXPORT_FP16_ABGR_MASK 0x10000
#define CB_DEBUG_BUS_3__LQUAD_FORMAT_IS_EXPORT_FP16_ABGR__SHIFT 0x10
#define CB_DEBUG_BUS_3__LQUAD_FORMAT_IS_EXPORT_SIGNED16_ABGR_MASK 0x20000
#define CB_DEBUG_BUS_3__LQUAD_FORMAT_IS_EXPORT_SIGNED16_ABGR__SHIFT 0x11
#define CB_DEBUG_BUS_3__LQUAD_FORMAT_IS_EXPORT_UNSIGNED16_ABGR_MASK 0x40000
#define CB_DEBUG_BUS_3__LQUAD_FORMAT_IS_EXPORT_UNSIGNED16_ABGR__SHIFT 0x12
#define CB_DEBUG_BUS_3__CM_CACHE_HIT_MASK 0x80000
#define CB_DEBUG_BUS_3__CM_CACHE_HIT__SHIFT 0x13
#define CB_DEBUG_BUS_3__CM_CACHE_TAG_MISS_MASK 0x100000
#define CB_DEBUG_BUS_3__CM_CACHE_TAG_MISS__SHIFT 0x14
#define CB_DEBUG_BUS_3__CM_CACHE_SECTOR_MISS_MASK 0x200000
#define CB_DEBUG_BUS_3__CM_CACHE_SECTOR_MISS__SHIFT 0x15
#define CB_DEBUG_BUS_3__CM_CACHE_REEVICTION_STALL_MASK 0x400000
#define CB_DEBUG_BUS_3__CM_CACHE_REEVICTION_STALL__SHIFT 0x16
#define CB_DEBUG_BUS_3__CM_CACHE_EVICT_NONZERO_INFLIGHT_STALL_MASK 0x800000
#define CB_DEBUG_BUS_3__CM_CACHE_EVICT_NONZERO_INFLIGHT_STALL__SHIFT 0x17
#define CB_DEBUG_BUS_4__CM_CACHE_REPLACE_PENDING_EVICT_STALL_MASK 0x1
#define CB_DEBUG_BUS_4__CM_CACHE_REPLACE_PENDING_EVICT_STALL__SHIFT 0x0
#define CB_DEBUG_BUS_4__CM_CACHE_INFLIGHT_COUNTER_MAXIMUM_STALL_MASK 0x2
#define CB_DEBUG_BUS_4__CM_CACHE_INFLIGHT_COUNTER_MAXIMUM_STALL__SHIFT 0x1
#define CB_DEBUG_BUS_4__CM_CACHE_READ_OUTPUT_STALL_MASK 0x4
#define CB_DEBUG_BUS_4__CM_CACHE_READ_OUTPUT_STALL__SHIFT 0x2
#define CB_DEBUG_BUS_4__CM_CACHE_WRITE_OUTPUT_STALL_MASK 0x8
#define CB_DEBUG_BUS_4__CM_CACHE_WRITE_OUTPUT_STALL__SHIFT 0x3
#define CB_DEBUG_BUS_4__CM_CACHE_ACK_OUTPUT_STALL_MASK 0x10
#define CB_DEBUG_BUS_4__CM_CACHE_ACK_OUTPUT_STALL__SHIFT 0x4
#define CB_DEBUG_BUS_4__CM_CACHE_STALL_MASK 0x20
#define CB_DEBUG_BUS_4__CM_CACHE_STALL__SHIFT 0x5
#define CB_DEBUG_BUS_4__FC_CACHE_HIT_MASK 0x40
#define CB_DEBUG_BUS_4__FC_CACHE_HIT__SHIFT 0x6
#define CB_DEBUG_BUS_4__FC_CACHE_TAG_MISS_MASK 0x80
#define CB_DEBUG_BUS_4__FC_CACHE_TAG_MISS__SHIFT 0x7
#define CB_DEBUG_BUS_4__FC_CACHE_SECTOR_MISS_MASK 0x100
#define CB_DEBUG_BUS_4__FC_CACHE_SECTOR_MISS__SHIFT 0x8
#define CB_DEBUG_BUS_4__FC_CACHE_REEVICTION_STALL_MASK 0x200
#define CB_DEBUG_BUS_4__FC_CACHE_REEVICTION_STALL__SHIFT 0x9
#define CB_DEBUG_BUS_4__FC_CACHE_EVICT_NONZERO_INFLIGHT_STALL_MASK 0x400
#define CB_DEBUG_BUS_4__FC_CACHE_EVICT_NONZERO_INFLIGHT_STALL__SHIFT 0xa
#define CB_DEBUG_BUS_4__FC_CACHE_REPLACE_PENDING_EVICT_STALL_MASK 0x800
#define CB_DEBUG_BUS_4__FC_CACHE_REPLACE_PENDING_EVICT_STALL__SHIFT 0xb
#define CB_DEBUG_BUS_4__FC_CACHE_INFLIGHT_COUNTER_MAXIMUM_STALL_MASK 0x1000
#define CB_DEBUG_BUS_4__FC_CACHE_INFLIGHT_COUNTER_MAXIMUM_STALL__SHIFT 0xc
#define CB_DEBUG_BUS_4__FC_CACHE_READ_OUTPUT_STALL_MASK 0x2000
#define CB_DEBUG_BUS_4__FC_CACHE_READ_OUTPUT_STALL__SHIFT 0xd
#define CB_DEBUG_BUS_4__FC_CACHE_WRITE_OUTPUT_STALL_MASK 0x4000
#define CB_DEBUG_BUS_4__FC_CACHE_WRITE_OUTPUT_STALL__SHIFT 0xe
#define CB_DEBUG_BUS_4__FC_CACHE_ACK_OUTPUT_STALL_MASK 0x8000
#define CB_DEBUG_BUS_4__FC_CACHE_ACK_OUTPUT_STALL__SHIFT 0xf
#define CB_DEBUG_BUS_4__FC_CACHE_STALL_MASK 0x10000
#define CB_DEBUG_BUS_4__FC_CACHE_STALL__SHIFT 0x10
#define CB_DEBUG_BUS_4__CC_CACHE_HIT_MASK 0x20000
#define CB_DEBUG_BUS_4__CC_CACHE_HIT__SHIFT 0x11
#define CB_DEBUG_BUS_4__CC_CACHE_TAG_MISS_MASK 0x40000
#define CB_DEBUG_BUS_4__CC_CACHE_TAG_MISS__SHIFT 0x12
#define CB_DEBUG_BUS_4__CC_CACHE_SECTOR_MISS_MASK 0x80000
#define CB_DEBUG_BUS_4__CC_CACHE_SECTOR_MISS__SHIFT 0x13
#define CB_DEBUG_BUS_4__CC_CACHE_REEVICTION_STALL_MASK 0x100000
#define CB_DEBUG_BUS_4__CC_CACHE_REEVICTION_STALL__SHIFT 0x14
#define CB_DEBUG_BUS_4__CC_CACHE_EVICT_NONZERO_INFLIGHT_STALL_MASK 0x200000
#define CB_DEBUG_BUS_4__CC_CACHE_EVICT_NONZERO_INFLIGHT_STALL__SHIFT 0x15
#define CB_DEBUG_BUS_4__CC_CACHE_REPLACE_PENDING_EVICT_STALL_MASK 0x400000
#define CB_DEBUG_BUS_4__CC_CACHE_REPLACE_PENDING_EVICT_STALL__SHIFT 0x16
#define CB_DEBUG_BUS_4__CC_CACHE_INFLIGHT_COUNTER_MAXIMUM_STALL_MASK 0x800000
#define CB_DEBUG_BUS_4__CC_CACHE_INFLIGHT_COUNTER_MAXIMUM_STALL__SHIFT 0x17
#define CB_DEBUG_BUS_5__CC_CACHE_READ_OUTPUT_STALL_MASK 0x1
#define CB_DEBUG_BUS_5__CC_CACHE_READ_OUTPUT_STALL__SHIFT 0x0
#define CB_DEBUG_BUS_5__CC_CACHE_WRITE_OUTPUT_STALL_MASK 0x2
#define CB_DEBUG_BUS_5__CC_CACHE_WRITE_OUTPUT_STALL__SHIFT 0x1
#define CB_DEBUG_BUS_5__CC_CACHE_ACK_OUTPUT_STALL_MASK 0x4
#define CB_DEBUG_BUS_5__CC_CACHE_ACK_OUTPUT_STALL__SHIFT 0x2
#define CB_DEBUG_BUS_5__CC_CACHE_STALL_MASK 0x8
#define CB_DEBUG_BUS_5__CC_CACHE_STALL__SHIFT 0x3
#define CB_DEBUG_BUS_5__CC_CACHE_WA_TO_RMW_CONVERSION_MASK 0x10
#define CB_DEBUG_BUS_5__CC_CACHE_WA_TO_RMW_CONVERSION__SHIFT 0x4
#define CB_DEBUG_BUS_5__CM_CACHE_FLUSH_MASK 0x20
#define CB_DEBUG_BUS_5__CM_CACHE_FLUSH__SHIFT 0x5
#define CB_DEBUG_BUS_5__CM_CACHE_TAGS_FLUSHED_MASK 0x40
#define CB_DEBUG_BUS_5__CM_CACHE_TAGS_FLUSHED__SHIFT 0x6
#define CB_DEBUG_BUS_5__CM_CACHE_SECTORS_FLUSHED_MASK 0x80
#define CB_DEBUG_BUS_5__CM_CACHE_SECTORS_FLUSHED__SHIFT 0x7
#define CB_DEBUG_BUS_5__CM_CACHE_DIRTY_SECTORS_FLUSHED_MASK 0x100
#define CB_DEBUG_BUS_5__CM_CACHE_DIRTY_SECTORS_FLUSHED__SHIFT 0x8
#define CB_DEBUG_BUS_5__FC_CACHE_FLUSH_MASK 0x200
#define CB_DEBUG_BUS_5__FC_CACHE_FLUSH__SHIFT 0x9
#define CB_DEBUG_BUS_5__FC_CACHE_TAGS_FLUSHED_MASK 0x400
#define CB_DEBUG_BUS_5__FC_CACHE_TAGS_FLUSHED__SHIFT 0xa
#define CB_DEBUG_BUS_5__FC_CACHE_SECTORS_FLUSHED_MASK 0x3800
#define CB_DEBUG_BUS_5__FC_CACHE_SECTORS_FLUSHED__SHIFT 0xb
#define CB_DEBUG_BUS_5__FC_CACHE_DIRTY_SECTORS_FLUSHED_MASK 0x1c000
#define CB_DEBUG_BUS_5__FC_CACHE_DIRTY_SECTORS_FLUSHED__SHIFT 0xe
#define CB_DEBUG_BUS_5__CC_CACHE_FLUSH_MASK 0x20000
#define CB_DEBUG_BUS_5__CC_CACHE_FLUSH__SHIFT 0x11
#define CB_DEBUG_BUS_5__CC_CACHE_TAGS_FLUSHED_MASK 0x40000
#define CB_DEBUG_BUS_5__CC_CACHE_TAGS_FLUSHED__SHIFT 0x12
#define CB_DEBUG_BUS_5__CC_CACHE_SECTORS_FLUSHED_MASK 0x380000
#define CB_DEBUG_BUS_5__CC_CACHE_SECTORS_FLUSHED__SHIFT 0x13
#define CB_DEBUG_BUS_6__CC_CACHE_DIRTY_SECTORS_FLUSHED_MASK 0x7
#define CB_DEBUG_BUS_6__CC_CACHE_DIRTY_SECTORS_FLUSHED__SHIFT 0x0
#define CB_DEBUG_BUS_6__CM_MC_READ_REQUEST_MASK 0x8
#define CB_DEBUG_BUS_6__CM_MC_READ_REQUEST__SHIFT 0x3
#define CB_DEBUG_BUS_6__FC_MC_READ_REQUEST_MASK 0x10
#define CB_DEBUG_BUS_6__FC_MC_READ_REQUEST__SHIFT 0x4
#define CB_DEBUG_BUS_6__CC_MC_READ_REQUEST_MASK 0x20
#define CB_DEBUG_BUS_6__CC_MC_READ_REQUEST__SHIFT 0x5
#define CB_DEBUG_BUS_6__CM_MC_WRITE_REQUEST_MASK 0x40
#define CB_DEBUG_BUS_6__CM_MC_WRITE_REQUEST__SHIFT 0x6
#define CB_DEBUG_BUS_6__FC_MC_WRITE_REQUEST_MASK 0x80
#define CB_DEBUG_BUS_6__FC_MC_WRITE_REQUEST__SHIFT 0x7
#define CB_DEBUG_BUS_6__CC_MC_WRITE_REQUEST_MASK 0x100
#define CB_DEBUG_BUS_6__CC_MC_WRITE_REQUEST__SHIFT 0x8
#define CB_DEBUG_BUS_6__CM_MC_READ_REQUESTS_IN_FLIGHT_MASK 0x1fe00
#define CB_DEBUG_BUS_6__CM_MC_READ_REQUESTS_IN_FLIGHT__SHIFT 0x9
#define CB_DEBUG_BUS_7__FC_MC_READ_REQUESTS_IN_FLIGHT_MASK 0x7ff
#define CB_DEBUG_BUS_7__FC_MC_READ_REQUESTS_IN_FLIGHT__SHIFT 0x0
#define CB_DEBUG_BUS_7__CC_MC_READ_REQUESTS_IN_FLIGHT_MASK 0x1ff800
#define CB_DEBUG_BUS_7__CC_MC_READ_REQUESTS_IN_FLIGHT__SHIFT 0xb
#define CB_DEBUG_BUS_8__CM_MC_WRITE_REQUESTS_IN_FLIGHT_MASK 0xff
#define CB_DEBUG_BUS_8__CM_MC_WRITE_REQUESTS_IN_FLIGHT__SHIFT 0x0
#define CB_DEBUG_BUS_8__FC_MC_WRITE_REQUESTS_IN_FLIGHT_MASK 0x7ff00
#define CB_DEBUG_BUS_8__FC_MC_WRITE_REQUESTS_IN_FLIGHT__SHIFT 0x8
#define CB_DEBUG_BUS_8__FC_SEQUENCER_FMASK_COMPRESSION_DISABLE_MASK 0x80000
#define CB_DEBUG_BUS_8__FC_SEQUENCER_FMASK_COMPRESSION_DISABLE__SHIFT 0x13
#define CB_DEBUG_BUS_8__FC_SEQUENCER_FMASK_DECOMPRESS_MASK 0x100000
#define CB_DEBUG_BUS_8__FC_SEQUENCER_FMASK_DECOMPRESS__SHIFT 0x14
#define CB_DEBUG_BUS_8__FC_SEQUENCER_ELIMINATE_FAST_CLEAR_MASK 0x200000
#define CB_DEBUG_BUS_8__FC_SEQUENCER_ELIMINATE_FAST_CLEAR__SHIFT 0x15
#define CB_DEBUG_BUS_8__FC_SEQUENCER_CLEAR_MASK 0x400000
#define CB_DEBUG_BUS_8__FC_SEQUENCER_CLEAR__SHIFT 0x16
#define CB_DEBUG_BUS_9__CC_MC_WRITE_REQUESTS_IN_FLIGHT_MASK 0x3ff
#define CB_DEBUG_BUS_9__CC_MC_WRITE_REQUESTS_IN_FLIGHT__SHIFT 0x0
#define CB_DEBUG_BUS_9__CC_SURFACE_SYNC_MASK 0x400
#define CB_DEBUG_BUS_9__CC_SURFACE_SYNC__SHIFT 0xa
#define CB_DEBUG_BUS_9__TWO_PROBE_QUAD_FRAGMENT_MASK 0x800
#define CB_DEBUG_BUS_9__TWO_PROBE_QUAD_FRAGMENT__SHIFT 0xb
#define CB_DEBUG_BUS_9__EXPORT_32_ABGR_QUAD_FRAGMENT_MASK 0x1000
#define CB_DEBUG_BUS_9__EXPORT_32_ABGR_QUAD_FRAGMENT__SHIFT 0xc
#define CB_DEBUG_BUS_9__DUAL_SOURCE_COLOR_QUAD_FRAGMENT_MASK 0x2000
#define CB_DEBUG_BUS_9__DUAL_SOURCE_COLOR_QUAD_FRAGMENT__SHIFT 0xd
#define CB_DEBUG_BUS_9__DEBUG_BUS_DRAWN_QUAD_MASK 0x4000
#define CB_DEBUG_BUS_9__DEBUG_BUS_DRAWN_QUAD__SHIFT 0xe
#define CB_DEBUG_BUS_9__DEBUG_BUS_DRAWN_PIXEL_MASK 0x78000
#define CB_DEBUG_BUS_9__DEBUG_BUS_DRAWN_PIXEL__SHIFT 0xf
#define CB_DEBUG_BUS_9__DEBUG_BUS_DRAWN_QUAD_FRAGMENT_MASK 0x80000
#define CB_DEBUG_BUS_9__DEBUG_BUS_DRAWN_QUAD_FRAGMENT__SHIFT 0x13
#define CB_DEBUG_BUS_9__DEBUG_BUS_DRAWN_TILE_MASK 0x100000
#define CB_DEBUG_BUS_9__DEBUG_BUS_DRAWN_TILE__SHIFT 0x14
#define CB_DEBUG_BUS_9__EVENT_ALL_MASK 0x200000
#define CB_DEBUG_BUS_9__EVENT_ALL__SHIFT 0x15
#define CB_DEBUG_BUS_9__EVENT_CACHE_FLUSH_TS_MASK 0x400000
#define CB_DEBUG_BUS_9__EVENT_CACHE_FLUSH_TS__SHIFT 0x16
#define CB_DEBUG_BUS_9__EVENT_CONTEXT_DONE_MASK 0x800000
#define CB_DEBUG_BUS_9__EVENT_CONTEXT_DONE__SHIFT 0x17
#define CB_DEBUG_BUS_10__EVENT_CACHE_FLUSH_MASK 0x1
#define CB_DEBUG_BUS_10__EVENT_CACHE_FLUSH__SHIFT 0x0
#define CB_DEBUG_BUS_10__EVENT_CACHE_FLUSH_AND_INV_TS_EVENT_MASK 0x2
#define CB_DEBUG_BUS_10__EVENT_CACHE_FLUSH_AND_INV_TS_EVENT__SHIFT 0x1
#define CB_DEBUG_BUS_10__EVENT_CACHE_FLUSH_AND_INV_EVENT_MASK 0x4
#define CB_DEBUG_BUS_10__EVENT_CACHE_FLUSH_AND_INV_EVENT__SHIFT 0x2
#define CB_DEBUG_BUS_10__EVENT_FLUSH_AND_INV_CB_DATA_TS_MASK 0x8
#define CB_DEBUG_BUS_10__EVENT_FLUSH_AND_INV_CB_DATA_TS__SHIFT 0x3
#define CB_DEBUG_BUS_10__EVENT_FLUSH_AND_INV_CB_META_MASK 0x10
#define CB_DEBUG_BUS_10__EVENT_FLUSH_AND_INV_CB_META__SHIFT 0x4
#define CB_DEBUG_BUS_10__CMASK_READ_DATA_0XC_MASK 0x20
#define CB_DEBUG_BUS_10__CMASK_READ_DATA_0XC__SHIFT 0x5
#define CB_DEBUG_BUS_10__CMASK_READ_DATA_0XD_MASK 0x40
#define CB_DEBUG_BUS_10__CMASK_READ_DATA_0XD__SHIFT 0x6
#define CB_DEBUG_BUS_10__CMASK_READ_DATA_0XE_MASK 0x80
#define CB_DEBUG_BUS_10__CMASK_READ_DATA_0XE__SHIFT 0x7
#define CB_DEBUG_BUS_10__CMASK_READ_DATA_0XF_MASK 0x100
#define CB_DEBUG_BUS_10__CMASK_READ_DATA_0XF__SHIFT 0x8
#define CB_DEBUG_BUS_10__CMASK_WRITE_DATA_0XC_MASK 0x200
#define CB_DEBUG_BUS_10__CMASK_WRITE_DATA_0XC__SHIFT 0x9
#define CB_DEBUG_BUS_10__CMASK_WRITE_DATA_0XD_MASK 0x400
#define CB_DEBUG_BUS_10__CMASK_WRITE_DATA_0XD__SHIFT 0xa
#define CB_DEBUG_BUS_10__CMASK_WRITE_DATA_0XE_MASK 0x800
#define CB_DEBUG_BUS_10__CMASK_WRITE_DATA_0XE__SHIFT 0xb
#define CB_DEBUG_BUS_10__CMASK_WRITE_DATA_0XF_MASK 0x1000
#define CB_DEBUG_BUS_10__CMASK_WRITE_DATA_0XF__SHIFT 0xc
#define CB_DEBUG_BUS_10__CORE_SCLK_VLD_MASK 0x2000
#define CB_DEBUG_BUS_10__CORE_SCLK_VLD__SHIFT 0xd
#define CB_DEBUG_BUS_10__REG_SCLK0_VLD_MASK 0x4000
#define CB_DEBUG_BUS_10__REG_SCLK0_VLD__SHIFT 0xe
#define CB_DEBUG_BUS_10__REG_SCLK1_VLD_MASK 0x8000
#define CB_DEBUG_BUS_10__REG_SCLK1_VLD__SHIFT 0xf
#define CB_DEBUG_BUS_10__MERGE_TILE_ONLY_VALID_READY_MASK 0x10000
#define CB_DEBUG_BUS_10__MERGE_TILE_ONLY_VALID_READY__SHIFT 0x10
#define CB_DEBUG_BUS_10__MERGE_TILE_ONLY_VALID_READYB_MASK 0x20000
#define CB_DEBUG_BUS_10__MERGE_TILE_ONLY_VALID_READYB__SHIFT 0x11
#define CB_DEBUG_BUS_10__FC_QUAD_RDLAT_FIFO_FULL_MASK 0x40000
#define CB_DEBUG_BUS_10__FC_QUAD_RDLAT_FIFO_FULL__SHIFT 0x12
#define CB_DEBUG_BUS_10__FC_TILE_RDLAT_FIFO_FULL_MASK 0x80000
#define CB_DEBUG_BUS_10__FC_TILE_RDLAT_FIFO_FULL__SHIFT 0x13
#define CB_DEBUG_BUS_10__FOP_QUAD_HAS_1_FRAGMENT_BEFORE_UPDATE_MASK 0x100000
#define CB_DEBUG_BUS_10__FOP_QUAD_HAS_1_FRAGMENT_BEFORE_UPDATE__SHIFT 0x14
#define CB_DEBUG_BUS_10__FOP_QUAD_HAS_2_FRAGMENTS_BEFORE_UPDATE_MASK 0x200000
#define CB_DEBUG_BUS_10__FOP_QUAD_HAS_2_FRAGMENTS_BEFORE_UPDATE__SHIFT 0x15
#define CB_DEBUG_BUS_10__FOP_QUAD_HAS_3_FRAGMENTS_BEFORE_UPDATE_MASK 0x400000
#define CB_DEBUG_BUS_10__FOP_QUAD_HAS_3_FRAGMENTS_BEFORE_UPDATE__SHIFT 0x16
#define CB_DEBUG_BUS_10__FOP_QUAD_HAS_4_FRAGMENTS_BEFORE_UPDATE_MASK 0x800000
#define CB_DEBUG_BUS_10__FOP_QUAD_HAS_4_FRAGMENTS_BEFORE_UPDATE__SHIFT 0x17
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_5_FRAGMENTS_BEFORE_UPDATE_MASK 0x1
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_5_FRAGMENTS_BEFORE_UPDATE__SHIFT 0x0
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_6_FRAGMENTS_BEFORE_UPDATE_MASK 0x2
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_6_FRAGMENTS_BEFORE_UPDATE__SHIFT 0x1
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_7_FRAGMENTS_BEFORE_UPDATE_MASK 0x4
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_7_FRAGMENTS_BEFORE_UPDATE__SHIFT 0x2
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_8_FRAGMENTS_BEFORE_UPDATE_MASK 0x8
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_8_FRAGMENTS_BEFORE_UPDATE__SHIFT 0x3
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_1_FRAGMENT_AFTER_UPDATE_MASK 0x10
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_1_FRAGMENT_AFTER_UPDATE__SHIFT 0x4
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_2_FRAGMENTS_AFTER_UPDATE_MASK 0x20
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_2_FRAGMENTS_AFTER_UPDATE__SHIFT 0x5
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_3_FRAGMENTS_AFTER_UPDATE_MASK 0x40
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_3_FRAGMENTS_AFTER_UPDATE__SHIFT 0x6
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_4_FRAGMENTS_AFTER_UPDATE_MASK 0x80
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_4_FRAGMENTS_AFTER_UPDATE__SHIFT 0x7
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_5_FRAGMENTS_AFTER_UPDATE_MASK 0x100
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_5_FRAGMENTS_AFTER_UPDATE__SHIFT 0x8
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_6_FRAGMENTS_AFTER_UPDATE_MASK 0x200
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_6_FRAGMENTS_AFTER_UPDATE__SHIFT 0x9
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_7_FRAGMENTS_AFTER_UPDATE_MASK 0x400
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_7_FRAGMENTS_AFTER_UPDATE__SHIFT 0xa
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_8_FRAGMENTS_AFTER_UPDATE_MASK 0x800
#define CB_DEBUG_BUS_11__FOP_QUAD_HAS_8_FRAGMENTS_AFTER_UPDATE__SHIFT 0xb
#define CB_DEBUG_BUS_11__FOP_QUAD_ADDED_1_FRAGMENT_MASK 0x1000
#define CB_DEBUG_BUS_11__FOP_QUAD_ADDED_1_FRAGMENT__SHIFT 0xc
#define CB_DEBUG_BUS_11__FOP_QUAD_ADDED_2_FRAGMENTS_MASK 0x2000
#define CB_DEBUG_BUS_11__FOP_QUAD_ADDED_2_FRAGMENTS__SHIFT 0xd
#define CB_DEBUG_BUS_11__FOP_QUAD_ADDED_3_FRAGMENTS_MASK 0x4000
#define CB_DEBUG_BUS_11__FOP_QUAD_ADDED_3_FRAGMENTS__SHIFT 0xe
#define CB_DEBUG_BUS_11__FOP_QUAD_ADDED_4_FRAGMENTS_MASK 0x8000
#define CB_DEBUG_BUS_11__FOP_QUAD_ADDED_4_FRAGMENTS__SHIFT 0xf
#define CB_DEBUG_BUS_11__FOP_QUAD_ADDED_5_FRAGMENTS_MASK 0x10000
#define CB_DEBUG_BUS_11__FOP_QUAD_ADDED_5_FRAGMENTS__SHIFT 0x10
#define CB_DEBUG_BUS_11__FOP_QUAD_ADDED_6_FRAGMENTS_MASK 0x20000
#define CB_DEBUG_BUS_11__FOP_QUAD_ADDED_6_FRAGMENTS__SHIFT 0x11
#define CB_DEBUG_BUS_11__FOP_QUAD_ADDED_7_FRAGMENTS_MASK 0x40000
#define CB_DEBUG_BUS_11__FOP_QUAD_ADDED_7_FRAGMENTS__SHIFT 0x12
#define CB_DEBUG_BUS_11__FOP_QUAD_REMOVED_1_FRAGMENT_MASK 0x80000
#define CB_DEBUG_BUS_11__FOP_QUAD_REMOVED_1_FRAGMENT__SHIFT 0x13
#define CB_DEBUG_BUS_11__FOP_QUAD_REMOVED_2_FRAGMENTS_MASK 0x100000
#define CB_DEBUG_BUS_11__FOP_QUAD_REMOVED_2_FRAGMENTS__SHIFT 0x14
#define CB_DEBUG_BUS_11__FOP_QUAD_REMOVED_3_FRAGMENTS_MASK 0x200000
#define CB_DEBUG_BUS_11__FOP_QUAD_REMOVED_3_FRAGMENTS__SHIFT 0x15
#define CB_DEBUG_BUS_11__FOP_QUAD_REMOVED_4_FRAGMENTS_MASK 0x400000
#define CB_DEBUG_BUS_11__FOP_QUAD_REMOVED_4_FRAGMENTS__SHIFT 0x16
#define CB_DEBUG_BUS_11__FOP_QUAD_REMOVED_5_FRAGMENTS_MASK 0x800000
#define CB_DEBUG_BUS_11__FOP_QUAD_REMOVED_5_FRAGMENTS__SHIFT 0x17
#define CB_DEBUG_BUS_12__FOP_QUAD_REMOVED_6_FRAGMENTS_MASK 0x1
#define CB_DEBUG_BUS_12__FOP_QUAD_REMOVED_6_FRAGMENTS__SHIFT 0x0
#define CB_DEBUG_BUS_12__FOP_QUAD_REMOVED_7_FRAGMENTS_MASK 0x2
#define CB_DEBUG_BUS_12__FOP_QUAD_REMOVED_7_FRAGMENTS__SHIFT 0x1
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_READS_FRAGMENT_0_MASK 0x4
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_READS_FRAGMENT_0__SHIFT 0x2
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_READS_FRAGMENT_1_MASK 0x8
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_READS_FRAGMENT_1__SHIFT 0x3
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_READS_FRAGMENT_2_MASK 0x10
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_READS_FRAGMENT_2__SHIFT 0x4
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_READS_FRAGMENT_3_MASK 0x20
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_READS_FRAGMENT_3__SHIFT 0x5
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_READS_FRAGMENT_4_MASK 0x40
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_READS_FRAGMENT_4__SHIFT 0x6
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_READS_FRAGMENT_5_MASK 0x80
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_READS_FRAGMENT_5__SHIFT 0x7
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_READS_FRAGMENT_6_MASK 0x100
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_READS_FRAGMENT_6__SHIFT 0x8
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_READS_FRAGMENT_7_MASK 0x200
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_READS_FRAGMENT_7__SHIFT 0x9
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_WRITES_FRAGMENT_0_MASK 0x400
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_WRITES_FRAGMENT_0__SHIFT 0xa
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_WRITES_FRAGMENT_1_MASK 0x800
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_WRITES_FRAGMENT_1__SHIFT 0xb
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_WRITES_FRAGMENT_2_MASK 0x1000
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_WRITES_FRAGMENT_2__SHIFT 0xc
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_WRITES_FRAGMENT_3_MASK 0x2000
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_WRITES_FRAGMENT_3__SHIFT 0xd
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_WRITES_FRAGMENT_4_MASK 0x4000
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_WRITES_FRAGMENT_4__SHIFT 0xe
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_WRITES_FRAGMENT_5_MASK 0x8000
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_WRITES_FRAGMENT_5__SHIFT 0xf
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_WRITES_FRAGMENT_6_MASK 0x10000
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_WRITES_FRAGMENT_6__SHIFT 0x10
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_WRITES_FRAGMENT_7_MASK 0x20000
#define CB_DEBUG_BUS_12__FC_CC_QUADFRAG_WRITES_FRAGMENT_7__SHIFT 0x11
#define CB_DEBUG_BUS_12__FC_QUAD_BLEND_OPT_DONT_READ_DST_MASK 0x40000
#define CB_DEBUG_BUS_12__FC_QUAD_BLEND_OPT_DONT_READ_DST__SHIFT 0x12
#define CB_DEBUG_BUS_12__FC_QUAD_BLEND_OPT_BLEND_BYPASS_MASK 0x80000
#define CB_DEBUG_BUS_12__FC_QUAD_BLEND_OPT_BLEND_BYPASS__SHIFT 0x13
#define CB_DEBUG_BUS_12__FC_QUAD_BLEND_OPT_DISCARD_PIXELS_MASK 0x100000
#define CB_DEBUG_BUS_12__FC_QUAD_BLEND_OPT_DISCARD_PIXELS__SHIFT 0x14
#define CB_DEBUG_BUS_12__FC_QUAD_KILLED_BY_EXTRA_PIXEL_EXPORT_MASK 0x200000
#define CB_DEBUG_BUS_12__FC_QUAD_KILLED_BY_EXTRA_PIXEL_EXPORT__SHIFT 0x15
#define CB_DEBUG_BUS_12__FC_QUAD_KILLED_BY_COLOR_INVALID_MASK 0x400000
#define CB_DEBUG_BUS_12__FC_QUAD_KILLED_BY_COLOR_INVALID__SHIFT 0x16
#define CB_DEBUG_BUS_12__FC_QUAD_KILLED_BY_NULL_TARGET_SHADER_MASK_MASK 0x800000
#define CB_DEBUG_BUS_12__FC_QUAD_KILLED_BY_NULL_TARGET_SHADER_MASK__SHIFT 0x17
#define CB_DEBUG_BUS_13__FC_PF_FC_KEYID_RDLAT_FIFO_FULL_MASK 0x1
#define CB_DEBUG_BUS_13__FC_PF_FC_KEYID_RDLAT_FIFO_FULL__SHIFT 0x0
#define CB_DEBUG_BUS_13__FC_DOC_QTILE_CAM_MISS_MASK 0x2
#define CB_DEBUG_BUS_13__FC_DOC_QTILE_CAM_MISS__SHIFT 0x1
#define CB_DEBUG_BUS_13__FC_DOC_QTILE_CAM_HIT_MASK 0x4
#define CB_DEBUG_BUS_13__FC_DOC_QTILE_CAM_HIT__SHIFT 0x2
#define CB_DEBUG_BUS_13__FC_DOC_CLINE_CAM_MISS_MASK 0x8
#define CB_DEBUG_BUS_13__FC_DOC_CLINE_CAM_MISS__SHIFT 0x3
#define CB_DEBUG_BUS_13__FC_DOC_CLINE_CAM_HIT_MASK 0x10
#define CB_DEBUG_BUS_13__FC_DOC_CLINE_CAM_HIT__SHIFT 0x4
#define CB_DEBUG_BUS_13__FC_DOC_OVERWROTE_1_SECTOR_MASK 0x20
#define CB_DEBUG_BUS_13__FC_DOC_OVERWROTE_1_SECTOR__SHIFT 0x5
#define CB_DEBUG_BUS_13__FC_DOC_OVERWROTE_2_SECTORS_MASK 0x40
#define CB_DEBUG_BUS_13__FC_DOC_OVERWROTE_2_SECTORS__SHIFT 0x6
#define CB_DEBUG_BUS_13__FC_DOC_OVERWROTE_3_SECTORS_MASK 0x80
#define CB_DEBUG_BUS_13__FC_DOC_OVERWROTE_3_SECTORS__SHIFT 0x7
#define CB_DEBUG_BUS_13__FC_DOC_OVERWROTE_4_SECTORS_MASK 0x100
#define CB_DEBUG_BUS_13__FC_DOC_OVERWROTE_4_SECTORS__SHIFT 0x8
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_HIT_MASK 0x200
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_HIT__SHIFT 0x9
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_TAG_MISS_MASK 0x400
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_TAG_MISS__SHIFT 0xa
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_SECTOR_MISS_MASK 0x800
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_SECTOR_MISS__SHIFT 0xb
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_REEVICTION_STALL_MASK 0x1000
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_REEVICTION_STALL__SHIFT 0xc
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_EVICT_NONZERO_INFLIGHT_STALL_MASK 0x2000
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_EVICT_NONZERO_INFLIGHT_STALL__SHIFT 0xd
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_REPLACE_PENDING_EVICT_STALL_MASK 0x4000
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_REPLACE_PENDING_EVICT_STALL__SHIFT 0xe
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_INFLIGHT_COUNTER_MAXIMUM_STALL_MASK 0x8000
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_INFLIGHT_COUNTER_MAXIMUM_STALL__SHIFT 0xf
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_READ_OUTPUT_STALL_MASK 0x10000
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_READ_OUTPUT_STALL__SHIFT 0x10
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_WRITE_OUTPUT_STALL_MASK 0x20000
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_WRITE_OUTPUT_STALL__SHIFT 0x11
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_ACK_OUTPUT_STALL_MASK 0x40000
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_ACK_OUTPUT_STALL__SHIFT 0x12
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_STALL_MASK 0x80000
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_STALL__SHIFT 0x13
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_FLUSH_MASK 0x100000
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_FLUSH__SHIFT 0x14
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_SECTORS_FLUSHED_MASK 0x200000
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_SECTORS_FLUSHED__SHIFT 0x15
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_DIRTY_SECTORS_FLUSHED_MASK 0x400000
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_DIRTY_SECTORS_FLUSHED__SHIFT 0x16
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_TAGS_FLUSHED_MASK 0x800000
#define CB_DEBUG_BUS_13__FC_PF_DCC_CACHE_TAGS_FLUSHED__SHIFT 0x17
#define CB_DEBUG_BUS_14__FC_MC_DCC_WRITE_REQUESTS_IN_FLIGHT_MASK 0x7ff
#define CB_DEBUG_BUS_14__FC_MC_DCC_WRITE_REQUESTS_IN_FLIGHT__SHIFT 0x0
#define CB_DEBUG_BUS_14__FC_MC_DCC_READ_REQUESTS_IN_FLIGHT_MASK 0x3ff800
#define CB_DEBUG_BUS_14__FC_MC_DCC_READ_REQUESTS_IN_FLIGHT__SHIFT 0xb
#define CB_DEBUG_BUS_14__CC_PF_DCC_BEYOND_TILE_SPLIT_MASK 0x400000
#define CB_DEBUG_BUS_14__CC_PF_DCC_BEYOND_TILE_SPLIT__SHIFT 0x16
#define CB_DEBUG_BUS_14__CC_PF_DCC_RDREQ_STALL_MASK 0x800000
#define CB_DEBUG_BUS_14__CC_PF_DCC_RDREQ_STALL__SHIFT 0x17
#define CB_DEBUG_BUS_15__CC_PF_DCC_COMPRESS_RATIO_2TO1_MASK 0x7
#define CB_DEBUG_BUS_15__CC_PF_DCC_COMPRESS_RATIO_2TO1__SHIFT 0x0
#define CB_DEBUG_BUS_15__CC_PF_DCC_COMPRESS_RATIO_4TO1_MASK 0x18
#define CB_DEBUG_BUS_15__CC_PF_DCC_COMPRESS_RATIO_4TO1__SHIFT 0x3
#define CB_DEBUG_BUS_15__CC_PF_DCC_COMPRESS_RATIO_4TO2_MASK 0x60
#define CB_DEBUG_BUS_15__CC_PF_DCC_COMPRESS_RATIO_4TO2__SHIFT 0x5
#define CB_DEBUG_BUS_15__CC_PF_DCC_COMPRESS_RATIO_4TO3_MASK 0x180
#define CB_DEBUG_BUS_15__CC_PF_DCC_COMPRESS_RATIO_4TO3__SHIFT 0x7
#define CB_DEBUG_BUS_15__CC_PF_DCC_COMPRESS_RATIO_6TO1_MASK 0x600
#define CB_DEBUG_BUS_15__CC_PF_DCC_COMPRESS_RATIO_6TO1__SHIFT 0x9
#define CB_DEBUG_BUS_15__CC_PF_DCC_COMPRESS_RATIO_6TO2_MASK 0x1800
#define CB_DEBUG_BUS_15__CC_PF_DCC_COMPRESS_RATIO_6TO2__SHIFT 0xb
#define CB_DEBUG_BUS_15__CC_PF_DCC_COMPRESS_RATIO_6TO3_MASK 0x6000
#define CB_DEBUG_BUS_15__CC_PF_DCC_COMPRESS_RATIO_6TO3__SHIFT 0xd
#define CB_DEBUG_BUS_15__CC_PF_DCC_COMPRESS_RATIO_6TO4_MASK 0x18000
#define CB_DEBUG_BUS_15__CC_PF_DCC_COMPRESS_RATIO_6TO4__SHIFT 0xf
#define CB_DEBUG_BUS_15__CC_PF_DCC_COMPRESS_RATIO_6TO5_MASK 0x60000
#define CB_DEBUG_BUS_15__CC_PF_DCC_COMPRESS_RATIO_6TO5__SHIFT 0x11
#define CB_DEBUG_BUS_16__CC_PF_DCC_COMPRESS_RATIO_8TO1_MASK 0x1
#define CB_DEBUG_BUS_16__CC_PF_DCC_COMPRESS_RATIO_8TO1__SHIFT 0x0
#define CB_DEBUG_BUS_16__CC_PF_DCC_COMPRESS_RATIO_8TO2_MASK 0x2
#define CB_DEBUG_BUS_16__CC_PF_DCC_COMPRESS_RATIO_8TO2__SHIFT 0x1
#define CB_DEBUG_BUS_16__CC_PF_DCC_COMPRESS_RATIO_8TO3_MASK 0x4
#define CB_DEBUG_BUS_16__CC_PF_DCC_COMPRESS_RATIO_8TO3__SHIFT 0x2
#define CB_DEBUG_BUS_16__CC_PF_DCC_COMPRESS_RATIO_8TO4_MASK 0x8
#define CB_DEBUG_BUS_16__CC_PF_DCC_COMPRESS_RATIO_8TO4__SHIFT 0x3
#define CB_DEBUG_BUS_16__CC_PF_DCC_COMPRESS_RATIO_8TO5_MASK 0x10
#define CB_DEBUG_BUS_16__CC_PF_DCC_COMPRESS_RATIO_8TO5__SHIFT 0x4
#define CB_DEBUG_BUS_16__CC_PF_DCC_COMPRESS_RATIO_8TO6_MASK 0x20
#define CB_DEBUG_BUS_16__CC_PF_DCC_COMPRESS_RATIO_8TO6__SHIFT 0x5
#define CB_DEBUG_BUS_16__CC_PF_DCC_COMPRESS_RATIO_8TO7_MASK 0x40
#define CB_DEBUG_BUS_16__CC_PF_DCC_COMPRESS_RATIO_8TO7__SHIFT 0x6
#define CB_DEBUG_BUS_17__TILE_INTFC_BUSY_MASK 0x1
#define CB_DEBUG_BUS_17__TILE_INTFC_BUSY__SHIFT 0x0
#define CB_DEBUG_BUS_17__MU_BUSY_MASK 0x2
#define CB_DEBUG_BUS_17__MU_BUSY__SHIFT 0x1
#define CB_DEBUG_BUS_17__TQ_BUSY_MASK 0x4
#define CB_DEBUG_BUS_17__TQ_BUSY__SHIFT 0x2
#define CB_DEBUG_BUS_17__AC_BUSY_MASK 0x8
#define CB_DEBUG_BUS_17__AC_BUSY__SHIFT 0x3
#define CB_DEBUG_BUS_17__CRW_BUSY_MASK 0x10
#define CB_DEBUG_BUS_17__CRW_BUSY__SHIFT 0x4
#define CB_DEBUG_BUS_17__CACHE_CTRL_BUSY_MASK 0x20
#define CB_DEBUG_BUS_17__CACHE_CTRL_BUSY__SHIFT 0x5
#define CB_DEBUG_BUS_17__MC_WR_PENDING_MASK 0x40
#define CB_DEBUG_BUS_17__MC_WR_PENDING__SHIFT 0x6
#define CB_DEBUG_BUS_17__FC_WR_PENDING_MASK 0x80
#define CB_DEBUG_BUS_17__FC_WR_PENDING__SHIFT 0x7
#define CB_DEBUG_BUS_17__FC_RD_PENDING_MASK 0x100
#define CB_DEBUG_BUS_17__FC_RD_PENDING__SHIFT 0x8
#define CB_DEBUG_BUS_17__EVICT_PENDING_MASK 0x200
#define CB_DEBUG_BUS_17__EVICT_PENDING__SHIFT 0x9
#define CB_DEBUG_BUS_17__LAST_RD_ARB_WINNER_MASK 0x400
#define CB_DEBUG_BUS_17__LAST_RD_ARB_WINNER__SHIFT 0xa
#define CB_DEBUG_BUS_17__MU_STATE_MASK 0x7f800
#define CB_DEBUG_BUS_17__MU_STATE__SHIFT 0xb
#define CB_DEBUG_BUS_18__TILE_RETIREMENT_BUSY_MASK 0x1
#define CB_DEBUG_BUS_18__TILE_RETIREMENT_BUSY__SHIFT 0x0
#define CB_DEBUG_BUS_18__FOP_BUSY_MASK 0x2
#define CB_DEBUG_BUS_18__FOP_BUSY__SHIFT 0x1
#define CB_DEBUG_BUS_18__CLEAR_BUSY_MASK 0x4
#define CB_DEBUG_BUS_18__CLEAR_BUSY__SHIFT 0x2
#define CB_DEBUG_BUS_18__LAT_BUSY_MASK 0x8
#define CB_DEBUG_BUS_18__LAT_BUSY__SHIFT 0x3
#define CB_DEBUG_BUS_18__CACHE_CTL_BUSY_MASK 0x10
#define CB_DEBUG_BUS_18__CACHE_CTL_BUSY__SHIFT 0x4
#define CB_DEBUG_BUS_18__ADDR_BUSY_MASK 0x20
#define CB_DEBUG_BUS_18__ADDR_BUSY__SHIFT 0x5
#define CB_DEBUG_BUS_18__MERGE_BUSY_MASK 0x40
#define CB_DEBUG_BUS_18__MERGE_BUSY__SHIFT 0x6
#define CB_DEBUG_BUS_18__QUAD_BUSY_MASK 0x80
#define CB_DEBUG_BUS_18__QUAD_BUSY__SHIFT 0x7
#define CB_DEBUG_BUS_18__TILE_BUSY_MASK 0x100
#define CB_DEBUG_BUS_18__TILE_BUSY__SHIFT 0x8
#define CB_DEBUG_BUS_18__DCC_BUSY_MASK 0x200
#define CB_DEBUG_BUS_18__DCC_BUSY__SHIFT 0x9
#define CB_DEBUG_BUS_18__DOC_BUSY_MASK 0x400
#define CB_DEBUG_BUS_18__DOC_BUSY__SHIFT 0xa
#define CB_DEBUG_BUS_18__DAG_BUSY_MASK 0x800
#define CB_DEBUG_BUS_18__DAG_BUSY__SHIFT 0xb
#define CB_DEBUG_BUS_18__DOC_STALL_MASK 0x1000
#define CB_DEBUG_BUS_18__DOC_STALL__SHIFT 0xc
#define CB_DEBUG_BUS_18__DOC_QT_CAM_FULL_MASK 0x2000
#define CB_DEBUG_BUS_18__DOC_QT_CAM_FULL__SHIFT 0xd
#define CB_DEBUG_BUS_18__DOC_CL_CAM_FULL_MASK 0x4000
#define CB_DEBUG_BUS_18__DOC_CL_CAM_FULL__SHIFT 0xe
#define CB_DEBUG_BUS_18__DOC_QUAD_PTR_FIFO_FULL_MASK 0x8000
#define CB_DEBUG_BUS_18__DOC_QUAD_PTR_FIFO_FULL__SHIFT 0xf
#define CB_DEBUG_BUS_18__DOC_SECTOR_MASK_FIFO_FULL_MASK 0x10000
#define CB_DEBUG_BUS_18__DOC_SECTOR_MASK_FIFO_FULL__SHIFT 0x10
#define CB_DEBUG_BUS_18__DCS_READ_WINNER_LAST_MASK 0x20000
#define CB_DEBUG_BUS_18__DCS_READ_WINNER_LAST__SHIFT 0x11
#define CB_DEBUG_BUS_18__DCS_READ_EV_PENDING_MASK 0x40000
#define CB_DEBUG_BUS_18__DCS_READ_EV_PENDING__SHIFT 0x12
#define CB_DEBUG_BUS_18__DCS_WRITE_CC_PENDING_MASK 0x80000
#define CB_DEBUG_BUS_18__DCS_WRITE_CC_PENDING__SHIFT 0x13
#define CB_DEBUG_BUS_18__DCS_READ_CC_PENDING_MASK 0x100000
#define CB_DEBUG_BUS_18__DCS_READ_CC_PENDING__SHIFT 0x14
#define CB_DEBUG_BUS_18__DCS_WRITE_MC_PENDING_MASK 0x200000
#define CB_DEBUG_BUS_18__DCS_WRITE_MC_PENDING__SHIFT 0x15
#define CB_DEBUG_BUS_19__SURF_SYNC_STATE_MASK 0x3
#define CB_DEBUG_BUS_19__SURF_SYNC_STATE__SHIFT 0x0
#define CB_DEBUG_BUS_19__SURF_SYNC_START_MASK 0x4
#define CB_DEBUG_BUS_19__SURF_SYNC_START__SHIFT 0x2
#define CB_DEBUG_BUS_19__SF_BUSY_MASK 0x8
#define CB_DEBUG_BUS_19__SF_BUSY__SHIFT 0x3
#define CB_DEBUG_BUS_19__CS_BUSY_MASK 0x10
#define CB_DEBUG_BUS_19__CS_BUSY__SHIFT 0x4
#define CB_DEBUG_BUS_19__RB_BUSY_MASK 0x20
#define CB_DEBUG_BUS_19__RB_BUSY__SHIFT 0x5
#define CB_DEBUG_BUS_19__DS_BUSY_MASK 0x40
#define CB_DEBUG_BUS_19__DS_BUSY__SHIFT 0x6
#define CB_DEBUG_BUS_19__TB_BUSY_MASK 0x80
#define CB_DEBUG_BUS_19__TB_BUSY__SHIFT 0x7
#define CB_DEBUG_BUS_19__IB_BUSY_MASK 0x100
#define CB_DEBUG_BUS_19__IB_BUSY__SHIFT 0x8
#define CB_DEBUG_BUS_19__DRR_BUSY_MASK 0x200
#define CB_DEBUG_BUS_19__DRR_BUSY__SHIFT 0x9
#define CB_DEBUG_BUS_19__DF_BUSY_MASK 0x400
#define CB_DEBUG_BUS_19__DF_BUSY__SHIFT 0xa
#define CB_DEBUG_BUS_19__DD_BUSY_MASK 0x800
#define CB_DEBUG_BUS_19__DD_BUSY__SHIFT 0xb
#define CB_DEBUG_BUS_19__DC_BUSY_MASK 0x1000
#define CB_DEBUG_BUS_19__DC_BUSY__SHIFT 0xc
#define CB_DEBUG_BUS_19__DK_BUSY_MASK 0x2000
#define CB_DEBUG_BUS_19__DK_BUSY__SHIFT 0xd
#define CB_DEBUG_BUS_19__DF_SKID_FIFO_EMPTY_MASK 0x4000
#define CB_DEBUG_BUS_19__DF_SKID_FIFO_EMPTY__SHIFT 0xe
#define CB_DEBUG_BUS_19__DF_CLEAR_FIFO_EMPTY_MASK 0x8000
#define CB_DEBUG_BUS_19__DF_CLEAR_FIFO_EMPTY__SHIFT 0xf
#define CB_DEBUG_BUS_19__DD_READY_MASK 0x10000
#define CB_DEBUG_BUS_19__DD_READY__SHIFT 0x10
#define CB_DEBUG_BUS_19__DC_FIFO_FULL_MASK 0x20000
#define CB_DEBUG_BUS_19__DC_FIFO_FULL__SHIFT 0x11
#define CB_DEBUG_BUS_19__DC_READY_MASK 0x40000
#define CB_DEBUG_BUS_19__DC_READY__SHIFT 0x12
#define CB_DEBUG_BUS_20__MC_RDREQ_CREDITS_MASK 0x3f
#define CB_DEBUG_BUS_20__MC_RDREQ_CREDITS__SHIFT 0x0
#define CB_DEBUG_BUS_20__MC_WRREQ_CREDITS_MASK 0xfc0
#define CB_DEBUG_BUS_20__MC_WRREQ_CREDITS__SHIFT 0x6
#define CB_DEBUG_BUS_20__CC_RDREQ_HAD_ITS_TURN_MASK 0x1000
#define CB_DEBUG_BUS_20__CC_RDREQ_HAD_ITS_TURN__SHIFT 0xc
#define CB_DEBUG_BUS_20__FC_RDREQ_HAD_ITS_TURN_MASK 0x2000
#define CB_DEBUG_BUS_20__FC_RDREQ_HAD_ITS_TURN__SHIFT 0xd
#define CB_DEBUG_BUS_20__CM_RDREQ_HAD_ITS_TURN_MASK 0x4000
#define CB_DEBUG_BUS_20__CM_RDREQ_HAD_ITS_TURN__SHIFT 0xe
#define CB_DEBUG_BUS_20__CC_WRREQ_HAD_ITS_TURN_MASK 0x10000
1774#define CB_DEBUG_BUS_20__CC_WRREQ_HAD_ITS_TURN__SHIFT 0x10
1775#define CB_DEBUG_BUS_20__FC_WRREQ_HAD_ITS_TURN_MASK 0x20000
1776#define CB_DEBUG_BUS_20__FC_WRREQ_HAD_ITS_TURN__SHIFT 0x11
1777#define CB_DEBUG_BUS_20__CM_WRREQ_HAD_ITS_TURN_MASK 0x40000
1778#define CB_DEBUG_BUS_20__CM_WRREQ_HAD_ITS_TURN__SHIFT 0x12
1779#define CB_DEBUG_BUS_20__CC_WRREQ_FIFO_EMPTY_MASK 0x100000
1780#define CB_DEBUG_BUS_20__CC_WRREQ_FIFO_EMPTY__SHIFT 0x14
1781#define CB_DEBUG_BUS_20__FC_WRREQ_FIFO_EMPTY_MASK 0x200000
1782#define CB_DEBUG_BUS_20__FC_WRREQ_FIFO_EMPTY__SHIFT 0x15
1783#define CB_DEBUG_BUS_20__CM_WRREQ_FIFO_EMPTY_MASK 0x400000
1784#define CB_DEBUG_BUS_20__CM_WRREQ_FIFO_EMPTY__SHIFT 0x16
1785#define CB_DEBUG_BUS_20__DCC_WRREQ_FIFO_EMPTY_MASK 0x800000
1786#define CB_DEBUG_BUS_20__DCC_WRREQ_FIFO_EMPTY__SHIFT 0x17
1787#define CB_DEBUG_BUS_21__CM_BUSY_MASK 0x1
1788#define CB_DEBUG_BUS_21__CM_BUSY__SHIFT 0x0
1789#define CB_DEBUG_BUS_21__FC_BUSY_MASK 0x2
1790#define CB_DEBUG_BUS_21__FC_BUSY__SHIFT 0x1
1791#define CB_DEBUG_BUS_21__CC_BUSY_MASK 0x4
1792#define CB_DEBUG_BUS_21__CC_BUSY__SHIFT 0x2
1793#define CB_DEBUG_BUS_21__BB_BUSY_MASK 0x8
1794#define CB_DEBUG_BUS_21__BB_BUSY__SHIFT 0x3
1795#define CB_DEBUG_BUS_21__MA_BUSY_MASK 0x10
1796#define CB_DEBUG_BUS_21__MA_BUSY__SHIFT 0x4
1797#define CB_DEBUG_BUS_21__CORE_SCLK_VLD_MASK 0x20
1798#define CB_DEBUG_BUS_21__CORE_SCLK_VLD__SHIFT 0x5
1799#define CB_DEBUG_BUS_21__REG_SCLK1_VLD_MASK 0x40
1800#define CB_DEBUG_BUS_21__REG_SCLK1_VLD__SHIFT 0x6
1801#define CB_DEBUG_BUS_21__REG_SCLK0_VLD_MASK 0x80
1802#define CB_DEBUG_BUS_21__REG_SCLK0_VLD__SHIFT 0x7
1803#define CB_DEBUG_BUS_22__OUTSTANDING_MC_READS_MASK 0xfff
1804#define CB_DEBUG_BUS_22__OUTSTANDING_MC_READS__SHIFT 0x0
1805#define CB_DEBUG_BUS_22__OUTSTANDING_MC_WRITES_MASK 0xfff000
1806#define CB_DEBUG_BUS_22__OUTSTANDING_MC_WRITES__SHIFT 0xc
1807#define CP_DFY_CNTL__POLICY_MASK 0x1
1808#define CP_DFY_CNTL__POLICY__SHIFT 0x0
1809#define CP_DFY_CNTL__MTYPE_MASK 0xc
1810#define CP_DFY_CNTL__MTYPE__SHIFT 0x2
1811#define CP_DFY_CNTL__LFSR_RESET_MASK 0x10000000
1812#define CP_DFY_CNTL__LFSR_RESET__SHIFT 0x1c
1813#define CP_DFY_CNTL__MODE_MASK 0x60000000
1814#define CP_DFY_CNTL__MODE__SHIFT 0x1d
1815#define CP_DFY_CNTL__ENABLE_MASK 0x80000000
1816#define CP_DFY_CNTL__ENABLE__SHIFT 0x1f
1817#define CP_DFY_STAT__BURST_COUNT_MASK 0xffff
1818#define CP_DFY_STAT__BURST_COUNT__SHIFT 0x0
1819#define CP_DFY_STAT__TAGS_PENDING_MASK 0x1ff0000
1820#define CP_DFY_STAT__TAGS_PENDING__SHIFT 0x10
1821#define CP_DFY_STAT__BUSY_MASK 0x80000000
1822#define CP_DFY_STAT__BUSY__SHIFT 0x1f
1823#define CP_DFY_ADDR_HI__ADDR_HI_MASK 0xffffffff
1824#define CP_DFY_ADDR_HI__ADDR_HI__SHIFT 0x0
1825#define CP_DFY_ADDR_LO__ADDR_LO_MASK 0xffffffe0
1826#define CP_DFY_ADDR_LO__ADDR_LO__SHIFT 0x5
1827#define CP_DFY_DATA_0__DATA_MASK 0xffffffff
1828#define CP_DFY_DATA_0__DATA__SHIFT 0x0
1829#define CP_DFY_DATA_1__DATA_MASK 0xffffffff
1830#define CP_DFY_DATA_1__DATA__SHIFT 0x0
1831#define CP_DFY_DATA_2__DATA_MASK 0xffffffff
1832#define CP_DFY_DATA_2__DATA__SHIFT 0x0
1833#define CP_DFY_DATA_3__DATA_MASK 0xffffffff
1834#define CP_DFY_DATA_3__DATA__SHIFT 0x0
1835#define CP_DFY_DATA_4__DATA_MASK 0xffffffff
1836#define CP_DFY_DATA_4__DATA__SHIFT 0x0
1837#define CP_DFY_DATA_5__DATA_MASK 0xffffffff
1838#define CP_DFY_DATA_5__DATA__SHIFT 0x0
1839#define CP_DFY_DATA_6__DATA_MASK 0xffffffff
1840#define CP_DFY_DATA_6__DATA__SHIFT 0x0
1841#define CP_DFY_DATA_7__DATA_MASK 0xffffffff
1842#define CP_DFY_DATA_7__DATA__SHIFT 0x0
1843#define CP_DFY_DATA_8__DATA_MASK 0xffffffff
1844#define CP_DFY_DATA_8__DATA__SHIFT 0x0
1845#define CP_DFY_DATA_9__DATA_MASK 0xffffffff
1846#define CP_DFY_DATA_9__DATA__SHIFT 0x0
1847#define CP_DFY_DATA_10__DATA_MASK 0xffffffff
1848#define CP_DFY_DATA_10__DATA__SHIFT 0x0
1849#define CP_DFY_DATA_11__DATA_MASK 0xffffffff
1850#define CP_DFY_DATA_11__DATA__SHIFT 0x0
1851#define CP_DFY_DATA_12__DATA_MASK 0xffffffff
1852#define CP_DFY_DATA_12__DATA__SHIFT 0x0
1853#define CP_DFY_DATA_13__DATA_MASK 0xffffffff
1854#define CP_DFY_DATA_13__DATA__SHIFT 0x0
1855#define CP_DFY_DATA_14__DATA_MASK 0xffffffff
1856#define CP_DFY_DATA_14__DATA__SHIFT 0x0
1857#define CP_DFY_DATA_15__DATA_MASK 0xffffffff
1858#define CP_DFY_DATA_15__DATA__SHIFT 0x0
1859#define CP_DFY_CMD__OFFSET_MASK 0x1ff
1860#define CP_DFY_CMD__OFFSET__SHIFT 0x0
1861#define CP_DFY_CMD__SIZE_MASK 0xffff0000
1862#define CP_DFY_CMD__SIZE__SHIFT 0x10
1863#define CP_CPC_MGCG_SYNC_CNTL__COOLDOWN_PERIOD_MASK 0xff
1864#define CP_CPC_MGCG_SYNC_CNTL__COOLDOWN_PERIOD__SHIFT 0x0
1865#define CP_CPC_MGCG_SYNC_CNTL__WARMUP_PERIOD_MASK 0xff00
1866#define CP_CPC_MGCG_SYNC_CNTL__WARMUP_PERIOD__SHIFT 0x8
1867#define CP_ATCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x3ff
1868#define CP_ATCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
1869#define CP_RB0_BASE__RB_BASE_MASK 0xffffffff
1870#define CP_RB0_BASE__RB_BASE__SHIFT 0x0
1871#define CP_RB0_BASE_HI__RB_BASE_HI_MASK 0xff
1872#define CP_RB0_BASE_HI__RB_BASE_HI__SHIFT 0x0
1873#define CP_RB_BASE__RB_BASE_MASK 0xffffffff
1874#define CP_RB_BASE__RB_BASE__SHIFT 0x0
1875#define CP_RB1_BASE__RB_BASE_MASK 0xffffffff
1876#define CP_RB1_BASE__RB_BASE__SHIFT 0x0
1877#define CP_RB1_BASE_HI__RB_BASE_HI_MASK 0xff
1878#define CP_RB1_BASE_HI__RB_BASE_HI__SHIFT 0x0
1879#define CP_RB2_BASE__RB_BASE_MASK 0xffffffff
1880#define CP_RB2_BASE__RB_BASE__SHIFT 0x0
1881#define CP_RB0_CNTL__RB_BUFSZ_MASK 0x3f
1882#define CP_RB0_CNTL__RB_BUFSZ__SHIFT 0x0
1883#define CP_RB0_CNTL__RB_BLKSZ_MASK 0x3f00
1884#define CP_RB0_CNTL__RB_BLKSZ__SHIFT 0x8
1885#define CP_RB0_CNTL__MTYPE_MASK 0x18000
1886#define CP_RB0_CNTL__MTYPE__SHIFT 0xf
1887#define CP_RB0_CNTL__BUF_SWAP_MASK 0x60000
1888#define CP_RB0_CNTL__BUF_SWAP__SHIFT 0x11
1889#define CP_RB0_CNTL__MIN_AVAILSZ_MASK 0x300000
1890#define CP_RB0_CNTL__MIN_AVAILSZ__SHIFT 0x14
1891#define CP_RB0_CNTL__MIN_IB_AVAILSZ_MASK 0xc00000
1892#define CP_RB0_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
1893#define CP_RB0_CNTL__CACHE_POLICY_MASK 0x1000000
1894#define CP_RB0_CNTL__CACHE_POLICY__SHIFT 0x18
1895#define CP_RB0_CNTL__RB_NO_UPDATE_MASK 0x8000000
1896#define CP_RB0_CNTL__RB_NO_UPDATE__SHIFT 0x1b
1897#define CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000
1898#define CP_RB0_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
1899#define CP_RB_CNTL__RB_BUFSZ_MASK 0x3f
1900#define CP_RB_CNTL__RB_BUFSZ__SHIFT 0x0
1901#define CP_RB_CNTL__RB_BLKSZ_MASK 0x3f00
1902#define CP_RB_CNTL__RB_BLKSZ__SHIFT 0x8
1903#define CP_RB_CNTL__MTYPE_MASK 0x18000
1904#define CP_RB_CNTL__MTYPE__SHIFT 0xf
1905#define CP_RB_CNTL__BUF_SWAP_MASK 0x60000
1906#define CP_RB_CNTL__BUF_SWAP__SHIFT 0x11
1907#define CP_RB_CNTL__MIN_AVAILSZ_MASK 0x300000
1908#define CP_RB_CNTL__MIN_AVAILSZ__SHIFT 0x14
1909#define CP_RB_CNTL__MIN_IB_AVAILSZ_MASK 0xc00000
1910#define CP_RB_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
1911#define CP_RB_CNTL__CACHE_POLICY_MASK 0x1000000
1912#define CP_RB_CNTL__CACHE_POLICY__SHIFT 0x18
1913#define CP_RB_CNTL__RB_NO_UPDATE_MASK 0x8000000
1914#define CP_RB_CNTL__RB_NO_UPDATE__SHIFT 0x1b
1915#define CP_RB_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000
1916#define CP_RB_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
1917#define CP_RB1_CNTL__RB_BUFSZ_MASK 0x3f
1918#define CP_RB1_CNTL__RB_BUFSZ__SHIFT 0x0
1919#define CP_RB1_CNTL__RB_BLKSZ_MASK 0x3f00
1920#define CP_RB1_CNTL__RB_BLKSZ__SHIFT 0x8
1921#define CP_RB1_CNTL__MTYPE_MASK 0x18000
1922#define CP_RB1_CNTL__MTYPE__SHIFT 0xf
1923#define CP_RB1_CNTL__MIN_AVAILSZ_MASK 0x300000
1924#define CP_RB1_CNTL__MIN_AVAILSZ__SHIFT 0x14
1925#define CP_RB1_CNTL__MIN_IB_AVAILSZ_MASK 0xc00000
1926#define CP_RB1_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
1927#define CP_RB1_CNTL__CACHE_POLICY_MASK 0x1000000
1928#define CP_RB1_CNTL__CACHE_POLICY__SHIFT 0x18
1929#define CP_RB1_CNTL__RB_NO_UPDATE_MASK 0x8000000
1930#define CP_RB1_CNTL__RB_NO_UPDATE__SHIFT 0x1b
1931#define CP_RB1_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000
1932#define CP_RB1_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
1933#define CP_RB2_CNTL__RB_BUFSZ_MASK 0x3f
1934#define CP_RB2_CNTL__RB_BUFSZ__SHIFT 0x0
1935#define CP_RB2_CNTL__RB_BLKSZ_MASK 0x3f00
1936#define CP_RB2_CNTL__RB_BLKSZ__SHIFT 0x8
1937#define CP_RB2_CNTL__MTYPE_MASK 0x18000
1938#define CP_RB2_CNTL__MTYPE__SHIFT 0xf
1939#define CP_RB2_CNTL__MIN_AVAILSZ_MASK 0x300000
1940#define CP_RB2_CNTL__MIN_AVAILSZ__SHIFT 0x14
1941#define CP_RB2_CNTL__MIN_IB_AVAILSZ_MASK 0xc00000
1942#define CP_RB2_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
1943#define CP_RB2_CNTL__CACHE_POLICY_MASK 0x1000000
1944#define CP_RB2_CNTL__CACHE_POLICY__SHIFT 0x18
1945#define CP_RB2_CNTL__RB_NO_UPDATE_MASK 0x8000000
1946#define CP_RB2_CNTL__RB_NO_UPDATE__SHIFT 0x1b
1947#define CP_RB2_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000
1948#define CP_RB2_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
1949#define CP_RB_RPTR_WR__RB_RPTR_WR_MASK 0xfffff
1950#define CP_RB_RPTR_WR__RB_RPTR_WR__SHIFT 0x0
1951#define CP_RB0_RPTR_ADDR__RB_RPTR_SWAP_MASK 0x3
1952#define CP_RB0_RPTR_ADDR__RB_RPTR_SWAP__SHIFT 0x0
1953#define CP_RB0_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xfffffffc
1954#define CP_RB0_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
1955#define CP_RB_RPTR_ADDR__RB_RPTR_SWAP_MASK 0x3
1956#define CP_RB_RPTR_ADDR__RB_RPTR_SWAP__SHIFT 0x0
1957#define CP_RB_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xfffffffc
1958#define CP_RB_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
1959#define CP_RB1_RPTR_ADDR__RB_RPTR_SWAP_MASK 0x3
1960#define CP_RB1_RPTR_ADDR__RB_RPTR_SWAP__SHIFT 0x0
1961#define CP_RB1_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xfffffffc
1962#define CP_RB1_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
1963#define CP_RB2_RPTR_ADDR__RB_RPTR_SWAP_MASK 0x3
1964#define CP_RB2_RPTR_ADDR__RB_RPTR_SWAP__SHIFT 0x0
1965#define CP_RB2_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xfffffffc
1966#define CP_RB2_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
1967#define CP_RB0_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0xffff
1968#define CP_RB0_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
1969#define CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0xffff
1970#define CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
1971#define CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0xffff
1972#define CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
1973#define CP_RB2_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0xffff
1974#define CP_RB2_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
1975#define CP_RB0_WPTR__RB_WPTR_MASK 0xfffff
1976#define CP_RB0_WPTR__RB_WPTR__SHIFT 0x0
1977#define CP_RB_WPTR__RB_WPTR_MASK 0xfffff
1978#define CP_RB_WPTR__RB_WPTR__SHIFT 0x0
1979#define CP_RB1_WPTR__RB_WPTR_MASK 0xfffff
1980#define CP_RB1_WPTR__RB_WPTR__SHIFT 0x0
1981#define CP_RB2_WPTR__RB_WPTR_MASK 0xfffff
1982#define CP_RB2_WPTR__RB_WPTR__SHIFT 0x0
1983#define CP_RB_WPTR_POLL_ADDR_LO__RB_WPTR_POLL_ADDR_LO_MASK 0xfffffffc
1984#define CP_RB_WPTR_POLL_ADDR_LO__RB_WPTR_POLL_ADDR_LO__SHIFT 0x2
1985#define CP_RB_WPTR_POLL_ADDR_HI__RB_WPTR_POLL_ADDR_HI_MASK 0xff
1986#define CP_RB_WPTR_POLL_ADDR_HI__RB_WPTR_POLL_ADDR_HI__SHIFT 0x0
1987#define CP_INT_CNTL__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x800
1988#define CP_INT_CNTL__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
1989#define CP_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x4000
1990#define CP_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
1991#define CP_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x20000
1992#define CP_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
1993#define CP_INT_CNTL__CMP_BUSY_INT_ENABLE_MASK 0x40000
1994#define CP_INT_CNTL__CMP_BUSY_INT_ENABLE__SHIFT 0x12
1995#define CP_INT_CNTL__CNTX_BUSY_INT_ENABLE_MASK 0x80000
1996#define CP_INT_CNTL__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
1997#define CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE_MASK 0x100000
1998#define CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
1999#define CP_INT_CNTL__GFX_IDLE_INT_ENABLE_MASK 0x200000
2000#define CP_INT_CNTL__GFX_IDLE_INT_ENABLE__SHIFT 0x15
2001#define CP_INT_CNTL__PRIV_INSTR_INT_ENABLE_MASK 0x400000
2002#define CP_INT_CNTL__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
2003#define CP_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x800000
2004#define CP_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
2005#define CP_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x1000000
2006#define CP_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
2007#define CP_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x4000000
2008#define CP_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
2009#define CP_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x8000000
2010#define CP_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
2011#define CP_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000
2012#define CP_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
2013#define CP_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000
2014#define CP_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
2015#define CP_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000
2016#define CP_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
2017#define CP_INT_CNTL_RING0__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x800
2018#define CP_INT_CNTL_RING0__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
2019#define CP_INT_CNTL_RING0__CP_ECC_ERROR_INT_ENABLE_MASK 0x4000
2020#define CP_INT_CNTL_RING0__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
2021#define CP_INT_CNTL_RING0__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x20000
2022#define CP_INT_CNTL_RING0__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
2023#define CP_INT_CNTL_RING0__CMP_BUSY_INT_ENABLE_MASK 0x40000
2024#define CP_INT_CNTL_RING0__CMP_BUSY_INT_ENABLE__SHIFT 0x12
2025#define CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK 0x80000
2026#define CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
2027#define CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK 0x100000
2028#define CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
2029#define CP_INT_CNTL_RING0__GFX_IDLE_INT_ENABLE_MASK 0x200000
2030#define CP_INT_CNTL_RING0__GFX_IDLE_INT_ENABLE__SHIFT 0x15
2031#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK 0x400000
2032#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
2033#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK 0x800000
2034#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE__SHIFT 0x17
2035#define CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK 0x1000000
2036#define CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
2037#define CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK 0x4000000
2038#define CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
2039#define CP_INT_CNTL_RING0__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x8000000
2040#define CP_INT_CNTL_RING0__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
2041#define CP_INT_CNTL_RING0__GENERIC2_INT_ENABLE_MASK 0x20000000
2042#define CP_INT_CNTL_RING0__GENERIC2_INT_ENABLE__SHIFT 0x1d
2043#define CP_INT_CNTL_RING0__GENERIC1_INT_ENABLE_MASK 0x40000000
2044#define CP_INT_CNTL_RING0__GENERIC1_INT_ENABLE__SHIFT 0x1e
2045#define CP_INT_CNTL_RING0__GENERIC0_INT_ENABLE_MASK 0x80000000
2046#define CP_INT_CNTL_RING0__GENERIC0_INT_ENABLE__SHIFT 0x1f
2047#define CP_INT_CNTL_RING1__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x800
2048#define CP_INT_CNTL_RING1__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
2049#define CP_INT_CNTL_RING1__CP_ECC_ERROR_INT_ENABLE_MASK 0x4000
2050#define CP_INT_CNTL_RING1__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
2051#define CP_INT_CNTL_RING1__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x20000
2052#define CP_INT_CNTL_RING1__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
2053#define CP_INT_CNTL_RING1__CMP_BUSY_INT_ENABLE_MASK 0x40000
2054#define CP_INT_CNTL_RING1__CMP_BUSY_INT_ENABLE__SHIFT 0x12
2055#define CP_INT_CNTL_RING1__CNTX_BUSY_INT_ENABLE_MASK 0x80000
2056#define CP_INT_CNTL_RING1__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
2057#define CP_INT_CNTL_RING1__CNTX_EMPTY_INT_ENABLE_MASK 0x100000
2058#define CP_INT_CNTL_RING1__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
2059#define CP_INT_CNTL_RING1__GFX_IDLE_INT_ENABLE_MASK 0x200000
2060#define CP_INT_CNTL_RING1__GFX_IDLE_INT_ENABLE__SHIFT 0x15
2061#define CP_INT_CNTL_RING1__PRIV_INSTR_INT_ENABLE_MASK 0x400000
2062#define CP_INT_CNTL_RING1__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
2063#define CP_INT_CNTL_RING1__PRIV_REG_INT_ENABLE_MASK 0x800000
2064#define CP_INT_CNTL_RING1__PRIV_REG_INT_ENABLE__SHIFT 0x17
2065#define CP_INT_CNTL_RING1__OPCODE_ERROR_INT_ENABLE_MASK 0x1000000
2066#define CP_INT_CNTL_RING1__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
2067#define CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE_MASK 0x4000000
2068#define CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
2069#define CP_INT_CNTL_RING1__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x8000000
2070#define CP_INT_CNTL_RING1__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
2071#define CP_INT_CNTL_RING1__GENERIC2_INT_ENABLE_MASK 0x20000000
2072#define CP_INT_CNTL_RING1__GENERIC2_INT_ENABLE__SHIFT 0x1d
2073#define CP_INT_CNTL_RING1__GENERIC1_INT_ENABLE_MASK 0x40000000
2074#define CP_INT_CNTL_RING1__GENERIC1_INT_ENABLE__SHIFT 0x1e
2075#define CP_INT_CNTL_RING1__GENERIC0_INT_ENABLE_MASK 0x80000000
2076#define CP_INT_CNTL_RING1__GENERIC0_INT_ENABLE__SHIFT 0x1f
2077#define CP_INT_CNTL_RING2__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x800
2078#define CP_INT_CNTL_RING2__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
2079#define CP_INT_CNTL_RING2__CP_ECC_ERROR_INT_ENABLE_MASK 0x4000
2080#define CP_INT_CNTL_RING2__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
2081#define CP_INT_CNTL_RING2__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x20000
2082#define CP_INT_CNTL_RING2__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
2083#define CP_INT_CNTL_RING2__CMP_BUSY_INT_ENABLE_MASK 0x40000
2084#define CP_INT_CNTL_RING2__CMP_BUSY_INT_ENABLE__SHIFT 0x12
2085#define CP_INT_CNTL_RING2__CNTX_BUSY_INT_ENABLE_MASK 0x80000
2086#define CP_INT_CNTL_RING2__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
2087#define CP_INT_CNTL_RING2__CNTX_EMPTY_INT_ENABLE_MASK 0x100000
2088#define CP_INT_CNTL_RING2__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
2089#define CP_INT_CNTL_RING2__GFX_IDLE_INT_ENABLE_MASK 0x200000
2090#define CP_INT_CNTL_RING2__GFX_IDLE_INT_ENABLE__SHIFT 0x15
2091#define CP_INT_CNTL_RING2__PRIV_INSTR_INT_ENABLE_MASK 0x400000
2092#define CP_INT_CNTL_RING2__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
2093#define CP_INT_CNTL_RING2__PRIV_REG_INT_ENABLE_MASK 0x800000
2094#define CP_INT_CNTL_RING2__PRIV_REG_INT_ENABLE__SHIFT 0x17
2095#define CP_INT_CNTL_RING2__OPCODE_ERROR_INT_ENABLE_MASK 0x1000000
2096#define CP_INT_CNTL_RING2__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
2097#define CP_INT_CNTL_RING2__TIME_STAMP_INT_ENABLE_MASK 0x4000000
2098#define CP_INT_CNTL_RING2__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
2099#define CP_INT_CNTL_RING2__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x8000000
2100#define CP_INT_CNTL_RING2__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
2101#define CP_INT_CNTL_RING2__GENERIC2_INT_ENABLE_MASK 0x20000000
2102#define CP_INT_CNTL_RING2__GENERIC2_INT_ENABLE__SHIFT 0x1d
2103#define CP_INT_CNTL_RING2__GENERIC1_INT_ENABLE_MASK 0x40000000
2104#define CP_INT_CNTL_RING2__GENERIC1_INT_ENABLE__SHIFT 0x1e
2105#define CP_INT_CNTL_RING2__GENERIC0_INT_ENABLE_MASK 0x80000000
2106#define CP_INT_CNTL_RING2__GENERIC0_INT_ENABLE__SHIFT 0x1f
2107#define CP_INT_STATUS__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x800
2108#define CP_INT_STATUS__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
2109#define CP_INT_STATUS__CP_ECC_ERROR_INT_STAT_MASK 0x4000
2110#define CP_INT_STATUS__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
2111#define CP_INT_STATUS__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x20000
2112#define CP_INT_STATUS__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
2113#define CP_INT_STATUS__CMP_BUSY_INT_STAT_MASK 0x40000
2114#define CP_INT_STATUS__CMP_BUSY_INT_STAT__SHIFT 0x12
2115#define CP_INT_STATUS__CNTX_BUSY_INT_STAT_MASK 0x80000
2116#define CP_INT_STATUS__CNTX_BUSY_INT_STAT__SHIFT 0x13
2117#define CP_INT_STATUS__CNTX_EMPTY_INT_STAT_MASK 0x100000
2118#define CP_INT_STATUS__CNTX_EMPTY_INT_STAT__SHIFT 0x14
2119#define CP_INT_STATUS__GFX_IDLE_INT_STAT_MASK 0x200000
2120#define CP_INT_STATUS__GFX_IDLE_INT_STAT__SHIFT 0x15
2121#define CP_INT_STATUS__PRIV_INSTR_INT_STAT_MASK 0x400000
2122#define CP_INT_STATUS__PRIV_INSTR_INT_STAT__SHIFT 0x16
2123#define CP_INT_STATUS__PRIV_REG_INT_STAT_MASK 0x800000
2124#define CP_INT_STATUS__PRIV_REG_INT_STAT__SHIFT 0x17
2125#define CP_INT_STATUS__OPCODE_ERROR_INT_STAT_MASK 0x1000000
2126#define CP_INT_STATUS__OPCODE_ERROR_INT_STAT__SHIFT 0x18
2127#define CP_INT_STATUS__TIME_STAMP_INT_STAT_MASK 0x4000000
2128#define CP_INT_STATUS__TIME_STAMP_INT_STAT__SHIFT 0x1a
2129#define CP_INT_STATUS__RESERVED_BIT_ERROR_INT_STAT_MASK 0x8000000
2130#define CP_INT_STATUS__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
2131#define CP_INT_STATUS__GENERIC2_INT_STAT_MASK 0x20000000
2132#define CP_INT_STATUS__GENERIC2_INT_STAT__SHIFT 0x1d
2133#define CP_INT_STATUS__GENERIC1_INT_STAT_MASK 0x40000000
2134#define CP_INT_STATUS__GENERIC1_INT_STAT__SHIFT 0x1e
2135#define CP_INT_STATUS__GENERIC0_INT_STAT_MASK 0x80000000
2136#define CP_INT_STATUS__GENERIC0_INT_STAT__SHIFT 0x1f
2137#define CP_INT_STATUS_RING0__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x800
2138#define CP_INT_STATUS_RING0__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
2139#define CP_INT_STATUS_RING0__CP_ECC_ERROR_INT_STAT_MASK 0x4000
2140#define CP_INT_STATUS_RING0__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
2141#define CP_INT_STATUS_RING0__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x20000
2142#define CP_INT_STATUS_RING0__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
2143#define CP_INT_STATUS_RING0__CMP_BUSY_INT_STAT_MASK 0x40000
2144#define CP_INT_STATUS_RING0__CMP_BUSY_INT_STAT__SHIFT 0x12
2145#define CP_INT_STATUS_RING0__GCNTX_BUSY_INT_STAT_MASK 0x80000
2146#define CP_INT_STATUS_RING0__GCNTX_BUSY_INT_STAT__SHIFT 0x13
2147#define CP_INT_STATUS_RING0__CNTX_EMPTY_INT_STAT_MASK 0x100000
2148#define CP_INT_STATUS_RING0__CNTX_EMPTY_INT_STAT__SHIFT 0x14
2149#define CP_INT_STATUS_RING0__GFX_IDLE_INT_STAT_MASK 0x200000
2150#define CP_INT_STATUS_RING0__GFX_IDLE_INT_STAT__SHIFT 0x15
2151#define CP_INT_STATUS_RING0__PRIV_INSTR_INT_STAT_MASK 0x400000
2152#define CP_INT_STATUS_RING0__PRIV_INSTR_INT_STAT__SHIFT 0x16
2153#define CP_INT_STATUS_RING0__PRIV_REG_INT_STAT_MASK 0x800000
2154#define CP_INT_STATUS_RING0__PRIV_REG_INT_STAT__SHIFT 0x17
2155#define CP_INT_STATUS_RING0__OPCODE_ERROR_INT_STAT_MASK 0x1000000
2156#define CP_INT_STATUS_RING0__OPCODE_ERROR_INT_STAT__SHIFT 0x18
2157#define CP_INT_STATUS_RING0__TIME_STAMP_INT_STAT_MASK 0x4000000
2158#define CP_INT_STATUS_RING0__TIME_STAMP_INT_STAT__SHIFT 0x1a
2159#define CP_INT_STATUS_RING0__RESERVED_BIT_ERROR_INT_STAT_MASK 0x8000000
2160#define CP_INT_STATUS_RING0__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
2161#define CP_INT_STATUS_RING0__GENERIC2_INT_STAT_MASK 0x20000000
2162#define CP_INT_STATUS_RING0__GENERIC2_INT_STAT__SHIFT 0x1d
2163#define CP_INT_STATUS_RING0__GENERIC1_INT_STAT_MASK 0x40000000
2164#define CP_INT_STATUS_RING0__GENERIC1_INT_STAT__SHIFT 0x1e
2165#define CP_INT_STATUS_RING0__GENERIC0_INT_STAT_MASK 0x80000000
2166#define CP_INT_STATUS_RING0__GENERIC0_INT_STAT__SHIFT 0x1f
2167#define CP_INT_STATUS_RING1__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x800
2168#define CP_INT_STATUS_RING1__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
2169#define CP_INT_STATUS_RING1__CP_ECC_ERROR_INT_STAT_MASK 0x4000
2170#define CP_INT_STATUS_RING1__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
2171#define CP_INT_STATUS_RING1__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x20000
2172#define CP_INT_STATUS_RING1__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
2173#define CP_INT_STATUS_RING1__CMP_BUSY_INT_STAT_MASK 0x40000
2174#define CP_INT_STATUS_RING1__CMP_BUSY_INT_STAT__SHIFT 0x12
2175#define CP_INT_STATUS_RING1__CNTX_BUSY_INT_STAT_MASK 0x80000
2176#define CP_INT_STATUS_RING1__CNTX_BUSY_INT_STAT__SHIFT 0x13
2177#define CP_INT_STATUS_RING1__CNTX_EMPTY_INT_STAT_MASK 0x100000
2178#define CP_INT_STATUS_RING1__CNTX_EMPTY_INT_STAT__SHIFT 0x14
2179#define CP_INT_STATUS_RING1__GFX_IDLE_INT_STAT_MASK 0x200000
2180#define CP_INT_STATUS_RING1__GFX_IDLE_INT_STAT__SHIFT 0x15
2181#define CP_INT_STATUS_RING1__PRIV_INSTR_INT_STAT_MASK 0x400000
2182#define CP_INT_STATUS_RING1__PRIV_INSTR_INT_STAT__SHIFT 0x16
2183#define CP_INT_STATUS_RING1__PRIV_REG_INT_STAT_MASK 0x800000
2184#define CP_INT_STATUS_RING1__PRIV_REG_INT_STAT__SHIFT 0x17
2185#define CP_INT_STATUS_RING1__OPCODE_ERROR_INT_STAT_MASK 0x1000000
2186#define CP_INT_STATUS_RING1__OPCODE_ERROR_INT_STAT__SHIFT 0x18
2187#define CP_INT_STATUS_RING1__TIME_STAMP_INT_STAT_MASK 0x4000000
2188#define CP_INT_STATUS_RING1__TIME_STAMP_INT_STAT__SHIFT 0x1a
2189#define CP_INT_STATUS_RING1__RESERVED_BIT_ERROR_INT_STAT_MASK 0x8000000
2190#define CP_INT_STATUS_RING1__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
2191#define CP_INT_STATUS_RING1__GENERIC2_INT_STAT_MASK 0x20000000
2192#define CP_INT_STATUS_RING1__GENERIC2_INT_STAT__SHIFT 0x1d
2193#define CP_INT_STATUS_RING1__GENERIC1_INT_STAT_MASK 0x40000000
2194#define CP_INT_STATUS_RING1__GENERIC1_INT_STAT__SHIFT 0x1e
2195#define CP_INT_STATUS_RING1__GENERIC0_INT_STAT_MASK 0x80000000
2196#define CP_INT_STATUS_RING1__GENERIC0_INT_STAT__SHIFT 0x1f
2197#define CP_INT_STATUS_RING2__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x800
2198#define CP_INT_STATUS_RING2__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
2199#define CP_INT_STATUS_RING2__CP_ECC_ERROR_INT_STAT_MASK 0x4000
2200#define CP_INT_STATUS_RING2__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
2201#define CP_INT_STATUS_RING2__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x20000
2202#define CP_INT_STATUS_RING2__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
2203#define CP_INT_STATUS_RING2__CMP_BUSY_INT_STAT_MASK 0x40000
2204#define CP_INT_STATUS_RING2__CMP_BUSY_INT_STAT__SHIFT 0x12
2205#define CP_INT_STATUS_RING2__CNTX_BUSY_INT_STAT_MASK 0x80000
2206#define CP_INT_STATUS_RING2__CNTX_BUSY_INT_STAT__SHIFT 0x13
2207#define CP_INT_STATUS_RING2__CNTX_EMPTY_INT_STAT_MASK 0x100000
2208#define CP_INT_STATUS_RING2__CNTX_EMPTY_INT_STAT__SHIFT 0x14
2209#define CP_INT_STATUS_RING2__GFX_IDLE_INT_STAT_MASK 0x200000
2210#define CP_INT_STATUS_RING2__GFX_IDLE_INT_STAT__SHIFT 0x15
2211#define CP_INT_STATUS_RING2__PRIV_INSTR_INT_STAT_MASK 0x400000
2212#define CP_INT_STATUS_RING2__PRIV_INSTR_INT_STAT__SHIFT 0x16
2213#define CP_INT_STATUS_RING2__PRIV_REG_INT_STAT_MASK 0x800000
2214#define CP_INT_STATUS_RING2__PRIV_REG_INT_STAT__SHIFT 0x17
2215#define CP_INT_STATUS_RING2__OPCODE_ERROR_INT_STAT_MASK 0x1000000
2216#define CP_INT_STATUS_RING2__OPCODE_ERROR_INT_STAT__SHIFT 0x18
2217#define CP_INT_STATUS_RING2__TIME_STAMP_INT_STAT_MASK 0x4000000
2218#define CP_INT_STATUS_RING2__TIME_STAMP_INT_STAT__SHIFT 0x1a
2219#define CP_INT_STATUS_RING2__RESERVED_BIT_ERROR_INT_STAT_MASK 0x8000000
2220#define CP_INT_STATUS_RING2__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
2221#define CP_INT_STATUS_RING2__GENERIC2_INT_STAT_MASK 0x20000000
2222#define CP_INT_STATUS_RING2__GENERIC2_INT_STAT__SHIFT 0x1d
2223#define CP_INT_STATUS_RING2__GENERIC1_INT_STAT_MASK 0x40000000
2224#define CP_INT_STATUS_RING2__GENERIC1_INT_STAT__SHIFT 0x1e
2225#define CP_INT_STATUS_RING2__GENERIC0_INT_STAT_MASK 0x80000000
2226#define CP_INT_STATUS_RING2__GENERIC0_INT_STAT__SHIFT 0x1f
2227#define CP_DEVICE_ID__DEVICE_ID_MASK 0xff
2228#define CP_DEVICE_ID__DEVICE_ID__SHIFT 0x0
2229#define CP_RING_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0xff
2230#define CP_RING_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
2231#define CP_RING_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0xff00
2232#define CP_RING_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
2233#define CP_RING_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0xff0000
2234#define CP_RING_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
2235#define CP_RING_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xff000000
2236#define CP_RING_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
2237#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0xff
2238#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
2239#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0xff00
2240#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
2241#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0xff0000
2242#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
2243#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xff000000
2244#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
2245#define CP_RING0_PRIORITY__PRIORITY_MASK 0x3
2246#define CP_RING0_PRIORITY__PRIORITY__SHIFT 0x0
2247#define CP_ME0_PIPE0_PRIORITY__PRIORITY_MASK 0x3
2248#define CP_ME0_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
2249#define CP_RING1_PRIORITY__PRIORITY_MASK 0x3
2250#define CP_RING1_PRIORITY__PRIORITY__SHIFT 0x0
2251#define CP_ME0_PIPE1_PRIORITY__PRIORITY_MASK 0x3
2252#define CP_ME0_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
2253#define CP_RING2_PRIORITY__PRIORITY_MASK 0x3
/*
 * Auto-generated register bitfield definitions (MASK / __SHIFT pairs) for
 * AMD GPU command-processor (CP) and clock-gating (CGTT) registers.
 * Each field is described by two macros:
 *   <REG>__<FIELD>_MASK    — bits occupied by the field within the 32-bit register
 *   <REG>__<FIELD>__SHIFT  — bit position of the field's LSB
 * so that (value & MASK) >> SHIFT extracts the field.
 *
 * NOTE(review): this chunk had spurious decimal line-number prefixes fused onto
 * every line (an artifact of extraction from a diff viewer), which made every
 * directive invalid C. The prefixes are removed below; all macro names and
 * values are preserved exactly. Do not hand-edit values — this table is
 * machine-generated from the hardware register database.
 */
#define CP_RING2_PRIORITY__PRIORITY__SHIFT 0x0
#define CP_ME0_PIPE2_PRIORITY__PRIORITY_MASK 0x3
#define CP_ME0_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
#define CP_ENDIAN_SWAP__ENDIAN_SWAP_MASK 0x3
#define CP_ENDIAN_SWAP__ENDIAN_SWAP__SHIFT 0x0
#define CP_RB_VMID__RB0_VMID_MASK 0xf
#define CP_RB_VMID__RB0_VMID__SHIFT 0x0
#define CP_RB_VMID__RB1_VMID_MASK 0xf00
#define CP_RB_VMID__RB1_VMID__SHIFT 0x8
#define CP_RB_VMID__RB2_VMID_MASK 0xf0000
#define CP_RB_VMID__RB2_VMID__SHIFT 0x10
#define CP_ME0_PIPE0_VMID__VMID_MASK 0xf
#define CP_ME0_PIPE0_VMID__VMID__SHIFT 0x0
#define CP_ME0_PIPE1_VMID__VMID_MASK 0xf
#define CP_ME0_PIPE1_VMID__VMID__SHIFT 0x0
#define CP_RB_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK 0x7ffffc
#define CP_RB_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT 0x2
#define CP_RB_DOORBELL_CONTROL__DOORBELL_EN_MASK 0x40000000
#define CP_RB_DOORBELL_CONTROL__DOORBELL_EN__SHIFT 0x1e
#define CP_RB_DOORBELL_CONTROL__DOORBELL_HIT_MASK 0x80000000
#define CP_RB_DOORBELL_CONTROL__DOORBELL_HIT__SHIFT 0x1f
#define CP_RB_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER_MASK 0x7ffffc
#define CP_RB_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER__SHIFT 0x2
#define CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK 0x7ffffc
#define CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER__SHIFT 0x2
#define CP_MEC_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER_MASK 0x7ffffc
#define CP_MEC_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER__SHIFT 0x2
#define CP_MEC_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK 0x7ffffc
#define CP_MEC_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER__SHIFT 0x2
#define CP_PFP_UCODE_ADDR__UCODE_ADDR_MASK 0x1fff
#define CP_PFP_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
#define CP_PFP_UCODE_DATA__UCODE_DATA_MASK 0xffffffff
#define CP_PFP_UCODE_DATA__UCODE_DATA__SHIFT 0x0
#define CP_ME_RAM_RADDR__ME_RAM_RADDR_MASK 0x1fff
#define CP_ME_RAM_RADDR__ME_RAM_RADDR__SHIFT 0x0
#define CP_ME_RAM_WADDR__ME_RAM_WADDR_MASK 0x1fff
#define CP_ME_RAM_WADDR__ME_RAM_WADDR__SHIFT 0x0
#define CP_ME_RAM_DATA__ME_RAM_DATA_MASK 0xffffffff
#define CP_ME_RAM_DATA__ME_RAM_DATA__SHIFT 0x0
#define CGTT_CPC_CLK_CTRL__ON_DELAY_MASK 0xf
#define CGTT_CPC_CLK_CTRL__ON_DELAY__SHIFT 0x0
#define CGTT_CPC_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
#define CGTT_CPC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000
#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000
#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000
#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
#define CGTT_CPF_CLK_CTRL__ON_DELAY_MASK 0xf
#define CGTT_CPF_CLK_CTRL__ON_DELAY__SHIFT 0x0
#define CGTT_CPF_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
#define CGTT_CPF_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000
#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000
#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000
#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
#define CGTT_CP_CLK_CTRL__ON_DELAY_MASK 0xf
#define CGTT_CP_CLK_CTRL__ON_DELAY__SHIFT 0x0
#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000
#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000
#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000
#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
#define CP_CE_UCODE_ADDR__UCODE_ADDR_MASK 0xfff
#define CP_CE_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
#define CP_CE_UCODE_DATA__UCODE_DATA_MASK 0xffffffff
#define CP_CE_UCODE_DATA__UCODE_DATA__SHIFT 0x0
#define CP_MEC_ME1_UCODE_ADDR__UCODE_ADDR_MASK 0x1ffff
#define CP_MEC_ME1_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
#define CP_MEC_ME1_UCODE_DATA__UCODE_DATA_MASK 0xffffffff
#define CP_MEC_ME1_UCODE_DATA__UCODE_DATA__SHIFT 0x0
#define CP_MEC_ME2_UCODE_ADDR__UCODE_ADDR_MASK 0x1ffff
#define CP_MEC_ME2_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
#define CP_MEC_ME2_UCODE_DATA__UCODE_DATA_MASK 0xffffffff
#define CP_MEC_ME2_UCODE_DATA__UCODE_DATA__SHIFT 0x0
#define CP_PFP_F32_INTERRUPT__PRIV_REG_INT_MASK 0x2
#define CP_PFP_F32_INTERRUPT__PRIV_REG_INT__SHIFT 0x1
#define CP_MEC1_F32_INTERRUPT__PRIV_REG_INT_MASK 0x2
#define CP_MEC1_F32_INTERRUPT__PRIV_REG_INT__SHIFT 0x1
#define CP_MEC2_F32_INTERRUPT__PRIV_REG_INT_MASK 0x2
#define CP_MEC2_F32_INTERRUPT__PRIV_REG_INT__SHIFT 0x1
#define CP_MEC1_F32_INT_DIS__EDC_ROQ_FED_INT_MASK 0x1
#define CP_MEC1_F32_INT_DIS__EDC_ROQ_FED_INT__SHIFT 0x0
#define CP_MEC1_F32_INT_DIS__PRIV_REG_INT_MASK 0x2
#define CP_MEC1_F32_INT_DIS__PRIV_REG_INT__SHIFT 0x1
#define CP_MEC1_F32_INT_DIS__RESERVED_BIT_ERR_INT_MASK 0x4
#define CP_MEC1_F32_INT_DIS__RESERVED_BIT_ERR_INT__SHIFT 0x2
#define CP_MEC1_F32_INT_DIS__EDC_TC_FED_INT_MASK 0x8
#define CP_MEC1_F32_INT_DIS__EDC_TC_FED_INT__SHIFT 0x3
#define CP_MEC1_F32_INT_DIS__EDC_GDS_FED_INT_MASK 0x10
#define CP_MEC1_F32_INT_DIS__EDC_GDS_FED_INT__SHIFT 0x4
#define CP_MEC1_F32_INT_DIS__EDC_SCRATCH_FED_INT_MASK 0x20
#define CP_MEC1_F32_INT_DIS__EDC_SCRATCH_FED_INT__SHIFT 0x5
#define CP_MEC1_F32_INT_DIS__WAVE_RESTORE_INT_MASK 0x40
#define CP_MEC1_F32_INT_DIS__WAVE_RESTORE_INT__SHIFT 0x6
#define CP_MEC1_F32_INT_DIS__SUA_VIOLATION_INT_MASK 0x80
#define CP_MEC1_F32_INT_DIS__SUA_VIOLATION_INT__SHIFT 0x7
#define CP_MEC1_F32_INT_DIS__EDC_DMA_FED_INT_MASK 0x100
#define CP_MEC1_F32_INT_DIS__EDC_DMA_FED_INT__SHIFT 0x8
#define CP_MEC1_F32_INT_DIS__IQ_TIMER_INT_MASK 0x200
#define CP_MEC1_F32_INT_DIS__IQ_TIMER_INT__SHIFT 0x9
#define CP_MEC2_F32_INT_DIS__EDC_ROQ_FED_INT_MASK 0x1
#define CP_MEC2_F32_INT_DIS__EDC_ROQ_FED_INT__SHIFT 0x0
#define CP_MEC2_F32_INT_DIS__PRIV_REG_INT_MASK 0x2
#define CP_MEC2_F32_INT_DIS__PRIV_REG_INT__SHIFT 0x1
#define CP_MEC2_F32_INT_DIS__RESERVED_BIT_ERR_INT_MASK 0x4
#define CP_MEC2_F32_INT_DIS__RESERVED_BIT_ERR_INT__SHIFT 0x2
#define CP_MEC2_F32_INT_DIS__EDC_TC_FED_INT_MASK 0x8
#define CP_MEC2_F32_INT_DIS__EDC_TC_FED_INT__SHIFT 0x3
#define CP_MEC2_F32_INT_DIS__EDC_GDS_FED_INT_MASK 0x10
#define CP_MEC2_F32_INT_DIS__EDC_GDS_FED_INT__SHIFT 0x4
#define CP_MEC2_F32_INT_DIS__EDC_SCRATCH_FED_INT_MASK 0x20
#define CP_MEC2_F32_INT_DIS__EDC_SCRATCH_FED_INT__SHIFT 0x5
#define CP_MEC2_F32_INT_DIS__WAVE_RESTORE_INT_MASK 0x40
#define CP_MEC2_F32_INT_DIS__WAVE_RESTORE_INT__SHIFT 0x6
#define CP_MEC2_F32_INT_DIS__SUA_VIOLATION_INT_MASK 0x80
#define CP_MEC2_F32_INT_DIS__SUA_VIOLATION_INT__SHIFT 0x7
#define CP_MEC2_F32_INT_DIS__EDC_DMA_FED_INT_MASK 0x100
#define CP_MEC2_F32_INT_DIS__EDC_DMA_FED_INT__SHIFT 0x8
#define CP_MEC2_F32_INT_DIS__IQ_TIMER_INT_MASK 0x200
#define CP_MEC2_F32_INT_DIS__IQ_TIMER_INT__SHIFT 0x9
#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE0_MASK 0x1
#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE0__SHIFT 0x0
#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE1_MASK 0x2
#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE1__SHIFT 0x1
#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE0_MASK 0x100
#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE0__SHIFT 0x8
#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE1_MASK 0x200
#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE1__SHIFT 0x9
#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE2_MASK 0x400
#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE2__SHIFT 0xa
#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE3_MASK 0x800
#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE3__SHIFT 0xb
#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE0_MASK 0x10000
#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE0__SHIFT 0x10
#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE1_MASK 0x20000
#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE1__SHIFT 0x11
#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE2_MASK 0x40000
#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE2__SHIFT 0x12
#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE3_MASK 0x80000
#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE3__SHIFT 0x13
#define CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK 0x1
#define CP_MEM_SLP_CNTL__CP_MEM_LS_EN__SHIFT 0x0
#define CP_MEM_SLP_CNTL__CP_MEM_DS_EN_MASK 0x2
#define CP_MEM_SLP_CNTL__CP_MEM_DS_EN__SHIFT 0x1
#define CP_MEM_SLP_CNTL__RESERVED_MASK 0x7c
#define CP_MEM_SLP_CNTL__RESERVED__SHIFT 0x2
#define CP_MEM_SLP_CNTL__CP_LS_DS_BUSY_OVERRIDE_MASK 0x80
#define CP_MEM_SLP_CNTL__CP_LS_DS_BUSY_OVERRIDE__SHIFT 0x7
#define CP_MEM_SLP_CNTL__CP_MEM_LS_ON_DELAY_MASK 0xff00
#define CP_MEM_SLP_CNTL__CP_MEM_LS_ON_DELAY__SHIFT 0x8
#define CP_MEM_SLP_CNTL__CP_MEM_LS_OFF_DELAY_MASK 0xff0000
#define CP_MEM_SLP_CNTL__CP_MEM_LS_OFF_DELAY__SHIFT 0x10
#define CP_MEM_SLP_CNTL__RESERVED1_MASK 0xff000000
#define CP_MEM_SLP_CNTL__RESERVED1__SHIFT 0x18
#define CP_ECC_FIRSTOCCURRENCE__INTERFACE_MASK 0x3
#define CP_ECC_FIRSTOCCURRENCE__INTERFACE__SHIFT 0x0
#define CP_ECC_FIRSTOCCURRENCE__CLIENT_MASK 0xf0
#define CP_ECC_FIRSTOCCURRENCE__CLIENT__SHIFT 0x4
#define CP_ECC_FIRSTOCCURRENCE__ME_MASK 0x300
#define CP_ECC_FIRSTOCCURRENCE__ME__SHIFT 0x8
#define CP_ECC_FIRSTOCCURRENCE__PIPE_MASK 0xc00
#define CP_ECC_FIRSTOCCURRENCE__PIPE__SHIFT 0xa
#define CP_ECC_FIRSTOCCURRENCE__QUEUE_MASK 0x7000
#define CP_ECC_FIRSTOCCURRENCE__QUEUE__SHIFT 0xc
#define CP_ECC_FIRSTOCCURRENCE__VMID_MASK 0xf0000
#define CP_ECC_FIRSTOCCURRENCE__VMID__SHIFT 0x10
#define CP_ECC_FIRSTOCCURRENCE_RING0__OBSOLETE_MASK 0xffffffff
#define CP_ECC_FIRSTOCCURRENCE_RING0__OBSOLETE__SHIFT 0x0
#define CP_ECC_FIRSTOCCURRENCE_RING1__OBSOLETE_MASK 0xffffffff
#define CP_ECC_FIRSTOCCURRENCE_RING1__OBSOLETE__SHIFT 0x0
#define CP_ECC_FIRSTOCCURRENCE_RING2__OBSOLETE_MASK 0xffffffff
#define CP_ECC_FIRSTOCCURRENCE_RING2__OBSOLETE__SHIFT 0x0
#define CP_PQ_WPTR_POLL_CNTL__PERIOD_MASK 0xff
#define CP_PQ_WPTR_POLL_CNTL__PERIOD__SHIFT 0x0
#define CP_PQ_WPTR_POLL_CNTL__POLL_ACTIVE_MASK 0x40000000
#define CP_PQ_WPTR_POLL_CNTL__POLL_ACTIVE__SHIFT 0x1e
#define CP_PQ_WPTR_POLL_CNTL__EN_MASK 0x80000000
#define CP_PQ_WPTR_POLL_CNTL__EN__SHIFT 0x1f
#define CP_PQ_WPTR_POLL_CNTL1__QUEUE_MASK_MASK 0xffffffff
#define CP_PQ_WPTR_POLL_CNTL1__QUEUE_MASK__SHIFT 0x0
#define CPC_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x1000
#define CPC_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
#define CPC_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x2000
#define CPC_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
#define CPC_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x4000
#define CPC_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
#define CPC_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x8000
#define CPC_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
#define CPC_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x20000
#define CPC_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
#define CPC_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x800000
#define CPC_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
#define CPC_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x1000000
#define CPC_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
#define CPC_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x4000000
#define CPC_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
#define CPC_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x8000000
#define CPC_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
#define CPC_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000
#define CPC_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
#define CPC_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000
#define CPC_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
#define CPC_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000
#define CPC_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
#define CP_ME1_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x1000
#define CP_ME1_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
#define CP_ME1_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x2000
#define CP_ME1_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
#define CP_ME1_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x4000
#define CP_ME1_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
#define CP_ME1_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x8000
#define CP_ME1_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
#define CP_ME1_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x20000
#define CP_ME1_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
#define CP_ME1_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x800000
#define CP_ME1_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
#define CP_ME1_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x1000000
#define CP_ME1_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
#define CP_ME1_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x4000000
#define CP_ME1_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
#define CP_ME1_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x8000000
#define CP_ME1_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
#define CP_ME1_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000
#define CP_ME1_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
#define CP_ME1_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000
#define CP_ME1_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
#define CP_ME1_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000
#define CP_ME1_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
#define CP_ME1_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x1000
#define CP_ME1_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
#define CP_ME1_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x2000
#define CP_ME1_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
#define CP_ME1_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x4000
#define CP_ME1_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
#define CP_ME1_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x8000
#define CP_ME1_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
#define CP_ME1_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x20000
#define CP_ME1_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
#define CP_ME1_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x800000
#define CP_ME1_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
#define CP_ME1_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x1000000
#define CP_ME1_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
#define CP_ME1_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x4000000
#define CP_ME1_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
#define CP_ME1_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x8000000
#define CP_ME1_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
#define CP_ME1_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000
#define CP_ME1_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
#define CP_ME1_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000
#define CP_ME1_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
#define CP_ME1_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000
#define CP_ME1_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
#define CP_ME1_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x1000
#define CP_ME1_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
#define CP_ME1_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x2000
#define CP_ME1_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
#define CP_ME1_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x4000
#define CP_ME1_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
#define CP_ME1_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x8000
#define CP_ME1_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
#define CP_ME1_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x20000
#define CP_ME1_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
#define CP_ME1_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x800000
#define CP_ME1_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
#define CP_ME1_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x1000000
#define CP_ME1_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
#define CP_ME1_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x4000000
#define CP_ME1_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
#define CP_ME1_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x8000000
#define CP_ME1_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
#define CP_ME1_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000
#define CP_ME1_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
#define CP_ME1_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000
#define CP_ME1_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
#define CP_ME1_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000
#define CP_ME1_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
#define CP_ME1_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x1000
#define CP_ME1_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
#define CP_ME1_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x2000
#define CP_ME1_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
#define CP_ME1_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x4000
#define CP_ME1_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
#define CP_ME1_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x8000
#define CP_ME1_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
#define CP_ME1_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x20000
#define CP_ME1_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
#define CP_ME1_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x800000
#define CP_ME1_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
#define CP_ME1_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x1000000
#define CP_ME1_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
#define CP_ME1_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x4000000
#define CP_ME1_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
#define CP_ME1_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x8000000
#define CP_ME1_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
#define CP_ME1_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000
#define CP_ME1_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
#define CP_ME1_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000
#define CP_ME1_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
#define CP_ME1_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000
#define CP_ME1_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
#define CP_ME2_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x1000
#define CP_ME2_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
#define CP_ME2_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x2000
#define CP_ME2_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
#define CP_ME2_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x4000
#define CP_ME2_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
#define CP_ME2_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x8000
#define CP_ME2_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
#define CP_ME2_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x20000
#define CP_ME2_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
#define CP_ME2_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x800000
#define CP_ME2_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
#define CP_ME2_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x1000000
#define CP_ME2_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
#define CP_ME2_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x4000000
#define CP_ME2_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
#define CP_ME2_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x8000000
#define CP_ME2_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
#define CP_ME2_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000
#define CP_ME2_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
#define CP_ME2_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000
#define CP_ME2_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
#define CP_ME2_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000
#define CP_ME2_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
#define CP_ME2_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x1000
#define CP_ME2_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
#define CP_ME2_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x2000
#define CP_ME2_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
#define CP_ME2_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x4000
#define CP_ME2_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
#define CP_ME2_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x8000
#define CP_ME2_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
#define CP_ME2_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x20000
#define CP_ME2_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
#define CP_ME2_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x800000
#define CP_ME2_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
#define CP_ME2_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x1000000
#define CP_ME2_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
#define CP_ME2_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x4000000
#define CP_ME2_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
#define CP_ME2_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x8000000
#define CP_ME2_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
#define CP_ME2_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000
#define CP_ME2_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
#define CP_ME2_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000
#define CP_ME2_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
#define CP_ME2_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000
#define CP_ME2_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
#define CP_ME2_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x1000
#define CP_ME2_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
#define CP_ME2_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x2000
#define CP_ME2_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
#define CP_ME2_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x4000
#define CP_ME2_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
#define CP_ME2_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x8000
#define CP_ME2_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
#define CP_ME2_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x20000
#define CP_ME2_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
#define CP_ME2_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x800000
#define CP_ME2_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
#define CP_ME2_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x1000000
#define CP_ME2_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
#define CP_ME2_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x4000000
#define CP_ME2_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
#define CP_ME2_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x8000000
#define CP_ME2_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
#define CP_ME2_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000
#define CP_ME2_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
#define CP_ME2_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000
#define CP_ME2_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
#define CP_ME2_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000
#define CP_ME2_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
#define CP_ME2_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x1000
#define CP_ME2_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
#define CP_ME2_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x2000
#define CP_ME2_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
#define CP_ME2_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x4000
#define CP_ME2_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
#define CP_ME2_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x8000
#define CP_ME2_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
#define CP_ME2_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x20000
#define CP_ME2_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
#define CP_ME2_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x800000
#define CP_ME2_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
#define CP_ME2_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x1000000
#define CP_ME2_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
#define CP_ME2_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x4000000
#define CP_ME2_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
#define CP_ME2_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x8000000
#define CP_ME2_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
#define CP_ME2_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000
#define CP_ME2_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
#define CP_ME2_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000
#define CP_ME2_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
#define CP_ME2_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000
#define CP_ME2_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
#define CPC_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x1000
#define CPC_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
#define CPC_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x2000
#define CPC_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
#define CPC_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x4000
#define CPC_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
#define CPC_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x8000
#define CPC_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
#define CPC_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x20000
#define CPC_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
#define CPC_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x800000
#define CPC_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
#define CPC_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x1000000
#define CPC_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
#define CPC_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x4000000
#define CPC_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
#define CPC_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x8000000
#define CPC_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
#define CPC_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000
#define CPC_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
#define CPC_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000
#define CPC_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
#define CPC_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000
#define CPC_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
#define CP_ME1_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x1000
#define CP_ME1_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
#define CP_ME1_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x2000
#define CP_ME1_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
#define CP_ME1_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x4000
#define CP_ME1_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
#define CP_ME1_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x8000
#define CP_ME1_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
#define CP_ME1_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x20000
#define CP_ME1_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
#define CP_ME1_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x800000
#define CP_ME1_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
#define CP_ME1_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x1000000
#define CP_ME1_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
#define CP_ME1_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x4000000
#define CP_ME1_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
#define CP_ME1_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x8000000
#define CP_ME1_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
#define CP_ME1_PIPE0_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000
#define CP_ME1_PIPE0_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
#define CP_ME1_PIPE0_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000
#define CP_ME1_PIPE0_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
#define CP_ME1_PIPE0_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000
#define CP_ME1_PIPE0_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
#define CP_ME1_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x1000
#define CP_ME1_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
#define CP_ME1_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x2000
#define CP_ME1_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
#define CP_ME1_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x4000
#define CP_ME1_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
#define CP_ME1_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x8000
#define CP_ME1_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
#define CP_ME1_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x20000
#define CP_ME1_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
#define CP_ME1_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x800000
#define CP_ME1_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
#define CP_ME1_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x1000000
#define CP_ME1_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
#define CP_ME1_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x4000000
#define CP_ME1_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
#define CP_ME1_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x8000000
#define CP_ME1_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
#define CP_ME1_PIPE1_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000
#define CP_ME1_PIPE1_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
#define CP_ME1_PIPE1_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000
#define CP_ME1_PIPE1_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
#define CP_ME1_PIPE1_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000
#define CP_ME1_PIPE1_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
#define CP_ME1_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x1000
#define CP_ME1_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
#define CP_ME1_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x2000
#define CP_ME1_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
#define CP_ME1_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x4000
#define CP_ME1_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
#define CP_ME1_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x8000
#define CP_ME1_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
#define CP_ME1_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x20000
#define CP_ME1_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
#define CP_ME1_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x800000
#define CP_ME1_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
#define CP_ME1_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x1000000
#define CP_ME1_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
#define CP_ME1_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x4000000
#define CP_ME1_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
#define CP_ME1_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x8000000
#define CP_ME1_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
#define CP_ME1_PIPE2_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000
#define CP_ME1_PIPE2_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
#define CP_ME1_PIPE2_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000
#define CP_ME1_PIPE2_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
#define CP_ME1_PIPE2_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000
#define CP_ME1_PIPE2_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
#define CP_ME1_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x1000
#define CP_ME1_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
#define CP_ME1_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x2000
#define CP_ME1_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
#define CP_ME1_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x4000
#define CP_ME1_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
#define CP_ME1_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x8000
#define CP_ME1_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
#define CP_ME1_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x20000
#define CP_ME1_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
#define CP_ME1_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x800000
#define CP_ME1_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
#define CP_ME1_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x1000000
#define CP_ME1_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
#define CP_ME1_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x4000000
#define CP_ME1_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
#define CP_ME1_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x8000000
#define CP_ME1_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
#define CP_ME1_PIPE3_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000
#define CP_ME1_PIPE3_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
#define CP_ME1_PIPE3_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000
#define CP_ME1_PIPE3_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
#define CP_ME1_PIPE3_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000
#define CP_ME1_PIPE3_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
#define CP_ME2_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x1000
#define CP_ME2_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
#define CP_ME2_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x2000
#define CP_ME2_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
#define CP_ME2_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x4000
#define CP_ME2_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
#define CP_ME2_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x8000
#define CP_ME2_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
#define CP_ME2_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x20000
#define CP_ME2_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
#define CP_ME2_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x800000
#define CP_ME2_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
#define CP_ME2_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x1000000
#define CP_ME2_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
#define CP_ME2_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x4000000
#define CP_ME2_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
#define CP_ME2_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x8000000
2794#define CP_ME2_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
2795#define CP_ME2_PIPE0_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000
2796#define CP_ME2_PIPE0_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
2797#define CP_ME2_PIPE0_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000
2798#define CP_ME2_PIPE0_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
2799#define CP_ME2_PIPE0_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000
2800#define CP_ME2_PIPE0_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
2801#define CP_ME2_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x1000
2802#define CP_ME2_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
2803#define CP_ME2_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x2000
2804#define CP_ME2_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
2805#define CP_ME2_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x4000
2806#define CP_ME2_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
2807#define CP_ME2_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x8000
2808#define CP_ME2_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
2809#define CP_ME2_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x20000
2810#define CP_ME2_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
2811#define CP_ME2_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x800000
2812#define CP_ME2_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
2813#define CP_ME2_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x1000000
2814#define CP_ME2_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
2815#define CP_ME2_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x4000000
2816#define CP_ME2_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
2817#define CP_ME2_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x8000000
2818#define CP_ME2_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
2819#define CP_ME2_PIPE1_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000
2820#define CP_ME2_PIPE1_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
2821#define CP_ME2_PIPE1_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000
2822#define CP_ME2_PIPE1_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
2823#define CP_ME2_PIPE1_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000
2824#define CP_ME2_PIPE1_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
2825#define CP_ME2_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x1000
2826#define CP_ME2_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
2827#define CP_ME2_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x2000
2828#define CP_ME2_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
2829#define CP_ME2_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x4000
2830#define CP_ME2_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
2831#define CP_ME2_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x8000
2832#define CP_ME2_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
2833#define CP_ME2_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x20000
2834#define CP_ME2_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
2835#define CP_ME2_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x800000
2836#define CP_ME2_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
2837#define CP_ME2_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x1000000
2838#define CP_ME2_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
2839#define CP_ME2_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x4000000
2840#define CP_ME2_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
2841#define CP_ME2_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x8000000
2842#define CP_ME2_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
2843#define CP_ME2_PIPE2_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000
2844#define CP_ME2_PIPE2_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
2845#define CP_ME2_PIPE2_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000
2846#define CP_ME2_PIPE2_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
2847#define CP_ME2_PIPE2_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000
2848#define CP_ME2_PIPE2_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
2849#define CP_ME2_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x1000
2850#define CP_ME2_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
2851#define CP_ME2_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x2000
2852#define CP_ME2_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
2853#define CP_ME2_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x4000
2854#define CP_ME2_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
2855#define CP_ME2_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x8000
2856#define CP_ME2_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
2857#define CP_ME2_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x20000
2858#define CP_ME2_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
2859#define CP_ME2_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x800000
2860#define CP_ME2_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
2861#define CP_ME2_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x1000000
2862#define CP_ME2_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
2863#define CP_ME2_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x4000000
2864#define CP_ME2_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
2865#define CP_ME2_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x8000000
2866#define CP_ME2_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
2867#define CP_ME2_PIPE3_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000
2868#define CP_ME2_PIPE3_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
2869#define CP_ME2_PIPE3_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000
2870#define CP_ME2_PIPE3_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
2871#define CP_ME2_PIPE3_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000
2872#define CP_ME2_PIPE3_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
2873#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK 0x1000
2874#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT 0xc
2875#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK 0x2000
2876#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT 0xd
2877#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x4000
2878#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
2879#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK 0x8000
2880#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
2881#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x20000
2882#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
2883#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x800000
2884#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
2885#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x1000000
2886#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
2887#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x4000000
2888#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
2889#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x8000000
2890#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
2891#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000
2892#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
2893#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000
2894#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
2895#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000
2896#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
2897#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK 0x1000
2898#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT 0xc
2899#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK 0x2000
2900#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT 0xd
2901#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x4000
2902#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
2903#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK 0x8000
2904#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
2905#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x20000
2906#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
2907#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x800000
2908#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
2909#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x1000000
2910#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
2911#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x4000000
2912#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
2913#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x8000000
2914#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
2915#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000
2916#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
2917#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000
2918#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
2919#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000
2920#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
2921#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0xff
2922#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
2923#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0xff00
2924#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
2925#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0xff0000
2926#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
2927#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xff000000
2928#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
2929#define CP_ME1_PIPE0_PRIORITY__PRIORITY_MASK 0x3
2930#define CP_ME1_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
2931#define CP_ME1_PIPE1_PRIORITY__PRIORITY_MASK 0x3
2932#define CP_ME1_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
2933#define CP_ME1_PIPE2_PRIORITY__PRIORITY_MASK 0x3
2934#define CP_ME1_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
2935#define CP_ME1_PIPE3_PRIORITY__PRIORITY_MASK 0x3
2936#define CP_ME1_PIPE3_PRIORITY__PRIORITY__SHIFT 0x0
2937#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0xff
2938#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
2939#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0xff00
2940#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
2941#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0xff0000
2942#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
2943#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xff000000
2944#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
2945#define CP_ME2_PIPE0_PRIORITY__PRIORITY_MASK 0x3
2946#define CP_ME2_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
2947#define CP_ME2_PIPE1_PRIORITY__PRIORITY_MASK 0x3
2948#define CP_ME2_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
2949#define CP_ME2_PIPE2_PRIORITY__PRIORITY_MASK 0x3
2950#define CP_ME2_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
2951#define CP_ME2_PIPE3_PRIORITY__PRIORITY_MASK 0x3
2952#define CP_ME2_PIPE3_PRIORITY__PRIORITY__SHIFT 0x0
2953#define CP_CE_PRGRM_CNTR_START__IP_START_MASK 0x7ff
2954#define CP_CE_PRGRM_CNTR_START__IP_START__SHIFT 0x0
2955#define CP_PFP_PRGRM_CNTR_START__IP_START_MASK 0xfff
2956#define CP_PFP_PRGRM_CNTR_START__IP_START__SHIFT 0x0
2957#define CP_ME_PRGRM_CNTR_START__IP_START_MASK 0xfff
2958#define CP_ME_PRGRM_CNTR_START__IP_START__SHIFT 0x0
2959#define CP_MEC1_PRGRM_CNTR_START__IP_START_MASK 0xffff
2960#define CP_MEC1_PRGRM_CNTR_START__IP_START__SHIFT 0x0
2961#define CP_MEC2_PRGRM_CNTR_START__IP_START_MASK 0xffff
2962#define CP_MEC2_PRGRM_CNTR_START__IP_START__SHIFT 0x0
2963#define CP_CE_INTR_ROUTINE_START__IR_START_MASK 0x7ff
2964#define CP_CE_INTR_ROUTINE_START__IR_START__SHIFT 0x0
2965#define CP_PFP_INTR_ROUTINE_START__IR_START_MASK 0xfff
2966#define CP_PFP_INTR_ROUTINE_START__IR_START__SHIFT 0x0
2967#define CP_ME_INTR_ROUTINE_START__IR_START_MASK 0xfff
2968#define CP_ME_INTR_ROUTINE_START__IR_START__SHIFT 0x0
2969#define CP_MEC1_INTR_ROUTINE_START__IR_START_MASK 0xffff
2970#define CP_MEC1_INTR_ROUTINE_START__IR_START__SHIFT 0x0
2971#define CP_MEC2_INTR_ROUTINE_START__IR_START_MASK 0xffff
2972#define CP_MEC2_INTR_ROUTINE_START__IR_START__SHIFT 0x0
2973#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_WD_CNTX_MASK 0x7
2974#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_WD_CNTX__SHIFT 0x0
2975#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_PIPE_CNTX_MASK 0x70
2976#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_PIPE_CNTX__SHIFT 0x4
2977#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_WD_CNTX_MASK 0x70000
2978#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_WD_CNTX__SHIFT 0x10
2979#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_PIPE_CNTX_MASK 0x700000
2980#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_PIPE_CNTX__SHIFT 0x14
2981#define CP_MAX_CONTEXT__MAX_CONTEXT_MASK 0x7
2982#define CP_MAX_CONTEXT__MAX_CONTEXT__SHIFT 0x0
2983#define CP_IQ_WAIT_TIME1__IB_OFFLOAD_MASK 0xff
2984#define CP_IQ_WAIT_TIME1__IB_OFFLOAD__SHIFT 0x0
2985#define CP_IQ_WAIT_TIME1__ATOMIC_OFFLOAD_MASK 0xff00
2986#define CP_IQ_WAIT_TIME1__ATOMIC_OFFLOAD__SHIFT 0x8
2987#define CP_IQ_WAIT_TIME1__WRM_OFFLOAD_MASK 0xff0000
2988#define CP_IQ_WAIT_TIME1__WRM_OFFLOAD__SHIFT 0x10
2989#define CP_IQ_WAIT_TIME1__GWS_MASK 0xff000000
2990#define CP_IQ_WAIT_TIME1__GWS__SHIFT 0x18
2991#define CP_IQ_WAIT_TIME2__QUE_SLEEP_MASK 0xff
2992#define CP_IQ_WAIT_TIME2__QUE_SLEEP__SHIFT 0x0
2993#define CP_IQ_WAIT_TIME2__SCH_WAVE_MASK 0xff00
2994#define CP_IQ_WAIT_TIME2__SCH_WAVE__SHIFT 0x8
2995#define CP_IQ_WAIT_TIME2__SEM_REARM_MASK 0xff0000
2996#define CP_IQ_WAIT_TIME2__SEM_REARM__SHIFT 0x10
2997#define CP_IQ_WAIT_TIME2__DEQ_RETRY_MASK 0xff000000
2998#define CP_IQ_WAIT_TIME2__DEQ_RETRY__SHIFT 0x18
2999#define CP_VMID_RESET__RESET_REQUEST_MASK 0xffff
3000#define CP_VMID_RESET__RESET_REQUEST__SHIFT 0x0
3001#define CP_VMID_RESET__RESET_STATUS_MASK 0xffff0000
3002#define CP_VMID_RESET__RESET_STATUS__SHIFT 0x10
3003#define CP_VMID_PREEMPT__PREEMPT_REQUEST_MASK 0xffff
3004#define CP_VMID_PREEMPT__PREEMPT_REQUEST__SHIFT 0x0
3005#define CP_VMID_PREEMPT__VIRT_COMMAND_MASK 0xf0000
3006#define CP_VMID_PREEMPT__VIRT_COMMAND__SHIFT 0x10
3007#define CP_VMID_STATUS__PREEMPT_DE_STATUS_MASK 0xffff
3008#define CP_VMID_STATUS__PREEMPT_DE_STATUS__SHIFT 0x0
3009#define CP_VMID_STATUS__PREEMPT_CE_STATUS_MASK 0xffff0000
3010#define CP_VMID_STATUS__PREEMPT_CE_STATUS__SHIFT 0x10
3011#define CPC_INT_CNTX_ID__CNTX_ID_MASK 0xfffffff
3012#define CPC_INT_CNTX_ID__CNTX_ID__SHIFT 0x0
3013#define CPC_INT_CNTX_ID__QUEUE_ID_MASK 0x70000000
3014#define CPC_INT_CNTX_ID__QUEUE_ID__SHIFT 0x1c
3015#define CP_PQ_STATUS__DOORBELL_UPDATED_MASK 0x1
3016#define CP_PQ_STATUS__DOORBELL_UPDATED__SHIFT 0x0
3017#define CP_PQ_STATUS__DOORBELL_ENABLE_MASK 0x2
3018#define CP_PQ_STATUS__DOORBELL_ENABLE__SHIFT 0x1
3019#define CP_CPC_IC_BASE_LO__IC_BASE_LO_MASK 0xfffff000
3020#define CP_CPC_IC_BASE_LO__IC_BASE_LO__SHIFT 0xc
3021#define CP_CPC_IC_BASE_HI__IC_BASE_HI_MASK 0xffff
3022#define CP_CPC_IC_BASE_HI__IC_BASE_HI__SHIFT 0x0
3023#define CP_CPC_IC_BASE_CNTL__VMID_MASK 0xf
3024#define CP_CPC_IC_BASE_CNTL__VMID__SHIFT 0x0
3025#define CP_CPC_IC_BASE_CNTL__ATC_MASK 0x800000
3026#define CP_CPC_IC_BASE_CNTL__ATC__SHIFT 0x17
3027#define CP_CPC_IC_BASE_CNTL__CACHE_POLICY_MASK 0x1000000
3028#define CP_CPC_IC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
3029#define CP_CPC_IC_BASE_CNTL__MTYPE_MASK 0x18000000
3030#define CP_CPC_IC_BASE_CNTL__MTYPE__SHIFT 0x1b
3031#define CP_CPC_IC_OP_CNTL__INVALIDATE_CACHE_MASK 0x1
3032#define CP_CPC_IC_OP_CNTL__INVALIDATE_CACHE__SHIFT 0x0
3033#define CP_CPC_IC_OP_CNTL__PRIME_ICACHE_MASK 0x10
3034#define CP_CPC_IC_OP_CNTL__PRIME_ICACHE__SHIFT 0x4
3035#define CP_CPC_IC_OP_CNTL__ICACHE_PRIMED_MASK 0x20
3036#define CP_CPC_IC_OP_CNTL__ICACHE_PRIMED__SHIFT 0x5
3037#define CP_CPC_STATUS__MEC1_BUSY_MASK 0x1
3038#define CP_CPC_STATUS__MEC1_BUSY__SHIFT 0x0
3039#define CP_CPC_STATUS__MEC2_BUSY_MASK 0x2
3040#define CP_CPC_STATUS__MEC2_BUSY__SHIFT 0x1
3041#define CP_CPC_STATUS__DC0_BUSY_MASK 0x4
3042#define CP_CPC_STATUS__DC0_BUSY__SHIFT 0x2
3043#define CP_CPC_STATUS__DC1_BUSY_MASK 0x8
3044#define CP_CPC_STATUS__DC1_BUSY__SHIFT 0x3
3045#define CP_CPC_STATUS__RCIU1_BUSY_MASK 0x10
3046#define CP_CPC_STATUS__RCIU1_BUSY__SHIFT 0x4
3047#define CP_CPC_STATUS__RCIU2_BUSY_MASK 0x20
3048#define CP_CPC_STATUS__RCIU2_BUSY__SHIFT 0x5
3049#define CP_CPC_STATUS__ROQ1_BUSY_MASK 0x40
3050#define CP_CPC_STATUS__ROQ1_BUSY__SHIFT 0x6
3051#define CP_CPC_STATUS__ROQ2_BUSY_MASK 0x80
3052#define CP_CPC_STATUS__ROQ2_BUSY__SHIFT 0x7
3053#define CP_CPC_STATUS__TCIU_BUSY_MASK 0x400
3054#define CP_CPC_STATUS__TCIU_BUSY__SHIFT 0xa
3055#define CP_CPC_STATUS__SCRATCH_RAM_BUSY_MASK 0x800
3056#define CP_CPC_STATUS__SCRATCH_RAM_BUSY__SHIFT 0xb
3057#define CP_CPC_STATUS__QU_BUSY_MASK 0x1000
3058#define CP_CPC_STATUS__QU_BUSY__SHIFT 0xc
3059#define CP_CPC_STATUS__ATCL2IU_BUSY_MASK 0x2000
3060#define CP_CPC_STATUS__ATCL2IU_BUSY__SHIFT 0xd
3061#define CP_CPC_STATUS__CPG_CPC_BUSY_MASK 0x20000000
3062#define CP_CPC_STATUS__CPG_CPC_BUSY__SHIFT 0x1d
3063#define CP_CPC_STATUS__CPF_CPC_BUSY_MASK 0x40000000
3064#define CP_CPC_STATUS__CPF_CPC_BUSY__SHIFT 0x1e
3065#define CP_CPC_STATUS__CPC_BUSY_MASK 0x80000000
3066#define CP_CPC_STATUS__CPC_BUSY__SHIFT 0x1f
3067#define CP_CPC_BUSY_STAT__MEC1_LOAD_BUSY_MASK 0x1
3068#define CP_CPC_BUSY_STAT__MEC1_LOAD_BUSY__SHIFT 0x0
3069#define CP_CPC_BUSY_STAT__MEC1_SEMAPOHRE_BUSY_MASK 0x2
3070#define CP_CPC_BUSY_STAT__MEC1_SEMAPOHRE_BUSY__SHIFT 0x1
3071#define CP_CPC_BUSY_STAT__MEC1_MUTEX_BUSY_MASK 0x4
3072#define CP_CPC_BUSY_STAT__MEC1_MUTEX_BUSY__SHIFT 0x2
3073#define CP_CPC_BUSY_STAT__MEC1_MESSAGE_BUSY_MASK 0x8
3074#define CP_CPC_BUSY_STAT__MEC1_MESSAGE_BUSY__SHIFT 0x3
3075#define CP_CPC_BUSY_STAT__MEC1_EOP_QUEUE_BUSY_MASK 0x10
3076#define CP_CPC_BUSY_STAT__MEC1_EOP_QUEUE_BUSY__SHIFT 0x4
3077#define CP_CPC_BUSY_STAT__MEC1_IQ_QUEUE_BUSY_MASK 0x20
3078#define CP_CPC_BUSY_STAT__MEC1_IQ_QUEUE_BUSY__SHIFT 0x5
3079#define CP_CPC_BUSY_STAT__MEC1_IB_QUEUE_BUSY_MASK 0x40
3080#define CP_CPC_BUSY_STAT__MEC1_IB_QUEUE_BUSY__SHIFT 0x6
3081#define CP_CPC_BUSY_STAT__MEC1_TC_BUSY_MASK 0x80
3082#define CP_CPC_BUSY_STAT__MEC1_TC_BUSY__SHIFT 0x7
3083#define CP_CPC_BUSY_STAT__MEC1_DMA_BUSY_MASK 0x100
3084#define CP_CPC_BUSY_STAT__MEC1_DMA_BUSY__SHIFT 0x8
3085#define CP_CPC_BUSY_STAT__MEC1_PARTIAL_FLUSH_BUSY_MASK 0x200
3086#define CP_CPC_BUSY_STAT__MEC1_PARTIAL_FLUSH_BUSY__SHIFT 0x9
3087#define CP_CPC_BUSY_STAT__MEC1_PIPE0_BUSY_MASK 0x400
3088#define CP_CPC_BUSY_STAT__MEC1_PIPE0_BUSY__SHIFT 0xa
3089#define CP_CPC_BUSY_STAT__MEC1_PIPE1_BUSY_MASK 0x800
3090#define CP_CPC_BUSY_STAT__MEC1_PIPE1_BUSY__SHIFT 0xb
3091#define CP_CPC_BUSY_STAT__MEC1_PIPE2_BUSY_MASK 0x1000
3092#define CP_CPC_BUSY_STAT__MEC1_PIPE2_BUSY__SHIFT 0xc
3093#define CP_CPC_BUSY_STAT__MEC1_PIPE3_BUSY_MASK 0x2000
3094#define CP_CPC_BUSY_STAT__MEC1_PIPE3_BUSY__SHIFT 0xd
3095#define CP_CPC_BUSY_STAT__MEC2_LOAD_BUSY_MASK 0x10000
3096#define CP_CPC_BUSY_STAT__MEC2_LOAD_BUSY__SHIFT 0x10
3097#define CP_CPC_BUSY_STAT__MEC2_SEMAPOHRE_BUSY_MASK 0x20000
3098#define CP_CPC_BUSY_STAT__MEC2_SEMAPOHRE_BUSY__SHIFT 0x11
3099#define CP_CPC_BUSY_STAT__MEC2_MUTEX_BUSY_MASK 0x40000
3100#define CP_CPC_BUSY_STAT__MEC2_MUTEX_BUSY__SHIFT 0x12
3101#define CP_CPC_BUSY_STAT__MEC2_MESSAGE_BUSY_MASK 0x80000
3102#define CP_CPC_BUSY_STAT__MEC2_MESSAGE_BUSY__SHIFT 0x13
3103#define CP_CPC_BUSY_STAT__MEC2_EOP_QUEUE_BUSY_MASK 0x100000
3104#define CP_CPC_BUSY_STAT__MEC2_EOP_QUEUE_BUSY__SHIFT 0x14
3105#define CP_CPC_BUSY_STAT__MEC2_IQ_QUEUE_BUSY_MASK 0x200000
3106#define CP_CPC_BUSY_STAT__MEC2_IQ_QUEUE_BUSY__SHIFT 0x15
3107#define CP_CPC_BUSY_STAT__MEC2_IB_QUEUE_BUSY_MASK 0x400000
3108#define CP_CPC_BUSY_STAT__MEC2_IB_QUEUE_BUSY__SHIFT 0x16
3109#define CP_CPC_BUSY_STAT__MEC2_TC_BUSY_MASK 0x800000
3110#define CP_CPC_BUSY_STAT__MEC2_TC_BUSY__SHIFT 0x17
3111#define CP_CPC_BUSY_STAT__MEC2_DMA_BUSY_MASK 0x1000000
3112#define CP_CPC_BUSY_STAT__MEC2_DMA_BUSY__SHIFT 0x18
3113#define CP_CPC_BUSY_STAT__MEC2_PARTIAL_FLUSH_BUSY_MASK 0x2000000
3114#define CP_CPC_BUSY_STAT__MEC2_PARTIAL_FLUSH_BUSY__SHIFT 0x19
3115#define CP_CPC_BUSY_STAT__MEC2_PIPE0_BUSY_MASK 0x4000000
3116#define CP_CPC_BUSY_STAT__MEC2_PIPE0_BUSY__SHIFT 0x1a
3117#define CP_CPC_BUSY_STAT__MEC2_PIPE1_BUSY_MASK 0x8000000
3118#define CP_CPC_BUSY_STAT__MEC2_PIPE1_BUSY__SHIFT 0x1b
3119#define CP_CPC_BUSY_STAT__MEC2_PIPE2_BUSY_MASK 0x10000000
3120#define CP_CPC_BUSY_STAT__MEC2_PIPE2_BUSY__SHIFT 0x1c
3121#define CP_CPC_BUSY_STAT__MEC2_PIPE3_BUSY_MASK 0x20000000
3122#define CP_CPC_BUSY_STAT__MEC2_PIPE3_BUSY__SHIFT 0x1d
3123#define CP_CPC_STALLED_STAT1__RCIU_TX_FREE_STALL_MASK 0x8
3124#define CP_CPC_STALLED_STAT1__RCIU_TX_FREE_STALL__SHIFT 0x3
3125#define CP_CPC_STALLED_STAT1__RCIU_PRIV_VIOLATION_MASK 0x10
3126#define CP_CPC_STALLED_STAT1__RCIU_PRIV_VIOLATION__SHIFT 0x4
3127#define CP_CPC_STALLED_STAT1__TCIU_TX_FREE_STALL_MASK 0x40
3128#define CP_CPC_STALLED_STAT1__TCIU_TX_FREE_STALL__SHIFT 0x6
3129#define CP_CPC_STALLED_STAT1__MEC1_DECODING_PACKET_MASK 0x100
3130#define CP_CPC_STALLED_STAT1__MEC1_DECODING_PACKET__SHIFT 0x8
3131#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_MASK 0x200
3132#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU__SHIFT 0x9
3133#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_READ_MASK 0x400
3134#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_READ__SHIFT 0xa
3135#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_ROQ_DATA_MASK 0x2000
3136#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_ROQ_DATA__SHIFT 0xd
3137#define CP_CPC_STALLED_STAT1__MEC2_DECODING_PACKET_MASK 0x10000
3138#define CP_CPC_STALLED_STAT1__MEC2_DECODING_PACKET__SHIFT 0x10
3139#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_MASK 0x20000
3140#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU__SHIFT 0x11
3141#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_READ_MASK 0x40000
3142#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_READ__SHIFT 0x12
3143#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_ROQ_DATA_MASK 0x200000
3144#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_ROQ_DATA__SHIFT 0x15
3145#define CP_CPC_STALLED_STAT1__ATCL2IU_WAITING_ON_FREE_MASK 0x400000
3146#define CP_CPC_STALLED_STAT1__ATCL2IU_WAITING_ON_FREE__SHIFT 0x16
3147#define CP_CPC_STALLED_STAT1__ATCL2IU_WAITING_ON_TAGS_MASK 0x800000
3148#define CP_CPC_STALLED_STAT1__ATCL2IU_WAITING_ON_TAGS__SHIFT 0x17
3149#define CP_CPC_STALLED_STAT1__ATCL1_WAITING_ON_TRANS_MASK 0x1000000
3150#define CP_CPC_STALLED_STAT1__ATCL1_WAITING_ON_TRANS__SHIFT 0x18
3151#define CP_CPF_STATUS__POST_WPTR_GFX_BUSY_MASK 0x1
3152#define CP_CPF_STATUS__POST_WPTR_GFX_BUSY__SHIFT 0x0
3153#define CP_CPF_STATUS__CSF_BUSY_MASK 0x2
3154#define CP_CPF_STATUS__CSF_BUSY__SHIFT 0x1
3155#define CP_CPF_STATUS__ROQ_ALIGN_BUSY_MASK 0x10
3156#define CP_CPF_STATUS__ROQ_ALIGN_BUSY__SHIFT 0x4
3157#define CP_CPF_STATUS__ROQ_RING_BUSY_MASK 0x20
3158#define CP_CPF_STATUS__ROQ_RING_BUSY__SHIFT 0x5
3159#define CP_CPF_STATUS__ROQ_INDIRECT1_BUSY_MASK 0x40
3160#define CP_CPF_STATUS__ROQ_INDIRECT1_BUSY__SHIFT 0x6
3161#define CP_CPF_STATUS__ROQ_INDIRECT2_BUSY_MASK 0x80
3162#define CP_CPF_STATUS__ROQ_INDIRECT2_BUSY__SHIFT 0x7
3163#define CP_CPF_STATUS__ROQ_STATE_BUSY_MASK 0x100
3164#define CP_CPF_STATUS__ROQ_STATE_BUSY__SHIFT 0x8
3165#define CP_CPF_STATUS__ROQ_CE_RING_BUSY_MASK 0x200
3166#define CP_CPF_STATUS__ROQ_CE_RING_BUSY__SHIFT 0x9
3167#define CP_CPF_STATUS__ROQ_CE_INDIRECT1_BUSY_MASK 0x400
3168#define CP_CPF_STATUS__ROQ_CE_INDIRECT1_BUSY__SHIFT 0xa
3169#define CP_CPF_STATUS__ROQ_CE_INDIRECT2_BUSY_MASK 0x800
3170#define CP_CPF_STATUS__ROQ_CE_INDIRECT2_BUSY__SHIFT 0xb
3171#define CP_CPF_STATUS__SEMAPHORE_BUSY_MASK 0x1000
3172#define CP_CPF_STATUS__SEMAPHORE_BUSY__SHIFT 0xc
3173#define CP_CPF_STATUS__INTERRUPT_BUSY_MASK 0x2000
3174#define CP_CPF_STATUS__INTERRUPT_BUSY__SHIFT 0xd
3175#define CP_CPF_STATUS__TCIU_BUSY_MASK 0x4000
3176#define CP_CPF_STATUS__TCIU_BUSY__SHIFT 0xe
3177#define CP_CPF_STATUS__HQD_BUSY_MASK 0x8000
3178#define CP_CPF_STATUS__HQD_BUSY__SHIFT 0xf
3179#define CP_CPF_STATUS__PRT_BUSY_MASK 0x10000
3180#define CP_CPF_STATUS__PRT_BUSY__SHIFT 0x10
3181#define CP_CPF_STATUS__ATCL2IU_BUSY_MASK 0x20000
3182#define CP_CPF_STATUS__ATCL2IU_BUSY__SHIFT 0x11
3183#define CP_CPF_STATUS__CPF_GFX_BUSY_MASK 0x4000000
3184#define CP_CPF_STATUS__CPF_GFX_BUSY__SHIFT 0x1a
3185#define CP_CPF_STATUS__CPF_CMP_BUSY_MASK 0x8000000
3186#define CP_CPF_STATUS__CPF_CMP_BUSY__SHIFT 0x1b
3187#define CP_CPF_STATUS__GRBM_CPF_STAT_BUSY_MASK 0x30000000
3188#define CP_CPF_STATUS__GRBM_CPF_STAT_BUSY__SHIFT 0x1c
3189#define CP_CPF_STATUS__CPC_CPF_BUSY_MASK 0x40000000
3190#define CP_CPF_STATUS__CPC_CPF_BUSY__SHIFT 0x1e
3191#define CP_CPF_STATUS__CPF_BUSY_MASK 0x80000000
3192#define CP_CPF_STATUS__CPF_BUSY__SHIFT 0x1f
3193#define CP_CPF_BUSY_STAT__REG_BUS_FIFO_BUSY_MASK 0x1
3194#define CP_CPF_BUSY_STAT__REG_BUS_FIFO_BUSY__SHIFT 0x0
3195#define CP_CPF_BUSY_STAT__CSF_RING_BUSY_MASK 0x2
3196#define CP_CPF_BUSY_STAT__CSF_RING_BUSY__SHIFT 0x1
3197#define CP_CPF_BUSY_STAT__CSF_INDIRECT1_BUSY_MASK 0x4
3198#define CP_CPF_BUSY_STAT__CSF_INDIRECT1_BUSY__SHIFT 0x2
3199#define CP_CPF_BUSY_STAT__CSF_INDIRECT2_BUSY_MASK 0x8
3200#define CP_CPF_BUSY_STAT__CSF_INDIRECT2_BUSY__SHIFT 0x3
3201#define CP_CPF_BUSY_STAT__CSF_STATE_BUSY_MASK 0x10
3202#define CP_CPF_BUSY_STAT__CSF_STATE_BUSY__SHIFT 0x4
3203#define CP_CPF_BUSY_STAT__CSF_CE_INDR1_BUSY_MASK 0x20
3204#define CP_CPF_BUSY_STAT__CSF_CE_INDR1_BUSY__SHIFT 0x5
3205#define CP_CPF_BUSY_STAT__CSF_CE_INDR2_BUSY_MASK 0x40
3206#define CP_CPF_BUSY_STAT__CSF_CE_INDR2_BUSY__SHIFT 0x6
3207#define CP_CPF_BUSY_STAT__CSF_ARBITER_BUSY_MASK 0x80
3208#define CP_CPF_BUSY_STAT__CSF_ARBITER_BUSY__SHIFT 0x7
3209#define CP_CPF_BUSY_STAT__CSF_INPUT_BUSY_MASK 0x100
3210#define CP_CPF_BUSY_STAT__CSF_INPUT_BUSY__SHIFT 0x8
3211#define CP_CPF_BUSY_STAT__OUTSTANDING_READ_TAGS_MASK 0x200
3212#define CP_CPF_BUSY_STAT__OUTSTANDING_READ_TAGS__SHIFT 0x9
3213#define CP_CPF_BUSY_STAT__HPD_PROCESSING_EOP_BUSY_MASK 0x800
3214#define CP_CPF_BUSY_STAT__HPD_PROCESSING_EOP_BUSY__SHIFT 0xb
3215#define CP_CPF_BUSY_STAT__HQD_DISPATCH_BUSY_MASK 0x1000
3216#define CP_CPF_BUSY_STAT__HQD_DISPATCH_BUSY__SHIFT 0xc
3217#define CP_CPF_BUSY_STAT__HQD_IQ_TIMER_BUSY_MASK 0x2000
3218#define CP_CPF_BUSY_STAT__HQD_IQ_TIMER_BUSY__SHIFT 0xd
3219#define CP_CPF_BUSY_STAT__HQD_DMA_OFFLOAD_BUSY_MASK 0x4000
3220#define CP_CPF_BUSY_STAT__HQD_DMA_OFFLOAD_BUSY__SHIFT 0xe
3221#define CP_CPF_BUSY_STAT__HQD_WAIT_SEMAPHORE_BUSY_MASK 0x8000
3222#define CP_CPF_BUSY_STAT__HQD_WAIT_SEMAPHORE_BUSY__SHIFT 0xf
3223#define CP_CPF_BUSY_STAT__HQD_SIGNAL_SEMAPHORE_BUSY_MASK 0x10000
3224#define CP_CPF_BUSY_STAT__HQD_SIGNAL_SEMAPHORE_BUSY__SHIFT 0x10
3225#define CP_CPF_BUSY_STAT__HQD_MESSAGE_BUSY_MASK 0x20000
3226#define CP_CPF_BUSY_STAT__HQD_MESSAGE_BUSY__SHIFT 0x11
3227#define CP_CPF_BUSY_STAT__HQD_PQ_FETCHER_BUSY_MASK 0x40000
3228#define CP_CPF_BUSY_STAT__HQD_PQ_FETCHER_BUSY__SHIFT 0x12
3229#define CP_CPF_BUSY_STAT__HQD_IB_FETCHER_BUSY_MASK 0x80000
3230#define CP_CPF_BUSY_STAT__HQD_IB_FETCHER_BUSY__SHIFT 0x13
3231#define CP_CPF_BUSY_STAT__HQD_IQ_FETCHER_BUSY_MASK 0x100000
3232#define CP_CPF_BUSY_STAT__HQD_IQ_FETCHER_BUSY__SHIFT 0x14
3233#define CP_CPF_BUSY_STAT__HQD_EOP_FETCHER_BUSY_MASK 0x200000
3234#define CP_CPF_BUSY_STAT__HQD_EOP_FETCHER_BUSY__SHIFT 0x15
3235#define CP_CPF_BUSY_STAT__HQD_CONSUMED_RPTR_BUSY_MASK 0x400000
3236#define CP_CPF_BUSY_STAT__HQD_CONSUMED_RPTR_BUSY__SHIFT 0x16
3237#define CP_CPF_BUSY_STAT__HQD_FETCHER_ARB_BUSY_MASK 0x800000
3238#define CP_CPF_BUSY_STAT__HQD_FETCHER_ARB_BUSY__SHIFT 0x17
3239#define CP_CPF_BUSY_STAT__HQD_ROQ_ALIGN_BUSY_MASK 0x1000000
3240#define CP_CPF_BUSY_STAT__HQD_ROQ_ALIGN_BUSY__SHIFT 0x18
3241#define CP_CPF_BUSY_STAT__HQD_ROQ_EOP_BUSY_MASK 0x2000000
3242#define CP_CPF_BUSY_STAT__HQD_ROQ_EOP_BUSY__SHIFT 0x19
3243#define CP_CPF_BUSY_STAT__HQD_ROQ_IQ_BUSY_MASK 0x4000000
/*
 * GFX register bit-field definitions (auto-generated style: one _MASK and
 * one __SHIFT constant per field).  Invariant: for every field,
 * MASK == field_width_bits << SHIFT.  Fixed here: stray source-line numbers
 * that had been fused onto each "#define" by a web extraction artifact,
 * which made the header invalid C.  Macro names and values are unchanged.
 */
#define CP_CPF_BUSY_STAT__HQD_ROQ_IQ_BUSY__SHIFT 0x1a
#define CP_CPF_BUSY_STAT__HQD_ROQ_PQ_BUSY_MASK 0x8000000
#define CP_CPF_BUSY_STAT__HQD_ROQ_PQ_BUSY__SHIFT 0x1b
#define CP_CPF_BUSY_STAT__HQD_ROQ_IB_BUSY_MASK 0x10000000
#define CP_CPF_BUSY_STAT__HQD_ROQ_IB_BUSY__SHIFT 0x1c
#define CP_CPF_BUSY_STAT__HQD_WPTR_POLL_BUSY_MASK 0x20000000
#define CP_CPF_BUSY_STAT__HQD_WPTR_POLL_BUSY__SHIFT 0x1d
#define CP_CPF_BUSY_STAT__HQD_PQ_BUSY_MASK 0x40000000
#define CP_CPF_BUSY_STAT__HQD_PQ_BUSY__SHIFT 0x1e
#define CP_CPF_BUSY_STAT__HQD_IB_BUSY_MASK 0x80000000
#define CP_CPF_BUSY_STAT__HQD_IB_BUSY__SHIFT 0x1f
#define CP_CPF_STALLED_STAT1__RING_FETCHING_DATA_MASK 0x1
#define CP_CPF_STALLED_STAT1__RING_FETCHING_DATA__SHIFT 0x0
#define CP_CPF_STALLED_STAT1__INDR1_FETCHING_DATA_MASK 0x2
#define CP_CPF_STALLED_STAT1__INDR1_FETCHING_DATA__SHIFT 0x1
#define CP_CPF_STALLED_STAT1__INDR2_FETCHING_DATA_MASK 0x4
#define CP_CPF_STALLED_STAT1__INDR2_FETCHING_DATA__SHIFT 0x2
#define CP_CPF_STALLED_STAT1__STATE_FETCHING_DATA_MASK 0x8
#define CP_CPF_STALLED_STAT1__STATE_FETCHING_DATA__SHIFT 0x3
#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_FREE_MASK 0x20
#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_FREE__SHIFT 0x5
#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_TAGS_MASK 0x40
#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_TAGS__SHIFT 0x6
#define CP_CPF_STALLED_STAT1__ATCL2IU_WAITING_ON_FREE_MASK 0x80
#define CP_CPF_STALLED_STAT1__ATCL2IU_WAITING_ON_FREE__SHIFT 0x7
#define CP_CPF_STALLED_STAT1__ATCL2IU_WAITING_ON_TAGS_MASK 0x100
#define CP_CPF_STALLED_STAT1__ATCL2IU_WAITING_ON_TAGS__SHIFT 0x8
#define CP_CPF_STALLED_STAT1__ATCL1_WAITING_ON_TRANS_MASK 0x200
#define CP_CPF_STALLED_STAT1__ATCL1_WAITING_ON_TRANS__SHIFT 0x9
#define CP_CPC_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x3f
#define CP_CPC_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x0
#define CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK 0x10
#define CP_MEC_CNTL__MEC_INVALIDATE_ICACHE__SHIFT 0x4
#define CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK 0x10000
#define CP_MEC_CNTL__MEC_ME1_PIPE0_RESET__SHIFT 0x10
#define CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK 0x20000
#define CP_MEC_CNTL__MEC_ME1_PIPE1_RESET__SHIFT 0x11
#define CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK 0x40000
#define CP_MEC_CNTL__MEC_ME1_PIPE2_RESET__SHIFT 0x12
#define CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK 0x80000
#define CP_MEC_CNTL__MEC_ME1_PIPE3_RESET__SHIFT 0x13
#define CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK 0x100000
#define CP_MEC_CNTL__MEC_ME2_PIPE0_RESET__SHIFT 0x14
#define CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK 0x200000
#define CP_MEC_CNTL__MEC_ME2_PIPE1_RESET__SHIFT 0x15
#define CP_MEC_CNTL__MEC_ME2_HALT_MASK 0x10000000
#define CP_MEC_CNTL__MEC_ME2_HALT__SHIFT 0x1c
#define CP_MEC_CNTL__MEC_ME2_STEP_MASK 0x20000000
#define CP_MEC_CNTL__MEC_ME2_STEP__SHIFT 0x1d
#define CP_MEC_CNTL__MEC_ME1_HALT_MASK 0x40000000
#define CP_MEC_CNTL__MEC_ME1_HALT__SHIFT 0x1e
#define CP_MEC_CNTL__MEC_ME1_STEP_MASK 0x80000000
#define CP_MEC_CNTL__MEC_ME1_STEP__SHIFT 0x1f
#define CP_MEC_ME1_HEADER_DUMP__HEADER_DUMP_MASK 0xffffffff
#define CP_MEC_ME1_HEADER_DUMP__HEADER_DUMP__SHIFT 0x0
#define CP_MEC_ME2_HEADER_DUMP__HEADER_DUMP_MASK 0xffffffff
#define CP_MEC_ME2_HEADER_DUMP__HEADER_DUMP__SHIFT 0x0
#define CP_CPC_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0x1ff
#define CP_CPC_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x0
#define CP_CPC_SCRATCH_DATA__SCRATCH_DATA_MASK 0xffffffff
#define CP_CPC_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x0
#define CPG_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x3f
#define CPG_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
#define CPG_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define CPG_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define CPG_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define CPG_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define CPG_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x3f
#define CPG_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
#define CPG_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0xfc00
#define CPG_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
#define CPG_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x3f
#define CPG_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
#define CPG_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0xfc00
#define CPG_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0xf00000
#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
#define CPG_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define CPG_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define CPG_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define CPG_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define CPC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x3f
#define CPC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
#define CPC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define CPC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define CPC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define CPC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define CPC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x3f
#define CPC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
#define CPC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0xfc00
#define CPC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
#define CPC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x3f
#define CPC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
#define CPC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0xfc00
#define CPC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0xf00000
#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
#define CPC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define CPC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define CPC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define CPC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define CPF_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x3f
#define CPF_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
#define CPF_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define CPF_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define CPF_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define CPF_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define CPF_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x3f
#define CPF_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
#define CPF_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0xfc00
#define CPF_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
#define CPF_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x3f
#define CPF_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
#define CPF_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0xfc00
#define CPF_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0xf00000
#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
#define CPF_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define CPF_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define CPF_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define CPF_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define CP_CPC_HALT_HYST_COUNT__COUNT_MASK 0xf
#define CP_CPC_HALT_HYST_COUNT__COUNT__SHIFT 0x0
#define CP_DRAW_OBJECT__OBJECT_MASK 0xffffffff
#define CP_DRAW_OBJECT__OBJECT__SHIFT 0x0
#define CP_DRAW_OBJECT_COUNTER__COUNT_MASK 0xffff
#define CP_DRAW_OBJECT_COUNTER__COUNT__SHIFT 0x0
#define CP_DRAW_WINDOW_MASK_HI__WINDOW_MASK_HI_MASK 0xffffffff
#define CP_DRAW_WINDOW_MASK_HI__WINDOW_MASK_HI__SHIFT 0x0
#define CP_DRAW_WINDOW_HI__WINDOW_HI_MASK 0xffffffff
#define CP_DRAW_WINDOW_HI__WINDOW_HI__SHIFT 0x0
#define CP_DRAW_WINDOW_LO__MIN_MASK 0xffff
#define CP_DRAW_WINDOW_LO__MIN__SHIFT 0x0
#define CP_DRAW_WINDOW_LO__MAX_MASK 0xffff0000
#define CP_DRAW_WINDOW_LO__MAX__SHIFT 0x10
#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MAX_MASK 0x1
#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MAX__SHIFT 0x0
#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MIN_MASK 0x2
#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MIN__SHIFT 0x1
#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_HI_MASK 0x4
#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_HI__SHIFT 0x2
#define CP_DRAW_WINDOW_CNTL__MODE_MASK 0x100
#define CP_DRAW_WINDOW_CNTL__MODE__SHIFT 0x8
#define CP_PRT_LOD_STATS_CNTL0__BU_SIZE_MASK 0xffffffff
#define CP_PRT_LOD_STATS_CNTL0__BU_SIZE__SHIFT 0x0
#define CP_PRT_LOD_STATS_CNTL1__BASE_LO_MASK 0xffffffff
#define CP_PRT_LOD_STATS_CNTL1__BASE_LO__SHIFT 0x0
#define CP_PRT_LOD_STATS_CNTL2__BASE_HI_MASK 0x3
#define CP_PRT_LOD_STATS_CNTL2__BASE_HI__SHIFT 0x0
#define CP_PRT_LOD_STATS_CNTL2__INTERVAL_MASK 0x3fc
#define CP_PRT_LOD_STATS_CNTL2__INTERVAL__SHIFT 0x2
#define CP_PRT_LOD_STATS_CNTL2__RESET_CNT_MASK 0x3fc00
#define CP_PRT_LOD_STATS_CNTL2__RESET_CNT__SHIFT 0xa
#define CP_PRT_LOD_STATS_CNTL2__RESET_FORCE_MASK 0x40000
#define CP_PRT_LOD_STATS_CNTL2__RESET_FORCE__SHIFT 0x12
#define CP_PRT_LOD_STATS_CNTL2__REPORT_AND_RESET_MASK 0x80000
#define CP_PRT_LOD_STATS_CNTL2__REPORT_AND_RESET__SHIFT 0x13
#define CP_PRT_LOD_STATS_CNTL2__MC_VMID_MASK 0x7800000
#define CP_PRT_LOD_STATS_CNTL2__MC_VMID__SHIFT 0x17
#define CP_PRT_LOD_STATS_CNTL2__CACHE_POLICY_MASK 0x10000000
#define CP_PRT_LOD_STATS_CNTL2__CACHE_POLICY__SHIFT 0x1c
#define CP_PRT_LOD_STATS_CNTL2__MTYPE_MASK 0xc0000000
#define CP_PRT_LOD_STATS_CNTL2__MTYPE__SHIFT 0x1e
#define CP_CE_COMPARE_COUNT__COMPARE_COUNT_MASK 0xffffffff
#define CP_CE_COMPARE_COUNT__COMPARE_COUNT__SHIFT 0x0
#define CP_CE_DE_COUNT__DRAW_ENGINE_COUNT_MASK 0xffffffff
#define CP_CE_DE_COUNT__DRAW_ENGINE_COUNT__SHIFT 0x0
#define CP_DE_CE_COUNT__CONST_ENGINE_COUNT_MASK 0xffffffff
#define CP_DE_CE_COUNT__CONST_ENGINE_COUNT__SHIFT 0x0
#define CP_DE_LAST_INVAL_COUNT__LAST_INVAL_COUNT_MASK 0xffffffff
#define CP_DE_LAST_INVAL_COUNT__LAST_INVAL_COUNT__SHIFT 0x0
#define CP_DE_DE_COUNT__DRAW_ENGINE_COUNT_MASK 0xffffffff
#define CP_DE_DE_COUNT__DRAW_ENGINE_COUNT__SHIFT 0x0
#define CP_EOP_DONE_EVENT_CNTL__WBINV_TC_OP_MASK 0x7f
#define CP_EOP_DONE_EVENT_CNTL__WBINV_TC_OP__SHIFT 0x0
#define CP_EOP_DONE_EVENT_CNTL__WBINV_ACTION_ENA_MASK 0x3f000
#define CP_EOP_DONE_EVENT_CNTL__WBINV_ACTION_ENA__SHIFT 0xc
#define CP_EOP_DONE_EVENT_CNTL__CACHE_CONTROL_MASK 0x2000000
#define CP_EOP_DONE_EVENT_CNTL__CACHE_CONTROL__SHIFT 0x19
#define CP_EOP_DONE_EVENT_CNTL__MTYPE_MASK 0x18000000
#define CP_EOP_DONE_EVENT_CNTL__MTYPE__SHIFT 0x1b
#define CP_EOP_DONE_DATA_CNTL__CNTX_ID_MASK 0xffff
#define CP_EOP_DONE_DATA_CNTL__CNTX_ID__SHIFT 0x0
#define CP_EOP_DONE_DATA_CNTL__DST_SEL_MASK 0x30000
#define CP_EOP_DONE_DATA_CNTL__DST_SEL__SHIFT 0x10
#define CP_EOP_DONE_DATA_CNTL__INT_SEL_MASK 0x7000000
#define CP_EOP_DONE_DATA_CNTL__INT_SEL__SHIFT 0x18
#define CP_EOP_DONE_DATA_CNTL__DATA_SEL_MASK 0xe0000000
#define CP_EOP_DONE_DATA_CNTL__DATA_SEL__SHIFT 0x1d
#define CP_EOP_DONE_CNTX_ID__CNTX_ID_MASK 0xfffffff
#define CP_EOP_DONE_CNTX_ID__CNTX_ID__SHIFT 0x0
#define CP_EOP_DONE_ADDR_LO__ADDR_LO_MASK 0xfffffffc
#define CP_EOP_DONE_ADDR_LO__ADDR_LO__SHIFT 0x2
#define CP_EOP_DONE_ADDR_HI__ADDR_HI_MASK 0xffff
#define CP_EOP_DONE_ADDR_HI__ADDR_HI__SHIFT 0x0
#define CP_EOP_DONE_DATA_LO__DATA_LO_MASK 0xffffffff
#define CP_EOP_DONE_DATA_LO__DATA_LO__SHIFT 0x0
#define CP_EOP_DONE_DATA_HI__DATA_HI_MASK 0xffffffff
#define CP_EOP_DONE_DATA_HI__DATA_HI__SHIFT 0x0
#define CP_EOP_LAST_FENCE_LO__LAST_FENCE_LO_MASK 0xffffffff
#define CP_EOP_LAST_FENCE_LO__LAST_FENCE_LO__SHIFT 0x0
#define CP_EOP_LAST_FENCE_HI__LAST_FENCE_HI_MASK 0xffffffff
#define CP_EOP_LAST_FENCE_HI__LAST_FENCE_HI__SHIFT 0x0
#define CP_STREAM_OUT_ADDR_LO__STREAM_OUT_ADDR_LO_MASK 0xfffffffc
#define CP_STREAM_OUT_ADDR_LO__STREAM_OUT_ADDR_LO__SHIFT 0x2
#define CP_STREAM_OUT_ADDR_HI__STREAM_OUT_ADDR_HI_MASK 0xffff
#define CP_STREAM_OUT_ADDR_HI__STREAM_OUT_ADDR_HI__SHIFT 0x0
#define CP_NUM_PRIM_WRITTEN_COUNT0_LO__NUM_PRIM_WRITTEN_CNT0_LO_MASK 0xffffffff
#define CP_NUM_PRIM_WRITTEN_COUNT0_LO__NUM_PRIM_WRITTEN_CNT0_LO__SHIFT 0x0
#define CP_NUM_PRIM_WRITTEN_COUNT0_HI__NUM_PRIM_WRITTEN_CNT0_HI_MASK 0xffffffff
#define CP_NUM_PRIM_WRITTEN_COUNT0_HI__NUM_PRIM_WRITTEN_CNT0_HI__SHIFT 0x0
#define CP_NUM_PRIM_NEEDED_COUNT0_LO__NUM_PRIM_NEEDED_CNT0_LO_MASK 0xffffffff
#define CP_NUM_PRIM_NEEDED_COUNT0_LO__NUM_PRIM_NEEDED_CNT0_LO__SHIFT 0x0
#define CP_NUM_PRIM_NEEDED_COUNT0_HI__NUM_PRIM_NEEDED_CNT0_HI_MASK 0xffffffff
#define CP_NUM_PRIM_NEEDED_COUNT0_HI__NUM_PRIM_NEEDED_CNT0_HI__SHIFT 0x0
#define CP_NUM_PRIM_WRITTEN_COUNT1_LO__NUM_PRIM_WRITTEN_CNT1_LO_MASK 0xffffffff
#define CP_NUM_PRIM_WRITTEN_COUNT1_LO__NUM_PRIM_WRITTEN_CNT1_LO__SHIFT 0x0
#define CP_NUM_PRIM_WRITTEN_COUNT1_HI__NUM_PRIM_WRITTEN_CNT1_HI_MASK 0xffffffff
#define CP_NUM_PRIM_WRITTEN_COUNT1_HI__NUM_PRIM_WRITTEN_CNT1_HI__SHIFT 0x0
#define CP_NUM_PRIM_NEEDED_COUNT1_LO__NUM_PRIM_NEEDED_CNT1_LO_MASK 0xffffffff
#define CP_NUM_PRIM_NEEDED_COUNT1_LO__NUM_PRIM_NEEDED_CNT1_LO__SHIFT 0x0
#define CP_NUM_PRIM_NEEDED_COUNT1_HI__NUM_PRIM_NEEDED_CNT1_HI_MASK 0xffffffff
#define CP_NUM_PRIM_NEEDED_COUNT1_HI__NUM_PRIM_NEEDED_CNT1_HI__SHIFT 0x0
#define CP_NUM_PRIM_WRITTEN_COUNT2_LO__NUM_PRIM_WRITTEN_CNT2_LO_MASK 0xffffffff
#define CP_NUM_PRIM_WRITTEN_COUNT2_LO__NUM_PRIM_WRITTEN_CNT2_LO__SHIFT 0x0
#define CP_NUM_PRIM_WRITTEN_COUNT2_HI__NUM_PRIM_WRITTEN_CNT2_HI_MASK 0xffffffff
#define CP_NUM_PRIM_WRITTEN_COUNT2_HI__NUM_PRIM_WRITTEN_CNT2_HI__SHIFT 0x0
#define CP_NUM_PRIM_NEEDED_COUNT2_LO__NUM_PRIM_NEEDED_CNT2_LO_MASK 0xffffffff
#define CP_NUM_PRIM_NEEDED_COUNT2_LO__NUM_PRIM_NEEDED_CNT2_LO__SHIFT 0x0
#define CP_NUM_PRIM_NEEDED_COUNT2_HI__NUM_PRIM_NEEDED_CNT2_HI_MASK 0xffffffff
#define CP_NUM_PRIM_NEEDED_COUNT2_HI__NUM_PRIM_NEEDED_CNT2_HI__SHIFT 0x0
#define CP_NUM_PRIM_WRITTEN_COUNT3_LO__NUM_PRIM_WRITTEN_CNT3_LO_MASK 0xffffffff
#define CP_NUM_PRIM_WRITTEN_COUNT3_LO__NUM_PRIM_WRITTEN_CNT3_LO__SHIFT 0x0
#define CP_NUM_PRIM_WRITTEN_COUNT3_HI__NUM_PRIM_WRITTEN_CNT3_HI_MASK 0xffffffff
#define CP_NUM_PRIM_WRITTEN_COUNT3_HI__NUM_PRIM_WRITTEN_CNT3_HI__SHIFT 0x0
#define CP_NUM_PRIM_NEEDED_COUNT3_LO__NUM_PRIM_NEEDED_CNT3_LO_MASK 0xffffffff
#define CP_NUM_PRIM_NEEDED_COUNT3_LO__NUM_PRIM_NEEDED_CNT3_LO__SHIFT 0x0
#define CP_NUM_PRIM_NEEDED_COUNT3_HI__NUM_PRIM_NEEDED_CNT3_HI_MASK 0xffffffff
#define CP_NUM_PRIM_NEEDED_COUNT3_HI__NUM_PRIM_NEEDED_CNT3_HI__SHIFT 0x0
#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_LO_MASK 0xfffffffc
#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_LO__SHIFT 0x2
#define CP_PIPE_STATS_ADDR_HI__PIPE_STATS_ADDR_HI_MASK 0xffff
#define CP_PIPE_STATS_ADDR_HI__PIPE_STATS_ADDR_HI__SHIFT 0x0
#define CP_VGT_IAVERT_COUNT_LO__IAVERT_COUNT_LO_MASK 0xffffffff
#define CP_VGT_IAVERT_COUNT_LO__IAVERT_COUNT_LO__SHIFT 0x0
#define CP_VGT_IAVERT_COUNT_HI__IAVERT_COUNT_HI_MASK 0xffffffff
#define CP_VGT_IAVERT_COUNT_HI__IAVERT_COUNT_HI__SHIFT 0x0
#define CP_VGT_IAPRIM_COUNT_LO__IAPRIM_COUNT_LO_MASK 0xffffffff
#define CP_VGT_IAPRIM_COUNT_LO__IAPRIM_COUNT_LO__SHIFT 0x0
#define CP_VGT_IAPRIM_COUNT_HI__IAPRIM_COUNT_HI_MASK 0xffffffff
#define CP_VGT_IAPRIM_COUNT_HI__IAPRIM_COUNT_HI__SHIFT 0x0
#define CP_VGT_GSPRIM_COUNT_LO__GSPRIM_COUNT_LO_MASK 0xffffffff
#define CP_VGT_GSPRIM_COUNT_LO__GSPRIM_COUNT_LO__SHIFT 0x0
#define CP_VGT_GSPRIM_COUNT_HI__GSPRIM_COUNT_HI_MASK 0xffffffff
#define CP_VGT_GSPRIM_COUNT_HI__GSPRIM_COUNT_HI__SHIFT 0x0
#define CP_VGT_VSINVOC_COUNT_LO__VSINVOC_COUNT_LO_MASK 0xffffffff
#define CP_VGT_VSINVOC_COUNT_LO__VSINVOC_COUNT_LO__SHIFT 0x0
#define CP_VGT_VSINVOC_COUNT_HI__VSINVOC_COUNT_HI_MASK 0xffffffff
#define CP_VGT_VSINVOC_COUNT_HI__VSINVOC_COUNT_HI__SHIFT 0x0
#define CP_VGT_GSINVOC_COUNT_LO__GSINVOC_COUNT_LO_MASK 0xffffffff
#define CP_VGT_GSINVOC_COUNT_LO__GSINVOC_COUNT_LO__SHIFT 0x0
#define CP_VGT_GSINVOC_COUNT_HI__GSINVOC_COUNT_HI_MASK 0xffffffff
#define CP_VGT_GSINVOC_COUNT_HI__GSINVOC_COUNT_HI__SHIFT 0x0
#define CP_VGT_HSINVOC_COUNT_LO__HSINVOC_COUNT_LO_MASK 0xffffffff
#define CP_VGT_HSINVOC_COUNT_LO__HSINVOC_COUNT_LO__SHIFT 0x0
#define CP_VGT_HSINVOC_COUNT_HI__HSINVOC_COUNT_HI_MASK 0xffffffff
#define CP_VGT_HSINVOC_COUNT_HI__HSINVOC_COUNT_HI__SHIFT 0x0
#define CP_VGT_DSINVOC_COUNT_LO__DSINVOC_COUNT_LO_MASK 0xffffffff
#define CP_VGT_DSINVOC_COUNT_LO__DSINVOC_COUNT_LO__SHIFT 0x0
#define CP_VGT_DSINVOC_COUNT_HI__DSINVOC_COUNT_HI_MASK 0xffffffff
#define CP_VGT_DSINVOC_COUNT_HI__DSINVOC_COUNT_HI__SHIFT 0x0
#define CP_PA_CINVOC_COUNT_LO__CINVOC_COUNT_LO_MASK 0xffffffff
#define CP_PA_CINVOC_COUNT_LO__CINVOC_COUNT_LO__SHIFT 0x0
#define CP_PA_CINVOC_COUNT_HI__CINVOC_COUNT_HI_MASK 0xffffffff
#define CP_PA_CINVOC_COUNT_HI__CINVOC_COUNT_HI__SHIFT 0x0
#define CP_PA_CPRIM_COUNT_LO__CPRIM_COUNT_LO_MASK 0xffffffff
#define CP_PA_CPRIM_COUNT_LO__CPRIM_COUNT_LO__SHIFT 0x0
#define CP_PA_CPRIM_COUNT_HI__CPRIM_COUNT_HI_MASK 0xffffffff
#define CP_PA_CPRIM_COUNT_HI__CPRIM_COUNT_HI__SHIFT 0x0
#define CP_SC_PSINVOC_COUNT0_LO__PSINVOC_COUNT0_LO_MASK 0xffffffff
#define CP_SC_PSINVOC_COUNT0_LO__PSINVOC_COUNT0_LO__SHIFT 0x0
#define CP_SC_PSINVOC_COUNT0_HI__PSINVOC_COUNT0_HI_MASK 0xffffffff
#define CP_SC_PSINVOC_COUNT0_HI__PSINVOC_COUNT0_HI__SHIFT 0x0
#define CP_SC_PSINVOC_COUNT1_LO__OBSOLETE_MASK 0xffffffff
#define CP_SC_PSINVOC_COUNT1_LO__OBSOLETE__SHIFT 0x0
#define CP_SC_PSINVOC_COUNT1_HI__OBSOLETE_MASK 0xffffffff
#define CP_SC_PSINVOC_COUNT1_HI__OBSOLETE__SHIFT 0x0
#define CP_VGT_CSINVOC_COUNT_LO__CSINVOC_COUNT_LO_MASK 0xffffffff
#define CP_VGT_CSINVOC_COUNT_LO__CSINVOC_COUNT_LO__SHIFT 0x0
#define CP_VGT_CSINVOC_COUNT_HI__CSINVOC_COUNT_HI_MASK 0xffffffff
#define CP_VGT_CSINVOC_COUNT_HI__CSINVOC_COUNT_HI__SHIFT 0x0
#define CP_PIPE_STATS_CONTROL__CACHE_CONTROL_MASK 0x2000000
#define CP_PIPE_STATS_CONTROL__CACHE_CONTROL__SHIFT 0x19
#define CP_PIPE_STATS_CONTROL__MTYPE_MASK 0x18000000
#define CP_PIPE_STATS_CONTROL__MTYPE__SHIFT 0x1b
#define CP_STREAM_OUT_CONTROL__CACHE_CONTROL_MASK 0x2000000
#define CP_STREAM_OUT_CONTROL__CACHE_CONTROL__SHIFT 0x19
#define CP_STREAM_OUT_CONTROL__MTYPE_MASK 0x18000000
#define CP_STREAM_OUT_CONTROL__MTYPE__SHIFT 0x1b
#define CP_STRMOUT_CNTL__OFFSET_UPDATE_DONE_MASK 0x1
#define CP_STRMOUT_CNTL__OFFSET_UPDATE_DONE__SHIFT 0x0
#define SCRATCH_REG0__SCRATCH_REG0_MASK 0xffffffff
#define SCRATCH_REG0__SCRATCH_REG0__SHIFT 0x0
#define SCRATCH_REG1__SCRATCH_REG1_MASK 0xffffffff
#define SCRATCH_REG1__SCRATCH_REG1__SHIFT 0x0
#define SCRATCH_REG2__SCRATCH_REG2_MASK 0xffffffff
#define SCRATCH_REG2__SCRATCH_REG2__SHIFT 0x0
#define SCRATCH_REG3__SCRATCH_REG3_MASK 0xffffffff
#define SCRATCH_REG3__SCRATCH_REG3__SHIFT 0x0
#define SCRATCH_REG4__SCRATCH_REG4_MASK 0xffffffff
#define SCRATCH_REG4__SCRATCH_REG4__SHIFT 0x0
#define SCRATCH_REG5__SCRATCH_REG5_MASK 0xffffffff
#define SCRATCH_REG5__SCRATCH_REG5__SHIFT 0x0
#define SCRATCH_REG6__SCRATCH_REG6_MASK 0xffffffff
#define SCRATCH_REG6__SCRATCH_REG6__SHIFT 0x0
#define SCRATCH_REG7__SCRATCH_REG7_MASK 0xffffffff
#define SCRATCH_REG7__SCRATCH_REG7__SHIFT 0x0
#define SCRATCH_UMSK__OBSOLETE_UMSK_MASK 0xff
#define SCRATCH_UMSK__OBSOLETE_UMSK__SHIFT 0x0
#define SCRATCH_UMSK__OBSOLETE_SWAP_MASK 0x30000
#define SCRATCH_UMSK__OBSOLETE_SWAP__SHIFT 0x10
#define SCRATCH_ADDR__OBSOLETE_ADDR_MASK 0xffffffff
#define SCRATCH_ADDR__OBSOLETE_ADDR__SHIFT 0x0
#define CP_PFP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xffffffff
#define CP_PFP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
#define CP_PFP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xffffffff
#define CP_PFP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
#define CP_PFP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xffffffff
#define CP_PFP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
#define CP_PFP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xffffffff
#define CP_PFP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
#define CP_PFP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xffffffff
#define CP_PFP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
#define CP_PFP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xffffffff
#define CP_PFP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
#define CP_APPEND_ADDR_LO__MEM_ADDR_LO_MASK 0xfffffffc
#define CP_APPEND_ADDR_LO__MEM_ADDR_LO__SHIFT 0x2
#define CP_APPEND_ADDR_HI__MEM_ADDR_HI_MASK 0xffff
#define CP_APPEND_ADDR_HI__MEM_ADDR_HI__SHIFT 0x0
#define CP_APPEND_ADDR_HI__CS_PS_SEL_MASK 0x10000
#define CP_APPEND_ADDR_HI__CS_PS_SEL__SHIFT 0x10
#define CP_APPEND_ADDR_HI__CACHE_POLICY_MASK 0x2000000
#define CP_APPEND_ADDR_HI__CACHE_POLICY__SHIFT 0x19
#define CP_APPEND_ADDR_HI__MTYPE_MASK 0x18000000
#define CP_APPEND_ADDR_HI__MTYPE__SHIFT 0x1b
#define CP_APPEND_ADDR_HI__COMMAND_MASK 0xe0000000
#define CP_APPEND_ADDR_HI__COMMAND__SHIFT 0x1d
#define CP_APPEND_DATA__DATA_MASK 0xffffffff
#define CP_APPEND_DATA__DATA__SHIFT 0x0
#define CP_APPEND_LAST_CS_FENCE__LAST_FENCE_MASK 0xffffffff
#define CP_APPEND_LAST_CS_FENCE__LAST_FENCE__SHIFT 0x0
#define CP_APPEND_LAST_PS_FENCE__LAST_FENCE_MASK 0xffffffff
#define CP_APPEND_LAST_PS_FENCE__LAST_FENCE__SHIFT 0x0
#define CP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xffffffff
#define CP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
#define CP_ME_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xffffffff
#define CP_ME_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
#define CP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xffffffff
#define CP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
#define CP_ME_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xffffffff
#define CP_ME_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
#define CP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xffffffff
#define CP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
#define CP_ME_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xffffffff
#define CP_ME_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
#define CP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xffffffff
#define CP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
#define CP_ME_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xffffffff
#define CP_ME_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
#define CP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xffffffff
#define CP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
#define CP_ME_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xffffffff
#define CP_ME_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
#define CP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xffffffff
#define CP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
#define CP_ME_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xffffffff
#define CP_ME_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_SWAP_MASK 0x3
#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_SWAP__SHIFT 0x0
#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_LO_MASK 0xfffffffc
#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_LO__SHIFT 0x2
#define CP_ME_MC_WADDR_HI__ME_MC_WADDR_HI_MASK 0xffff
#define CP_ME_MC_WADDR_HI__ME_MC_WADDR_HI__SHIFT 0x0
#define CP_ME_MC_WADDR_HI__MTYPE_MASK 0x300000
#define CP_ME_MC_WADDR_HI__MTYPE__SHIFT 0x14
#define CP_ME_MC_WADDR_HI__CACHE_POLICY_MASK 0x400000
#define CP_ME_MC_WADDR_HI__CACHE_POLICY__SHIFT 0x16
#define CP_ME_MC_WDATA_LO__ME_MC_WDATA_LO_MASK 0xffffffff
#define CP_ME_MC_WDATA_LO__ME_MC_WDATA_LO__SHIFT 0x0
#define CP_ME_MC_WDATA_HI__ME_MC_WDATA_HI_MASK 0xffffffff
#define CP_ME_MC_WDATA_HI__ME_MC_WDATA_HI__SHIFT 0x0
#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_SWAP_MASK 0x3
#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_SWAP__SHIFT 0x0
#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_LO_MASK 0xfffffffc
#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_LO__SHIFT 0x2
#define CP_ME_MC_RADDR_HI__ME_MC_RADDR_HI_MASK 0xffff
#define CP_ME_MC_RADDR_HI__ME_MC_RADDR_HI__SHIFT 0x0
#define CP_ME_MC_RADDR_HI__MTYPE_MASK 0x300000
#define CP_ME_MC_RADDR_HI__MTYPE__SHIFT 0x14
#define CP_ME_MC_RADDR_HI__CACHE_POLICY_MASK 0x400000
#define CP_ME_MC_RADDR_HI__CACHE_POLICY__SHIFT 0x16
#define CP_SEM_WAIT_TIMER__SEM_WAIT_TIMER_MASK 0xffffffff
#define CP_SEM_WAIT_TIMER__SEM_WAIT_TIMER__SHIFT 0x0
#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_SWAP_MASK 0x3
#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_SWAP__SHIFT 0x0
#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_LO_MASK 0xfffffff8
#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_LO__SHIFT 0x3
#define CP_SIG_SEM_ADDR_HI__SEM_ADDR_HI_MASK 0xffff
#define CP_SIG_SEM_ADDR_HI__SEM_ADDR_HI__SHIFT 0x0
#define CP_SIG_SEM_ADDR_HI__SEM_USE_MAILBOX_MASK 0x10000
#define CP_SIG_SEM_ADDR_HI__SEM_USE_MAILBOX__SHIFT 0x10
#define CP_SIG_SEM_ADDR_HI__SEM_SIGNAL_TYPE_MASK 0x100000
#define CP_SIG_SEM_ADDR_HI__SEM_SIGNAL_TYPE__SHIFT 0x14
#define CP_SIG_SEM_ADDR_HI__SEM_CLIENT_CODE_MASK 0x3000000
#define CP_SIG_SEM_ADDR_HI__SEM_CLIENT_CODE__SHIFT 0x18
#define CP_SIG_SEM_ADDR_HI__SEM_SELECT_MASK 0xe0000000
#define CP_SIG_SEM_ADDR_HI__SEM_SELECT__SHIFT 0x1d
#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_SWAP_MASK 0x3
#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_SWAP__SHIFT 0x0
#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_LO_MASK 0xfffffff8
#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_LO__SHIFT 0x3
#define CP_WAIT_SEM_ADDR_HI__SEM_ADDR_HI_MASK 0xffff
#define CP_WAIT_SEM_ADDR_HI__SEM_ADDR_HI__SHIFT 0x0
#define CP_WAIT_SEM_ADDR_HI__SEM_USE_MAILBOX_MASK 0x10000
#define CP_WAIT_SEM_ADDR_HI__SEM_USE_MAILBOX__SHIFT 0x10
#define CP_WAIT_SEM_ADDR_HI__SEM_SIGNAL_TYPE_MASK 0x100000
#define CP_WAIT_SEM_ADDR_HI__SEM_SIGNAL_TYPE__SHIFT 0x14
#define CP_WAIT_SEM_ADDR_HI__SEM_CLIENT_CODE_MASK 0x3000000
#define CP_WAIT_SEM_ADDR_HI__SEM_CLIENT_CODE__SHIFT 0x18
#define CP_WAIT_SEM_ADDR_HI__SEM_SELECT_MASK 0xe0000000
#define CP_WAIT_SEM_ADDR_HI__SEM_SELECT__SHIFT 0x1d
#define CP_WAIT_REG_MEM_TIMEOUT__WAIT_REG_MEM_TIMEOUT_MASK 0xffffffff
#define CP_WAIT_REG_MEM_TIMEOUT__WAIT_REG_MEM_TIMEOUT__SHIFT 0x0
#define CP_COHER_START_DELAY__START_DELAY_COUNT_MASK 0x3f
#define CP_COHER_START_DELAY__START_DELAY_COUNT__SHIFT 0x0
#define CP_COHER_CNTL__DEST_BASE_0_ENA_MASK 0x1
#define CP_COHER_CNTL__DEST_BASE_0_ENA__SHIFT 0x0
#define CP_COHER_CNTL__DEST_BASE_1_ENA_MASK 0x2
#define CP_COHER_CNTL__DEST_BASE_1_ENA__SHIFT 0x1
#define CP_COHER_CNTL__TC_SD_ACTION_ENA_MASK 0x4
#define CP_COHER_CNTL__TC_SD_ACTION_ENA__SHIFT 0x2
#define CP_COHER_CNTL__TC_NC_ACTION_ENA_MASK 0x8
#define CP_COHER_CNTL__TC_NC_ACTION_ENA__SHIFT 0x3
#define CP_COHER_CNTL__CB0_DEST_BASE_ENA_MASK 0x40
#define CP_COHER_CNTL__CB0_DEST_BASE_ENA__SHIFT 0x6
#define CP_COHER_CNTL__CB1_DEST_BASE_ENA_MASK 0x80
#define CP_COHER_CNTL__CB1_DEST_BASE_ENA__SHIFT 0x7
#define CP_COHER_CNTL__CB2_DEST_BASE_ENA_MASK 0x100
#define CP_COHER_CNTL__CB2_DEST_BASE_ENA__SHIFT 0x8
#define CP_COHER_CNTL__CB3_DEST_BASE_ENA_MASK 0x200
#define CP_COHER_CNTL__CB3_DEST_BASE_ENA__SHIFT 0x9
#define CP_COHER_CNTL__CB4_DEST_BASE_ENA_MASK 0x400
#define CP_COHER_CNTL__CB4_DEST_BASE_ENA__SHIFT 0xa
#define CP_COHER_CNTL__CB5_DEST_BASE_ENA_MASK 0x800
#define CP_COHER_CNTL__CB5_DEST_BASE_ENA__SHIFT 0xb
#define CP_COHER_CNTL__CB6_DEST_BASE_ENA_MASK 0x1000
#define CP_COHER_CNTL__CB6_DEST_BASE_ENA__SHIFT 0xc
#define CP_COHER_CNTL__CB7_DEST_BASE_ENA_MASK 0x2000
#define CP_COHER_CNTL__CB7_DEST_BASE_ENA__SHIFT 0xd
#define CP_COHER_CNTL__DB_DEST_BASE_ENA_MASK 0x4000
#define CP_COHER_CNTL__DB_DEST_BASE_ENA__SHIFT 0xe
#define CP_COHER_CNTL__TCL1_VOL_ACTION_ENA_MASK 0x8000
#define CP_COHER_CNTL__TCL1_VOL_ACTION_ENA__SHIFT 0xf
#define CP_COHER_CNTL__TC_WB_ACTION_ENA_MASK 0x40000
#define CP_COHER_CNTL__TC_WB_ACTION_ENA__SHIFT 0x12
#define CP_COHER_CNTL__DEST_BASE_2_ENA_MASK 0x80000
#define CP_COHER_CNTL__DEST_BASE_2_ENA__SHIFT 0x13
#define CP_COHER_CNTL__DEST_BASE_3_ENA_MASK 0x200000
#define CP_COHER_CNTL__DEST_BASE_3_ENA__SHIFT 0x15
#define CP_COHER_CNTL__TCL1_ACTION_ENA_MASK 0x400000
#define CP_COHER_CNTL__TCL1_ACTION_ENA__SHIFT 0x16
#define CP_COHER_CNTL__TC_ACTION_ENA_MASK 0x800000
#define CP_COHER_CNTL__TC_ACTION_ENA__SHIFT 0x17
#define CP_COHER_CNTL__CB_ACTION_ENA_MASK 0x2000000
#define CP_COHER_CNTL__CB_ACTION_ENA__SHIFT 0x19
#define CP_COHER_CNTL__DB_ACTION_ENA_MASK 0x4000000
#define CP_COHER_CNTL__DB_ACTION_ENA__SHIFT 0x1a
#define CP_COHER_CNTL__SH_KCACHE_ACTION_ENA_MASK 0x8000000
#define CP_COHER_CNTL__SH_KCACHE_ACTION_ENA__SHIFT 0x1b
#define CP_COHER_CNTL__SH_KCACHE_VOL_ACTION_ENA_MASK 0x10000000
#define CP_COHER_CNTL__SH_KCACHE_VOL_ACTION_ENA__SHIFT 0x1c
#define CP_COHER_CNTL__SH_ICACHE_ACTION_ENA_MASK 0x20000000
#define CP_COHER_CNTL__SH_ICACHE_ACTION_ENA__SHIFT 0x1d
#define CP_COHER_CNTL__SH_KCACHE_WB_ACTION_ENA_MASK 0x40000000
#define CP_COHER_CNTL__SH_KCACHE_WB_ACTION_ENA__SHIFT 0x1e
#define CP_COHER_CNTL__SH_SD_ACTION_ENA_MASK 0x80000000
#define CP_COHER_CNTL__SH_SD_ACTION_ENA__SHIFT 0x1f
#define CP_COHER_SIZE__COHER_SIZE_256B_MASK 0xffffffff
#define CP_COHER_SIZE__COHER_SIZE_256B__SHIFT 0x0
#define CP_COHER_SIZE_HI__COHER_SIZE_HI_256B_MASK 0xff
#define CP_COHER_SIZE_HI__COHER_SIZE_HI_256B__SHIFT 0x0
#define CP_COHER_BASE__COHER_BASE_256B_MASK 0xffffffff
#define CP_COHER_BASE__COHER_BASE_256B__SHIFT 0x0
#define CP_COHER_BASE_HI__COHER_BASE_HI_256B_MASK 0xff
#define CP_COHER_BASE_HI__COHER_BASE_HI_256B__SHIFT 0x0
#define CP_COHER_STATUS__MATCHING_GFX_CNTX_MASK 0xff
#define CP_COHER_STATUS__MATCHING_GFX_CNTX__SHIFT 0x0
#define CP_COHER_STATUS__MEID_MASK 0x3000000
#define CP_COHER_STATUS__MEID__SHIFT 0x18
#define CP_COHER_STATUS__PHASE1_STATUS_MASK 0x40000000
#define CP_COHER_STATUS__PHASE1_STATUS__SHIFT 0x1e
#define CP_COHER_STATUS__STATUS_MASK 0x80000000
#define CP_COHER_STATUS__STATUS__SHIFT 0x1f
#define COHER_DEST_BASE_0__DEST_BASE_256B_MASK 0xffffffff
#define COHER_DEST_BASE_0__DEST_BASE_256B__SHIFT 0x0
#define COHER_DEST_BASE_1__DEST_BASE_256B_MASK 0xffffffff
#define COHER_DEST_BASE_1__DEST_BASE_256B__SHIFT 0x0
#define COHER_DEST_BASE_2__DEST_BASE_256B_MASK 0xffffffff
#define COHER_DEST_BASE_2__DEST_BASE_256B__SHIFT 0x0
#define COHER_DEST_BASE_3__DEST_BASE_256B_MASK 0xffffffff
#define COHER_DEST_BASE_3__DEST_BASE_256B__SHIFT 0x0
#define COHER_DEST_BASE_HI_0__DEST_BASE_HI_256B_MASK 0xffffffff
#define COHER_DEST_BASE_HI_0__DEST_BASE_HI_256B__SHIFT 0x0
#define COHER_DEST_BASE_HI_1__DEST_BASE_HI_256B_MASK 0xffffffff
#define COHER_DEST_BASE_HI_1__DEST_BASE_HI_256B__SHIFT 0x0
#define COHER_DEST_BASE_HI_2__DEST_BASE_HI_256B_MASK 0xffffffff
#define COHER_DEST_BASE_HI_2__DEST_BASE_HI_256B__SHIFT 0x0
#define COHER_DEST_BASE_HI_3__DEST_BASE_HI_256B_MASK 0xffffffff
#define COHER_DEST_BASE_HI_3__DEST_BASE_HI_256B__SHIFT 0x0
#define CP_DMA_ME_SRC_ADDR__SRC_ADDR_MASK 0xffffffff
#define CP_DMA_ME_SRC_ADDR__SRC_ADDR__SHIFT 0x0
#define CP_DMA_ME_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0xffff
#define CP_DMA_ME_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x0
#define CP_DMA_ME_DST_ADDR__DST_ADDR_MASK 0xffffffff
#define CP_DMA_ME_DST_ADDR__DST_ADDR__SHIFT 0x0
#define CP_DMA_ME_DST_ADDR_HI__DST_ADDR_HI_MASK 0xffff
#define CP_DMA_ME_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x0
#define CP_DMA_ME_CONTROL__SRC_MTYPE_MASK 0xc00
#define CP_DMA_ME_CONTROL__SRC_MTYPE__SHIFT 0xa
#define CP_DMA_ME_CONTROL__SRC_ATC_MASK 0x1000
#define CP_DMA_ME_CONTROL__SRC_ATC__SHIFT 0xc
#define CP_DMA_ME_CONTROL__SRC_CACHE_POLICY_MASK 0x2000
#define CP_DMA_ME_CONTROL__SRC_CACHE_POLICY__SHIFT 0xd
#define CP_DMA_ME_CONTROL__DST_SELECT_MASK 0x300000
#define CP_DMA_ME_CONTROL__DST_SELECT__SHIFT 0x14
#define CP_DMA_ME_CONTROL__DST_MTYPE_MASK 0xc00000
#define CP_DMA_ME_CONTROL__DST_MTYPE__SHIFT 0x16
#define CP_DMA_ME_CONTROL__DST_ATC_MASK 0x1000000
#define CP_DMA_ME_CONTROL__DST_ATC__SHIFT 0x18
#define CP_DMA_ME_CONTROL__DST_CACHE_POLICY_MASK 0x2000000
#define CP_DMA_ME_CONTROL__DST_CACHE_POLICY__SHIFT 0x19
#define CP_DMA_ME_CONTROL__SRC_SELECT_MASK 0x60000000
#define CP_DMA_ME_CONTROL__SRC_SELECT__SHIFT 0x1d
#define CP_DMA_ME_COMMAND__BYTE_COUNT_MASK 0x1fffff
#define CP_DMA_ME_COMMAND__BYTE_COUNT__SHIFT 0x0
#define CP_DMA_ME_COMMAND__DIS_WC_MASK 0x200000
#define CP_DMA_ME_COMMAND__DIS_WC__SHIFT 0x15
#define CP_DMA_ME_COMMAND__SRC_SWAP_MASK 0xc00000
#define CP_DMA_ME_COMMAND__SRC_SWAP__SHIFT 0x16
#define CP_DMA_ME_COMMAND__DST_SWAP_MASK 0x3000000
#define CP_DMA_ME_COMMAND__DST_SWAP__SHIFT 0x18
#define CP_DMA_ME_COMMAND__SAS_MASK 0x4000000
#define CP_DMA_ME_COMMAND__SAS__SHIFT 0x1a
#define CP_DMA_ME_COMMAND__DAS_MASK 0x8000000
#define CP_DMA_ME_COMMAND__DAS__SHIFT 0x1b
#define CP_DMA_ME_COMMAND__SAIC_MASK 0x10000000
#define CP_DMA_ME_COMMAND__SAIC__SHIFT 0x1c
#define CP_DMA_ME_COMMAND__DAIC_MASK 0x20000000
#define CP_DMA_ME_COMMAND__DAIC__SHIFT 0x1d
#define CP_DMA_ME_COMMAND__RAW_WAIT_MASK 0x40000000
#define CP_DMA_ME_COMMAND__RAW_WAIT__SHIFT 0x1e
#define CP_DMA_PFP_SRC_ADDR__SRC_ADDR_MASK 0xffffffff
#define CP_DMA_PFP_SRC_ADDR__SRC_ADDR__SHIFT 0x0
#define CP_DMA_PFP_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0xffff
#define CP_DMA_PFP_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x0
#define CP_DMA_PFP_DST_ADDR__DST_ADDR_MASK 0xffffffff
#define CP_DMA_PFP_DST_ADDR__DST_ADDR__SHIFT 0x0
#define CP_DMA_PFP_DST_ADDR_HI__DST_ADDR_HI_MASK 0xffff
#define CP_DMA_PFP_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x0
#define CP_DMA_PFP_CONTROL__SRC_MTYPE_MASK 0xc00
#define CP_DMA_PFP_CONTROL__SRC_MTYPE__SHIFT 0xa
#define CP_DMA_PFP_CONTROL__SRC_ATC_MASK 0x1000
#define CP_DMA_PFP_CONTROL__SRC_ATC__SHIFT 0xc
#define CP_DMA_PFP_CONTROL__SRC_CACHE_POLICY_MASK 0x2000
#define CP_DMA_PFP_CONTROL__SRC_CACHE_POLICY__SHIFT 0xd
#define CP_DMA_PFP_CONTROL__DST_SELECT_MASK 0x300000
#define CP_DMA_PFP_CONTROL__DST_SELECT__SHIFT 0x14
#define CP_DMA_PFP_CONTROL__DST_MTYPE_MASK 0xc00000
#define CP_DMA_PFP_CONTROL__DST_MTYPE__SHIFT 0x16
#define CP_DMA_PFP_CONTROL__DST_ATC_MASK 0x1000000
#define CP_DMA_PFP_CONTROL__DST_ATC__SHIFT 0x18
#define CP_DMA_PFP_CONTROL__DST_CACHE_POLICY_MASK 0x2000000
#define CP_DMA_PFP_CONTROL__DST_CACHE_POLICY__SHIFT 0x19
#define CP_DMA_PFP_CONTROL__SRC_SELECT_MASK 0x60000000
#define CP_DMA_PFP_CONTROL__SRC_SELECT__SHIFT 0x1d
#define CP_DMA_PFP_COMMAND__BYTE_COUNT_MASK 0x1fffff
#define CP_DMA_PFP_COMMAND__BYTE_COUNT__SHIFT 0x0
#define CP_DMA_PFP_COMMAND__DIS_WC_MASK 0x200000
#define CP_DMA_PFP_COMMAND__DIS_WC__SHIFT 0x15
#define CP_DMA_PFP_COMMAND__SRC_SWAP_MASK 0xc00000
#define CP_DMA_PFP_COMMAND__SRC_SWAP__SHIFT 0x16
#define CP_DMA_PFP_COMMAND__DST_SWAP_MASK 0x3000000
#define CP_DMA_PFP_COMMAND__DST_SWAP__SHIFT 0x18
#define CP_DMA_PFP_COMMAND__SAS_MASK 0x4000000
#define CP_DMA_PFP_COMMAND__SAS__SHIFT 0x1a
#define CP_DMA_PFP_COMMAND__DAS_MASK 0x8000000
#define CP_DMA_PFP_COMMAND__DAS__SHIFT 0x1b
#define CP_DMA_PFP_COMMAND__SAIC_MASK 0x10000000
3842#define CP_DMA_PFP_COMMAND__SAIC__SHIFT 0x1c
3843#define CP_DMA_PFP_COMMAND__DAIC_MASK 0x20000000
3844#define CP_DMA_PFP_COMMAND__DAIC__SHIFT 0x1d
3845#define CP_DMA_PFP_COMMAND__RAW_WAIT_MASK 0x40000000
3846#define CP_DMA_PFP_COMMAND__RAW_WAIT__SHIFT 0x1e
3847#define CP_DMA_CNTL__MIN_AVAILSZ_MASK 0x30
3848#define CP_DMA_CNTL__MIN_AVAILSZ__SHIFT 0x4
3849#define CP_DMA_CNTL__BUFFER_DEPTH_MASK 0xf0000
3850#define CP_DMA_CNTL__BUFFER_DEPTH__SHIFT 0x10
3851#define CP_DMA_CNTL__PIO_FIFO_EMPTY_MASK 0x10000000
3852#define CP_DMA_CNTL__PIO_FIFO_EMPTY__SHIFT 0x1c
3853#define CP_DMA_CNTL__PIO_FIFO_FULL_MASK 0x20000000
3854#define CP_DMA_CNTL__PIO_FIFO_FULL__SHIFT 0x1d
3855#define CP_DMA_CNTL__PIO_COUNT_MASK 0xc0000000
3856#define CP_DMA_CNTL__PIO_COUNT__SHIFT 0x1e
3857#define CP_DMA_READ_TAGS__DMA_READ_TAG_MASK 0x3ffffff
3858#define CP_DMA_READ_TAGS__DMA_READ_TAG__SHIFT 0x0
3859#define CP_DMA_READ_TAGS__DMA_READ_TAG_VALID_MASK 0x10000000
3860#define CP_DMA_READ_TAGS__DMA_READ_TAG_VALID__SHIFT 0x1c
3861#define CP_PFP_IB_CONTROL__IB_EN_MASK 0xff
3862#define CP_PFP_IB_CONTROL__IB_EN__SHIFT 0x0
3863#define CP_PFP_LOAD_CONTROL__CONFIG_REG_EN_MASK 0x1
3864#define CP_PFP_LOAD_CONTROL__CONFIG_REG_EN__SHIFT 0x0
3865#define CP_PFP_LOAD_CONTROL__CNTX_REG_EN_MASK 0x2
3866#define CP_PFP_LOAD_CONTROL__CNTX_REG_EN__SHIFT 0x1
3867#define CP_PFP_LOAD_CONTROL__SH_GFX_REG_EN_MASK 0x10000
3868#define CP_PFP_LOAD_CONTROL__SH_GFX_REG_EN__SHIFT 0x10
3869#define CP_PFP_LOAD_CONTROL__SH_CS_REG_EN_MASK 0x1000000
3870#define CP_PFP_LOAD_CONTROL__SH_CS_REG_EN__SHIFT 0x18
3871#define CP_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0xff
3872#define CP_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x0
3873#define CP_SCRATCH_DATA__SCRATCH_DATA_MASK 0xffffffff
3874#define CP_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x0
3875#define CP_RB_OFFSET__RB_OFFSET_MASK 0xfffff
3876#define CP_RB_OFFSET__RB_OFFSET__SHIFT 0x0
3877#define CP_IB1_OFFSET__IB1_OFFSET_MASK 0xfffff
3878#define CP_IB1_OFFSET__IB1_OFFSET__SHIFT 0x0
3879#define CP_IB2_OFFSET__IB2_OFFSET_MASK 0xfffff
3880#define CP_IB2_OFFSET__IB2_OFFSET__SHIFT 0x0
3881#define CP_IB1_PREAMBLE_BEGIN__IB1_PREAMBLE_BEGIN_MASK 0xfffff
3882#define CP_IB1_PREAMBLE_BEGIN__IB1_PREAMBLE_BEGIN__SHIFT 0x0
3883#define CP_IB1_PREAMBLE_END__IB1_PREAMBLE_END_MASK 0xfffff
3884#define CP_IB1_PREAMBLE_END__IB1_PREAMBLE_END__SHIFT 0x0
3885#define CP_IB2_PREAMBLE_BEGIN__IB2_PREAMBLE_BEGIN_MASK 0xfffff
3886#define CP_IB2_PREAMBLE_BEGIN__IB2_PREAMBLE_BEGIN__SHIFT 0x0
3887#define CP_IB2_PREAMBLE_END__IB2_PREAMBLE_END_MASK 0xfffff
3888#define CP_IB2_PREAMBLE_END__IB2_PREAMBLE_END__SHIFT 0x0
3889#define CP_CE_IB1_OFFSET__IB1_OFFSET_MASK 0xfffff
3890#define CP_CE_IB1_OFFSET__IB1_OFFSET__SHIFT 0x0
3891#define CP_CE_IB2_OFFSET__IB2_OFFSET_MASK 0xfffff
3892#define CP_CE_IB2_OFFSET__IB2_OFFSET__SHIFT 0x0
3893#define CP_CE_COUNTER__CONST_ENGINE_COUNT_MASK 0xffffffff
3894#define CP_CE_COUNTER__CONST_ENGINE_COUNT__SHIFT 0x0
3895#define CP_CE_RB_OFFSET__RB_OFFSET_MASK 0xfffff
3896#define CP_CE_RB_OFFSET__RB_OFFSET__SHIFT 0x0
3897#define CP_PFP_COMPLETION_STATUS__STATUS_MASK 0x3
3898#define CP_PFP_COMPLETION_STATUS__STATUS__SHIFT 0x0
3899#define CP_CE_COMPLETION_STATUS__STATUS_MASK 0x3
3900#define CP_CE_COMPLETION_STATUS__STATUS__SHIFT 0x0
3901#define CP_PRED_NOT_VISIBLE__NOT_VISIBLE_MASK 0x1
3902#define CP_PRED_NOT_VISIBLE__NOT_VISIBLE__SHIFT 0x0
3903#define CP_PFP_METADATA_BASE_ADDR__ADDR_LO_MASK 0xffffffff
3904#define CP_PFP_METADATA_BASE_ADDR__ADDR_LO__SHIFT 0x0
3905#define CP_PFP_METADATA_BASE_ADDR_HI__ADDR_HI_MASK 0xffff
3906#define CP_PFP_METADATA_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
3907#define CP_CE_METADATA_BASE_ADDR__ADDR_LO_MASK 0xffffffff
3908#define CP_CE_METADATA_BASE_ADDR__ADDR_LO__SHIFT 0x0
3909#define CP_CE_METADATA_BASE_ADDR_HI__ADDR_HI_MASK 0xffff
3910#define CP_CE_METADATA_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
3911#define CP_DRAW_INDX_INDR_ADDR__ADDR_LO_MASK 0xffffffff
3912#define CP_DRAW_INDX_INDR_ADDR__ADDR_LO__SHIFT 0x0
3913#define CP_DRAW_INDX_INDR_ADDR_HI__ADDR_HI_MASK 0xffff
3914#define CP_DRAW_INDX_INDR_ADDR_HI__ADDR_HI__SHIFT 0x0
3915#define CP_DISPATCH_INDR_ADDR__ADDR_LO_MASK 0xffffffff
3916#define CP_DISPATCH_INDR_ADDR__ADDR_LO__SHIFT 0x0
3917#define CP_DISPATCH_INDR_ADDR_HI__ADDR_HI_MASK 0xffff
3918#define CP_DISPATCH_INDR_ADDR_HI__ADDR_HI__SHIFT 0x0
3919#define CP_INDEX_BASE_ADDR__ADDR_LO_MASK 0xffffffff
3920#define CP_INDEX_BASE_ADDR__ADDR_LO__SHIFT 0x0
3921#define CP_INDEX_BASE_ADDR_HI__ADDR_HI_MASK 0xffff
3922#define CP_INDEX_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
3923#define CP_INDEX_TYPE__INDEX_TYPE_MASK 0x3
3924#define CP_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
3925#define CP_GDS_BKUP_ADDR__ADDR_LO_MASK 0xffffffff
3926#define CP_GDS_BKUP_ADDR__ADDR_LO__SHIFT 0x0
3927#define CP_GDS_BKUP_ADDR_HI__ADDR_HI_MASK 0xffff
3928#define CP_GDS_BKUP_ADDR_HI__ADDR_HI__SHIFT 0x0
3929#define CP_SAMPLE_STATUS__Z_PASS_ACITVE_MASK 0x1
3930#define CP_SAMPLE_STATUS__Z_PASS_ACITVE__SHIFT 0x0
3931#define CP_SAMPLE_STATUS__STREAMOUT_ACTIVE_MASK 0x2
3932#define CP_SAMPLE_STATUS__STREAMOUT_ACTIVE__SHIFT 0x1
3933#define CP_SAMPLE_STATUS__PIPELINE_ACTIVE_MASK 0x4
3934#define CP_SAMPLE_STATUS__PIPELINE_ACTIVE__SHIFT 0x2
3935#define CP_SAMPLE_STATUS__STIPPLE_ACTIVE_MASK 0x8
3936#define CP_SAMPLE_STATUS__STIPPLE_ACTIVE__SHIFT 0x3
3937#define CP_SAMPLE_STATUS__VGT_BUFFERS_ACTIVE_MASK 0x10
3938#define CP_SAMPLE_STATUS__VGT_BUFFERS_ACTIVE__SHIFT 0x4
3939#define CP_SAMPLE_STATUS__SCREEN_EXT_ACTIVE_MASK 0x20
3940#define CP_SAMPLE_STATUS__SCREEN_EXT_ACTIVE__SHIFT 0x5
3941#define CP_SAMPLE_STATUS__DRAW_INDIRECT_ACTIVE_MASK 0x40
3942#define CP_SAMPLE_STATUS__DRAW_INDIRECT_ACTIVE__SHIFT 0x6
3943#define CP_SAMPLE_STATUS__DISP_INDIRECT_ACTIVE_MASK 0x80
3944#define CP_SAMPLE_STATUS__DISP_INDIRECT_ACTIVE__SHIFT 0x7
3945#define CP_STALLED_STAT1__RBIU_TO_DMA_NOT_RDY_TO_RCV_MASK 0x1
3946#define CP_STALLED_STAT1__RBIU_TO_DMA_NOT_RDY_TO_RCV__SHIFT 0x0
3947#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV_MASK 0x4
3948#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV__SHIFT 0x2
3949#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV_MASK 0x10
3950#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV__SHIFT 0x4
3951#define CP_STALLED_STAT1__ME_HAS_ACTIVE_CE_BUFFER_FLAG_MASK 0x400
3952#define CP_STALLED_STAT1__ME_HAS_ACTIVE_CE_BUFFER_FLAG__SHIFT 0xa
3953#define CP_STALLED_STAT1__ME_HAS_ACTIVE_DE_BUFFER_FLAG_MASK 0x800
3954#define CP_STALLED_STAT1__ME_HAS_ACTIVE_DE_BUFFER_FLAG__SHIFT 0xb
3955#define CP_STALLED_STAT1__ME_STALLED_ON_TC_WR_CONFIRM_MASK 0x1000
3956#define CP_STALLED_STAT1__ME_STALLED_ON_TC_WR_CONFIRM__SHIFT 0xc
3957#define CP_STALLED_STAT1__ME_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x2000
3958#define CP_STALLED_STAT1__ME_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0xd
3959#define CP_STALLED_STAT1__ME_WAITING_ON_TC_READ_DATA_MASK 0x4000
3960#define CP_STALLED_STAT1__ME_WAITING_ON_TC_READ_DATA__SHIFT 0xe
3961#define CP_STALLED_STAT1__ME_WAITING_ON_REG_READ_DATA_MASK 0x8000
3962#define CP_STALLED_STAT1__ME_WAITING_ON_REG_READ_DATA__SHIFT 0xf
3963#define CP_STALLED_STAT1__RCIU_WAITING_ON_GDS_FREE_MASK 0x800000
3964#define CP_STALLED_STAT1__RCIU_WAITING_ON_GDS_FREE__SHIFT 0x17
3965#define CP_STALLED_STAT1__RCIU_WAITING_ON_GRBM_FREE_MASK 0x1000000
3966#define CP_STALLED_STAT1__RCIU_WAITING_ON_GRBM_FREE__SHIFT 0x18
3967#define CP_STALLED_STAT1__RCIU_WAITING_ON_VGT_FREE_MASK 0x2000000
3968#define CP_STALLED_STAT1__RCIU_WAITING_ON_VGT_FREE__SHIFT 0x19
3969#define CP_STALLED_STAT1__RCIU_STALLED_ON_ME_READ_MASK 0x4000000
3970#define CP_STALLED_STAT1__RCIU_STALLED_ON_ME_READ__SHIFT 0x1a
3971#define CP_STALLED_STAT1__RCIU_STALLED_ON_DMA_READ_MASK 0x8000000
3972#define CP_STALLED_STAT1__RCIU_STALLED_ON_DMA_READ__SHIFT 0x1b
3973#define CP_STALLED_STAT1__RCIU_STALLED_ON_APPEND_READ_MASK 0x10000000
3974#define CP_STALLED_STAT1__RCIU_STALLED_ON_APPEND_READ__SHIFT 0x1c
3975#define CP_STALLED_STAT1__RCIU_HALTED_BY_REG_VIOLATION_MASK 0x20000000
3976#define CP_STALLED_STAT1__RCIU_HALTED_BY_REG_VIOLATION__SHIFT 0x1d
3977#define CP_STALLED_STAT2__PFP_TO_CSF_NOT_RDY_TO_RCV_MASK 0x1
3978#define CP_STALLED_STAT2__PFP_TO_CSF_NOT_RDY_TO_RCV__SHIFT 0x0
3979#define CP_STALLED_STAT2__PFP_TO_MEQ_NOT_RDY_TO_RCV_MASK 0x2
3980#define CP_STALLED_STAT2__PFP_TO_MEQ_NOT_RDY_TO_RCV__SHIFT 0x1
3981#define CP_STALLED_STAT2__PFP_TO_RCIU_NOT_RDY_TO_RCV_MASK 0x4
3982#define CP_STALLED_STAT2__PFP_TO_RCIU_NOT_RDY_TO_RCV__SHIFT 0x2
3983#define CP_STALLED_STAT2__PFP_TO_VGT_WRITES_PENDING_MASK 0x10
3984#define CP_STALLED_STAT2__PFP_TO_VGT_WRITES_PENDING__SHIFT 0x4
3985#define CP_STALLED_STAT2__PFP_RCIU_READ_PENDING_MASK 0x20
3986#define CP_STALLED_STAT2__PFP_RCIU_READ_PENDING__SHIFT 0x5
3987#define CP_STALLED_STAT2__PFP_WAITING_ON_BUFFER_DATA_MASK 0x100
3988#define CP_STALLED_STAT2__PFP_WAITING_ON_BUFFER_DATA__SHIFT 0x8
3989#define CP_STALLED_STAT2__ME_WAIT_ON_CE_COUNTER_MASK 0x200
3990#define CP_STALLED_STAT2__ME_WAIT_ON_CE_COUNTER__SHIFT 0x9
3991#define CP_STALLED_STAT2__ME_WAIT_ON_AVAIL_BUFFER_MASK 0x400
3992#define CP_STALLED_STAT2__ME_WAIT_ON_AVAIL_BUFFER__SHIFT 0xa
3993#define CP_STALLED_STAT2__GFX_CNTX_NOT_AVAIL_TO_ME_MASK 0x800
3994#define CP_STALLED_STAT2__GFX_CNTX_NOT_AVAIL_TO_ME__SHIFT 0xb
3995#define CP_STALLED_STAT2__ME_RCIU_NOT_RDY_TO_RCV_MASK 0x1000
3996#define CP_STALLED_STAT2__ME_RCIU_NOT_RDY_TO_RCV__SHIFT 0xc
3997#define CP_STALLED_STAT2__ME_TO_CONST_NOT_RDY_TO_RCV_MASK 0x2000
3998#define CP_STALLED_STAT2__ME_TO_CONST_NOT_RDY_TO_RCV__SHIFT 0xd
3999#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_PFP_MASK 0x4000
4000#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_PFP__SHIFT 0xe
4001#define CP_STALLED_STAT2__ME_WAITING_ON_PARTIAL_FLUSH_MASK 0x8000
4002#define CP_STALLED_STAT2__ME_WAITING_ON_PARTIAL_FLUSH__SHIFT 0xf
4003#define CP_STALLED_STAT2__MEQ_TO_ME_NOT_RDY_TO_RCV_MASK 0x10000
4004#define CP_STALLED_STAT2__MEQ_TO_ME_NOT_RDY_TO_RCV__SHIFT 0x10
4005#define CP_STALLED_STAT2__STQ_TO_ME_NOT_RDY_TO_RCV_MASK 0x20000
4006#define CP_STALLED_STAT2__STQ_TO_ME_NOT_RDY_TO_RCV__SHIFT 0x11
4007#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_STQ_MASK 0x40000
4008#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_STQ__SHIFT 0x12
4009#define CP_STALLED_STAT2__PFP_STALLED_ON_TC_WR_CONFIRM_MASK 0x80000
4010#define CP_STALLED_STAT2__PFP_STALLED_ON_TC_WR_CONFIRM__SHIFT 0x13
4011#define CP_STALLED_STAT2__PFP_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x100000
4012#define CP_STALLED_STAT2__PFP_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0x14
4013#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_SC_EOP_DONE_MASK 0x200000
4014#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_SC_EOP_DONE__SHIFT 0x15
4015#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_WR_CONFIRM_MASK 0x400000
4016#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_WR_CONFIRM__SHIFT 0x16
4017#define CP_STALLED_STAT2__STRMO_WR_OF_PRIM_DATA_PENDING_MASK 0x800000
4018#define CP_STALLED_STAT2__STRMO_WR_OF_PRIM_DATA_PENDING__SHIFT 0x17
4019#define CP_STALLED_STAT2__PIPE_STATS_WR_DATA_PENDING_MASK 0x1000000
4020#define CP_STALLED_STAT2__PIPE_STATS_WR_DATA_PENDING__SHIFT 0x18
4021#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_CS_DONE_MASK 0x2000000
4022#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_CS_DONE__SHIFT 0x19
4023#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_PS_DONE_MASK 0x4000000
4024#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_PS_DONE__SHIFT 0x1a
4025#define CP_STALLED_STAT2__APPEND_WAIT_ON_WR_CONFIRM_MASK 0x8000000
4026#define CP_STALLED_STAT2__APPEND_WAIT_ON_WR_CONFIRM__SHIFT 0x1b
4027#define CP_STALLED_STAT2__APPEND_ACTIVE_PARTITION_MASK 0x10000000
4028#define CP_STALLED_STAT2__APPEND_ACTIVE_PARTITION__SHIFT 0x1c
4029#define CP_STALLED_STAT2__APPEND_WAITING_TO_SEND_MEMWRITE_MASK 0x20000000
4030#define CP_STALLED_STAT2__APPEND_WAITING_TO_SEND_MEMWRITE__SHIFT 0x1d
4031#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_IDLE_CNTXS_MASK 0x40000000
4032#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_IDLE_CNTXS__SHIFT 0x1e
4033#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_ALL_CLEAN_MASK 0x80000000
4034#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_ALL_CLEAN__SHIFT 0x1f
4035#define CP_STALLED_STAT3__CE_TO_CSF_NOT_RDY_TO_RCV_MASK 0x1
4036#define CP_STALLED_STAT3__CE_TO_CSF_NOT_RDY_TO_RCV__SHIFT 0x0
4037#define CP_STALLED_STAT3__CE_TO_RAM_INIT_FETCHER_NOT_RDY_TO_RCV_MASK 0x2
4038#define CP_STALLED_STAT3__CE_TO_RAM_INIT_FETCHER_NOT_RDY_TO_RCV__SHIFT 0x1
4039#define CP_STALLED_STAT3__CE_WAITING_ON_DATA_FROM_RAM_INIT_FETCHER_MASK 0x4
4040#define CP_STALLED_STAT3__CE_WAITING_ON_DATA_FROM_RAM_INIT_FETCHER__SHIFT 0x2
4041#define CP_STALLED_STAT3__CE_TO_RAM_INIT_NOT_RDY_MASK 0x8
4042#define CP_STALLED_STAT3__CE_TO_RAM_INIT_NOT_RDY__SHIFT 0x3
4043#define CP_STALLED_STAT3__CE_TO_RAM_DUMP_NOT_RDY_MASK 0x10
4044#define CP_STALLED_STAT3__CE_TO_RAM_DUMP_NOT_RDY__SHIFT 0x4
4045#define CP_STALLED_STAT3__CE_TO_RAM_WRITE_NOT_RDY_MASK 0x20
4046#define CP_STALLED_STAT3__CE_TO_RAM_WRITE_NOT_RDY__SHIFT 0x5
4047#define CP_STALLED_STAT3__CE_TO_INC_FIFO_NOT_RDY_TO_RCV_MASK 0x40
4048#define CP_STALLED_STAT3__CE_TO_INC_FIFO_NOT_RDY_TO_RCV__SHIFT 0x6
4049#define CP_STALLED_STAT3__CE_TO_WR_FIFO_NOT_RDY_TO_RCV_MASK 0x80
4050#define CP_STALLED_STAT3__CE_TO_WR_FIFO_NOT_RDY_TO_RCV__SHIFT 0x7
4051#define CP_STALLED_STAT3__CE_WAITING_ON_BUFFER_DATA_MASK 0x400
4052#define CP_STALLED_STAT3__CE_WAITING_ON_BUFFER_DATA__SHIFT 0xa
4053#define CP_STALLED_STAT3__CE_WAITING_ON_CE_BUFFER_FLAG_MASK 0x800
4054#define CP_STALLED_STAT3__CE_WAITING_ON_CE_BUFFER_FLAG__SHIFT 0xb
4055#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_MASK 0x1000
4056#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER__SHIFT 0xc
4057#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_UNDERFLOW_MASK 0x2000
4058#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_UNDERFLOW__SHIFT 0xd
4059#define CP_STALLED_STAT3__TCIU_WAITING_ON_FREE_MASK 0x4000
4060#define CP_STALLED_STAT3__TCIU_WAITING_ON_FREE__SHIFT 0xe
4061#define CP_STALLED_STAT3__TCIU_WAITING_ON_TAGS_MASK 0x8000
4062#define CP_STALLED_STAT3__TCIU_WAITING_ON_TAGS__SHIFT 0xf
4063#define CP_STALLED_STAT3__CE_STALLED_ON_TC_WR_CONFIRM_MASK 0x10000
4064#define CP_STALLED_STAT3__CE_STALLED_ON_TC_WR_CONFIRM__SHIFT 0x10
4065#define CP_STALLED_STAT3__CE_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x20000
4066#define CP_STALLED_STAT3__CE_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0x11
4067#define CP_STALLED_STAT3__ATCL2IU_WAITING_ON_FREE_MASK 0x40000
4068#define CP_STALLED_STAT3__ATCL2IU_WAITING_ON_FREE__SHIFT 0x12
4069#define CP_STALLED_STAT3__ATCL2IU_WAITING_ON_TAGS_MASK 0x80000
4070#define CP_STALLED_STAT3__ATCL2IU_WAITING_ON_TAGS__SHIFT 0x13
4071#define CP_STALLED_STAT3__ATCL1_WAITING_ON_TRANS_MASK 0x100000
4072#define CP_STALLED_STAT3__ATCL1_WAITING_ON_TRANS__SHIFT 0x14
4073#define CP_BUSY_STAT__REG_BUS_FIFO_BUSY_MASK 0x1
4074#define CP_BUSY_STAT__REG_BUS_FIFO_BUSY__SHIFT 0x0
4075#define CP_BUSY_STAT__COHER_CNT_NEQ_ZERO_MASK 0x40
4076#define CP_BUSY_STAT__COHER_CNT_NEQ_ZERO__SHIFT 0x6
4077#define CP_BUSY_STAT__PFP_PARSING_PACKETS_MASK 0x80
4078#define CP_BUSY_STAT__PFP_PARSING_PACKETS__SHIFT 0x7
4079#define CP_BUSY_STAT__ME_PARSING_PACKETS_MASK 0x100
4080#define CP_BUSY_STAT__ME_PARSING_PACKETS__SHIFT 0x8
4081#define CP_BUSY_STAT__RCIU_PFP_BUSY_MASK 0x200
4082#define CP_BUSY_STAT__RCIU_PFP_BUSY__SHIFT 0x9
4083#define CP_BUSY_STAT__RCIU_ME_BUSY_MASK 0x400
4084#define CP_BUSY_STAT__RCIU_ME_BUSY__SHIFT 0xa
4085#define CP_BUSY_STAT__SEM_CMDFIFO_NOT_EMPTY_MASK 0x1000
4086#define CP_BUSY_STAT__SEM_CMDFIFO_NOT_EMPTY__SHIFT 0xc
4087#define CP_BUSY_STAT__SEM_FAILED_AND_HOLDING_MASK 0x2000
4088#define CP_BUSY_STAT__SEM_FAILED_AND_HOLDING__SHIFT 0xd
4089#define CP_BUSY_STAT__SEM_POLLING_FOR_PASS_MASK 0x4000
4090#define CP_BUSY_STAT__SEM_POLLING_FOR_PASS__SHIFT 0xe
4091#define CP_BUSY_STAT__GFX_CONTEXT_BUSY_MASK 0x8000
4092#define CP_BUSY_STAT__GFX_CONTEXT_BUSY__SHIFT 0xf
4093#define CP_BUSY_STAT__ME_PARSER_BUSY_MASK 0x20000
4094#define CP_BUSY_STAT__ME_PARSER_BUSY__SHIFT 0x11
4095#define CP_BUSY_STAT__EOP_DONE_BUSY_MASK 0x40000
4096#define CP_BUSY_STAT__EOP_DONE_BUSY__SHIFT 0x12
4097#define CP_BUSY_STAT__STRM_OUT_BUSY_MASK 0x80000
4098#define CP_BUSY_STAT__STRM_OUT_BUSY__SHIFT 0x13
4099#define CP_BUSY_STAT__PIPE_STATS_BUSY_MASK 0x100000
4100#define CP_BUSY_STAT__PIPE_STATS_BUSY__SHIFT 0x14
4101#define CP_BUSY_STAT__RCIU_CE_BUSY_MASK 0x200000
4102#define CP_BUSY_STAT__RCIU_CE_BUSY__SHIFT 0x15
4103#define CP_BUSY_STAT__CE_PARSING_PACKETS_MASK 0x400000
4104#define CP_BUSY_STAT__CE_PARSING_PACKETS__SHIFT 0x16
4105#define CP_STAT__ROQ_RING_BUSY_MASK 0x200
4106#define CP_STAT__ROQ_RING_BUSY__SHIFT 0x9
4107#define CP_STAT__ROQ_INDIRECT1_BUSY_MASK 0x400
4108#define CP_STAT__ROQ_INDIRECT1_BUSY__SHIFT 0xa
4109#define CP_STAT__ROQ_INDIRECT2_BUSY_MASK 0x800
4110#define CP_STAT__ROQ_INDIRECT2_BUSY__SHIFT 0xb
4111#define CP_STAT__ROQ_STATE_BUSY_MASK 0x1000
4112#define CP_STAT__ROQ_STATE_BUSY__SHIFT 0xc
4113#define CP_STAT__DC_BUSY_MASK 0x2000
4114#define CP_STAT__DC_BUSY__SHIFT 0xd
4115#define CP_STAT__ATCL2IU_BUSY_MASK 0x4000
4116#define CP_STAT__ATCL2IU_BUSY__SHIFT 0xe
4117#define CP_STAT__PFP_BUSY_MASK 0x8000
4118#define CP_STAT__PFP_BUSY__SHIFT 0xf
4119#define CP_STAT__MEQ_BUSY_MASK 0x10000
4120#define CP_STAT__MEQ_BUSY__SHIFT 0x10
4121#define CP_STAT__ME_BUSY_MASK 0x20000
4122#define CP_STAT__ME_BUSY__SHIFT 0x11
4123#define CP_STAT__QUERY_BUSY_MASK 0x40000
4124#define CP_STAT__QUERY_BUSY__SHIFT 0x12
4125#define CP_STAT__SEMAPHORE_BUSY_MASK 0x80000
4126#define CP_STAT__SEMAPHORE_BUSY__SHIFT 0x13
4127#define CP_STAT__INTERRUPT_BUSY_MASK 0x100000
4128#define CP_STAT__INTERRUPT_BUSY__SHIFT 0x14
4129#define CP_STAT__SURFACE_SYNC_BUSY_MASK 0x200000
4130#define CP_STAT__SURFACE_SYNC_BUSY__SHIFT 0x15
4131#define CP_STAT__DMA_BUSY_MASK 0x400000
4132#define CP_STAT__DMA_BUSY__SHIFT 0x16
4133#define CP_STAT__RCIU_BUSY_MASK 0x800000
4134#define CP_STAT__RCIU_BUSY__SHIFT 0x17
4135#define CP_STAT__SCRATCH_RAM_BUSY_MASK 0x1000000
4136#define CP_STAT__SCRATCH_RAM_BUSY__SHIFT 0x18
4137#define CP_STAT__CPC_CPG_BUSY_MASK 0x2000000
4138#define CP_STAT__CPC_CPG_BUSY__SHIFT 0x19
4139#define CP_STAT__CE_BUSY_MASK 0x4000000
4140#define CP_STAT__CE_BUSY__SHIFT 0x1a
4141#define CP_STAT__TCIU_BUSY_MASK 0x8000000
4142#define CP_STAT__TCIU_BUSY__SHIFT 0x1b
4143#define CP_STAT__ROQ_CE_RING_BUSY_MASK 0x10000000
4144#define CP_STAT__ROQ_CE_RING_BUSY__SHIFT 0x1c
4145#define CP_STAT__ROQ_CE_INDIRECT1_BUSY_MASK 0x20000000
4146#define CP_STAT__ROQ_CE_INDIRECT1_BUSY__SHIFT 0x1d
4147#define CP_STAT__ROQ_CE_INDIRECT2_BUSY_MASK 0x40000000
4148#define CP_STAT__ROQ_CE_INDIRECT2_BUSY__SHIFT 0x1e
4149#define CP_STAT__CP_BUSY_MASK 0x80000000
4150#define CP_STAT__CP_BUSY__SHIFT 0x1f
4151#define CP_ME_HEADER_DUMP__ME_HEADER_DUMP_MASK 0xffffffff
4152#define CP_ME_HEADER_DUMP__ME_HEADER_DUMP__SHIFT 0x0
4153#define CP_PFP_HEADER_DUMP__PFP_HEADER_DUMP_MASK 0xffffffff
4154#define CP_PFP_HEADER_DUMP__PFP_HEADER_DUMP__SHIFT 0x0
4155#define CP_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x3f
4156#define CP_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x0
4157#define CP_GRBM_FREE_COUNT__FREE_COUNT_GDS_MASK 0x3f00
4158#define CP_GRBM_FREE_COUNT__FREE_COUNT_GDS__SHIFT 0x8
4159#define CP_GRBM_FREE_COUNT__FREE_COUNT_PFP_MASK 0x3f0000
4160#define CP_GRBM_FREE_COUNT__FREE_COUNT_PFP__SHIFT 0x10
4161#define CP_CE_HEADER_DUMP__CE_HEADER_DUMP_MASK 0xffffffff
4162#define CP_CE_HEADER_DUMP__CE_HEADER_DUMP__SHIFT 0x0
4163#define CP_CSF_STAT__BUFFER_SLOTS_ALLOCATED_MASK 0xf
4164#define CP_CSF_STAT__BUFFER_SLOTS_ALLOCATED__SHIFT 0x0
4165#define CP_CSF_STAT__BUFFER_REQUEST_COUNT_MASK 0x1ff00
4166#define CP_CSF_STAT__BUFFER_REQUEST_COUNT__SHIFT 0x8
4167#define CP_CSF_CNTL__FETCH_BUFFER_DEPTH_MASK 0xf
4168#define CP_CSF_CNTL__FETCH_BUFFER_DEPTH__SHIFT 0x0
4169#define CP_ME_CNTL__CE_INVALIDATE_ICACHE_MASK 0x10
4170#define CP_ME_CNTL__CE_INVALIDATE_ICACHE__SHIFT 0x4
4171#define CP_ME_CNTL__PFP_INVALIDATE_ICACHE_MASK 0x40
4172#define CP_ME_CNTL__PFP_INVALIDATE_ICACHE__SHIFT 0x6
4173#define CP_ME_CNTL__ME_INVALIDATE_ICACHE_MASK 0x100
4174#define CP_ME_CNTL__ME_INVALIDATE_ICACHE__SHIFT 0x8
4175#define CP_ME_CNTL__CE_PIPE0_RESET_MASK 0x10000
4176#define CP_ME_CNTL__CE_PIPE0_RESET__SHIFT 0x10
4177#define CP_ME_CNTL__PFP_PIPE0_RESET_MASK 0x40000
4178#define CP_ME_CNTL__PFP_PIPE0_RESET__SHIFT 0x12
4179#define CP_ME_CNTL__ME_PIPE0_RESET_MASK 0x100000
4180#define CP_ME_CNTL__ME_PIPE0_RESET__SHIFT 0x14
4181#define CP_ME_CNTL__CE_HALT_MASK 0x1000000
4182#define CP_ME_CNTL__CE_HALT__SHIFT 0x18
4183#define CP_ME_CNTL__CE_STEP_MASK 0x2000000
4184#define CP_ME_CNTL__CE_STEP__SHIFT 0x19
4185#define CP_ME_CNTL__PFP_HALT_MASK 0x4000000
4186#define CP_ME_CNTL__PFP_HALT__SHIFT 0x1a
4187#define CP_ME_CNTL__PFP_STEP_MASK 0x8000000
4188#define CP_ME_CNTL__PFP_STEP__SHIFT 0x1b
4189#define CP_ME_CNTL__ME_HALT_MASK 0x10000000
4190#define CP_ME_CNTL__ME_HALT__SHIFT 0x1c
4191#define CP_ME_CNTL__ME_STEP_MASK 0x20000000
4192#define CP_ME_CNTL__ME_STEP__SHIFT 0x1d
4193#define CP_CNTX_STAT__ACTIVE_HP3D_CONTEXTS_MASK 0xff
4194#define CP_CNTX_STAT__ACTIVE_HP3D_CONTEXTS__SHIFT 0x0
4195#define CP_CNTX_STAT__CURRENT_HP3D_CONTEXT_MASK 0x700
4196#define CP_CNTX_STAT__CURRENT_HP3D_CONTEXT__SHIFT 0x8
4197#define CP_CNTX_STAT__ACTIVE_GFX_CONTEXTS_MASK 0xff00000
4198#define CP_CNTX_STAT__ACTIVE_GFX_CONTEXTS__SHIFT 0x14
4199#define CP_CNTX_STAT__CURRENT_GFX_CONTEXT_MASK 0x70000000
4200#define CP_CNTX_STAT__CURRENT_GFX_CONTEXT__SHIFT 0x1c
4201#define CP_ME_PREEMPTION__OBSOLETE_MASK 0x1
4202#define CP_ME_PREEMPTION__OBSOLETE__SHIFT 0x0
4203#define CP_RB0_RPTR__RB_RPTR_MASK 0xfffff
4204#define CP_RB0_RPTR__RB_RPTR__SHIFT 0x0
4205#define CP_RB_RPTR__RB_RPTR_MASK 0xfffff
4206#define CP_RB_RPTR__RB_RPTR__SHIFT 0x0
4207#define CP_RB1_RPTR__RB_RPTR_MASK 0xfffff
4208#define CP_RB1_RPTR__RB_RPTR__SHIFT 0x0
4209#define CP_RB2_RPTR__RB_RPTR_MASK 0xfffff
4210#define CP_RB2_RPTR__RB_RPTR__SHIFT 0x0
4211#define CP_RB_WPTR_DELAY__PRE_WRITE_TIMER_MASK 0xfffffff
4212#define CP_RB_WPTR_DELAY__PRE_WRITE_TIMER__SHIFT 0x0
4213#define CP_RB_WPTR_DELAY__PRE_WRITE_LIMIT_MASK 0xf0000000
4214#define CP_RB_WPTR_DELAY__PRE_WRITE_LIMIT__SHIFT 0x1c
4215#define CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK 0xffff
4216#define CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT 0x0
4217#define CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xffff0000
4218#define CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
4219#define CP_CE_INIT_BASE_LO__INIT_BASE_LO_MASK 0xffffffe0
4220#define CP_CE_INIT_BASE_LO__INIT_BASE_LO__SHIFT 0x5
4221#define CP_CE_INIT_BASE_HI__INIT_BASE_HI_MASK 0xffff
4222#define CP_CE_INIT_BASE_HI__INIT_BASE_HI__SHIFT 0x0
4223#define CP_CE_INIT_BUFSZ__INIT_BUFSZ_MASK 0xfff
4224#define CP_CE_INIT_BUFSZ__INIT_BUFSZ__SHIFT 0x0
4225#define CP_CE_IB1_BASE_LO__IB1_BASE_LO_MASK 0xfffffffc
4226#define CP_CE_IB1_BASE_LO__IB1_BASE_LO__SHIFT 0x2
4227#define CP_CE_IB1_BASE_HI__IB1_BASE_HI_MASK 0xffff
4228#define CP_CE_IB1_BASE_HI__IB1_BASE_HI__SHIFT 0x0
4229#define CP_CE_IB1_BUFSZ__IB1_BUFSZ_MASK 0xfffff
4230#define CP_CE_IB1_BUFSZ__IB1_BUFSZ__SHIFT 0x0
4231#define CP_CE_IB2_BASE_LO__IB2_BASE_LO_MASK 0xfffffffc
4232#define CP_CE_IB2_BASE_LO__IB2_BASE_LO__SHIFT 0x2
4233#define CP_CE_IB2_BASE_HI__IB2_BASE_HI_MASK 0xffff
4234#define CP_CE_IB2_BASE_HI__IB2_BASE_HI__SHIFT 0x0
4235#define CP_CE_IB2_BUFSZ__IB2_BUFSZ_MASK 0xfffff
4236#define CP_CE_IB2_BUFSZ__IB2_BUFSZ__SHIFT 0x0
4237#define CP_IB1_BASE_LO__IB1_BASE_LO_MASK 0xfffffffc
4238#define CP_IB1_BASE_LO__IB1_BASE_LO__SHIFT 0x2
4239#define CP_IB1_BASE_HI__IB1_BASE_HI_MASK 0xffff
4240#define CP_IB1_BASE_HI__IB1_BASE_HI__SHIFT 0x0
4241#define CP_IB1_BUFSZ__IB1_BUFSZ_MASK 0xfffff
4242#define CP_IB1_BUFSZ__IB1_BUFSZ__SHIFT 0x0
4243#define CP_IB2_BASE_LO__IB2_BASE_LO_MASK 0xfffffffc
4244#define CP_IB2_BASE_LO__IB2_BASE_LO__SHIFT 0x2
4245#define CP_IB2_BASE_HI__IB2_BASE_HI_MASK 0xffff
4246#define CP_IB2_BASE_HI__IB2_BASE_HI__SHIFT 0x0
4247#define CP_IB2_BUFSZ__IB2_BUFSZ_MASK 0xfffff
4248#define CP_IB2_BUFSZ__IB2_BUFSZ__SHIFT 0x0
4249#define CP_ST_BASE_LO__ST_BASE_LO_MASK 0xfffffffc
4250#define CP_ST_BASE_LO__ST_BASE_LO__SHIFT 0x2
4251#define CP_ST_BASE_HI__ST_BASE_HI_MASK 0xffff
4252#define CP_ST_BASE_HI__ST_BASE_HI__SHIFT 0x0
4253#define CP_ST_BUFSZ__ST_BUFSZ_MASK 0xfffff
4254#define CP_ST_BUFSZ__ST_BUFSZ__SHIFT 0x0
4255#define CP_ROQ_THRESHOLDS__IB1_START_MASK 0xff
4256#define CP_ROQ_THRESHOLDS__IB1_START__SHIFT 0x0
4257#define CP_ROQ_THRESHOLDS__IB2_START_MASK 0xff00
4258#define CP_ROQ_THRESHOLDS__IB2_START__SHIFT 0x8
4259#define CP_MEQ_STQ_THRESHOLD__STQ_START_MASK 0xff
4260#define CP_MEQ_STQ_THRESHOLD__STQ_START__SHIFT 0x0
4261#define CP_ROQ1_THRESHOLDS__RB1_START_MASK 0xff
4262#define CP_ROQ1_THRESHOLDS__RB1_START__SHIFT 0x0
4263#define CP_ROQ1_THRESHOLDS__RB2_START_MASK 0xff00
4264#define CP_ROQ1_THRESHOLDS__RB2_START__SHIFT 0x8
4265#define CP_ROQ1_THRESHOLDS__R0_IB1_START_MASK 0xff0000
4266#define CP_ROQ1_THRESHOLDS__R0_IB1_START__SHIFT 0x10
4267#define CP_ROQ1_THRESHOLDS__R1_IB1_START_MASK 0xff000000
4268#define CP_ROQ1_THRESHOLDS__R1_IB1_START__SHIFT 0x18
4269#define CP_ROQ2_THRESHOLDS__R2_IB1_START_MASK 0xff
4270#define CP_ROQ2_THRESHOLDS__R2_IB1_START__SHIFT 0x0
4271#define CP_ROQ2_THRESHOLDS__R0_IB2_START_MASK 0xff00
4272#define CP_ROQ2_THRESHOLDS__R0_IB2_START__SHIFT 0x8
4273#define CP_ROQ2_THRESHOLDS__R1_IB2_START_MASK 0xff0000
4274#define CP_ROQ2_THRESHOLDS__R1_IB2_START__SHIFT 0x10
4275#define CP_ROQ2_THRESHOLDS__R2_IB2_START_MASK 0xff000000
4276#define CP_ROQ2_THRESHOLDS__R2_IB2_START__SHIFT 0x18
4277#define CP_STQ_THRESHOLDS__STQ0_START_MASK 0xff
4278#define CP_STQ_THRESHOLDS__STQ0_START__SHIFT 0x0
4279#define CP_STQ_THRESHOLDS__STQ1_START_MASK 0xff00
4280#define CP_STQ_THRESHOLDS__STQ1_START__SHIFT 0x8
4281#define CP_STQ_THRESHOLDS__STQ2_START_MASK 0xff0000
4282#define CP_STQ_THRESHOLDS__STQ2_START__SHIFT 0x10
4283#define CP_QUEUE_THRESHOLDS__ROQ_IB1_START_MASK 0x3f
4284#define CP_QUEUE_THRESHOLDS__ROQ_IB1_START__SHIFT 0x0
4285#define CP_QUEUE_THRESHOLDS__ROQ_IB2_START_MASK 0x3f00
4286#define CP_QUEUE_THRESHOLDS__ROQ_IB2_START__SHIFT 0x8
4287#define CP_MEQ_THRESHOLDS__MEQ1_START_MASK 0xff
4288#define CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT 0x0
4289#define CP_MEQ_THRESHOLDS__MEQ2_START_MASK 0xff00
4290#define CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT 0x8
4291#define CP_ROQ_AVAIL__ROQ_CNT_RING_MASK 0x7ff
4292#define CP_ROQ_AVAIL__ROQ_CNT_RING__SHIFT 0x0
4293#define CP_ROQ_AVAIL__ROQ_CNT_IB1_MASK 0x7ff0000
4294#define CP_ROQ_AVAIL__ROQ_CNT_IB1__SHIFT 0x10
4295#define CP_STQ_AVAIL__STQ_CNT_MASK 0x1ff
4296#define CP_STQ_AVAIL__STQ_CNT__SHIFT 0x0
4297#define CP_ROQ2_AVAIL__ROQ_CNT_IB2_MASK 0x7ff
4298#define CP_ROQ2_AVAIL__ROQ_CNT_IB2__SHIFT 0x0
4299#define CP_MEQ_AVAIL__MEQ_CNT_MASK 0x3ff
4300#define CP_MEQ_AVAIL__MEQ_CNT__SHIFT 0x0
4301#define CP_CMD_INDEX__CMD_INDEX_MASK 0x7ff
4302#define CP_CMD_INDEX__CMD_INDEX__SHIFT 0x0
4303#define CP_CMD_INDEX__CMD_ME_SEL_MASK 0x3000
4304#define CP_CMD_INDEX__CMD_ME_SEL__SHIFT 0xc
4305#define CP_CMD_INDEX__CMD_QUEUE_SEL_MASK 0x70000
4306#define CP_CMD_INDEX__CMD_QUEUE_SEL__SHIFT 0x10
4307#define CP_CMD_DATA__CMD_DATA_MASK 0xffffffff
4308#define CP_CMD_DATA__CMD_DATA__SHIFT 0x0
4309#define CP_ROQ_RB_STAT__ROQ_RPTR_PRIMARY_MASK 0x3ff
4310#define CP_ROQ_RB_STAT__ROQ_RPTR_PRIMARY__SHIFT 0x0
4311#define CP_ROQ_RB_STAT__ROQ_WPTR_PRIMARY_MASK 0x3ff0000
4312#define CP_ROQ_RB_STAT__ROQ_WPTR_PRIMARY__SHIFT 0x10
4313#define CP_ROQ_IB1_STAT__ROQ_RPTR_INDIRECT1_MASK 0x3ff
4314#define CP_ROQ_IB1_STAT__ROQ_RPTR_INDIRECT1__SHIFT 0x0
4315#define CP_ROQ_IB1_STAT__ROQ_WPTR_INDIRECT1_MASK 0x3ff0000
4316#define CP_ROQ_IB1_STAT__ROQ_WPTR_INDIRECT1__SHIFT 0x10
4317#define CP_ROQ_IB2_STAT__ROQ_RPTR_INDIRECT2_MASK 0x3ff
4318#define CP_ROQ_IB2_STAT__ROQ_RPTR_INDIRECT2__SHIFT 0x0
4319#define CP_ROQ_IB2_STAT__ROQ_WPTR_INDIRECT2_MASK 0x3ff0000
4320#define CP_ROQ_IB2_STAT__ROQ_WPTR_INDIRECT2__SHIFT 0x10
4321#define CP_STQ_STAT__STQ_RPTR_MASK 0x3ff
4322#define CP_STQ_STAT__STQ_RPTR__SHIFT 0x0
4323#define CP_STQ_WR_STAT__STQ_WPTR_MASK 0x3ff
4324#define CP_STQ_WR_STAT__STQ_WPTR__SHIFT 0x0
4325#define CP_MEQ_STAT__MEQ_RPTR_MASK 0x3ff
4326#define CP_MEQ_STAT__MEQ_RPTR__SHIFT 0x0
4327#define CP_MEQ_STAT__MEQ_WPTR_MASK 0x3ff0000
4328#define CP_MEQ_STAT__MEQ_WPTR__SHIFT 0x10
/*
 * CP (command processor) register bit-field definitions.
 * Autogenerated MASK/__SHIFT pairs: for each field, MASK is the in-register
 * bit mask and __SHIFT is the bit position of its least-significant bit
 * (i.e. field = (reg & MASK) >> SHIFT).
 * Fix: stray line-number prefixes fused onto every `#define` (a scrape
 * artifact, e.g. `4329#define ...`) made each line invalid C; removed.
 */
#define CP_CEQ1_AVAIL__CEQ_CNT_RING_MASK 0x7ff
#define CP_CEQ1_AVAIL__CEQ_CNT_RING__SHIFT 0x0
#define CP_CEQ1_AVAIL__CEQ_CNT_IB1_MASK 0x7ff0000
#define CP_CEQ1_AVAIL__CEQ_CNT_IB1__SHIFT 0x10
#define CP_CEQ2_AVAIL__CEQ_CNT_IB2_MASK 0x7ff
#define CP_CEQ2_AVAIL__CEQ_CNT_IB2__SHIFT 0x0
#define CP_CE_ROQ_RB_STAT__CEQ_RPTR_PRIMARY_MASK 0x3ff
#define CP_CE_ROQ_RB_STAT__CEQ_RPTR_PRIMARY__SHIFT 0x0
#define CP_CE_ROQ_RB_STAT__CEQ_WPTR_PRIMARY_MASK 0x3ff0000
#define CP_CE_ROQ_RB_STAT__CEQ_WPTR_PRIMARY__SHIFT 0x10
#define CP_CE_ROQ_IB1_STAT__CEQ_RPTR_INDIRECT1_MASK 0x3ff
#define CP_CE_ROQ_IB1_STAT__CEQ_RPTR_INDIRECT1__SHIFT 0x0
#define CP_CE_ROQ_IB1_STAT__CEQ_WPTR_INDIRECT1_MASK 0x3ff0000
#define CP_CE_ROQ_IB1_STAT__CEQ_WPTR_INDIRECT1__SHIFT 0x10
#define CP_CE_ROQ_IB2_STAT__CEQ_RPTR_INDIRECT2_MASK 0x3ff
#define CP_CE_ROQ_IB2_STAT__CEQ_RPTR_INDIRECT2__SHIFT 0x0
#define CP_CE_ROQ_IB2_STAT__CEQ_WPTR_INDIRECT2_MASK 0x3ff0000
#define CP_CE_ROQ_IB2_STAT__CEQ_WPTR_INDIRECT2__SHIFT 0x10
#define CP_INT_STAT_DEBUG__CP_VM_DOORBELL_WR_INT_ASSERTED_MASK 0x800
#define CP_INT_STAT_DEBUG__CP_VM_DOORBELL_WR_INT_ASSERTED__SHIFT 0xb
#define CP_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x4000
#define CP_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
#define CP_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x20000
#define CP_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
#define CP_INT_STAT_DEBUG__CMP_BUSY_INT_ASSERTED_MASK 0x40000
#define CP_INT_STAT_DEBUG__CMP_BUSY_INT_ASSERTED__SHIFT 0x12
#define CP_INT_STAT_DEBUG__CNTX_BUSY_INT_ASSERTED_MASK 0x80000
#define CP_INT_STAT_DEBUG__CNTX_BUSY_INT_ASSERTED__SHIFT 0x13
#define CP_INT_STAT_DEBUG__CNTX_EMPTY_INT_ASSERTED_MASK 0x100000
#define CP_INT_STAT_DEBUG__CNTX_EMPTY_INT_ASSERTED__SHIFT 0x14
#define CP_INT_STAT_DEBUG__GFX_IDLE_INT_ASSERTED_MASK 0x200000
#define CP_INT_STAT_DEBUG__GFX_IDLE_INT_ASSERTED__SHIFT 0x15
#define CP_INT_STAT_DEBUG__PRIV_INSTR_INT_ASSERTED_MASK 0x400000
#define CP_INT_STAT_DEBUG__PRIV_INSTR_INT_ASSERTED__SHIFT 0x16
#define CP_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x800000
#define CP_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
#define CP_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x1000000
#define CP_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
#define CP_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x4000000
#define CP_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
#define CP_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x8000000
#define CP_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
#define CP_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000
#define CP_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
#define CP_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000
#define CP_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
#define CP_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000
#define CP_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
#define CP_PERFMON_CNTL__PERFMON_STATE_MASK 0xf
#define CP_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
#define CP_PERFMON_CNTL__SPM_PERFMON_STATE_MASK 0xf0
#define CP_PERFMON_CNTL__SPM_PERFMON_STATE__SHIFT 0x4
#define CP_PERFMON_CNTL__PERFMON_ENABLE_MODE_MASK 0x300
#define CP_PERFMON_CNTL__PERFMON_ENABLE_MODE__SHIFT 0x8
#define CP_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE_MASK 0x400
#define CP_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE__SHIFT 0xa
#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE_MASK 0x80000000
#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE__SHIFT 0x1f
#define CP_RINGID__RINGID_MASK 0x3
#define CP_RINGID__RINGID__SHIFT 0x0
#define CP_PIPEID__PIPE_ID_MASK 0x3
#define CP_PIPEID__PIPE_ID__SHIFT 0x0
#define CP_VMID__VMID_MASK 0xf
#define CP_VMID__VMID__SHIFT 0x0
#define CP_HPD_ROQ_OFFSETS__IQ_OFFSET_MASK 0x7
#define CP_HPD_ROQ_OFFSETS__IQ_OFFSET__SHIFT 0x0
#define CP_HPD_ROQ_OFFSETS__PQ_OFFSET_MASK 0x3f00
#define CP_HPD_ROQ_OFFSETS__PQ_OFFSET__SHIFT 0x8
#define CP_HPD_ROQ_OFFSETS__IB_OFFSET_MASK 0x3f0000
#define CP_HPD_ROQ_OFFSETS__IB_OFFSET__SHIFT 0x10
#define CP_HPD_STATUS0__QUEUE_STATE_MASK 0x1f
#define CP_HPD_STATUS0__QUEUE_STATE__SHIFT 0x0
#define CP_HPD_STATUS0__MAPPED_QUEUE_MASK 0xe0
#define CP_HPD_STATUS0__MAPPED_QUEUE__SHIFT 0x5
#define CP_HPD_STATUS0__QUEUE_AVAILABLE_MASK 0xff00
#define CP_HPD_STATUS0__QUEUE_AVAILABLE__SHIFT 0x8
#define CP_MQD_BASE_ADDR__BASE_ADDR_MASK 0xfffffffc
#define CP_MQD_BASE_ADDR__BASE_ADDR__SHIFT 0x2
#define CP_MQD_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0xffff
#define CP_MQD_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
#define CP_HQD_ACTIVE__ACTIVE_MASK 0x1
#define CP_HQD_ACTIVE__ACTIVE__SHIFT 0x0
#define CP_HQD_ACTIVE__BUSY_GATE_MASK 0x2
#define CP_HQD_ACTIVE__BUSY_GATE__SHIFT 0x1
#define CP_HQD_VMID__VMID_MASK 0xf
#define CP_HQD_VMID__VMID__SHIFT 0x0
#define CP_HQD_VMID__IB_VMID_MASK 0xf00
#define CP_HQD_VMID__IB_VMID__SHIFT 0x8
#define CP_HQD_VMID__VQID_MASK 0x3ff0000
#define CP_HQD_VMID__VQID__SHIFT 0x10
#define CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK 0x1
#define CP_HQD_PERSISTENT_STATE__PRELOAD_REQ__SHIFT 0x0
#define CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE_MASK 0x3ff00
#define CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT 0x8
#define CP_HQD_PERSISTENT_STATE__RESTORE_ACTIVE_MASK 0x10000000
#define CP_HQD_PERSISTENT_STATE__RESTORE_ACTIVE__SHIFT 0x1c
#define CP_HQD_PERSISTENT_STATE__RELAUNCH_WAVES_MASK 0x20000000
#define CP_HQD_PERSISTENT_STATE__RELAUNCH_WAVES__SHIFT 0x1d
#define CP_HQD_PERSISTENT_STATE__QSWITCH_MODE_MASK 0x40000000
#define CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT 0x1e
#define CP_HQD_PERSISTENT_STATE__DISP_ACTIVE_MASK 0x80000000
#define CP_HQD_PERSISTENT_STATE__DISP_ACTIVE__SHIFT 0x1f
#define CP_HQD_PIPE_PRIORITY__PIPE_PRIORITY_MASK 0x3
#define CP_HQD_PIPE_PRIORITY__PIPE_PRIORITY__SHIFT 0x0
#define CP_HQD_QUEUE_PRIORITY__PRIORITY_LEVEL_MASK 0xf
#define CP_HQD_QUEUE_PRIORITY__PRIORITY_LEVEL__SHIFT 0x0
#define CP_HQD_QUANTUM__QUANTUM_EN_MASK 0x1
#define CP_HQD_QUANTUM__QUANTUM_EN__SHIFT 0x0
#define CP_HQD_QUANTUM__QUANTUM_SCALE_MASK 0x10
#define CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT 0x4
#define CP_HQD_QUANTUM__QUANTUM_DURATION_MASK 0x3f00
#define CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT 0x8
#define CP_HQD_QUANTUM__QUANTUM_ACTIVE_MASK 0x80000000
#define CP_HQD_QUANTUM__QUANTUM_ACTIVE__SHIFT 0x1f
#define CP_HQD_PQ_BASE__ADDR_MASK 0xffffffff
#define CP_HQD_PQ_BASE__ADDR__SHIFT 0x0
#define CP_HQD_PQ_BASE_HI__ADDR_HI_MASK 0xff
#define CP_HQD_PQ_BASE_HI__ADDR_HI__SHIFT 0x0
#define CP_HQD_PQ_RPTR__CONSUMED_OFFSET_MASK 0xffffffff
#define CP_HQD_PQ_RPTR__CONSUMED_OFFSET__SHIFT 0x0
#define CP_HQD_PQ_RPTR_REPORT_ADDR__RPTR_REPORT_ADDR_MASK 0xfffffffc
#define CP_HQD_PQ_RPTR_REPORT_ADDR__RPTR_REPORT_ADDR__SHIFT 0x2
#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI__RPTR_REPORT_ADDR_HI_MASK 0xffff
#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI__RPTR_REPORT_ADDR_HI__SHIFT 0x0
#define CP_HQD_PQ_WPTR_POLL_ADDR__WPTR_ADDR_MASK 0xfffffffc
#define CP_HQD_PQ_WPTR_POLL_ADDR__WPTR_ADDR__SHIFT 0x2
#define CP_HQD_PQ_WPTR_POLL_ADDR_HI__WPTR_ADDR_HI_MASK 0xffff
#define CP_HQD_PQ_WPTR_POLL_ADDR_HI__WPTR_ADDR_HI__SHIFT 0x0
#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE_MASK 0x1
#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT 0x0
#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP_MASK 0x2
#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT 0x1
#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK 0x7ffffc
#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT 0x2
#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_CARRY_BITS_MASK 0x3800000
#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_CARRY_BITS__SHIFT 0x17
#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK 0x10000000
#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE__SHIFT 0x1c
#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SCHD_HIT_MASK 0x20000000
#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SCHD_HIT__SHIFT 0x1d
#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK 0x40000000
#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN__SHIFT 0x1e
#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK 0x80000000
#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT__SHIFT 0x1f
#define CP_HQD_PQ_WPTR__OFFSET_MASK 0xffffffff
#define CP_HQD_PQ_WPTR__OFFSET__SHIFT 0x0
#define CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK 0x3f
#define CP_HQD_PQ_CONTROL__QUEUE_SIZE__SHIFT 0x0
#define CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK 0x3f00
#define CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT 0x8
#define CP_HQD_PQ_CONTROL__MTYPE_MASK 0x18000
#define CP_HQD_PQ_CONTROL__MTYPE__SHIFT 0xf
#define CP_HQD_PQ_CONTROL__ENDIAN_SWAP_MASK 0x60000
#define CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT 0x11
#define CP_HQD_PQ_CONTROL__MIN_AVAIL_SIZE_MASK 0x300000
#define CP_HQD_PQ_CONTROL__MIN_AVAIL_SIZE__SHIFT 0x14
#define CP_HQD_PQ_CONTROL__PQ_ATC_MASK 0x800000
#define CP_HQD_PQ_CONTROL__PQ_ATC__SHIFT 0x17
#define CP_HQD_PQ_CONTROL__CACHE_POLICY_MASK 0x1000000
#define CP_HQD_PQ_CONTROL__CACHE_POLICY__SHIFT 0x18
#define CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR_MASK 0x6000000
#define CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT 0x19
#define CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK 0x8000000
#define CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR__SHIFT 0x1b
#define CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK 0x10000000
#define CP_HQD_PQ_CONTROL__UNORD_DISPATCH__SHIFT 0x1c
#define CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK 0x20000000
#define CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP__SHIFT 0x1d
#define CP_HQD_PQ_CONTROL__PRIV_STATE_MASK 0x40000000
#define CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT 0x1e
#define CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK 0x80000000
#define CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT 0x1f
#define CP_HQD_IB_BASE_ADDR__IB_BASE_ADDR_MASK 0xfffffffc
#define CP_HQD_IB_BASE_ADDR__IB_BASE_ADDR__SHIFT 0x2
#define CP_HQD_IB_BASE_ADDR_HI__IB_BASE_ADDR_HI_MASK 0xffff
#define CP_HQD_IB_BASE_ADDR_HI__IB_BASE_ADDR_HI__SHIFT 0x0
#define CP_HQD_IB_RPTR__CONSUMED_OFFSET_MASK 0xfffff
#define CP_HQD_IB_RPTR__CONSUMED_OFFSET__SHIFT 0x0
#define CP_HQD_IB_CONTROL__IB_SIZE_MASK 0xfffff
#define CP_HQD_IB_CONTROL__IB_SIZE__SHIFT 0x0
#define CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE_MASK 0x300000
#define CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT 0x14
#define CP_HQD_IB_CONTROL__IB_ATC_MASK 0x800000
#define CP_HQD_IB_CONTROL__IB_ATC__SHIFT 0x17
#define CP_HQD_IB_CONTROL__IB_CACHE_POLICY_MASK 0x1000000
#define CP_HQD_IB_CONTROL__IB_CACHE_POLICY__SHIFT 0x18
#define CP_HQD_IB_CONTROL__MTYPE_MASK 0x18000000
#define CP_HQD_IB_CONTROL__MTYPE__SHIFT 0x1b
#define CP_HQD_IB_CONTROL__PROCESSING_IB_MASK 0x80000000
#define CP_HQD_IB_CONTROL__PROCESSING_IB__SHIFT 0x1f
#define CP_HQD_IQ_TIMER__WAIT_TIME_MASK 0xff
#define CP_HQD_IQ_TIMER__WAIT_TIME__SHIFT 0x0
#define CP_HQD_IQ_TIMER__RETRY_TYPE_MASK 0x700
#define CP_HQD_IQ_TIMER__RETRY_TYPE__SHIFT 0x8
#define CP_HQD_IQ_TIMER__IMMEDIATE_EXPIRE_MASK 0x800
#define CP_HQD_IQ_TIMER__IMMEDIATE_EXPIRE__SHIFT 0xb
#define CP_HQD_IQ_TIMER__INTERRUPT_TYPE_MASK 0x3000
#define CP_HQD_IQ_TIMER__INTERRUPT_TYPE__SHIFT 0xc
#define CP_HQD_IQ_TIMER__CLOCK_COUNT_MASK 0xc000
#define CP_HQD_IQ_TIMER__CLOCK_COUNT__SHIFT 0xe
#define CP_HQD_IQ_TIMER__INTERRUPT_SIZE_MASK 0x3f0000
#define CP_HQD_IQ_TIMER__INTERRUPT_SIZE__SHIFT 0x10
#define CP_HQD_IQ_TIMER__QUANTUM_TIMER_MASK 0x400000
#define CP_HQD_IQ_TIMER__QUANTUM_TIMER__SHIFT 0x16
#define CP_HQD_IQ_TIMER__IQ_ATC_MASK 0x800000
#define CP_HQD_IQ_TIMER__IQ_ATC__SHIFT 0x17
#define CP_HQD_IQ_TIMER__CACHE_POLICY_MASK 0x1000000
#define CP_HQD_IQ_TIMER__CACHE_POLICY__SHIFT 0x18
#define CP_HQD_IQ_TIMER__MTYPE_MASK 0x18000000
#define CP_HQD_IQ_TIMER__MTYPE__SHIFT 0x1b
#define CP_HQD_IQ_TIMER__PROCESS_IQ_EN_MASK 0x20000000
#define CP_HQD_IQ_TIMER__PROCESS_IQ_EN__SHIFT 0x1d
#define CP_HQD_IQ_TIMER__PROCESSING_IQ_MASK 0x40000000
#define CP_HQD_IQ_TIMER__PROCESSING_IQ__SHIFT 0x1e
#define CP_HQD_IQ_TIMER__ACTIVE_MASK 0x80000000
#define CP_HQD_IQ_TIMER__ACTIVE__SHIFT 0x1f
#define CP_HQD_IQ_RPTR__OFFSET_MASK 0x3f
#define CP_HQD_IQ_RPTR__OFFSET__SHIFT 0x0
#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_MASK 0x7
#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ__SHIFT 0x0
#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK 0x10
#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND__SHIFT 0x4
#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_INT_MASK 0x100
#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_INT__SHIFT 0x8
#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_EN_MASK 0x200
#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_EN__SHIFT 0x9
#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_EN_MASK 0x400
#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_EN__SHIFT 0xa
#define CP_HQD_DMA_OFFLOAD__DMA_OFFLOAD_MASK 0x1
#define CP_HQD_DMA_OFFLOAD__DMA_OFFLOAD__SHIFT 0x0
#define CP_HQD_OFFLOAD__DMA_OFFLOAD_MASK 0x1
#define CP_HQD_OFFLOAD__DMA_OFFLOAD__SHIFT 0x0
#define CP_HQD_OFFLOAD__DMA_OFFLOAD_EN_MASK 0x2
#define CP_HQD_OFFLOAD__DMA_OFFLOAD_EN__SHIFT 0x1
#define CP_HQD_OFFLOAD__EOP_OFFLOAD_MASK 0x10
#define CP_HQD_OFFLOAD__EOP_OFFLOAD__SHIFT 0x4
#define CP_HQD_OFFLOAD__EOP_OFFLOAD_EN_MASK 0x20
#define CP_HQD_OFFLOAD__EOP_OFFLOAD_EN__SHIFT 0x5
#define CP_HQD_SEMA_CMD__RETRY_MASK 0x1
#define CP_HQD_SEMA_CMD__RETRY__SHIFT 0x0
#define CP_HQD_SEMA_CMD__RESULT_MASK 0x6
#define CP_HQD_SEMA_CMD__RESULT__SHIFT 0x1
#define CP_HQD_MSG_TYPE__ACTION_MASK 0x7
#define CP_HQD_MSG_TYPE__ACTION__SHIFT 0x0
#define CP_HQD_MSG_TYPE__SAVE_STATE_MASK 0x70
#define CP_HQD_MSG_TYPE__SAVE_STATE__SHIFT 0x4
#define CP_HQD_ATOMIC0_PREOP_LO__ATOMIC0_PREOP_LO_MASK 0xffffffff
#define CP_HQD_ATOMIC0_PREOP_LO__ATOMIC0_PREOP_LO__SHIFT 0x0
#define CP_HQD_ATOMIC0_PREOP_HI__ATOMIC0_PREOP_HI_MASK 0xffffffff
#define CP_HQD_ATOMIC0_PREOP_HI__ATOMIC0_PREOP_HI__SHIFT 0x0
#define CP_HQD_ATOMIC1_PREOP_LO__ATOMIC1_PREOP_LO_MASK 0xffffffff
#define CP_HQD_ATOMIC1_PREOP_LO__ATOMIC1_PREOP_LO__SHIFT 0x0
#define CP_HQD_ATOMIC1_PREOP_HI__ATOMIC1_PREOP_HI_MASK 0xffffffff
#define CP_HQD_ATOMIC1_PREOP_HI__ATOMIC1_PREOP_HI__SHIFT 0x0
#define CP_HQD_HQ_SCHEDULER0__SCHEDULER_MASK 0xffffffff
#define CP_HQD_HQ_SCHEDULER0__SCHEDULER__SHIFT 0x0
#define CP_HQD_HQ_STATUS0__DEQUEUE_STATUS_MASK 0x3
#define CP_HQD_HQ_STATUS0__DEQUEUE_STATUS__SHIFT 0x0
#define CP_HQD_HQ_STATUS0__DEQUEUE_RETRY_CNT_MASK 0xc
#define CP_HQD_HQ_STATUS0__DEQUEUE_RETRY_CNT__SHIFT 0x2
#define CP_HQD_HQ_STATUS0__RSV_6_4_MASK 0x70
#define CP_HQD_HQ_STATUS0__RSV_6_4__SHIFT 0x4
#define CP_HQD_HQ_STATUS0__SCRATCH_RAM_INIT_MASK 0x80
#define CP_HQD_HQ_STATUS0__SCRATCH_RAM_INIT__SHIFT 0x7
#define CP_HQD_HQ_STATUS0__TCL2_DIRTY_MASK 0x100
#define CP_HQD_HQ_STATUS0__TCL2_DIRTY__SHIFT 0x8
#define CP_HQD_HQ_STATUS0__PG_ACTIVATED_MASK 0x200
#define CP_HQD_HQ_STATUS0__PG_ACTIVATED__SHIFT 0x9
#define CP_HQD_HQ_STATUS0__RSVR_31_10_MASK 0xfffffc00
#define CP_HQD_HQ_STATUS0__RSVR_31_10__SHIFT 0xa
#define CP_HQD_HQ_SCHEDULER1__SCHEDULER_MASK 0xffffffff
#define CP_HQD_HQ_SCHEDULER1__SCHEDULER__SHIFT 0x0
#define CP_HQD_HQ_CONTROL0__CONTROL_MASK 0xffffffff
#define CP_HQD_HQ_CONTROL0__CONTROL__SHIFT 0x0
#define CP_MQD_CONTROL__VMID_MASK 0xf
#define CP_MQD_CONTROL__VMID__SHIFT 0x0
#define CP_MQD_CONTROL__PROCESSING_MQD_MASK 0x1000
#define CP_MQD_CONTROL__PROCESSING_MQD__SHIFT 0xc
#define CP_MQD_CONTROL__PROCESSING_MQD_EN_MASK 0x2000
#define CP_MQD_CONTROL__PROCESSING_MQD_EN__SHIFT 0xd
#define CP_MQD_CONTROL__MQD_ATC_MASK 0x800000
#define CP_MQD_CONTROL__MQD_ATC__SHIFT 0x17
#define CP_MQD_CONTROL__CACHE_POLICY_MASK 0x1000000
#define CP_MQD_CONTROL__CACHE_POLICY__SHIFT 0x18
#define CP_MQD_CONTROL__MTYPE_MASK 0x18000000
#define CP_MQD_CONTROL__MTYPE__SHIFT 0x1b
#define CP_HQD_HQ_STATUS1__STATUS_MASK 0xffffffff
#define CP_HQD_HQ_STATUS1__STATUS__SHIFT 0x0
#define CP_HQD_HQ_CONTROL1__CONTROL_MASK 0xffffffff
#define CP_HQD_HQ_CONTROL1__CONTROL__SHIFT 0x0
#define CP_HQD_EOP_BASE_ADDR__BASE_ADDR_MASK 0xffffffff
#define CP_HQD_EOP_BASE_ADDR__BASE_ADDR__SHIFT 0x0
#define CP_HQD_EOP_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0xff
#define CP_HQD_EOP_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
#define CP_HQD_EOP_CONTROL__EOP_SIZE_MASK 0x3f
#define CP_HQD_EOP_CONTROL__EOP_SIZE__SHIFT 0x0
#define CP_HQD_EOP_CONTROL__PROCESSING_EOP_MASK 0x100
#define CP_HQD_EOP_CONTROL__PROCESSING_EOP__SHIFT 0x8
#define CP_HQD_EOP_CONTROL__PROCESS_EOP_EN_MASK 0x1000
#define CP_HQD_EOP_CONTROL__PROCESS_EOP_EN__SHIFT 0xc
#define CP_HQD_EOP_CONTROL__PROCESSING_EOPIB_MASK 0x2000
#define CP_HQD_EOP_CONTROL__PROCESSING_EOPIB__SHIFT 0xd
#define CP_HQD_EOP_CONTROL__PROCESS_EOPIB_EN_MASK 0x4000
#define CP_HQD_EOP_CONTROL__PROCESS_EOPIB_EN__SHIFT 0xe
#define CP_HQD_EOP_CONTROL__MTYPE_MASK 0x18000
#define CP_HQD_EOP_CONTROL__MTYPE__SHIFT 0xf
#define CP_HQD_EOP_CONTROL__EOP_ATC_MASK 0x800000
#define CP_HQD_EOP_CONTROL__EOP_ATC__SHIFT 0x17
#define CP_HQD_EOP_CONTROL__CACHE_POLICY_MASK 0x1000000
#define CP_HQD_EOP_CONTROL__CACHE_POLICY__SHIFT 0x18
#define CP_HQD_EOP_CONTROL__SIG_SEM_RESULT_MASK 0x60000000
#define CP_HQD_EOP_CONTROL__SIG_SEM_RESULT__SHIFT 0x1d
#define CP_HQD_EOP_CONTROL__PEND_SIG_SEM_MASK 0x80000000
#define CP_HQD_EOP_CONTROL__PEND_SIG_SEM__SHIFT 0x1f
#define CP_HQD_EOP_RPTR__RPTR_MASK 0x1fff
#define CP_HQD_EOP_RPTR__RPTR__SHIFT 0x0
#define CP_HQD_EOP_RPTR__RPTR_EQ_CSMD_WPTR_MASK 0x40000000
#define CP_HQD_EOP_RPTR__RPTR_EQ_CSMD_WPTR__SHIFT 0x1e
#define CP_HQD_EOP_RPTR__INIT_FETCHER_MASK 0x80000000
#define CP_HQD_EOP_RPTR__INIT_FETCHER__SHIFT 0x1f
#define CP_HQD_EOP_WPTR__WPTR_MASK 0x1fff
#define CP_HQD_EOP_WPTR__WPTR__SHIFT 0x0
#define CP_HQD_EOP_WPTR__EOP_AVAIL_MASK 0x1fff0000
#define CP_HQD_EOP_WPTR__EOP_AVAIL__SHIFT 0x10
#define CP_HQD_EOP_EVENTS__EVENT_COUNT_MASK 0xfff
#define CP_HQD_EOP_EVENTS__EVENT_COUNT__SHIFT 0x0
#define CP_HQD_EOP_EVENTS__CS_PARTIAL_FLUSH_PEND_MASK 0x10000
#define CP_HQD_EOP_EVENTS__CS_PARTIAL_FLUSH_PEND__SHIFT 0x10
#define CP_HQD_CTX_SAVE_BASE_ADDR_LO__ADDR_MASK 0xfffff000
#define CP_HQD_CTX_SAVE_BASE_ADDR_LO__ADDR__SHIFT 0xc
#define CP_HQD_CTX_SAVE_BASE_ADDR_HI__ADDR_HI_MASK 0xffff
#define CP_HQD_CTX_SAVE_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
#define CP_HQD_CTX_SAVE_CONTROL__ATC_MASK 0x1
#define CP_HQD_CTX_SAVE_CONTROL__ATC__SHIFT 0x0
#define CP_HQD_CTX_SAVE_CONTROL__MTYPE_MASK 0x6
#define CP_HQD_CTX_SAVE_CONTROL__MTYPE__SHIFT 0x1
#define CP_HQD_CTX_SAVE_CONTROL__POLICY_MASK 0x8
#define CP_HQD_CTX_SAVE_CONTROL__POLICY__SHIFT 0x3
#define CP_HQD_CNTL_STACK_OFFSET__OFFSET_MASK 0x7ffc
#define CP_HQD_CNTL_STACK_OFFSET__OFFSET__SHIFT 0x2
#define CP_HQD_CNTL_STACK_SIZE__SIZE_MASK 0x7000
#define CP_HQD_CNTL_STACK_SIZE__SIZE__SHIFT 0xc
#define CP_HQD_WG_STATE_OFFSET__OFFSET_MASK 0x1fffffc
#define CP_HQD_WG_STATE_OFFSET__OFFSET__SHIFT 0x2
#define CP_HQD_CTX_SAVE_SIZE__SIZE_MASK 0x1fff000
#define CP_HQD_CTX_SAVE_SIZE__SIZE__SHIFT 0xc
#define CP_HQD_GDS_RESOURCE_STATE__OA_REQUIRED_MASK 0x1
#define CP_HQD_GDS_RESOURCE_STATE__OA_REQUIRED__SHIFT 0x0
#define CP_HQD_GDS_RESOURCE_STATE__OA_ACQUIRED_MASK 0x2
#define CP_HQD_GDS_RESOURCE_STATE__OA_ACQUIRED__SHIFT 0x1
#define CP_HQD_GDS_RESOURCE_STATE__GWS_SIZE_MASK 0x3f0
#define CP_HQD_GDS_RESOURCE_STATE__GWS_SIZE__SHIFT 0x4
#define CP_HQD_GDS_RESOURCE_STATE__GWS_PNTR_MASK 0x3f000
#define CP_HQD_GDS_RESOURCE_STATE__GWS_PNTR__SHIFT 0xc
#define CP_HQD_ERROR__EDC_ERROR_ID_MASK 0xf
#define CP_HQD_ERROR__EDC_ERROR_ID__SHIFT 0x0
#define CP_HQD_ERROR__SUA_ERROR_MASK 0x10
#define CP_HQD_ERROR__SUA_ERROR__SHIFT 0x4
#define CP_HQD_EOP_WPTR_MEM__WPTR_MASK 0x1fff
#define CP_HQD_EOP_WPTR_MEM__WPTR__SHIFT 0x0
#define CP_HQD_EOP_DONES__DONE_COUNT_MASK 0xffffffff
#define CP_HQD_EOP_DONES__DONE_COUNT__SHIFT 0x0
/*
 * DB (depth block) register bit-field definitions.
 * Autogenerated MASK/__SHIFT pairs: for each field, MASK is the in-register
 * bit mask and __SHIFT is the bit position of its least-significant bit
 * (i.e. field = (reg & MASK) >> SHIFT).
 * Fix: stray line-number prefixes fused onto every `#define` (a scrape
 * artifact, e.g. `4691#define ...`) made each line invalid C; removed.
 */
#define DB_Z_READ_BASE__BASE_256B_MASK 0xffffffff
#define DB_Z_READ_BASE__BASE_256B__SHIFT 0x0
#define DB_STENCIL_READ_BASE__BASE_256B_MASK 0xffffffff
#define DB_STENCIL_READ_BASE__BASE_256B__SHIFT 0x0
#define DB_Z_WRITE_BASE__BASE_256B_MASK 0xffffffff
#define DB_Z_WRITE_BASE__BASE_256B__SHIFT 0x0
#define DB_STENCIL_WRITE_BASE__BASE_256B_MASK 0xffffffff
#define DB_STENCIL_WRITE_BASE__BASE_256B__SHIFT 0x0
#define DB_DEPTH_INFO__ADDR5_SWIZZLE_MASK_MASK 0xf
#define DB_DEPTH_INFO__ADDR5_SWIZZLE_MASK__SHIFT 0x0
#define DB_DEPTH_INFO__ARRAY_MODE_MASK 0xf0
#define DB_DEPTH_INFO__ARRAY_MODE__SHIFT 0x4
#define DB_DEPTH_INFO__PIPE_CONFIG_MASK 0x1f00
#define DB_DEPTH_INFO__PIPE_CONFIG__SHIFT 0x8
#define DB_DEPTH_INFO__BANK_WIDTH_MASK 0x6000
#define DB_DEPTH_INFO__BANK_WIDTH__SHIFT 0xd
#define DB_DEPTH_INFO__BANK_HEIGHT_MASK 0x18000
#define DB_DEPTH_INFO__BANK_HEIGHT__SHIFT 0xf
#define DB_DEPTH_INFO__MACRO_TILE_ASPECT_MASK 0x60000
#define DB_DEPTH_INFO__MACRO_TILE_ASPECT__SHIFT 0x11
#define DB_DEPTH_INFO__NUM_BANKS_MASK 0x180000
#define DB_DEPTH_INFO__NUM_BANKS__SHIFT 0x13
#define DB_Z_INFO__FORMAT_MASK 0x3
#define DB_Z_INFO__FORMAT__SHIFT 0x0
#define DB_Z_INFO__NUM_SAMPLES_MASK 0xc
#define DB_Z_INFO__NUM_SAMPLES__SHIFT 0x2
#define DB_Z_INFO__TILE_SPLIT_MASK 0xe000
#define DB_Z_INFO__TILE_SPLIT__SHIFT 0xd
#define DB_Z_INFO__TILE_MODE_INDEX_MASK 0x700000
#define DB_Z_INFO__TILE_MODE_INDEX__SHIFT 0x14
#define DB_Z_INFO__DECOMPRESS_ON_N_ZPLANES_MASK 0x7800000
#define DB_Z_INFO__DECOMPRESS_ON_N_ZPLANES__SHIFT 0x17
#define DB_Z_INFO__ALLOW_EXPCLEAR_MASK 0x8000000
#define DB_Z_INFO__ALLOW_EXPCLEAR__SHIFT 0x1b
#define DB_Z_INFO__READ_SIZE_MASK 0x10000000
#define DB_Z_INFO__READ_SIZE__SHIFT 0x1c
#define DB_Z_INFO__TILE_SURFACE_ENABLE_MASK 0x20000000
#define DB_Z_INFO__TILE_SURFACE_ENABLE__SHIFT 0x1d
#define DB_Z_INFO__CLEAR_DISALLOWED_MASK 0x40000000
#define DB_Z_INFO__CLEAR_DISALLOWED__SHIFT 0x1e
#define DB_Z_INFO__ZRANGE_PRECISION_MASK 0x80000000
#define DB_Z_INFO__ZRANGE_PRECISION__SHIFT 0x1f
#define DB_STENCIL_INFO__FORMAT_MASK 0x1
#define DB_STENCIL_INFO__FORMAT__SHIFT 0x0
#define DB_STENCIL_INFO__TILE_SPLIT_MASK 0xe000
#define DB_STENCIL_INFO__TILE_SPLIT__SHIFT 0xd
#define DB_STENCIL_INFO__TILE_MODE_INDEX_MASK 0x700000
#define DB_STENCIL_INFO__TILE_MODE_INDEX__SHIFT 0x14
#define DB_STENCIL_INFO__ALLOW_EXPCLEAR_MASK 0x8000000
#define DB_STENCIL_INFO__ALLOW_EXPCLEAR__SHIFT 0x1b
#define DB_STENCIL_INFO__TILE_STENCIL_DISABLE_MASK 0x20000000
#define DB_STENCIL_INFO__TILE_STENCIL_DISABLE__SHIFT 0x1d
#define DB_STENCIL_INFO__CLEAR_DISALLOWED_MASK 0x40000000
#define DB_STENCIL_INFO__CLEAR_DISALLOWED__SHIFT 0x1e
#define DB_DEPTH_SIZE__PITCH_TILE_MAX_MASK 0x7ff
#define DB_DEPTH_SIZE__PITCH_TILE_MAX__SHIFT 0x0
#define DB_DEPTH_SIZE__HEIGHT_TILE_MAX_MASK 0x3ff800
#define DB_DEPTH_SIZE__HEIGHT_TILE_MAX__SHIFT 0xb
#define DB_DEPTH_SLICE__SLICE_TILE_MAX_MASK 0x3fffff
#define DB_DEPTH_SLICE__SLICE_TILE_MAX__SHIFT 0x0
#define DB_DEPTH_VIEW__SLICE_START_MASK 0x7ff
#define DB_DEPTH_VIEW__SLICE_START__SHIFT 0x0
#define DB_DEPTH_VIEW__SLICE_MAX_MASK 0xffe000
#define DB_DEPTH_VIEW__SLICE_MAX__SHIFT 0xd
#define DB_DEPTH_VIEW__Z_READ_ONLY_MASK 0x1000000
#define DB_DEPTH_VIEW__Z_READ_ONLY__SHIFT 0x18
#define DB_DEPTH_VIEW__STENCIL_READ_ONLY_MASK 0x2000000
#define DB_DEPTH_VIEW__STENCIL_READ_ONLY__SHIFT 0x19
#define DB_RENDER_CONTROL__DEPTH_CLEAR_ENABLE_MASK 0x1
#define DB_RENDER_CONTROL__DEPTH_CLEAR_ENABLE__SHIFT 0x0
#define DB_RENDER_CONTROL__STENCIL_CLEAR_ENABLE_MASK 0x2
#define DB_RENDER_CONTROL__STENCIL_CLEAR_ENABLE__SHIFT 0x1
#define DB_RENDER_CONTROL__DEPTH_COPY_MASK 0x4
#define DB_RENDER_CONTROL__DEPTH_COPY__SHIFT 0x2
#define DB_RENDER_CONTROL__STENCIL_COPY_MASK 0x8
#define DB_RENDER_CONTROL__STENCIL_COPY__SHIFT 0x3
#define DB_RENDER_CONTROL__RESUMMARIZE_ENABLE_MASK 0x10
#define DB_RENDER_CONTROL__RESUMMARIZE_ENABLE__SHIFT 0x4
#define DB_RENDER_CONTROL__STENCIL_COMPRESS_DISABLE_MASK 0x20
#define DB_RENDER_CONTROL__STENCIL_COMPRESS_DISABLE__SHIFT 0x5
#define DB_RENDER_CONTROL__DEPTH_COMPRESS_DISABLE_MASK 0x40
#define DB_RENDER_CONTROL__DEPTH_COMPRESS_DISABLE__SHIFT 0x6
#define DB_RENDER_CONTROL__COPY_CENTROID_MASK 0x80
#define DB_RENDER_CONTROL__COPY_CENTROID__SHIFT 0x7
#define DB_RENDER_CONTROL__COPY_SAMPLE_MASK 0xf00
#define DB_RENDER_CONTROL__COPY_SAMPLE__SHIFT 0x8
#define DB_RENDER_CONTROL__DECOMPRESS_ENABLE_MASK 0x1000
#define DB_RENDER_CONTROL__DECOMPRESS_ENABLE__SHIFT 0xc
#define DB_COUNT_CONTROL__ZPASS_INCREMENT_DISABLE_MASK 0x1
#define DB_COUNT_CONTROL__ZPASS_INCREMENT_DISABLE__SHIFT 0x0
#define DB_COUNT_CONTROL__PERFECT_ZPASS_COUNTS_MASK 0x2
#define DB_COUNT_CONTROL__PERFECT_ZPASS_COUNTS__SHIFT 0x1
#define DB_COUNT_CONTROL__SAMPLE_RATE_MASK 0x70
#define DB_COUNT_CONTROL__SAMPLE_RATE__SHIFT 0x4
#define DB_COUNT_CONTROL__ZPASS_ENABLE_MASK 0xf00
#define DB_COUNT_CONTROL__ZPASS_ENABLE__SHIFT 0x8
#define DB_COUNT_CONTROL__ZFAIL_ENABLE_MASK 0xf000
#define DB_COUNT_CONTROL__ZFAIL_ENABLE__SHIFT 0xc
#define DB_COUNT_CONTROL__SFAIL_ENABLE_MASK 0xf0000
#define DB_COUNT_CONTROL__SFAIL_ENABLE__SHIFT 0x10
#define DB_COUNT_CONTROL__DBFAIL_ENABLE_MASK 0xf00000
#define DB_COUNT_CONTROL__DBFAIL_ENABLE__SHIFT 0x14
#define DB_COUNT_CONTROL__SLICE_EVEN_ENABLE_MASK 0xf000000
#define DB_COUNT_CONTROL__SLICE_EVEN_ENABLE__SHIFT 0x18
#define DB_COUNT_CONTROL__SLICE_ODD_ENABLE_MASK 0xf0000000
#define DB_COUNT_CONTROL__SLICE_ODD_ENABLE__SHIFT 0x1c
#define DB_RENDER_OVERRIDE__FORCE_HIZ_ENABLE_MASK 0x3
#define DB_RENDER_OVERRIDE__FORCE_HIZ_ENABLE__SHIFT 0x0
#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE0_MASK 0xc
#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE0__SHIFT 0x2
#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE1_MASK 0x30
#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE1__SHIFT 0x4
#define DB_RENDER_OVERRIDE__FORCE_SHADER_Z_ORDER_MASK 0x40
#define DB_RENDER_OVERRIDE__FORCE_SHADER_Z_ORDER__SHIFT 0x6
#define DB_RENDER_OVERRIDE__FAST_Z_DISABLE_MASK 0x80
#define DB_RENDER_OVERRIDE__FAST_Z_DISABLE__SHIFT 0x7
#define DB_RENDER_OVERRIDE__FAST_STENCIL_DISABLE_MASK 0x100
#define DB_RENDER_OVERRIDE__FAST_STENCIL_DISABLE__SHIFT 0x8
#define DB_RENDER_OVERRIDE__NOOP_CULL_DISABLE_MASK 0x200
#define DB_RENDER_OVERRIDE__NOOP_CULL_DISABLE__SHIFT 0x9
#define DB_RENDER_OVERRIDE__FORCE_COLOR_KILL_MASK 0x400
#define DB_RENDER_OVERRIDE__FORCE_COLOR_KILL__SHIFT 0xa
#define DB_RENDER_OVERRIDE__FORCE_Z_READ_MASK 0x800
#define DB_RENDER_OVERRIDE__FORCE_Z_READ__SHIFT 0xb
#define DB_RENDER_OVERRIDE__FORCE_STENCIL_READ_MASK 0x1000
#define DB_RENDER_OVERRIDE__FORCE_STENCIL_READ__SHIFT 0xc
#define DB_RENDER_OVERRIDE__FORCE_FULL_Z_RANGE_MASK 0x6000
#define DB_RENDER_OVERRIDE__FORCE_FULL_Z_RANGE__SHIFT 0xd
#define DB_RENDER_OVERRIDE__FORCE_QC_SMASK_CONFLICT_MASK 0x8000
#define DB_RENDER_OVERRIDE__FORCE_QC_SMASK_CONFLICT__SHIFT 0xf
#define DB_RENDER_OVERRIDE__DISABLE_VIEWPORT_CLAMP_MASK 0x10000
#define DB_RENDER_OVERRIDE__DISABLE_VIEWPORT_CLAMP__SHIFT 0x10
#define DB_RENDER_OVERRIDE__IGNORE_SC_ZRANGE_MASK 0x20000
#define DB_RENDER_OVERRIDE__IGNORE_SC_ZRANGE__SHIFT 0x11
#define DB_RENDER_OVERRIDE__DISABLE_FULLY_COVERED_MASK 0x40000
#define DB_RENDER_OVERRIDE__DISABLE_FULLY_COVERED__SHIFT 0x12
#define DB_RENDER_OVERRIDE__FORCE_Z_LIMIT_SUMM_MASK 0x180000
#define DB_RENDER_OVERRIDE__FORCE_Z_LIMIT_SUMM__SHIFT 0x13
#define DB_RENDER_OVERRIDE__MAX_TILES_IN_DTT_MASK 0x3e00000
#define DB_RENDER_OVERRIDE__MAX_TILES_IN_DTT__SHIFT 0x15
#define DB_RENDER_OVERRIDE__DISABLE_TILE_RATE_TILES_MASK 0x4000000
#define DB_RENDER_OVERRIDE__DISABLE_TILE_RATE_TILES__SHIFT 0x1a
#define DB_RENDER_OVERRIDE__FORCE_Z_DIRTY_MASK 0x8000000
#define DB_RENDER_OVERRIDE__FORCE_Z_DIRTY__SHIFT 0x1b
#define DB_RENDER_OVERRIDE__FORCE_STENCIL_DIRTY_MASK 0x10000000
#define DB_RENDER_OVERRIDE__FORCE_STENCIL_DIRTY__SHIFT 0x1c
#define DB_RENDER_OVERRIDE__FORCE_Z_VALID_MASK 0x20000000
#define DB_RENDER_OVERRIDE__FORCE_Z_VALID__SHIFT 0x1d
#define DB_RENDER_OVERRIDE__FORCE_STENCIL_VALID_MASK 0x40000000
#define DB_RENDER_OVERRIDE__FORCE_STENCIL_VALID__SHIFT 0x1e
#define DB_RENDER_OVERRIDE__PRESERVE_COMPRESSION_MASK 0x80000000
#define DB_RENDER_OVERRIDE__PRESERVE_COMPRESSION__SHIFT 0x1f
#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL_MASK 0x3
#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL__SHIFT 0x0
#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN_MASK 0x1c
#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN__SHIFT 0x2
#define DB_RENDER_OVERRIDE2__DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION_MASK 0x20
#define DB_RENDER_OVERRIDE2__DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION__SHIFT 0x5
#define DB_RENDER_OVERRIDE2__DISABLE_SMEM_EXPCLEAR_OPTIMIZATION_MASK 0x40
#define DB_RENDER_OVERRIDE2__DISABLE_SMEM_EXPCLEAR_OPTIMIZATION__SHIFT 0x6
#define DB_RENDER_OVERRIDE2__DISABLE_COLOR_ON_VALIDATION_MASK 0x80
#define DB_RENDER_OVERRIDE2__DISABLE_COLOR_ON_VALIDATION__SHIFT 0x7
#define DB_RENDER_OVERRIDE2__DECOMPRESS_Z_ON_FLUSH_MASK 0x100
#define DB_RENDER_OVERRIDE2__DECOMPRESS_Z_ON_FLUSH__SHIFT 0x8
#define DB_RENDER_OVERRIDE2__DISABLE_REG_SNOOP_MASK 0x200
#define DB_RENDER_OVERRIDE2__DISABLE_REG_SNOOP__SHIFT 0x9
#define DB_RENDER_OVERRIDE2__DEPTH_BOUNDS_HIER_DEPTH_DISABLE_MASK 0x400
#define DB_RENDER_OVERRIDE2__DEPTH_BOUNDS_HIER_DEPTH_DISABLE__SHIFT 0xa
#define DB_RENDER_OVERRIDE2__SEPARATE_HIZS_FUNC_ENABLE_MASK 0x800
#define DB_RENDER_OVERRIDE2__SEPARATE_HIZS_FUNC_ENABLE__SHIFT 0xb
#define DB_RENDER_OVERRIDE2__HIZ_ZFUNC_MASK 0x7000
#define DB_RENDER_OVERRIDE2__HIZ_ZFUNC__SHIFT 0xc
#define DB_RENDER_OVERRIDE2__HIS_SFUNC_FF_MASK 0x38000
#define DB_RENDER_OVERRIDE2__HIS_SFUNC_FF__SHIFT 0xf
#define DB_RENDER_OVERRIDE2__HIS_SFUNC_BF_MASK 0x1c0000
#define DB_RENDER_OVERRIDE2__HIS_SFUNC_BF__SHIFT 0x12
#define DB_RENDER_OVERRIDE2__PRESERVE_ZRANGE_MASK 0x200000
#define DB_RENDER_OVERRIDE2__PRESERVE_ZRANGE__SHIFT 0x15
#define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS_MASK 0x400000
#define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS__SHIFT 0x16
#define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS_MASK 0x800000
#define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS__SHIFT 0x17
#define DB_EQAA__MAX_ANCHOR_SAMPLES_MASK 0x7
#define DB_EQAA__MAX_ANCHOR_SAMPLES__SHIFT 0x0
#define DB_EQAA__PS_ITER_SAMPLES_MASK 0x70
#define DB_EQAA__PS_ITER_SAMPLES__SHIFT 0x4
#define DB_EQAA__MASK_EXPORT_NUM_SAMPLES_MASK 0x700
#define DB_EQAA__MASK_EXPORT_NUM_SAMPLES__SHIFT 0x8
#define DB_EQAA__ALPHA_TO_MASK_NUM_SAMPLES_MASK 0x7000
#define DB_EQAA__ALPHA_TO_MASK_NUM_SAMPLES__SHIFT 0xc
#define DB_EQAA__HIGH_QUALITY_INTERSECTIONS_MASK 0x10000
#define DB_EQAA__HIGH_QUALITY_INTERSECTIONS__SHIFT 0x10
#define DB_EQAA__INCOHERENT_EQAA_READS_MASK 0x20000
#define DB_EQAA__INCOHERENT_EQAA_READS__SHIFT 0x11
#define DB_EQAA__INTERPOLATE_COMP_Z_MASK 0x40000
#define DB_EQAA__INTERPOLATE_COMP_Z__SHIFT 0x12
#define DB_EQAA__INTERPOLATE_SRC_Z_MASK 0x80000
#define DB_EQAA__INTERPOLATE_SRC_Z__SHIFT 0x13
#define DB_EQAA__STATIC_ANCHOR_ASSOCIATIONS_MASK 0x100000
#define DB_EQAA__STATIC_ANCHOR_ASSOCIATIONS__SHIFT 0x14
#define DB_EQAA__ALPHA_TO_MASK_EQAA_DISABLE_MASK 0x200000
#define DB_EQAA__ALPHA_TO_MASK_EQAA_DISABLE__SHIFT 0x15
#define DB_EQAA__OVERRASTERIZATION_AMOUNT_MASK 0x7000000
#define DB_EQAA__OVERRASTERIZATION_AMOUNT__SHIFT 0x18
#define DB_EQAA__ENABLE_POSTZ_OVERRASTERIZATION_MASK 0x8000000
#define DB_EQAA__ENABLE_POSTZ_OVERRASTERIZATION__SHIFT 0x1b
#define DB_SHADER_CONTROL__Z_EXPORT_ENABLE_MASK 0x1
#define DB_SHADER_CONTROL__Z_EXPORT_ENABLE__SHIFT 0x0
#define DB_SHADER_CONTROL__STENCIL_TEST_VAL_EXPORT_ENABLE_MASK 0x2
#define DB_SHADER_CONTROL__STENCIL_TEST_VAL_EXPORT_ENABLE__SHIFT 0x1
#define DB_SHADER_CONTROL__STENCIL_OP_VAL_EXPORT_ENABLE_MASK 0x4
#define DB_SHADER_CONTROL__STENCIL_OP_VAL_EXPORT_ENABLE__SHIFT 0x2
#define DB_SHADER_CONTROL__Z_ORDER_MASK 0x30
#define DB_SHADER_CONTROL__Z_ORDER__SHIFT 0x4
#define DB_SHADER_CONTROL__KILL_ENABLE_MASK 0x40
#define DB_SHADER_CONTROL__KILL_ENABLE__SHIFT 0x6
#define DB_SHADER_CONTROL__COVERAGE_TO_MASK_ENABLE_MASK 0x80
#define DB_SHADER_CONTROL__COVERAGE_TO_MASK_ENABLE__SHIFT 0x7
#define DB_SHADER_CONTROL__MASK_EXPORT_ENABLE_MASK 0x100
#define DB_SHADER_CONTROL__MASK_EXPORT_ENABLE__SHIFT 0x8
#define DB_SHADER_CONTROL__EXEC_ON_HIER_FAIL_MASK 0x200
#define DB_SHADER_CONTROL__EXEC_ON_HIER_FAIL__SHIFT 0x9
#define DB_SHADER_CONTROL__EXEC_ON_NOOP_MASK 0x400
#define DB_SHADER_CONTROL__EXEC_ON_NOOP__SHIFT 0xa
#define DB_SHADER_CONTROL__ALPHA_TO_MASK_DISABLE_MASK 0x800
#define DB_SHADER_CONTROL__ALPHA_TO_MASK_DISABLE__SHIFT 0xb
#define DB_SHADER_CONTROL__DEPTH_BEFORE_SHADER_MASK 0x1000
#define DB_SHADER_CONTROL__DEPTH_BEFORE_SHADER__SHIFT 0xc
#define DB_SHADER_CONTROL__CONSERVATIVE_Z_EXPORT_MASK 0x6000
#define DB_SHADER_CONTROL__CONSERVATIVE_Z_EXPORT__SHIFT 0xd
#define DB_SHADER_CONTROL__DUAL_QUAD_DISABLE_MASK 0x8000
#define DB_SHADER_CONTROL__DUAL_QUAD_DISABLE__SHIFT 0xf
#define DB_DEPTH_BOUNDS_MIN__MIN_MASK 0xffffffff
#define DB_DEPTH_BOUNDS_MIN__MIN__SHIFT 0x0
#define DB_DEPTH_BOUNDS_MAX__MAX_MASK 0xffffffff
#define DB_DEPTH_BOUNDS_MAX__MAX__SHIFT 0x0
#define DB_STENCIL_CLEAR__CLEAR_MASK 0xff
#define DB_STENCIL_CLEAR__CLEAR__SHIFT 0x0
#define DB_DEPTH_CLEAR__DEPTH_CLEAR_MASK 0xffffffff
#define DB_DEPTH_CLEAR__DEPTH_CLEAR__SHIFT 0x0
#define DB_HTILE_DATA_BASE__BASE_256B_MASK 0xffffffff
#define DB_HTILE_DATA_BASE__BASE_256B__SHIFT 0x0
#define DB_HTILE_SURFACE__LINEAR_MASK 0x1
#define DB_HTILE_SURFACE__LINEAR__SHIFT 0x0
#define DB_HTILE_SURFACE__FULL_CACHE_MASK 0x2
#define DB_HTILE_SURFACE__FULL_CACHE__SHIFT 0x1
#define DB_HTILE_SURFACE__HTILE_USES_PRELOAD_WIN_MASK 0x4
#define DB_HTILE_SURFACE__HTILE_USES_PRELOAD_WIN__SHIFT 0x2
#define DB_HTILE_SURFACE__PRELOAD_MASK 0x8
#define DB_HTILE_SURFACE__PRELOAD__SHIFT 0x3
#define DB_HTILE_SURFACE__PREFETCH_WIDTH_MASK 0x3f0
#define DB_HTILE_SURFACE__PREFETCH_WIDTH__SHIFT 0x4
#define DB_HTILE_SURFACE__PREFETCH_HEIGHT_MASK 0xfc00
#define DB_HTILE_SURFACE__PREFETCH_HEIGHT__SHIFT 0xa
#define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE_MASK 0x10000
#define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE__SHIFT 0x10
#define DB_HTILE_SURFACE__TC_COMPATIBLE_MASK 0x20000
#define DB_HTILE_SURFACE__TC_COMPATIBLE__SHIFT 0x11
#define DB_PRELOAD_CONTROL__START_X_MASK 0xff
#define DB_PRELOAD_CONTROL__START_X__SHIFT 0x0
#define DB_PRELOAD_CONTROL__START_Y_MASK 0xff00
#define DB_PRELOAD_CONTROL__START_Y__SHIFT 0x8
#define DB_PRELOAD_CONTROL__MAX_X_MASK 0xff0000
#define DB_PRELOAD_CONTROL__MAX_X__SHIFT 0x10
4955#define DB_PRELOAD_CONTROL__MAX_Y_MASK 0xff000000
4956#define DB_PRELOAD_CONTROL__MAX_Y__SHIFT 0x18
4957#define DB_STENCILREFMASK__STENCILTESTVAL_MASK 0xff
4958#define DB_STENCILREFMASK__STENCILTESTVAL__SHIFT 0x0
4959#define DB_STENCILREFMASK__STENCILMASK_MASK 0xff00
4960#define DB_STENCILREFMASK__STENCILMASK__SHIFT 0x8
4961#define DB_STENCILREFMASK__STENCILWRITEMASK_MASK 0xff0000
4962#define DB_STENCILREFMASK__STENCILWRITEMASK__SHIFT 0x10
4963#define DB_STENCILREFMASK__STENCILOPVAL_MASK 0xff000000
4964#define DB_STENCILREFMASK__STENCILOPVAL__SHIFT 0x18
4965#define DB_STENCILREFMASK_BF__STENCILTESTVAL_BF_MASK 0xff
4966#define DB_STENCILREFMASK_BF__STENCILTESTVAL_BF__SHIFT 0x0
4967#define DB_STENCILREFMASK_BF__STENCILMASK_BF_MASK 0xff00
4968#define DB_STENCILREFMASK_BF__STENCILMASK_BF__SHIFT 0x8
4969#define DB_STENCILREFMASK_BF__STENCILWRITEMASK_BF_MASK 0xff0000
4970#define DB_STENCILREFMASK_BF__STENCILWRITEMASK_BF__SHIFT 0x10
4971#define DB_STENCILREFMASK_BF__STENCILOPVAL_BF_MASK 0xff000000
4972#define DB_STENCILREFMASK_BF__STENCILOPVAL_BF__SHIFT 0x18
4973#define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0_MASK 0x7
4974#define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0__SHIFT 0x0
4975#define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0_MASK 0xff0
4976#define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0__SHIFT 0x4
4977#define DB_SRESULTS_COMPARE_STATE0__COMPAREMASK0_MASK 0xff000
4978#define DB_SRESULTS_COMPARE_STATE0__COMPAREMASK0__SHIFT 0xc
4979#define DB_SRESULTS_COMPARE_STATE0__ENABLE0_MASK 0x1000000
4980#define DB_SRESULTS_COMPARE_STATE0__ENABLE0__SHIFT 0x18
4981#define DB_SRESULTS_COMPARE_STATE1__COMPAREFUNC1_MASK 0x7
4982#define DB_SRESULTS_COMPARE_STATE1__COMPAREFUNC1__SHIFT 0x0
4983#define DB_SRESULTS_COMPARE_STATE1__COMPAREVALUE1_MASK 0xff0
4984#define DB_SRESULTS_COMPARE_STATE1__COMPAREVALUE1__SHIFT 0x4
4985#define DB_SRESULTS_COMPARE_STATE1__COMPAREMASK1_MASK 0xff000
4986#define DB_SRESULTS_COMPARE_STATE1__COMPAREMASK1__SHIFT 0xc
4987#define DB_SRESULTS_COMPARE_STATE1__ENABLE1_MASK 0x1000000
4988#define DB_SRESULTS_COMPARE_STATE1__ENABLE1__SHIFT 0x18
4989#define DB_DEPTH_CONTROL__STENCIL_ENABLE_MASK 0x1
4990#define DB_DEPTH_CONTROL__STENCIL_ENABLE__SHIFT 0x0
4991#define DB_DEPTH_CONTROL__Z_ENABLE_MASK 0x2
4992#define DB_DEPTH_CONTROL__Z_ENABLE__SHIFT 0x1
4993#define DB_DEPTH_CONTROL__Z_WRITE_ENABLE_MASK 0x4
4994#define DB_DEPTH_CONTROL__Z_WRITE_ENABLE__SHIFT 0x2
4995#define DB_DEPTH_CONTROL__DEPTH_BOUNDS_ENABLE_MASK 0x8
4996#define DB_DEPTH_CONTROL__DEPTH_BOUNDS_ENABLE__SHIFT 0x3
4997#define DB_DEPTH_CONTROL__ZFUNC_MASK 0x70
4998#define DB_DEPTH_CONTROL__ZFUNC__SHIFT 0x4
4999#define DB_DEPTH_CONTROL__BACKFACE_ENABLE_MASK 0x80
5000#define DB_DEPTH_CONTROL__BACKFACE_ENABLE__SHIFT 0x7
5001#define DB_DEPTH_CONTROL__STENCILFUNC_MASK 0x700
5002#define DB_DEPTH_CONTROL__STENCILFUNC__SHIFT 0x8
5003#define DB_DEPTH_CONTROL__STENCILFUNC_BF_MASK 0x700000
5004#define DB_DEPTH_CONTROL__STENCILFUNC_BF__SHIFT 0x14
5005#define DB_DEPTH_CONTROL__ENABLE_COLOR_WRITES_ON_DEPTH_FAIL_MASK 0x40000000
5006#define DB_DEPTH_CONTROL__ENABLE_COLOR_WRITES_ON_DEPTH_FAIL__SHIFT 0x1e
5007#define DB_DEPTH_CONTROL__DISABLE_COLOR_WRITES_ON_DEPTH_PASS_MASK 0x80000000
5008#define DB_DEPTH_CONTROL__DISABLE_COLOR_WRITES_ON_DEPTH_PASS__SHIFT 0x1f
5009#define DB_STENCIL_CONTROL__STENCILFAIL_MASK 0xf
5010#define DB_STENCIL_CONTROL__STENCILFAIL__SHIFT 0x0
5011#define DB_STENCIL_CONTROL__STENCILZPASS_MASK 0xf0
5012#define DB_STENCIL_CONTROL__STENCILZPASS__SHIFT 0x4
5013#define DB_STENCIL_CONTROL__STENCILZFAIL_MASK 0xf00
5014#define DB_STENCIL_CONTROL__STENCILZFAIL__SHIFT 0x8
5015#define DB_STENCIL_CONTROL__STENCILFAIL_BF_MASK 0xf000
5016#define DB_STENCIL_CONTROL__STENCILFAIL_BF__SHIFT 0xc
5017#define DB_STENCIL_CONTROL__STENCILZPASS_BF_MASK 0xf0000
5018#define DB_STENCIL_CONTROL__STENCILZPASS_BF__SHIFT 0x10
5019#define DB_STENCIL_CONTROL__STENCILZFAIL_BF_MASK 0xf00000
5020#define DB_STENCIL_CONTROL__STENCILZFAIL_BF__SHIFT 0x14
5021#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_ENABLE_MASK 0x1
5022#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_ENABLE__SHIFT 0x0
5023#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET0_MASK 0x300
5024#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET0__SHIFT 0x8
5025#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET1_MASK 0xc00
5026#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET1__SHIFT 0xa
5027#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET2_MASK 0x3000
5028#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET2__SHIFT 0xc
5029#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET3_MASK 0xc000
5030#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET3__SHIFT 0xe
5031#define DB_ALPHA_TO_MASK__OFFSET_ROUND_MASK 0x10000
5032#define DB_ALPHA_TO_MASK__OFFSET_ROUND__SHIFT 0x10
5033#define DB_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x3ff
5034#define DB_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
5035#define DB_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0xffc00
5036#define DB_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
5037#define DB_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0xf00000
5038#define DB_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
5039#define DB_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0xf000000
5040#define DB_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
5041#define DB_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000
5042#define DB_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
5043#define DB_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x3ff
5044#define DB_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
5045#define DB_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0xffc00
5046#define DB_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
5047#define DB_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0xf00000
5048#define DB_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
5049#define DB_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0xf000000
5050#define DB_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
5051#define DB_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000
5052#define DB_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
5053#define DB_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x3ff
5054#define DB_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
5055#define DB_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0xffc00
5056#define DB_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
5057#define DB_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0xf00000
5058#define DB_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
5059#define DB_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0xf000000
5060#define DB_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
5061#define DB_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xf0000000
5062#define DB_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
5063#define DB_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x3ff
5064#define DB_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
5065#define DB_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0xffc00
5066#define DB_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
5067#define DB_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0xf00000
5068#define DB_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
5069#define DB_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0xf000000
5070#define DB_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
5071#define DB_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xf0000000
5072#define DB_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
5073#define DB_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x3ff
5074#define DB_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
5075#define DB_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0xffc00
5076#define DB_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
5077#define DB_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xf000000
5078#define DB_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
5079#define DB_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xf0000000
5080#define DB_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
5081#define DB_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x3ff
5082#define DB_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
5083#define DB_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0xffc00
5084#define DB_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
5085#define DB_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0xf000000
5086#define DB_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
5087#define DB_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xf0000000
5088#define DB_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
5089#define DB_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffff
5090#define DB_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
5091#define DB_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffff
5092#define DB_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
5093#define DB_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffff
5094#define DB_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
5095#define DB_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffff
5096#define DB_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
5097#define DB_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffff
5098#define DB_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
5099#define DB_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffff
5100#define DB_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
5101#define DB_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffff
5102#define DB_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
5103#define DB_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffff
5104#define DB_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
5105#define DB_DEBUG__DEBUG_STENCIL_COMPRESS_DISABLE_MASK 0x1
5106#define DB_DEBUG__DEBUG_STENCIL_COMPRESS_DISABLE__SHIFT 0x0
5107#define DB_DEBUG__DEBUG_DEPTH_COMPRESS_DISABLE_MASK 0x2
5108#define DB_DEBUG__DEBUG_DEPTH_COMPRESS_DISABLE__SHIFT 0x1
5109#define DB_DEBUG__FETCH_FULL_Z_TILE_MASK 0x4
5110#define DB_DEBUG__FETCH_FULL_Z_TILE__SHIFT 0x2
5111#define DB_DEBUG__FETCH_FULL_STENCIL_TILE_MASK 0x8
5112#define DB_DEBUG__FETCH_FULL_STENCIL_TILE__SHIFT 0x3
5113#define DB_DEBUG__FORCE_Z_MODE_MASK 0x30
5114#define DB_DEBUG__FORCE_Z_MODE__SHIFT 0x4
5115#define DB_DEBUG__DEBUG_FORCE_DEPTH_READ_MASK 0x40
5116#define DB_DEBUG__DEBUG_FORCE_DEPTH_READ__SHIFT 0x6
5117#define DB_DEBUG__DEBUG_FORCE_STENCIL_READ_MASK 0x80
5118#define DB_DEBUG__DEBUG_FORCE_STENCIL_READ__SHIFT 0x7
5119#define DB_DEBUG__DEBUG_FORCE_HIZ_ENABLE_MASK 0x300
5120#define DB_DEBUG__DEBUG_FORCE_HIZ_ENABLE__SHIFT 0x8
5121#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE0_MASK 0xc00
5122#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE0__SHIFT 0xa
5123#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE1_MASK 0x3000
5124#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE1__SHIFT 0xc
5125#define DB_DEBUG__DEBUG_FAST_Z_DISABLE_MASK 0x4000
5126#define DB_DEBUG__DEBUG_FAST_Z_DISABLE__SHIFT 0xe
5127#define DB_DEBUG__DEBUG_FAST_STENCIL_DISABLE_MASK 0x8000
5128#define DB_DEBUG__DEBUG_FAST_STENCIL_DISABLE__SHIFT 0xf
5129#define DB_DEBUG__DEBUG_NOOP_CULL_DISABLE_MASK 0x10000
5130#define DB_DEBUG__DEBUG_NOOP_CULL_DISABLE__SHIFT 0x10
5131#define DB_DEBUG__DISABLE_SUMM_SQUADS_MASK 0x20000
5132#define DB_DEBUG__DISABLE_SUMM_SQUADS__SHIFT 0x11
5133#define DB_DEBUG__DEPTH_CACHE_FORCE_MISS_MASK 0x40000
5134#define DB_DEBUG__DEPTH_CACHE_FORCE_MISS__SHIFT 0x12
5135#define DB_DEBUG__DEBUG_FORCE_FULL_Z_RANGE_MASK 0x180000
5136#define DB_DEBUG__DEBUG_FORCE_FULL_Z_RANGE__SHIFT 0x13
5137#define DB_DEBUG__NEVER_FREE_Z_ONLY_MASK 0x200000
5138#define DB_DEBUG__NEVER_FREE_Z_ONLY__SHIFT 0x15
5139#define DB_DEBUG__ZPASS_COUNTS_LOOK_AT_PIPE_STAT_EVENTS_MASK 0x400000
5140#define DB_DEBUG__ZPASS_COUNTS_LOOK_AT_PIPE_STAT_EVENTS__SHIFT 0x16
5141#define DB_DEBUG__DISABLE_VPORT_ZPLANE_OPTIMIZATION_MASK 0x800000
5142#define DB_DEBUG__DISABLE_VPORT_ZPLANE_OPTIMIZATION__SHIFT 0x17
5143#define DB_DEBUG__DECOMPRESS_AFTER_N_ZPLANES_MASK 0xf000000
5144#define DB_DEBUG__DECOMPRESS_AFTER_N_ZPLANES__SHIFT 0x18
5145#define DB_DEBUG__ONE_FREE_IN_FLIGHT_MASK 0x10000000
5146#define DB_DEBUG__ONE_FREE_IN_FLIGHT__SHIFT 0x1c
5147#define DB_DEBUG__FORCE_MISS_IF_NOT_INFLIGHT_MASK 0x20000000
5148#define DB_DEBUG__FORCE_MISS_IF_NOT_INFLIGHT__SHIFT 0x1d
5149#define DB_DEBUG__DISABLE_DEPTH_SURFACE_SYNC_MASK 0x40000000
5150#define DB_DEBUG__DISABLE_DEPTH_SURFACE_SYNC__SHIFT 0x1e
5151#define DB_DEBUG__DISABLE_HTILE_SURFACE_SYNC_MASK 0x80000000
5152#define DB_DEBUG__DISABLE_HTILE_SURFACE_SYNC__SHIFT 0x1f
5153#define DB_DEBUG2__ALLOW_COMPZ_BYTE_MASKING_MASK 0x1
5154#define DB_DEBUG2__ALLOW_COMPZ_BYTE_MASKING__SHIFT 0x0
5155#define DB_DEBUG2__DISABLE_TC_ZRANGE_L0_CACHE_MASK 0x2
5156#define DB_DEBUG2__DISABLE_TC_ZRANGE_L0_CACHE__SHIFT 0x1
5157#define DB_DEBUG2__DISABLE_TC_MASK_L0_CACHE_MASK 0x4
5158#define DB_DEBUG2__DISABLE_TC_MASK_L0_CACHE__SHIFT 0x2
5159#define DB_DEBUG2__DTR_ROUND_ROBIN_ARB_MASK 0x8
5160#define DB_DEBUG2__DTR_ROUND_ROBIN_ARB__SHIFT 0x3
5161#define DB_DEBUG2__DTR_PREZ_STALLS_FOR_ETF_ROOM_MASK 0x10
5162#define DB_DEBUG2__DTR_PREZ_STALLS_FOR_ETF_ROOM__SHIFT 0x4
5163#define DB_DEBUG2__DISABLE_PREZL_LPF_STALL_MASK 0x20
5164#define DB_DEBUG2__DISABLE_PREZL_LPF_STALL__SHIFT 0x5
5165#define DB_DEBUG2__ENABLE_PREZL_CB_STALL_MASK 0x40
5166#define DB_DEBUG2__ENABLE_PREZL_CB_STALL__SHIFT 0x6
5167#define DB_DEBUG2__DISABLE_PREZL_LPF_STALL_REZ_MASK 0x80
5168#define DB_DEBUG2__DISABLE_PREZL_LPF_STALL_REZ__SHIFT 0x7
5169#define DB_DEBUG2__DISABLE_PREZL_CB_STALL_REZ_MASK 0x100
5170#define DB_DEBUG2__DISABLE_PREZL_CB_STALL_REZ__SHIFT 0x8
5171#define DB_DEBUG2__CLK_OFF_DELAY_MASK 0x3e00
5172#define DB_DEBUG2__CLK_OFF_DELAY__SHIFT 0x9
5173#define DB_DEBUG2__DISABLE_TILE_COVERED_FOR_PS_ITER_MASK 0x4000
5174#define DB_DEBUG2__DISABLE_TILE_COVERED_FOR_PS_ITER__SHIFT 0xe
5175#define DB_DEBUG2__ENABLE_SUBTILE_GROUPING_MASK 0x8000
5176#define DB_DEBUG2__ENABLE_SUBTILE_GROUPING__SHIFT 0xf
5177#define DB_DEBUG2__DISABLE_HTILE_PAIRED_PIPES_MASK 0x10000
5178#define DB_DEBUG2__DISABLE_HTILE_PAIRED_PIPES__SHIFT 0x10
5179#define DB_DEBUG2__DISABLE_NULL_EOT_FORWARDING_MASK 0x20000
5180#define DB_DEBUG2__DISABLE_NULL_EOT_FORWARDING__SHIFT 0x11
5181#define DB_DEBUG2__DISABLE_DTT_DATA_FORWARDING_MASK 0x40000
5182#define DB_DEBUG2__DISABLE_DTT_DATA_FORWARDING__SHIFT 0x12
5183#define DB_DEBUG2__DISABLE_QUAD_COHERENCY_STALL_MASK 0x80000
5184#define DB_DEBUG2__DISABLE_QUAD_COHERENCY_STALL__SHIFT 0x13
5185#define DB_DEBUG2__ENABLE_PREZ_OF_REZ_SUMM_MASK 0x10000000
5186#define DB_DEBUG2__ENABLE_PREZ_OF_REZ_SUMM__SHIFT 0x1c
5187#define DB_DEBUG2__DISABLE_PREZL_VIEWPORT_STALL_MASK 0x20000000
5188#define DB_DEBUG2__DISABLE_PREZL_VIEWPORT_STALL__SHIFT 0x1d
5189#define DB_DEBUG2__DISABLE_SINGLE_STENCIL_QUAD_SUMM_MASK 0x40000000
5190#define DB_DEBUG2__DISABLE_SINGLE_STENCIL_QUAD_SUMM__SHIFT 0x1e
5191#define DB_DEBUG2__DISABLE_WRITE_STALL_ON_RDWR_CONFLICT_MASK 0x80000000
5192#define DB_DEBUG2__DISABLE_WRITE_STALL_ON_RDWR_CONFLICT__SHIFT 0x1f
5193#define DB_DEBUG3__FORCE_DB_IS_GOOD_MASK 0x4
5194#define DB_DEBUG3__FORCE_DB_IS_GOOD__SHIFT 0x2
5195#define DB_DEBUG3__DISABLE_TL_SSO_NULL_SUPPRESSION_MASK 0x8
5196#define DB_DEBUG3__DISABLE_TL_SSO_NULL_SUPPRESSION__SHIFT 0x3
5197#define DB_DEBUG3__DISABLE_HIZ_ON_VPORT_CLAMP_MASK 0x10
5198#define DB_DEBUG3__DISABLE_HIZ_ON_VPORT_CLAMP__SHIFT 0x4
5199#define DB_DEBUG3__EQAA_INTERPOLATE_COMP_Z_MASK 0x20
5200#define DB_DEBUG3__EQAA_INTERPOLATE_COMP_Z__SHIFT 0x5
5201#define DB_DEBUG3__EQAA_INTERPOLATE_SRC_Z_MASK 0x40
5202#define DB_DEBUG3__EQAA_INTERPOLATE_SRC_Z__SHIFT 0x6
5203#define DB_DEBUG3__DISABLE_TCP_CAM_BYPASS_MASK 0x80
5204#define DB_DEBUG3__DISABLE_TCP_CAM_BYPASS__SHIFT 0x7
5205#define DB_DEBUG3__DISABLE_ZCMP_DIRTY_SUPPRESSION_MASK 0x100
5206#define DB_DEBUG3__DISABLE_ZCMP_DIRTY_SUPPRESSION__SHIFT 0x8
5207#define DB_DEBUG3__DISABLE_REDUNDANT_PLANE_FLUSHES_OPT_MASK 0x200
5208#define DB_DEBUG3__DISABLE_REDUNDANT_PLANE_FLUSHES_OPT__SHIFT 0x9
5209#define DB_DEBUG3__DISABLE_RECOMP_TO_1ZPLANE_WITHOUT_FASTOP_MASK 0x400
5210#define DB_DEBUG3__DISABLE_RECOMP_TO_1ZPLANE_WITHOUT_FASTOP__SHIFT 0xa
5211#define DB_DEBUG3__ENABLE_INCOHERENT_EQAA_READS_MASK 0x800
5212#define DB_DEBUG3__ENABLE_INCOHERENT_EQAA_READS__SHIFT 0xb
5213#define DB_DEBUG3__DISABLE_OP_Z_DATA_FORWARDING_MASK 0x1000
5214#define DB_DEBUG3__DISABLE_OP_Z_DATA_FORWARDING__SHIFT 0xc
5215#define DB_DEBUG3__DISABLE_OP_DF_BYPASS_MASK 0x2000
5216#define DB_DEBUG3__DISABLE_OP_DF_BYPASS__SHIFT 0xd
5217#define DB_DEBUG3__DISABLE_OP_DF_WRITE_COMBINE_MASK 0x4000
5218#define DB_DEBUG3__DISABLE_OP_DF_WRITE_COMBINE__SHIFT 0xe
5219#define DB_DEBUG3__DISABLE_OP_DF_DIRECT_FEEDBACK_MASK 0x8000
5220#define DB_DEBUG3__DISABLE_OP_DF_DIRECT_FEEDBACK__SHIFT 0xf
5221#define DB_DEBUG3__ALLOW_RF2P_RW_COLLISION_MASK 0x10000
5222#define DB_DEBUG3__ALLOW_RF2P_RW_COLLISION__SHIFT 0x10
5223#define DB_DEBUG3__SLOW_PREZ_TO_A2M_OMASK_RATE_MASK 0x20000
5224#define DB_DEBUG3__SLOW_PREZ_TO_A2M_OMASK_RATE__SHIFT 0x11
5225#define DB_DEBUG3__DISABLE_OP_S_DATA_FORWARDING_MASK 0x40000
5226#define DB_DEBUG3__DISABLE_OP_S_DATA_FORWARDING__SHIFT 0x12
5227#define DB_DEBUG3__DISABLE_TC_UPDATE_WRITE_COMBINE_MASK 0x80000
5228#define DB_DEBUG3__DISABLE_TC_UPDATE_WRITE_COMBINE__SHIFT 0x13
5229#define DB_DEBUG3__DISABLE_HZ_TC_WRITE_COMBINE_MASK 0x100000
5230#define DB_DEBUG3__DISABLE_HZ_TC_WRITE_COMBINE__SHIFT 0x14
5231#define DB_DEBUG3__ENABLE_RECOMP_ZDIRTY_SUPPRESSION_OPT_MASK 0x200000
5232#define DB_DEBUG3__ENABLE_RECOMP_ZDIRTY_SUPPRESSION_OPT__SHIFT 0x15
5233#define DB_DEBUG3__ENABLE_TC_MA_ROUND_ROBIN_ARB_MASK 0x400000
5234#define DB_DEBUG3__ENABLE_TC_MA_ROUND_ROBIN_ARB__SHIFT 0x16
5235#define DB_DEBUG3__DISABLE_RAM_READ_SUPPRESION_ON_FWD_MASK 0x800000
5236#define DB_DEBUG3__DISABLE_RAM_READ_SUPPRESION_ON_FWD__SHIFT 0x17
5237#define DB_DEBUG3__DISABLE_EQAA_A2M_PERF_OPT_MASK 0x1000000
5238#define DB_DEBUG3__DISABLE_EQAA_A2M_PERF_OPT__SHIFT 0x18
5239#define DB_DEBUG3__DISABLE_DI_DT_STALL_MASK 0x2000000
5240#define DB_DEBUG3__DISABLE_DI_DT_STALL__SHIFT 0x19
5241#define DB_DEBUG3__ENABLE_DB_PROCESS_RESET_MASK 0x4000000
5242#define DB_DEBUG3__ENABLE_DB_PROCESS_RESET__SHIFT 0x1a
5243#define DB_DEBUG3__DISABLE_OVERRASTERIZATION_FIX_MASK 0x8000000
5244#define DB_DEBUG3__DISABLE_OVERRASTERIZATION_FIX__SHIFT 0x1b
5245#define DB_DEBUG3__DONT_INSERT_CONTEXT_SUSPEND_MASK 0x10000000
5246#define DB_DEBUG3__DONT_INSERT_CONTEXT_SUSPEND__SHIFT 0x1c
5247#define DB_DEBUG3__DONT_DELETE_CONTEXT_SUSPEND_MASK 0x20000000
5248#define DB_DEBUG3__DONT_DELETE_CONTEXT_SUSPEND__SHIFT 0x1d
5249#define DB_DEBUG3__DISABLE_4XAA_2P_DELAYED_WRITE_MASK 0x40000000
5250#define DB_DEBUG3__DISABLE_4XAA_2P_DELAYED_WRITE__SHIFT 0x1e
5251#define DB_DEBUG3__DISABLE_4XAA_2P_INTERLEAVED_PMASK_MASK 0x80000000
5252#define DB_DEBUG3__DISABLE_4XAA_2P_INTERLEAVED_PMASK__SHIFT 0x1f
5253#define DB_DEBUG4__DISABLE_QC_Z_MASK_SUMMATION_MASK 0x1
5254#define DB_DEBUG4__DISABLE_QC_Z_MASK_SUMMATION__SHIFT 0x0
5255#define DB_DEBUG4__DISABLE_QC_STENCIL_MASK_SUMMATION_MASK 0x2
5256#define DB_DEBUG4__DISABLE_QC_STENCIL_MASK_SUMMATION__SHIFT 0x1
5257#define DB_DEBUG4__DISABLE_RESUMM_TO_SINGLE_STENCIL_MASK 0x4
5258#define DB_DEBUG4__DISABLE_RESUMM_TO_SINGLE_STENCIL__SHIFT 0x2
5259#define DB_DEBUG4__DISABLE_PREZ_POSTZ_DTILE_CONFLICT_STALL_MASK 0x8
5260#define DB_DEBUG4__DISABLE_PREZ_POSTZ_DTILE_CONFLICT_STALL__SHIFT 0x3
5261#define DB_DEBUG4__DISABLE_4XAA_2P_ZD_HOLDOFF_MASK 0x10
5262#define DB_DEBUG4__DISABLE_4XAA_2P_ZD_HOLDOFF__SHIFT 0x4
5263#define DB_DEBUG4__ENABLE_A2M_DQUAD_OPTIMIZATION_MASK 0x20
5264#define DB_DEBUG4__ENABLE_A2M_DQUAD_OPTIMIZATION__SHIFT 0x5
5265#define DB_DEBUG4__ENABLE_DBCB_SLOW_FORMAT_COLLAPSE_MASK 0x40
5266#define DB_DEBUG4__ENABLE_DBCB_SLOW_FORMAT_COLLAPSE__SHIFT 0x6
5267#define DB_DEBUG4__DB_EXTRA_DEBUG4_MASK 0xffffff80
5268#define DB_DEBUG4__DB_EXTRA_DEBUG4__SHIFT 0x7
5269#define DB_CREDIT_LIMIT__DB_SC_TILE_CREDITS_MASK 0x1f
5270#define DB_CREDIT_LIMIT__DB_SC_TILE_CREDITS__SHIFT 0x0
5271#define DB_CREDIT_LIMIT__DB_SC_QUAD_CREDITS_MASK 0x3e0
5272#define DB_CREDIT_LIMIT__DB_SC_QUAD_CREDITS__SHIFT 0x5
5273#define DB_CREDIT_LIMIT__DB_CB_LQUAD_CREDITS_MASK 0x1c00
5274#define DB_CREDIT_LIMIT__DB_CB_LQUAD_CREDITS__SHIFT 0xa
5275#define DB_CREDIT_LIMIT__DB_CB_TILE_CREDITS_MASK 0x7f000000
5276#define DB_CREDIT_LIMIT__DB_CB_TILE_CREDITS__SHIFT 0x18
5277#define DB_WATERMARKS__DEPTH_FREE_MASK 0x1f
5278#define DB_WATERMARKS__DEPTH_FREE__SHIFT 0x0
5279#define DB_WATERMARKS__DEPTH_FLUSH_MASK 0x7e0
5280#define DB_WATERMARKS__DEPTH_FLUSH__SHIFT 0x5
5281#define DB_WATERMARKS__FORCE_SUMMARIZE_MASK 0x7800
5282#define DB_WATERMARKS__FORCE_SUMMARIZE__SHIFT 0xb
5283#define DB_WATERMARKS__DEPTH_PENDING_FREE_MASK 0xf8000
5284#define DB_WATERMARKS__DEPTH_PENDING_FREE__SHIFT 0xf
5285#define DB_WATERMARKS__DEPTH_CACHELINE_FREE_MASK 0x7f00000
5286#define DB_WATERMARKS__DEPTH_CACHELINE_FREE__SHIFT 0x14
5287#define DB_WATERMARKS__EARLY_Z_PANIC_DISABLE_MASK 0x8000000
5288#define DB_WATERMARKS__EARLY_Z_PANIC_DISABLE__SHIFT 0x1b
5289#define DB_WATERMARKS__LATE_Z_PANIC_DISABLE_MASK 0x10000000
5290#define DB_WATERMARKS__LATE_Z_PANIC_DISABLE__SHIFT 0x1c
5291#define DB_WATERMARKS__RE_Z_PANIC_DISABLE_MASK 0x20000000
5292#define DB_WATERMARKS__RE_Z_PANIC_DISABLE__SHIFT 0x1d
5293#define DB_WATERMARKS__AUTO_FLUSH_HTILE_MASK 0x40000000
5294#define DB_WATERMARKS__AUTO_FLUSH_HTILE__SHIFT 0x1e
5295#define DB_WATERMARKS__AUTO_FLUSH_QUAD_MASK 0x80000000
5296#define DB_WATERMARKS__AUTO_FLUSH_QUAD__SHIFT 0x1f
5297#define DB_SUBTILE_CONTROL__MSAA1_X_MASK 0x3
5298#define DB_SUBTILE_CONTROL__MSAA1_X__SHIFT 0x0
5299#define DB_SUBTILE_CONTROL__MSAA1_Y_MASK 0xc
5300#define DB_SUBTILE_CONTROL__MSAA1_Y__SHIFT 0x2
5301#define DB_SUBTILE_CONTROL__MSAA2_X_MASK 0x30
5302#define DB_SUBTILE_CONTROL__MSAA2_X__SHIFT 0x4
5303#define DB_SUBTILE_CONTROL__MSAA2_Y_MASK 0xc0
5304#define DB_SUBTILE_CONTROL__MSAA2_Y__SHIFT 0x6
5305#define DB_SUBTILE_CONTROL__MSAA4_X_MASK 0x300
5306#define DB_SUBTILE_CONTROL__MSAA4_X__SHIFT 0x8
5307#define DB_SUBTILE_CONTROL__MSAA4_Y_MASK 0xc00
5308#define DB_SUBTILE_CONTROL__MSAA4_Y__SHIFT 0xa
5309#define DB_SUBTILE_CONTROL__MSAA8_X_MASK 0x3000
5310#define DB_SUBTILE_CONTROL__MSAA8_X__SHIFT 0xc
5311#define DB_SUBTILE_CONTROL__MSAA8_Y_MASK 0xc000
5312#define DB_SUBTILE_CONTROL__MSAA8_Y__SHIFT 0xe
5313#define DB_SUBTILE_CONTROL__MSAA16_X_MASK 0x30000
5314#define DB_SUBTILE_CONTROL__MSAA16_X__SHIFT 0x10
5315#define DB_SUBTILE_CONTROL__MSAA16_Y_MASK 0xc0000
5316#define DB_SUBTILE_CONTROL__MSAA16_Y__SHIFT 0x12
5317#define DB_FREE_CACHELINES__FREE_DTILE_DEPTH_MASK 0x7f
5318#define DB_FREE_CACHELINES__FREE_DTILE_DEPTH__SHIFT 0x0
5319#define DB_FREE_CACHELINES__FREE_PLANE_DEPTH_MASK 0x3f80
5320#define DB_FREE_CACHELINES__FREE_PLANE_DEPTH__SHIFT 0x7
5321#define DB_FREE_CACHELINES__FREE_Z_DEPTH_MASK 0x1fc000
5322#define DB_FREE_CACHELINES__FREE_Z_DEPTH__SHIFT 0xe
5323#define DB_FREE_CACHELINES__FREE_HTILE_DEPTH_MASK 0x1e00000
5324#define DB_FREE_CACHELINES__FREE_HTILE_DEPTH__SHIFT 0x15
5325#define DB_FREE_CACHELINES__QUAD_READ_REQS_MASK 0xfe000000
5326#define DB_FREE_CACHELINES__QUAD_READ_REQS__SHIFT 0x19
5327#define DB_FIFO_DEPTH1__MI_RDREQ_FIFO_DEPTH_MASK 0x1f
5328#define DB_FIFO_DEPTH1__MI_RDREQ_FIFO_DEPTH__SHIFT 0x0
5329#define DB_FIFO_DEPTH1__MI_WRREQ_FIFO_DEPTH_MASK 0x3e0
5330#define DB_FIFO_DEPTH1__MI_WRREQ_FIFO_DEPTH__SHIFT 0x5
5331#define DB_FIFO_DEPTH1__MCC_DEPTH_MASK 0xfc00
5332#define DB_FIFO_DEPTH1__MCC_DEPTH__SHIFT 0xa
5333#define DB_FIFO_DEPTH1__QC_DEPTH_MASK 0x1f0000
5334#define DB_FIFO_DEPTH1__QC_DEPTH__SHIFT 0x10
5335#define DB_FIFO_DEPTH1__LTILE_PROBE_FIFO_DEPTH_MASK 0x1fe00000
5336#define DB_FIFO_DEPTH1__LTILE_PROBE_FIFO_DEPTH__SHIFT 0x15
5337#define DB_FIFO_DEPTH2__EQUAD_FIFO_DEPTH_MASK 0xff
5338#define DB_FIFO_DEPTH2__EQUAD_FIFO_DEPTH__SHIFT 0x0
5339#define DB_FIFO_DEPTH2__ETILE_OP_FIFO_DEPTH_MASK 0x7f00
5340#define DB_FIFO_DEPTH2__ETILE_OP_FIFO_DEPTH__SHIFT 0x8
5341#define DB_FIFO_DEPTH2__LQUAD_FIFO_DEPTH_MASK 0x1ff8000
5342#define DB_FIFO_DEPTH2__LQUAD_FIFO_DEPTH__SHIFT 0xf
5343#define DB_FIFO_DEPTH2__LTILE_OP_FIFO_DEPTH_MASK 0xfe000000
5344#define DB_FIFO_DEPTH2__LTILE_OP_FIFO_DEPTH__SHIFT 0x19
5345#define DB_CGTT_CLK_CTRL_0__ON_DELAY_MASK 0xf
5346#define DB_CGTT_CLK_CTRL_0__ON_DELAY__SHIFT 0x0
5347#define DB_CGTT_CLK_CTRL_0__OFF_HYSTERESIS_MASK 0xff0
5348#define DB_CGTT_CLK_CTRL_0__OFF_HYSTERESIS__SHIFT 0x4
5349#define DB_CGTT_CLK_CTRL_0__RESERVED_MASK 0xfff000
5350#define DB_CGTT_CLK_CTRL_0__RESERVED__SHIFT 0xc
5351#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7_MASK 0x1000000
5352#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7__SHIFT 0x18
5353#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6_MASK 0x2000000
5354#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6__SHIFT 0x19
5355#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5_MASK 0x4000000
5356#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5__SHIFT 0x1a
5357#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4_MASK 0x8000000
5358#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4__SHIFT 0x1b
5359#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3_MASK 0x10000000
5360#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3__SHIFT 0x1c
5361#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2_MASK 0x20000000
5362#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2__SHIFT 0x1d
5363#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1_MASK 0x40000000
5364#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1__SHIFT 0x1e
5365#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0_MASK 0x80000000
5366#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0__SHIFT 0x1f
5367#define DB_ZPASS_COUNT_LOW__COUNT_LOW_MASK 0xffffffff
5368#define DB_ZPASS_COUNT_LOW__COUNT_LOW__SHIFT 0x0
5369#define DB_ZPASS_COUNT_HI__COUNT_HI_MASK 0x7fffffff
5370#define DB_ZPASS_COUNT_HI__COUNT_HI__SHIFT 0x0
5371#define DB_RING_CONTROL__COUNTER_CONTROL_MASK 0x3
5372#define DB_RING_CONTROL__COUNTER_CONTROL__SHIFT 0x0
5373#define DB_READ_DEBUG_0__BUSY_DATA0_MASK 0xffffffff
5374#define DB_READ_DEBUG_0__BUSY_DATA0__SHIFT 0x0
5375#define DB_READ_DEBUG_1__BUSY_DATA1_MASK 0xffffffff
5376#define DB_READ_DEBUG_1__BUSY_DATA1__SHIFT 0x0
5377#define DB_READ_DEBUG_2__BUSY_DATA2_MASK 0xffffffff
5378#define DB_READ_DEBUG_2__BUSY_DATA2__SHIFT 0x0
5379#define DB_READ_DEBUG_3__DEBUG_DATA_MASK 0xffffffff
5380#define DB_READ_DEBUG_3__DEBUG_DATA__SHIFT 0x0
5381#define DB_READ_DEBUG_4__DEBUG_DATA_MASK 0xffffffff
5382#define DB_READ_DEBUG_4__DEBUG_DATA__SHIFT 0x0
5383#define DB_READ_DEBUG_5__DEBUG_DATA_MASK 0xffffffff
5384#define DB_READ_DEBUG_5__DEBUG_DATA__SHIFT 0x0
5385#define DB_READ_DEBUG_6__DEBUG_DATA_MASK 0xffffffff
5386#define DB_READ_DEBUG_6__DEBUG_DATA__SHIFT 0x0
5387#define DB_READ_DEBUG_7__DEBUG_DATA_MASK 0xffffffff
5388#define DB_READ_DEBUG_7__DEBUG_DATA__SHIFT 0x0
5389#define DB_READ_DEBUG_8__DEBUG_DATA_MASK 0xffffffff
5390#define DB_READ_DEBUG_8__DEBUG_DATA__SHIFT 0x0
5391#define DB_READ_DEBUG_9__DEBUG_DATA_MASK 0xffffffff
5392#define DB_READ_DEBUG_9__DEBUG_DATA__SHIFT 0x0
5393#define DB_READ_DEBUG_A__DEBUG_DATA_MASK 0xffffffff
5394#define DB_READ_DEBUG_A__DEBUG_DATA__SHIFT 0x0
5395#define DB_READ_DEBUG_B__DEBUG_DATA_MASK 0xffffffff
5396#define DB_READ_DEBUG_B__DEBUG_DATA__SHIFT 0x0
5397#define DB_READ_DEBUG_C__DEBUG_DATA_MASK 0xffffffff
5398#define DB_READ_DEBUG_C__DEBUG_DATA__SHIFT 0x0
5399#define DB_READ_DEBUG_D__DEBUG_DATA_MASK 0xffffffff
5400#define DB_READ_DEBUG_D__DEBUG_DATA__SHIFT 0x0
5401#define DB_READ_DEBUG_E__DEBUG_DATA_MASK 0xffffffff
5402#define DB_READ_DEBUG_E__DEBUG_DATA__SHIFT 0x0
5403#define DB_READ_DEBUG_F__DEBUG_DATA_MASK 0xffffffff
5404#define DB_READ_DEBUG_F__DEBUG_DATA__SHIFT 0x0
5405#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW_MASK 0xffffffff
5406#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW__SHIFT 0x0
5407#define DB_OCCLUSION_COUNT0_HI__COUNT_HI_MASK 0x7fffffff
5408#define DB_OCCLUSION_COUNT0_HI__COUNT_HI__SHIFT 0x0
5409#define DB_OCCLUSION_COUNT1_LOW__COUNT_LOW_MASK 0xffffffff
5410#define DB_OCCLUSION_COUNT1_LOW__COUNT_LOW__SHIFT 0x0
5411#define DB_OCCLUSION_COUNT1_HI__COUNT_HI_MASK 0x7fffffff
5412#define DB_OCCLUSION_COUNT1_HI__COUNT_HI__SHIFT 0x0
5413#define DB_OCCLUSION_COUNT2_LOW__COUNT_LOW_MASK 0xffffffff
5414#define DB_OCCLUSION_COUNT2_LOW__COUNT_LOW__SHIFT 0x0
5415#define DB_OCCLUSION_COUNT2_HI__COUNT_HI_MASK 0x7fffffff
5416#define DB_OCCLUSION_COUNT2_HI__COUNT_HI__SHIFT 0x0
5417#define DB_OCCLUSION_COUNT3_LOW__COUNT_LOW_MASK 0xffffffff
5418#define DB_OCCLUSION_COUNT3_LOW__COUNT_LOW__SHIFT 0x0
5419#define DB_OCCLUSION_COUNT3_HI__COUNT_HI_MASK 0x7fffffff
5420#define DB_OCCLUSION_COUNT3_HI__COUNT_HI__SHIFT 0x0
5421#define CC_RB_REDUNDANCY__FAILED_RB0_MASK 0xf00
5422#define CC_RB_REDUNDANCY__FAILED_RB0__SHIFT 0x8
5423#define CC_RB_REDUNDANCY__EN_REDUNDANCY0_MASK 0x1000
5424#define CC_RB_REDUNDANCY__EN_REDUNDANCY0__SHIFT 0xc
5425#define CC_RB_REDUNDANCY__FAILED_RB1_MASK 0xf0000
5426#define CC_RB_REDUNDANCY__FAILED_RB1__SHIFT 0x10
/*
 * Register bit-field mask/shift definitions (generated table).
 *
 * Each register field is described by a pair of object-like macros:
 *   <REG>__<FIELD>_MASK    — bit mask of the field within the 32-bit register
 *   <REG>__<FIELD>__SHIFT  — bit position of the field's LSB
 * so that (value & MASK) >> SHIFT extracts the field.
 *
 * NOTE(review): this block appears to be part of an auto-generated GPU
 * register header (GRBM/GB/RAS graphics-block registers) — values should
 * not be hand-edited; regenerate from the hardware database instead.
 * The stray line-number prefixes that had been fused onto each directive
 * (e.g. "5427#define") are removed here; they made the header invalid C.
 */
#define CC_RB_REDUNDANCY__EN_REDUNDANCY1_MASK 0x100000
#define CC_RB_REDUNDANCY__EN_REDUNDANCY1__SHIFT 0x14
#define CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0xff0000
#define CC_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x10
#define GC_USER_RB_REDUNDANCY__FAILED_RB0_MASK 0xf00
#define GC_USER_RB_REDUNDANCY__FAILED_RB0__SHIFT 0x8
#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY0_MASK 0x1000
#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY0__SHIFT 0xc
#define GC_USER_RB_REDUNDANCY__FAILED_RB1_MASK 0xf0000
#define GC_USER_RB_REDUNDANCY__FAILED_RB1__SHIFT 0x10
#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY1_MASK 0x100000
#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY1__SHIFT 0x14
#define GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0xff0000
#define GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x10
#define GB_ADDR_CONFIG__NUM_PIPES_MASK 0x7
#define GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
#define GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x70
#define GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x4
#define GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x700
#define GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
#define GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x3000
#define GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0xc
#define GB_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x70000
#define GB_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x10
#define GB_ADDR_CONFIG__NUM_GPUS_MASK 0x700000
#define GB_ADDR_CONFIG__NUM_GPUS__SHIFT 0x14
#define GB_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x3000000
#define GB_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x18
#define GB_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000
#define GB_ADDR_CONFIG__ROW_SIZE__SHIFT 0x1c
#define GB_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000
#define GB_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x1e
#define GB_BACKEND_MAP__BACKEND_MAP_MASK 0xffffffff
#define GB_BACKEND_MAP__BACKEND_MAP__SHIFT 0x0
#define GB_GPU_ID__GPU_ID_MASK 0xf
#define GB_GPU_ID__GPU_ID__SHIFT 0x0
#define CC_RB_DAISY_CHAIN__RB_0_MASK 0xf
#define CC_RB_DAISY_CHAIN__RB_0__SHIFT 0x0
#define CC_RB_DAISY_CHAIN__RB_1_MASK 0xf0
#define CC_RB_DAISY_CHAIN__RB_1__SHIFT 0x4
#define CC_RB_DAISY_CHAIN__RB_2_MASK 0xf00
#define CC_RB_DAISY_CHAIN__RB_2__SHIFT 0x8
#define CC_RB_DAISY_CHAIN__RB_3_MASK 0xf000
#define CC_RB_DAISY_CHAIN__RB_3__SHIFT 0xc
#define CC_RB_DAISY_CHAIN__RB_4_MASK 0xf0000
#define CC_RB_DAISY_CHAIN__RB_4__SHIFT 0x10
#define CC_RB_DAISY_CHAIN__RB_5_MASK 0xf00000
#define CC_RB_DAISY_CHAIN__RB_5__SHIFT 0x14
#define CC_RB_DAISY_CHAIN__RB_6_MASK 0xf000000
#define CC_RB_DAISY_CHAIN__RB_6__SHIFT 0x18
#define CC_RB_DAISY_CHAIN__RB_7_MASK 0xf0000000
#define CC_RB_DAISY_CHAIN__RB_7__SHIFT 0x1c
/* GB_TILE_MODE0..31: identical field layout per tile-mode register. */
#define GB_TILE_MODE0__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE0__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE0__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE0__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE0__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE0__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE0__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE0__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE0__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE1__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE1__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE1__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE1__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE1__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE1__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE1__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE1__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE1__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE1__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE2__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE2__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE2__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE2__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE2__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE2__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE2__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE2__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE2__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE2__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE3__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE3__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE3__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE3__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE3__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE3__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE3__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE3__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE3__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE3__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE4__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE4__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE4__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE4__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE4__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE4__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE4__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE4__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE4__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE4__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE5__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE5__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE5__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE5__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE5__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE5__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE5__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE5__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE5__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE5__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE6__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE6__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE6__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE6__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE6__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE6__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE6__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE6__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE6__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE6__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE7__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE7__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE7__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE7__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE7__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE7__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE7__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE7__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE7__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE7__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE8__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE8__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE8__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE8__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE8__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE8__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE8__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE8__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE8__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE8__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE9__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE9__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE9__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE9__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE9__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE9__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE9__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE9__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE9__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE9__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE10__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE10__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE10__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE10__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE10__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE10__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE10__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE10__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE10__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE10__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE11__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE11__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE11__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE11__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE11__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE11__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE11__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE11__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE11__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE11__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE12__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE12__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE12__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE12__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE12__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE12__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE12__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE12__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE12__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE12__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE13__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE13__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE13__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE13__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE13__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE13__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE13__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE13__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE13__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE13__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE14__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE14__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE14__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE14__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE14__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE14__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE14__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE14__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE14__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE14__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE15__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE15__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE15__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE15__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE15__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE15__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE15__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE15__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE15__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE15__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE16__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE16__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE16__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE16__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE16__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE16__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE16__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE16__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE16__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE16__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE17__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE17__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE17__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE17__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE17__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE17__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE17__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE17__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE17__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE17__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE18__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE18__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE18__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE18__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE18__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE18__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE18__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE18__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE18__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE18__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE19__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE19__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE19__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE19__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE19__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE19__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE19__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE19__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE19__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE19__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE20__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE20__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE20__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE20__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE20__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE20__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE20__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE20__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE20__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE20__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE21__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE21__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE21__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE21__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE21__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE21__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE21__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE21__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE21__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE21__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE22__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE22__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE22__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE22__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE22__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE22__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE22__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE22__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE22__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE22__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE23__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE23__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE23__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE23__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE23__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE23__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE23__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE23__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE23__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE23__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE24__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE24__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE24__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE24__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE24__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE24__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE24__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE24__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE24__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE24__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE25__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE25__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE25__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE25__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE25__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE25__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE25__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE25__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE25__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE25__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE26__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE26__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE26__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE26__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE26__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE26__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE26__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE26__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE26__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE26__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE27__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE27__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE27__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE27__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE27__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE27__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE27__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE27__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE27__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE27__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE28__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE28__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE28__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE28__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE28__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE28__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE28__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE28__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE28__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE28__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE29__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE29__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE29__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE29__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE29__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE29__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE29__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE29__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE29__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE29__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE30__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE30__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE30__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE30__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE30__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE30__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE30__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE30__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE30__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE30__SAMPLE_SPLIT__SHIFT 0x19
#define GB_TILE_MODE31__ARRAY_MODE_MASK 0x3c
#define GB_TILE_MODE31__ARRAY_MODE__SHIFT 0x2
#define GB_TILE_MODE31__PIPE_CONFIG_MASK 0x7c0
#define GB_TILE_MODE31__PIPE_CONFIG__SHIFT 0x6
#define GB_TILE_MODE31__TILE_SPLIT_MASK 0x3800
#define GB_TILE_MODE31__TILE_SPLIT__SHIFT 0xb
#define GB_TILE_MODE31__MICRO_TILE_MODE_NEW_MASK 0x1c00000
#define GB_TILE_MODE31__MICRO_TILE_MODE_NEW__SHIFT 0x16
#define GB_TILE_MODE31__SAMPLE_SPLIT_MASK 0x6000000
#define GB_TILE_MODE31__SAMPLE_SPLIT__SHIFT 0x19
/* GB_MACROTILE_MODE0..15: identical field layout per macrotile-mode register. */
#define GB_MACROTILE_MODE0__BANK_WIDTH_MASK 0x3
#define GB_MACROTILE_MODE0__BANK_WIDTH__SHIFT 0x0
#define GB_MACROTILE_MODE0__BANK_HEIGHT_MASK 0xc
#define GB_MACROTILE_MODE0__BANK_HEIGHT__SHIFT 0x2
#define GB_MACROTILE_MODE0__MACRO_TILE_ASPECT_MASK 0x30
#define GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT 0x4
#define GB_MACROTILE_MODE0__NUM_BANKS_MASK 0xc0
#define GB_MACROTILE_MODE0__NUM_BANKS__SHIFT 0x6
#define GB_MACROTILE_MODE1__BANK_WIDTH_MASK 0x3
#define GB_MACROTILE_MODE1__BANK_WIDTH__SHIFT 0x0
#define GB_MACROTILE_MODE1__BANK_HEIGHT_MASK 0xc
#define GB_MACROTILE_MODE1__BANK_HEIGHT__SHIFT 0x2
#define GB_MACROTILE_MODE1__MACRO_TILE_ASPECT_MASK 0x30
#define GB_MACROTILE_MODE1__MACRO_TILE_ASPECT__SHIFT 0x4
#define GB_MACROTILE_MODE1__NUM_BANKS_MASK 0xc0
#define GB_MACROTILE_MODE1__NUM_BANKS__SHIFT 0x6
#define GB_MACROTILE_MODE2__BANK_WIDTH_MASK 0x3
#define GB_MACROTILE_MODE2__BANK_WIDTH__SHIFT 0x0
#define GB_MACROTILE_MODE2__BANK_HEIGHT_MASK 0xc
#define GB_MACROTILE_MODE2__BANK_HEIGHT__SHIFT 0x2
#define GB_MACROTILE_MODE2__MACRO_TILE_ASPECT_MASK 0x30
#define GB_MACROTILE_MODE2__MACRO_TILE_ASPECT__SHIFT 0x4
#define GB_MACROTILE_MODE2__NUM_BANKS_MASK 0xc0
#define GB_MACROTILE_MODE2__NUM_BANKS__SHIFT 0x6
#define GB_MACROTILE_MODE3__BANK_WIDTH_MASK 0x3
#define GB_MACROTILE_MODE3__BANK_WIDTH__SHIFT 0x0
#define GB_MACROTILE_MODE3__BANK_HEIGHT_MASK 0xc
#define GB_MACROTILE_MODE3__BANK_HEIGHT__SHIFT 0x2
#define GB_MACROTILE_MODE3__MACRO_TILE_ASPECT_MASK 0x30
#define GB_MACROTILE_MODE3__MACRO_TILE_ASPECT__SHIFT 0x4
#define GB_MACROTILE_MODE3__NUM_BANKS_MASK 0xc0
#define GB_MACROTILE_MODE3__NUM_BANKS__SHIFT 0x6
#define GB_MACROTILE_MODE4__BANK_WIDTH_MASK 0x3
#define GB_MACROTILE_MODE4__BANK_WIDTH__SHIFT 0x0
#define GB_MACROTILE_MODE4__BANK_HEIGHT_MASK 0xc
#define GB_MACROTILE_MODE4__BANK_HEIGHT__SHIFT 0x2
#define GB_MACROTILE_MODE4__MACRO_TILE_ASPECT_MASK 0x30
#define GB_MACROTILE_MODE4__MACRO_TILE_ASPECT__SHIFT 0x4
#define GB_MACROTILE_MODE4__NUM_BANKS_MASK 0xc0
#define GB_MACROTILE_MODE4__NUM_BANKS__SHIFT 0x6
#define GB_MACROTILE_MODE5__BANK_WIDTH_MASK 0x3
#define GB_MACROTILE_MODE5__BANK_WIDTH__SHIFT 0x0
#define GB_MACROTILE_MODE5__BANK_HEIGHT_MASK 0xc
#define GB_MACROTILE_MODE5__BANK_HEIGHT__SHIFT 0x2
#define GB_MACROTILE_MODE5__MACRO_TILE_ASPECT_MASK 0x30
#define GB_MACROTILE_MODE5__MACRO_TILE_ASPECT__SHIFT 0x4
#define GB_MACROTILE_MODE5__NUM_BANKS_MASK 0xc0
#define GB_MACROTILE_MODE5__NUM_BANKS__SHIFT 0x6
#define GB_MACROTILE_MODE6__BANK_WIDTH_MASK 0x3
#define GB_MACROTILE_MODE6__BANK_WIDTH__SHIFT 0x0
#define GB_MACROTILE_MODE6__BANK_HEIGHT_MASK 0xc
#define GB_MACROTILE_MODE6__BANK_HEIGHT__SHIFT 0x2
#define GB_MACROTILE_MODE6__MACRO_TILE_ASPECT_MASK 0x30
#define GB_MACROTILE_MODE6__MACRO_TILE_ASPECT__SHIFT 0x4
#define GB_MACROTILE_MODE6__NUM_BANKS_MASK 0xc0
#define GB_MACROTILE_MODE6__NUM_BANKS__SHIFT 0x6
#define GB_MACROTILE_MODE7__BANK_WIDTH_MASK 0x3
#define GB_MACROTILE_MODE7__BANK_WIDTH__SHIFT 0x0
#define GB_MACROTILE_MODE7__BANK_HEIGHT_MASK 0xc
#define GB_MACROTILE_MODE7__BANK_HEIGHT__SHIFT 0x2
#define GB_MACROTILE_MODE7__MACRO_TILE_ASPECT_MASK 0x30
#define GB_MACROTILE_MODE7__MACRO_TILE_ASPECT__SHIFT 0x4
#define GB_MACROTILE_MODE7__NUM_BANKS_MASK 0xc0
#define GB_MACROTILE_MODE7__NUM_BANKS__SHIFT 0x6
#define GB_MACROTILE_MODE8__BANK_WIDTH_MASK 0x3
#define GB_MACROTILE_MODE8__BANK_WIDTH__SHIFT 0x0
#define GB_MACROTILE_MODE8__BANK_HEIGHT_MASK 0xc
#define GB_MACROTILE_MODE8__BANK_HEIGHT__SHIFT 0x2
#define GB_MACROTILE_MODE8__MACRO_TILE_ASPECT_MASK 0x30
#define GB_MACROTILE_MODE8__MACRO_TILE_ASPECT__SHIFT 0x4
#define GB_MACROTILE_MODE8__NUM_BANKS_MASK 0xc0
#define GB_MACROTILE_MODE8__NUM_BANKS__SHIFT 0x6
#define GB_MACROTILE_MODE9__BANK_WIDTH_MASK 0x3
#define GB_MACROTILE_MODE9__BANK_WIDTH__SHIFT 0x0
#define GB_MACROTILE_MODE9__BANK_HEIGHT_MASK 0xc
#define GB_MACROTILE_MODE9__BANK_HEIGHT__SHIFT 0x2
#define GB_MACROTILE_MODE9__MACRO_TILE_ASPECT_MASK 0x30
#define GB_MACROTILE_MODE9__MACRO_TILE_ASPECT__SHIFT 0x4
#define GB_MACROTILE_MODE9__NUM_BANKS_MASK 0xc0
#define GB_MACROTILE_MODE9__NUM_BANKS__SHIFT 0x6
#define GB_MACROTILE_MODE10__BANK_WIDTH_MASK 0x3
#define GB_MACROTILE_MODE10__BANK_WIDTH__SHIFT 0x0
#define GB_MACROTILE_MODE10__BANK_HEIGHT_MASK 0xc
#define GB_MACROTILE_MODE10__BANK_HEIGHT__SHIFT 0x2
#define GB_MACROTILE_MODE10__MACRO_TILE_ASPECT_MASK 0x30
#define GB_MACROTILE_MODE10__MACRO_TILE_ASPECT__SHIFT 0x4
#define GB_MACROTILE_MODE10__NUM_BANKS_MASK 0xc0
#define GB_MACROTILE_MODE10__NUM_BANKS__SHIFT 0x6
#define GB_MACROTILE_MODE11__BANK_WIDTH_MASK 0x3
#define GB_MACROTILE_MODE11__BANK_WIDTH__SHIFT 0x0
#define GB_MACROTILE_MODE11__BANK_HEIGHT_MASK 0xc
#define GB_MACROTILE_MODE11__BANK_HEIGHT__SHIFT 0x2
#define GB_MACROTILE_MODE11__MACRO_TILE_ASPECT_MASK 0x30
#define GB_MACROTILE_MODE11__MACRO_TILE_ASPECT__SHIFT 0x4
#define GB_MACROTILE_MODE11__NUM_BANKS_MASK 0xc0
#define GB_MACROTILE_MODE11__NUM_BANKS__SHIFT 0x6
#define GB_MACROTILE_MODE12__BANK_WIDTH_MASK 0x3
#define GB_MACROTILE_MODE12__BANK_WIDTH__SHIFT 0x0
#define GB_MACROTILE_MODE12__BANK_HEIGHT_MASK 0xc
#define GB_MACROTILE_MODE12__BANK_HEIGHT__SHIFT 0x2
#define GB_MACROTILE_MODE12__MACRO_TILE_ASPECT_MASK 0x30
#define GB_MACROTILE_MODE12__MACRO_TILE_ASPECT__SHIFT 0x4
#define GB_MACROTILE_MODE12__NUM_BANKS_MASK 0xc0
#define GB_MACROTILE_MODE12__NUM_BANKS__SHIFT 0x6
#define GB_MACROTILE_MODE13__BANK_WIDTH_MASK 0x3
#define GB_MACROTILE_MODE13__BANK_WIDTH__SHIFT 0x0
#define GB_MACROTILE_MODE13__BANK_HEIGHT_MASK 0xc
#define GB_MACROTILE_MODE13__BANK_HEIGHT__SHIFT 0x2
#define GB_MACROTILE_MODE13__MACRO_TILE_ASPECT_MASK 0x30
#define GB_MACROTILE_MODE13__MACRO_TILE_ASPECT__SHIFT 0x4
#define GB_MACROTILE_MODE13__NUM_BANKS_MASK 0xc0
#define GB_MACROTILE_MODE13__NUM_BANKS__SHIFT 0x6
#define GB_MACROTILE_MODE14__BANK_WIDTH_MASK 0x3
#define GB_MACROTILE_MODE14__BANK_WIDTH__SHIFT 0x0
#define GB_MACROTILE_MODE14__BANK_HEIGHT_MASK 0xc
#define GB_MACROTILE_MODE14__BANK_HEIGHT__SHIFT 0x2
#define GB_MACROTILE_MODE14__MACRO_TILE_ASPECT_MASK 0x30
#define GB_MACROTILE_MODE14__MACRO_TILE_ASPECT__SHIFT 0x4
#define GB_MACROTILE_MODE14__NUM_BANKS_MASK 0xc0
#define GB_MACROTILE_MODE14__NUM_BANKS__SHIFT 0x6
#define GB_MACROTILE_MODE15__BANK_WIDTH_MASK 0x3
#define GB_MACROTILE_MODE15__BANK_WIDTH__SHIFT 0x0
#define GB_MACROTILE_MODE15__BANK_HEIGHT_MASK 0xc
#define GB_MACROTILE_MODE15__BANK_HEIGHT__SHIFT 0x2
#define GB_MACROTILE_MODE15__MACRO_TILE_ASPECT_MASK 0x30
#define GB_MACROTILE_MODE15__MACRO_TILE_ASPECT__SHIFT 0x4
#define GB_MACROTILE_MODE15__NUM_BANKS_MASK 0xc0
#define GB_MACROTILE_MODE15__NUM_BANKS__SHIFT 0x6
#define GB_EDC_MODE__FORCE_SEC_ON_DED_MASK 0x10000
#define GB_EDC_MODE__FORCE_SEC_ON_DED__SHIFT 0x10
#define GB_EDC_MODE__DED_MODE_MASK 0x300000
#define GB_EDC_MODE__DED_MODE__SHIFT 0x14
#define GB_EDC_MODE__PROP_FED_MASK 0x20000000
#define GB_EDC_MODE__PROP_FED__SHIFT 0x1d
#define GB_EDC_MODE__BYPASS_MASK 0x80000000
#define GB_EDC_MODE__BYPASS__SHIFT 0x1f
#define CC_GC_EDC_CONFIG__DIS_EDC_MASK 0x2
#define CC_GC_EDC_CONFIG__DIS_EDC__SHIFT 0x1
/* RAS_* signature registers are full-width 32-bit values. */
#define RAS_SIGNATURE_CONTROL__ENABLE_MASK 0x1
#define RAS_SIGNATURE_CONTROL__ENABLE__SHIFT 0x0
#define RAS_SIGNATURE_MASK__INPUT_BUS_MASK_MASK 0xffffffff
#define RAS_SIGNATURE_MASK__INPUT_BUS_MASK__SHIFT 0x0
#define RAS_SX_SIGNATURE0__SIGNATURE_MASK 0xffffffff
#define RAS_SX_SIGNATURE0__SIGNATURE__SHIFT 0x0
#define RAS_SX_SIGNATURE1__SIGNATURE_MASK 0xffffffff
#define RAS_SX_SIGNATURE1__SIGNATURE__SHIFT 0x0
#define RAS_SX_SIGNATURE2__SIGNATURE_MASK 0xffffffff
#define RAS_SX_SIGNATURE2__SIGNATURE__SHIFT 0x0
#define RAS_SX_SIGNATURE3__SIGNATURE_MASK 0xffffffff
#define RAS_SX_SIGNATURE3__SIGNATURE__SHIFT 0x0
#define RAS_DB_SIGNATURE0__SIGNATURE_MASK 0xffffffff
#define RAS_DB_SIGNATURE0__SIGNATURE__SHIFT 0x0
#define RAS_PA_SIGNATURE0__SIGNATURE_MASK 0xffffffff
#define RAS_PA_SIGNATURE0__SIGNATURE__SHIFT 0x0
#define RAS_VGT_SIGNATURE0__SIGNATURE_MASK 0xffffffff
#define RAS_VGT_SIGNATURE0__SIGNATURE__SHIFT 0x0
#define RAS_SC_SIGNATURE0__SIGNATURE_MASK 0xffffffff
#define RAS_SC_SIGNATURE0__SIGNATURE__SHIFT 0x0
#define RAS_SC_SIGNATURE1__SIGNATURE_MASK 0xffffffff
#define RAS_SC_SIGNATURE1__SIGNATURE__SHIFT 0x0
#define RAS_SC_SIGNATURE2__SIGNATURE_MASK 0xffffffff
#define RAS_SC_SIGNATURE2__SIGNATURE__SHIFT 0x0
#define RAS_SC_SIGNATURE3__SIGNATURE_MASK 0xffffffff
#define RAS_SC_SIGNATURE3__SIGNATURE__SHIFT 0x0
#define RAS_SC_SIGNATURE4__SIGNATURE_MASK 0xffffffff
#define RAS_SC_SIGNATURE4__SIGNATURE__SHIFT 0x0
#define RAS_SC_SIGNATURE5__SIGNATURE_MASK 0xffffffff
#define RAS_SC_SIGNATURE5__SIGNATURE__SHIFT 0x0
#define RAS_SC_SIGNATURE6__SIGNATURE_MASK 0xffffffff
#define RAS_SC_SIGNATURE6__SIGNATURE__SHIFT 0x0
#define RAS_SC_SIGNATURE7__SIGNATURE_MASK 0xffffffff
#define RAS_SC_SIGNATURE7__SIGNATURE__SHIFT 0x0
#define RAS_IA_SIGNATURE0__SIGNATURE_MASK 0xffffffff
#define RAS_IA_SIGNATURE0__SIGNATURE__SHIFT 0x0
#define RAS_IA_SIGNATURE1__SIGNATURE_MASK 0xffffffff
#define RAS_IA_SIGNATURE1__SIGNATURE__SHIFT 0x0
#define RAS_SPI_SIGNATURE0__SIGNATURE_MASK 0xffffffff
#define RAS_SPI_SIGNATURE0__SIGNATURE__SHIFT 0x0
#define RAS_SPI_SIGNATURE1__SIGNATURE_MASK 0xffffffff
#define RAS_SPI_SIGNATURE1__SIGNATURE__SHIFT 0x0
#define RAS_TA_SIGNATURE0__SIGNATURE_MASK 0xffffffff
#define RAS_TA_SIGNATURE0__SIGNATURE__SHIFT 0x0
#define RAS_TD_SIGNATURE0__SIGNATURE_MASK 0xffffffff
#define RAS_TD_SIGNATURE0__SIGNATURE__SHIFT 0x0
#define RAS_CB_SIGNATURE0__SIGNATURE_MASK 0xffffffff
#define RAS_CB_SIGNATURE0__SIGNATURE__SHIFT 0x0
#define RAS_BCI_SIGNATURE0__SIGNATURE_MASK 0xffffffff
#define RAS_BCI_SIGNATURE0__SIGNATURE__SHIFT 0x0
#define RAS_BCI_SIGNATURE1__SIGNATURE_MASK 0xffffffff
#define RAS_BCI_SIGNATURE1__SIGNATURE__SHIFT 0x0
#define RAS_TA_SIGNATURE1__SIGNATURE_MASK 0xffffffff
#define RAS_TA_SIGNATURE1__SIGNATURE__SHIFT 0x0
#define GRBM_HYP_CAM_INDEX__CAM_INDEX_MASK 0x7
#define GRBM_HYP_CAM_INDEX__CAM_INDEX__SHIFT 0x0
#define GRBM_CAM_INDEX__CAM_INDEX_MASK 0x7
#define GRBM_CAM_INDEX__CAM_INDEX__SHIFT 0x0
#define GRBM_HYP_CAM_DATA__CAM_ADDR_MASK 0xffff
#define GRBM_HYP_CAM_DATA__CAM_ADDR__SHIFT 0x0
#define GRBM_HYP_CAM_DATA__CAM_REMAPADDR_MASK 0xffff0000
#define GRBM_HYP_CAM_DATA__CAM_REMAPADDR__SHIFT 0x10
#define GRBM_CAM_DATA__CAM_ADDR_MASK 0xffff
#define GRBM_CAM_DATA__CAM_ADDR__SHIFT 0x0
#define GRBM_CAM_DATA__CAM_REMAPADDR_MASK 0xffff0000
#define GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT 0x10
#define GRBM_CNTL__READ_TIMEOUT_MASK 0xff
#define GRBM_CNTL__READ_TIMEOUT__SHIFT 0x0
#define GRBM_CNTL__REPORT_LAST_RDERR_MASK 0x80000000
#define GRBM_CNTL__REPORT_LAST_RDERR__SHIFT 0x1f
#define GRBM_SKEW_CNTL__SKEW_TOP_THRESHOLD_MASK 0x3f
#define GRBM_SKEW_CNTL__SKEW_TOP_THRESHOLD__SHIFT 0x0
#define GRBM_SKEW_CNTL__SKEW_COUNT_MASK 0xfc0
#define GRBM_SKEW_CNTL__SKEW_COUNT__SHIFT 0x6
#define GRBM_PWR_CNTL__ALL_REQ_TYPE_MASK 0x3
#define GRBM_PWR_CNTL__ALL_REQ_TYPE__SHIFT 0x0
#define GRBM_PWR_CNTL__GFX_REQ_TYPE_MASK 0xc
#define GRBM_PWR_CNTL__GFX_REQ_TYPE__SHIFT 0x2
#define GRBM_PWR_CNTL__ALL_RSP_TYPE_MASK 0x30
#define GRBM_PWR_CNTL__ALL_RSP_TYPE__SHIFT 0x4
#define GRBM_PWR_CNTL__GFX_RSP_TYPE_MASK 0xc0
#define GRBM_PWR_CNTL__GFX_RSP_TYPE__SHIFT 0x6
#define GRBM_PWR_CNTL__GFX_REQ_EN_MASK 0x4000
#define GRBM_PWR_CNTL__GFX_REQ_EN__SHIFT 0xe
#define GRBM_PWR_CNTL__ALL_REQ_EN_MASK 0x8000
#define GRBM_PWR_CNTL__ALL_REQ_EN__SHIFT 0xf
#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL_MASK 0xf
#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL__SHIFT 0x0
#define GRBM_STATUS__SRBM_RQ_PENDING_MASK 0x20
#define GRBM_STATUS__SRBM_RQ_PENDING__SHIFT 0x5
#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING_MASK 0x80
#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING__SHIFT 0x7
#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING_MASK 0x100
#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING__SHIFT 0x8
#define GRBM_STATUS__GDS_DMA_RQ_PENDING_MASK 0x200
#define GRBM_STATUS__GDS_DMA_RQ_PENDING__SHIFT 0x9
#define GRBM_STATUS__DB_CLEAN_MASK 0x1000
#define GRBM_STATUS__DB_CLEAN__SHIFT 0xc
#define GRBM_STATUS__CB_CLEAN_MASK 0x2000
#define GRBM_STATUS__CB_CLEAN__SHIFT 0xd
#define GRBM_STATUS__TA_BUSY_MASK 0x4000
#define GRBM_STATUS__TA_BUSY__SHIFT 0xe
#define GRBM_STATUS__GDS_BUSY_MASK 0x8000
#define GRBM_STATUS__GDS_BUSY__SHIFT 0xf
#define GRBM_STATUS__WD_BUSY_NO_DMA_MASK 0x10000
#define GRBM_STATUS__WD_BUSY_NO_DMA__SHIFT 0x10
#define GRBM_STATUS__VGT_BUSY_MASK 0x20000
#define GRBM_STATUS__VGT_BUSY__SHIFT 0x11
#define GRBM_STATUS__IA_BUSY_NO_DMA_MASK 0x40000
#define GRBM_STATUS__IA_BUSY_NO_DMA__SHIFT 0x12
#define GRBM_STATUS__IA_BUSY_MASK 0x80000
#define GRBM_STATUS__IA_BUSY__SHIFT 0x13
#define GRBM_STATUS__SX_BUSY_MASK 0x100000
#define GRBM_STATUS__SX_BUSY__SHIFT 0x14
#define GRBM_STATUS__WD_BUSY_MASK 0x200000
#define GRBM_STATUS__WD_BUSY__SHIFT 0x15
#define GRBM_STATUS__SPI_BUSY_MASK 0x400000
#define GRBM_STATUS__SPI_BUSY__SHIFT 0x16
#define GRBM_STATUS__BCI_BUSY_MASK 0x800000
#define GRBM_STATUS__BCI_BUSY__SHIFT 0x17
#define GRBM_STATUS__SC_BUSY_MASK 0x1000000
#define GRBM_STATUS__SC_BUSY__SHIFT 0x18
#define GRBM_STATUS__PA_BUSY_MASK 0x2000000
#define GRBM_STATUS__PA_BUSY__SHIFT 0x19
#define GRBM_STATUS__DB_BUSY_MASK 0x4000000
#define GRBM_STATUS__DB_BUSY__SHIFT 0x1a
#define GRBM_STATUS__CP_COHERENCY_BUSY_MASK 0x10000000
#define GRBM_STATUS__CP_COHERENCY_BUSY__SHIFT 0x1c
#define GRBM_STATUS__CP_BUSY_MASK 0x20000000
#define GRBM_STATUS__CP_BUSY__SHIFT 0x1d
#define GRBM_STATUS__CB_BUSY_MASK 0x40000000
#define GRBM_STATUS__CB_BUSY__SHIFT 0x1e
#define GRBM_STATUS__GUI_ACTIVE_MASK 0x80000000
#define GRBM_STATUS__GUI_ACTIVE__SHIFT 0x1f
#define GRBM_STATUS2__ME0PIPE1_CMDFIFO_AVAIL_MASK 0xf
#define GRBM_STATUS2__ME0PIPE1_CMDFIFO_AVAIL__SHIFT 0x0
#define GRBM_STATUS2__ME0PIPE1_CF_RQ_PENDING_MASK 0x10
#define GRBM_STATUS2__ME0PIPE1_CF_RQ_PENDING__SHIFT 0x4
#define GRBM_STATUS2__ME0PIPE1_PF_RQ_PENDING_MASK 0x20
#define GRBM_STATUS2__ME0PIPE1_PF_RQ_PENDING__SHIFT 0x5
#define GRBM_STATUS2__ME1PIPE0_RQ_PENDING_MASK 0x40
#define GRBM_STATUS2__ME1PIPE0_RQ_PENDING__SHIFT 0x6
#define GRBM_STATUS2__ME1PIPE1_RQ_PENDING_MASK 0x80
#define GRBM_STATUS2__ME1PIPE1_RQ_PENDING__SHIFT 0x7
#define GRBM_STATUS2__ME1PIPE2_RQ_PENDING_MASK 0x100
#define GRBM_STATUS2__ME1PIPE2_RQ_PENDING__SHIFT 0x8
#define GRBM_STATUS2__ME1PIPE3_RQ_PENDING_MASK 0x200
#define GRBM_STATUS2__ME1PIPE3_RQ_PENDING__SHIFT 0x9
#define GRBM_STATUS2__ME2PIPE0_RQ_PENDING_MASK 0x400
#define GRBM_STATUS2__ME2PIPE0_RQ_PENDING__SHIFT 0xa
#define GRBM_STATUS2__ME2PIPE1_RQ_PENDING_MASK 0x800
#define GRBM_STATUS2__ME2PIPE1_RQ_PENDING__SHIFT 0xb
#define GRBM_STATUS2__ME2PIPE2_RQ_PENDING_MASK 0x1000
#define GRBM_STATUS2__ME2PIPE2_RQ_PENDING__SHIFT 0xc
#define GRBM_STATUS2__ME2PIPE3_RQ_PENDING_MASK 0x2000
#define GRBM_STATUS2__ME2PIPE3_RQ_PENDING__SHIFT 0xd
#define GRBM_STATUS2__RLC_RQ_PENDING_MASK 0x4000
#define GRBM_STATUS2__RLC_RQ_PENDING__SHIFT 0xe
#define GRBM_STATUS2__RLC_BUSY_MASK 0x1000000
#define GRBM_STATUS2__RLC_BUSY__SHIFT 0x18
#define GRBM_STATUS2__TC_BUSY_MASK 0x2000000
#define GRBM_STATUS2__TC_BUSY__SHIFT 0x19
#define GRBM_STATUS2__TCC_CC_RESIDENT_MASK 0x4000000
#define GRBM_STATUS2__TCC_CC_RESIDENT__SHIFT 0x1a
6101#define GRBM_STATUS2__CPF_BUSY_MASK 0x10000000
6102#define GRBM_STATUS2__CPF_BUSY__SHIFT 0x1c
6103#define GRBM_STATUS2__CPC_BUSY_MASK 0x20000000
6104#define GRBM_STATUS2__CPC_BUSY__SHIFT 0x1d
6105#define GRBM_STATUS2__CPG_BUSY_MASK 0x40000000
6106#define GRBM_STATUS2__CPG_BUSY__SHIFT 0x1e
6107#define GRBM_STATUS_SE0__DB_CLEAN_MASK 0x2
6108#define GRBM_STATUS_SE0__DB_CLEAN__SHIFT 0x1
6109#define GRBM_STATUS_SE0__CB_CLEAN_MASK 0x4
6110#define GRBM_STATUS_SE0__CB_CLEAN__SHIFT 0x2
6111#define GRBM_STATUS_SE0__BCI_BUSY_MASK 0x400000
6112#define GRBM_STATUS_SE0__BCI_BUSY__SHIFT 0x16
6113#define GRBM_STATUS_SE0__VGT_BUSY_MASK 0x800000
6114#define GRBM_STATUS_SE0__VGT_BUSY__SHIFT 0x17
6115#define GRBM_STATUS_SE0__PA_BUSY_MASK 0x1000000
6116#define GRBM_STATUS_SE0__PA_BUSY__SHIFT 0x18
6117#define GRBM_STATUS_SE0__TA_BUSY_MASK 0x2000000
6118#define GRBM_STATUS_SE0__TA_BUSY__SHIFT 0x19
6119#define GRBM_STATUS_SE0__SX_BUSY_MASK 0x4000000
6120#define GRBM_STATUS_SE0__SX_BUSY__SHIFT 0x1a
6121#define GRBM_STATUS_SE0__SPI_BUSY_MASK 0x8000000
6122#define GRBM_STATUS_SE0__SPI_BUSY__SHIFT 0x1b
6123#define GRBM_STATUS_SE0__SC_BUSY_MASK 0x20000000
6124#define GRBM_STATUS_SE0__SC_BUSY__SHIFT 0x1d
6125#define GRBM_STATUS_SE0__DB_BUSY_MASK 0x40000000
6126#define GRBM_STATUS_SE0__DB_BUSY__SHIFT 0x1e
6127#define GRBM_STATUS_SE0__CB_BUSY_MASK 0x80000000
6128#define GRBM_STATUS_SE0__CB_BUSY__SHIFT 0x1f
6129#define GRBM_STATUS_SE1__DB_CLEAN_MASK 0x2
6130#define GRBM_STATUS_SE1__DB_CLEAN__SHIFT 0x1
6131#define GRBM_STATUS_SE1__CB_CLEAN_MASK 0x4
6132#define GRBM_STATUS_SE1__CB_CLEAN__SHIFT 0x2
6133#define GRBM_STATUS_SE1__BCI_BUSY_MASK 0x400000
6134#define GRBM_STATUS_SE1__BCI_BUSY__SHIFT 0x16
6135#define GRBM_STATUS_SE1__VGT_BUSY_MASK 0x800000
6136#define GRBM_STATUS_SE1__VGT_BUSY__SHIFT 0x17
6137#define GRBM_STATUS_SE1__PA_BUSY_MASK 0x1000000
6138#define GRBM_STATUS_SE1__PA_BUSY__SHIFT 0x18
6139#define GRBM_STATUS_SE1__TA_BUSY_MASK 0x2000000
6140#define GRBM_STATUS_SE1__TA_BUSY__SHIFT 0x19
6141#define GRBM_STATUS_SE1__SX_BUSY_MASK 0x4000000
6142#define GRBM_STATUS_SE1__SX_BUSY__SHIFT 0x1a
6143#define GRBM_STATUS_SE1__SPI_BUSY_MASK 0x8000000
6144#define GRBM_STATUS_SE1__SPI_BUSY__SHIFT 0x1b
6145#define GRBM_STATUS_SE1__SC_BUSY_MASK 0x20000000
6146#define GRBM_STATUS_SE1__SC_BUSY__SHIFT 0x1d
6147#define GRBM_STATUS_SE1__DB_BUSY_MASK 0x40000000
6148#define GRBM_STATUS_SE1__DB_BUSY__SHIFT 0x1e
6149#define GRBM_STATUS_SE1__CB_BUSY_MASK 0x80000000
6150#define GRBM_STATUS_SE1__CB_BUSY__SHIFT 0x1f
6151#define GRBM_STATUS_SE2__DB_CLEAN_MASK 0x2
6152#define GRBM_STATUS_SE2__DB_CLEAN__SHIFT 0x1
6153#define GRBM_STATUS_SE2__CB_CLEAN_MASK 0x4
6154#define GRBM_STATUS_SE2__CB_CLEAN__SHIFT 0x2
6155#define GRBM_STATUS_SE2__BCI_BUSY_MASK 0x400000
6156#define GRBM_STATUS_SE2__BCI_BUSY__SHIFT 0x16
6157#define GRBM_STATUS_SE2__VGT_BUSY_MASK 0x800000
6158#define GRBM_STATUS_SE2__VGT_BUSY__SHIFT 0x17
6159#define GRBM_STATUS_SE2__PA_BUSY_MASK 0x1000000
6160#define GRBM_STATUS_SE2__PA_BUSY__SHIFT 0x18
6161#define GRBM_STATUS_SE2__TA_BUSY_MASK 0x2000000
6162#define GRBM_STATUS_SE2__TA_BUSY__SHIFT 0x19
6163#define GRBM_STATUS_SE2__SX_BUSY_MASK 0x4000000
6164#define GRBM_STATUS_SE2__SX_BUSY__SHIFT 0x1a
6165#define GRBM_STATUS_SE2__SPI_BUSY_MASK 0x8000000
6166#define GRBM_STATUS_SE2__SPI_BUSY__SHIFT 0x1b
6167#define GRBM_STATUS_SE2__SC_BUSY_MASK 0x20000000
6168#define GRBM_STATUS_SE2__SC_BUSY__SHIFT 0x1d
6169#define GRBM_STATUS_SE2__DB_BUSY_MASK 0x40000000
6170#define GRBM_STATUS_SE2__DB_BUSY__SHIFT 0x1e
6171#define GRBM_STATUS_SE2__CB_BUSY_MASK 0x80000000
6172#define GRBM_STATUS_SE2__CB_BUSY__SHIFT 0x1f
6173#define GRBM_STATUS_SE3__DB_CLEAN_MASK 0x2
6174#define GRBM_STATUS_SE3__DB_CLEAN__SHIFT 0x1
6175#define GRBM_STATUS_SE3__CB_CLEAN_MASK 0x4
6176#define GRBM_STATUS_SE3__CB_CLEAN__SHIFT 0x2
6177#define GRBM_STATUS_SE3__BCI_BUSY_MASK 0x400000
6178#define GRBM_STATUS_SE3__BCI_BUSY__SHIFT 0x16
6179#define GRBM_STATUS_SE3__VGT_BUSY_MASK 0x800000
6180#define GRBM_STATUS_SE3__VGT_BUSY__SHIFT 0x17
6181#define GRBM_STATUS_SE3__PA_BUSY_MASK 0x1000000
6182#define GRBM_STATUS_SE3__PA_BUSY__SHIFT 0x18
6183#define GRBM_STATUS_SE3__TA_BUSY_MASK 0x2000000
6184#define GRBM_STATUS_SE3__TA_BUSY__SHIFT 0x19
6185#define GRBM_STATUS_SE3__SX_BUSY_MASK 0x4000000
6186#define GRBM_STATUS_SE3__SX_BUSY__SHIFT 0x1a
6187#define GRBM_STATUS_SE3__SPI_BUSY_MASK 0x8000000
6188#define GRBM_STATUS_SE3__SPI_BUSY__SHIFT 0x1b
6189#define GRBM_STATUS_SE3__SC_BUSY_MASK 0x20000000
6190#define GRBM_STATUS_SE3__SC_BUSY__SHIFT 0x1d
6191#define GRBM_STATUS_SE3__DB_BUSY_MASK 0x40000000
6192#define GRBM_STATUS_SE3__DB_BUSY__SHIFT 0x1e
6193#define GRBM_STATUS_SE3__CB_BUSY_MASK 0x80000000
6194#define GRBM_STATUS_SE3__CB_BUSY__SHIFT 0x1f
6195#define GRBM_SOFT_RESET__SOFT_RESET_CP_MASK 0x1
6196#define GRBM_SOFT_RESET__SOFT_RESET_CP__SHIFT 0x0
6197#define GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK 0x4
6198#define GRBM_SOFT_RESET__SOFT_RESET_RLC__SHIFT 0x2
6199#define GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK 0x10000
6200#define GRBM_SOFT_RESET__SOFT_RESET_GFX__SHIFT 0x10
6201#define GRBM_SOFT_RESET__SOFT_RESET_CPF_MASK 0x20000
6202#define GRBM_SOFT_RESET__SOFT_RESET_CPF__SHIFT 0x11
6203#define GRBM_SOFT_RESET__SOFT_RESET_CPC_MASK 0x40000
6204#define GRBM_SOFT_RESET__SOFT_RESET_CPC__SHIFT 0x12
6205#define GRBM_SOFT_RESET__SOFT_RESET_CPG_MASK 0x80000
6206#define GRBM_SOFT_RESET__SOFT_RESET_CPG__SHIFT 0x13
6207#define GRBM_SOFT_RESET__SOFT_RESET_CAC_MASK 0x100000
6208#define GRBM_SOFT_RESET__SOFT_RESET_CAC__SHIFT 0x14
6209#define GRBM_DEBUG_CNTL__GRBM_DEBUG_INDEX_MASK 0x3f
6210#define GRBM_DEBUG_CNTL__GRBM_DEBUG_INDEX__SHIFT 0x0
6211#define GRBM_DEBUG_DATA__DATA_MASK 0xffffffff
6212#define GRBM_DEBUG_DATA__DATA__SHIFT 0x0
6213#define GRBM_CGTT_CLK_CNTL__ON_DELAY_MASK 0xf
6214#define GRBM_CGTT_CLK_CNTL__ON_DELAY__SHIFT 0x0
6215#define GRBM_CGTT_CLK_CNTL__OFF_HYSTERESIS_MASK 0xff0
6216#define GRBM_CGTT_CLK_CNTL__OFF_HYSTERESIS__SHIFT 0x4
6217#define GRBM_CGTT_CLK_CNTL__SOFT_OVERRIDE_DYN_MASK 0x40000000
6218#define GRBM_CGTT_CLK_CNTL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
6219#define GRBM_GFX_INDEX__INSTANCE_INDEX_MASK 0xff
6220#define GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT 0x0
6221#define GRBM_GFX_INDEX__SH_INDEX_MASK 0xff00
6222#define GRBM_GFX_INDEX__SH_INDEX__SHIFT 0x8
6223#define GRBM_GFX_INDEX__SE_INDEX_MASK 0xff0000
6224#define GRBM_GFX_INDEX__SE_INDEX__SHIFT 0x10
6225#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK 0x20000000
6226#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES__SHIFT 0x1d
6227#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK 0x40000000
6228#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT 0x1e
6229#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK 0x80000000
6230#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT 0x1f
6231#define GRBM_GFX_CLKEN_CNTL__PREFIX_DELAY_CNT_MASK 0xf
6232#define GRBM_GFX_CLKEN_CNTL__PREFIX_DELAY_CNT__SHIFT 0x0
6233#define GRBM_GFX_CLKEN_CNTL__POST_DELAY_CNT_MASK 0x1f00
6234#define GRBM_GFX_CLKEN_CNTL__POST_DELAY_CNT__SHIFT 0x8
6235#define GRBM_WAIT_IDLE_CLOCKS__WAIT_IDLE_CLOCKS_MASK 0xff
6236#define GRBM_WAIT_IDLE_CLOCKS__WAIT_IDLE_CLOCKS__SHIFT 0x0
6237#define GRBM_DEBUG__IGNORE_RDY_MASK 0x2
6238#define GRBM_DEBUG__IGNORE_RDY__SHIFT 0x1
6239#define GRBM_DEBUG__IGNORE_FAO_MASK 0x20
6240#define GRBM_DEBUG__IGNORE_FAO__SHIFT 0x5
6241#define GRBM_DEBUG__DISABLE_READ_TIMEOUT_MASK 0x40
6242#define GRBM_DEBUG__DISABLE_READ_TIMEOUT__SHIFT 0x6
6243#define GRBM_DEBUG__SNAPSHOT_FREE_CNTRS_MASK 0x80
6244#define GRBM_DEBUG__SNAPSHOT_FREE_CNTRS__SHIFT 0x7
6245#define GRBM_DEBUG__HYSTERESIS_GUI_ACTIVE_MASK 0xf00
6246#define GRBM_DEBUG__HYSTERESIS_GUI_ACTIVE__SHIFT 0x8
6247#define GRBM_DEBUG__GFX_CLOCK_DOMAIN_OVERRIDE_MASK 0x1000
6248#define GRBM_DEBUG__GFX_CLOCK_DOMAIN_OVERRIDE__SHIFT 0xc
6249#define GRBM_DEBUG__GRBM_TRAP_ENABLE_MASK 0x2000
6250#define GRBM_DEBUG__GRBM_TRAP_ENABLE__SHIFT 0xd
6251#define GRBM_DEBUG__DEBUG_BUS_FGCG_EN_MASK 0x80000000
6252#define GRBM_DEBUG__DEBUG_BUS_FGCG_EN__SHIFT 0x1f
6253#define GRBM_DEBUG_SNAPSHOT__CPF_RDY_MASK 0x1
6254#define GRBM_DEBUG_SNAPSHOT__CPF_RDY__SHIFT 0x0
6255#define GRBM_DEBUG_SNAPSHOT__CPG_RDY_MASK 0x2
6256#define GRBM_DEBUG_SNAPSHOT__CPG_RDY__SHIFT 0x1
6257#define GRBM_DEBUG_SNAPSHOT__SRBM_RDY_MASK 0x4
6258#define GRBM_DEBUG_SNAPSHOT__SRBM_RDY__SHIFT 0x2
6259#define GRBM_DEBUG_SNAPSHOT__WD_ME0PIPE0_RDY_MASK 0x8
6260#define GRBM_DEBUG_SNAPSHOT__WD_ME0PIPE0_RDY__SHIFT 0x3
6261#define GRBM_DEBUG_SNAPSHOT__WD_ME0PIPE1_RDY_MASK 0x10
6262#define GRBM_DEBUG_SNAPSHOT__WD_ME0PIPE1_RDY__SHIFT 0x4
6263#define GRBM_DEBUG_SNAPSHOT__GDS_RDY_MASK 0x20
6264#define GRBM_DEBUG_SNAPSHOT__GDS_RDY__SHIFT 0x5
6265#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE0_RDY0_MASK 0x40
6266#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE0_RDY0__SHIFT 0x6
6267#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE1_RDY0_MASK 0x80
6268#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE1_RDY0__SHIFT 0x7
6269#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE0_RDY0_MASK 0x100
6270#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE0_RDY0__SHIFT 0x8
6271#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE1_RDY0_MASK 0x200
6272#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE1_RDY0__SHIFT 0x9
6273#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE0_RDY0_MASK 0x400
6274#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE0_RDY0__SHIFT 0xa
6275#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE1_RDY0_MASK 0x800
6276#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE1_RDY0__SHIFT 0xb
6277#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE0_RDY0_MASK 0x1000
6278#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE0_RDY0__SHIFT 0xc
6279#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE1_RDY0_MASK 0x2000
6280#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE1_RDY0__SHIFT 0xd
6281#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE0_RDY1_MASK 0x4000
6282#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE0_RDY1__SHIFT 0xe
6283#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE1_RDY1_MASK 0x8000
6284#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE1_RDY1__SHIFT 0xf
6285#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE0_RDY1_MASK 0x10000
6286#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE0_RDY1__SHIFT 0x10
6287#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE1_RDY1_MASK 0x20000
6288#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE1_RDY1__SHIFT 0x11
6289#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE0_RDY1_MASK 0x40000
6290#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE0_RDY1__SHIFT 0x12
6291#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE1_RDY1_MASK 0x80000
6292#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE1_RDY1__SHIFT 0x13
6293#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE0_RDY1_MASK 0x100000
6294#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE0_RDY1__SHIFT 0x14
6295#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE1_RDY1_MASK 0x200000
6296#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE1_RDY1__SHIFT 0x15
6297#define GRBM_READ_ERROR__READ_ADDRESS_MASK 0x3fffc
6298#define GRBM_READ_ERROR__READ_ADDRESS__SHIFT 0x2
6299#define GRBM_READ_ERROR__READ_PIPEID_MASK 0x300000
6300#define GRBM_READ_ERROR__READ_PIPEID__SHIFT 0x14
6301#define GRBM_READ_ERROR__READ_MEID_MASK 0xc00000
6302#define GRBM_READ_ERROR__READ_MEID__SHIFT 0x16
6303#define GRBM_READ_ERROR__READ_ERROR_MASK 0x80000000
6304#define GRBM_READ_ERROR__READ_ERROR__SHIFT 0x1f
6305#define GRBM_READ_ERROR2__READ_REQUESTER_SRBM_MASK 0x20000
6306#define GRBM_READ_ERROR2__READ_REQUESTER_SRBM__SHIFT 0x11
6307#define GRBM_READ_ERROR2__READ_REQUESTER_RLC_MASK 0x40000
6308#define GRBM_READ_ERROR2__READ_REQUESTER_RLC__SHIFT 0x12
6309#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA_MASK 0x80000
6310#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA__SHIFT 0x13
6311#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF_MASK 0x100000
6312#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF__SHIFT 0x14
6313#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_PF_MASK 0x200000
6314#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_PF__SHIFT 0x15
6315#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_CF_MASK 0x400000
6316#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_CF__SHIFT 0x16
6317#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_PF_MASK 0x800000
6318#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_PF__SHIFT 0x17
6319#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE0_MASK 0x1000000
6320#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE0__SHIFT 0x18
6321#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE1_MASK 0x2000000
6322#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE1__SHIFT 0x19
6323#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE2_MASK 0x4000000
6324#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE2__SHIFT 0x1a
6325#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE3_MASK 0x8000000
6326#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE3__SHIFT 0x1b
6327#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE0_MASK 0x10000000
6328#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE0__SHIFT 0x1c
6329#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE1_MASK 0x20000000
6330#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE1__SHIFT 0x1d
6331#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2_MASK 0x40000000
6332#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2__SHIFT 0x1e
6333#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3_MASK 0x80000000
6334#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3__SHIFT 0x1f
6335#define GRBM_INT_CNTL__RDERR_INT_ENABLE_MASK 0x1
6336#define GRBM_INT_CNTL__RDERR_INT_ENABLE__SHIFT 0x0
6337#define GRBM_INT_CNTL__GUI_IDLE_INT_ENABLE_MASK 0x80000
6338#define GRBM_INT_CNTL__GUI_IDLE_INT_ENABLE__SHIFT 0x13
6339#define GRBM_TRAP_OP__RW_MASK 0x1
6340#define GRBM_TRAP_OP__RW__SHIFT 0x0
6341#define GRBM_TRAP_ADDR__DATA_MASK 0xffff
6342#define GRBM_TRAP_ADDR__DATA__SHIFT 0x0
6343#define GRBM_TRAP_ADDR_MSK__DATA_MASK 0xffff
6344#define GRBM_TRAP_ADDR_MSK__DATA__SHIFT 0x0
6345#define GRBM_TRAP_WD__DATA_MASK 0xffffffff
6346#define GRBM_TRAP_WD__DATA__SHIFT 0x0
6347#define GRBM_TRAP_WD_MSK__DATA_MASK 0xffffffff
6348#define GRBM_TRAP_WD_MSK__DATA__SHIFT 0x0
6349#define GRBM_DSM_BYPASS__BYPASS_BITS_MASK 0x3
6350#define GRBM_DSM_BYPASS__BYPASS_BITS__SHIFT 0x0
6351#define GRBM_DSM_BYPASS__BYPASS_EN_MASK 0x4
6352#define GRBM_DSM_BYPASS__BYPASS_EN__SHIFT 0x2
6353#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RLC_MASK 0x1
6354#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RLC__SHIFT 0x0
6355#define GRBM_WRITE_ERROR__WRITE_REQUESTER_SRBM_MASK 0x2
6356#define GRBM_WRITE_ERROR__WRITE_REQUESTER_SRBM__SHIFT 0x1
6357#define GRBM_WRITE_ERROR__WRITE_SSRCID_MASK 0x1c
6358#define GRBM_WRITE_ERROR__WRITE_SSRCID__SHIFT 0x2
6359#define GRBM_WRITE_ERROR__WRITE_VFID_MASK 0x1e0
6360#define GRBM_WRITE_ERROR__WRITE_VFID__SHIFT 0x5
6361#define GRBM_WRITE_ERROR__WRITE_VF_MASK 0x1000
6362#define GRBM_WRITE_ERROR__WRITE_VF__SHIFT 0xc
6363#define GRBM_WRITE_ERROR__WRITE_VMID_MASK 0x1e000
6364#define GRBM_WRITE_ERROR__WRITE_VMID__SHIFT 0xd
6365#define GRBM_WRITE_ERROR__WRITE_PIPEID_MASK 0x300000
6366#define GRBM_WRITE_ERROR__WRITE_PIPEID__SHIFT 0x14
6367#define GRBM_WRITE_ERROR__WRITE_MEID_MASK 0xc00000
6368#define GRBM_WRITE_ERROR__WRITE_MEID__SHIFT 0x16
6369#define GRBM_WRITE_ERROR__WRITE_ERROR_MASK 0x80000000
6370#define GRBM_WRITE_ERROR__WRITE_ERROR__SHIFT 0x1f
6371#define GRBM_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x3f
6372#define GRBM_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
6373#define GRBM_PERFCOUNTER0_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x400
6374#define GRBM_PERFCOUNTER0_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
6375#define GRBM_PERFCOUNTER0_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x800
6376#define GRBM_PERFCOUNTER0_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
6377#define GRBM_PERFCOUNTER0_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x1000
6378#define GRBM_PERFCOUNTER0_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0xc
6379#define GRBM_PERFCOUNTER0_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x2000
6380#define GRBM_PERFCOUNTER0_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xd
6381#define GRBM_PERFCOUNTER0_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x4000
6382#define GRBM_PERFCOUNTER0_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xe
6383#define GRBM_PERFCOUNTER0_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x10000
6384#define GRBM_PERFCOUNTER0_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x10
6385#define GRBM_PERFCOUNTER0_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x20000
6386#define GRBM_PERFCOUNTER0_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x11
6387#define GRBM_PERFCOUNTER0_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x40000
6388#define GRBM_PERFCOUNTER0_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x12
6389#define GRBM_PERFCOUNTER0_SELECT__GRBM_BUSY_USER_DEFINED_MASK_MASK 0x80000
6390#define GRBM_PERFCOUNTER0_SELECT__GRBM_BUSY_USER_DEFINED_MASK__SHIFT 0x13
6391#define GRBM_PERFCOUNTER0_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x100000
6392#define GRBM_PERFCOUNTER0_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x14
6393#define GRBM_PERFCOUNTER0_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x200000
6394#define GRBM_PERFCOUNTER0_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x15
6395#define GRBM_PERFCOUNTER0_SELECT__CP_BUSY_USER_DEFINED_MASK_MASK 0x400000
6396#define GRBM_PERFCOUNTER0_SELECT__CP_BUSY_USER_DEFINED_MASK__SHIFT 0x16
6397#define GRBM_PERFCOUNTER0_SELECT__IA_BUSY_USER_DEFINED_MASK_MASK 0x800000
6398#define GRBM_PERFCOUNTER0_SELECT__IA_BUSY_USER_DEFINED_MASK__SHIFT 0x17
6399#define GRBM_PERFCOUNTER0_SELECT__GDS_BUSY_USER_DEFINED_MASK_MASK 0x1000000
6400#define GRBM_PERFCOUNTER0_SELECT__GDS_BUSY_USER_DEFINED_MASK__SHIFT 0x18
6401#define GRBM_PERFCOUNTER0_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x2000000
6402#define GRBM_PERFCOUNTER0_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x19
6403#define GRBM_PERFCOUNTER0_SELECT__RLC_BUSY_USER_DEFINED_MASK_MASK 0x4000000
6404#define GRBM_PERFCOUNTER0_SELECT__RLC_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
6405#define GRBM_PERFCOUNTER0_SELECT__TC_BUSY_USER_DEFINED_MASK_MASK 0x8000000
6406#define GRBM_PERFCOUNTER0_SELECT__TC_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
6407#define GRBM_PERFCOUNTER0_SELECT__WD_BUSY_USER_DEFINED_MASK_MASK 0x10000000
6408#define GRBM_PERFCOUNTER0_SELECT__WD_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
6409#define GRBM_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x3f
6410#define GRBM_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
6411#define GRBM_PERFCOUNTER1_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x400
6412#define GRBM_PERFCOUNTER1_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
6413#define GRBM_PERFCOUNTER1_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x800
6414#define GRBM_PERFCOUNTER1_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
6415#define GRBM_PERFCOUNTER1_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x1000
6416#define GRBM_PERFCOUNTER1_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0xc
6417#define GRBM_PERFCOUNTER1_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x2000
6418#define GRBM_PERFCOUNTER1_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xd
6419#define GRBM_PERFCOUNTER1_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x4000
6420#define GRBM_PERFCOUNTER1_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xe
6421#define GRBM_PERFCOUNTER1_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x10000
6422#define GRBM_PERFCOUNTER1_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x10
6423#define GRBM_PERFCOUNTER1_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x20000
6424#define GRBM_PERFCOUNTER1_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x11
6425#define GRBM_PERFCOUNTER1_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x40000
6426#define GRBM_PERFCOUNTER1_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x12
6427#define GRBM_PERFCOUNTER1_SELECT__GRBM_BUSY_USER_DEFINED_MASK_MASK 0x80000
6428#define GRBM_PERFCOUNTER1_SELECT__GRBM_BUSY_USER_DEFINED_MASK__SHIFT 0x13
6429#define GRBM_PERFCOUNTER1_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x100000
6430#define GRBM_PERFCOUNTER1_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x14
6431#define GRBM_PERFCOUNTER1_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x200000
6432#define GRBM_PERFCOUNTER1_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x15
6433#define GRBM_PERFCOUNTER1_SELECT__CP_BUSY_USER_DEFINED_MASK_MASK 0x400000
6434#define GRBM_PERFCOUNTER1_SELECT__CP_BUSY_USER_DEFINED_MASK__SHIFT 0x16
6435#define GRBM_PERFCOUNTER1_SELECT__IA_BUSY_USER_DEFINED_MASK_MASK 0x800000
6436#define GRBM_PERFCOUNTER1_SELECT__IA_BUSY_USER_DEFINED_MASK__SHIFT 0x17
6437#define GRBM_PERFCOUNTER1_SELECT__GDS_BUSY_USER_DEFINED_MASK_MASK 0x1000000
6438#define GRBM_PERFCOUNTER1_SELECT__GDS_BUSY_USER_DEFINED_MASK__SHIFT 0x18
6439#define GRBM_PERFCOUNTER1_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x2000000
6440#define GRBM_PERFCOUNTER1_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x19
6441#define GRBM_PERFCOUNTER1_SELECT__RLC_BUSY_USER_DEFINED_MASK_MASK 0x4000000
6442#define GRBM_PERFCOUNTER1_SELECT__RLC_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
6443#define GRBM_PERFCOUNTER1_SELECT__TC_BUSY_USER_DEFINED_MASK_MASK 0x8000000
6444#define GRBM_PERFCOUNTER1_SELECT__TC_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
6445#define GRBM_PERFCOUNTER1_SELECT__WD_BUSY_USER_DEFINED_MASK_MASK 0x10000000
6446#define GRBM_PERFCOUNTER1_SELECT__WD_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
6447#define GRBM_SE0_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x3f
6448#define GRBM_SE0_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
6449#define GRBM_SE0_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x400
6450#define GRBM_SE0_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
6451#define GRBM_SE0_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x800
6452#define GRBM_SE0_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
6453#define GRBM_SE0_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x1000
6454#define GRBM_SE0_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
6455#define GRBM_SE0_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x2000
6456#define GRBM_SE0_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
6457#define GRBM_SE0_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x8000
6458#define GRBM_SE0_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
6459#define GRBM_SE0_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x10000
6460#define GRBM_SE0_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
6461#define GRBM_SE0_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x20000
6462#define GRBM_SE0_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
6463#define GRBM_SE0_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x40000
6464#define GRBM_SE0_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
6465#define GRBM_SE0_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x80000
6466#define GRBM_SE0_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x13
6467#define GRBM_SE0_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x100000
6468#define GRBM_SE0_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
6469#define GRBM_SE0_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x200000
6470#define GRBM_SE0_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
6471#define GRBM_SE1_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x3f
6472#define GRBM_SE1_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
6473#define GRBM_SE1_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x400
6474#define GRBM_SE1_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
6475#define GRBM_SE1_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x800
6476#define GRBM_SE1_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
6477#define GRBM_SE1_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x1000
6478#define GRBM_SE1_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
6479#define GRBM_SE1_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x2000
6480#define GRBM_SE1_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
6481#define GRBM_SE1_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x8000
6482#define GRBM_SE1_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
6483#define GRBM_SE1_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x10000
6484#define GRBM_SE1_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
6485#define GRBM_SE1_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x20000
6486#define GRBM_SE1_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
6487#define GRBM_SE1_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x40000
6488#define GRBM_SE1_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
6489#define GRBM_SE1_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x80000
6490#define GRBM_SE1_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x13
6491#define GRBM_SE1_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x100000
6492#define GRBM_SE1_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
6493#define GRBM_SE1_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x200000
6494#define GRBM_SE1_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
6495#define GRBM_SE2_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x3f
6496#define GRBM_SE2_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
6497#define GRBM_SE2_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x400
6498#define GRBM_SE2_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
6499#define GRBM_SE2_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x800
6500#define GRBM_SE2_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
6501#define GRBM_SE2_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x1000
6502#define GRBM_SE2_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
6503#define GRBM_SE2_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x2000
6504#define GRBM_SE2_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
6505#define GRBM_SE2_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x8000
6506#define GRBM_SE2_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
6507#define GRBM_SE2_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x10000
6508#define GRBM_SE2_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
6509#define GRBM_SE2_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x20000
6510#define GRBM_SE2_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
6511#define GRBM_SE2_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x40000
6512#define GRBM_SE2_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
6513#define GRBM_SE2_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x80000
6514#define GRBM_SE2_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x13
6515#define GRBM_SE2_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x100000
6516#define GRBM_SE2_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
6517#define GRBM_SE2_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x200000
6518#define GRBM_SE2_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
6519#define GRBM_SE3_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x3f
6520#define GRBM_SE3_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
6521#define GRBM_SE3_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x400
6522#define GRBM_SE3_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
6523#define GRBM_SE3_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x800
6524#define GRBM_SE3_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
6525#define GRBM_SE3_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x1000
6526#define GRBM_SE3_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
6527#define GRBM_SE3_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x2000
6528#define GRBM_SE3_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
6529#define GRBM_SE3_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x8000
6530#define GRBM_SE3_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
6531#define GRBM_SE3_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x10000
6532#define GRBM_SE3_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
6533#define GRBM_SE3_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x20000
6534#define GRBM_SE3_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
6535#define GRBM_SE3_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x40000
6536#define GRBM_SE3_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
6537#define GRBM_SE3_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x80000
6538#define GRBM_SE3_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x13
/*
 * Register field shift/mask definitions for the GRBM (graphics register bus
 * manager), PA_CL (primitive assembler, clipper) and PA_SU (primitive
 * assembler, setup unit) blocks.  Each field gets a pair of macros:
 *   <REG>__<FIELD>_MASK   - bit mask of the field within the 32-bit register
 *   <REG>__<FIELD>__SHIFT - bit position of the field's LSB
 * NOTE(review): this table appears machine-generated from hardware register
 * descriptions; do not hand-edit values.  (This chunk as extracted had a
 * viewer line number fused onto the front of every directive, which made
 * each line invalid C; only those prefixes are removed here.)
 */
#define GRBM_SE3_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x100000
#define GRBM_SE3_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
#define GRBM_SE3_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x200000
#define GRBM_SE3_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
#define GRBM_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define GRBM_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define GRBM_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define GRBM_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define GRBM_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define GRBM_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define GRBM_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define GRBM_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define GRBM_SE0_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define GRBM_SE0_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
#define GRBM_SE0_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define GRBM_SE0_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
#define GRBM_SE1_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define GRBM_SE1_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
#define GRBM_SE1_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define GRBM_SE1_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
#define GRBM_SE2_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define GRBM_SE2_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
#define GRBM_SE2_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define GRBM_SE2_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
#define GRBM_SE3_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define GRBM_SE3_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
#define GRBM_SE3_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define GRBM_SE3_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
#define GRBM_SCRATCH_REG0__SCRATCH_REG0_MASK 0xffffffff
#define GRBM_SCRATCH_REG0__SCRATCH_REG0__SHIFT 0x0
#define GRBM_SCRATCH_REG1__SCRATCH_REG1_MASK 0xffffffff
#define GRBM_SCRATCH_REG1__SCRATCH_REG1__SHIFT 0x0
#define GRBM_SCRATCH_REG2__SCRATCH_REG2_MASK 0xffffffff
#define GRBM_SCRATCH_REG2__SCRATCH_REG2__SHIFT 0x0
#define GRBM_SCRATCH_REG3__SCRATCH_REG3_MASK 0xffffffff
#define GRBM_SCRATCH_REG3__SCRATCH_REG3__SHIFT 0x0
#define GRBM_SCRATCH_REG4__SCRATCH_REG4_MASK 0xffffffff
#define GRBM_SCRATCH_REG4__SCRATCH_REG4__SHIFT 0x0
#define GRBM_SCRATCH_REG5__SCRATCH_REG5_MASK 0xffffffff
#define GRBM_SCRATCH_REG5__SCRATCH_REG5__SHIFT 0x0
#define GRBM_SCRATCH_REG6__SCRATCH_REG6_MASK 0xffffffff
#define GRBM_SCRATCH_REG6__SCRATCH_REG6__SHIFT 0x0
#define GRBM_SCRATCH_REG7__SCRATCH_REG7_MASK 0xffffffff
#define GRBM_SCRATCH_REG7__SCRATCH_REG7__SHIFT 0x0
#define DEBUG_INDEX__DEBUG_INDEX_MASK 0x3ffff
#define DEBUG_INDEX__DEBUG_INDEX__SHIFT 0x0
#define DEBUG_DATA__DEBUG_DATA_MASK 0xffffffff
#define DEBUG_DATA__DEBUG_DATA__SHIFT 0x0
#define GRBM_NOWHERE__DATA_MASK 0xffffffff
#define GRBM_NOWHERE__DATA__SHIFT 0x0
/* Viewport transform scale/offset registers: 16 viewports (0..15), each with
 * X/Y/Z scale and offset; all are full 32-bit (float) data fields. */
#define PA_CL_VPORT_XSCALE__VPORT_XSCALE_MASK 0xffffffff
#define PA_CL_VPORT_XSCALE__VPORT_XSCALE__SHIFT 0x0
#define PA_CL_VPORT_XOFFSET__VPORT_XOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_XOFFSET__VPORT_XOFFSET__SHIFT 0x0
#define PA_CL_VPORT_YSCALE__VPORT_YSCALE_MASK 0xffffffff
#define PA_CL_VPORT_YSCALE__VPORT_YSCALE__SHIFT 0x0
#define PA_CL_VPORT_YOFFSET__VPORT_YOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_YOFFSET__VPORT_YOFFSET__SHIFT 0x0
#define PA_CL_VPORT_ZSCALE__VPORT_ZSCALE_MASK 0xffffffff
#define PA_CL_VPORT_ZSCALE__VPORT_ZSCALE__SHIFT 0x0
#define PA_CL_VPORT_ZOFFSET__VPORT_ZOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_ZOFFSET__VPORT_ZOFFSET__SHIFT 0x0
#define PA_CL_VPORT_XSCALE_1__VPORT_XSCALE_MASK 0xffffffff
#define PA_CL_VPORT_XSCALE_1__VPORT_XSCALE__SHIFT 0x0
#define PA_CL_VPORT_XSCALE_2__VPORT_XSCALE_MASK 0xffffffff
#define PA_CL_VPORT_XSCALE_2__VPORT_XSCALE__SHIFT 0x0
#define PA_CL_VPORT_XSCALE_3__VPORT_XSCALE_MASK 0xffffffff
#define PA_CL_VPORT_XSCALE_3__VPORT_XSCALE__SHIFT 0x0
#define PA_CL_VPORT_XSCALE_4__VPORT_XSCALE_MASK 0xffffffff
#define PA_CL_VPORT_XSCALE_4__VPORT_XSCALE__SHIFT 0x0
#define PA_CL_VPORT_XSCALE_5__VPORT_XSCALE_MASK 0xffffffff
#define PA_CL_VPORT_XSCALE_5__VPORT_XSCALE__SHIFT 0x0
#define PA_CL_VPORT_XSCALE_6__VPORT_XSCALE_MASK 0xffffffff
#define PA_CL_VPORT_XSCALE_6__VPORT_XSCALE__SHIFT 0x0
#define PA_CL_VPORT_XSCALE_7__VPORT_XSCALE_MASK 0xffffffff
#define PA_CL_VPORT_XSCALE_7__VPORT_XSCALE__SHIFT 0x0
#define PA_CL_VPORT_XSCALE_8__VPORT_XSCALE_MASK 0xffffffff
#define PA_CL_VPORT_XSCALE_8__VPORT_XSCALE__SHIFT 0x0
#define PA_CL_VPORT_XSCALE_9__VPORT_XSCALE_MASK 0xffffffff
#define PA_CL_VPORT_XSCALE_9__VPORT_XSCALE__SHIFT 0x0
#define PA_CL_VPORT_XSCALE_10__VPORT_XSCALE_MASK 0xffffffff
#define PA_CL_VPORT_XSCALE_10__VPORT_XSCALE__SHIFT 0x0
#define PA_CL_VPORT_XSCALE_11__VPORT_XSCALE_MASK 0xffffffff
#define PA_CL_VPORT_XSCALE_11__VPORT_XSCALE__SHIFT 0x0
#define PA_CL_VPORT_XSCALE_12__VPORT_XSCALE_MASK 0xffffffff
#define PA_CL_VPORT_XSCALE_12__VPORT_XSCALE__SHIFT 0x0
#define PA_CL_VPORT_XSCALE_13__VPORT_XSCALE_MASK 0xffffffff
#define PA_CL_VPORT_XSCALE_13__VPORT_XSCALE__SHIFT 0x0
#define PA_CL_VPORT_XSCALE_14__VPORT_XSCALE_MASK 0xffffffff
#define PA_CL_VPORT_XSCALE_14__VPORT_XSCALE__SHIFT 0x0
#define PA_CL_VPORT_XSCALE_15__VPORT_XSCALE_MASK 0xffffffff
#define PA_CL_VPORT_XSCALE_15__VPORT_XSCALE__SHIFT 0x0
#define PA_CL_VPORT_XOFFSET_1__VPORT_XOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_XOFFSET_1__VPORT_XOFFSET__SHIFT 0x0
#define PA_CL_VPORT_XOFFSET_2__VPORT_XOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_XOFFSET_2__VPORT_XOFFSET__SHIFT 0x0
#define PA_CL_VPORT_XOFFSET_3__VPORT_XOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_XOFFSET_3__VPORT_XOFFSET__SHIFT 0x0
#define PA_CL_VPORT_XOFFSET_4__VPORT_XOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_XOFFSET_4__VPORT_XOFFSET__SHIFT 0x0
#define PA_CL_VPORT_XOFFSET_5__VPORT_XOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_XOFFSET_5__VPORT_XOFFSET__SHIFT 0x0
#define PA_CL_VPORT_XOFFSET_6__VPORT_XOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_XOFFSET_6__VPORT_XOFFSET__SHIFT 0x0
#define PA_CL_VPORT_XOFFSET_7__VPORT_XOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_XOFFSET_7__VPORT_XOFFSET__SHIFT 0x0
#define PA_CL_VPORT_XOFFSET_8__VPORT_XOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_XOFFSET_8__VPORT_XOFFSET__SHIFT 0x0
#define PA_CL_VPORT_XOFFSET_9__VPORT_XOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_XOFFSET_9__VPORT_XOFFSET__SHIFT 0x0
#define PA_CL_VPORT_XOFFSET_10__VPORT_XOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_XOFFSET_10__VPORT_XOFFSET__SHIFT 0x0
#define PA_CL_VPORT_XOFFSET_11__VPORT_XOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_XOFFSET_11__VPORT_XOFFSET__SHIFT 0x0
#define PA_CL_VPORT_XOFFSET_12__VPORT_XOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_XOFFSET_12__VPORT_XOFFSET__SHIFT 0x0
#define PA_CL_VPORT_XOFFSET_13__VPORT_XOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_XOFFSET_13__VPORT_XOFFSET__SHIFT 0x0
#define PA_CL_VPORT_XOFFSET_14__VPORT_XOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_XOFFSET_14__VPORT_XOFFSET__SHIFT 0x0
#define PA_CL_VPORT_XOFFSET_15__VPORT_XOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_XOFFSET_15__VPORT_XOFFSET__SHIFT 0x0
#define PA_CL_VPORT_YSCALE_1__VPORT_YSCALE_MASK 0xffffffff
#define PA_CL_VPORT_YSCALE_1__VPORT_YSCALE__SHIFT 0x0
#define PA_CL_VPORT_YSCALE_2__VPORT_YSCALE_MASK 0xffffffff
#define PA_CL_VPORT_YSCALE_2__VPORT_YSCALE__SHIFT 0x0
#define PA_CL_VPORT_YSCALE_3__VPORT_YSCALE_MASK 0xffffffff
#define PA_CL_VPORT_YSCALE_3__VPORT_YSCALE__SHIFT 0x0
#define PA_CL_VPORT_YSCALE_4__VPORT_YSCALE_MASK 0xffffffff
#define PA_CL_VPORT_YSCALE_4__VPORT_YSCALE__SHIFT 0x0
#define PA_CL_VPORT_YSCALE_5__VPORT_YSCALE_MASK 0xffffffff
#define PA_CL_VPORT_YSCALE_5__VPORT_YSCALE__SHIFT 0x0
#define PA_CL_VPORT_YSCALE_6__VPORT_YSCALE_MASK 0xffffffff
#define PA_CL_VPORT_YSCALE_6__VPORT_YSCALE__SHIFT 0x0
#define PA_CL_VPORT_YSCALE_7__VPORT_YSCALE_MASK 0xffffffff
#define PA_CL_VPORT_YSCALE_7__VPORT_YSCALE__SHIFT 0x0
#define PA_CL_VPORT_YSCALE_8__VPORT_YSCALE_MASK 0xffffffff
#define PA_CL_VPORT_YSCALE_8__VPORT_YSCALE__SHIFT 0x0
#define PA_CL_VPORT_YSCALE_9__VPORT_YSCALE_MASK 0xffffffff
#define PA_CL_VPORT_YSCALE_9__VPORT_YSCALE__SHIFT 0x0
#define PA_CL_VPORT_YSCALE_10__VPORT_YSCALE_MASK 0xffffffff
#define PA_CL_VPORT_YSCALE_10__VPORT_YSCALE__SHIFT 0x0
#define PA_CL_VPORT_YSCALE_11__VPORT_YSCALE_MASK 0xffffffff
#define PA_CL_VPORT_YSCALE_11__VPORT_YSCALE__SHIFT 0x0
#define PA_CL_VPORT_YSCALE_12__VPORT_YSCALE_MASK 0xffffffff
#define PA_CL_VPORT_YSCALE_12__VPORT_YSCALE__SHIFT 0x0
#define PA_CL_VPORT_YSCALE_13__VPORT_YSCALE_MASK 0xffffffff
#define PA_CL_VPORT_YSCALE_13__VPORT_YSCALE__SHIFT 0x0
#define PA_CL_VPORT_YSCALE_14__VPORT_YSCALE_MASK 0xffffffff
#define PA_CL_VPORT_YSCALE_14__VPORT_YSCALE__SHIFT 0x0
#define PA_CL_VPORT_YSCALE_15__VPORT_YSCALE_MASK 0xffffffff
#define PA_CL_VPORT_YSCALE_15__VPORT_YSCALE__SHIFT 0x0
#define PA_CL_VPORT_YOFFSET_1__VPORT_YOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_YOFFSET_1__VPORT_YOFFSET__SHIFT 0x0
#define PA_CL_VPORT_YOFFSET_2__VPORT_YOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_YOFFSET_2__VPORT_YOFFSET__SHIFT 0x0
#define PA_CL_VPORT_YOFFSET_3__VPORT_YOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_YOFFSET_3__VPORT_YOFFSET__SHIFT 0x0
#define PA_CL_VPORT_YOFFSET_4__VPORT_YOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_YOFFSET_4__VPORT_YOFFSET__SHIFT 0x0
#define PA_CL_VPORT_YOFFSET_5__VPORT_YOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_YOFFSET_5__VPORT_YOFFSET__SHIFT 0x0
#define PA_CL_VPORT_YOFFSET_6__VPORT_YOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_YOFFSET_6__VPORT_YOFFSET__SHIFT 0x0
#define PA_CL_VPORT_YOFFSET_7__VPORT_YOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_YOFFSET_7__VPORT_YOFFSET__SHIFT 0x0
#define PA_CL_VPORT_YOFFSET_8__VPORT_YOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_YOFFSET_8__VPORT_YOFFSET__SHIFT 0x0
#define PA_CL_VPORT_YOFFSET_9__VPORT_YOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_YOFFSET_9__VPORT_YOFFSET__SHIFT 0x0
#define PA_CL_VPORT_YOFFSET_10__VPORT_YOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_YOFFSET_10__VPORT_YOFFSET__SHIFT 0x0
#define PA_CL_VPORT_YOFFSET_11__VPORT_YOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_YOFFSET_11__VPORT_YOFFSET__SHIFT 0x0
#define PA_CL_VPORT_YOFFSET_12__VPORT_YOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_YOFFSET_12__VPORT_YOFFSET__SHIFT 0x0
#define PA_CL_VPORT_YOFFSET_13__VPORT_YOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_YOFFSET_13__VPORT_YOFFSET__SHIFT 0x0
#define PA_CL_VPORT_YOFFSET_14__VPORT_YOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_YOFFSET_14__VPORT_YOFFSET__SHIFT 0x0
#define PA_CL_VPORT_YOFFSET_15__VPORT_YOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_YOFFSET_15__VPORT_YOFFSET__SHIFT 0x0
#define PA_CL_VPORT_ZSCALE_1__VPORT_ZSCALE_MASK 0xffffffff
#define PA_CL_VPORT_ZSCALE_1__VPORT_ZSCALE__SHIFT 0x0
#define PA_CL_VPORT_ZSCALE_2__VPORT_ZSCALE_MASK 0xffffffff
#define PA_CL_VPORT_ZSCALE_2__VPORT_ZSCALE__SHIFT 0x0
#define PA_CL_VPORT_ZSCALE_3__VPORT_ZSCALE_MASK 0xffffffff
#define PA_CL_VPORT_ZSCALE_3__VPORT_ZSCALE__SHIFT 0x0
#define PA_CL_VPORT_ZSCALE_4__VPORT_ZSCALE_MASK 0xffffffff
#define PA_CL_VPORT_ZSCALE_4__VPORT_ZSCALE__SHIFT 0x0
#define PA_CL_VPORT_ZSCALE_5__VPORT_ZSCALE_MASK 0xffffffff
#define PA_CL_VPORT_ZSCALE_5__VPORT_ZSCALE__SHIFT 0x0
#define PA_CL_VPORT_ZSCALE_6__VPORT_ZSCALE_MASK 0xffffffff
#define PA_CL_VPORT_ZSCALE_6__VPORT_ZSCALE__SHIFT 0x0
#define PA_CL_VPORT_ZSCALE_7__VPORT_ZSCALE_MASK 0xffffffff
#define PA_CL_VPORT_ZSCALE_7__VPORT_ZSCALE__SHIFT 0x0
#define PA_CL_VPORT_ZSCALE_8__VPORT_ZSCALE_MASK 0xffffffff
#define PA_CL_VPORT_ZSCALE_8__VPORT_ZSCALE__SHIFT 0x0
#define PA_CL_VPORT_ZSCALE_9__VPORT_ZSCALE_MASK 0xffffffff
#define PA_CL_VPORT_ZSCALE_9__VPORT_ZSCALE__SHIFT 0x0
#define PA_CL_VPORT_ZSCALE_10__VPORT_ZSCALE_MASK 0xffffffff
#define PA_CL_VPORT_ZSCALE_10__VPORT_ZSCALE__SHIFT 0x0
#define PA_CL_VPORT_ZSCALE_11__VPORT_ZSCALE_MASK 0xffffffff
#define PA_CL_VPORT_ZSCALE_11__VPORT_ZSCALE__SHIFT 0x0
#define PA_CL_VPORT_ZSCALE_12__VPORT_ZSCALE_MASK 0xffffffff
#define PA_CL_VPORT_ZSCALE_12__VPORT_ZSCALE__SHIFT 0x0
#define PA_CL_VPORT_ZSCALE_13__VPORT_ZSCALE_MASK 0xffffffff
#define PA_CL_VPORT_ZSCALE_13__VPORT_ZSCALE__SHIFT 0x0
#define PA_CL_VPORT_ZSCALE_14__VPORT_ZSCALE_MASK 0xffffffff
#define PA_CL_VPORT_ZSCALE_14__VPORT_ZSCALE__SHIFT 0x0
#define PA_CL_VPORT_ZSCALE_15__VPORT_ZSCALE_MASK 0xffffffff
#define PA_CL_VPORT_ZSCALE_15__VPORT_ZSCALE__SHIFT 0x0
#define PA_CL_VPORT_ZOFFSET_1__VPORT_ZOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_ZOFFSET_1__VPORT_ZOFFSET__SHIFT 0x0
#define PA_CL_VPORT_ZOFFSET_2__VPORT_ZOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_ZOFFSET_2__VPORT_ZOFFSET__SHIFT 0x0
#define PA_CL_VPORT_ZOFFSET_3__VPORT_ZOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_ZOFFSET_3__VPORT_ZOFFSET__SHIFT 0x0
#define PA_CL_VPORT_ZOFFSET_4__VPORT_ZOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_ZOFFSET_4__VPORT_ZOFFSET__SHIFT 0x0
#define PA_CL_VPORT_ZOFFSET_5__VPORT_ZOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_ZOFFSET_5__VPORT_ZOFFSET__SHIFT 0x0
#define PA_CL_VPORT_ZOFFSET_6__VPORT_ZOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_ZOFFSET_6__VPORT_ZOFFSET__SHIFT 0x0
#define PA_CL_VPORT_ZOFFSET_7__VPORT_ZOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_ZOFFSET_7__VPORT_ZOFFSET__SHIFT 0x0
#define PA_CL_VPORT_ZOFFSET_8__VPORT_ZOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_ZOFFSET_8__VPORT_ZOFFSET__SHIFT 0x0
#define PA_CL_VPORT_ZOFFSET_9__VPORT_ZOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_ZOFFSET_9__VPORT_ZOFFSET__SHIFT 0x0
#define PA_CL_VPORT_ZOFFSET_10__VPORT_ZOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_ZOFFSET_10__VPORT_ZOFFSET__SHIFT 0x0
#define PA_CL_VPORT_ZOFFSET_11__VPORT_ZOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_ZOFFSET_11__VPORT_ZOFFSET__SHIFT 0x0
#define PA_CL_VPORT_ZOFFSET_12__VPORT_ZOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_ZOFFSET_12__VPORT_ZOFFSET__SHIFT 0x0
#define PA_CL_VPORT_ZOFFSET_13__VPORT_ZOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_ZOFFSET_13__VPORT_ZOFFSET__SHIFT 0x0
#define PA_CL_VPORT_ZOFFSET_14__VPORT_ZOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_ZOFFSET_14__VPORT_ZOFFSET__SHIFT 0x0
#define PA_CL_VPORT_ZOFFSET_15__VPORT_ZOFFSET_MASK 0xffffffff
#define PA_CL_VPORT_ZOFFSET_15__VPORT_ZOFFSET__SHIFT 0x0
/* PA_CL_VTE_CNTL: viewport transform enable / vertex format control bits. */
#define PA_CL_VTE_CNTL__VPORT_X_SCALE_ENA_MASK 0x1
#define PA_CL_VTE_CNTL__VPORT_X_SCALE_ENA__SHIFT 0x0
#define PA_CL_VTE_CNTL__VPORT_X_OFFSET_ENA_MASK 0x2
#define PA_CL_VTE_CNTL__VPORT_X_OFFSET_ENA__SHIFT 0x1
#define PA_CL_VTE_CNTL__VPORT_Y_SCALE_ENA_MASK 0x4
#define PA_CL_VTE_CNTL__VPORT_Y_SCALE_ENA__SHIFT 0x2
#define PA_CL_VTE_CNTL__VPORT_Y_OFFSET_ENA_MASK 0x8
#define PA_CL_VTE_CNTL__VPORT_Y_OFFSET_ENA__SHIFT 0x3
#define PA_CL_VTE_CNTL__VPORT_Z_SCALE_ENA_MASK 0x10
#define PA_CL_VTE_CNTL__VPORT_Z_SCALE_ENA__SHIFT 0x4
#define PA_CL_VTE_CNTL__VPORT_Z_OFFSET_ENA_MASK 0x20
#define PA_CL_VTE_CNTL__VPORT_Z_OFFSET_ENA__SHIFT 0x5
#define PA_CL_VTE_CNTL__VTX_XY_FMT_MASK 0x100
#define PA_CL_VTE_CNTL__VTX_XY_FMT__SHIFT 0x8
#define PA_CL_VTE_CNTL__VTX_Z_FMT_MASK 0x200
#define PA_CL_VTE_CNTL__VTX_Z_FMT__SHIFT 0x9
#define PA_CL_VTE_CNTL__VTX_W0_FMT_MASK 0x400
#define PA_CL_VTE_CNTL__VTX_W0_FMT__SHIFT 0xa
#define PA_CL_VTE_CNTL__PERFCOUNTER_REF_MASK 0x800
#define PA_CL_VTE_CNTL__PERFCOUNTER_REF__SHIFT 0xb
/* PA_CL_VS_OUT_CNTL: per-distance clip/cull enables and VS output controls. */
#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0_MASK 0x1
#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0__SHIFT 0x0
#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_1_MASK 0x2
#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_1__SHIFT 0x1
#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_2_MASK 0x4
#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_2__SHIFT 0x2
#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_3_MASK 0x8
#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_3__SHIFT 0x3
#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_4_MASK 0x10
#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_4__SHIFT 0x4
#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_5_MASK 0x20
#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_5__SHIFT 0x5
#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_6_MASK 0x40
#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_6__SHIFT 0x6
#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_7_MASK 0x80
#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_7__SHIFT 0x7
#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_0_MASK 0x100
#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_0__SHIFT 0x8
#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_1_MASK 0x200
#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_1__SHIFT 0x9
#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_2_MASK 0x400
#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_2__SHIFT 0xa
#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_3_MASK 0x800
#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_3__SHIFT 0xb
#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_4_MASK 0x1000
#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_4__SHIFT 0xc
#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_5_MASK 0x2000
#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_5__SHIFT 0xd
#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_6_MASK 0x4000
#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_6__SHIFT 0xe
#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_7_MASK 0x8000
#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_7__SHIFT 0xf
#define PA_CL_VS_OUT_CNTL__USE_VTX_POINT_SIZE_MASK 0x10000
#define PA_CL_VS_OUT_CNTL__USE_VTX_POINT_SIZE__SHIFT 0x10
#define PA_CL_VS_OUT_CNTL__USE_VTX_EDGE_FLAG_MASK 0x20000
#define PA_CL_VS_OUT_CNTL__USE_VTX_EDGE_FLAG__SHIFT 0x11
#define PA_CL_VS_OUT_CNTL__USE_VTX_RENDER_TARGET_INDX_MASK 0x40000
#define PA_CL_VS_OUT_CNTL__USE_VTX_RENDER_TARGET_INDX__SHIFT 0x12
#define PA_CL_VS_OUT_CNTL__USE_VTX_VIEWPORT_INDX_MASK 0x80000
#define PA_CL_VS_OUT_CNTL__USE_VTX_VIEWPORT_INDX__SHIFT 0x13
#define PA_CL_VS_OUT_CNTL__USE_VTX_KILL_FLAG_MASK 0x100000
#define PA_CL_VS_OUT_CNTL__USE_VTX_KILL_FLAG__SHIFT 0x14
#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_VEC_ENA_MASK 0x200000
#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_VEC_ENA__SHIFT 0x15
#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST0_VEC_ENA_MASK 0x400000
#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST0_VEC_ENA__SHIFT 0x16
#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST1_VEC_ENA_MASK 0x800000
#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST1_VEC_ENA__SHIFT 0x17
#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA_MASK 0x1000000
#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA__SHIFT 0x18
#define PA_CL_VS_OUT_CNTL__USE_VTX_GS_CUT_FLAG_MASK 0x2000000
#define PA_CL_VS_OUT_CNTL__USE_VTX_GS_CUT_FLAG__SHIFT 0x19
#define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH_MASK 0x4000000
#define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH__SHIFT 0x1a
/* PA_CL_NANINF_CNTL: NaN/Inf handling policy bits for the clipper/VTE. */
#define PA_CL_NANINF_CNTL__VTE_XY_INF_DISCARD_MASK 0x1
#define PA_CL_NANINF_CNTL__VTE_XY_INF_DISCARD__SHIFT 0x0
#define PA_CL_NANINF_CNTL__VTE_Z_INF_DISCARD_MASK 0x2
#define PA_CL_NANINF_CNTL__VTE_Z_INF_DISCARD__SHIFT 0x1
#define PA_CL_NANINF_CNTL__VTE_W_INF_DISCARD_MASK 0x4
#define PA_CL_NANINF_CNTL__VTE_W_INF_DISCARD__SHIFT 0x2
#define PA_CL_NANINF_CNTL__VTE_0XNANINF_IS_0_MASK 0x8
#define PA_CL_NANINF_CNTL__VTE_0XNANINF_IS_0__SHIFT 0x3
#define PA_CL_NANINF_CNTL__VTE_XY_NAN_RETAIN_MASK 0x10
#define PA_CL_NANINF_CNTL__VTE_XY_NAN_RETAIN__SHIFT 0x4
#define PA_CL_NANINF_CNTL__VTE_Z_NAN_RETAIN_MASK 0x20
#define PA_CL_NANINF_CNTL__VTE_Z_NAN_RETAIN__SHIFT 0x5
#define PA_CL_NANINF_CNTL__VTE_W_NAN_RETAIN_MASK 0x40
#define PA_CL_NANINF_CNTL__VTE_W_NAN_RETAIN__SHIFT 0x6
#define PA_CL_NANINF_CNTL__VTE_W_RECIP_NAN_IS_0_MASK 0x80
#define PA_CL_NANINF_CNTL__VTE_W_RECIP_NAN_IS_0__SHIFT 0x7
#define PA_CL_NANINF_CNTL__VS_XY_NAN_TO_INF_MASK 0x100
#define PA_CL_NANINF_CNTL__VS_XY_NAN_TO_INF__SHIFT 0x8
#define PA_CL_NANINF_CNTL__VS_XY_INF_RETAIN_MASK 0x200
#define PA_CL_NANINF_CNTL__VS_XY_INF_RETAIN__SHIFT 0x9
#define PA_CL_NANINF_CNTL__VS_Z_NAN_TO_INF_MASK 0x400
#define PA_CL_NANINF_CNTL__VS_Z_NAN_TO_INF__SHIFT 0xa
#define PA_CL_NANINF_CNTL__VS_Z_INF_RETAIN_MASK 0x800
#define PA_CL_NANINF_CNTL__VS_Z_INF_RETAIN__SHIFT 0xb
#define PA_CL_NANINF_CNTL__VS_W_NAN_TO_INF_MASK 0x1000
#define PA_CL_NANINF_CNTL__VS_W_NAN_TO_INF__SHIFT 0xc
#define PA_CL_NANINF_CNTL__VS_W_INF_RETAIN_MASK 0x2000
#define PA_CL_NANINF_CNTL__VS_W_INF_RETAIN__SHIFT 0xd
#define PA_CL_NANINF_CNTL__VS_CLIP_DIST_INF_DISCARD_MASK 0x4000
#define PA_CL_NANINF_CNTL__VS_CLIP_DIST_INF_DISCARD__SHIFT 0xe
#define PA_CL_NANINF_CNTL__VTE_NO_OUTPUT_NEG_0_MASK 0x100000
#define PA_CL_NANINF_CNTL__VTE_NO_OUTPUT_NEG_0__SHIFT 0x14
/* PA_CL_CLIP_CNTL: user clip plane enables and clip-space configuration. */
#define PA_CL_CLIP_CNTL__UCP_ENA_0_MASK 0x1
#define PA_CL_CLIP_CNTL__UCP_ENA_0__SHIFT 0x0
#define PA_CL_CLIP_CNTL__UCP_ENA_1_MASK 0x2
#define PA_CL_CLIP_CNTL__UCP_ENA_1__SHIFT 0x1
#define PA_CL_CLIP_CNTL__UCP_ENA_2_MASK 0x4
#define PA_CL_CLIP_CNTL__UCP_ENA_2__SHIFT 0x2
#define PA_CL_CLIP_CNTL__UCP_ENA_3_MASK 0x8
#define PA_CL_CLIP_CNTL__UCP_ENA_3__SHIFT 0x3
#define PA_CL_CLIP_CNTL__UCP_ENA_4_MASK 0x10
#define PA_CL_CLIP_CNTL__UCP_ENA_4__SHIFT 0x4
#define PA_CL_CLIP_CNTL__UCP_ENA_5_MASK 0x20
#define PA_CL_CLIP_CNTL__UCP_ENA_5__SHIFT 0x5
#define PA_CL_CLIP_CNTL__PS_UCP_Y_SCALE_NEG_MASK 0x2000
#define PA_CL_CLIP_CNTL__PS_UCP_Y_SCALE_NEG__SHIFT 0xd
#define PA_CL_CLIP_CNTL__PS_UCP_MODE_MASK 0xc000
#define PA_CL_CLIP_CNTL__PS_UCP_MODE__SHIFT 0xe
#define PA_CL_CLIP_CNTL__CLIP_DISABLE_MASK 0x10000
#define PA_CL_CLIP_CNTL__CLIP_DISABLE__SHIFT 0x10
#define PA_CL_CLIP_CNTL__UCP_CULL_ONLY_ENA_MASK 0x20000
#define PA_CL_CLIP_CNTL__UCP_CULL_ONLY_ENA__SHIFT 0x11
#define PA_CL_CLIP_CNTL__BOUNDARY_EDGE_FLAG_ENA_MASK 0x40000
#define PA_CL_CLIP_CNTL__BOUNDARY_EDGE_FLAG_ENA__SHIFT 0x12
#define PA_CL_CLIP_CNTL__DX_CLIP_SPACE_DEF_MASK 0x80000
#define PA_CL_CLIP_CNTL__DX_CLIP_SPACE_DEF__SHIFT 0x13
#define PA_CL_CLIP_CNTL__DIS_CLIP_ERR_DETECT_MASK 0x100000
#define PA_CL_CLIP_CNTL__DIS_CLIP_ERR_DETECT__SHIFT 0x14
#define PA_CL_CLIP_CNTL__VTX_KILL_OR_MASK 0x200000
#define PA_CL_CLIP_CNTL__VTX_KILL_OR__SHIFT 0x15
#define PA_CL_CLIP_CNTL__DX_RASTERIZATION_KILL_MASK 0x400000
#define PA_CL_CLIP_CNTL__DX_RASTERIZATION_KILL__SHIFT 0x16
#define PA_CL_CLIP_CNTL__DX_LINEAR_ATTR_CLIP_ENA_MASK 0x1000000
#define PA_CL_CLIP_CNTL__DX_LINEAR_ATTR_CLIP_ENA__SHIFT 0x18
#define PA_CL_CLIP_CNTL__VTE_VPORT_PROVOKE_DISABLE_MASK 0x2000000
#define PA_CL_CLIP_CNTL__VTE_VPORT_PROVOKE_DISABLE__SHIFT 0x19
#define PA_CL_CLIP_CNTL__ZCLIP_NEAR_DISABLE_MASK 0x4000000
#define PA_CL_CLIP_CNTL__ZCLIP_NEAR_DISABLE__SHIFT 0x1a
#define PA_CL_CLIP_CNTL__ZCLIP_FAR_DISABLE_MASK 0x8000000
#define PA_CL_CLIP_CNTL__ZCLIP_FAR_DISABLE__SHIFT 0x1b
#define PA_CL_GB_VERT_CLIP_ADJ__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_GB_VERT_CLIP_ADJ__DATA_REGISTER__SHIFT 0x0
#define PA_CL_GB_VERT_DISC_ADJ__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_GB_VERT_DISC_ADJ__DATA_REGISTER__SHIFT 0x0
#define PA_CL_GB_HORZ_CLIP_ADJ__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_GB_HORZ_CLIP_ADJ__DATA_REGISTER__SHIFT 0x0
#define PA_CL_GB_HORZ_DISC_ADJ__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_GB_HORZ_DISC_ADJ__DATA_REGISTER__SHIFT 0x0
/* PA_CL_UCP_{0..5}_{X,Y,Z,W}: user clip plane coefficients (full 32-bit). */
#define PA_CL_UCP_0_X__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_0_X__DATA_REGISTER__SHIFT 0x0
#define PA_CL_UCP_0_Y__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_0_Y__DATA_REGISTER__SHIFT 0x0
#define PA_CL_UCP_0_Z__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_0_Z__DATA_REGISTER__SHIFT 0x0
#define PA_CL_UCP_0_W__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_0_W__DATA_REGISTER__SHIFT 0x0
#define PA_CL_UCP_1_X__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_1_X__DATA_REGISTER__SHIFT 0x0
#define PA_CL_UCP_1_Y__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_1_Y__DATA_REGISTER__SHIFT 0x0
#define PA_CL_UCP_1_Z__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_1_Z__DATA_REGISTER__SHIFT 0x0
#define PA_CL_UCP_1_W__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_1_W__DATA_REGISTER__SHIFT 0x0
#define PA_CL_UCP_2_X__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_2_X__DATA_REGISTER__SHIFT 0x0
#define PA_CL_UCP_2_Y__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_2_Y__DATA_REGISTER__SHIFT 0x0
#define PA_CL_UCP_2_Z__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_2_Z__DATA_REGISTER__SHIFT 0x0
#define PA_CL_UCP_2_W__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_2_W__DATA_REGISTER__SHIFT 0x0
#define PA_CL_UCP_3_X__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_3_X__DATA_REGISTER__SHIFT 0x0
#define PA_CL_UCP_3_Y__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_3_Y__DATA_REGISTER__SHIFT 0x0
#define PA_CL_UCP_3_Z__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_3_Z__DATA_REGISTER__SHIFT 0x0
#define PA_CL_UCP_3_W__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_3_W__DATA_REGISTER__SHIFT 0x0
#define PA_CL_UCP_4_X__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_4_X__DATA_REGISTER__SHIFT 0x0
#define PA_CL_UCP_4_Y__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_4_Y__DATA_REGISTER__SHIFT 0x0
#define PA_CL_UCP_4_Z__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_4_Z__DATA_REGISTER__SHIFT 0x0
#define PA_CL_UCP_4_W__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_4_W__DATA_REGISTER__SHIFT 0x0
#define PA_CL_UCP_5_X__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_5_X__DATA_REGISTER__SHIFT 0x0
#define PA_CL_UCP_5_Y__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_5_Y__DATA_REGISTER__SHIFT 0x0
#define PA_CL_UCP_5_Z__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_5_Z__DATA_REGISTER__SHIFT 0x0
#define PA_CL_UCP_5_W__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_UCP_5_W__DATA_REGISTER__SHIFT 0x0
#define PA_CL_POINT_X_RAD__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_POINT_X_RAD__DATA_REGISTER__SHIFT 0x0
#define PA_CL_POINT_Y_RAD__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_POINT_Y_RAD__DATA_REGISTER__SHIFT 0x0
#define PA_CL_POINT_SIZE__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_POINT_SIZE__DATA_REGISTER__SHIFT 0x0
#define PA_CL_POINT_CULL_RAD__DATA_REGISTER_MASK 0xffffffff
#define PA_CL_POINT_CULL_RAD__DATA_REGISTER__SHIFT 0x0
#define PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK 0x1
#define PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA__SHIFT 0x0
#define PA_CL_ENHANCE__NUM_CLIP_SEQ_MASK 0x6
#define PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT 0x1
#define PA_CL_ENHANCE__CLIPPED_PRIM_SEQ_STALL_MASK 0x8
#define PA_CL_ENHANCE__CLIPPED_PRIM_SEQ_STALL__SHIFT 0x3
#define PA_CL_ENHANCE__VE_NAN_PROC_DISABLE_MASK 0x10
#define PA_CL_ENHANCE__VE_NAN_PROC_DISABLE__SHIFT 0x4
#define PA_CL_ENHANCE__XTRA_DEBUG_REG_SEL_MASK 0x20
#define PA_CL_ENHANCE__XTRA_DEBUG_REG_SEL__SHIFT 0x5
#define PA_CL_ENHANCE__ECO_SPARE3_MASK 0x10000000
#define PA_CL_ENHANCE__ECO_SPARE3__SHIFT 0x1c
#define PA_CL_ENHANCE__ECO_SPARE2_MASK 0x20000000
#define PA_CL_ENHANCE__ECO_SPARE2__SHIFT 0x1d
#define PA_CL_ENHANCE__ECO_SPARE1_MASK 0x40000000
#define PA_CL_ENHANCE__ECO_SPARE1__SHIFT 0x1e
#define PA_CL_ENHANCE__ECO_SPARE0_MASK 0x80000000
#define PA_CL_ENHANCE__ECO_SPARE0__SHIFT 0x1f
#define PA_CL_RESET_DEBUG__CL_TRIV_DISC_DISABLE_MASK 0x1
#define PA_CL_RESET_DEBUG__CL_TRIV_DISC_DISABLE__SHIFT 0x0
/* PA_SU: setup unit control — vertex quantization, point/line size, culling,
 * polygon mode, depth-bias (poly offset), line stipple and perf counters. */
#define PA_SU_VTX_CNTL__PIX_CENTER_MASK 0x1
#define PA_SU_VTX_CNTL__PIX_CENTER__SHIFT 0x0
#define PA_SU_VTX_CNTL__ROUND_MODE_MASK 0x6
#define PA_SU_VTX_CNTL__ROUND_MODE__SHIFT 0x1
#define PA_SU_VTX_CNTL__QUANT_MODE_MASK 0x38
#define PA_SU_VTX_CNTL__QUANT_MODE__SHIFT 0x3
#define PA_SU_POINT_SIZE__HEIGHT_MASK 0xffff
#define PA_SU_POINT_SIZE__HEIGHT__SHIFT 0x0
#define PA_SU_POINT_SIZE__WIDTH_MASK 0xffff0000
#define PA_SU_POINT_SIZE__WIDTH__SHIFT 0x10
#define PA_SU_POINT_MINMAX__MIN_SIZE_MASK 0xffff
#define PA_SU_POINT_MINMAX__MIN_SIZE__SHIFT 0x0
#define PA_SU_POINT_MINMAX__MAX_SIZE_MASK 0xffff0000
#define PA_SU_POINT_MINMAX__MAX_SIZE__SHIFT 0x10
#define PA_SU_LINE_CNTL__WIDTH_MASK 0xffff
#define PA_SU_LINE_CNTL__WIDTH__SHIFT 0x0
#define PA_SU_LINE_STIPPLE_CNTL__LINE_STIPPLE_RESET_MASK 0x3
#define PA_SU_LINE_STIPPLE_CNTL__LINE_STIPPLE_RESET__SHIFT 0x0
#define PA_SU_LINE_STIPPLE_CNTL__EXPAND_FULL_LENGTH_MASK 0x4
#define PA_SU_LINE_STIPPLE_CNTL__EXPAND_FULL_LENGTH__SHIFT 0x2
#define PA_SU_LINE_STIPPLE_CNTL__FRACTIONAL_ACCUM_MASK 0x8
#define PA_SU_LINE_STIPPLE_CNTL__FRACTIONAL_ACCUM__SHIFT 0x3
#define PA_SU_LINE_STIPPLE_CNTL__DIAMOND_ADJUST_MASK 0x10
#define PA_SU_LINE_STIPPLE_CNTL__DIAMOND_ADJUST__SHIFT 0x4
#define PA_SU_LINE_STIPPLE_SCALE__LINE_STIPPLE_SCALE_MASK 0xffffffff
#define PA_SU_LINE_STIPPLE_SCALE__LINE_STIPPLE_SCALE__SHIFT 0x0
#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE_MASK 0x1
#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE__SHIFT 0x0
#define PA_SU_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE_MASK 0x2
#define PA_SU_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE__SHIFT 0x1
#define PA_SU_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE_MASK 0x4
#define PA_SU_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE__SHIFT 0x2
#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE_MASK 0x8
#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE__SHIFT 0x3
#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_EXPAND_ENA_MASK 0x10
#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_EXPAND_ENA__SHIFT 0x4
#define PA_SU_PRIM_FILTER_CNTL__LINE_EXPAND_ENA_MASK 0x20
#define PA_SU_PRIM_FILTER_CNTL__LINE_EXPAND_ENA__SHIFT 0x5
#define PA_SU_PRIM_FILTER_CNTL__POINT_EXPAND_ENA_MASK 0x40
#define PA_SU_PRIM_FILTER_CNTL__POINT_EXPAND_ENA__SHIFT 0x6
#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_EXPAND_ENA_MASK 0x80
#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_EXPAND_ENA__SHIFT 0x7
#define PA_SU_PRIM_FILTER_CNTL__PRIM_EXPAND_CONSTANT_MASK 0xff00
#define PA_SU_PRIM_FILTER_CNTL__PRIM_EXPAND_CONSTANT__SHIFT 0x8
#define PA_SU_PRIM_FILTER_CNTL__XMAX_RIGHT_EXCLUSION_MASK 0x40000000
#define PA_SU_PRIM_FILTER_CNTL__XMAX_RIGHT_EXCLUSION__SHIFT 0x1e
#define PA_SU_PRIM_FILTER_CNTL__YMAX_BOTTOM_EXCLUSION_MASK 0x80000000
#define PA_SU_PRIM_FILTER_CNTL__YMAX_BOTTOM_EXCLUSION__SHIFT 0x1f
#define PA_SU_SC_MODE_CNTL__CULL_FRONT_MASK 0x1
#define PA_SU_SC_MODE_CNTL__CULL_FRONT__SHIFT 0x0
#define PA_SU_SC_MODE_CNTL__CULL_BACK_MASK 0x2
#define PA_SU_SC_MODE_CNTL__CULL_BACK__SHIFT 0x1
#define PA_SU_SC_MODE_CNTL__FACE_MASK 0x4
#define PA_SU_SC_MODE_CNTL__FACE__SHIFT 0x2
#define PA_SU_SC_MODE_CNTL__POLY_MODE_MASK 0x18
#define PA_SU_SC_MODE_CNTL__POLY_MODE__SHIFT 0x3
#define PA_SU_SC_MODE_CNTL__POLYMODE_FRONT_PTYPE_MASK 0xe0
#define PA_SU_SC_MODE_CNTL__POLYMODE_FRONT_PTYPE__SHIFT 0x5
#define PA_SU_SC_MODE_CNTL__POLYMODE_BACK_PTYPE_MASK 0x700
#define PA_SU_SC_MODE_CNTL__POLYMODE_BACK_PTYPE__SHIFT 0x8
#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_FRONT_ENABLE_MASK 0x800
#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_FRONT_ENABLE__SHIFT 0xb
#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_BACK_ENABLE_MASK 0x1000
#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_BACK_ENABLE__SHIFT 0xc
#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_PARA_ENABLE_MASK 0x2000
#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_PARA_ENABLE__SHIFT 0xd
#define PA_SU_SC_MODE_CNTL__VTX_WINDOW_OFFSET_ENABLE_MASK 0x10000
#define PA_SU_SC_MODE_CNTL__VTX_WINDOW_OFFSET_ENABLE__SHIFT 0x10
#define PA_SU_SC_MODE_CNTL__PROVOKING_VTX_LAST_MASK 0x80000
#define PA_SU_SC_MODE_CNTL__PROVOKING_VTX_LAST__SHIFT 0x13
#define PA_SU_SC_MODE_CNTL__PERSP_CORR_DIS_MASK 0x100000
#define PA_SU_SC_MODE_CNTL__PERSP_CORR_DIS__SHIFT 0x14
#define PA_SU_SC_MODE_CNTL__MULTI_PRIM_IB_ENA_MASK 0x200000
#define PA_SU_SC_MODE_CNTL__MULTI_PRIM_IB_ENA__SHIFT 0x15
#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_NEG_NUM_DB_BITS_MASK 0xff
#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_NEG_NUM_DB_BITS__SHIFT 0x0
#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_DB_IS_FLOAT_FMT_MASK 0x100
#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_DB_IS_FLOAT_FMT__SHIFT 0x8
#define PA_SU_POLY_OFFSET_CLAMP__CLAMP_MASK 0xffffffff
#define PA_SU_POLY_OFFSET_CLAMP__CLAMP__SHIFT 0x0
#define PA_SU_POLY_OFFSET_FRONT_SCALE__SCALE_MASK 0xffffffff
#define PA_SU_POLY_OFFSET_FRONT_SCALE__SCALE__SHIFT 0x0
#define PA_SU_POLY_OFFSET_FRONT_OFFSET__OFFSET_MASK 0xffffffff
#define PA_SU_POLY_OFFSET_FRONT_OFFSET__OFFSET__SHIFT 0x0
#define PA_SU_POLY_OFFSET_BACK_SCALE__SCALE_MASK 0xffffffff
#define PA_SU_POLY_OFFSET_BACK_SCALE__SCALE__SHIFT 0x0
#define PA_SU_POLY_OFFSET_BACK_OFFSET__OFFSET_MASK 0xffffffff
#define PA_SU_POLY_OFFSET_BACK_OFFSET__OFFSET__SHIFT 0x0
#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_X_MASK 0x1ff
#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_X__SHIFT 0x0
#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_Y_MASK 0x1ff0000
#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_Y__SHIFT 0x10
#define PA_SU_LINE_STIPPLE_VALUE__LINE_STIPPLE_VALUE_MASK 0xffffff
#define PA_SU_LINE_STIPPLE_VALUE__LINE_STIPPLE_VALUE__SHIFT 0x0
#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x3ff
#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0xffc00
#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
#define PA_SU_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0xf00000
#define PA_SU_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x3ff
#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0xffc00
#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x3ff
#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0xffc00
#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
#define PA_SU_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0xf00000
#define PA_SU_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x3ff
#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0xffc00
#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x3ff
#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
#define PA_SU_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0xf00000
#define PA_SU_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x3ff
#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
#define PA_SU_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0xf00000
#define PA_SU_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
#define PA_SU_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define PA_SU_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define PA_SU_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffff
#define PA_SU_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define PA_SU_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define PA_SU_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
7137#define PA_SU_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffff
7138#define PA_SU_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
7139#define PA_SU_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffff
7140#define PA_SU_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
7141#define PA_SU_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffff
7142#define PA_SU_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
7143#define PA_SU_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffff
7144#define PA_SU_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
7145#define PA_SU_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffff
7146#define PA_SU_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
7147#define PA_SC_AA_CONFIG__MSAA_NUM_SAMPLES_MASK 0x7
7148#define PA_SC_AA_CONFIG__MSAA_NUM_SAMPLES__SHIFT 0x0
7149#define PA_SC_AA_CONFIG__AA_MASK_CENTROID_DTMN_MASK 0x10
7150#define PA_SC_AA_CONFIG__AA_MASK_CENTROID_DTMN__SHIFT 0x4
7151#define PA_SC_AA_CONFIG__MAX_SAMPLE_DIST_MASK 0x1e000
7152#define PA_SC_AA_CONFIG__MAX_SAMPLE_DIST__SHIFT 0xd
7153#define PA_SC_AA_CONFIG__MSAA_EXPOSED_SAMPLES_MASK 0x700000
7154#define PA_SC_AA_CONFIG__MSAA_EXPOSED_SAMPLES__SHIFT 0x14
7155#define PA_SC_AA_CONFIG__DETAIL_TO_EXPOSED_MODE_MASK 0x3000000
7156#define PA_SC_AA_CONFIG__DETAIL_TO_EXPOSED_MODE__SHIFT 0x18
7157#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X0Y0_MASK 0xffff
7158#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X0Y0__SHIFT 0x0
7159#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X1Y0_MASK 0xffff0000
7160#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X1Y0__SHIFT 0x10
7161#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X0Y1_MASK 0xffff
7162#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X0Y1__SHIFT 0x0
7163#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X1Y1_MASK 0xffff0000
7164#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X1Y1__SHIFT 0x10
7165#define PA_SC_SHADER_CONTROL__REALIGN_DQUADS_AFTER_N_WAVES_MASK 0x3
7166#define PA_SC_SHADER_CONTROL__REALIGN_DQUADS_AFTER_N_WAVES__SHIFT 0x0
7167#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_X_MASK 0xf
7168#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_X__SHIFT 0x0
7169#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_Y_MASK 0xf0
7170#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_Y__SHIFT 0x4
7171#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_X_MASK 0xf00
7172#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_X__SHIFT 0x8
7173#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_Y_MASK 0xf000
7174#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_Y__SHIFT 0xc
7175#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_X_MASK 0xf0000
7176#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_X__SHIFT 0x10
7177#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_Y_MASK 0xf00000
7178#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_Y__SHIFT 0x14
7179#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_X_MASK 0xf000000
7180#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_X__SHIFT 0x18
7181#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_Y_MASK 0xf0000000
7182#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_Y__SHIFT 0x1c
7183#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_X_MASK 0xf
7184#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_X__SHIFT 0x0
7185#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_Y_MASK 0xf0
7186#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_Y__SHIFT 0x4
7187#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_X_MASK 0xf00
7188#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_X__SHIFT 0x8
7189#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_Y_MASK 0xf000
7190#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_Y__SHIFT 0xc
7191#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_X_MASK 0xf0000
7192#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_X__SHIFT 0x10
7193#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_Y_MASK 0xf00000
7194#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_Y__SHIFT 0x14
7195#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_X_MASK 0xf000000
7196#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_X__SHIFT 0x18
7197#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_Y_MASK 0xf0000000
7198#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_Y__SHIFT 0x1c
7199#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_X_MASK 0xf
7200#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_X__SHIFT 0x0
7201#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_Y_MASK 0xf0
7202#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_Y__SHIFT 0x4
7203#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_X_MASK 0xf00
7204#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_X__SHIFT 0x8
7205#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_Y_MASK 0xf000
7206#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_Y__SHIFT 0xc
7207#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_X_MASK 0xf0000
7208#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_X__SHIFT 0x10
7209#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_Y_MASK 0xf00000
7210#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_Y__SHIFT 0x14
7211#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_X_MASK 0xf000000
7212#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_X__SHIFT 0x18
7213#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_Y_MASK 0xf0000000
7214#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_Y__SHIFT 0x1c
7215#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_X_MASK 0xf
7216#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_X__SHIFT 0x0
7217#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_Y_MASK 0xf0
7218#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_Y__SHIFT 0x4
7219#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_X_MASK 0xf00
7220#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_X__SHIFT 0x8
7221#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_Y_MASK 0xf000
7222#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_Y__SHIFT 0xc
7223#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_X_MASK 0xf0000
7224#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_X__SHIFT 0x10
7225#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_Y_MASK 0xf00000
7226#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_Y__SHIFT 0x14
7227#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_X_MASK 0xf000000
7228#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_X__SHIFT 0x18
7229#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_Y_MASK 0xf0000000
7230#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_Y__SHIFT 0x1c
7231#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_X_MASK 0xf
7232#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_X__SHIFT 0x0
7233#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_Y_MASK 0xf0
7234#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_Y__SHIFT 0x4
7235#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_X_MASK 0xf00
7236#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_X__SHIFT 0x8
7237#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_Y_MASK 0xf000
7238#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_Y__SHIFT 0xc
7239#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_X_MASK 0xf0000
7240#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_X__SHIFT 0x10
7241#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_Y_MASK 0xf00000
7242#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_Y__SHIFT 0x14
7243#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_X_MASK 0xf000000
7244#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_X__SHIFT 0x18
7245#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_Y_MASK 0xf0000000
7246#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_Y__SHIFT 0x1c
7247#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_X_MASK 0xf
7248#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_X__SHIFT 0x0
7249#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_Y_MASK 0xf0
7250#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_Y__SHIFT 0x4
7251#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_X_MASK 0xf00
7252#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_X__SHIFT 0x8
7253#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_Y_MASK 0xf000
7254#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_Y__SHIFT 0xc
7255#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_X_MASK 0xf0000
7256#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_X__SHIFT 0x10
7257#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_Y_MASK 0xf00000
7258#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_Y__SHIFT 0x14
7259#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_X_MASK 0xf000000
7260#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_X__SHIFT 0x18
7261#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_Y_MASK 0xf0000000
7262#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_Y__SHIFT 0x1c
7263#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_X_MASK 0xf
7264#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_X__SHIFT 0x0
7265#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_Y_MASK 0xf0
7266#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_Y__SHIFT 0x4
7267#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_X_MASK 0xf00
7268#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_X__SHIFT 0x8
7269#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_Y_MASK 0xf000
7270#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_Y__SHIFT 0xc
7271#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_X_MASK 0xf0000
7272#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_X__SHIFT 0x10
7273#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_Y_MASK 0xf00000
7274#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_Y__SHIFT 0x14
7275#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_X_MASK 0xf000000
7276#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_X__SHIFT 0x18
7277#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_Y_MASK 0xf0000000
7278#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_Y__SHIFT 0x1c
7279#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_X_MASK 0xf
7280#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_X__SHIFT 0x0
7281#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_Y_MASK 0xf0
7282#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_Y__SHIFT 0x4
7283#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_X_MASK 0xf00
7284#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_X__SHIFT 0x8
7285#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_Y_MASK 0xf000
7286#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_Y__SHIFT 0xc
7287#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_X_MASK 0xf0000
7288#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_X__SHIFT 0x10
7289#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_Y_MASK 0xf00000
7290#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_Y__SHIFT 0x14
7291#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_X_MASK 0xf000000
7292#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_X__SHIFT 0x18
7293#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_Y_MASK 0xf0000000
7294#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_Y__SHIFT 0x1c
7295#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_X_MASK 0xf
7296#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_X__SHIFT 0x0
7297#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_Y_MASK 0xf0
7298#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_Y__SHIFT 0x4
7299#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_X_MASK 0xf00
7300#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_X__SHIFT 0x8
7301#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_Y_MASK 0xf000
7302#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_Y__SHIFT 0xc
7303#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_X_MASK 0xf0000
7304#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_X__SHIFT 0x10
7305#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_Y_MASK 0xf00000
7306#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_Y__SHIFT 0x14
7307#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_X_MASK 0xf000000
7308#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_X__SHIFT 0x18
7309#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_Y_MASK 0xf0000000
7310#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_Y__SHIFT 0x1c
7311#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_X_MASK 0xf
7312#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_X__SHIFT 0x0
7313#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_Y_MASK 0xf0
7314#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_Y__SHIFT 0x4
7315#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_X_MASK 0xf00
7316#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_X__SHIFT 0x8
7317#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_Y_MASK 0xf000
7318#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_Y__SHIFT 0xc
7319#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_X_MASK 0xf0000
7320#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_X__SHIFT 0x10
7321#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_Y_MASK 0xf00000
7322#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_Y__SHIFT 0x14
7323#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_X_MASK 0xf000000
7324#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_X__SHIFT 0x18
7325#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_Y_MASK 0xf0000000
7326#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_Y__SHIFT 0x1c
7327#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_X_MASK 0xf
7328#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_X__SHIFT 0x0
7329#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_Y_MASK 0xf0
7330#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_Y__SHIFT 0x4
7331#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_X_MASK 0xf00
7332#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_X__SHIFT 0x8
7333#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_Y_MASK 0xf000
7334#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_Y__SHIFT 0xc
7335#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_X_MASK 0xf0000
7336#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_X__SHIFT 0x10
7337#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_Y_MASK 0xf00000
7338#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_Y__SHIFT 0x14
7339#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_X_MASK 0xf000000
7340#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_X__SHIFT 0x18
7341#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_Y_MASK 0xf0000000
7342#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_Y__SHIFT 0x1c
7343#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_X_MASK 0xf
7344#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_X__SHIFT 0x0
7345#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_Y_MASK 0xf0
7346#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_Y__SHIFT 0x4
7347#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_X_MASK 0xf00
7348#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_X__SHIFT 0x8
7349#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_Y_MASK 0xf000
7350#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_Y__SHIFT 0xc
7351#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_X_MASK 0xf0000
7352#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_X__SHIFT 0x10
7353#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_Y_MASK 0xf00000
7354#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_Y__SHIFT 0x14
7355#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_X_MASK 0xf000000
7356#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_X__SHIFT 0x18
7357#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_Y_MASK 0xf0000000
7358#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_Y__SHIFT 0x1c
7359#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_X_MASK 0xf
7360#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_X__SHIFT 0x0
7361#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_Y_MASK 0xf0
7362#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_Y__SHIFT 0x4
7363#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_X_MASK 0xf00
7364#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_X__SHIFT 0x8
7365#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_Y_MASK 0xf000
7366#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_Y__SHIFT 0xc
7367#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_X_MASK 0xf0000
7368#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_X__SHIFT 0x10
7369#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_Y_MASK 0xf00000
7370#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_Y__SHIFT 0x14
7371#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_X_MASK 0xf000000
7372#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_X__SHIFT 0x18
7373#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_Y_MASK 0xf0000000
7374#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_Y__SHIFT 0x1c
7375#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_X_MASK 0xf
7376#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_X__SHIFT 0x0
7377#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_Y_MASK 0xf0
7378#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_Y__SHIFT 0x4
7379#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_X_MASK 0xf00
7380#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_X__SHIFT 0x8
7381#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_Y_MASK 0xf000
7382#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_Y__SHIFT 0xc
7383#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_X_MASK 0xf0000
7384#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_X__SHIFT 0x10
7385#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_Y_MASK 0xf00000
7386#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_Y__SHIFT 0x14
7387#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_X_MASK 0xf000000
7388#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_X__SHIFT 0x18
7389#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_Y_MASK 0xf0000000
7390#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_Y__SHIFT 0x1c
7391#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_X_MASK 0xf
7392#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_X__SHIFT 0x0
7393#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_Y_MASK 0xf0
7394#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_Y__SHIFT 0x4
7395#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_X_MASK 0xf00
7396#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_X__SHIFT 0x8
7397#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_Y_MASK 0xf000
7398#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_Y__SHIFT 0xc
7399#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_X_MASK 0xf0000
7400#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_X__SHIFT 0x10
7401#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_Y_MASK 0xf00000
7402#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_Y__SHIFT 0x14
7403#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_X_MASK 0xf000000
7404#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_X__SHIFT 0x18
7405#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_Y_MASK 0xf0000000
7406#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_Y__SHIFT 0x1c
7407#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_X_MASK 0xf
7408#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_X__SHIFT 0x0
7409#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_Y_MASK 0xf0
7410#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_Y__SHIFT 0x4
7411#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_X_MASK 0xf00
7412#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_X__SHIFT 0x8
7413#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_Y_MASK 0xf000
7414#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_Y__SHIFT 0xc
7415#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_X_MASK 0xf0000
7416#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_X__SHIFT 0x10
7417#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_Y_MASK 0xf00000
7418#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_Y__SHIFT 0x14
7419#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_X_MASK 0xf000000
7420#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_X__SHIFT 0x18
7421#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_Y_MASK 0xf0000000
7422#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_Y__SHIFT 0x1c
7423#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_0_MASK 0xf
7424#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_0__SHIFT 0x0
7425#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_1_MASK 0xf0
7426#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_1__SHIFT 0x4
7427#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_2_MASK 0xf00
7428#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_2__SHIFT 0x8
7429#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_3_MASK 0xf000
7430#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_3__SHIFT 0xc
7431#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_4_MASK 0xf0000
7432#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_4__SHIFT 0x10
7433#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_5_MASK 0xf00000
7434#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_5__SHIFT 0x14
7435#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_6_MASK 0xf000000
7436#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_6__SHIFT 0x18
7437#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_7_MASK 0xf0000000
7438#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_7__SHIFT 0x1c
7439#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_8_MASK 0xf
7440#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_8__SHIFT 0x0
7441#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_9_MASK 0xf0
7442#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_9__SHIFT 0x4
7443#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_10_MASK 0xf00
7444#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_10__SHIFT 0x8
7445#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_11_MASK 0xf000
7446#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_11__SHIFT 0xc
7447#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_12_MASK 0xf0000
7448#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_12__SHIFT 0x10
7449#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_13_MASK 0xf00000
7450#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_13__SHIFT 0x14
7451#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_14_MASK 0xf000000
7452#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_14__SHIFT 0x18
7453#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_15_MASK 0xf0000000
7454#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_15__SHIFT 0x1c
7455#define PA_SC_CLIPRECT_0_TL__TL_X_MASK 0x7fff
7456#define PA_SC_CLIPRECT_0_TL__TL_X__SHIFT 0x0
7457#define PA_SC_CLIPRECT_0_TL__TL_Y_MASK 0x7fff0000
7458#define PA_SC_CLIPRECT_0_TL__TL_Y__SHIFT 0x10
7459#define PA_SC_CLIPRECT_0_BR__BR_X_MASK 0x7fff
7460#define PA_SC_CLIPRECT_0_BR__BR_X__SHIFT 0x0
7461#define PA_SC_CLIPRECT_0_BR__BR_Y_MASK 0x7fff0000
7462#define PA_SC_CLIPRECT_0_BR__BR_Y__SHIFT 0x10
7463#define PA_SC_CLIPRECT_1_TL__TL_X_MASK 0x7fff
7464#define PA_SC_CLIPRECT_1_TL__TL_X__SHIFT 0x0
7465#define PA_SC_CLIPRECT_1_TL__TL_Y_MASK 0x7fff0000
7466#define PA_SC_CLIPRECT_1_TL__TL_Y__SHIFT 0x10
7467#define PA_SC_CLIPRECT_1_BR__BR_X_MASK 0x7fff
7468#define PA_SC_CLIPRECT_1_BR__BR_X__SHIFT 0x0
7469#define PA_SC_CLIPRECT_1_BR__BR_Y_MASK 0x7fff0000
7470#define PA_SC_CLIPRECT_1_BR__BR_Y__SHIFT 0x10
7471#define PA_SC_CLIPRECT_2_TL__TL_X_MASK 0x7fff
7472#define PA_SC_CLIPRECT_2_TL__TL_X__SHIFT 0x0
7473#define PA_SC_CLIPRECT_2_TL__TL_Y_MASK 0x7fff0000
7474#define PA_SC_CLIPRECT_2_TL__TL_Y__SHIFT 0x10
7475#define PA_SC_CLIPRECT_2_BR__BR_X_MASK 0x7fff
7476#define PA_SC_CLIPRECT_2_BR__BR_X__SHIFT 0x0
7477#define PA_SC_CLIPRECT_2_BR__BR_Y_MASK 0x7fff0000
7478#define PA_SC_CLIPRECT_2_BR__BR_Y__SHIFT 0x10
7479#define PA_SC_CLIPRECT_3_TL__TL_X_MASK 0x7fff
7480#define PA_SC_CLIPRECT_3_TL__TL_X__SHIFT 0x0
7481#define PA_SC_CLIPRECT_3_TL__TL_Y_MASK 0x7fff0000
7482#define PA_SC_CLIPRECT_3_TL__TL_Y__SHIFT 0x10
7483#define PA_SC_CLIPRECT_3_BR__BR_X_MASK 0x7fff
7484#define PA_SC_CLIPRECT_3_BR__BR_X__SHIFT 0x0
7485#define PA_SC_CLIPRECT_3_BR__BR_Y_MASK 0x7fff0000
7486#define PA_SC_CLIPRECT_3_BR__BR_Y__SHIFT 0x10
7487#define PA_SC_CLIPRECT_RULE__CLIP_RULE_MASK 0xffff
7488#define PA_SC_CLIPRECT_RULE__CLIP_RULE__SHIFT 0x0
7489#define PA_SC_EDGERULE__ER_TRI_MASK 0xf
7490#define PA_SC_EDGERULE__ER_TRI__SHIFT 0x0
7491#define PA_SC_EDGERULE__ER_POINT_MASK 0xf0
7492#define PA_SC_EDGERULE__ER_POINT__SHIFT 0x4
7493#define PA_SC_EDGERULE__ER_RECT_MASK 0xf00
7494#define PA_SC_EDGERULE__ER_RECT__SHIFT 0x8
7495#define PA_SC_EDGERULE__ER_LINE_LR_MASK 0x3f000
7496#define PA_SC_EDGERULE__ER_LINE_LR__SHIFT 0xc
7497#define PA_SC_EDGERULE__ER_LINE_RL_MASK 0xfc0000
7498#define PA_SC_EDGERULE__ER_LINE_RL__SHIFT 0x12
7499#define PA_SC_EDGERULE__ER_LINE_TB_MASK 0xf000000
7500#define PA_SC_EDGERULE__ER_LINE_TB__SHIFT 0x18
7501#define PA_SC_EDGERULE__ER_LINE_BT_MASK 0xf0000000
7502#define PA_SC_EDGERULE__ER_LINE_BT__SHIFT 0x1c
7503#define PA_SC_LINE_CNTL__EXPAND_LINE_WIDTH_MASK 0x200
7504#define PA_SC_LINE_CNTL__EXPAND_LINE_WIDTH__SHIFT 0x9
7505#define PA_SC_LINE_CNTL__LAST_PIXEL_MASK 0x400
7506#define PA_SC_LINE_CNTL__LAST_PIXEL__SHIFT 0xa
7507#define PA_SC_LINE_CNTL__PERPENDICULAR_ENDCAP_ENA_MASK 0x800
7508#define PA_SC_LINE_CNTL__PERPENDICULAR_ENDCAP_ENA__SHIFT 0xb
7509#define PA_SC_LINE_CNTL__DX10_DIAMOND_TEST_ENA_MASK 0x1000
7510#define PA_SC_LINE_CNTL__DX10_DIAMOND_TEST_ENA__SHIFT 0xc
7511#define PA_SC_LINE_STIPPLE__LINE_PATTERN_MASK 0xffff
7512#define PA_SC_LINE_STIPPLE__LINE_PATTERN__SHIFT 0x0
7513#define PA_SC_LINE_STIPPLE__REPEAT_COUNT_MASK 0xff0000
7514#define PA_SC_LINE_STIPPLE__REPEAT_COUNT__SHIFT 0x10
7515#define PA_SC_LINE_STIPPLE__PATTERN_BIT_ORDER_MASK 0x10000000
7516#define PA_SC_LINE_STIPPLE__PATTERN_BIT_ORDER__SHIFT 0x1c
7517#define PA_SC_LINE_STIPPLE__AUTO_RESET_CNTL_MASK 0x60000000
7518#define PA_SC_LINE_STIPPLE__AUTO_RESET_CNTL__SHIFT 0x1d
7519#define PA_SC_MODE_CNTL_0__MSAA_ENABLE_MASK 0x1
7520#define PA_SC_MODE_CNTL_0__MSAA_ENABLE__SHIFT 0x0
7521#define PA_SC_MODE_CNTL_0__VPORT_SCISSOR_ENABLE_MASK 0x2
7522#define PA_SC_MODE_CNTL_0__VPORT_SCISSOR_ENABLE__SHIFT 0x1
7523#define PA_SC_MODE_CNTL_0__LINE_STIPPLE_ENABLE_MASK 0x4
7524#define PA_SC_MODE_CNTL_0__LINE_STIPPLE_ENABLE__SHIFT 0x2
7525#define PA_SC_MODE_CNTL_0__SEND_UNLIT_STILES_TO_PKR_MASK 0x8
7526#define PA_SC_MODE_CNTL_0__SEND_UNLIT_STILES_TO_PKR__SHIFT 0x3
7527#define PA_SC_MODE_CNTL_1__WALK_SIZE_MASK 0x1
7528#define PA_SC_MODE_CNTL_1__WALK_SIZE__SHIFT 0x0
7529#define PA_SC_MODE_CNTL_1__WALK_ALIGNMENT_MASK 0x2
7530#define PA_SC_MODE_CNTL_1__WALK_ALIGNMENT__SHIFT 0x1
7531#define PA_SC_MODE_CNTL_1__WALK_ALIGN8_PRIM_FITS_ST_MASK 0x4
7532#define PA_SC_MODE_CNTL_1__WALK_ALIGN8_PRIM_FITS_ST__SHIFT 0x2
7533#define PA_SC_MODE_CNTL_1__WALK_FENCE_ENABLE_MASK 0x8
7534#define PA_SC_MODE_CNTL_1__WALK_FENCE_ENABLE__SHIFT 0x3
7535#define PA_SC_MODE_CNTL_1__WALK_FENCE_SIZE_MASK 0x70
7536#define PA_SC_MODE_CNTL_1__WALK_FENCE_SIZE__SHIFT 0x4
7537#define PA_SC_MODE_CNTL_1__SUPERTILE_WALK_ORDER_ENABLE_MASK 0x80
7538#define PA_SC_MODE_CNTL_1__SUPERTILE_WALK_ORDER_ENABLE__SHIFT 0x7
7539#define PA_SC_MODE_CNTL_1__TILE_WALK_ORDER_ENABLE_MASK 0x100
7540#define PA_SC_MODE_CNTL_1__TILE_WALK_ORDER_ENABLE__SHIFT 0x8
7541#define PA_SC_MODE_CNTL_1__TILE_COVER_DISABLE_MASK 0x200
7542#define PA_SC_MODE_CNTL_1__TILE_COVER_DISABLE__SHIFT 0x9
7543#define PA_SC_MODE_CNTL_1__TILE_COVER_NO_SCISSOR_MASK 0x400
7544#define PA_SC_MODE_CNTL_1__TILE_COVER_NO_SCISSOR__SHIFT 0xa
7545#define PA_SC_MODE_CNTL_1__ZMM_LINE_EXTENT_MASK 0x800
7546#define PA_SC_MODE_CNTL_1__ZMM_LINE_EXTENT__SHIFT 0xb
7547#define PA_SC_MODE_CNTL_1__ZMM_LINE_OFFSET_MASK 0x1000
7548#define PA_SC_MODE_CNTL_1__ZMM_LINE_OFFSET__SHIFT 0xc
7549#define PA_SC_MODE_CNTL_1__ZMM_RECT_EXTENT_MASK 0x2000
7550#define PA_SC_MODE_CNTL_1__ZMM_RECT_EXTENT__SHIFT 0xd
7551#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_HI_Z_MASK 0x4000
7552#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_HI_Z__SHIFT 0xe
7553#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_DETAIL_MASK_MASK 0x8000
7554#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_DETAIL_MASK__SHIFT 0xf
7555#define PA_SC_MODE_CNTL_1__PS_ITER_SAMPLE_MASK 0x10000
7556#define PA_SC_MODE_CNTL_1__PS_ITER_SAMPLE__SHIFT 0x10
7557#define PA_SC_MODE_CNTL_1__MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE_MASK 0x20000
7558#define PA_SC_MODE_CNTL_1__MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE__SHIFT 0x11
7559#define PA_SC_MODE_CNTL_1__MULTI_GPU_SUPERTILE_ENABLE_MASK 0x40000
7560#define PA_SC_MODE_CNTL_1__MULTI_GPU_SUPERTILE_ENABLE__SHIFT 0x12
7561#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_ENABLE_MASK 0x80000
7562#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_ENABLE__SHIFT 0x13
7563#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_MASK 0xf00000
7564#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE__SHIFT 0x14
7565#define PA_SC_MODE_CNTL_1__MULTI_GPU_PRIM_DISCARD_ENABLE_MASK 0x1000000
7566#define PA_SC_MODE_CNTL_1__MULTI_GPU_PRIM_DISCARD_ENABLE__SHIFT 0x18
7567#define PA_SC_MODE_CNTL_1__FORCE_EOV_CNTDWN_ENABLE_MASK 0x2000000
7568#define PA_SC_MODE_CNTL_1__FORCE_EOV_CNTDWN_ENABLE__SHIFT 0x19
7569#define PA_SC_MODE_CNTL_1__FORCE_EOV_REZ_ENABLE_MASK 0x4000000
7570#define PA_SC_MODE_CNTL_1__FORCE_EOV_REZ_ENABLE__SHIFT 0x1a
7571#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_PRIMITIVE_ENABLE_MASK 0x8000000
7572#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_PRIMITIVE_ENABLE__SHIFT 0x1b
7573#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_WATER_MARK_MASK 0x70000000
7574#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_WATER_MARK__SHIFT 0x1c
7575#define PA_SC_RASTER_CONFIG__RB_MAP_PKR0_MASK 0x3
7576#define PA_SC_RASTER_CONFIG__RB_MAP_PKR0__SHIFT 0x0
7577#define PA_SC_RASTER_CONFIG__RB_MAP_PKR1_MASK 0xc
7578#define PA_SC_RASTER_CONFIG__RB_MAP_PKR1__SHIFT 0x2
7579#define PA_SC_RASTER_CONFIG__RB_XSEL2_MASK 0x30
7580#define PA_SC_RASTER_CONFIG__RB_XSEL2__SHIFT 0x4
7581#define PA_SC_RASTER_CONFIG__RB_XSEL_MASK 0x40
7582#define PA_SC_RASTER_CONFIG__RB_XSEL__SHIFT 0x6
7583#define PA_SC_RASTER_CONFIG__RB_YSEL_MASK 0x80
7584#define PA_SC_RASTER_CONFIG__RB_YSEL__SHIFT 0x7
7585#define PA_SC_RASTER_CONFIG__PKR_MAP_MASK 0x300
7586#define PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT 0x8
7587#define PA_SC_RASTER_CONFIG__PKR_XSEL_MASK 0xc00
7588#define PA_SC_RASTER_CONFIG__PKR_XSEL__SHIFT 0xa
7589#define PA_SC_RASTER_CONFIG__PKR_YSEL_MASK 0x3000
7590#define PA_SC_RASTER_CONFIG__PKR_YSEL__SHIFT 0xc
7591#define PA_SC_RASTER_CONFIG__PKR_XSEL2_MASK 0xc000
7592#define PA_SC_RASTER_CONFIG__PKR_XSEL2__SHIFT 0xe
7593#define PA_SC_RASTER_CONFIG__SC_MAP_MASK 0x30000
7594#define PA_SC_RASTER_CONFIG__SC_MAP__SHIFT 0x10
7595#define PA_SC_RASTER_CONFIG__SC_XSEL_MASK 0xc0000
7596#define PA_SC_RASTER_CONFIG__SC_XSEL__SHIFT 0x12
7597#define PA_SC_RASTER_CONFIG__SC_YSEL_MASK 0x300000
7598#define PA_SC_RASTER_CONFIG__SC_YSEL__SHIFT 0x14
7599#define PA_SC_RASTER_CONFIG__SE_MAP_MASK 0x3000000
7600#define PA_SC_RASTER_CONFIG__SE_MAP__SHIFT 0x18
7601#define PA_SC_RASTER_CONFIG__SE_XSEL_MASK 0xc000000
7602#define PA_SC_RASTER_CONFIG__SE_XSEL__SHIFT 0x1a
7603#define PA_SC_RASTER_CONFIG__SE_YSEL_MASK 0x30000000
7604#define PA_SC_RASTER_CONFIG__SE_YSEL__SHIFT 0x1c
7605#define PA_SC_RASTER_CONFIG_1__SE_PAIR_MAP_MASK 0x3
7606#define PA_SC_RASTER_CONFIG_1__SE_PAIR_MAP__SHIFT 0x0
7607#define PA_SC_RASTER_CONFIG_1__SE_PAIR_XSEL_MASK 0xc
7608#define PA_SC_RASTER_CONFIG_1__SE_PAIR_XSEL__SHIFT 0x2
7609#define PA_SC_RASTER_CONFIG_1__SE_PAIR_YSEL_MASK 0x30
7610#define PA_SC_RASTER_CONFIG_1__SE_PAIR_YSEL__SHIFT 0x4
7611#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_EVEN_ENABLE_MASK 0x3
7612#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_EVEN_ENABLE__SHIFT 0x0
7613#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_ODD_ENABLE_MASK 0xc
7614#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_ODD_ENABLE__SHIFT 0x2
7615#define PA_SC_GENERIC_SCISSOR_TL__TL_X_MASK 0x7fff
7616#define PA_SC_GENERIC_SCISSOR_TL__TL_X__SHIFT 0x0
7617#define PA_SC_GENERIC_SCISSOR_TL__TL_Y_MASK 0x7fff0000
7618#define PA_SC_GENERIC_SCISSOR_TL__TL_Y__SHIFT 0x10
7619#define PA_SC_GENERIC_SCISSOR_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000
7620#define PA_SC_GENERIC_SCISSOR_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
7621#define PA_SC_GENERIC_SCISSOR_BR__BR_X_MASK 0x7fff
7622#define PA_SC_GENERIC_SCISSOR_BR__BR_X__SHIFT 0x0
7623#define PA_SC_GENERIC_SCISSOR_BR__BR_Y_MASK 0x7fff0000
7624#define PA_SC_GENERIC_SCISSOR_BR__BR_Y__SHIFT 0x10
7625#define PA_SC_SCREEN_SCISSOR_TL__TL_X_MASK 0xffff
7626#define PA_SC_SCREEN_SCISSOR_TL__TL_X__SHIFT 0x0
7627#define PA_SC_SCREEN_SCISSOR_TL__TL_Y_MASK 0xffff0000
7628#define PA_SC_SCREEN_SCISSOR_TL__TL_Y__SHIFT 0x10
7629#define PA_SC_SCREEN_SCISSOR_BR__BR_X_MASK 0xffff
7630#define PA_SC_SCREEN_SCISSOR_BR__BR_X__SHIFT 0x0
7631#define PA_SC_SCREEN_SCISSOR_BR__BR_Y_MASK 0xffff0000
7632#define PA_SC_SCREEN_SCISSOR_BR__BR_Y__SHIFT 0x10
7633#define PA_SC_WINDOW_OFFSET__WINDOW_X_OFFSET_MASK 0xffff
7634#define PA_SC_WINDOW_OFFSET__WINDOW_X_OFFSET__SHIFT 0x0
7635#define PA_SC_WINDOW_OFFSET__WINDOW_Y_OFFSET_MASK 0xffff0000
7636#define PA_SC_WINDOW_OFFSET__WINDOW_Y_OFFSET__SHIFT 0x10
7637#define PA_SC_WINDOW_SCISSOR_TL__TL_X_MASK 0x7fff
7638#define PA_SC_WINDOW_SCISSOR_TL__TL_X__SHIFT 0x0
7639#define PA_SC_WINDOW_SCISSOR_TL__TL_Y_MASK 0x7fff0000
7640#define PA_SC_WINDOW_SCISSOR_TL__TL_Y__SHIFT 0x10
7641#define PA_SC_WINDOW_SCISSOR_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000
7642#define PA_SC_WINDOW_SCISSOR_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
7643#define PA_SC_WINDOW_SCISSOR_BR__BR_X_MASK 0x7fff
7644#define PA_SC_WINDOW_SCISSOR_BR__BR_X__SHIFT 0x0
7645#define PA_SC_WINDOW_SCISSOR_BR__BR_Y_MASK 0x7fff0000
7646#define PA_SC_WINDOW_SCISSOR_BR__BR_Y__SHIFT 0x10
7647#define PA_SC_VPORT_SCISSOR_0_TL__TL_X_MASK 0x7fff
7648#define PA_SC_VPORT_SCISSOR_0_TL__TL_X__SHIFT 0x0
7649#define PA_SC_VPORT_SCISSOR_0_TL__TL_Y_MASK 0x7fff0000
7650#define PA_SC_VPORT_SCISSOR_0_TL__TL_Y__SHIFT 0x10
7651#define PA_SC_VPORT_SCISSOR_0_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000
7652#define PA_SC_VPORT_SCISSOR_0_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
7653#define PA_SC_VPORT_SCISSOR_1_TL__TL_X_MASK 0x7fff
7654#define PA_SC_VPORT_SCISSOR_1_TL__TL_X__SHIFT 0x0
7655#define PA_SC_VPORT_SCISSOR_1_TL__TL_Y_MASK 0x7fff0000
7656#define PA_SC_VPORT_SCISSOR_1_TL__TL_Y__SHIFT 0x10
7657#define PA_SC_VPORT_SCISSOR_1_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000
7658#define PA_SC_VPORT_SCISSOR_1_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
7659#define PA_SC_VPORT_SCISSOR_2_TL__TL_X_MASK 0x7fff
7660#define PA_SC_VPORT_SCISSOR_2_TL__TL_X__SHIFT 0x0
7661#define PA_SC_VPORT_SCISSOR_2_TL__TL_Y_MASK 0x7fff0000
7662#define PA_SC_VPORT_SCISSOR_2_TL__TL_Y__SHIFT 0x10
7663#define PA_SC_VPORT_SCISSOR_2_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000
7664#define PA_SC_VPORT_SCISSOR_2_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
7665#define PA_SC_VPORT_SCISSOR_3_TL__TL_X_MASK 0x7fff
7666#define PA_SC_VPORT_SCISSOR_3_TL__TL_X__SHIFT 0x0
7667#define PA_SC_VPORT_SCISSOR_3_TL__TL_Y_MASK 0x7fff0000
7668#define PA_SC_VPORT_SCISSOR_3_TL__TL_Y__SHIFT 0x10
7669#define PA_SC_VPORT_SCISSOR_3_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000
7670#define PA_SC_VPORT_SCISSOR_3_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
7671#define PA_SC_VPORT_SCISSOR_4_TL__TL_X_MASK 0x7fff
7672#define PA_SC_VPORT_SCISSOR_4_TL__TL_X__SHIFT 0x0
7673#define PA_SC_VPORT_SCISSOR_4_TL__TL_Y_MASK 0x7fff0000
7674#define PA_SC_VPORT_SCISSOR_4_TL__TL_Y__SHIFT 0x10
7675#define PA_SC_VPORT_SCISSOR_4_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000
7676#define PA_SC_VPORT_SCISSOR_4_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
7677#define PA_SC_VPORT_SCISSOR_5_TL__TL_X_MASK 0x7fff
7678#define PA_SC_VPORT_SCISSOR_5_TL__TL_X__SHIFT 0x0
7679#define PA_SC_VPORT_SCISSOR_5_TL__TL_Y_MASK 0x7fff0000
7680#define PA_SC_VPORT_SCISSOR_5_TL__TL_Y__SHIFT 0x10
7681#define PA_SC_VPORT_SCISSOR_5_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000
7682#define PA_SC_VPORT_SCISSOR_5_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
7683#define PA_SC_VPORT_SCISSOR_6_TL__TL_X_MASK 0x7fff
7684#define PA_SC_VPORT_SCISSOR_6_TL__TL_X__SHIFT 0x0
7685#define PA_SC_VPORT_SCISSOR_6_TL__TL_Y_MASK 0x7fff0000
7686#define PA_SC_VPORT_SCISSOR_6_TL__TL_Y__SHIFT 0x10
7687#define PA_SC_VPORT_SCISSOR_6_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000
7688#define PA_SC_VPORT_SCISSOR_6_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
7689#define PA_SC_VPORT_SCISSOR_7_TL__TL_X_MASK 0x7fff
7690#define PA_SC_VPORT_SCISSOR_7_TL__TL_X__SHIFT 0x0
7691#define PA_SC_VPORT_SCISSOR_7_TL__TL_Y_MASK 0x7fff0000
7692#define PA_SC_VPORT_SCISSOR_7_TL__TL_Y__SHIFT 0x10
7693#define PA_SC_VPORT_SCISSOR_7_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000
7694#define PA_SC_VPORT_SCISSOR_7_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
7695#define PA_SC_VPORT_SCISSOR_8_TL__TL_X_MASK 0x7fff
7696#define PA_SC_VPORT_SCISSOR_8_TL__TL_X__SHIFT 0x0
7697#define PA_SC_VPORT_SCISSOR_8_TL__TL_Y_MASK 0x7fff0000
7698#define PA_SC_VPORT_SCISSOR_8_TL__TL_Y__SHIFT 0x10
7699#define PA_SC_VPORT_SCISSOR_8_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000
7700#define PA_SC_VPORT_SCISSOR_8_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
7701#define PA_SC_VPORT_SCISSOR_9_TL__TL_X_MASK 0x7fff
7702#define PA_SC_VPORT_SCISSOR_9_TL__TL_X__SHIFT 0x0
7703#define PA_SC_VPORT_SCISSOR_9_TL__TL_Y_MASK 0x7fff0000
7704#define PA_SC_VPORT_SCISSOR_9_TL__TL_Y__SHIFT 0x10
7705#define PA_SC_VPORT_SCISSOR_9_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000
7706#define PA_SC_VPORT_SCISSOR_9_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
7707#define PA_SC_VPORT_SCISSOR_10_TL__TL_X_MASK 0x7fff
7708#define PA_SC_VPORT_SCISSOR_10_TL__TL_X__SHIFT 0x0
7709#define PA_SC_VPORT_SCISSOR_10_TL__TL_Y_MASK 0x7fff0000
7710#define PA_SC_VPORT_SCISSOR_10_TL__TL_Y__SHIFT 0x10
7711#define PA_SC_VPORT_SCISSOR_10_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000
7712#define PA_SC_VPORT_SCISSOR_10_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
7713#define PA_SC_VPORT_SCISSOR_11_TL__TL_X_MASK 0x7fff
7714#define PA_SC_VPORT_SCISSOR_11_TL__TL_X__SHIFT 0x0
7715#define PA_SC_VPORT_SCISSOR_11_TL__TL_Y_MASK 0x7fff0000
7716#define PA_SC_VPORT_SCISSOR_11_TL__TL_Y__SHIFT 0x10
7717#define PA_SC_VPORT_SCISSOR_11_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000
7718#define PA_SC_VPORT_SCISSOR_11_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
7719#define PA_SC_VPORT_SCISSOR_12_TL__TL_X_MASK 0x7fff
7720#define PA_SC_VPORT_SCISSOR_12_TL__TL_X__SHIFT 0x0
7721#define PA_SC_VPORT_SCISSOR_12_TL__TL_Y_MASK 0x7fff0000
7722#define PA_SC_VPORT_SCISSOR_12_TL__TL_Y__SHIFT 0x10
7723#define PA_SC_VPORT_SCISSOR_12_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000
7724#define PA_SC_VPORT_SCISSOR_12_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
7725#define PA_SC_VPORT_SCISSOR_13_TL__TL_X_MASK 0x7fff
7726#define PA_SC_VPORT_SCISSOR_13_TL__TL_X__SHIFT 0x0
7727#define PA_SC_VPORT_SCISSOR_13_TL__TL_Y_MASK 0x7fff0000
7728#define PA_SC_VPORT_SCISSOR_13_TL__TL_Y__SHIFT 0x10
7729#define PA_SC_VPORT_SCISSOR_13_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000
7730#define PA_SC_VPORT_SCISSOR_13_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
7731#define PA_SC_VPORT_SCISSOR_14_TL__TL_X_MASK 0x7fff
7732#define PA_SC_VPORT_SCISSOR_14_TL__TL_X__SHIFT 0x0
7733#define PA_SC_VPORT_SCISSOR_14_TL__TL_Y_MASK 0x7fff0000
7734#define PA_SC_VPORT_SCISSOR_14_TL__TL_Y__SHIFT 0x10
7735#define PA_SC_VPORT_SCISSOR_14_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000
7736#define PA_SC_VPORT_SCISSOR_14_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
7737#define PA_SC_VPORT_SCISSOR_15_TL__TL_X_MASK 0x7fff
7738#define PA_SC_VPORT_SCISSOR_15_TL__TL_X__SHIFT 0x0
7739#define PA_SC_VPORT_SCISSOR_15_TL__TL_Y_MASK 0x7fff0000
7740#define PA_SC_VPORT_SCISSOR_15_TL__TL_Y__SHIFT 0x10
7741#define PA_SC_VPORT_SCISSOR_15_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000
7742#define PA_SC_VPORT_SCISSOR_15_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
7743#define PA_SC_VPORT_SCISSOR_0_BR__BR_X_MASK 0x7fff
7744#define PA_SC_VPORT_SCISSOR_0_BR__BR_X__SHIFT 0x0
7745#define PA_SC_VPORT_SCISSOR_0_BR__BR_Y_MASK 0x7fff0000
7746#define PA_SC_VPORT_SCISSOR_0_BR__BR_Y__SHIFT 0x10
7747#define PA_SC_VPORT_SCISSOR_1_BR__BR_X_MASK 0x7fff
7748#define PA_SC_VPORT_SCISSOR_1_BR__BR_X__SHIFT 0x0
7749#define PA_SC_VPORT_SCISSOR_1_BR__BR_Y_MASK 0x7fff0000
7750#define PA_SC_VPORT_SCISSOR_1_BR__BR_Y__SHIFT 0x10
7751#define PA_SC_VPORT_SCISSOR_2_BR__BR_X_MASK 0x7fff
7752#define PA_SC_VPORT_SCISSOR_2_BR__BR_X__SHIFT 0x0
7753#define PA_SC_VPORT_SCISSOR_2_BR__BR_Y_MASK 0x7fff0000
7754#define PA_SC_VPORT_SCISSOR_2_BR__BR_Y__SHIFT 0x10
7755#define PA_SC_VPORT_SCISSOR_3_BR__BR_X_MASK 0x7fff
7756#define PA_SC_VPORT_SCISSOR_3_BR__BR_X__SHIFT 0x0
7757#define PA_SC_VPORT_SCISSOR_3_BR__BR_Y_MASK 0x7fff0000
7758#define PA_SC_VPORT_SCISSOR_3_BR__BR_Y__SHIFT 0x10
7759#define PA_SC_VPORT_SCISSOR_4_BR__BR_X_MASK 0x7fff
7760#define PA_SC_VPORT_SCISSOR_4_BR__BR_X__SHIFT 0x0
7761#define PA_SC_VPORT_SCISSOR_4_BR__BR_Y_MASK 0x7fff0000
7762#define PA_SC_VPORT_SCISSOR_4_BR__BR_Y__SHIFT 0x10
7763#define PA_SC_VPORT_SCISSOR_5_BR__BR_X_MASK 0x7fff
7764#define PA_SC_VPORT_SCISSOR_5_BR__BR_X__SHIFT 0x0
7765#define PA_SC_VPORT_SCISSOR_5_BR__BR_Y_MASK 0x7fff0000
7766#define PA_SC_VPORT_SCISSOR_5_BR__BR_Y__SHIFT 0x10
7767#define PA_SC_VPORT_SCISSOR_6_BR__BR_X_MASK 0x7fff
7768#define PA_SC_VPORT_SCISSOR_6_BR__BR_X__SHIFT 0x0
7769#define PA_SC_VPORT_SCISSOR_6_BR__BR_Y_MASK 0x7fff0000
7770#define PA_SC_VPORT_SCISSOR_6_BR__BR_Y__SHIFT 0x10
7771#define PA_SC_VPORT_SCISSOR_7_BR__BR_X_MASK 0x7fff
7772#define PA_SC_VPORT_SCISSOR_7_BR__BR_X__SHIFT 0x0
7773#define PA_SC_VPORT_SCISSOR_7_BR__BR_Y_MASK 0x7fff0000
7774#define PA_SC_VPORT_SCISSOR_7_BR__BR_Y__SHIFT 0x10
7775#define PA_SC_VPORT_SCISSOR_8_BR__BR_X_MASK 0x7fff
7776#define PA_SC_VPORT_SCISSOR_8_BR__BR_X__SHIFT 0x0
7777#define PA_SC_VPORT_SCISSOR_8_BR__BR_Y_MASK 0x7fff0000
7778#define PA_SC_VPORT_SCISSOR_8_BR__BR_Y__SHIFT 0x10
7779#define PA_SC_VPORT_SCISSOR_9_BR__BR_X_MASK 0x7fff
7780#define PA_SC_VPORT_SCISSOR_9_BR__BR_X__SHIFT 0x0
7781#define PA_SC_VPORT_SCISSOR_9_BR__BR_Y_MASK 0x7fff0000
7782#define PA_SC_VPORT_SCISSOR_9_BR__BR_Y__SHIFT 0x10
7783#define PA_SC_VPORT_SCISSOR_10_BR__BR_X_MASK 0x7fff
7784#define PA_SC_VPORT_SCISSOR_10_BR__BR_X__SHIFT 0x0
7785#define PA_SC_VPORT_SCISSOR_10_BR__BR_Y_MASK 0x7fff0000
7786#define PA_SC_VPORT_SCISSOR_10_BR__BR_Y__SHIFT 0x10
7787#define PA_SC_VPORT_SCISSOR_11_BR__BR_X_MASK 0x7fff
7788#define PA_SC_VPORT_SCISSOR_11_BR__BR_X__SHIFT 0x0
7789#define PA_SC_VPORT_SCISSOR_11_BR__BR_Y_MASK 0x7fff0000
7790#define PA_SC_VPORT_SCISSOR_11_BR__BR_Y__SHIFT 0x10
7791#define PA_SC_VPORT_SCISSOR_12_BR__BR_X_MASK 0x7fff
7792#define PA_SC_VPORT_SCISSOR_12_BR__BR_X__SHIFT 0x0
7793#define PA_SC_VPORT_SCISSOR_12_BR__BR_Y_MASK 0x7fff0000
7794#define PA_SC_VPORT_SCISSOR_12_BR__BR_Y__SHIFT 0x10
7795#define PA_SC_VPORT_SCISSOR_13_BR__BR_X_MASK 0x7fff
7796#define PA_SC_VPORT_SCISSOR_13_BR__BR_X__SHIFT 0x0
7797#define PA_SC_VPORT_SCISSOR_13_BR__BR_Y_MASK 0x7fff0000
7798#define PA_SC_VPORT_SCISSOR_13_BR__BR_Y__SHIFT 0x10
7799#define PA_SC_VPORT_SCISSOR_14_BR__BR_X_MASK 0x7fff
7800#define PA_SC_VPORT_SCISSOR_14_BR__BR_X__SHIFT 0x0
7801#define PA_SC_VPORT_SCISSOR_14_BR__BR_Y_MASK 0x7fff0000
7802#define PA_SC_VPORT_SCISSOR_14_BR__BR_Y__SHIFT 0x10
7803#define PA_SC_VPORT_SCISSOR_15_BR__BR_X_MASK 0x7fff
7804#define PA_SC_VPORT_SCISSOR_15_BR__BR_X__SHIFT 0x0
7805#define PA_SC_VPORT_SCISSOR_15_BR__BR_Y_MASK 0x7fff0000
7806#define PA_SC_VPORT_SCISSOR_15_BR__BR_Y__SHIFT 0x10
7807#define PA_SC_VPORT_ZMIN_0__VPORT_ZMIN_MASK 0xffffffff
7808#define PA_SC_VPORT_ZMIN_0__VPORT_ZMIN__SHIFT 0x0
7809#define PA_SC_VPORT_ZMIN_1__VPORT_ZMIN_MASK 0xffffffff
7810#define PA_SC_VPORT_ZMIN_1__VPORT_ZMIN__SHIFT 0x0
7811#define PA_SC_VPORT_ZMIN_2__VPORT_ZMIN_MASK 0xffffffff
7812#define PA_SC_VPORT_ZMIN_2__VPORT_ZMIN__SHIFT 0x0
7813#define PA_SC_VPORT_ZMIN_3__VPORT_ZMIN_MASK 0xffffffff
7814#define PA_SC_VPORT_ZMIN_3__VPORT_ZMIN__SHIFT 0x0
7815#define PA_SC_VPORT_ZMIN_4__VPORT_ZMIN_MASK 0xffffffff
7816#define PA_SC_VPORT_ZMIN_4__VPORT_ZMIN__SHIFT 0x0
7817#define PA_SC_VPORT_ZMIN_5__VPORT_ZMIN_MASK 0xffffffff
7818#define PA_SC_VPORT_ZMIN_5__VPORT_ZMIN__SHIFT 0x0
7819#define PA_SC_VPORT_ZMIN_6__VPORT_ZMIN_MASK 0xffffffff
7820#define PA_SC_VPORT_ZMIN_6__VPORT_ZMIN__SHIFT 0x0
7821#define PA_SC_VPORT_ZMIN_7__VPORT_ZMIN_MASK 0xffffffff
7822#define PA_SC_VPORT_ZMIN_7__VPORT_ZMIN__SHIFT 0x0
7823#define PA_SC_VPORT_ZMIN_8__VPORT_ZMIN_MASK 0xffffffff
7824#define PA_SC_VPORT_ZMIN_8__VPORT_ZMIN__SHIFT 0x0
7825#define PA_SC_VPORT_ZMIN_9__VPORT_ZMIN_MASK 0xffffffff
7826#define PA_SC_VPORT_ZMIN_9__VPORT_ZMIN__SHIFT 0x0
7827#define PA_SC_VPORT_ZMIN_10__VPORT_ZMIN_MASK 0xffffffff
7828#define PA_SC_VPORT_ZMIN_10__VPORT_ZMIN__SHIFT 0x0
7829#define PA_SC_VPORT_ZMIN_11__VPORT_ZMIN_MASK 0xffffffff
7830#define PA_SC_VPORT_ZMIN_11__VPORT_ZMIN__SHIFT 0x0
7831#define PA_SC_VPORT_ZMIN_12__VPORT_ZMIN_MASK 0xffffffff
7832#define PA_SC_VPORT_ZMIN_12__VPORT_ZMIN__SHIFT 0x0
7833#define PA_SC_VPORT_ZMIN_13__VPORT_ZMIN_MASK 0xffffffff
7834#define PA_SC_VPORT_ZMIN_13__VPORT_ZMIN__SHIFT 0x0
7835#define PA_SC_VPORT_ZMIN_14__VPORT_ZMIN_MASK 0xffffffff
7836#define PA_SC_VPORT_ZMIN_14__VPORT_ZMIN__SHIFT 0x0
7837#define PA_SC_VPORT_ZMIN_15__VPORT_ZMIN_MASK 0xffffffff
7838#define PA_SC_VPORT_ZMIN_15__VPORT_ZMIN__SHIFT 0x0
7839#define PA_SC_VPORT_ZMAX_0__VPORT_ZMAX_MASK 0xffffffff
7840#define PA_SC_VPORT_ZMAX_0__VPORT_ZMAX__SHIFT 0x0
7841#define PA_SC_VPORT_ZMAX_1__VPORT_ZMAX_MASK 0xffffffff
7842#define PA_SC_VPORT_ZMAX_1__VPORT_ZMAX__SHIFT 0x0
7843#define PA_SC_VPORT_ZMAX_2__VPORT_ZMAX_MASK 0xffffffff
7844#define PA_SC_VPORT_ZMAX_2__VPORT_ZMAX__SHIFT 0x0
7845#define PA_SC_VPORT_ZMAX_3__VPORT_ZMAX_MASK 0xffffffff
7846#define PA_SC_VPORT_ZMAX_3__VPORT_ZMAX__SHIFT 0x0
7847#define PA_SC_VPORT_ZMAX_4__VPORT_ZMAX_MASK 0xffffffff
7848#define PA_SC_VPORT_ZMAX_4__VPORT_ZMAX__SHIFT 0x0
7849#define PA_SC_VPORT_ZMAX_5__VPORT_ZMAX_MASK 0xffffffff
7850#define PA_SC_VPORT_ZMAX_5__VPORT_ZMAX__SHIFT 0x0
7851#define PA_SC_VPORT_ZMAX_6__VPORT_ZMAX_MASK 0xffffffff
7852#define PA_SC_VPORT_ZMAX_6__VPORT_ZMAX__SHIFT 0x0
7853#define PA_SC_VPORT_ZMAX_7__VPORT_ZMAX_MASK 0xffffffff
7854#define PA_SC_VPORT_ZMAX_7__VPORT_ZMAX__SHIFT 0x0
7855#define PA_SC_VPORT_ZMAX_8__VPORT_ZMAX_MASK 0xffffffff
7856#define PA_SC_VPORT_ZMAX_8__VPORT_ZMAX__SHIFT 0x0
7857#define PA_SC_VPORT_ZMAX_9__VPORT_ZMAX_MASK 0xffffffff
7858#define PA_SC_VPORT_ZMAX_9__VPORT_ZMAX__SHIFT 0x0
7859#define PA_SC_VPORT_ZMAX_10__VPORT_ZMAX_MASK 0xffffffff
7860#define PA_SC_VPORT_ZMAX_10__VPORT_ZMAX__SHIFT 0x0
7861#define PA_SC_VPORT_ZMAX_11__VPORT_ZMAX_MASK 0xffffffff
7862#define PA_SC_VPORT_ZMAX_11__VPORT_ZMAX__SHIFT 0x0
7863#define PA_SC_VPORT_ZMAX_12__VPORT_ZMAX_MASK 0xffffffff
7864#define PA_SC_VPORT_ZMAX_12__VPORT_ZMAX__SHIFT 0x0
7865#define PA_SC_VPORT_ZMAX_13__VPORT_ZMAX_MASK 0xffffffff
7866#define PA_SC_VPORT_ZMAX_13__VPORT_ZMAX__SHIFT 0x0
7867#define PA_SC_VPORT_ZMAX_14__VPORT_ZMAX_MASK 0xffffffff
7868#define PA_SC_VPORT_ZMAX_14__VPORT_ZMAX__SHIFT 0x0
7869#define PA_SC_VPORT_ZMAX_15__VPORT_ZMAX_MASK 0xffffffff
7870#define PA_SC_VPORT_ZMAX_15__VPORT_ZMAX__SHIFT 0x0
7871#define PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK 0x1
7872#define PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER__SHIFT 0x0
7873#define PA_SC_ENHANCE__DISABLE_SC_DB_TILE_FIX_MASK 0x2
7874#define PA_SC_ENHANCE__DISABLE_SC_DB_TILE_FIX__SHIFT 0x1
7875#define PA_SC_ENHANCE__DISABLE_AA_MASK_FULL_FIX_MASK 0x4
7876#define PA_SC_ENHANCE__DISABLE_AA_MASK_FULL_FIX__SHIFT 0x2
7877#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOCATIONS_MASK 0x8
7878#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOCATIONS__SHIFT 0x3
7879#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOC_CENTROID_MASK 0x10
7880#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOC_CENTROID__SHIFT 0x4
7881#define PA_SC_ENHANCE__DISABLE_SCISSOR_FIX_MASK 0x20
7882#define PA_SC_ENHANCE__DISABLE_SCISSOR_FIX__SHIFT 0x5
7883#define PA_SC_ENHANCE__DISABLE_PW_BUBBLE_COLLAPSE_MASK 0xc0
7884#define PA_SC_ENHANCE__DISABLE_PW_BUBBLE_COLLAPSE__SHIFT 0x6
7885#define PA_SC_ENHANCE__SEND_UNLIT_STILES_TO_PACKER_MASK 0x100
7886#define PA_SC_ENHANCE__SEND_UNLIT_STILES_TO_PACKER__SHIFT 0x8
7887#define PA_SC_ENHANCE__DISABLE_DUALGRAD_PERF_OPTIMIZATION_MASK 0x200
7888#define PA_SC_ENHANCE__DISABLE_DUALGRAD_PERF_OPTIMIZATION__SHIFT 0x9
7889#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_PRIM_MASK 0x400
7890#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_PRIM__SHIFT 0xa
7891#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_SUPERTILE_MASK 0x800
7892#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_SUPERTILE__SHIFT 0xb
7893#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_TILE_MASK 0x1000
7894#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_TILE__SHIFT 0xc
7895#define PA_SC_ENHANCE__DISABLE_PA_SC_GUIDANCE_MASK 0x2000
7896#define PA_SC_ENHANCE__DISABLE_PA_SC_GUIDANCE__SHIFT 0xd
7897#define PA_SC_ENHANCE__DISABLE_EOV_ALL_CTRL_ONLY_COMBINATIONS_MASK 0x4000
7898#define PA_SC_ENHANCE__DISABLE_EOV_ALL_CTRL_ONLY_COMBINATIONS__SHIFT 0xe
7899#define PA_SC_ENHANCE__ENABLE_MULTICYCLE_BUBBLE_FREEZE_MASK 0x8000
7900#define PA_SC_ENHANCE__ENABLE_MULTICYCLE_BUBBLE_FREEZE__SHIFT 0xf
7901#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_PA_SC_GUIDANCE_MASK 0x10000
7902#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_PA_SC_GUIDANCE__SHIFT 0x10
7903#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_POLY_MODE_MASK 0x20000
7904#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_POLY_MODE__SHIFT 0x11
7905#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EOP_SYNC_NULL_PRIMS_LAST_MASK 0x40000
7906#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EOP_SYNC_NULL_PRIMS_LAST__SHIFT 0x12
7907#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_THRESHOLD_SWITCHING_MASK 0x80000
7908#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_THRESHOLD_SWITCHING__SHIFT 0x13
7909#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_THRESHOLD_SWITCH_AT_EOPG_ONLY_MASK 0x100000
7910#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_THRESHOLD_SWITCH_AT_EOPG_ONLY__SHIFT 0x14
7911#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_DESIRED_FIFO_EMPTY_SWITCHING_MASK 0x200000
7912#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_DESIRED_FIFO_EMPTY_SWITCHING__SHIFT 0x15
7913#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_SELECTED_FIFO_EMPTY_SWITCHING_MASK 0x400000
7914#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_SELECTED_FIFO_EMPTY_SWITCHING__SHIFT 0x16
7915#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EMPTY_SWITCHING_HYSTERYSIS_MASK 0x800000
7916#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EMPTY_SWITCHING_HYSTERYSIS__SHIFT 0x17
7917#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_DESIRED_FIFO_IS_NEXT_FEID_MASK 0x1000000
7918#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_DESIRED_FIFO_IS_NEXT_FEID__SHIFT 0x18
7919#define PA_SC_ENHANCE__DISABLE_OOO_NO_EOPG_SKEW_DESIRED_FIFO_IS_CURRENT_FIFO_MASK 0x2000000
7920#define PA_SC_ENHANCE__DISABLE_OOO_NO_EOPG_SKEW_DESIRED_FIFO_IS_CURRENT_FIFO__SHIFT 0x19
7921#define PA_SC_ENHANCE__OOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT_MASK 0x4000000
7922#define PA_SC_ENHANCE__OOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT__SHIFT 0x1a
7923#define PA_SC_ENHANCE__OOO_DISABLE_EOPG_SKEW_THRESHOLD_SWITCHING_MASK 0x8000000
7924#define PA_SC_ENHANCE__OOO_DISABLE_EOPG_SKEW_THRESHOLD_SWITCHING__SHIFT 0x1b
7925#define PA_SC_ENHANCE__DISABLE_EOP_LINE_STIPPLE_RESET_MASK 0x10000000
7926#define PA_SC_ENHANCE__DISABLE_EOP_LINE_STIPPLE_RESET__SHIFT 0x1c
7927#define PA_SC_ENHANCE__DISABLE_VPZ_EOP_LINE_STIPPLE_RESET_MASK 0x20000000
7928#define PA_SC_ENHANCE__DISABLE_VPZ_EOP_LINE_STIPPLE_RESET__SHIFT 0x1d
7929#define PA_SC_ENHANCE__ECO_SPARE1_MASK 0x40000000
7930#define PA_SC_ENHANCE__ECO_SPARE1__SHIFT 0x1e
7931#define PA_SC_ENHANCE__ECO_SPARE0_MASK 0x80000000
7932#define PA_SC_ENHANCE__ECO_SPARE0__SHIFT 0x1f
7933#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_ENABLE_MASK 0x1
7934#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_ENABLE__SHIFT 0x0
7935#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_MASK 0x6
7936#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE__SHIFT 0x1
7937#define PA_SC_ENHANCE_1__ENABLE_SC_BINNING_MASK 0x8
7938#define PA_SC_ENHANCE_1__ENABLE_SC_BINNING__SHIFT 0x3
7939#define PA_SC_ENHANCE_1__ECO_SPARE0_MASK 0x10
7940#define PA_SC_ENHANCE_1__ECO_SPARE0__SHIFT 0x4
7941#define PA_SC_ENHANCE_1__ECO_SPARE1_MASK 0x20
7942#define PA_SC_ENHANCE_1__ECO_SPARE1__SHIFT 0x5
7943#define PA_SC_ENHANCE_1__ECO_SPARE2_MASK 0x40
7944#define PA_SC_ENHANCE_1__ECO_SPARE2__SHIFT 0x6
7945#define PA_SC_ENHANCE_1__ECO_SPARE3_MASK 0x80
7946#define PA_SC_ENHANCE_1__ECO_SPARE3__SHIFT 0x7
7947#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_0_MASK 0x1
7948#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_0__SHIFT 0x0
7949#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_1_MASK 0x2
7950#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_1__SHIFT 0x1
7951#define PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE_MASK 0x3f
7952#define PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT 0x0
7953#define PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE_MASK 0x7fc0
7954#define PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT 0x6
7955#define PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE_MASK 0x1f8000
7956#define PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT 0xf
7957#define PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE_MASK 0xff800000
7958#define PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT 0x17
7959#define PA_SC_IF_FIFO_SIZE__SC_DB_TILE_IF_FIFO_SIZE_MASK 0x3f
7960#define PA_SC_IF_FIFO_SIZE__SC_DB_TILE_IF_FIFO_SIZE__SHIFT 0x0
7961#define PA_SC_IF_FIFO_SIZE__SC_DB_QUAD_IF_FIFO_SIZE_MASK 0xfc0
7962#define PA_SC_IF_FIFO_SIZE__SC_DB_QUAD_IF_FIFO_SIZE__SHIFT 0x6
7963#define PA_SC_IF_FIFO_SIZE__SC_SPI_IF_FIFO_SIZE_MASK 0x3f000
7964#define PA_SC_IF_FIFO_SIZE__SC_SPI_IF_FIFO_SIZE__SHIFT 0xc
7965#define PA_SC_IF_FIFO_SIZE__SC_BCI_IF_FIFO_SIZE_MASK 0xfc0000
7966#define PA_SC_IF_FIFO_SIZE__SC_BCI_IF_FIFO_SIZE__SHIFT 0x12
7967#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT_MASK 0xffff
7968#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT 0x0
7969#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT_MASK 0xffff0000
7970#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT 0x10
7971#define PA_SC_LINE_STIPPLE_STATE__CURRENT_PTR_MASK 0xf
7972#define PA_SC_LINE_STIPPLE_STATE__CURRENT_PTR__SHIFT 0x0
7973#define PA_SC_LINE_STIPPLE_STATE__CURRENT_COUNT_MASK 0xff00
7974#define PA_SC_LINE_STIPPLE_STATE__CURRENT_COUNT__SHIFT 0x8
7975#define PA_SC_SCREEN_EXTENT_MIN_0__X_MASK 0xffff
7976#define PA_SC_SCREEN_EXTENT_MIN_0__X__SHIFT 0x0
7977#define PA_SC_SCREEN_EXTENT_MIN_0__Y_MASK 0xffff0000
7978#define PA_SC_SCREEN_EXTENT_MIN_0__Y__SHIFT 0x10
7979#define PA_SC_SCREEN_EXTENT_MAX_0__X_MASK 0xffff
7980#define PA_SC_SCREEN_EXTENT_MAX_0__X__SHIFT 0x0
7981#define PA_SC_SCREEN_EXTENT_MAX_0__Y_MASK 0xffff0000
7982#define PA_SC_SCREEN_EXTENT_MAX_0__Y__SHIFT 0x10
7983#define PA_SC_SCREEN_EXTENT_MIN_1__X_MASK 0xffff
7984#define PA_SC_SCREEN_EXTENT_MIN_1__X__SHIFT 0x0
7985#define PA_SC_SCREEN_EXTENT_MIN_1__Y_MASK 0xffff0000
7986#define PA_SC_SCREEN_EXTENT_MIN_1__Y__SHIFT 0x10
7987#define PA_SC_SCREEN_EXTENT_MAX_1__X_MASK 0xffff
7988#define PA_SC_SCREEN_EXTENT_MAX_1__X__SHIFT 0x0
7989#define PA_SC_SCREEN_EXTENT_MAX_1__Y_MASK 0xffff0000
7990#define PA_SC_SCREEN_EXTENT_MAX_1__Y__SHIFT 0x10
7991#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x3ff
7992#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
7993#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0xffc00
7994#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
7995#define PA_SC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0xf00000
7996#define PA_SC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
7997#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x3ff
7998#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
7999#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0xffc00
8000#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
8001#define PA_SC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x3ff
8002#define PA_SC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
8003#define PA_SC_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x3ff
8004#define PA_SC_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
8005#define PA_SC_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x3ff
8006#define PA_SC_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
8007#define PA_SC_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x3ff
8008#define PA_SC_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
8009#define PA_SC_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x3ff
8010#define PA_SC_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
8011#define PA_SC_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x3ff
8012#define PA_SC_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x0
8013#define PA_SC_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x3ff
8014#define PA_SC_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x0
8015#define PA_SC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffff
8016#define PA_SC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
8017#define PA_SC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffff
8018#define PA_SC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
8019#define PA_SC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffff
8020#define PA_SC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
8021#define PA_SC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffff
8022#define PA_SC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
8023#define PA_SC_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffff
8024#define PA_SC_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
8025#define PA_SC_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffff
8026#define PA_SC_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
8027#define PA_SC_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffff
8028#define PA_SC_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
8029#define PA_SC_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffff
8030#define PA_SC_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
8031#define PA_SC_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xffffffff
8032#define PA_SC_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
8033#define PA_SC_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xffffffff
8034#define PA_SC_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
8035#define PA_SC_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xffffffff
8036#define PA_SC_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
8037#define PA_SC_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xffffffff
8038#define PA_SC_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
8039#define PA_SC_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xffffffff
8040#define PA_SC_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x0
8041#define PA_SC_PERFCOUNTER6_HI__PERFCOUNTER_HI_MASK 0xffffffff
8042#define PA_SC_PERFCOUNTER6_HI__PERFCOUNTER_HI__SHIFT 0x0
8043#define PA_SC_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xffffffff
8044#define PA_SC_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x0
8045#define PA_SC_PERFCOUNTER7_HI__PERFCOUNTER_HI_MASK 0xffffffff
8046#define PA_SC_PERFCOUNTER7_HI__PERFCOUNTER_HI__SHIFT 0x0
8047#define PA_SC_P3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x1
8048#define PA_SC_P3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
8049#define PA_SC_P3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x2
8050#define PA_SC_P3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
8051#define PA_SC_P3D_TRAP_SCREEN_H__X_COORD_MASK 0x3fff
8052#define PA_SC_P3D_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
8053#define PA_SC_P3D_TRAP_SCREEN_V__Y_COORD_MASK 0x3fff
8054#define PA_SC_P3D_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
8055#define PA_SC_P3D_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0xffff
8056#define PA_SC_P3D_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
8057#define PA_SC_P3D_TRAP_SCREEN_COUNT__COUNT_MASK 0xffff
8058#define PA_SC_P3D_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
8059#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x1
8060#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
8061#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x2
8062#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
8063#define PA_SC_HP3D_TRAP_SCREEN_H__X_COORD_MASK 0x3fff
8064#define PA_SC_HP3D_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
8065#define PA_SC_HP3D_TRAP_SCREEN_V__Y_COORD_MASK 0x3fff
8066#define PA_SC_HP3D_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
8067#define PA_SC_HP3D_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0xffff
8068#define PA_SC_HP3D_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
8069#define PA_SC_HP3D_TRAP_SCREEN_COUNT__COUNT_MASK 0xffff
8070#define PA_SC_HP3D_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
8071#define PA_SC_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x1
8072#define PA_SC_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
8073#define PA_SC_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x2
8074#define PA_SC_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
8075#define PA_SC_TRAP_SCREEN_H__X_COORD_MASK 0x3fff
8076#define PA_SC_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
8077#define PA_SC_TRAP_SCREEN_V__Y_COORD_MASK 0x3fff
8078#define PA_SC_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
8079#define PA_SC_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0xffff
8080#define PA_SC_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
8081#define PA_SC_TRAP_SCREEN_COUNT__COUNT_MASK 0xffff
8082#define PA_SC_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
8083#define PA_SC_P3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x1
8084#define PA_SC_P3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
8085#define PA_SC_HP3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x1
8086#define PA_SC_HP3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
8087#define PA_SC_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x1
8088#define PA_SC_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
8089#define PA_CL_CNTL_STATUS__CL_BUSY_MASK 0x80000000
8090#define PA_CL_CNTL_STATUS__CL_BUSY__SHIFT 0x1f
8091#define PA_SU_CNTL_STATUS__SU_BUSY_MASK 0x80000000
8092#define PA_SU_CNTL_STATUS__SU_BUSY__SHIFT 0x1f
8093#define PA_SC_FIFO_DEPTH_CNTL__DEPTH_MASK 0x3ff
8094#define PA_SC_FIFO_DEPTH_CNTL__DEPTH__SHIFT 0x0
8095#define CGTT_PA_CLK_CTRL__ON_DELAY_MASK 0xf
8096#define CGTT_PA_CLK_CTRL__ON_DELAY__SHIFT 0x0
8097#define CGTT_PA_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
8098#define CGTT_PA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
8099#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x1000000
8100#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
8101#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x2000000
8102#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
8103#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x4000000
8104#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
8105#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x8000000
8106#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
8107#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000
8108#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
8109#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE_MASK 0x20000000
8110#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE__SHIFT 0x1d
8111#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE_MASK 0x40000000
8112#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE__SHIFT 0x1e
8113#define CGTT_PA_CLK_CTRL__REG_CLK_OVERRIDE_MASK 0x80000000
8114#define CGTT_PA_CLK_CTRL__REG_CLK_OVERRIDE__SHIFT 0x1f
8115#define CGTT_SC_CLK_CTRL__ON_DELAY_MASK 0xf
8116#define CGTT_SC_CLK_CTRL__ON_DELAY__SHIFT 0x0
8117#define CGTT_SC_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
8118#define CGTT_SC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
8119#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x1000000
8120#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
8121#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x2000000
8122#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
8123#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x4000000
8124#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
8125#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x8000000
8126#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
8127#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000
8128#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
8129#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000
8130#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
8131#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000
8132#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
8133#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000
8134#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
8135#define PA_SU_DEBUG_CNTL__SU_DEBUG_INDX_MASK 0x1f
8136#define PA_SU_DEBUG_CNTL__SU_DEBUG_INDX__SHIFT 0x0
8137#define PA_SU_DEBUG_DATA__DATA_MASK 0xffffffff
8138#define PA_SU_DEBUG_DATA__DATA__SHIFT 0x0
8139#define PA_SC_DEBUG_CNTL__SC_DEBUG_INDX_MASK 0x3f
8140#define PA_SC_DEBUG_CNTL__SC_DEBUG_INDX__SHIFT 0x0
8141#define PA_SC_DEBUG_DATA__DATA_MASK 0xffffffff
8142#define PA_SC_DEBUG_DATA__DATA__SHIFT 0x0
/* CLIPPER_DEBUG_REG00..REG02: per-field bit mask and shift-amount pairs.
 * Fix: stripped the web-scrape line numbers (8143..8258) that were fused
 * onto each #define and made the header invalid C.
 */
#define CLIPPER_DEBUG_REG00__ALWAYS_ZERO_MASK 0xff
#define CLIPPER_DEBUG_REG00__ALWAYS_ZERO__SHIFT 0x0
#define CLIPPER_DEBUG_REG00__clip_ga_bc_fifo_write_MASK 0x100
#define CLIPPER_DEBUG_REG00__clip_ga_bc_fifo_write__SHIFT 0x8
#define CLIPPER_DEBUG_REG00__su_clip_baryc_free_MASK 0x600
#define CLIPPER_DEBUG_REG00__su_clip_baryc_free__SHIFT 0x9
#define CLIPPER_DEBUG_REG00__clip_to_ga_fifo_write_MASK 0x800
#define CLIPPER_DEBUG_REG00__clip_to_ga_fifo_write__SHIFT 0xb
#define CLIPPER_DEBUG_REG00__clip_to_ga_fifo_full_MASK 0x1000
#define CLIPPER_DEBUG_REG00__clip_to_ga_fifo_full__SHIFT 0xc
#define CLIPPER_DEBUG_REG00__primic_to_clprim_fifo_empty_MASK 0x2000
#define CLIPPER_DEBUG_REG00__primic_to_clprim_fifo_empty__SHIFT 0xd
#define CLIPPER_DEBUG_REG00__primic_to_clprim_fifo_full_MASK 0x4000
#define CLIPPER_DEBUG_REG00__primic_to_clprim_fifo_full__SHIFT 0xe
#define CLIPPER_DEBUG_REG00__clip_to_outsm_fifo_empty_MASK 0x8000
#define CLIPPER_DEBUG_REG00__clip_to_outsm_fifo_empty__SHIFT 0xf
#define CLIPPER_DEBUG_REG00__clip_to_outsm_fifo_full_MASK 0x10000
#define CLIPPER_DEBUG_REG00__clip_to_outsm_fifo_full__SHIFT 0x10
#define CLIPPER_DEBUG_REG00__vgt_to_clipp_fifo_empty_MASK 0x20000
#define CLIPPER_DEBUG_REG00__vgt_to_clipp_fifo_empty__SHIFT 0x11
#define CLIPPER_DEBUG_REG00__vgt_to_clipp_fifo_full_MASK 0x40000
#define CLIPPER_DEBUG_REG00__vgt_to_clipp_fifo_full__SHIFT 0x12
#define CLIPPER_DEBUG_REG00__vgt_to_clips_fifo_empty_MASK 0x80000
#define CLIPPER_DEBUG_REG00__vgt_to_clips_fifo_empty__SHIFT 0x13
#define CLIPPER_DEBUG_REG00__vgt_to_clips_fifo_full_MASK 0x100000
#define CLIPPER_DEBUG_REG00__vgt_to_clips_fifo_full__SHIFT 0x14
#define CLIPPER_DEBUG_REG00__clipcode_fifo_fifo_empty_MASK 0x200000
#define CLIPPER_DEBUG_REG00__clipcode_fifo_fifo_empty__SHIFT 0x15
#define CLIPPER_DEBUG_REG00__clipcode_fifo_full_MASK 0x400000
#define CLIPPER_DEBUG_REG00__clipcode_fifo_full__SHIFT 0x16
#define CLIPPER_DEBUG_REG00__vte_out_clip_fifo_fifo_empty_MASK 0x800000
#define CLIPPER_DEBUG_REG00__vte_out_clip_fifo_fifo_empty__SHIFT 0x17
#define CLIPPER_DEBUG_REG00__vte_out_clip_fifo_fifo_full_MASK 0x1000000
#define CLIPPER_DEBUG_REG00__vte_out_clip_fifo_fifo_full__SHIFT 0x18
#define CLIPPER_DEBUG_REG00__vte_out_orig_fifo_fifo_empty_MASK 0x2000000
#define CLIPPER_DEBUG_REG00__vte_out_orig_fifo_fifo_empty__SHIFT 0x19
#define CLIPPER_DEBUG_REG00__vte_out_orig_fifo_fifo_full_MASK 0x4000000
#define CLIPPER_DEBUG_REG00__vte_out_orig_fifo_fifo_full__SHIFT 0x1a
#define CLIPPER_DEBUG_REG00__ccgen_to_clipcc_fifo_empty_MASK 0x8000000
#define CLIPPER_DEBUG_REG00__ccgen_to_clipcc_fifo_empty__SHIFT 0x1b
#define CLIPPER_DEBUG_REG00__ccgen_to_clipcc_fifo_full_MASK 0x10000000
#define CLIPPER_DEBUG_REG00__ccgen_to_clipcc_fifo_full__SHIFT 0x1c
#define CLIPPER_DEBUG_REG00__clip_to_outsm_fifo_write_MASK 0x20000000
#define CLIPPER_DEBUG_REG00__clip_to_outsm_fifo_write__SHIFT 0x1d
#define CLIPPER_DEBUG_REG00__vte_out_orig_fifo_fifo_write_MASK 0x40000000
#define CLIPPER_DEBUG_REG00__vte_out_orig_fifo_fifo_write__SHIFT 0x1e
#define CLIPPER_DEBUG_REG00__vgt_to_clipp_fifo_write_MASK 0x80000000
#define CLIPPER_DEBUG_REG00__vgt_to_clipp_fifo_write__SHIFT 0x1f
#define CLIPPER_DEBUG_REG01__ALWAYS_ZERO_MASK 0xff
#define CLIPPER_DEBUG_REG01__ALWAYS_ZERO__SHIFT 0x0
#define CLIPPER_DEBUG_REG01__clip_extra_bc_valid_MASK 0x700
#define CLIPPER_DEBUG_REG01__clip_extra_bc_valid__SHIFT 0x8
#define CLIPPER_DEBUG_REG01__clip_vert_vte_valid_MASK 0x3800
#define CLIPPER_DEBUG_REG01__clip_vert_vte_valid__SHIFT 0xb
#define CLIPPER_DEBUG_REG01__clip_to_outsm_vertex_deallocate_MASK 0x1c000
#define CLIPPER_DEBUG_REG01__clip_to_outsm_vertex_deallocate__SHIFT 0xe
#define CLIPPER_DEBUG_REG01__clip_to_outsm_deallocate_slot_MASK 0xe0000
#define CLIPPER_DEBUG_REG01__clip_to_outsm_deallocate_slot__SHIFT 0x11
#define CLIPPER_DEBUG_REG01__clip_to_outsm_null_primitive_MASK 0x100000
#define CLIPPER_DEBUG_REG01__clip_to_outsm_null_primitive__SHIFT 0x14
#define CLIPPER_DEBUG_REG01__vte_positions_vte_clip_vte_naninf_kill_2_MASK 0x200000
#define CLIPPER_DEBUG_REG01__vte_positions_vte_clip_vte_naninf_kill_2__SHIFT 0x15
#define CLIPPER_DEBUG_REG01__vte_positions_vte_clip_vte_naninf_kill_1_MASK 0x400000
#define CLIPPER_DEBUG_REG01__vte_positions_vte_clip_vte_naninf_kill_1__SHIFT 0x16
#define CLIPPER_DEBUG_REG01__vte_positions_vte_clip_vte_naninf_kill_0_MASK 0x800000
#define CLIPPER_DEBUG_REG01__vte_positions_vte_clip_vte_naninf_kill_0__SHIFT 0x17
#define CLIPPER_DEBUG_REG01__vte_out_clip_rd_extra_bc_valid_MASK 0x1000000
#define CLIPPER_DEBUG_REG01__vte_out_clip_rd_extra_bc_valid__SHIFT 0x18
#define CLIPPER_DEBUG_REG01__vte_out_clip_rd_vte_naninf_kill_MASK 0x2000000
#define CLIPPER_DEBUG_REG01__vte_out_clip_rd_vte_naninf_kill__SHIFT 0x19
#define CLIPPER_DEBUG_REG01__vte_out_clip_rd_vertex_store_indx_MASK 0xc000000
#define CLIPPER_DEBUG_REG01__vte_out_clip_rd_vertex_store_indx__SHIFT 0x1a
#define CLIPPER_DEBUG_REG01__clip_ga_bc_fifo_write_MASK 0x10000000
#define CLIPPER_DEBUG_REG01__clip_ga_bc_fifo_write__SHIFT 0x1c
#define CLIPPER_DEBUG_REG01__clip_to_ga_fifo_write_MASK 0x20000000
#define CLIPPER_DEBUG_REG01__clip_to_ga_fifo_write__SHIFT 0x1d
#define CLIPPER_DEBUG_REG01__vte_out_clip_fifo_fifo_advanceread_MASK 0x40000000
#define CLIPPER_DEBUG_REG01__vte_out_clip_fifo_fifo_advanceread__SHIFT 0x1e
#define CLIPPER_DEBUG_REG01__vte_out_clip_fifo_fifo_empty_MASK 0x80000000
#define CLIPPER_DEBUG_REG01__vte_out_clip_fifo_fifo_empty__SHIFT 0x1f
#define CLIPPER_DEBUG_REG02__clip_extra_bc_valid_MASK 0x7
#define CLIPPER_DEBUG_REG02__clip_extra_bc_valid__SHIFT 0x0
#define CLIPPER_DEBUG_REG02__clip_vert_vte_valid_MASK 0x38
#define CLIPPER_DEBUG_REG02__clip_vert_vte_valid__SHIFT 0x3
#define CLIPPER_DEBUG_REG02__clip_to_outsm_clip_seq_indx_MASK 0xc0
#define CLIPPER_DEBUG_REG02__clip_to_outsm_clip_seq_indx__SHIFT 0x6
#define CLIPPER_DEBUG_REG02__clip_to_outsm_vertex_store_indx_2_MASK 0xf00
#define CLIPPER_DEBUG_REG02__clip_to_outsm_vertex_store_indx_2__SHIFT 0x8
#define CLIPPER_DEBUG_REG02__clip_to_outsm_vertex_store_indx_1_MASK 0xf000
#define CLIPPER_DEBUG_REG02__clip_to_outsm_vertex_store_indx_1__SHIFT 0xc
#define CLIPPER_DEBUG_REG02__clip_to_outsm_vertex_store_indx_0_MASK 0xf0000
#define CLIPPER_DEBUG_REG02__clip_to_outsm_vertex_store_indx_0__SHIFT 0x10
#define CLIPPER_DEBUG_REG02__clip_to_clipga_extra_bc_coords_MASK 0x100000
#define CLIPPER_DEBUG_REG02__clip_to_clipga_extra_bc_coords__SHIFT 0x14
#define CLIPPER_DEBUG_REG02__clip_to_clipga_vte_naninf_kill_MASK 0x200000
#define CLIPPER_DEBUG_REG02__clip_to_clipga_vte_naninf_kill__SHIFT 0x15
#define CLIPPER_DEBUG_REG02__clip_to_outsm_end_of_packet_MASK 0x400000
#define CLIPPER_DEBUG_REG02__clip_to_outsm_end_of_packet__SHIFT 0x16
#define CLIPPER_DEBUG_REG02__clip_to_outsm_first_prim_of_slot_MASK 0x800000
#define CLIPPER_DEBUG_REG02__clip_to_outsm_first_prim_of_slot__SHIFT 0x17
#define CLIPPER_DEBUG_REG02__clip_to_outsm_clipped_prim_MASK 0x1000000
#define CLIPPER_DEBUG_REG02__clip_to_outsm_clipped_prim__SHIFT 0x18
#define CLIPPER_DEBUG_REG02__clip_to_outsm_null_primitive_MASK 0x2000000
#define CLIPPER_DEBUG_REG02__clip_to_outsm_null_primitive__SHIFT 0x19
#define CLIPPER_DEBUG_REG02__clip_ga_bc_fifo_full_MASK 0x4000000
#define CLIPPER_DEBUG_REG02__clip_ga_bc_fifo_full__SHIFT 0x1a
#define CLIPPER_DEBUG_REG02__clip_to_ga_fifo_full_MASK 0x8000000
#define CLIPPER_DEBUG_REG02__clip_to_ga_fifo_full__SHIFT 0x1b
#define CLIPPER_DEBUG_REG02__clip_ga_bc_fifo_write_MASK 0x10000000
#define CLIPPER_DEBUG_REG02__clip_ga_bc_fifo_write__SHIFT 0x1c
#define CLIPPER_DEBUG_REG02__clip_to_ga_fifo_write_MASK 0x20000000
#define CLIPPER_DEBUG_REG02__clip_to_ga_fifo_write__SHIFT 0x1d
#define CLIPPER_DEBUG_REG02__clip_to_outsm_fifo_advanceread_MASK 0x40000000
#define CLIPPER_DEBUG_REG02__clip_to_outsm_fifo_advanceread__SHIFT 0x1e
#define CLIPPER_DEBUG_REG02__clip_to_outsm_fifo_empty_MASK 0x80000000
#define CLIPPER_DEBUG_REG02__clip_to_outsm_fifo_empty__SHIFT 0x1f
/* CLIPPER_DEBUG_REG03..REG10: per-field bit mask and shift-amount pairs for
 * the four clipper state machines (clipsm0..clipsm3); odd registers carry the
 * clprim_to_clip status fields, even registers the cache/vertex-store indices.
 * Fix: stripped the web-scrape line numbers (8259..8394) that were fused
 * onto each #define and made the header invalid C.
 */
#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_clip_code_or_MASK 0x3fff
#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_clip_code_or__SHIFT 0x0
#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_event_id_MASK 0xfc000
#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_event_id__SHIFT 0xe
#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_state_var_indx_MASK 0x700000
#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_state_var_indx__SHIFT 0x14
#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_clip_primitive_MASK 0x800000
#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_clip_primitive__SHIFT 0x17
#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_deallocate_slot_MASK 0x7000000
#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_deallocate_slot__SHIFT 0x18
#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_first_prim_of_slot_MASK 0x8000000
#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_first_prim_of_slot__SHIFT 0x1b
#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_end_of_packet_MASK 0x10000000
#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_end_of_packet__SHIFT 0x1c
#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_event_MASK 0x20000000
#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_event__SHIFT 0x1d
#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_null_primitive_MASK 0x40000000
#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_null_primitive__SHIFT 0x1e
#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_prim_valid_MASK 0x80000000
#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_prim_valid__SHIFT 0x1f
#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_param_cache_indx_0_MASK 0x7fe
#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_param_cache_indx_0__SHIFT 0x1
#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_vertex_store_indx_2_MASK 0x1f800
#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_vertex_store_indx_2__SHIFT 0xb
#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_vertex_store_indx_1_MASK 0x7e0000
#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_vertex_store_indx_1__SHIFT 0x11
#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_vertex_store_indx_0_MASK 0x1f800000
#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_vertex_store_indx_0__SHIFT 0x17
#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_event_MASK 0x20000000
#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_event__SHIFT 0x1d
#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_null_primitive_MASK 0x40000000
#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_null_primitive__SHIFT 0x1e
#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_prim_valid_MASK 0x80000000
#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_prim_valid__SHIFT 0x1f
#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_clip_code_or_MASK 0x3fff
#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_clip_code_or__SHIFT 0x0
#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_event_id_MASK 0xfc000
#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_event_id__SHIFT 0xe
#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_state_var_indx_MASK 0x700000
#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_state_var_indx__SHIFT 0x14
#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_clip_primitive_MASK 0x800000
#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_clip_primitive__SHIFT 0x17
#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_deallocate_slot_MASK 0x7000000
#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_deallocate_slot__SHIFT 0x18
#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_first_prim_of_slot_MASK 0x8000000
#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_first_prim_of_slot__SHIFT 0x1b
#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_end_of_packet_MASK 0x10000000
#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_end_of_packet__SHIFT 0x1c
#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_event_MASK 0x20000000
#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_event__SHIFT 0x1d
#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_null_primitive_MASK 0x40000000
#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_null_primitive__SHIFT 0x1e
#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_prim_valid_MASK 0x80000000
#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_prim_valid__SHIFT 0x1f
#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_param_cache_indx_0_MASK 0x7fe
#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_param_cache_indx_0__SHIFT 0x1
#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_vertex_store_indx_2_MASK 0x1f800
#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_vertex_store_indx_2__SHIFT 0xb
#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_vertex_store_indx_1_MASK 0x7e0000
#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_vertex_store_indx_1__SHIFT 0x11
#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_vertex_store_indx_0_MASK 0x1f800000
#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_vertex_store_indx_0__SHIFT 0x17
#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_event_MASK 0x20000000
#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_event__SHIFT 0x1d
#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_null_primitive_MASK 0x40000000
#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_null_primitive__SHIFT 0x1e
#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_prim_valid_MASK 0x80000000
#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_prim_valid__SHIFT 0x1f
#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_clip_code_or_MASK 0x3fff
#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_clip_code_or__SHIFT 0x0
#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_event_id_MASK 0xfc000
#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_event_id__SHIFT 0xe
#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_state_var_indx_MASK 0x700000
#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_state_var_indx__SHIFT 0x14
#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_clip_primitive_MASK 0x800000
#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_clip_primitive__SHIFT 0x17
#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_deallocate_slot_MASK 0x7000000
#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_deallocate_slot__SHIFT 0x18
#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_first_prim_of_slot_MASK 0x8000000
#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_first_prim_of_slot__SHIFT 0x1b
#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_end_of_packet_MASK 0x10000000
#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_end_of_packet__SHIFT 0x1c
#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_event_MASK 0x20000000
#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_event__SHIFT 0x1d
#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_null_primitive_MASK 0x40000000
#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_null_primitive__SHIFT 0x1e
#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_prim_valid_MASK 0x80000000
#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_prim_valid__SHIFT 0x1f
#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_param_cache_indx_0_MASK 0x7fe
#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_param_cache_indx_0__SHIFT 0x1
#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_vertex_store_indx_2_MASK 0x1f800
#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_vertex_store_indx_2__SHIFT 0xb
#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_vertex_store_indx_1_MASK 0x7e0000
#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_vertex_store_indx_1__SHIFT 0x11
#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_vertex_store_indx_0_MASK 0x1f800000
#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_vertex_store_indx_0__SHIFT 0x17
#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_event_MASK 0x20000000
#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_event__SHIFT 0x1d
#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_null_primitive_MASK 0x40000000
#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_null_primitive__SHIFT 0x1e
#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_prim_valid_MASK 0x80000000
#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_prim_valid__SHIFT 0x1f
#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_clip_code_or_MASK 0x3fff
#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_clip_code_or__SHIFT 0x0
#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_event_id_MASK 0xfc000
#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_event_id__SHIFT 0xe
#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_state_var_indx_MASK 0x700000
#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_state_var_indx__SHIFT 0x14
#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_clip_primitive_MASK 0x800000
#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_clip_primitive__SHIFT 0x17
#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_deallocate_slot_MASK 0x7000000
#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_deallocate_slot__SHIFT 0x18
#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_first_prim_of_slot_MASK 0x8000000
#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_first_prim_of_slot__SHIFT 0x1b
#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_end_of_packet_MASK 0x10000000
#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_end_of_packet__SHIFT 0x1c
#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_event_MASK 0x20000000
#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_event__SHIFT 0x1d
#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_null_primitive_MASK 0x40000000
#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_null_primitive__SHIFT 0x1e
#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_prim_valid_MASK 0x80000000
#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_prim_valid__SHIFT 0x1f
#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_param_cache_indx_0_MASK 0x7fe
#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_param_cache_indx_0__SHIFT 0x1
#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_vertex_store_indx_2_MASK 0x1f800
#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_vertex_store_indx_2__SHIFT 0xb
#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_vertex_store_indx_1_MASK 0x7e0000
#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_vertex_store_indx_1__SHIFT 0x11
#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_vertex_store_indx_0_MASK 0x1f800000
#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_vertex_store_indx_0__SHIFT 0x17
#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_event_MASK 0x20000000
#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_event__SHIFT 0x1d
#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_null_primitive_MASK 0x40000000
#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_null_primitive__SHIFT 0x1e
#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_prim_valid_MASK 0x80000000
#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_prim_valid__SHIFT 0x1f
/* CLIPPER_DEBUG_REG11..REG13: per-field bit mask and shift-amount pairs.
 * Fix: stripped the web-scrape line numbers (8395..8498) that were fused
 * onto each #define and made the header invalid C.
 */
#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_event_MASK 0x1
#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_event__SHIFT 0x0
#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_event_MASK 0x2
#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_event__SHIFT 0x1
#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_event_MASK 0x4
#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_event__SHIFT 0x2
#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_event_MASK 0x8
#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_event__SHIFT 0x3
#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_clip_primitive_MASK 0x10
#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_clip_primitive__SHIFT 0x4
#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_clip_primitive_MASK 0x20
#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_clip_primitive__SHIFT 0x5
#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_clip_primitive_MASK 0x40
#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_clip_primitive__SHIFT 0x6
#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_clip_primitive_MASK 0x80
#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_clip_primitive__SHIFT 0x7
#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_clip_to_outsm_cnt_MASK 0xf00
#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0x8
#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_clip_to_outsm_cnt_MASK 0xf000
#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0xc
#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_clip_to_outsm_cnt_MASK 0xf0000
#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0x10
#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_clip_to_outsm_cnt_MASK 0xf00000
#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0x14
#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_prim_valid_MASK 0x1000000
#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_prim_valid__SHIFT 0x18
#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_prim_valid_MASK 0x2000000
#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_prim_valid__SHIFT 0x19
#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_prim_valid_MASK 0x4000000
#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_prim_valid__SHIFT 0x1a
#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_prim_valid_MASK 0x8000000
#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_prim_valid__SHIFT 0x1b
#define CLIPPER_DEBUG_REG11__clipsm3_inc_clip_to_clipga_clip_to_outsm_cnt_MASK 0x10000000
#define CLIPPER_DEBUG_REG11__clipsm3_inc_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0x1c
#define CLIPPER_DEBUG_REG11__clipsm2_inc_clip_to_clipga_clip_to_outsm_cnt_MASK 0x20000000
#define CLIPPER_DEBUG_REG11__clipsm2_inc_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0x1d
#define CLIPPER_DEBUG_REG11__clipsm1_inc_clip_to_clipga_clip_to_outsm_cnt_MASK 0x40000000
#define CLIPPER_DEBUG_REG11__clipsm1_inc_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0x1e
#define CLIPPER_DEBUG_REG11__clipsm0_inc_clip_to_clipga_clip_to_outsm_cnt_MASK 0x80000000
#define CLIPPER_DEBUG_REG11__clipsm0_inc_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0x1f
#define CLIPPER_DEBUG_REG12__ALWAYS_ZERO_MASK 0xff
#define CLIPPER_DEBUG_REG12__ALWAYS_ZERO__SHIFT 0x0
#define CLIPPER_DEBUG_REG12__clip_priority_available_vte_out_clip_MASK 0x1f00
#define CLIPPER_DEBUG_REG12__clip_priority_available_vte_out_clip__SHIFT 0x8
#define CLIPPER_DEBUG_REG12__clip_priority_available_clip_verts_MASK 0x3e000
#define CLIPPER_DEBUG_REG12__clip_priority_available_clip_verts__SHIFT 0xd
#define CLIPPER_DEBUG_REG12__clip_priority_seq_indx_out_MASK 0xc0000
#define CLIPPER_DEBUG_REG12__clip_priority_seq_indx_out__SHIFT 0x12
#define CLIPPER_DEBUG_REG12__clip_priority_seq_indx_vert_MASK 0x300000
#define CLIPPER_DEBUG_REG12__clip_priority_seq_indx_vert__SHIFT 0x14
#define CLIPPER_DEBUG_REG12__clip_priority_seq_indx_load_MASK 0xc00000
#define CLIPPER_DEBUG_REG12__clip_priority_seq_indx_load__SHIFT 0x16
#define CLIPPER_DEBUG_REG12__clipsm3_clprim_to_clip_clip_primitive_MASK 0x1000000
#define CLIPPER_DEBUG_REG12__clipsm3_clprim_to_clip_clip_primitive__SHIFT 0x18
#define CLIPPER_DEBUG_REG12__clipsm3_clprim_to_clip_prim_valid_MASK 0x2000000
#define CLIPPER_DEBUG_REG12__clipsm3_clprim_to_clip_prim_valid__SHIFT 0x19
#define CLIPPER_DEBUG_REG12__clipsm2_clprim_to_clip_clip_primitive_MASK 0x4000000
#define CLIPPER_DEBUG_REG12__clipsm2_clprim_to_clip_clip_primitive__SHIFT 0x1a
#define CLIPPER_DEBUG_REG12__clipsm2_clprim_to_clip_prim_valid_MASK 0x8000000
#define CLIPPER_DEBUG_REG12__clipsm2_clprim_to_clip_prim_valid__SHIFT 0x1b
#define CLIPPER_DEBUG_REG12__clipsm1_clprim_to_clip_clip_primitive_MASK 0x10000000
#define CLIPPER_DEBUG_REG12__clipsm1_clprim_to_clip_clip_primitive__SHIFT 0x1c
#define CLIPPER_DEBUG_REG12__clipsm1_clprim_to_clip_prim_valid_MASK 0x20000000
#define CLIPPER_DEBUG_REG12__clipsm1_clprim_to_clip_prim_valid__SHIFT 0x1d
#define CLIPPER_DEBUG_REG12__clipsm0_clprim_to_clip_clip_primitive_MASK 0x40000000
#define CLIPPER_DEBUG_REG12__clipsm0_clprim_to_clip_clip_primitive__SHIFT 0x1e
#define CLIPPER_DEBUG_REG12__clipsm0_clprim_to_clip_prim_valid_MASK 0x80000000
#define CLIPPER_DEBUG_REG12__clipsm0_clprim_to_clip_prim_valid__SHIFT 0x1f
#define CLIPPER_DEBUG_REG13__clprim_in_back_state_var_indx_MASK 0x7
#define CLIPPER_DEBUG_REG13__clprim_in_back_state_var_indx__SHIFT 0x0
#define CLIPPER_DEBUG_REG13__point_clip_candidate_MASK 0x8
#define CLIPPER_DEBUG_REG13__point_clip_candidate__SHIFT 0x3
#define CLIPPER_DEBUG_REG13__prim_nan_kill_MASK 0x10
#define CLIPPER_DEBUG_REG13__prim_nan_kill__SHIFT 0x4
#define CLIPPER_DEBUG_REG13__clprim_clip_primitive_MASK 0x20
#define CLIPPER_DEBUG_REG13__clprim_clip_primitive__SHIFT 0x5
#define CLIPPER_DEBUG_REG13__clprim_cull_primitive_MASK 0x40
#define CLIPPER_DEBUG_REG13__clprim_cull_primitive__SHIFT 0x6
#define CLIPPER_DEBUG_REG13__prim_back_valid_MASK 0x80
#define CLIPPER_DEBUG_REG13__prim_back_valid__SHIFT 0x7
#define CLIPPER_DEBUG_REG13__vertval_bits_vertex_cc_next_valid_MASK 0xf00
#define CLIPPER_DEBUG_REG13__vertval_bits_vertex_cc_next_valid__SHIFT 0x8
#define CLIPPER_DEBUG_REG13__clipcc_vertex_store_indx_MASK 0x3000
#define CLIPPER_DEBUG_REG13__clipcc_vertex_store_indx__SHIFT 0xc
#define CLIPPER_DEBUG_REG13__vte_out_orig_fifo_fifo_empty_MASK 0x4000
#define CLIPPER_DEBUG_REG13__vte_out_orig_fifo_fifo_empty__SHIFT 0xe
#define CLIPPER_DEBUG_REG13__clipcode_fifo_fifo_empty_MASK 0x8000
#define CLIPPER_DEBUG_REG13__clipcode_fifo_fifo_empty__SHIFT 0xf
#define CLIPPER_DEBUG_REG13__ccgen_to_clipcc_fifo_empty_MASK 0x10000
#define CLIPPER_DEBUG_REG13__ccgen_to_clipcc_fifo_empty__SHIFT 0x10
#define CLIPPER_DEBUG_REG13__clip_priority_seq_indx_out_cnt_MASK 0x1e0000
#define CLIPPER_DEBUG_REG13__clip_priority_seq_indx_out_cnt__SHIFT 0x11
#define CLIPPER_DEBUG_REG13__outsm_clr_rd_orig_vertices_MASK 0x600000
#define CLIPPER_DEBUG_REG13__outsm_clr_rd_orig_vertices__SHIFT 0x15
#define CLIPPER_DEBUG_REG13__outsm_clr_rd_clipsm_wait_MASK 0x800000
#define CLIPPER_DEBUG_REG13__outsm_clr_rd_clipsm_wait__SHIFT 0x17
#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_contents_MASK 0x1f000000
#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_contents__SHIFT 0x18
#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_full_MASK 0x20000000
#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_full__SHIFT 0x1d
#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_advanceread_MASK 0x40000000
#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_advanceread__SHIFT 0x1e
#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_write_MASK 0x80000000
#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_write__SHIFT 0x1f
/* CLIPPER_DEBUG_REG14..REG18: per-field bit mask and shift-amount pairs
 * (REG18 continues past this region; each #define is self-contained).
 * Fix: stripped the web-scrape line numbers (8499..8589) that were fused
 * onto each #define and made the header invalid C.
 */
#define CLIPPER_DEBUG_REG14__clprim_in_back_vertex_store_indx_2_MASK 0x3f
#define CLIPPER_DEBUG_REG14__clprim_in_back_vertex_store_indx_2__SHIFT 0x0
#define CLIPPER_DEBUG_REG14__clprim_in_back_vertex_store_indx_1_MASK 0xfc0
#define CLIPPER_DEBUG_REG14__clprim_in_back_vertex_store_indx_1__SHIFT 0x6
#define CLIPPER_DEBUG_REG14__clprim_in_back_vertex_store_indx_0_MASK 0x3f000
#define CLIPPER_DEBUG_REG14__clprim_in_back_vertex_store_indx_0__SHIFT 0xc
#define CLIPPER_DEBUG_REG14__outputclprimtoclip_null_primitive_MASK 0x40000
#define CLIPPER_DEBUG_REG14__outputclprimtoclip_null_primitive__SHIFT 0x12
#define CLIPPER_DEBUG_REG14__clprim_in_back_end_of_packet_MASK 0x80000
#define CLIPPER_DEBUG_REG14__clprim_in_back_end_of_packet__SHIFT 0x13
#define CLIPPER_DEBUG_REG14__clprim_in_back_first_prim_of_slot_MASK 0x100000
#define CLIPPER_DEBUG_REG14__clprim_in_back_first_prim_of_slot__SHIFT 0x14
#define CLIPPER_DEBUG_REG14__clprim_in_back_deallocate_slot_MASK 0xe00000
#define CLIPPER_DEBUG_REG14__clprim_in_back_deallocate_slot__SHIFT 0x15
#define CLIPPER_DEBUG_REG14__clprim_in_back_event_id_MASK 0x3f000000
#define CLIPPER_DEBUG_REG14__clprim_in_back_event_id__SHIFT 0x18
#define CLIPPER_DEBUG_REG14__clprim_in_back_event_MASK 0x40000000
#define CLIPPER_DEBUG_REG14__clprim_in_back_event__SHIFT 0x1e
#define CLIPPER_DEBUG_REG14__prim_back_valid_MASK 0x80000000
#define CLIPPER_DEBUG_REG14__prim_back_valid__SHIFT 0x1f
#define CLIPPER_DEBUG_REG15__vertval_bits_vertex_vertex_store_msb_MASK 0xffff
#define CLIPPER_DEBUG_REG15__vertval_bits_vertex_vertex_store_msb__SHIFT 0x0
#define CLIPPER_DEBUG_REG15__primic_to_clprim_fifo_vertex_store_indx_2_MASK 0x1f0000
#define CLIPPER_DEBUG_REG15__primic_to_clprim_fifo_vertex_store_indx_2__SHIFT 0x10
#define CLIPPER_DEBUG_REG15__primic_to_clprim_fifo_vertex_store_indx_1_MASK 0x3e00000
#define CLIPPER_DEBUG_REG15__primic_to_clprim_fifo_vertex_store_indx_1__SHIFT 0x15
#define CLIPPER_DEBUG_REG15__primic_to_clprim_fifo_vertex_store_indx_0_MASK 0x7c000000
#define CLIPPER_DEBUG_REG15__primic_to_clprim_fifo_vertex_store_indx_0__SHIFT 0x1a
#define CLIPPER_DEBUG_REG15__primic_to_clprim_valid_MASK 0x80000000
#define CLIPPER_DEBUG_REG15__primic_to_clprim_valid__SHIFT 0x1f
#define CLIPPER_DEBUG_REG16__sm0_prim_end_state_MASK 0x7f
#define CLIPPER_DEBUG_REG16__sm0_prim_end_state__SHIFT 0x0
#define CLIPPER_DEBUG_REG16__sm0_ps_expand_MASK 0x80
#define CLIPPER_DEBUG_REG16__sm0_ps_expand__SHIFT 0x7
#define CLIPPER_DEBUG_REG16__sm0_clip_vert_cnt_MASK 0x1f00
#define CLIPPER_DEBUG_REG16__sm0_clip_vert_cnt__SHIFT 0x8
#define CLIPPER_DEBUG_REG16__sm0_vertex_clip_cnt_MASK 0x3e000
#define CLIPPER_DEBUG_REG16__sm0_vertex_clip_cnt__SHIFT 0xd
#define CLIPPER_DEBUG_REG16__sm0_inv_to_clip_data_valid_1_MASK 0x40000
#define CLIPPER_DEBUG_REG16__sm0_inv_to_clip_data_valid_1__SHIFT 0x12
#define CLIPPER_DEBUG_REG16__sm0_inv_to_clip_data_valid_0_MASK 0x80000
#define CLIPPER_DEBUG_REG16__sm0_inv_to_clip_data_valid_0__SHIFT 0x13
#define CLIPPER_DEBUG_REG16__sm0_current_state_MASK 0x7f00000
#define CLIPPER_DEBUG_REG16__sm0_current_state__SHIFT 0x14
#define CLIPPER_DEBUG_REG16__sm0_clip_to_clipga_clip_to_outsm_cnt_eq0_MASK 0x8000000
#define CLIPPER_DEBUG_REG16__sm0_clip_to_clipga_clip_to_outsm_cnt_eq0__SHIFT 0x1b
#define CLIPPER_DEBUG_REG16__sm0_clip_to_outsm_fifo_full_MASK 0x10000000
#define CLIPPER_DEBUG_REG16__sm0_clip_to_outsm_fifo_full__SHIFT 0x1c
#define CLIPPER_DEBUG_REG16__sm0_highest_priority_seq_MASK 0x20000000
#define CLIPPER_DEBUG_REG16__sm0_highest_priority_seq__SHIFT 0x1d
#define CLIPPER_DEBUG_REG16__sm0_outputcliptoclipga_0_MASK 0x40000000
#define CLIPPER_DEBUG_REG16__sm0_outputcliptoclipga_0__SHIFT 0x1e
#define CLIPPER_DEBUG_REG16__sm0_clprim_to_clip_prim_valid_MASK 0x80000000
#define CLIPPER_DEBUG_REG16__sm0_clprim_to_clip_prim_valid__SHIFT 0x1f
#define CLIPPER_DEBUG_REG17__sm1_prim_end_state_MASK 0x7f
#define CLIPPER_DEBUG_REG17__sm1_prim_end_state__SHIFT 0x0
#define CLIPPER_DEBUG_REG17__sm1_ps_expand_MASK 0x80
#define CLIPPER_DEBUG_REG17__sm1_ps_expand__SHIFT 0x7
#define CLIPPER_DEBUG_REG17__sm1_clip_vert_cnt_MASK 0x1f00
#define CLIPPER_DEBUG_REG17__sm1_clip_vert_cnt__SHIFT 0x8
#define CLIPPER_DEBUG_REG17__sm1_vertex_clip_cnt_MASK 0x3e000
#define CLIPPER_DEBUG_REG17__sm1_vertex_clip_cnt__SHIFT 0xd
#define CLIPPER_DEBUG_REG17__sm1_inv_to_clip_data_valid_1_MASK 0x40000
#define CLIPPER_DEBUG_REG17__sm1_inv_to_clip_data_valid_1__SHIFT 0x12
#define CLIPPER_DEBUG_REG17__sm1_inv_to_clip_data_valid_0_MASK 0x80000
#define CLIPPER_DEBUG_REG17__sm1_inv_to_clip_data_valid_0__SHIFT 0x13
#define CLIPPER_DEBUG_REG17__sm1_current_state_MASK 0x7f00000
#define CLIPPER_DEBUG_REG17__sm1_current_state__SHIFT 0x14
#define CLIPPER_DEBUG_REG17__sm1_clip_to_clipga_clip_to_outsm_cnt_eq0_MASK 0x8000000
#define CLIPPER_DEBUG_REG17__sm1_clip_to_clipga_clip_to_outsm_cnt_eq0__SHIFT 0x1b
#define CLIPPER_DEBUG_REG17__sm1_clip_to_outsm_fifo_full_MASK 0x10000000
#define CLIPPER_DEBUG_REG17__sm1_clip_to_outsm_fifo_full__SHIFT 0x1c
#define CLIPPER_DEBUG_REG17__sm1_highest_priority_seq_MASK 0x20000000
#define CLIPPER_DEBUG_REG17__sm1_highest_priority_seq__SHIFT 0x1d
#define CLIPPER_DEBUG_REG17__sm1_outputcliptoclipga_0_MASK 0x40000000
#define CLIPPER_DEBUG_REG17__sm1_outputcliptoclipga_0__SHIFT 0x1e
#define CLIPPER_DEBUG_REG17__sm1_clprim_to_clip_prim_valid_MASK 0x80000000
#define CLIPPER_DEBUG_REG17__sm1_clprim_to_clip_prim_valid__SHIFT 0x1f
#define CLIPPER_DEBUG_REG18__sm2_prim_end_state_MASK 0x7f
#define CLIPPER_DEBUG_REG18__sm2_prim_end_state__SHIFT 0x0
#define CLIPPER_DEBUG_REG18__sm2_ps_expand_MASK 0x80
#define CLIPPER_DEBUG_REG18__sm2_ps_expand__SHIFT 0x7
#define CLIPPER_DEBUG_REG18__sm2_clip_vert_cnt_MASK 0x1f00
#define CLIPPER_DEBUG_REG18__sm2_clip_vert_cnt__SHIFT 0x8
#define CLIPPER_DEBUG_REG18__sm2_vertex_clip_cnt_MASK 0x3e000
#define CLIPPER_DEBUG_REG18__sm2_vertex_clip_cnt__SHIFT 0xd
#define CLIPPER_DEBUG_REG18__sm2_inv_to_clip_data_valid_1_MASK 0x40000
#define CLIPPER_DEBUG_REG18__sm2_inv_to_clip_data_valid_1__SHIFT 0x12
#define CLIPPER_DEBUG_REG18__sm2_inv_to_clip_data_valid_0_MASK 0x80000
#define CLIPPER_DEBUG_REG18__sm2_inv_to_clip_data_valid_0__SHIFT 0x13
#define CLIPPER_DEBUG_REG18__sm2_current_state_MASK 0x7f00000
8590#define CLIPPER_DEBUG_REG18__sm2_current_state__SHIFT 0x14
8591#define CLIPPER_DEBUG_REG18__sm2_clip_to_clipga_clip_to_outsm_cnt_eq0_MASK 0x8000000
8592#define CLIPPER_DEBUG_REG18__sm2_clip_to_clipga_clip_to_outsm_cnt_eq0__SHIFT 0x1b
8593#define CLIPPER_DEBUG_REG18__sm2_clip_to_outsm_fifo_full_MASK 0x10000000
8594#define CLIPPER_DEBUG_REG18__sm2_clip_to_outsm_fifo_full__SHIFT 0x1c
8595#define CLIPPER_DEBUG_REG18__sm2_highest_priority_seq_MASK 0x20000000
8596#define CLIPPER_DEBUG_REG18__sm2_highest_priority_seq__SHIFT 0x1d
8597#define CLIPPER_DEBUG_REG18__sm2_outputcliptoclipga_0_MASK 0x40000000
8598#define CLIPPER_DEBUG_REG18__sm2_outputcliptoclipga_0__SHIFT 0x1e
8599#define CLIPPER_DEBUG_REG18__sm2_clprim_to_clip_prim_valid_MASK 0x80000000
8600#define CLIPPER_DEBUG_REG18__sm2_clprim_to_clip_prim_valid__SHIFT 0x1f
8601#define CLIPPER_DEBUG_REG19__sm3_prim_end_state_MASK 0x7f
8602#define CLIPPER_DEBUG_REG19__sm3_prim_end_state__SHIFT 0x0
8603#define CLIPPER_DEBUG_REG19__sm3_ps_expand_MASK 0x80
8604#define CLIPPER_DEBUG_REG19__sm3_ps_expand__SHIFT 0x7
8605#define CLIPPER_DEBUG_REG19__sm3_clip_vert_cnt_MASK 0x1f00
8606#define CLIPPER_DEBUG_REG19__sm3_clip_vert_cnt__SHIFT 0x8
8607#define CLIPPER_DEBUG_REG19__sm3_vertex_clip_cnt_MASK 0x3e000
8608#define CLIPPER_DEBUG_REG19__sm3_vertex_clip_cnt__SHIFT 0xd
8609#define CLIPPER_DEBUG_REG19__sm3_inv_to_clip_data_valid_1_MASK 0x40000
8610#define CLIPPER_DEBUG_REG19__sm3_inv_to_clip_data_valid_1__SHIFT 0x12
8611#define CLIPPER_DEBUG_REG19__sm3_inv_to_clip_data_valid_0_MASK 0x80000
8612#define CLIPPER_DEBUG_REG19__sm3_inv_to_clip_data_valid_0__SHIFT 0x13
8613#define CLIPPER_DEBUG_REG19__sm3_current_state_MASK 0x7f00000
8614#define CLIPPER_DEBUG_REG19__sm3_current_state__SHIFT 0x14
8615#define CLIPPER_DEBUG_REG19__sm3_clip_to_clipga_clip_to_outsm_cnt_eq0_MASK 0x8000000
8616#define CLIPPER_DEBUG_REG19__sm3_clip_to_clipga_clip_to_outsm_cnt_eq0__SHIFT 0x1b
8617#define CLIPPER_DEBUG_REG19__sm3_clip_to_outsm_fifo_full_MASK 0x10000000
8618#define CLIPPER_DEBUG_REG19__sm3_clip_to_outsm_fifo_full__SHIFT 0x1c
8619#define CLIPPER_DEBUG_REG19__sm3_highest_priority_seq_MASK 0x20000000
8620#define CLIPPER_DEBUG_REG19__sm3_highest_priority_seq__SHIFT 0x1d
8621#define CLIPPER_DEBUG_REG19__sm3_outputcliptoclipga_0_MASK 0x40000000
8622#define CLIPPER_DEBUG_REG19__sm3_outputcliptoclipga_0__SHIFT 0x1e
8623#define CLIPPER_DEBUG_REG19__sm3_clprim_to_clip_prim_valid_MASK 0x80000000
8624#define CLIPPER_DEBUG_REG19__sm3_clprim_to_clip_prim_valid__SHIFT 0x1f
8625#define SXIFCCG_DEBUG_REG0__position_address_MASK 0x3f
8626#define SXIFCCG_DEBUG_REG0__position_address__SHIFT 0x0
8627#define SXIFCCG_DEBUG_REG0__point_address_MASK 0x1c0
8628#define SXIFCCG_DEBUG_REG0__point_address__SHIFT 0x6
8629#define SXIFCCG_DEBUG_REG0__sx_pending_rd_state_var_indx_MASK 0xe00
8630#define SXIFCCG_DEBUG_REG0__sx_pending_rd_state_var_indx__SHIFT 0x9
8631#define SXIFCCG_DEBUG_REG0__sx_pending_rd_req_mask_MASK 0xf000
8632#define SXIFCCG_DEBUG_REG0__sx_pending_rd_req_mask__SHIFT 0xc
8633#define SXIFCCG_DEBUG_REG0__sx_pending_rd_pci_MASK 0x3ff0000
8634#define SXIFCCG_DEBUG_REG0__sx_pending_rd_pci__SHIFT 0x10
8635#define SXIFCCG_DEBUG_REG0__sx_pending_rd_aux_sel_MASK 0xc000000
8636#define SXIFCCG_DEBUG_REG0__sx_pending_rd_aux_sel__SHIFT 0x1a
8637#define SXIFCCG_DEBUG_REG0__sx_pending_rd_sp_id_MASK 0x30000000
8638#define SXIFCCG_DEBUG_REG0__sx_pending_rd_sp_id__SHIFT 0x1c
8639#define SXIFCCG_DEBUG_REG0__sx_pending_rd_aux_inc_MASK 0x40000000
8640#define SXIFCCG_DEBUG_REG0__sx_pending_rd_aux_inc__SHIFT 0x1e
8641#define SXIFCCG_DEBUG_REG0__sx_pending_rd_advance_MASK 0x80000000
8642#define SXIFCCG_DEBUG_REG0__sx_pending_rd_advance__SHIFT 0x1f
8643#define SXIFCCG_DEBUG_REG1__available_positions_MASK 0x7f
8644#define SXIFCCG_DEBUG_REG1__available_positions__SHIFT 0x0
8645#define SXIFCCG_DEBUG_REG1__sx_receive_indx_MASK 0x380
8646#define SXIFCCG_DEBUG_REG1__sx_receive_indx__SHIFT 0x7
8647#define SXIFCCG_DEBUG_REG1__sx_pending_fifo_contents_MASK 0x7c00
8648#define SXIFCCG_DEBUG_REG1__sx_pending_fifo_contents__SHIFT 0xa
8649#define SXIFCCG_DEBUG_REG1__statevar_bits_vs_out_misc_vec_ena_MASK 0x8000
8650#define SXIFCCG_DEBUG_REG1__statevar_bits_vs_out_misc_vec_ena__SHIFT 0xf
8651#define SXIFCCG_DEBUG_REG1__statevar_bits_disable_sp_MASK 0xf0000
8652#define SXIFCCG_DEBUG_REG1__statevar_bits_disable_sp__SHIFT 0x10
8653#define SXIFCCG_DEBUG_REG1__aux_sel_MASK 0x300000
8654#define SXIFCCG_DEBUG_REG1__aux_sel__SHIFT 0x14
8655#define SXIFCCG_DEBUG_REG1__sx_to_pa_empty_1_MASK 0x400000
8656#define SXIFCCG_DEBUG_REG1__sx_to_pa_empty_1__SHIFT 0x16
8657#define SXIFCCG_DEBUG_REG1__sx_to_pa_empty_0_MASK 0x800000
8658#define SXIFCCG_DEBUG_REG1__sx_to_pa_empty_0__SHIFT 0x17
8659#define SXIFCCG_DEBUG_REG1__pasx_req_cnt_1_MASK 0xf000000
8660#define SXIFCCG_DEBUG_REG1__pasx_req_cnt_1__SHIFT 0x18
8661#define SXIFCCG_DEBUG_REG1__pasx_req_cnt_0_MASK 0xf0000000
8662#define SXIFCCG_DEBUG_REG1__pasx_req_cnt_0__SHIFT 0x1c
8663#define SXIFCCG_DEBUG_REG2__param_cache_base_MASK 0x7f
8664#define SXIFCCG_DEBUG_REG2__param_cache_base__SHIFT 0x0
8665#define SXIFCCG_DEBUG_REG2__sx_aux_MASK 0x180
8666#define SXIFCCG_DEBUG_REG2__sx_aux__SHIFT 0x7
8667#define SXIFCCG_DEBUG_REG2__sx_request_indx_MASK 0x7e00
8668#define SXIFCCG_DEBUG_REG2__sx_request_indx__SHIFT 0x9
8669#define SXIFCCG_DEBUG_REG2__req_active_verts_loaded_MASK 0x8000
8670#define SXIFCCG_DEBUG_REG2__req_active_verts_loaded__SHIFT 0xf
8671#define SXIFCCG_DEBUG_REG2__req_active_verts_MASK 0x7f0000
8672#define SXIFCCG_DEBUG_REG2__req_active_verts__SHIFT 0x10
8673#define SXIFCCG_DEBUG_REG2__vgt_to_ccgen_state_var_indx_MASK 0x3800000
8674#define SXIFCCG_DEBUG_REG2__vgt_to_ccgen_state_var_indx__SHIFT 0x17
8675#define SXIFCCG_DEBUG_REG2__vgt_to_ccgen_active_verts_MASK 0xfc000000
8676#define SXIFCCG_DEBUG_REG2__vgt_to_ccgen_active_verts__SHIFT 0x1a
8677#define SXIFCCG_DEBUG_REG3__ALWAYS_ZERO_MASK 0xff
8678#define SXIFCCG_DEBUG_REG3__ALWAYS_ZERO__SHIFT 0x0
8679#define SXIFCCG_DEBUG_REG3__vertex_fifo_entriesavailable_MASK 0xf00
8680#define SXIFCCG_DEBUG_REG3__vertex_fifo_entriesavailable__SHIFT 0x8
8681#define SXIFCCG_DEBUG_REG3__statevar_bits_vs_out_ccdist1_vec_ena_MASK 0x1000
8682#define SXIFCCG_DEBUG_REG3__statevar_bits_vs_out_ccdist1_vec_ena__SHIFT 0xc
8683#define SXIFCCG_DEBUG_REG3__statevar_bits_vs_out_ccdist0_vec_ena_MASK 0x2000
8684#define SXIFCCG_DEBUG_REG3__statevar_bits_vs_out_ccdist0_vec_ena__SHIFT 0xd
8685#define SXIFCCG_DEBUG_REG3__available_positions_MASK 0x1fc000
8686#define SXIFCCG_DEBUG_REG3__available_positions__SHIFT 0xe
8687#define SXIFCCG_DEBUG_REG3__current_state_MASK 0x600000
8688#define SXIFCCG_DEBUG_REG3__current_state__SHIFT 0x15
8689#define SXIFCCG_DEBUG_REG3__vertex_fifo_empty_MASK 0x800000
8690#define SXIFCCG_DEBUG_REG3__vertex_fifo_empty__SHIFT 0x17
8691#define SXIFCCG_DEBUG_REG3__vertex_fifo_full_MASK 0x1000000
8692#define SXIFCCG_DEBUG_REG3__vertex_fifo_full__SHIFT 0x18
8693#define SXIFCCG_DEBUG_REG3__sx0_receive_fifo_empty_MASK 0x2000000
8694#define SXIFCCG_DEBUG_REG3__sx0_receive_fifo_empty__SHIFT 0x19
8695#define SXIFCCG_DEBUG_REG3__sx0_receive_fifo_full_MASK 0x4000000
8696#define SXIFCCG_DEBUG_REG3__sx0_receive_fifo_full__SHIFT 0x1a
8697#define SXIFCCG_DEBUG_REG3__vgt_to_ccgen_fifo_empty_MASK 0x8000000
8698#define SXIFCCG_DEBUG_REG3__vgt_to_ccgen_fifo_empty__SHIFT 0x1b
8699#define SXIFCCG_DEBUG_REG3__vgt_to_ccgen_fifo_full_MASK 0x10000000
8700#define SXIFCCG_DEBUG_REG3__vgt_to_ccgen_fifo_full__SHIFT 0x1c
8701#define SXIFCCG_DEBUG_REG3__ccgen_to_clipcc_fifo_full_MASK 0x20000000
8702#define SXIFCCG_DEBUG_REG3__ccgen_to_clipcc_fifo_full__SHIFT 0x1d
8703#define SXIFCCG_DEBUG_REG3__sx0_receive_fifo_write_MASK 0x40000000
8704#define SXIFCCG_DEBUG_REG3__sx0_receive_fifo_write__SHIFT 0x1e
8705#define SXIFCCG_DEBUG_REG3__ccgen_to_clipcc_write_MASK 0x80000000
8706#define SXIFCCG_DEBUG_REG3__ccgen_to_clipcc_write__SHIFT 0x1f
8707#define SETUP_DEBUG_REG0__su_baryc_cntl_state_MASK 0x3
8708#define SETUP_DEBUG_REG0__su_baryc_cntl_state__SHIFT 0x0
8709#define SETUP_DEBUG_REG0__su_cntl_state_MASK 0x3c
8710#define SETUP_DEBUG_REG0__su_cntl_state__SHIFT 0x2
8711#define SETUP_DEBUG_REG0__pmode_state_MASK 0x3f00
8712#define SETUP_DEBUG_REG0__pmode_state__SHIFT 0x8
8713#define SETUP_DEBUG_REG0__ge_stallb_MASK 0x4000
8714#define SETUP_DEBUG_REG0__ge_stallb__SHIFT 0xe
8715#define SETUP_DEBUG_REG0__geom_enable_MASK 0x8000
8716#define SETUP_DEBUG_REG0__geom_enable__SHIFT 0xf
8717#define SETUP_DEBUG_REG0__su_clip_baryc_free_MASK 0x30000
8718#define SETUP_DEBUG_REG0__su_clip_baryc_free__SHIFT 0x10
8719#define SETUP_DEBUG_REG0__su_clip_rtr_MASK 0x40000
8720#define SETUP_DEBUG_REG0__su_clip_rtr__SHIFT 0x12
8721#define SETUP_DEBUG_REG0__pfifo_busy_MASK 0x80000
8722#define SETUP_DEBUG_REG0__pfifo_busy__SHIFT 0x13
8723#define SETUP_DEBUG_REG0__su_cntl_busy_MASK 0x100000
8724#define SETUP_DEBUG_REG0__su_cntl_busy__SHIFT 0x14
8725#define SETUP_DEBUG_REG0__geom_busy_MASK 0x200000
8726#define SETUP_DEBUG_REG0__geom_busy__SHIFT 0x15
8727#define SETUP_DEBUG_REG0__event_id_gated_MASK 0xfc00000
8728#define SETUP_DEBUG_REG0__event_id_gated__SHIFT 0x16
8729#define SETUP_DEBUG_REG0__event_gated_MASK 0x10000000
8730#define SETUP_DEBUG_REG0__event_gated__SHIFT 0x1c
8731#define SETUP_DEBUG_REG0__pmode_prim_gated_MASK 0x20000000
8732#define SETUP_DEBUG_REG0__pmode_prim_gated__SHIFT 0x1d
8733#define SETUP_DEBUG_REG0__su_dyn_sclk_vld_MASK 0x40000000
8734#define SETUP_DEBUG_REG0__su_dyn_sclk_vld__SHIFT 0x1e
8735#define SETUP_DEBUG_REG0__cl_dyn_sclk_vld_MASK 0x80000000
8736#define SETUP_DEBUG_REG0__cl_dyn_sclk_vld__SHIFT 0x1f
8737#define SETUP_DEBUG_REG1__y_sort0_gated_23_8_MASK 0xffff
8738#define SETUP_DEBUG_REG1__y_sort0_gated_23_8__SHIFT 0x0
8739#define SETUP_DEBUG_REG1__x_sort0_gated_23_8_MASK 0xffff0000
8740#define SETUP_DEBUG_REG1__x_sort0_gated_23_8__SHIFT 0x10
8741#define SETUP_DEBUG_REG2__y_sort1_gated_23_8_MASK 0xffff
8742#define SETUP_DEBUG_REG2__y_sort1_gated_23_8__SHIFT 0x0
8743#define SETUP_DEBUG_REG2__x_sort1_gated_23_8_MASK 0xffff0000
8744#define SETUP_DEBUG_REG2__x_sort1_gated_23_8__SHIFT 0x10
8745#define SETUP_DEBUG_REG3__y_sort2_gated_23_8_MASK 0xffff
8746#define SETUP_DEBUG_REG3__y_sort2_gated_23_8__SHIFT 0x0
8747#define SETUP_DEBUG_REG3__x_sort2_gated_23_8_MASK 0xffff0000
8748#define SETUP_DEBUG_REG3__x_sort2_gated_23_8__SHIFT 0x10
8749#define SETUP_DEBUG_REG4__attr_indx_sort0_gated_MASK 0x3fff
8750#define SETUP_DEBUG_REG4__attr_indx_sort0_gated__SHIFT 0x0
8751#define SETUP_DEBUG_REG4__null_prim_gated_MASK 0x4000
8752#define SETUP_DEBUG_REG4__null_prim_gated__SHIFT 0xe
8753#define SETUP_DEBUG_REG4__backfacing_gated_MASK 0x8000
8754#define SETUP_DEBUG_REG4__backfacing_gated__SHIFT 0xf
8755#define SETUP_DEBUG_REG4__st_indx_gated_MASK 0x70000
8756#define SETUP_DEBUG_REG4__st_indx_gated__SHIFT 0x10
8757#define SETUP_DEBUG_REG4__clipped_gated_MASK 0x80000
8758#define SETUP_DEBUG_REG4__clipped_gated__SHIFT 0x13
8759#define SETUP_DEBUG_REG4__dealloc_slot_gated_MASK 0x700000
8760#define SETUP_DEBUG_REG4__dealloc_slot_gated__SHIFT 0x14
8761#define SETUP_DEBUG_REG4__xmajor_gated_MASK 0x800000
8762#define SETUP_DEBUG_REG4__xmajor_gated__SHIFT 0x17
8763#define SETUP_DEBUG_REG4__diamond_rule_gated_MASK 0x3000000
8764#define SETUP_DEBUG_REG4__diamond_rule_gated__SHIFT 0x18
8765#define SETUP_DEBUG_REG4__type_gated_MASK 0x1c000000
8766#define SETUP_DEBUG_REG4__type_gated__SHIFT 0x1a
8767#define SETUP_DEBUG_REG4__fpov_gated_MASK 0x60000000
8768#define SETUP_DEBUG_REG4__fpov_gated__SHIFT 0x1d
8769#define SETUP_DEBUG_REG4__eop_gated_MASK 0x80000000
8770#define SETUP_DEBUG_REG4__eop_gated__SHIFT 0x1f
8771#define SETUP_DEBUG_REG5__attr_indx_sort2_gated_MASK 0x3fff
8772#define SETUP_DEBUG_REG5__attr_indx_sort2_gated__SHIFT 0x0
8773#define SETUP_DEBUG_REG5__attr_indx_sort1_gated_MASK 0xfffc000
8774#define SETUP_DEBUG_REG5__attr_indx_sort1_gated__SHIFT 0xe
8775#define SETUP_DEBUG_REG5__provoking_vtx_gated_MASK 0x30000000
8776#define SETUP_DEBUG_REG5__provoking_vtx_gated__SHIFT 0x1c
8777#define SETUP_DEBUG_REG5__valid_prim_gated_MASK 0x40000000
8778#define SETUP_DEBUG_REG5__valid_prim_gated__SHIFT 0x1e
8779#define SETUP_DEBUG_REG5__pa_reg_sclk_vld_MASK 0x80000000
8780#define SETUP_DEBUG_REG5__pa_reg_sclk_vld__SHIFT 0x1f
8781#define PA_SC_DEBUG_REG0__REG0_FIELD0_MASK 0x3
8782#define PA_SC_DEBUG_REG0__REG0_FIELD0__SHIFT 0x0
8783#define PA_SC_DEBUG_REG0__REG0_FIELD1_MASK 0xc
8784#define PA_SC_DEBUG_REG0__REG0_FIELD1__SHIFT 0x2
8785#define PA_SC_DEBUG_REG1__REG1_FIELD0_MASK 0x3
8786#define PA_SC_DEBUG_REG1__REG1_FIELD0__SHIFT 0x0
8787#define PA_SC_DEBUG_REG1__REG1_FIELD1_MASK 0xc
8788#define PA_SC_DEBUG_REG1__REG1_FIELD1__SHIFT 0x2
8789#define COMPUTE_DISPATCH_INITIATOR__COMPUTE_SHADER_EN_MASK 0x1
8790#define COMPUTE_DISPATCH_INITIATOR__COMPUTE_SHADER_EN__SHIFT 0x0
8791#define COMPUTE_DISPATCH_INITIATOR__PARTIAL_TG_EN_MASK 0x2
8792#define COMPUTE_DISPATCH_INITIATOR__PARTIAL_TG_EN__SHIFT 0x1
8793#define COMPUTE_DISPATCH_INITIATOR__FORCE_START_AT_000_MASK 0x4
8794#define COMPUTE_DISPATCH_INITIATOR__FORCE_START_AT_000__SHIFT 0x2
8795#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_ENBL_MASK 0x8
8796#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_ENBL__SHIFT 0x3
8797#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_MODE_MASK 0x10
8798#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_MODE__SHIFT 0x4
8799#define COMPUTE_DISPATCH_INITIATOR__USE_THREAD_DIMENSIONS_MASK 0x20
8800#define COMPUTE_DISPATCH_INITIATOR__USE_THREAD_DIMENSIONS__SHIFT 0x5
8801#define COMPUTE_DISPATCH_INITIATOR__ORDER_MODE_MASK 0x40
8802#define COMPUTE_DISPATCH_INITIATOR__ORDER_MODE__SHIFT 0x6
8803#define COMPUTE_DISPATCH_INITIATOR__DISPATCH_CACHE_CNTL_MASK 0x380
8804#define COMPUTE_DISPATCH_INITIATOR__DISPATCH_CACHE_CNTL__SHIFT 0x7
8805#define COMPUTE_DISPATCH_INITIATOR__SCALAR_L1_INV_VOL_MASK 0x400
8806#define COMPUTE_DISPATCH_INITIATOR__SCALAR_L1_INV_VOL__SHIFT 0xa
8807#define COMPUTE_DISPATCH_INITIATOR__VECTOR_L1_INV_VOL_MASK 0x800
8808#define COMPUTE_DISPATCH_INITIATOR__VECTOR_L1_INV_VOL__SHIFT 0xb
8809#define COMPUTE_DISPATCH_INITIATOR__DATA_ATC_MASK 0x1000
8810#define COMPUTE_DISPATCH_INITIATOR__DATA_ATC__SHIFT 0xc
8811#define COMPUTE_DISPATCH_INITIATOR__RESTORE_MASK 0x4000
8812#define COMPUTE_DISPATCH_INITIATOR__RESTORE__SHIFT 0xe
8813#define COMPUTE_DIM_X__SIZE_MASK 0xffffffff
8814#define COMPUTE_DIM_X__SIZE__SHIFT 0x0
8815#define COMPUTE_DIM_Y__SIZE_MASK 0xffffffff
8816#define COMPUTE_DIM_Y__SIZE__SHIFT 0x0
8817#define COMPUTE_DIM_Z__SIZE_MASK 0xffffffff
8818#define COMPUTE_DIM_Z__SIZE__SHIFT 0x0
8819#define COMPUTE_START_X__START_MASK 0xffffffff
8820#define COMPUTE_START_X__START__SHIFT 0x0
8821#define COMPUTE_START_Y__START_MASK 0xffffffff
8822#define COMPUTE_START_Y__START__SHIFT 0x0
8823#define COMPUTE_START_Z__START_MASK 0xffffffff
8824#define COMPUTE_START_Z__START__SHIFT 0x0
8825#define COMPUTE_NUM_THREAD_X__NUM_THREAD_FULL_MASK 0xffff
8826#define COMPUTE_NUM_THREAD_X__NUM_THREAD_FULL__SHIFT 0x0
8827#define COMPUTE_NUM_THREAD_X__NUM_THREAD_PARTIAL_MASK 0xffff0000
8828#define COMPUTE_NUM_THREAD_X__NUM_THREAD_PARTIAL__SHIFT 0x10
8829#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_FULL_MASK 0xffff
8830#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_FULL__SHIFT 0x0
8831#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_PARTIAL_MASK 0xffff0000
8832#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_PARTIAL__SHIFT 0x10
8833#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_FULL_MASK 0xffff
8834#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_FULL__SHIFT 0x0
8835#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_PARTIAL_MASK 0xffff0000
8836#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_PARTIAL__SHIFT 0x10
8837#define COMPUTE_PIPELINESTAT_ENABLE__PIPELINESTAT_ENABLE_MASK 0x1
8838#define COMPUTE_PIPELINESTAT_ENABLE__PIPELINESTAT_ENABLE__SHIFT 0x0
8839#define COMPUTE_PERFCOUNT_ENABLE__PERFCOUNT_ENABLE_MASK 0x1
8840#define COMPUTE_PERFCOUNT_ENABLE__PERFCOUNT_ENABLE__SHIFT 0x0
8841#define COMPUTE_PGM_LO__DATA_MASK 0xffffffff
8842#define COMPUTE_PGM_LO__DATA__SHIFT 0x0
8843#define COMPUTE_PGM_HI__DATA_MASK 0xff
8844#define COMPUTE_PGM_HI__DATA__SHIFT 0x0
8845#define COMPUTE_PGM_HI__INST_ATC_MASK 0x100
8846#define COMPUTE_PGM_HI__INST_ATC__SHIFT 0x8
8847#define COMPUTE_TBA_LO__DATA_MASK 0xffffffff
8848#define COMPUTE_TBA_LO__DATA__SHIFT 0x0
8849#define COMPUTE_TBA_HI__DATA_MASK 0xff
8850#define COMPUTE_TBA_HI__DATA__SHIFT 0x0
8851#define COMPUTE_TMA_LO__DATA_MASK 0xffffffff
8852#define COMPUTE_TMA_LO__DATA__SHIFT 0x0
8853#define COMPUTE_TMA_HI__DATA_MASK 0xff
8854#define COMPUTE_TMA_HI__DATA__SHIFT 0x0
8855#define COMPUTE_PGM_RSRC1__VGPRS_MASK 0x3f
8856#define COMPUTE_PGM_RSRC1__VGPRS__SHIFT 0x0
8857#define COMPUTE_PGM_RSRC1__SGPRS_MASK 0x3c0
8858#define COMPUTE_PGM_RSRC1__SGPRS__SHIFT 0x6
8859#define COMPUTE_PGM_RSRC1__PRIORITY_MASK 0xc00
8860#define COMPUTE_PGM_RSRC1__PRIORITY__SHIFT 0xa
8861#define COMPUTE_PGM_RSRC1__FLOAT_MODE_MASK 0xff000
8862#define COMPUTE_PGM_RSRC1__FLOAT_MODE__SHIFT 0xc
8863#define COMPUTE_PGM_RSRC1__PRIV_MASK 0x100000
8864#define COMPUTE_PGM_RSRC1__PRIV__SHIFT 0x14
8865#define COMPUTE_PGM_RSRC1__DX10_CLAMP_MASK 0x200000
8866#define COMPUTE_PGM_RSRC1__DX10_CLAMP__SHIFT 0x15
8867#define COMPUTE_PGM_RSRC1__DEBUG_MODE_MASK 0x400000
8868#define COMPUTE_PGM_RSRC1__DEBUG_MODE__SHIFT 0x16
8869#define COMPUTE_PGM_RSRC1__IEEE_MODE_MASK 0x800000
8870#define COMPUTE_PGM_RSRC1__IEEE_MODE__SHIFT 0x17
8871#define COMPUTE_PGM_RSRC1__BULKY_MASK 0x1000000
8872#define COMPUTE_PGM_RSRC1__BULKY__SHIFT 0x18
8873#define COMPUTE_PGM_RSRC1__CDBG_USER_MASK 0x2000000
8874#define COMPUTE_PGM_RSRC1__CDBG_USER__SHIFT 0x19
8875#define COMPUTE_PGM_RSRC2__SCRATCH_EN_MASK 0x1
8876#define COMPUTE_PGM_RSRC2__SCRATCH_EN__SHIFT 0x0
8877#define COMPUTE_PGM_RSRC2__USER_SGPR_MASK 0x3e
8878#define COMPUTE_PGM_RSRC2__USER_SGPR__SHIFT 0x1
8879#define COMPUTE_PGM_RSRC2__TRAP_PRESENT_MASK 0x40
8880#define COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT 0x6
8881#define COMPUTE_PGM_RSRC2__TGID_X_EN_MASK 0x80
8882#define COMPUTE_PGM_RSRC2__TGID_X_EN__SHIFT 0x7
8883#define COMPUTE_PGM_RSRC2__TGID_Y_EN_MASK 0x100
8884#define COMPUTE_PGM_RSRC2__TGID_Y_EN__SHIFT 0x8
8885#define COMPUTE_PGM_RSRC2__TGID_Z_EN_MASK 0x200
8886#define COMPUTE_PGM_RSRC2__TGID_Z_EN__SHIFT 0x9
8887#define COMPUTE_PGM_RSRC2__TG_SIZE_EN_MASK 0x400
8888#define COMPUTE_PGM_RSRC2__TG_SIZE_EN__SHIFT 0xa
8889#define COMPUTE_PGM_RSRC2__TIDIG_COMP_CNT_MASK 0x1800
8890#define COMPUTE_PGM_RSRC2__TIDIG_COMP_CNT__SHIFT 0xb
8891#define COMPUTE_PGM_RSRC2__EXCP_EN_MSB_MASK 0x6000
8892#define COMPUTE_PGM_RSRC2__EXCP_EN_MSB__SHIFT 0xd
8893#define COMPUTE_PGM_RSRC2__LDS_SIZE_MASK 0xff8000
8894#define COMPUTE_PGM_RSRC2__LDS_SIZE__SHIFT 0xf
8895#define COMPUTE_PGM_RSRC2__EXCP_EN_MASK 0x7f000000
8896#define COMPUTE_PGM_RSRC2__EXCP_EN__SHIFT 0x18
8897#define COMPUTE_VMID__DATA_MASK 0xf
8898#define COMPUTE_VMID__DATA__SHIFT 0x0
8899#define COMPUTE_RESOURCE_LIMITS__WAVES_PER_SH_MASK 0x3ff
8900#define COMPUTE_RESOURCE_LIMITS__WAVES_PER_SH__SHIFT 0x0
8901#define COMPUTE_RESOURCE_LIMITS__TG_PER_CU_MASK 0xf000
8902#define COMPUTE_RESOURCE_LIMITS__TG_PER_CU__SHIFT 0xc
8903#define COMPUTE_RESOURCE_LIMITS__LOCK_THRESHOLD_MASK 0x3f0000
8904#define COMPUTE_RESOURCE_LIMITS__LOCK_THRESHOLD__SHIFT 0x10
8905#define COMPUTE_RESOURCE_LIMITS__SIMD_DEST_CNTL_MASK 0x400000
8906#define COMPUTE_RESOURCE_LIMITS__SIMD_DEST_CNTL__SHIFT 0x16
8907#define COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK 0x800000
8908#define COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST__SHIFT 0x17
8909#define COMPUTE_RESOURCE_LIMITS__CU_GROUP_COUNT_MASK 0x7000000
8910#define COMPUTE_RESOURCE_LIMITS__CU_GROUP_COUNT__SHIFT 0x18
8911#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH0_CU_EN_MASK 0xffff
8912#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH0_CU_EN__SHIFT 0x0
8913#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH1_CU_EN_MASK 0xffff0000
8914#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH1_CU_EN__SHIFT 0x10
8915#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH0_CU_EN_MASK 0xffff
8916#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH0_CU_EN__SHIFT 0x0
8917#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH1_CU_EN_MASK 0xffff0000
8918#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH1_CU_EN__SHIFT 0x10
8919#define COMPUTE_TMPRING_SIZE__WAVES_MASK 0xfff
8920#define COMPUTE_TMPRING_SIZE__WAVES__SHIFT 0x0
8921#define COMPUTE_TMPRING_SIZE__WAVESIZE_MASK 0x1fff000
8922#define COMPUTE_TMPRING_SIZE__WAVESIZE__SHIFT 0xc
8923#define COMPUTE_STATIC_THREAD_MGMT_SE2__SH0_CU_EN_MASK 0xffff
8924#define COMPUTE_STATIC_THREAD_MGMT_SE2__SH0_CU_EN__SHIFT 0x0
8925#define COMPUTE_STATIC_THREAD_MGMT_SE2__SH1_CU_EN_MASK 0xffff0000
8926#define COMPUTE_STATIC_THREAD_MGMT_SE2__SH1_CU_EN__SHIFT 0x10
8927#define COMPUTE_STATIC_THREAD_MGMT_SE3__SH0_CU_EN_MASK 0xffff
8928#define COMPUTE_STATIC_THREAD_MGMT_SE3__SH0_CU_EN__SHIFT 0x0
8929#define COMPUTE_STATIC_THREAD_MGMT_SE3__SH1_CU_EN_MASK 0xffff0000
8930#define COMPUTE_STATIC_THREAD_MGMT_SE3__SH1_CU_EN__SHIFT 0x10
8931#define COMPUTE_RESTART_X__RESTART_MASK 0xffffffff
8932#define COMPUTE_RESTART_X__RESTART__SHIFT 0x0
8933#define COMPUTE_RESTART_Y__RESTART_MASK 0xffffffff
8934#define COMPUTE_RESTART_Y__RESTART__SHIFT 0x0
8935#define COMPUTE_RESTART_Z__RESTART_MASK 0xffffffff
8936#define COMPUTE_RESTART_Z__RESTART__SHIFT 0x0
8937#define COMPUTE_THREAD_TRACE_ENABLE__THREAD_TRACE_ENABLE_MASK 0x1
8938#define COMPUTE_THREAD_TRACE_ENABLE__THREAD_TRACE_ENABLE__SHIFT 0x0
8939#define COMPUTE_MISC_RESERVED__SEND_SEID_MASK 0x3
8940#define COMPUTE_MISC_RESERVED__SEND_SEID__SHIFT 0x0
8941#define COMPUTE_MISC_RESERVED__RESERVED2_MASK 0x4
8942#define COMPUTE_MISC_RESERVED__RESERVED2__SHIFT 0x2
8943#define COMPUTE_MISC_RESERVED__RESERVED3_MASK 0x8
8944#define COMPUTE_MISC_RESERVED__RESERVED3__SHIFT 0x3
8945#define COMPUTE_MISC_RESERVED__RESERVED4_MASK 0x10
8946#define COMPUTE_MISC_RESERVED__RESERVED4__SHIFT 0x4
8947#define COMPUTE_MISC_RESERVED__WAVE_ID_BASE_MASK 0x1ffe0
8948#define COMPUTE_MISC_RESERVED__WAVE_ID_BASE__SHIFT 0x5
8949#define COMPUTE_DISPATCH_ID__DISPATCH_ID_MASK 0xffffffff
8950#define COMPUTE_DISPATCH_ID__DISPATCH_ID__SHIFT 0x0
8951#define COMPUTE_THREADGROUP_ID__THREADGROUP_ID_MASK 0xffffffff
8952#define COMPUTE_THREADGROUP_ID__THREADGROUP_ID__SHIFT 0x0
8953#define COMPUTE_RELAUNCH__PAYLOAD_MASK 0x3fffffff
8954#define COMPUTE_RELAUNCH__PAYLOAD__SHIFT 0x0
8955#define COMPUTE_RELAUNCH__IS_EVENT_MASK 0x40000000
8956#define COMPUTE_RELAUNCH__IS_EVENT__SHIFT 0x1e
8957#define COMPUTE_RELAUNCH__IS_STATE_MASK 0x80000000
8958#define COMPUTE_RELAUNCH__IS_STATE__SHIFT 0x1f
8959#define COMPUTE_WAVE_RESTORE_ADDR_LO__ADDR_MASK 0xffffffff
8960#define COMPUTE_WAVE_RESTORE_ADDR_LO__ADDR__SHIFT 0x0
8961#define COMPUTE_WAVE_RESTORE_ADDR_HI__ADDR_MASK 0xffff
8962#define COMPUTE_WAVE_RESTORE_ADDR_HI__ADDR__SHIFT 0x0
8963#define COMPUTE_WAVE_RESTORE_CONTROL__ATC_MASK 0x1
8964#define COMPUTE_WAVE_RESTORE_CONTROL__ATC__SHIFT 0x0
8965#define COMPUTE_WAVE_RESTORE_CONTROL__MTYPE_MASK 0x6
8966#define COMPUTE_WAVE_RESTORE_CONTROL__MTYPE__SHIFT 0x1
8967#define COMPUTE_USER_DATA_0__DATA_MASK 0xffffffff
8968#define COMPUTE_USER_DATA_0__DATA__SHIFT 0x0
8969#define COMPUTE_USER_DATA_1__DATA_MASK 0xffffffff
8970#define COMPUTE_USER_DATA_1__DATA__SHIFT 0x0
8971#define COMPUTE_USER_DATA_2__DATA_MASK 0xffffffff
8972#define COMPUTE_USER_DATA_2__DATA__SHIFT 0x0
8973#define COMPUTE_USER_DATA_3__DATA_MASK 0xffffffff
8974#define COMPUTE_USER_DATA_3__DATA__SHIFT 0x0
8975#define COMPUTE_USER_DATA_4__DATA_MASK 0xffffffff
8976#define COMPUTE_USER_DATA_4__DATA__SHIFT 0x0
8977#define COMPUTE_USER_DATA_5__DATA_MASK 0xffffffff
8978#define COMPUTE_USER_DATA_5__DATA__SHIFT 0x0
8979#define COMPUTE_USER_DATA_6__DATA_MASK 0xffffffff
8980#define COMPUTE_USER_DATA_6__DATA__SHIFT 0x0
8981#define COMPUTE_USER_DATA_7__DATA_MASK 0xffffffff
8982#define COMPUTE_USER_DATA_7__DATA__SHIFT 0x0
8983#define COMPUTE_USER_DATA_8__DATA_MASK 0xffffffff
8984#define COMPUTE_USER_DATA_8__DATA__SHIFT 0x0
8985#define COMPUTE_USER_DATA_9__DATA_MASK 0xffffffff
8986#define COMPUTE_USER_DATA_9__DATA__SHIFT 0x0
8987#define COMPUTE_USER_DATA_10__DATA_MASK 0xffffffff
8988#define COMPUTE_USER_DATA_10__DATA__SHIFT 0x0
8989#define COMPUTE_USER_DATA_11__DATA_MASK 0xffffffff
8990#define COMPUTE_USER_DATA_11__DATA__SHIFT 0x0
8991#define COMPUTE_USER_DATA_12__DATA_MASK 0xffffffff
8992#define COMPUTE_USER_DATA_12__DATA__SHIFT 0x0
8993#define COMPUTE_USER_DATA_13__DATA_MASK 0xffffffff
8994#define COMPUTE_USER_DATA_13__DATA__SHIFT 0x0
8995#define COMPUTE_USER_DATA_14__DATA_MASK 0xffffffff
8996#define COMPUTE_USER_DATA_14__DATA__SHIFT 0x0
8997#define COMPUTE_USER_DATA_15__DATA_MASK 0xffffffff
8998#define COMPUTE_USER_DATA_15__DATA__SHIFT 0x0
8999#define COMPUTE_NOWHERE__DATA_MASK 0xffffffff
9000#define COMPUTE_NOWHERE__DATA__SHIFT 0x0
9001#define CSPRIV_CONNECT__DOORBELL_OFFSET_MASK 0x1fffff
9002#define CSPRIV_CONNECT__DOORBELL_OFFSET__SHIFT 0x0
9003#define CSPRIV_CONNECT__QUEUE_ID_MASK 0xe00000
9004#define CSPRIV_CONNECT__QUEUE_ID__SHIFT 0x15
9005#define CSPRIV_CONNECT__VMID_MASK 0x3c000000
9006#define CSPRIV_CONNECT__VMID__SHIFT 0x1a
9007#define CSPRIV_CONNECT__UNORD_DISP_MASK 0x80000000
9008#define CSPRIV_CONNECT__UNORD_DISP__SHIFT 0x1f
9009#define CSPRIV_THREAD_TRACE_TG0__TGID_X_MASK 0xffffffff
9010#define CSPRIV_THREAD_TRACE_TG0__TGID_X__SHIFT 0x0
9011#define CSPRIV_THREAD_TRACE_TG1__TGID_Y_MASK 0xffffffff
9012#define CSPRIV_THREAD_TRACE_TG1__TGID_Y__SHIFT 0x0
9013#define CSPRIV_THREAD_TRACE_TG2__TGID_Z_MASK 0xffffffff
9014#define CSPRIV_THREAD_TRACE_TG2__TGID_Z__SHIFT 0x0
9015#define CSPRIV_THREAD_TRACE_TG3__WAVE_ID_BASE_MASK 0xfff
9016#define CSPRIV_THREAD_TRACE_TG3__WAVE_ID_BASE__SHIFT 0x0
9017#define CSPRIV_THREAD_TRACE_TG3__THREADS_IN_GROUP_MASK 0xfff000
9018#define CSPRIV_THREAD_TRACE_TG3__THREADS_IN_GROUP__SHIFT 0xc
9019#define CSPRIV_THREAD_TRACE_TG3__PARTIAL_X_FLAG_MASK 0x1000000
9020#define CSPRIV_THREAD_TRACE_TG3__PARTIAL_X_FLAG__SHIFT 0x18
9021#define CSPRIV_THREAD_TRACE_TG3__PARTIAL_Y_FLAG_MASK 0x2000000
9022#define CSPRIV_THREAD_TRACE_TG3__PARTIAL_Y_FLAG__SHIFT 0x19
9023#define CSPRIV_THREAD_TRACE_TG3__PARTIAL_Z_FLAG_MASK 0x4000000
9024#define CSPRIV_THREAD_TRACE_TG3__PARTIAL_Z_FLAG__SHIFT 0x1a
9025#define CSPRIV_THREAD_TRACE_TG3__LAST_TG_MASK 0x8000000
9026#define CSPRIV_THREAD_TRACE_TG3__LAST_TG__SHIFT 0x1b
9027#define CSPRIV_THREAD_TRACE_TG3__FIRST_TG_MASK 0x10000000
9028#define CSPRIV_THREAD_TRACE_TG3__FIRST_TG__SHIFT 0x1c
9029#define CSPRIV_THREAD_TRACE_EVENT__EVENT_ID_MASK 0x1f
9030#define CSPRIV_THREAD_TRACE_EVENT__EVENT_ID__SHIFT 0x0
9031#define RLC_CNTL__RLC_ENABLE_F32_MASK 0x1
9032#define RLC_CNTL__RLC_ENABLE_F32__SHIFT 0x0
9033#define RLC_CNTL__FORCE_RETRY_MASK 0x2
9034#define RLC_CNTL__FORCE_RETRY__SHIFT 0x1
9035#define RLC_CNTL__READ_CACHE_DISABLE_MASK 0x4
9036#define RLC_CNTL__READ_CACHE_DISABLE__SHIFT 0x2
9037#define RLC_CNTL__RLC_STEP_F32_MASK 0x8
9038#define RLC_CNTL__RLC_STEP_F32__SHIFT 0x3
9039#define RLC_CNTL__SOFT_RESET_DEBUG_MODE_MASK 0x10
9040#define RLC_CNTL__SOFT_RESET_DEBUG_MODE__SHIFT 0x4
9041#define RLC_CNTL__RESERVED_MASK 0xffffff00
9042#define RLC_CNTL__RESERVED__SHIFT 0x8
9043#define RLC_DEBUG_SELECT__SELECT_MASK 0xff
9044#define RLC_DEBUG_SELECT__SELECT__SHIFT 0x0
9045#define RLC_DEBUG_SELECT__RESERVED_MASK 0xffffff00
9046#define RLC_DEBUG_SELECT__RESERVED__SHIFT 0x8
9047#define RLC_DEBUG__DATA_MASK 0xffffffff
9048#define RLC_DEBUG__DATA__SHIFT 0x0
9049#define RLC_MC_CNTL__WRREQ_SWAP_MASK 0x3
9050#define RLC_MC_CNTL__WRREQ_SWAP__SHIFT 0x0
9051#define RLC_MC_CNTL__WRREQ_TRAN_MASK 0x4
9052#define RLC_MC_CNTL__WRREQ_TRAN__SHIFT 0x2
9053#define RLC_MC_CNTL__WRREQ_PRIV_MASK 0x8
9054#define RLC_MC_CNTL__WRREQ_PRIV__SHIFT 0x3
9055#define RLC_MC_CNTL__WRNFO_STALL_MASK 0x10
9056#define RLC_MC_CNTL__WRNFO_STALL__SHIFT 0x4
9057#define RLC_MC_CNTL__WRNFO_URG_MASK 0x1e0
9058#define RLC_MC_CNTL__WRNFO_URG__SHIFT 0x5
9059#define RLC_MC_CNTL__WRREQ_DW_IMASK_MASK 0x1e00
9060#define RLC_MC_CNTL__WRREQ_DW_IMASK__SHIFT 0x9
9061#define RLC_MC_CNTL__RESERVED_B_MASK 0xfe000
9062#define RLC_MC_CNTL__RESERVED_B__SHIFT 0xd
9063#define RLC_MC_CNTL__RDNFO_URG_MASK 0xf00000
9064#define RLC_MC_CNTL__RDNFO_URG__SHIFT 0x14
9065#define RLC_MC_CNTL__RDREQ_SWAP_MASK 0x3000000
9066#define RLC_MC_CNTL__RDREQ_SWAP__SHIFT 0x18
9067#define RLC_MC_CNTL__RDREQ_TRAN_MASK 0x4000000
9068#define RLC_MC_CNTL__RDREQ_TRAN__SHIFT 0x1a
9069#define RLC_MC_CNTL__RDREQ_PRIV_MASK 0x8000000
9070#define RLC_MC_CNTL__RDREQ_PRIV__SHIFT 0x1b
9071#define RLC_MC_CNTL__RDNFO_STALL_MASK 0x10000000
9072#define RLC_MC_CNTL__RDNFO_STALL__SHIFT 0x1c
9073#define RLC_MC_CNTL__RESERVED_MASK 0xe0000000
9074#define RLC_MC_CNTL__RESERVED__SHIFT 0x1d
9075#define RLC_STAT__RLC_BUSY_MASK 0x1
9076#define RLC_STAT__RLC_BUSY__SHIFT 0x0
9077#define RLC_STAT__RLC_GPM_BUSY_MASK 0x2
9078#define RLC_STAT__RLC_GPM_BUSY__SHIFT 0x1
9079#define RLC_STAT__RLC_SPM_BUSY_MASK 0x4
9080#define RLC_STAT__RLC_SPM_BUSY__SHIFT 0x2
9081#define RLC_STAT__RLC_SRM_BUSY_MASK 0x8
9082#define RLC_STAT__RLC_SRM_BUSY__SHIFT 0x3
9083#define RLC_STAT__RESERVED_MASK 0xfffffff0
9084#define RLC_STAT__RESERVED__SHIFT 0x4
9085#define RLC_SAFE_MODE__CMD_MASK 0x1
9086#define RLC_SAFE_MODE__CMD__SHIFT 0x0
9087#define RLC_SAFE_MODE__MESSAGE_MASK 0x1e
9088#define RLC_SAFE_MODE__MESSAGE__SHIFT 0x1
9089#define RLC_SAFE_MODE__RESERVED1_MASK 0xe0
9090#define RLC_SAFE_MODE__RESERVED1__SHIFT 0x5
9091#define RLC_SAFE_MODE__RESPONSE_MASK 0xf00
9092#define RLC_SAFE_MODE__RESPONSE__SHIFT 0x8
9093#define RLC_SAFE_MODE__RESERVED_MASK 0xfffff000
9094#define RLC_SAFE_MODE__RESERVED__SHIFT 0xc
9095#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK 0x1
9096#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN__SHIFT 0x0
9097#define RLC_MEM_SLP_CNTL__RLC_MEM_DS_EN_MASK 0x2
9098#define RLC_MEM_SLP_CNTL__RLC_MEM_DS_EN__SHIFT 0x1
9099#define RLC_MEM_SLP_CNTL__RESERVED_MASK 0x7c
9100#define RLC_MEM_SLP_CNTL__RESERVED__SHIFT 0x2
9101#define RLC_MEM_SLP_CNTL__RLC_LS_DS_BUSY_OVERRIDE_MASK 0x80
9102#define RLC_MEM_SLP_CNTL__RLC_LS_DS_BUSY_OVERRIDE__SHIFT 0x7
9103#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_ON_DELAY_MASK 0xff00
9104#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_ON_DELAY__SHIFT 0x8
9105#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_OFF_DELAY_MASK 0xff0000
9106#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_OFF_DELAY__SHIFT 0x10
9107#define RLC_MEM_SLP_CNTL__RESERVED1_MASK 0xff000000
9108#define RLC_MEM_SLP_CNTL__RESERVED1__SHIFT 0x18
9109#define SMU_RLC_RESPONSE__RESP_MASK 0xffffffff
9110#define SMU_RLC_RESPONSE__RESP__SHIFT 0x0
9111#define RLC_RLCV_SAFE_MODE__CMD_MASK 0x1
9112#define RLC_RLCV_SAFE_MODE__CMD__SHIFT 0x0
9113#define RLC_RLCV_SAFE_MODE__MESSAGE_MASK 0x1e
9114#define RLC_RLCV_SAFE_MODE__MESSAGE__SHIFT 0x1
9115#define RLC_RLCV_SAFE_MODE__RESERVED1_MASK 0xe0
9116#define RLC_RLCV_SAFE_MODE__RESERVED1__SHIFT 0x5
9117#define RLC_RLCV_SAFE_MODE__RESPONSE_MASK 0xf00
9118#define RLC_RLCV_SAFE_MODE__RESPONSE__SHIFT 0x8
9119#define RLC_RLCV_SAFE_MODE__RESERVED_MASK 0xfffff000
9120#define RLC_RLCV_SAFE_MODE__RESERVED__SHIFT 0xc
9121#define RLC_SMU_SAFE_MODE__CMD_MASK 0x1
9122#define RLC_SMU_SAFE_MODE__CMD__SHIFT 0x0
9123#define RLC_SMU_SAFE_MODE__MESSAGE_MASK 0x1e
9124#define RLC_SMU_SAFE_MODE__MESSAGE__SHIFT 0x1
9125#define RLC_SMU_SAFE_MODE__RESERVED1_MASK 0xe0
9126#define RLC_SMU_SAFE_MODE__RESERVED1__SHIFT 0x5
9127#define RLC_SMU_SAFE_MODE__RESPONSE_MASK 0xf00
9128#define RLC_SMU_SAFE_MODE__RESPONSE__SHIFT 0x8
9129#define RLC_SMU_SAFE_MODE__RESERVED_MASK 0xfffff000
9130#define RLC_SMU_SAFE_MODE__RESERVED__SHIFT 0xc
9131#define RLC_RLCV_COMMAND__CMD_MASK 0xf
9132#define RLC_RLCV_COMMAND__CMD__SHIFT 0x0
9133#define RLC_RLCV_COMMAND__RESERVED_MASK 0xfffffff0
9134#define RLC_RLCV_COMMAND__RESERVED__SHIFT 0x4
9135#define RLC_CLK_CNTL__RLC_SRM_CLK_CNTL_MASK 0x1
9136#define RLC_CLK_CNTL__RLC_SRM_CLK_CNTL__SHIFT 0x0
9137#define RLC_CLK_CNTL__RLC_SPM_CLK_CNTL_MASK 0x2
9138#define RLC_CLK_CNTL__RLC_SPM_CLK_CNTL__SHIFT 0x1
9139#define RLC_CLK_CNTL__RESERVED_MASK 0xfffffffc
9140#define RLC_CLK_CNTL__RESERVED__SHIFT 0x2
9141#define RLC_PERFMON_CLK_CNTL__PERFMON_CLOCK_STATE_MASK 0x1
9142#define RLC_PERFMON_CLK_CNTL__PERFMON_CLOCK_STATE__SHIFT 0x0
9143#define RLC_PERFMON_CNTL__PERFMON_STATE_MASK 0x7
9144#define RLC_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
9145#define RLC_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE_MASK 0x400
9146#define RLC_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE__SHIFT 0xa
9147#define RLC_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT_MASK 0xff
9148#define RLC_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
9149#define RLC_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT_MASK 0xff
9150#define RLC_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
9151#define RLC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffff
9152#define RLC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
9153#define RLC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffff
9154#define RLC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
9155#define RLC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffff
9156#define RLC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
9157#define RLC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffff
9158#define RLC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
9159#define CGTT_RLC_CLK_CTRL__ON_DELAY_MASK 0xf
9160#define CGTT_RLC_CLK_CTRL__ON_DELAY__SHIFT 0x0
9161#define CGTT_RLC_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
9162#define CGTT_RLC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
9163#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000
9164#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
9165#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000
9166#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
9167#define RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK 0x1
9168#define RLC_LB_CNTL__LOAD_BALANCE_ENABLE__SHIFT 0x0
9169#define RLC_LB_CNTL__LB_CNT_CP_BUSY_MASK 0x2
9170#define RLC_LB_CNTL__LB_CNT_CP_BUSY__SHIFT 0x1
9171#define RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK 0x4
9172#define RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE__SHIFT 0x2
9173#define RLC_LB_CNTL__LB_CNT_REG_INC_MASK 0x8
9174#define RLC_LB_CNTL__LB_CNT_REG_INC__SHIFT 0x3
9175#define RLC_LB_CNTL__CU_MASK_USED_OFF_HYST_MASK 0xff0
9176#define RLC_LB_CNTL__CU_MASK_USED_OFF_HYST__SHIFT 0x4
9177#define RLC_LB_CNTL__RESERVED_MASK 0xfffff000
9178#define RLC_LB_CNTL__RESERVED__SHIFT 0xc
9179#define RLC_LB_CNTR_MAX__LB_CNTR_MAX_MASK 0xffffffff
9180#define RLC_LB_CNTR_MAX__LB_CNTR_MAX__SHIFT 0x0
9181#define RLC_LB_CNTR_INIT__LB_CNTR_INIT_MASK 0xffffffff
9182#define RLC_LB_CNTR_INIT__LB_CNTR_INIT__SHIFT 0x0
9183#define RLC_LOAD_BALANCE_CNTR__RLC_LOAD_BALANCE_CNTR_MASK 0xffffffff
9184#define RLC_LOAD_BALANCE_CNTR__RLC_LOAD_BALANCE_CNTR__SHIFT 0x0
9185#define RLC_JUMP_TABLE_RESTORE__ADDR_MASK 0xffffffff
9186#define RLC_JUMP_TABLE_RESTORE__ADDR__SHIFT 0x0
9187#define RLC_PG_DELAY_2__SERDES_TIMEOUT_VALUE_MASK 0xff
9188#define RLC_PG_DELAY_2__SERDES_TIMEOUT_VALUE__SHIFT 0x0
9189#define RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK 0xff00
9190#define RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT 0x8
9191#define RLC_PG_DELAY_2__PERCU_TIMEOUT_VALUE_MASK 0xffff0000
9192#define RLC_PG_DELAY_2__PERCU_TIMEOUT_VALUE__SHIFT 0x10
9193#define RLC_GPM_DEBUG_SELECT__SELECT_MASK 0xff
9194#define RLC_GPM_DEBUG_SELECT__SELECT__SHIFT 0x0
9195#define RLC_GPM_DEBUG_SELECT__F32_DEBUG_SELECT_MASK 0x300
9196#define RLC_GPM_DEBUG_SELECT__F32_DEBUG_SELECT__SHIFT 0x8
9197#define RLC_GPM_DEBUG_SELECT__RESERVED_MASK 0xfffffc00
9198#define RLC_GPM_DEBUG_SELECT__RESERVED__SHIFT 0xa
9199#define RLC_GPM_DEBUG__DATA_MASK 0xffffffff
9200#define RLC_GPM_DEBUG__DATA__SHIFT 0x0
9201#define RLC_GPM_DEBUG_INST_A__INST_A_MASK 0xffffffff
9202#define RLC_GPM_DEBUG_INST_A__INST_A__SHIFT 0x0
9203#define RLC_GPM_DEBUG_INST_B__INST_B_MASK 0xffffffff
9204#define RLC_GPM_DEBUG_INST_B__INST_B__SHIFT 0x0
9205#define RLC_GPM_DEBUG_INST_ADDR__ADRR_A_MASK 0xffff
9206#define RLC_GPM_DEBUG_INST_ADDR__ADRR_A__SHIFT 0x0
9207#define RLC_GPM_DEBUG_INST_ADDR__ADDR_B_MASK 0xffff0000
9208#define RLC_GPM_DEBUG_INST_ADDR__ADDR_B__SHIFT 0x10
9209#define RLC_GPM_UCODE_ADDR__UCODE_ADDR_MASK 0xfff
9210#define RLC_GPM_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
9211#define RLC_GPM_UCODE_ADDR__RESERVED_MASK 0xfffff000
9212#define RLC_GPM_UCODE_ADDR__RESERVED__SHIFT 0xc
9213#define RLC_GPM_UCODE_DATA__UCODE_DATA_MASK 0xffffffff
9214#define RLC_GPM_UCODE_DATA__UCODE_DATA__SHIFT 0x0
9215#define GPU_BIST_CONTROL__STOP_ON_FAIL_HW_MASK 0x1
9216#define GPU_BIST_CONTROL__STOP_ON_FAIL_HW__SHIFT 0x0
9217#define GPU_BIST_CONTROL__STOP_ON_FAIL_CU_HARV_MASK 0x2
9218#define GPU_BIST_CONTROL__STOP_ON_FAIL_CU_HARV__SHIFT 0x1
9219#define GPU_BIST_CONTROL__CU_HARV_LOOP_COUNT_MASK 0x3c
9220#define GPU_BIST_CONTROL__CU_HARV_LOOP_COUNT__SHIFT 0x2
9221#define GPU_BIST_CONTROL__RESERVED_MASK 0xffff80
9222#define GPU_BIST_CONTROL__RESERVED__SHIFT 0x7
9223#define GPU_BIST_CONTROL__GLOBAL_LOOP_COUNT_MASK 0xff000000
9224#define GPU_BIST_CONTROL__GLOBAL_LOOP_COUNT__SHIFT 0x18
9225#define RLC_ROM_CNTL__USE_ROM_MASK 0x1
9226#define RLC_ROM_CNTL__USE_ROM__SHIFT 0x0
9227#define RLC_ROM_CNTL__SLP_MODE_EN_MASK 0x2
9228#define RLC_ROM_CNTL__SLP_MODE_EN__SHIFT 0x1
9229#define RLC_ROM_CNTL__EFUSE_DISTRIB_EN_MASK 0x4
9230#define RLC_ROM_CNTL__EFUSE_DISTRIB_EN__SHIFT 0x2
9231#define RLC_ROM_CNTL__HELLOWORLD_EN_MASK 0x8
9232#define RLC_ROM_CNTL__HELLOWORLD_EN__SHIFT 0x3
9233#define RLC_ROM_CNTL__CU_HARVEST_EN_MASK 0x10
9234#define RLC_ROM_CNTL__CU_HARVEST_EN__SHIFT 0x4
9235#define RLC_ROM_CNTL__RESERVED_MASK 0xffffffe0
9236#define RLC_ROM_CNTL__RESERVED__SHIFT 0x5
9237#define RLC_GPU_CLOCK_COUNT_LSB__GPU_CLOCKS_LSB_MASK 0xffffffff
9238#define RLC_GPU_CLOCK_COUNT_LSB__GPU_CLOCKS_LSB__SHIFT 0x0
9239#define RLC_GPU_CLOCK_COUNT_MSB__GPU_CLOCKS_MSB_MASK 0xffffffff
9240#define RLC_GPU_CLOCK_COUNT_MSB__GPU_CLOCKS_MSB__SHIFT 0x0
9241#define RLC_CAPTURE_GPU_CLOCK_COUNT__CAPTURE_MASK 0x1
9242#define RLC_CAPTURE_GPU_CLOCK_COUNT__CAPTURE__SHIFT 0x0
9243#define RLC_CAPTURE_GPU_CLOCK_COUNT__RESERVED_MASK 0xfffffffe
9244#define RLC_CAPTURE_GPU_CLOCK_COUNT__RESERVED__SHIFT 0x1
9245#define RLC_UCODE_CNTL__RLC_UCODE_FLAGS_MASK 0xffffffff
9246#define RLC_UCODE_CNTL__RLC_UCODE_FLAGS__SHIFT 0x0
9247#define RLC_GPM_STAT__RLC_BUSY_MASK 0x1
9248#define RLC_GPM_STAT__RLC_BUSY__SHIFT 0x0
9249#define RLC_GPM_STAT__GFX_POWER_STATUS_MASK 0x2
9250#define RLC_GPM_STAT__GFX_POWER_STATUS__SHIFT 0x1
9251#define RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK 0x4
9252#define RLC_GPM_STAT__GFX_CLOCK_STATUS__SHIFT 0x2
9253#define RLC_GPM_STAT__GFX_LS_STATUS_MASK 0x8
9254#define RLC_GPM_STAT__GFX_LS_STATUS__SHIFT 0x3
9255#define RLC_GPM_STAT__GFX_PIPELINE_POWER_STATUS_MASK 0x10
9256#define RLC_GPM_STAT__GFX_PIPELINE_POWER_STATUS__SHIFT 0x4
9257#define RLC_GPM_STAT__CNTX_IDLE_BEING_PROCESSED_MASK 0x20
9258#define RLC_GPM_STAT__CNTX_IDLE_BEING_PROCESSED__SHIFT 0x5
9259#define RLC_GPM_STAT__CNTX_BUSY_BEING_PROCESSED_MASK 0x40
9260#define RLC_GPM_STAT__CNTX_BUSY_BEING_PROCESSED__SHIFT 0x6
9261#define RLC_GPM_STAT__GFX_IDLE_BEING_PROCESSED_MASK 0x80
9262#define RLC_GPM_STAT__GFX_IDLE_BEING_PROCESSED__SHIFT 0x7
9263#define RLC_GPM_STAT__CMP_BUSY_BEING_PROCESSED_MASK 0x100
9264#define RLC_GPM_STAT__CMP_BUSY_BEING_PROCESSED__SHIFT 0x8
9265#define RLC_GPM_STAT__SAVING_REGISTERS_MASK 0x200
9266#define RLC_GPM_STAT__SAVING_REGISTERS__SHIFT 0x9
9267#define RLC_GPM_STAT__RESTORING_REGISTERS_MASK 0x400
9268#define RLC_GPM_STAT__RESTORING_REGISTERS__SHIFT 0xa
9269#define RLC_GPM_STAT__GFX3D_BLOCKS_CHANGING_POWER_STATE_MASK 0x800
9270#define RLC_GPM_STAT__GFX3D_BLOCKS_CHANGING_POWER_STATE__SHIFT 0xb
9271#define RLC_GPM_STAT__CMP_BLOCKS_CHANGING_POWER_STATE_MASK 0x1000
9272#define RLC_GPM_STAT__CMP_BLOCKS_CHANGING_POWER_STATE__SHIFT 0xc
9273#define RLC_GPM_STAT__STATIC_CU_POWERING_UP_MASK 0x2000
9274#define RLC_GPM_STAT__STATIC_CU_POWERING_UP__SHIFT 0xd
9275#define RLC_GPM_STAT__STATIC_CU_POWERING_DOWN_MASK 0x4000
9276#define RLC_GPM_STAT__STATIC_CU_POWERING_DOWN__SHIFT 0xe
9277#define RLC_GPM_STAT__DYN_CU_POWERING_UP_MASK 0x8000
9278#define RLC_GPM_STAT__DYN_CU_POWERING_UP__SHIFT 0xf
9279#define RLC_GPM_STAT__DYN_CU_POWERING_DOWN_MASK 0x10000
9280#define RLC_GPM_STAT__DYN_CU_POWERING_DOWN__SHIFT 0x10
9281#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE_MASK 0x20000
9282#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE__SHIFT 0x11
9283#define RLC_GPM_STAT__RESERVED_MASK 0xfc0000
9284#define RLC_GPM_STAT__RESERVED__SHIFT 0x12
9285#define RLC_GPM_STAT__PG_ERROR_STATUS_MASK 0xff000000
9286#define RLC_GPM_STAT__PG_ERROR_STATUS__SHIFT 0x18
9287#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL_MASK 0x3f
9288#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL__SHIFT 0x0
9289#define RLC_GPU_CLOCK_32_RES_SEL__RESERVED_MASK 0xffffffc0
9290#define RLC_GPU_CLOCK_32_RES_SEL__RESERVED__SHIFT 0x6
9291#define RLC_GPU_CLOCK_32__GPU_CLOCK_32_MASK 0xffffffff
9292#define RLC_GPU_CLOCK_32__GPU_CLOCK_32__SHIFT 0x0
9293#define RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK 0x1
9294#define RLC_PG_CNTL__GFX_POWER_GATING_ENABLE__SHIFT 0x0
9295#define RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK 0x2
9296#define RLC_PG_CNTL__GFX_POWER_GATING_SRC__SHIFT 0x1
9297#define RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK 0x4
9298#define RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE__SHIFT 0x2
9299#define RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK 0x8
9300#define RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE__SHIFT 0x3
9301#define RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK 0x10
9302#define RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE__SHIFT 0x4
9303#define RLC_PG_CNTL__RESERVED_MASK 0x3fe0
9304#define RLC_PG_CNTL__RESERVED__SHIFT 0x5
9305#define RLC_PG_CNTL__PG_OVERRIDE_MASK 0x4000
9306#define RLC_PG_CNTL__PG_OVERRIDE__SHIFT 0xe
9307#define RLC_PG_CNTL__CP_PG_DISABLE_MASK 0x8000
9308#define RLC_PG_CNTL__CP_PG_DISABLE__SHIFT 0xf
9309#define RLC_PG_CNTL__CHUB_HANDSHAKE_ENABLE_MASK 0x10000
9310#define RLC_PG_CNTL__CHUB_HANDSHAKE_ENABLE__SHIFT 0x10
9311#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK 0x20000
9312#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE__SHIFT 0x11
9313#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK 0x40000
9314#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE__SHIFT 0x12
9315#define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE_MASK 0x80000
9316#define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE__SHIFT 0x13
9317#define RLC_PG_CNTL__RESERVED1_MASK 0xf00000
9318#define RLC_PG_CNTL__RESERVED1__SHIFT 0x14
9319#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY_MASK 0xff
9320#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY__SHIFT 0x0
9321#define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY_MASK 0xff00
9322#define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY__SHIFT 0x8
9323#define RLC_GPM_THREAD_PRIORITY__THREAD2_PRIORITY_MASK 0xff0000
9324#define RLC_GPM_THREAD_PRIORITY__THREAD2_PRIORITY__SHIFT 0x10
9325#define RLC_GPM_THREAD_PRIORITY__THREAD3_PRIORITY_MASK 0xff000000
9326#define RLC_GPM_THREAD_PRIORITY__THREAD3_PRIORITY__SHIFT 0x18
9327#define RLC_GPM_THREAD_ENABLE__THREAD0_ENABLE_MASK 0x1
9328#define RLC_GPM_THREAD_ENABLE__THREAD0_ENABLE__SHIFT 0x0
9329#define RLC_GPM_THREAD_ENABLE__THREAD1_ENABLE_MASK 0x2
9330#define RLC_GPM_THREAD_ENABLE__THREAD1_ENABLE__SHIFT 0x1
9331#define RLC_GPM_THREAD_ENABLE__THREAD2_ENABLE_MASK 0x4
9332#define RLC_GPM_THREAD_ENABLE__THREAD2_ENABLE__SHIFT 0x2
9333#define RLC_GPM_THREAD_ENABLE__THREAD3_ENABLE_MASK 0x8
9334#define RLC_GPM_THREAD_ENABLE__THREAD3_ENABLE__SHIFT 0x3
9335#define RLC_GPM_THREAD_ENABLE__RESERVED_MASK 0xfffffff0
9336#define RLC_GPM_THREAD_ENABLE__RESERVED__SHIFT 0x4
9337#define RLC_GPM_VMID_THREAD0__RLC_VMID_MASK 0xf
9338#define RLC_GPM_VMID_THREAD0__RLC_VMID__SHIFT 0x0
9339#define RLC_GPM_VMID_THREAD0__RESERVED0_MASK 0xf0
9340#define RLC_GPM_VMID_THREAD0__RESERVED0__SHIFT 0x4
9341#define RLC_GPM_VMID_THREAD0__RLC_QUEUEID_MASK 0x700
9342#define RLC_GPM_VMID_THREAD0__RLC_QUEUEID__SHIFT 0x8
9343#define RLC_GPM_VMID_THREAD0__RESERVED1_MASK 0xfffff800
9344#define RLC_GPM_VMID_THREAD0__RESERVED1__SHIFT 0xb
9345#define RLC_GPM_VMID_THREAD1__RLC_VMID_MASK 0xf
9346#define RLC_GPM_VMID_THREAD1__RLC_VMID__SHIFT 0x0
9347#define RLC_GPM_VMID_THREAD1__RESERVED0_MASK 0xf0
9348#define RLC_GPM_VMID_THREAD1__RESERVED0__SHIFT 0x4
9349#define RLC_GPM_VMID_THREAD1__RLC_QUEUEID_MASK 0x700
9350#define RLC_GPM_VMID_THREAD1__RLC_QUEUEID__SHIFT 0x8
9351#define RLC_GPM_VMID_THREAD1__RESERVED1_MASK 0xfffff800
9352#define RLC_GPM_VMID_THREAD1__RESERVED1__SHIFT 0xb
9353#define RLC_CGTT_MGCG_OVERRIDE__OVERRIDE_MASK 0xffffffff
9354#define RLC_CGTT_MGCG_OVERRIDE__OVERRIDE__SHIFT 0x0
9355#define RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK 0x1
9356#define RLC_CGCG_CGLS_CTRL__CGCG_EN__SHIFT 0x0
9357#define RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK 0x2
9358#define RLC_CGCG_CGLS_CTRL__CGLS_EN__SHIFT 0x1
9359#define RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK 0xfc
9360#define RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT 0x2
9361#define RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK 0x7ffff00
9362#define RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT 0x8
9363#define RLC_CGCG_CGLS_CTRL__CGCG_CONTROLLER_MASK 0x8000000
9364#define RLC_CGCG_CGLS_CTRL__CGCG_CONTROLLER__SHIFT 0x1b
9365#define RLC_CGCG_CGLS_CTRL__CGCG_REG_CTRL_MASK 0x10000000
9366#define RLC_CGCG_CGLS_CTRL__CGCG_REG_CTRL__SHIFT 0x1c
9367#define RLC_CGCG_CGLS_CTRL__SLEEP_MODE_MASK 0x60000000
9368#define RLC_CGCG_CGLS_CTRL__SLEEP_MODE__SHIFT 0x1d
9369#define RLC_CGCG_CGLS_CTRL__SIM_SILICON_EN_MASK 0x80000000
9370#define RLC_CGCG_CGLS_CTRL__SIM_SILICON_EN__SHIFT 0x1f
9371#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_START_UNIT_MASK 0xf
9372#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_START_UNIT__SHIFT 0x0
9373#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_STEP_UNIT_MASK 0xf0
9374#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_STEP_UNIT__SHIFT 0x4
9375#define RLC_CGCG_RAMP_CTRL__UP_DIV_START_UNIT_MASK 0xf00
9376#define RLC_CGCG_RAMP_CTRL__UP_DIV_START_UNIT__SHIFT 0x8
9377#define RLC_CGCG_RAMP_CTRL__UP_DIV_STEP_UNIT_MASK 0xf000
9378#define RLC_CGCG_RAMP_CTRL__UP_DIV_STEP_UNIT__SHIFT 0xc
9379#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_CNT_MASK 0xfff0000
9380#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_CNT__SHIFT 0x10
9381#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_UNIT_MASK 0xf0000000
9382#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_UNIT__SHIFT 0x1c
9383#define RLC_DYN_PG_STATUS__PG_STATUS_CU_MASK_MASK 0xffffffff
9384#define RLC_DYN_PG_STATUS__PG_STATUS_CU_MASK__SHIFT 0x0
9385#define RLC_DYN_PG_REQUEST__PG_REQUEST_CU_MASK_MASK 0xffffffff
9386#define RLC_DYN_PG_REQUEST__PG_REQUEST_CU_MASK__SHIFT 0x0
9387#define RLC_PG_DELAY__POWER_UP_DELAY_MASK 0xff
9388#define RLC_PG_DELAY__POWER_UP_DELAY__SHIFT 0x0
9389#define RLC_PG_DELAY__POWER_DOWN_DELAY_MASK 0xff00
9390#define RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT 0x8
9391#define RLC_PG_DELAY__CMD_PROPAGATE_DELAY_MASK 0xff0000
9392#define RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT 0x10
9393#define RLC_PG_DELAY__MEM_SLEEP_DELAY_MASK 0xff000000
9394#define RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT 0x18
9395#define RLC_CU_STATUS__WORK_PENDING_MASK 0xffffffff
9396#define RLC_CU_STATUS__WORK_PENDING__SHIFT 0x0
9397#define RLC_LB_INIT_CU_MASK__INIT_CU_MASK_MASK 0xffffffff
9398#define RLC_LB_INIT_CU_MASK__INIT_CU_MASK__SHIFT 0x0
9399#define RLC_LB_ALWAYS_ACTIVE_CU_MASK__ALWAYS_ACTIVE_CU_MASK_MASK 0xffffffff
9400#define RLC_LB_ALWAYS_ACTIVE_CU_MASK__ALWAYS_ACTIVE_CU_MASK__SHIFT 0x0
9401#define RLC_LB_PARAMS__SKIP_L2_CHECK_MASK 0x1
9402#define RLC_LB_PARAMS__SKIP_L2_CHECK__SHIFT 0x0
9403#define RLC_LB_PARAMS__FIFO_SAMPLES_MASK 0xfe
9404#define RLC_LB_PARAMS__FIFO_SAMPLES__SHIFT 0x1
9405#define RLC_LB_PARAMS__PG_IDLE_SAMPLES_MASK 0xff00
9406#define RLC_LB_PARAMS__PG_IDLE_SAMPLES__SHIFT 0x8
9407#define RLC_LB_PARAMS__PG_IDLE_SAMPLE_INTERVAL_MASK 0xffff0000
9408#define RLC_LB_PARAMS__PG_IDLE_SAMPLE_INTERVAL__SHIFT 0x10
9409#define RLC_THREAD1_DELAY__CU_IDEL_DELAY_MASK 0xff
9410#define RLC_THREAD1_DELAY__CU_IDEL_DELAY__SHIFT 0x0
9411#define RLC_THREAD1_DELAY__LBPW_INNER_LOOP_DELAY_MASK 0xff00
9412#define RLC_THREAD1_DELAY__LBPW_INNER_LOOP_DELAY__SHIFT 0x8
9413#define RLC_THREAD1_DELAY__LBPW_OUTER_LOOP_DELAY_MASK 0xff0000
9414#define RLC_THREAD1_DELAY__LBPW_OUTER_LOOP_DELAY__SHIFT 0x10
9415#define RLC_THREAD1_DELAY__SPARE_MASK 0xff000000
9416#define RLC_THREAD1_DELAY__SPARE__SHIFT 0x18
9417#define RLC_PG_ALWAYS_ON_CU_MASK__AON_CU_MASK_MASK 0xffffffff
9418#define RLC_PG_ALWAYS_ON_CU_MASK__AON_CU_MASK__SHIFT 0x0
9419#define RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK 0xff
9420#define RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT 0x0
9421#define RLC_MAX_PG_CU__SPARE_MASK 0xffffff00
9422#define RLC_MAX_PG_CU__SPARE__SHIFT 0x8
9423#define RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK 0x1
9424#define RLC_AUTO_PG_CTRL__AUTO_PG_EN__SHIFT 0x0
9425#define RLC_AUTO_PG_CTRL__AUTO_GRBM_REG_SAVE_ON_IDLE_EN_MASK 0x2
9426#define RLC_AUTO_PG_CTRL__AUTO_GRBM_REG_SAVE_ON_IDLE_EN__SHIFT 0x1
9427#define RLC_AUTO_PG_CTRL__AUTO_WAKE_UP_EN_MASK 0x4
9428#define RLC_AUTO_PG_CTRL__AUTO_WAKE_UP_EN__SHIFT 0x2
9429#define RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK 0x7fff8
9430#define RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT 0x3
9431#define RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD_MASK 0xfff80000
9432#define RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD__SHIFT 0x13
9433#define RLC_SMU_GRBM_REG_SAVE_CTRL__START_GRBM_REG_SAVE_MASK 0x1
9434#define RLC_SMU_GRBM_REG_SAVE_CTRL__START_GRBM_REG_SAVE__SHIFT 0x0
9435#define RLC_SMU_GRBM_REG_SAVE_CTRL__SPARE_MASK 0xfffffffe
9436#define RLC_SMU_GRBM_REG_SAVE_CTRL__SPARE__SHIFT 0x1
9437#define RLC_SERDES_RD_MASTER_INDEX__CU_ID_MASK 0xf
9438#define RLC_SERDES_RD_MASTER_INDEX__CU_ID__SHIFT 0x0
9439#define RLC_SERDES_RD_MASTER_INDEX__SH_ID_MASK 0x30
9440#define RLC_SERDES_RD_MASTER_INDEX__SH_ID__SHIFT 0x4
9441#define RLC_SERDES_RD_MASTER_INDEX__SE_ID_MASK 0x1c0
9442#define RLC_SERDES_RD_MASTER_INDEX__SE_ID__SHIFT 0x6
9443#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU_ID_MASK 0x200
9444#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU_ID__SHIFT 0x9
9445#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU_MASK 0x400
9446#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU__SHIFT 0xa
9447#define RLC_SERDES_RD_MASTER_INDEX__NON_SE_MASK 0x7800
9448#define RLC_SERDES_RD_MASTER_INDEX__NON_SE__SHIFT 0xb
9449#define RLC_SERDES_RD_MASTER_INDEX__DATA_REG_ID_MASK 0x18000
9450#define RLC_SERDES_RD_MASTER_INDEX__DATA_REG_ID__SHIFT 0xf
9451#define RLC_SERDES_RD_MASTER_INDEX__SPARE_MASK 0xfffe0000
9452#define RLC_SERDES_RD_MASTER_INDEX__SPARE__SHIFT 0x11
9453#define RLC_SERDES_RD_DATA_0__DATA_MASK 0xffffffff
9454#define RLC_SERDES_RD_DATA_0__DATA__SHIFT 0x0
9455#define RLC_SERDES_RD_DATA_1__DATA_MASK 0xffffffff
9456#define RLC_SERDES_RD_DATA_1__DATA__SHIFT 0x0
9457#define RLC_SERDES_RD_DATA_2__DATA_MASK 0xffffffff
9458#define RLC_SERDES_RD_DATA_2__DATA__SHIFT 0x0
9459#define RLC_SERDES_WR_CU_MASTER_MASK__MASTER_MASK_MASK 0xffffffff
9460#define RLC_SERDES_WR_CU_MASTER_MASK__MASTER_MASK__SHIFT 0x0
9461#define RLC_SERDES_WR_NONCU_MASTER_MASK__SE_MASTER_MASK_MASK 0xffff
9462#define RLC_SERDES_WR_NONCU_MASTER_MASK__SE_MASTER_MASK__SHIFT 0x0
9463#define RLC_SERDES_WR_NONCU_MASTER_MASK__GC_MASTER_MASK_MASK 0x10000
9464#define RLC_SERDES_WR_NONCU_MASTER_MASK__GC_MASTER_MASK__SHIFT 0x10
9465#define RLC_SERDES_WR_NONCU_MASTER_MASK__GC_GFX_MASTER_MASK_MASK 0x20000
9466#define RLC_SERDES_WR_NONCU_MASTER_MASK__GC_GFX_MASTER_MASK__SHIFT 0x11
9467#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC0_MASTER_MASK_MASK 0x40000
9468#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC0_MASTER_MASK__SHIFT 0x12
9469#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC1_MASTER_MASK_MASK 0x80000
9470#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC1_MASTER_MASK__SHIFT 0x13
9471#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE0_MASTER_MASK_MASK 0x100000
9472#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE0_MASTER_MASK__SHIFT 0x14
9473#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE1_MASTER_MASK_MASK 0x200000
9474#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE1_MASTER_MASK__SHIFT 0x15
9475#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE2_MASTER_MASK_MASK 0x400000
9476#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE2_MASTER_MASK__SHIFT 0x16
9477#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE3_MASTER_MASK_MASK 0x800000
9478#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE3_MASTER_MASK__SHIFT 0x17
9479#define RLC_SERDES_WR_NONCU_MASTER_MASK__RESERVED_MASK 0xff000000
9480#define RLC_SERDES_WR_NONCU_MASTER_MASK__RESERVED__SHIFT 0x18
9481#define RLC_SERDES_WR_CTRL__BPM_ADDR_MASK 0xff
9482#define RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT 0x0
9483#define RLC_SERDES_WR_CTRL__POWER_DOWN_MASK 0x100
9484#define RLC_SERDES_WR_CTRL__POWER_DOWN__SHIFT 0x8
9485#define RLC_SERDES_WR_CTRL__POWER_UP_MASK 0x200
9486#define RLC_SERDES_WR_CTRL__POWER_UP__SHIFT 0x9
9487#define RLC_SERDES_WR_CTRL__P1_SELECT_MASK 0x400
9488#define RLC_SERDES_WR_CTRL__P1_SELECT__SHIFT 0xa
9489#define RLC_SERDES_WR_CTRL__P2_SELECT_MASK 0x800
9490#define RLC_SERDES_WR_CTRL__P2_SELECT__SHIFT 0xb
9491#define RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK 0x1000
9492#define RLC_SERDES_WR_CTRL__WRITE_COMMAND__SHIFT 0xc
9493#define RLC_SERDES_WR_CTRL__READ_COMMAND_MASK 0x2000
9494#define RLC_SERDES_WR_CTRL__READ_COMMAND__SHIFT 0xd
9495#define RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK 0x4000
9496#define RLC_SERDES_WR_CTRL__RDDATA_RESET__SHIFT 0xe
9497#define RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK 0x8000
9498#define RLC_SERDES_WR_CTRL__SHORT_FORMAT__SHIFT 0xf
9499#define RLC_SERDES_WR_CTRL__BPM_DATA_MASK 0x3ff0000
9500#define RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT 0x10
9501#define RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK 0x4000000
9502#define RLC_SERDES_WR_CTRL__SRBM_OVERRIDE__SHIFT 0x1a
9503#define RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR_MASK 0x8000000
9504#define RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR__SHIFT 0x1b
9505#define RLC_SERDES_WR_CTRL__REG_ADDR_MASK 0xf0000000
9506#define RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT 0x1c
9507#define RLC_SERDES_WR_DATA__DATA_MASK 0xffffffff
9508#define RLC_SERDES_WR_DATA__DATA__SHIFT 0x0
9509#define RLC_SERDES_CU_MASTER_BUSY__BUSY_BUSY_MASK 0xffffffff
9510#define RLC_SERDES_CU_MASTER_BUSY__BUSY_BUSY__SHIFT 0x0
9511#define RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK 0xffff
9512#define RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY__SHIFT 0x0
9513#define RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK 0x10000
9514#define RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY__SHIFT 0x10
9515#define RLC_SERDES_NONCU_MASTER_BUSY__GC_GFX_MASTER_BUSY_MASK 0x20000
9516#define RLC_SERDES_NONCU_MASTER_BUSY__GC_GFX_MASTER_BUSY__SHIFT 0x11
9517#define RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK 0x40000
9518#define RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY__SHIFT 0x12
9519#define RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK 0x80000
9520#define RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY__SHIFT 0x13
9521#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE0_MASTER_BUSY_MASK 0x100000
9522#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE0_MASTER_BUSY__SHIFT 0x14
9523#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE1_MASTER_BUSY_MASK 0x200000
9524#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE1_MASTER_BUSY__SHIFT 0x15
9525#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE2_MASTER_BUSY_MASK 0x400000
9526#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE2_MASTER_BUSY__SHIFT 0x16
9527#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE3_MASTER_BUSY_MASK 0x800000
9528#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE3_MASTER_BUSY__SHIFT 0x17
9529#define RLC_SERDES_NONCU_MASTER_BUSY__RESERVED_MASK 0xff000000
9530#define RLC_SERDES_NONCU_MASTER_BUSY__RESERVED__SHIFT 0x18
9531#define RLC_GPM_GENERAL_0__DATA_MASK 0xffffffff
9532#define RLC_GPM_GENERAL_0__DATA__SHIFT 0x0
9533#define RLC_GPM_GENERAL_1__DATA_MASK 0xffffffff
9534#define RLC_GPM_GENERAL_1__DATA__SHIFT 0x0
9535#define RLC_GPM_GENERAL_2__DATA_MASK 0xffffffff
9536#define RLC_GPM_GENERAL_2__DATA__SHIFT 0x0
9537#define RLC_GPM_GENERAL_3__DATA_MASK 0xffffffff
9538#define RLC_GPM_GENERAL_3__DATA__SHIFT 0x0
9539#define RLC_GPM_GENERAL_4__DATA_MASK 0xffffffff
9540#define RLC_GPM_GENERAL_4__DATA__SHIFT 0x0
9541#define RLC_GPM_GENERAL_5__DATA_MASK 0xffffffff
9542#define RLC_GPM_GENERAL_5__DATA__SHIFT 0x0
9543#define RLC_GPM_GENERAL_6__DATA_MASK 0xffffffff
9544#define RLC_GPM_GENERAL_6__DATA__SHIFT 0x0
9545#define RLC_GPM_GENERAL_7__DATA_MASK 0xffffffff
9546#define RLC_GPM_GENERAL_7__DATA__SHIFT 0x0
9547#define RLC_GPM_SCRATCH_ADDR__ADDR_MASK 0x1ff
9548#define RLC_GPM_SCRATCH_ADDR__ADDR__SHIFT 0x0
9549#define RLC_GPM_SCRATCH_ADDR__RESERVED_MASK 0xfffffe00
9550#define RLC_GPM_SCRATCH_ADDR__RESERVED__SHIFT 0x9
9551#define RLC_GPM_SCRATCH_DATA__DATA_MASK 0xffffffff
9552#define RLC_GPM_SCRATCH_DATA__DATA__SHIFT 0x0
9553#define RLC_STATIC_PG_STATUS__PG_STATUS_CU_MASK_MASK 0xffffffff
9554#define RLC_STATIC_PG_STATUS__PG_STATUS_CU_MASK__SHIFT 0x0
9555#define RLC_GPM_PERF_COUNT_0__FEATURE_SEL_MASK 0xf
9556#define RLC_GPM_PERF_COUNT_0__FEATURE_SEL__SHIFT 0x0
9557#define RLC_GPM_PERF_COUNT_0__SE_INDEX_MASK 0xf0
9558#define RLC_GPM_PERF_COUNT_0__SE_INDEX__SHIFT 0x4
9559#define RLC_GPM_PERF_COUNT_0__SH_INDEX_MASK 0xf00
9560#define RLC_GPM_PERF_COUNT_0__SH_INDEX__SHIFT 0x8
9561#define RLC_GPM_PERF_COUNT_0__CU_INDEX_MASK 0xf000
9562#define RLC_GPM_PERF_COUNT_0__CU_INDEX__SHIFT 0xc
9563#define RLC_GPM_PERF_COUNT_0__EVENT_SEL_MASK 0x30000
9564#define RLC_GPM_PERF_COUNT_0__EVENT_SEL__SHIFT 0x10
9565#define RLC_GPM_PERF_COUNT_0__UNUSED_MASK 0xc0000
9566#define RLC_GPM_PERF_COUNT_0__UNUSED__SHIFT 0x12
9567#define RLC_GPM_PERF_COUNT_0__ENABLE_MASK 0x100000
9568#define RLC_GPM_PERF_COUNT_0__ENABLE__SHIFT 0x14
9569#define RLC_GPM_PERF_COUNT_0__RESERVED_MASK 0xffe00000
9570#define RLC_GPM_PERF_COUNT_0__RESERVED__SHIFT 0x15
9571#define RLC_GPM_PERF_COUNT_1__FEATURE_SEL_MASK 0xf
9572#define RLC_GPM_PERF_COUNT_1__FEATURE_SEL__SHIFT 0x0
9573#define RLC_GPM_PERF_COUNT_1__SE_INDEX_MASK 0xf0
9574#define RLC_GPM_PERF_COUNT_1__SE_INDEX__SHIFT 0x4
9575#define RLC_GPM_PERF_COUNT_1__SH_INDEX_MASK 0xf00
9576#define RLC_GPM_PERF_COUNT_1__SH_INDEX__SHIFT 0x8
9577#define RLC_GPM_PERF_COUNT_1__CU_INDEX_MASK 0xf000
9578#define RLC_GPM_PERF_COUNT_1__CU_INDEX__SHIFT 0xc
9579#define RLC_GPM_PERF_COUNT_1__EVENT_SEL_MASK 0x30000
9580#define RLC_GPM_PERF_COUNT_1__EVENT_SEL__SHIFT 0x10
9581#define RLC_GPM_PERF_COUNT_1__UNUSED_MASK 0xc0000
9582#define RLC_GPM_PERF_COUNT_1__UNUSED__SHIFT 0x12
9583#define RLC_GPM_PERF_COUNT_1__ENABLE_MASK 0x100000
9584#define RLC_GPM_PERF_COUNT_1__ENABLE__SHIFT 0x14
9585#define RLC_GPM_PERF_COUNT_1__RESERVED_MASK 0xffe00000
9586#define RLC_GPM_PERF_COUNT_1__RESERVED__SHIFT 0x15
9587#define RLC_GPR_REG1__DATA_MASK 0xffffffff
9588#define RLC_GPR_REG1__DATA__SHIFT 0x0
9589#define RLC_GPR_REG2__DATA_MASK 0xffffffff
9590#define RLC_GPR_REG2__DATA__SHIFT 0x0
9591#define RLC_MGCG_CTRL__MGCG_EN_MASK 0x1
9592#define RLC_MGCG_CTRL__MGCG_EN__SHIFT 0x0
9593#define RLC_MGCG_CTRL__SILICON_EN_MASK 0x2
9594#define RLC_MGCG_CTRL__SILICON_EN__SHIFT 0x1
9595#define RLC_MGCG_CTRL__SIMULATION_EN_MASK 0x4
9596#define RLC_MGCG_CTRL__SIMULATION_EN__SHIFT 0x2
9597#define RLC_MGCG_CTRL__ON_DELAY_MASK 0x78
9598#define RLC_MGCG_CTRL__ON_DELAY__SHIFT 0x3
9599#define RLC_MGCG_CTRL__OFF_HYSTERESIS_MASK 0x7f80
9600#define RLC_MGCG_CTRL__OFF_HYSTERESIS__SHIFT 0x7
9601#define RLC_MGCG_CTRL__GC_CAC_MGCG_CLK_CNTL_MASK 0x8000
9602#define RLC_MGCG_CTRL__GC_CAC_MGCG_CLK_CNTL__SHIFT 0xf
9603#define RLC_MGCG_CTRL__SE_CAC_MGCG_CLK_CNTL_MASK 0x10000
9604#define RLC_MGCG_CTRL__SE_CAC_MGCG_CLK_CNTL__SHIFT 0x10
9605#define RLC_MGCG_CTRL__SPARE_MASK 0xfffe0000
9606#define RLC_MGCG_CTRL__SPARE__SHIFT 0x11
9607#define RLC_GPM_THREAD_RESET__THREAD0_RESET_MASK 0x1
9608#define RLC_GPM_THREAD_RESET__THREAD0_RESET__SHIFT 0x0
9609#define RLC_GPM_THREAD_RESET__THREAD1_RESET_MASK 0x2
9610#define RLC_GPM_THREAD_RESET__THREAD1_RESET__SHIFT 0x1
9611#define RLC_GPM_THREAD_RESET__THREAD2_RESET_MASK 0x4
9612#define RLC_GPM_THREAD_RESET__THREAD2_RESET__SHIFT 0x2
9613#define RLC_GPM_THREAD_RESET__THREAD3_RESET_MASK 0x8
9614#define RLC_GPM_THREAD_RESET__THREAD3_RESET__SHIFT 0x3
9615#define RLC_GPM_THREAD_RESET__RESERVED_MASK 0xfffffff0
9616#define RLC_GPM_THREAD_RESET__RESERVED__SHIFT 0x4
9617#define RLC_SPM_VMID__RLC_SPM_VMID_MASK 0xf
9618#define RLC_SPM_VMID__RLC_SPM_VMID__SHIFT 0x0
9619#define RLC_SPM_VMID__RESERVED_MASK 0xfffffff0
9620#define RLC_SPM_VMID__RESERVED__SHIFT 0x4
9621#define RLC_SPM_INT_CNTL__RLC_SPM_INT_CNTL_MASK 0x1
9622#define RLC_SPM_INT_CNTL__RLC_SPM_INT_CNTL__SHIFT 0x0
9623#define RLC_SPM_INT_CNTL__RESERVED_MASK 0xfffffffe
/*
 * Bit-field MASK/SHIFT definitions for the RLC (SPM, SRM, GPU-IOV) and
 * SPI_PS_INPUT_CNTL_* registers.
 *
 * NOTE(review): this reads as an auto-generated AMD register header
 * (amdgpu "*_sh_mask.h" style) — the values come from the hardware
 * database and must not be hand-edited.
 *
 * Fix applied: stray decimal line numbers that a text/web extraction had
 * fused onto every "#define" (e.g. "9624#define ...") have been stripped,
 * restoring valid preprocessor directives. All MASK/SHIFT values are
 * preserved exactly; each __SHIFT is the bit index of the lowest set bit
 * of its matching _MASK.
 */
#define RLC_SPM_INT_CNTL__RESERVED__SHIFT 0x1
#define RLC_SPM_INT_STATUS__RLC_SPM_INT_STATUS_MASK 0x1
#define RLC_SPM_INT_STATUS__RLC_SPM_INT_STATUS__SHIFT 0x0
#define RLC_SPM_INT_STATUS__RESERVED_MASK 0xfffffffe
#define RLC_SPM_INT_STATUS__RESERVED__SHIFT 0x1
#define RLC_SPM_DEBUG_SELECT__SELECT_MASK 0xff
#define RLC_SPM_DEBUG_SELECT__SELECT__SHIFT 0x0
#define RLC_SPM_DEBUG_SELECT__RESERVED_MASK 0x7f00
#define RLC_SPM_DEBUG_SELECT__RESERVED__SHIFT 0x8
#define RLC_SPM_DEBUG_SELECT__RLC_SPM_DEBUG_MODE_MASK 0x8000
#define RLC_SPM_DEBUG_SELECT__RLC_SPM_DEBUG_MODE__SHIFT 0xf
#define RLC_SPM_DEBUG_SELECT__RLC_SPM_NUM_SAMPLE_MASK 0xffff0000
#define RLC_SPM_DEBUG_SELECT__RLC_SPM_NUM_SAMPLE__SHIFT 0x10
#define RLC_SPM_DEBUG__DATA_MASK 0xffffffff
#define RLC_SPM_DEBUG__DATA__SHIFT 0x0
#define RLC_SMU_MESSAGE__CMD_MASK 0xffffffff
#define RLC_SMU_MESSAGE__CMD__SHIFT 0x0
#define RLC_GPM_LOG_SIZE__SIZE_MASK 0xffffffff
#define RLC_GPM_LOG_SIZE__SIZE__SHIFT 0x0
#define RLC_GPM_LOG_CONT__CONT_MASK 0xffffffff
#define RLC_GPM_LOG_CONT__CONT__SHIFT 0x0
#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK 0xff
#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT 0x0
#define RLC_PG_DELAY_3__RESERVED_MASK 0xffffff00
#define RLC_PG_DELAY_3__RESERVED__SHIFT 0x8
#define RLC_GPM_INT_DISABLE_TH0__DISABLE_MASK 0xffffffff
#define RLC_GPM_INT_DISABLE_TH0__DISABLE__SHIFT 0x0
#define RLC_GPM_INT_DISABLE_TH1__DISABLE_MASK 0xffffffff
#define RLC_GPM_INT_DISABLE_TH1__DISABLE__SHIFT 0x0
#define RLC_GPM_INT_FORCE_TH0__FORCE_MASK 0xffffffff
#define RLC_GPM_INT_FORCE_TH0__FORCE__SHIFT 0x0
#define RLC_GPM_INT_FORCE_TH1__FORCE_MASK 0xffffffff
#define RLC_GPM_INT_FORCE_TH1__FORCE__SHIFT 0x0
#define RLC_SRM_CNTL__SRM_ENABLE_MASK 0x1
#define RLC_SRM_CNTL__SRM_ENABLE__SHIFT 0x0
#define RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK 0x2
#define RLC_SRM_CNTL__AUTO_INCR_ADDR__SHIFT 0x1
#define RLC_SRM_CNTL__RESERVED_MASK 0xfffffffc
#define RLC_SRM_CNTL__RESERVED__SHIFT 0x2
#define RLC_SRM_DEBUG_SELECT__SELECT_MASK 0xff
#define RLC_SRM_DEBUG_SELECT__SELECT__SHIFT 0x0
#define RLC_SRM_DEBUG_SELECT__RESERVED_MASK 0xffffff00
#define RLC_SRM_DEBUG_SELECT__RESERVED__SHIFT 0x8
#define RLC_SRM_DEBUG__DATA_MASK 0xffffffff
#define RLC_SRM_DEBUG__DATA__SHIFT 0x0
#define RLC_SRM_ARAM_ADDR__ADDR_MASK 0x3ff
#define RLC_SRM_ARAM_ADDR__ADDR__SHIFT 0x0
#define RLC_SRM_ARAM_ADDR__RESERVED_MASK 0xfffffc00
#define RLC_SRM_ARAM_ADDR__RESERVED__SHIFT 0xa
#define RLC_SRM_ARAM_DATA__DATA_MASK 0xffffffff
#define RLC_SRM_ARAM_DATA__DATA__SHIFT 0x0
#define RLC_SRM_DRAM_ADDR__ADDR_MASK 0x3ff
#define RLC_SRM_DRAM_ADDR__ADDR__SHIFT 0x0
#define RLC_SRM_DRAM_ADDR__RESERVED_MASK 0xfffffc00
#define RLC_SRM_DRAM_ADDR__RESERVED__SHIFT 0xa
#define RLC_SRM_DRAM_DATA__DATA_MASK 0xffffffff
#define RLC_SRM_DRAM_DATA__DATA__SHIFT 0x0
#define RLC_SRM_GPM_COMMAND__OP_MASK 0x1
#define RLC_SRM_GPM_COMMAND__OP__SHIFT 0x0
#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_MASK 0x2
#define RLC_SRM_GPM_COMMAND__INDEX_CNTL__SHIFT 0x1
#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_NUM_MASK 0x1c
#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_NUM__SHIFT 0x2
#define RLC_SRM_GPM_COMMAND__SIZE_MASK 0x1ffe0
#define RLC_SRM_GPM_COMMAND__SIZE__SHIFT 0x5
#define RLC_SRM_GPM_COMMAND__START_OFFSET_MASK 0x1ffe0000
#define RLC_SRM_GPM_COMMAND__START_OFFSET__SHIFT 0x11
#define RLC_SRM_GPM_COMMAND__RESERVED1_MASK 0x60000000
#define RLC_SRM_GPM_COMMAND__RESERVED1__SHIFT 0x1d
#define RLC_SRM_GPM_COMMAND__DEST_MEMORY_MASK 0x80000000
#define RLC_SRM_GPM_COMMAND__DEST_MEMORY__SHIFT 0x1f
#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_EMPTY_MASK 0x1
#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_EMPTY__SHIFT 0x0
#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_FULL_MASK 0x2
#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_FULL__SHIFT 0x1
#define RLC_SRM_GPM_COMMAND_STATUS__RESERVED_MASK 0xfffffffc
#define RLC_SRM_GPM_COMMAND_STATUS__RESERVED__SHIFT 0x2
#define RLC_SRM_RLCV_COMMAND__OP_MASK 0x1
#define RLC_SRM_RLCV_COMMAND__OP__SHIFT 0x0
#define RLC_SRM_RLCV_COMMAND__RESERVED_MASK 0xe
#define RLC_SRM_RLCV_COMMAND__RESERVED__SHIFT 0x1
#define RLC_SRM_RLCV_COMMAND__SIZE_MASK 0xfff0
#define RLC_SRM_RLCV_COMMAND__SIZE__SHIFT 0x4
#define RLC_SRM_RLCV_COMMAND__START_OFFSET_MASK 0xfff0000
#define RLC_SRM_RLCV_COMMAND__START_OFFSET__SHIFT 0x10
#define RLC_SRM_RLCV_COMMAND__RESERVED1_MASK 0x70000000
#define RLC_SRM_RLCV_COMMAND__RESERVED1__SHIFT 0x1c
#define RLC_SRM_RLCV_COMMAND__DEST_MEMORY_MASK 0x80000000
#define RLC_SRM_RLCV_COMMAND__DEST_MEMORY__SHIFT 0x1f
#define RLC_SRM_RLCV_COMMAND_STATUS__FIFO_EMPTY_MASK 0x1
#define RLC_SRM_RLCV_COMMAND_STATUS__FIFO_EMPTY__SHIFT 0x0
#define RLC_SRM_RLCV_COMMAND_STATUS__FIFO_FULL_MASK 0x2
#define RLC_SRM_RLCV_COMMAND_STATUS__FIFO_FULL__SHIFT 0x1
#define RLC_SRM_RLCV_COMMAND_STATUS__RESERVED_MASK 0xfffffffc
#define RLC_SRM_RLCV_COMMAND_STATUS__RESERVED__SHIFT 0x2
#define RLC_SRM_INDEX_CNTL_ADDR_0__ADDRESS_MASK 0xffff
#define RLC_SRM_INDEX_CNTL_ADDR_0__ADDRESS__SHIFT 0x0
#define RLC_SRM_INDEX_CNTL_ADDR_0__RESERVED_MASK 0xffff0000
#define RLC_SRM_INDEX_CNTL_ADDR_0__RESERVED__SHIFT 0x10
#define RLC_SRM_INDEX_CNTL_ADDR_1__ADDRESS_MASK 0xffff
#define RLC_SRM_INDEX_CNTL_ADDR_1__ADDRESS__SHIFT 0x0
#define RLC_SRM_INDEX_CNTL_ADDR_1__RESERVED_MASK 0xffff0000
#define RLC_SRM_INDEX_CNTL_ADDR_1__RESERVED__SHIFT 0x10
#define RLC_SRM_INDEX_CNTL_ADDR_2__ADDRESS_MASK 0xffff
#define RLC_SRM_INDEX_CNTL_ADDR_2__ADDRESS__SHIFT 0x0
#define RLC_SRM_INDEX_CNTL_ADDR_2__RESERVED_MASK 0xffff0000
#define RLC_SRM_INDEX_CNTL_ADDR_2__RESERVED__SHIFT 0x10
#define RLC_SRM_INDEX_CNTL_ADDR_3__ADDRESS_MASK 0xffff
#define RLC_SRM_INDEX_CNTL_ADDR_3__ADDRESS__SHIFT 0x0
#define RLC_SRM_INDEX_CNTL_ADDR_3__RESERVED_MASK 0xffff0000
#define RLC_SRM_INDEX_CNTL_ADDR_3__RESERVED__SHIFT 0x10
#define RLC_SRM_INDEX_CNTL_ADDR_4__ADDRESS_MASK 0xffff
#define RLC_SRM_INDEX_CNTL_ADDR_4__ADDRESS__SHIFT 0x0
#define RLC_SRM_INDEX_CNTL_ADDR_4__RESERVED_MASK 0xffff0000
#define RLC_SRM_INDEX_CNTL_ADDR_4__RESERVED__SHIFT 0x10
#define RLC_SRM_INDEX_CNTL_ADDR_5__ADDRESS_MASK 0xffff
#define RLC_SRM_INDEX_CNTL_ADDR_5__ADDRESS__SHIFT 0x0
#define RLC_SRM_INDEX_CNTL_ADDR_5__RESERVED_MASK 0xffff0000
#define RLC_SRM_INDEX_CNTL_ADDR_5__RESERVED__SHIFT 0x10
#define RLC_SRM_INDEX_CNTL_ADDR_6__ADDRESS_MASK 0xffff
#define RLC_SRM_INDEX_CNTL_ADDR_6__ADDRESS__SHIFT 0x0
#define RLC_SRM_INDEX_CNTL_ADDR_6__RESERVED_MASK 0xffff0000
#define RLC_SRM_INDEX_CNTL_ADDR_6__RESERVED__SHIFT 0x10
#define RLC_SRM_INDEX_CNTL_ADDR_7__ADDRESS_MASK 0xffff
#define RLC_SRM_INDEX_CNTL_ADDR_7__ADDRESS__SHIFT 0x0
#define RLC_SRM_INDEX_CNTL_ADDR_7__RESERVED_MASK 0xffff0000
#define RLC_SRM_INDEX_CNTL_ADDR_7__RESERVED__SHIFT 0x10
#define RLC_SRM_INDEX_CNTL_DATA_0__DATA_MASK 0xffffffff
#define RLC_SRM_INDEX_CNTL_DATA_0__DATA__SHIFT 0x0
#define RLC_SRM_INDEX_CNTL_DATA_1__DATA_MASK 0xffffffff
#define RLC_SRM_INDEX_CNTL_DATA_1__DATA__SHIFT 0x0
#define RLC_SRM_INDEX_CNTL_DATA_2__DATA_MASK 0xffffffff
#define RLC_SRM_INDEX_CNTL_DATA_2__DATA__SHIFT 0x0
#define RLC_SRM_INDEX_CNTL_DATA_3__DATA_MASK 0xffffffff
#define RLC_SRM_INDEX_CNTL_DATA_3__DATA__SHIFT 0x0
#define RLC_SRM_INDEX_CNTL_DATA_4__DATA_MASK 0xffffffff
#define RLC_SRM_INDEX_CNTL_DATA_4__DATA__SHIFT 0x0
#define RLC_SRM_INDEX_CNTL_DATA_5__DATA_MASK 0xffffffff
#define RLC_SRM_INDEX_CNTL_DATA_5__DATA__SHIFT 0x0
#define RLC_SRM_INDEX_CNTL_DATA_6__DATA_MASK 0xffffffff
#define RLC_SRM_INDEX_CNTL_DATA_6__DATA__SHIFT 0x0
#define RLC_SRM_INDEX_CNTL_DATA_7__DATA_MASK 0xffffffff
#define RLC_SRM_INDEX_CNTL_DATA_7__DATA__SHIFT 0x0
#define RLC_SRM_STAT__SRM_STATUS_MASK 0x1
#define RLC_SRM_STAT__SRM_STATUS__SHIFT 0x0
#define RLC_SRM_STAT__RESERVED_MASK 0xfffffffe
#define RLC_SRM_STAT__RESERVED__SHIFT 0x1
#define RLC_SRM_GPM_ABORT__ABORT_MASK 0x1
#define RLC_SRM_GPM_ABORT__ABORT__SHIFT 0x0
#define RLC_SRM_GPM_ABORT__RESERVED_MASK 0xfffffffe
#define RLC_SRM_GPM_ABORT__RESERVED__SHIFT 0x1
#define RLC_CSIB_ADDR_LO__ADDRESS_MASK 0xffffffff
#define RLC_CSIB_ADDR_LO__ADDRESS__SHIFT 0x0
#define RLC_CSIB_ADDR_HI__ADDRESS_MASK 0xffff
#define RLC_CSIB_ADDR_HI__ADDRESS__SHIFT 0x0
#define RLC_CSIB_LENGTH__LENGTH_MASK 0xffffffff
#define RLC_CSIB_LENGTH__LENGTH__SHIFT 0x0
#define RLC_CP_RESPONSE0__RESPONSE_MASK 0xffffffff
#define RLC_CP_RESPONSE0__RESPONSE__SHIFT 0x0
#define RLC_CP_RESPONSE1__RESPONSE_MASK 0xffffffff
#define RLC_CP_RESPONSE1__RESPONSE__SHIFT 0x0
#define RLC_CP_RESPONSE2__RESPONSE_MASK 0xffffffff
#define RLC_CP_RESPONSE2__RESPONSE__SHIFT 0x0
#define RLC_CP_RESPONSE3__RESPONSE_MASK 0xffffffff
#define RLC_CP_RESPONSE3__RESPONSE__SHIFT 0x0
#define RLC_SMU_COMMAND__CMD_MASK 0xffffffff
#define RLC_SMU_COMMAND__CMD__SHIFT 0x0
#define RLC_CP_SCHEDULERS__scheduler0_MASK 0xff
#define RLC_CP_SCHEDULERS__scheduler0__SHIFT 0x0
#define RLC_CP_SCHEDULERS__scheduler1_MASK 0xff00
#define RLC_CP_SCHEDULERS__scheduler1__SHIFT 0x8
#define RLC_CP_SCHEDULERS__scheduler2_MASK 0xff0000
#define RLC_CP_SCHEDULERS__scheduler2__SHIFT 0x10
#define RLC_CP_SCHEDULERS__scheduler3_MASK 0xff000000
#define RLC_CP_SCHEDULERS__scheduler3__SHIFT 0x18
#define RLC_SMU_ARGUMENT_1__ARG_MASK 0xffffffff
#define RLC_SMU_ARGUMENT_1__ARG__SHIFT 0x0
#define RLC_SMU_ARGUMENT_2__ARG_MASK 0xffffffff
#define RLC_SMU_ARGUMENT_2__ARG__SHIFT 0x0
#define RLC_GPM_GENERAL_8__DATA_MASK 0xffffffff
#define RLC_GPM_GENERAL_8__DATA__SHIFT 0x0
#define RLC_GPM_GENERAL_9__DATA_MASK 0xffffffff
#define RLC_GPM_GENERAL_9__DATA__SHIFT 0x0
#define RLC_GPM_GENERAL_10__DATA_MASK 0xffffffff
#define RLC_GPM_GENERAL_10__DATA__SHIFT 0x0
#define RLC_GPM_GENERAL_11__DATA_MASK 0xffffffff
#define RLC_GPM_GENERAL_11__DATA__SHIFT 0x0
#define RLC_GPM_GENERAL_12__DATA_MASK 0xffffffff
#define RLC_GPM_GENERAL_12__DATA__SHIFT 0x0
#define RLC_SPM_PERFMON_CNTL__RESERVED1_MASK 0xfff
#define RLC_SPM_PERFMON_CNTL__RESERVED1__SHIFT 0x0
#define RLC_SPM_PERFMON_CNTL__PERFMON_RING_MODE_MASK 0x3000
#define RLC_SPM_PERFMON_CNTL__PERFMON_RING_MODE__SHIFT 0xc
#define RLC_SPM_PERFMON_CNTL__RESERVED_MASK 0xc000
#define RLC_SPM_PERFMON_CNTL__RESERVED__SHIFT 0xe
#define RLC_SPM_PERFMON_CNTL__PERFMON_SAMPLE_INTERVAL_MASK 0xffff0000
#define RLC_SPM_PERFMON_CNTL__PERFMON_SAMPLE_INTERVAL__SHIFT 0x10
#define RLC_SPM_PERFMON_RING_BASE_LO__RING_BASE_LO_MASK 0xffffffff
#define RLC_SPM_PERFMON_RING_BASE_LO__RING_BASE_LO__SHIFT 0x0
#define RLC_SPM_PERFMON_RING_BASE_HI__RING_BASE_HI_MASK 0xffff
#define RLC_SPM_PERFMON_RING_BASE_HI__RING_BASE_HI__SHIFT 0x0
#define RLC_SPM_PERFMON_RING_BASE_HI__RESERVED_MASK 0xffff0000
#define RLC_SPM_PERFMON_RING_BASE_HI__RESERVED__SHIFT 0x10
#define RLC_SPM_PERFMON_RING_SIZE__RING_BASE_SIZE_MASK 0xffffffff
#define RLC_SPM_PERFMON_RING_SIZE__RING_BASE_SIZE__SHIFT 0x0
#define RLC_SPM_PERFMON_SEGMENT_SIZE__PERFMON_SEGMENT_SIZE_MASK 0xff
#define RLC_SPM_PERFMON_SEGMENT_SIZE__PERFMON_SEGMENT_SIZE__SHIFT 0x0
#define RLC_SPM_PERFMON_SEGMENT_SIZE__RESERVED1_MASK 0x700
#define RLC_SPM_PERFMON_SEGMENT_SIZE__RESERVED1__SHIFT 0x8
#define RLC_SPM_PERFMON_SEGMENT_SIZE__GLOBAL_NUM_LINE_MASK 0xf800
#define RLC_SPM_PERFMON_SEGMENT_SIZE__GLOBAL_NUM_LINE__SHIFT 0xb
#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE0_NUM_LINE_MASK 0x1f0000
#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE0_NUM_LINE__SHIFT 0x10
#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE1_NUM_LINE_MASK 0x3e00000
#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE1_NUM_LINE__SHIFT 0x15
#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE2_NUM_LINE_MASK 0x7c000000
#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE2_NUM_LINE__SHIFT 0x1a
#define RLC_SPM_PERFMON_SEGMENT_SIZE__RESERVED_MASK 0x80000000
#define RLC_SPM_PERFMON_SEGMENT_SIZE__RESERVED__SHIFT 0x1f
#define RLC_SPM_SE_MUXSEL_ADDR__PERFMON_SEL_ADDR_MASK 0xffffffff
#define RLC_SPM_SE_MUXSEL_ADDR__PERFMON_SEL_ADDR__SHIFT 0x0
#define RLC_SPM_SE_MUXSEL_DATA__PERFMON_SEL_DATA_MASK 0xffffffff
#define RLC_SPM_SE_MUXSEL_DATA__PERFMON_SEL_DATA__SHIFT 0x0
#define RLC_SPM_CPG_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0xff
#define RLC_SPM_CPG_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
#define RLC_SPM_CPG_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xffffff00
#define RLC_SPM_CPG_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
#define RLC_SPM_CPC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0xff
#define RLC_SPM_CPC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
#define RLC_SPM_CPC_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xffffff00
#define RLC_SPM_CPC_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
#define RLC_SPM_CPF_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0xff
#define RLC_SPM_CPF_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
#define RLC_SPM_CPF_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xffffff00
#define RLC_SPM_CPF_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
#define RLC_SPM_CB_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0xff
#define RLC_SPM_CB_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
#define RLC_SPM_CB_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xffffff00
#define RLC_SPM_CB_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
#define RLC_SPM_DB_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0xff
#define RLC_SPM_DB_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
#define RLC_SPM_DB_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xffffff00
#define RLC_SPM_DB_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
#define RLC_SPM_PA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0xff
#define RLC_SPM_PA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
#define RLC_SPM_PA_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xffffff00
#define RLC_SPM_PA_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
#define RLC_SPM_GDS_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0xff
#define RLC_SPM_GDS_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
#define RLC_SPM_GDS_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xffffff00
#define RLC_SPM_GDS_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
#define RLC_SPM_IA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0xff
#define RLC_SPM_IA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
#define RLC_SPM_IA_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xffffff00
#define RLC_SPM_IA_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
#define RLC_SPM_SC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0xff
#define RLC_SPM_SC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
#define RLC_SPM_SC_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xffffff00
#define RLC_SPM_SC_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
#define RLC_SPM_TCC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0xff
#define RLC_SPM_TCC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
#define RLC_SPM_TCC_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xffffff00
#define RLC_SPM_TCC_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
#define RLC_SPM_TCA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0xff
#define RLC_SPM_TCA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
#define RLC_SPM_TCA_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xffffff00
#define RLC_SPM_TCA_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
#define RLC_SPM_TCP_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0xff
#define RLC_SPM_TCP_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
#define RLC_SPM_TCP_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xffffff00
#define RLC_SPM_TCP_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
#define RLC_SPM_TA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0xff
#define RLC_SPM_TA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
#define RLC_SPM_TA_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xffffff00
#define RLC_SPM_TA_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
#define RLC_SPM_TD_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0xff
#define RLC_SPM_TD_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
#define RLC_SPM_TD_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xffffff00
#define RLC_SPM_TD_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
#define RLC_SPM_VGT_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0xff
#define RLC_SPM_VGT_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
#define RLC_SPM_VGT_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xffffff00
#define RLC_SPM_VGT_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
#define RLC_SPM_SPI_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0xff
#define RLC_SPM_SPI_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
#define RLC_SPM_SPI_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xffffff00
#define RLC_SPM_SPI_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
#define RLC_SPM_SQG_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0xff
#define RLC_SPM_SQG_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
#define RLC_SPM_SQG_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xffffff00
#define RLC_SPM_SQG_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
#define RLC_SPM_SX_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0xff
#define RLC_SPM_SX_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
#define RLC_SPM_SX_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xffffff00
#define RLC_SPM_SX_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
#define RLC_SPM_GLOBAL_MUXSEL_ADDR__PERFMON_SEL_ADDR_MASK 0xffffffff
#define RLC_SPM_GLOBAL_MUXSEL_ADDR__PERFMON_SEL_ADDR__SHIFT 0x0
#define RLC_SPM_GLOBAL_MUXSEL_DATA__PERFMON_SEL_DATA_MASK 0xffffffff
#define RLC_SPM_GLOBAL_MUXSEL_DATA__PERFMON_SEL_DATA__SHIFT 0x0
#define RLC_SPM_RING_RDPTR__PERFMON_RING_RDPTR_MASK 0xffffffff
#define RLC_SPM_RING_RDPTR__PERFMON_RING_RDPTR__SHIFT 0x0
#define RLC_SPM_SEGMENT_THRESHOLD__NUM_SEGMENT_THRESHOLD_MASK 0xffffffff
#define RLC_SPM_SEGMENT_THRESHOLD__NUM_SEGMENT_THRESHOLD__SHIFT 0x0
#define RLC_GPU_IOV_VF_ENABLE__VF_ENABLE_MASK 0x1
#define RLC_GPU_IOV_VF_ENABLE__VF_ENABLE__SHIFT 0x0
#define RLC_GPU_IOV_VF_ENABLE__RESERVED_MASK 0xfffe
#define RLC_GPU_IOV_VF_ENABLE__RESERVED__SHIFT 0x1
#define RLC_GPU_IOV_VF_ENABLE__VF_NUM_MASK 0xffff0000
#define RLC_GPU_IOV_VF_ENABLE__VF_NUM__SHIFT 0x10
#define RLC_GPU_IOV_RLC_RESPONSE__RESP_MASK 0xffffffff
#define RLC_GPU_IOV_RLC_RESPONSE__RESP__SHIFT 0x0
#define RLC_GPU_IOV_ACTIVE_FCN_ID__VF_ID_MASK 0xf
#define RLC_GPU_IOV_ACTIVE_FCN_ID__VF_ID__SHIFT 0x0
#define RLC_GPU_IOV_ACTIVE_FCN_ID__RESERVED_MASK 0x7ffffff0
#define RLC_GPU_IOV_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
#define RLC_GPU_IOV_ACTIVE_FCN_ID__PF_VF_MASK 0x80000000
#define RLC_GPU_IOV_ACTIVE_FCN_ID__PF_VF__SHIFT 0x1f
#define SPI_PS_INPUT_CNTL_0__OFFSET_MASK 0x3f
#define SPI_PS_INPUT_CNTL_0__OFFSET__SHIFT 0x0
#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_MASK 0x300
#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL__SHIFT 0x8
#define SPI_PS_INPUT_CNTL_0__FLAT_SHADE_MASK 0x400
#define SPI_PS_INPUT_CNTL_0__FLAT_SHADE__SHIFT 0xa
#define SPI_PS_INPUT_CNTL_0__CYL_WRAP_MASK 0x1e000
#define SPI_PS_INPUT_CNTL_0__CYL_WRAP__SHIFT 0xd
#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_MASK 0x20000
#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX__SHIFT 0x11
#define SPI_PS_INPUT_CNTL_0__DUP_MASK 0x40000
#define SPI_PS_INPUT_CNTL_0__DUP__SHIFT 0x12
#define SPI_PS_INPUT_CNTL_0__FP16_INTERP_MODE_MASK 0x80000
#define SPI_PS_INPUT_CNTL_0__FP16_INTERP_MODE__SHIFT 0x13
#define SPI_PS_INPUT_CNTL_0__USE_DEFAULT_ATTR1_MASK 0x100000
#define SPI_PS_INPUT_CNTL_0__USE_DEFAULT_ATTR1__SHIFT 0x14
#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_ATTR1_MASK 0x600000
#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_ATTR1__SHIFT 0x15
#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_ATTR1_MASK 0x800000
#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
#define SPI_PS_INPUT_CNTL_0__ATTR0_VALID_MASK 0x1000000
#define SPI_PS_INPUT_CNTL_0__ATTR0_VALID__SHIFT 0x18
#define SPI_PS_INPUT_CNTL_0__ATTR1_VALID_MASK 0x2000000
#define SPI_PS_INPUT_CNTL_0__ATTR1_VALID__SHIFT 0x19
#define SPI_PS_INPUT_CNTL_1__OFFSET_MASK 0x3f
#define SPI_PS_INPUT_CNTL_1__OFFSET__SHIFT 0x0
#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_MASK 0x300
#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL__SHIFT 0x8
#define SPI_PS_INPUT_CNTL_1__FLAT_SHADE_MASK 0x400
#define SPI_PS_INPUT_CNTL_1__FLAT_SHADE__SHIFT 0xa
#define SPI_PS_INPUT_CNTL_1__CYL_WRAP_MASK 0x1e000
#define SPI_PS_INPUT_CNTL_1__CYL_WRAP__SHIFT 0xd
#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_MASK 0x20000
#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX__SHIFT 0x11
#define SPI_PS_INPUT_CNTL_1__DUP_MASK 0x40000
#define SPI_PS_INPUT_CNTL_1__DUP__SHIFT 0x12
#define SPI_PS_INPUT_CNTL_1__FP16_INTERP_MODE_MASK 0x80000
#define SPI_PS_INPUT_CNTL_1__FP16_INTERP_MODE__SHIFT 0x13
#define SPI_PS_INPUT_CNTL_1__USE_DEFAULT_ATTR1_MASK 0x100000
#define SPI_PS_INPUT_CNTL_1__USE_DEFAULT_ATTR1__SHIFT 0x14
#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_ATTR1_MASK 0x600000
#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_ATTR1__SHIFT 0x15
#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_ATTR1_MASK 0x800000
#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
#define SPI_PS_INPUT_CNTL_1__ATTR0_VALID_MASK 0x1000000
#define SPI_PS_INPUT_CNTL_1__ATTR0_VALID__SHIFT 0x18
#define SPI_PS_INPUT_CNTL_1__ATTR1_VALID_MASK 0x2000000
#define SPI_PS_INPUT_CNTL_1__ATTR1_VALID__SHIFT 0x19
#define SPI_PS_INPUT_CNTL_2__OFFSET_MASK 0x3f
#define SPI_PS_INPUT_CNTL_2__OFFSET__SHIFT 0x0
#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_MASK 0x300
#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL__SHIFT 0x8
#define SPI_PS_INPUT_CNTL_2__FLAT_SHADE_MASK 0x400
#define SPI_PS_INPUT_CNTL_2__FLAT_SHADE__SHIFT 0xa
#define SPI_PS_INPUT_CNTL_2__CYL_WRAP_MASK 0x1e000
#define SPI_PS_INPUT_CNTL_2__CYL_WRAP__SHIFT 0xd
#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_MASK 0x20000
#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX__SHIFT 0x11
#define SPI_PS_INPUT_CNTL_2__DUP_MASK 0x40000
#define SPI_PS_INPUT_CNTL_2__DUP__SHIFT 0x12
#define SPI_PS_INPUT_CNTL_2__FP16_INTERP_MODE_MASK 0x80000
#define SPI_PS_INPUT_CNTL_2__FP16_INTERP_MODE__SHIFT 0x13
#define SPI_PS_INPUT_CNTL_2__USE_DEFAULT_ATTR1_MASK 0x100000
#define SPI_PS_INPUT_CNTL_2__USE_DEFAULT_ATTR1__SHIFT 0x14
#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_ATTR1_MASK 0x600000
#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_ATTR1__SHIFT 0x15
#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_ATTR1_MASK 0x800000
#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
#define SPI_PS_INPUT_CNTL_2__ATTR0_VALID_MASK 0x1000000
#define SPI_PS_INPUT_CNTL_2__ATTR0_VALID__SHIFT 0x18
#define SPI_PS_INPUT_CNTL_2__ATTR1_VALID_MASK 0x2000000
#define SPI_PS_INPUT_CNTL_2__ATTR1_VALID__SHIFT 0x19
#define SPI_PS_INPUT_CNTL_3__OFFSET_MASK 0x3f
#define SPI_PS_INPUT_CNTL_3__OFFSET__SHIFT 0x0
#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_MASK 0x300
#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL__SHIFT 0x8
#define SPI_PS_INPUT_CNTL_3__FLAT_SHADE_MASK 0x400
#define SPI_PS_INPUT_CNTL_3__FLAT_SHADE__SHIFT 0xa
#define SPI_PS_INPUT_CNTL_3__CYL_WRAP_MASK 0x1e000
#define SPI_PS_INPUT_CNTL_3__CYL_WRAP__SHIFT 0xd
#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_MASK 0x20000
#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX__SHIFT 0x11
#define SPI_PS_INPUT_CNTL_3__DUP_MASK 0x40000
#define SPI_PS_INPUT_CNTL_3__DUP__SHIFT 0x12
#define SPI_PS_INPUT_CNTL_3__FP16_INTERP_MODE_MASK 0x80000
#define SPI_PS_INPUT_CNTL_3__FP16_INTERP_MODE__SHIFT 0x13
#define SPI_PS_INPUT_CNTL_3__USE_DEFAULT_ATTR1_MASK 0x100000
#define SPI_PS_INPUT_CNTL_3__USE_DEFAULT_ATTR1__SHIFT 0x14
#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_ATTR1_MASK 0x600000
#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_ATTR1__SHIFT 0x15
#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_ATTR1_MASK 0x800000
#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
#define SPI_PS_INPUT_CNTL_3__ATTR0_VALID_MASK 0x1000000
#define SPI_PS_INPUT_CNTL_3__ATTR0_VALID__SHIFT 0x18
#define SPI_PS_INPUT_CNTL_3__ATTR1_VALID_MASK 0x2000000
#define SPI_PS_INPUT_CNTL_3__ATTR1_VALID__SHIFT 0x19
#define SPI_PS_INPUT_CNTL_4__OFFSET_MASK 0x3f
#define SPI_PS_INPUT_CNTL_4__OFFSET__SHIFT 0x0
#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_MASK 0x300
#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL__SHIFT 0x8
#define SPI_PS_INPUT_CNTL_4__FLAT_SHADE_MASK 0x400
#define SPI_PS_INPUT_CNTL_4__FLAT_SHADE__SHIFT 0xa
#define SPI_PS_INPUT_CNTL_4__CYL_WRAP_MASK 0x1e000
#define SPI_PS_INPUT_CNTL_4__CYL_WRAP__SHIFT 0xd
#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_MASK 0x20000
#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX__SHIFT 0x11
#define SPI_PS_INPUT_CNTL_4__DUP_MASK 0x40000
#define SPI_PS_INPUT_CNTL_4__DUP__SHIFT 0x12
#define SPI_PS_INPUT_CNTL_4__FP16_INTERP_MODE_MASK 0x80000
#define SPI_PS_INPUT_CNTL_4__FP16_INTERP_MODE__SHIFT 0x13
#define SPI_PS_INPUT_CNTL_4__USE_DEFAULT_ATTR1_MASK 0x100000
#define SPI_PS_INPUT_CNTL_4__USE_DEFAULT_ATTR1__SHIFT 0x14
#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_ATTR1_MASK 0x600000
#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_ATTR1__SHIFT 0x15
#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_ATTR1_MASK 0x800000
#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
#define SPI_PS_INPUT_CNTL_4__ATTR0_VALID_MASK 0x1000000
#define SPI_PS_INPUT_CNTL_4__ATTR0_VALID__SHIFT 0x18
#define SPI_PS_INPUT_CNTL_4__ATTR1_VALID_MASK 0x2000000
#define SPI_PS_INPUT_CNTL_4__ATTR1_VALID__SHIFT 0x19
#define SPI_PS_INPUT_CNTL_5__OFFSET_MASK 0x3f
#define SPI_PS_INPUT_CNTL_5__OFFSET__SHIFT 0x0
#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_MASK 0x300
#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL__SHIFT 0x8
#define SPI_PS_INPUT_CNTL_5__FLAT_SHADE_MASK 0x400
#define SPI_PS_INPUT_CNTL_5__FLAT_SHADE__SHIFT 0xa
#define SPI_PS_INPUT_CNTL_5__CYL_WRAP_MASK 0x1e000
#define SPI_PS_INPUT_CNTL_5__CYL_WRAP__SHIFT 0xd
#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_MASK 0x20000
#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX__SHIFT 0x11
#define SPI_PS_INPUT_CNTL_5__DUP_MASK 0x40000
#define SPI_PS_INPUT_CNTL_5__DUP__SHIFT 0x12
#define SPI_PS_INPUT_CNTL_5__FP16_INTERP_MODE_MASK 0x80000
#define SPI_PS_INPUT_CNTL_5__FP16_INTERP_MODE__SHIFT 0x13
#define SPI_PS_INPUT_CNTL_5__USE_DEFAULT_ATTR1_MASK 0x100000
#define SPI_PS_INPUT_CNTL_5__USE_DEFAULT_ATTR1__SHIFT 0x14
#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_ATTR1_MASK 0x600000
#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_ATTR1__SHIFT 0x15
#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_ATTR1_MASK 0x800000
#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
#define SPI_PS_INPUT_CNTL_5__ATTR0_VALID_MASK 0x1000000
#define SPI_PS_INPUT_CNTL_5__ATTR0_VALID__SHIFT 0x18
#define SPI_PS_INPUT_CNTL_5__ATTR1_VALID_MASK 0x2000000
#define SPI_PS_INPUT_CNTL_5__ATTR1_VALID__SHIFT 0x19
#define SPI_PS_INPUT_CNTL_6__OFFSET_MASK 0x3f
#define SPI_PS_INPUT_CNTL_6__OFFSET__SHIFT 0x0
#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_MASK 0x300
#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL__SHIFT 0x8
#define SPI_PS_INPUT_CNTL_6__FLAT_SHADE_MASK 0x400
#define SPI_PS_INPUT_CNTL_6__FLAT_SHADE__SHIFT 0xa
#define SPI_PS_INPUT_CNTL_6__CYL_WRAP_MASK 0x1e000
#define SPI_PS_INPUT_CNTL_6__CYL_WRAP__SHIFT 0xd
#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_MASK 0x20000
#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX__SHIFT 0x11
#define SPI_PS_INPUT_CNTL_6__DUP_MASK 0x40000
#define SPI_PS_INPUT_CNTL_6__DUP__SHIFT 0x12
#define SPI_PS_INPUT_CNTL_6__FP16_INTERP_MODE_MASK 0x80000
#define SPI_PS_INPUT_CNTL_6__FP16_INTERP_MODE__SHIFT 0x13
#define SPI_PS_INPUT_CNTL_6__USE_DEFAULT_ATTR1_MASK 0x100000
#define SPI_PS_INPUT_CNTL_6__USE_DEFAULT_ATTR1__SHIFT 0x14
#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_ATTR1_MASK 0x600000
#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_ATTR1__SHIFT 0x15
#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_ATTR1_MASK 0x800000
#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
#define SPI_PS_INPUT_CNTL_6__ATTR0_VALID_MASK 0x1000000
#define SPI_PS_INPUT_CNTL_6__ATTR0_VALID__SHIFT 0x18
#define SPI_PS_INPUT_CNTL_6__ATTR1_VALID_MASK 0x2000000
#define SPI_PS_INPUT_CNTL_6__ATTR1_VALID__SHIFT 0x19
#define SPI_PS_INPUT_CNTL_7__OFFSET_MASK 0x3f
#define SPI_PS_INPUT_CNTL_7__OFFSET__SHIFT 0x0
#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_MASK 0x300
#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL__SHIFT 0x8
#define SPI_PS_INPUT_CNTL_7__FLAT_SHADE_MASK 0x400
#define SPI_PS_INPUT_CNTL_7__FLAT_SHADE__SHIFT 0xa
#define SPI_PS_INPUT_CNTL_7__CYL_WRAP_MASK 0x1e000
#define SPI_PS_INPUT_CNTL_7__CYL_WRAP__SHIFT 0xd
#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_MASK 0x20000
#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX__SHIFT 0x11
#define SPI_PS_INPUT_CNTL_7__DUP_MASK 0x40000
#define SPI_PS_INPUT_CNTL_7__DUP__SHIFT 0x12
#define SPI_PS_INPUT_CNTL_7__FP16_INTERP_MODE_MASK 0x80000
#define SPI_PS_INPUT_CNTL_7__FP16_INTERP_MODE__SHIFT 0x13
#define SPI_PS_INPUT_CNTL_7__USE_DEFAULT_ATTR1_MASK 0x100000
#define SPI_PS_INPUT_CNTL_7__USE_DEFAULT_ATTR1__SHIFT 0x14
#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_ATTR1_MASK 0x600000
#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_ATTR1__SHIFT 0x15
#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_ATTR1_MASK 0x800000
#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
#define SPI_PS_INPUT_CNTL_7__ATTR0_VALID_MASK 0x1000000
#define SPI_PS_INPUT_CNTL_7__ATTR0_VALID__SHIFT 0x18
#define SPI_PS_INPUT_CNTL_7__ATTR1_VALID_MASK 0x2000000
#define SPI_PS_INPUT_CNTL_7__ATTR1_VALID__SHIFT 0x19
#define SPI_PS_INPUT_CNTL_8__OFFSET_MASK 0x3f
#define SPI_PS_INPUT_CNTL_8__OFFSET__SHIFT 0x0
#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_MASK 0x300
#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL__SHIFT 0x8
#define SPI_PS_INPUT_CNTL_8__FLAT_SHADE_MASK 0x400
#define SPI_PS_INPUT_CNTL_8__FLAT_SHADE__SHIFT 0xa
#define SPI_PS_INPUT_CNTL_8__CYL_WRAP_MASK 0x1e000
#define SPI_PS_INPUT_CNTL_8__CYL_WRAP__SHIFT 0xd
#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_MASK 0x20000
#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX__SHIFT 0x11
#define SPI_PS_INPUT_CNTL_8__DUP_MASK 0x40000
#define SPI_PS_INPUT_CNTL_8__DUP__SHIFT 0x12
#define SPI_PS_INPUT_CNTL_8__FP16_INTERP_MODE_MASK 0x80000
#define SPI_PS_INPUT_CNTL_8__FP16_INTERP_MODE__SHIFT 0x13
#define SPI_PS_INPUT_CNTL_8__USE_DEFAULT_ATTR1_MASK 0x100000
#define SPI_PS_INPUT_CNTL_8__USE_DEFAULT_ATTR1__SHIFT 0x14
#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_ATTR1_MASK 0x600000
#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_ATTR1__SHIFT 0x15
#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_ATTR1_MASK 0x800000
#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
#define SPI_PS_INPUT_CNTL_8__ATTR0_VALID_MASK 0x1000000
#define SPI_PS_INPUT_CNTL_8__ATTR0_VALID__SHIFT 0x18
#define SPI_PS_INPUT_CNTL_8__ATTR1_VALID_MASK 0x2000000
#define SPI_PS_INPUT_CNTL_8__ATTR1_VALID__SHIFT 0x19
#define SPI_PS_INPUT_CNTL_9__OFFSET_MASK 0x3f
#define SPI_PS_INPUT_CNTL_9__OFFSET__SHIFT 0x0
#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_MASK 0x300
#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL__SHIFT 0x8
#define SPI_PS_INPUT_CNTL_9__FLAT_SHADE_MASK 0x400
#define SPI_PS_INPUT_CNTL_9__FLAT_SHADE__SHIFT 0xa
#define SPI_PS_INPUT_CNTL_9__CYL_WRAP_MASK 0x1e000
#define SPI_PS_INPUT_CNTL_9__CYL_WRAP__SHIFT 0xd
#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_MASK 0x20000
#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX__SHIFT 0x11
#define SPI_PS_INPUT_CNTL_9__DUP_MASK 0x40000
#define SPI_PS_INPUT_CNTL_9__DUP__SHIFT 0x12
#define SPI_PS_INPUT_CNTL_9__FP16_INTERP_MODE_MASK 0x80000
#define SPI_PS_INPUT_CNTL_9__FP16_INTERP_MODE__SHIFT 0x13
#define SPI_PS_INPUT_CNTL_9__USE_DEFAULT_ATTR1_MASK 0x100000
#define SPI_PS_INPUT_CNTL_9__USE_DEFAULT_ATTR1__SHIFT 0x14
#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_ATTR1_MASK 0x600000
#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_ATTR1__SHIFT 0x15
#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_ATTR1_MASK 0x800000
#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
#define SPI_PS_INPUT_CNTL_9__ATTR0_VALID_MASK 0x1000000
#define SPI_PS_INPUT_CNTL_9__ATTR0_VALID__SHIFT 0x18
#define SPI_PS_INPUT_CNTL_9__ATTR1_VALID_MASK 0x2000000
#define SPI_PS_INPUT_CNTL_9__ATTR1_VALID__SHIFT 0x19
#define SPI_PS_INPUT_CNTL_10__OFFSET_MASK 0x3f
#define SPI_PS_INPUT_CNTL_10__OFFSET__SHIFT 0x0
#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_MASK 0x300
#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL__SHIFT 0x8
#define SPI_PS_INPUT_CNTL_10__FLAT_SHADE_MASK 0x400
#define SPI_PS_INPUT_CNTL_10__FLAT_SHADE__SHIFT 0xa
#define SPI_PS_INPUT_CNTL_10__CYL_WRAP_MASK 0x1e000
#define SPI_PS_INPUT_CNTL_10__CYL_WRAP__SHIFT 0xd
#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_MASK 0x20000
#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX__SHIFT 0x11
#define SPI_PS_INPUT_CNTL_10__DUP_MASK 0x40000
#define SPI_PS_INPUT_CNTL_10__DUP__SHIFT 0x12
#define SPI_PS_INPUT_CNTL_10__FP16_INTERP_MODE_MASK 0x80000
#define SPI_PS_INPUT_CNTL_10__FP16_INTERP_MODE__SHIFT 0x13
#define SPI_PS_INPUT_CNTL_10__USE_DEFAULT_ATTR1_MASK 0x100000
#define SPI_PS_INPUT_CNTL_10__USE_DEFAULT_ATTR1__SHIFT 0x14
#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_ATTR1_MASK 0x600000
#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_ATTR1__SHIFT 0x15
#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_ATTR1_MASK 0x800000
#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
#define SPI_PS_INPUT_CNTL_10__ATTR0_VALID_MASK 0x1000000
#define SPI_PS_INPUT_CNTL_10__ATTR0_VALID__SHIFT 0x18
#define SPI_PS_INPUT_CNTL_10__ATTR1_VALID_MASK 0x2000000
#define SPI_PS_INPUT_CNTL_10__ATTR1_VALID__SHIFT 0x19
#define SPI_PS_INPUT_CNTL_11__OFFSET_MASK 0x3f
#define SPI_PS_INPUT_CNTL_11__OFFSET__SHIFT 0x0
#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_MASK 0x300
#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL__SHIFT 0x8
#define SPI_PS_INPUT_CNTL_11__FLAT_SHADE_MASK 0x400
#define SPI_PS_INPUT_CNTL_11__FLAT_SHADE__SHIFT 0xa
#define SPI_PS_INPUT_CNTL_11__CYL_WRAP_MASK 0x1e000
#define SPI_PS_INPUT_CNTL_11__CYL_WRAP__SHIFT 0xd
#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_MASK 0x20000
#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX__SHIFT 0x11
#define SPI_PS_INPUT_CNTL_11__DUP_MASK 0x40000
#define SPI_PS_INPUT_CNTL_11__DUP__SHIFT 0x12
#define SPI_PS_INPUT_CNTL_11__FP16_INTERP_MODE_MASK 0x80000
#define SPI_PS_INPUT_CNTL_11__FP16_INTERP_MODE__SHIFT 0x13
#define SPI_PS_INPUT_CNTL_11__USE_DEFAULT_ATTR1_MASK 0x100000
#define SPI_PS_INPUT_CNTL_11__USE_DEFAULT_ATTR1__SHIFT 0x14
#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_ATTR1_MASK 0x600000
#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_ATTR1__SHIFT 0x15
#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_ATTR1_MASK 0x800000
#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
#define SPI_PS_INPUT_CNTL_11__ATTR0_VALID_MASK 0x1000000
#define SPI_PS_INPUT_CNTL_11__ATTR0_VALID__SHIFT 0x18
10227#define SPI_PS_INPUT_CNTL_11__ATTR1_VALID_MASK 0x2000000
10228#define SPI_PS_INPUT_CNTL_11__ATTR1_VALID__SHIFT 0x19
10229#define SPI_PS_INPUT_CNTL_12__OFFSET_MASK 0x3f
10230#define SPI_PS_INPUT_CNTL_12__OFFSET__SHIFT 0x0
10231#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_MASK 0x300
10232#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL__SHIFT 0x8
10233#define SPI_PS_INPUT_CNTL_12__FLAT_SHADE_MASK 0x400
10234#define SPI_PS_INPUT_CNTL_12__FLAT_SHADE__SHIFT 0xa
10235#define SPI_PS_INPUT_CNTL_12__CYL_WRAP_MASK 0x1e000
10236#define SPI_PS_INPUT_CNTL_12__CYL_WRAP__SHIFT 0xd
10237#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_MASK 0x20000
10238#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX__SHIFT 0x11
10239#define SPI_PS_INPUT_CNTL_12__DUP_MASK 0x40000
10240#define SPI_PS_INPUT_CNTL_12__DUP__SHIFT 0x12
10241#define SPI_PS_INPUT_CNTL_12__FP16_INTERP_MODE_MASK 0x80000
10242#define SPI_PS_INPUT_CNTL_12__FP16_INTERP_MODE__SHIFT 0x13
10243#define SPI_PS_INPUT_CNTL_12__USE_DEFAULT_ATTR1_MASK 0x100000
10244#define SPI_PS_INPUT_CNTL_12__USE_DEFAULT_ATTR1__SHIFT 0x14
10245#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_ATTR1_MASK 0x600000
10246#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_ATTR1__SHIFT 0x15
10247#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_ATTR1_MASK 0x800000
10248#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
10249#define SPI_PS_INPUT_CNTL_12__ATTR0_VALID_MASK 0x1000000
10250#define SPI_PS_INPUT_CNTL_12__ATTR0_VALID__SHIFT 0x18
10251#define SPI_PS_INPUT_CNTL_12__ATTR1_VALID_MASK 0x2000000
10252#define SPI_PS_INPUT_CNTL_12__ATTR1_VALID__SHIFT 0x19
10253#define SPI_PS_INPUT_CNTL_13__OFFSET_MASK 0x3f
10254#define SPI_PS_INPUT_CNTL_13__OFFSET__SHIFT 0x0
10255#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_MASK 0x300
10256#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL__SHIFT 0x8
10257#define SPI_PS_INPUT_CNTL_13__FLAT_SHADE_MASK 0x400
10258#define SPI_PS_INPUT_CNTL_13__FLAT_SHADE__SHIFT 0xa
10259#define SPI_PS_INPUT_CNTL_13__CYL_WRAP_MASK 0x1e000
10260#define SPI_PS_INPUT_CNTL_13__CYL_WRAP__SHIFT 0xd
10261#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_MASK 0x20000
10262#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX__SHIFT 0x11
10263#define SPI_PS_INPUT_CNTL_13__DUP_MASK 0x40000
10264#define SPI_PS_INPUT_CNTL_13__DUP__SHIFT 0x12
10265#define SPI_PS_INPUT_CNTL_13__FP16_INTERP_MODE_MASK 0x80000
10266#define SPI_PS_INPUT_CNTL_13__FP16_INTERP_MODE__SHIFT 0x13
10267#define SPI_PS_INPUT_CNTL_13__USE_DEFAULT_ATTR1_MASK 0x100000
10268#define SPI_PS_INPUT_CNTL_13__USE_DEFAULT_ATTR1__SHIFT 0x14
10269#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_ATTR1_MASK 0x600000
10270#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_ATTR1__SHIFT 0x15
10271#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_ATTR1_MASK 0x800000
10272#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
10273#define SPI_PS_INPUT_CNTL_13__ATTR0_VALID_MASK 0x1000000
10274#define SPI_PS_INPUT_CNTL_13__ATTR0_VALID__SHIFT 0x18
10275#define SPI_PS_INPUT_CNTL_13__ATTR1_VALID_MASK 0x2000000
10276#define SPI_PS_INPUT_CNTL_13__ATTR1_VALID__SHIFT 0x19
10277#define SPI_PS_INPUT_CNTL_14__OFFSET_MASK 0x3f
10278#define SPI_PS_INPUT_CNTL_14__OFFSET__SHIFT 0x0
10279#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_MASK 0x300
10280#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL__SHIFT 0x8
10281#define SPI_PS_INPUT_CNTL_14__FLAT_SHADE_MASK 0x400
10282#define SPI_PS_INPUT_CNTL_14__FLAT_SHADE__SHIFT 0xa
10283#define SPI_PS_INPUT_CNTL_14__CYL_WRAP_MASK 0x1e000
10284#define SPI_PS_INPUT_CNTL_14__CYL_WRAP__SHIFT 0xd
10285#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_MASK 0x20000
10286#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX__SHIFT 0x11
10287#define SPI_PS_INPUT_CNTL_14__DUP_MASK 0x40000
10288#define SPI_PS_INPUT_CNTL_14__DUP__SHIFT 0x12
10289#define SPI_PS_INPUT_CNTL_14__FP16_INTERP_MODE_MASK 0x80000
10290#define SPI_PS_INPUT_CNTL_14__FP16_INTERP_MODE__SHIFT 0x13
10291#define SPI_PS_INPUT_CNTL_14__USE_DEFAULT_ATTR1_MASK 0x100000
10292#define SPI_PS_INPUT_CNTL_14__USE_DEFAULT_ATTR1__SHIFT 0x14
10293#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_ATTR1_MASK 0x600000
10294#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_ATTR1__SHIFT 0x15
10295#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_ATTR1_MASK 0x800000
10296#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
10297#define SPI_PS_INPUT_CNTL_14__ATTR0_VALID_MASK 0x1000000
10298#define SPI_PS_INPUT_CNTL_14__ATTR0_VALID__SHIFT 0x18
10299#define SPI_PS_INPUT_CNTL_14__ATTR1_VALID_MASK 0x2000000
10300#define SPI_PS_INPUT_CNTL_14__ATTR1_VALID__SHIFT 0x19
10301#define SPI_PS_INPUT_CNTL_15__OFFSET_MASK 0x3f
10302#define SPI_PS_INPUT_CNTL_15__OFFSET__SHIFT 0x0
10303#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_MASK 0x300
10304#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL__SHIFT 0x8
10305#define SPI_PS_INPUT_CNTL_15__FLAT_SHADE_MASK 0x400
10306#define SPI_PS_INPUT_CNTL_15__FLAT_SHADE__SHIFT 0xa
10307#define SPI_PS_INPUT_CNTL_15__CYL_WRAP_MASK 0x1e000
10308#define SPI_PS_INPUT_CNTL_15__CYL_WRAP__SHIFT 0xd
10309#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_MASK 0x20000
10310#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX__SHIFT 0x11
10311#define SPI_PS_INPUT_CNTL_15__DUP_MASK 0x40000
10312#define SPI_PS_INPUT_CNTL_15__DUP__SHIFT 0x12
10313#define SPI_PS_INPUT_CNTL_15__FP16_INTERP_MODE_MASK 0x80000
10314#define SPI_PS_INPUT_CNTL_15__FP16_INTERP_MODE__SHIFT 0x13
10315#define SPI_PS_INPUT_CNTL_15__USE_DEFAULT_ATTR1_MASK 0x100000
10316#define SPI_PS_INPUT_CNTL_15__USE_DEFAULT_ATTR1__SHIFT 0x14
10317#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_ATTR1_MASK 0x600000
10318#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_ATTR1__SHIFT 0x15
10319#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_ATTR1_MASK 0x800000
10320#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
10321#define SPI_PS_INPUT_CNTL_15__ATTR0_VALID_MASK 0x1000000
10322#define SPI_PS_INPUT_CNTL_15__ATTR0_VALID__SHIFT 0x18
10323#define SPI_PS_INPUT_CNTL_15__ATTR1_VALID_MASK 0x2000000
10324#define SPI_PS_INPUT_CNTL_15__ATTR1_VALID__SHIFT 0x19
10325#define SPI_PS_INPUT_CNTL_16__OFFSET_MASK 0x3f
10326#define SPI_PS_INPUT_CNTL_16__OFFSET__SHIFT 0x0
10327#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_MASK 0x300
10328#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL__SHIFT 0x8
10329#define SPI_PS_INPUT_CNTL_16__FLAT_SHADE_MASK 0x400
10330#define SPI_PS_INPUT_CNTL_16__FLAT_SHADE__SHIFT 0xa
10331#define SPI_PS_INPUT_CNTL_16__CYL_WRAP_MASK 0x1e000
10332#define SPI_PS_INPUT_CNTL_16__CYL_WRAP__SHIFT 0xd
10333#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_MASK 0x20000
10334#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX__SHIFT 0x11
10335#define SPI_PS_INPUT_CNTL_16__DUP_MASK 0x40000
10336#define SPI_PS_INPUT_CNTL_16__DUP__SHIFT 0x12
10337#define SPI_PS_INPUT_CNTL_16__FP16_INTERP_MODE_MASK 0x80000
10338#define SPI_PS_INPUT_CNTL_16__FP16_INTERP_MODE__SHIFT 0x13
10339#define SPI_PS_INPUT_CNTL_16__USE_DEFAULT_ATTR1_MASK 0x100000
10340#define SPI_PS_INPUT_CNTL_16__USE_DEFAULT_ATTR1__SHIFT 0x14
10341#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_ATTR1_MASK 0x600000
10342#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_ATTR1__SHIFT 0x15
10343#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_ATTR1_MASK 0x800000
10344#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
10345#define SPI_PS_INPUT_CNTL_16__ATTR0_VALID_MASK 0x1000000
10346#define SPI_PS_INPUT_CNTL_16__ATTR0_VALID__SHIFT 0x18
10347#define SPI_PS_INPUT_CNTL_16__ATTR1_VALID_MASK 0x2000000
10348#define SPI_PS_INPUT_CNTL_16__ATTR1_VALID__SHIFT 0x19
10349#define SPI_PS_INPUT_CNTL_17__OFFSET_MASK 0x3f
10350#define SPI_PS_INPUT_CNTL_17__OFFSET__SHIFT 0x0
10351#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_MASK 0x300
10352#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL__SHIFT 0x8
10353#define SPI_PS_INPUT_CNTL_17__FLAT_SHADE_MASK 0x400
10354#define SPI_PS_INPUT_CNTL_17__FLAT_SHADE__SHIFT 0xa
10355#define SPI_PS_INPUT_CNTL_17__CYL_WRAP_MASK 0x1e000
10356#define SPI_PS_INPUT_CNTL_17__CYL_WRAP__SHIFT 0xd
10357#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_MASK 0x20000
10358#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX__SHIFT 0x11
10359#define SPI_PS_INPUT_CNTL_17__DUP_MASK 0x40000
10360#define SPI_PS_INPUT_CNTL_17__DUP__SHIFT 0x12
10361#define SPI_PS_INPUT_CNTL_17__FP16_INTERP_MODE_MASK 0x80000
10362#define SPI_PS_INPUT_CNTL_17__FP16_INTERP_MODE__SHIFT 0x13
10363#define SPI_PS_INPUT_CNTL_17__USE_DEFAULT_ATTR1_MASK 0x100000
10364#define SPI_PS_INPUT_CNTL_17__USE_DEFAULT_ATTR1__SHIFT 0x14
10365#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_ATTR1_MASK 0x600000
10366#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_ATTR1__SHIFT 0x15
10367#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_ATTR1_MASK 0x800000
10368#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
10369#define SPI_PS_INPUT_CNTL_17__ATTR0_VALID_MASK 0x1000000
10370#define SPI_PS_INPUT_CNTL_17__ATTR0_VALID__SHIFT 0x18
10371#define SPI_PS_INPUT_CNTL_17__ATTR1_VALID_MASK 0x2000000
10372#define SPI_PS_INPUT_CNTL_17__ATTR1_VALID__SHIFT 0x19
10373#define SPI_PS_INPUT_CNTL_18__OFFSET_MASK 0x3f
10374#define SPI_PS_INPUT_CNTL_18__OFFSET__SHIFT 0x0
10375#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_MASK 0x300
10376#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL__SHIFT 0x8
10377#define SPI_PS_INPUT_CNTL_18__FLAT_SHADE_MASK 0x400
10378#define SPI_PS_INPUT_CNTL_18__FLAT_SHADE__SHIFT 0xa
10379#define SPI_PS_INPUT_CNTL_18__CYL_WRAP_MASK 0x1e000
10380#define SPI_PS_INPUT_CNTL_18__CYL_WRAP__SHIFT 0xd
10381#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_MASK 0x20000
10382#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX__SHIFT 0x11
10383#define SPI_PS_INPUT_CNTL_18__DUP_MASK 0x40000
10384#define SPI_PS_INPUT_CNTL_18__DUP__SHIFT 0x12
10385#define SPI_PS_INPUT_CNTL_18__FP16_INTERP_MODE_MASK 0x80000
10386#define SPI_PS_INPUT_CNTL_18__FP16_INTERP_MODE__SHIFT 0x13
10387#define SPI_PS_INPUT_CNTL_18__USE_DEFAULT_ATTR1_MASK 0x100000
10388#define SPI_PS_INPUT_CNTL_18__USE_DEFAULT_ATTR1__SHIFT 0x14
10389#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_ATTR1_MASK 0x600000
10390#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_ATTR1__SHIFT 0x15
10391#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_ATTR1_MASK 0x800000
10392#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
10393#define SPI_PS_INPUT_CNTL_18__ATTR0_VALID_MASK 0x1000000
10394#define SPI_PS_INPUT_CNTL_18__ATTR0_VALID__SHIFT 0x18
10395#define SPI_PS_INPUT_CNTL_18__ATTR1_VALID_MASK 0x2000000
10396#define SPI_PS_INPUT_CNTL_18__ATTR1_VALID__SHIFT 0x19
10397#define SPI_PS_INPUT_CNTL_19__OFFSET_MASK 0x3f
10398#define SPI_PS_INPUT_CNTL_19__OFFSET__SHIFT 0x0
10399#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_MASK 0x300
10400#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL__SHIFT 0x8
10401#define SPI_PS_INPUT_CNTL_19__FLAT_SHADE_MASK 0x400
10402#define SPI_PS_INPUT_CNTL_19__FLAT_SHADE__SHIFT 0xa
10403#define SPI_PS_INPUT_CNTL_19__CYL_WRAP_MASK 0x1e000
10404#define SPI_PS_INPUT_CNTL_19__CYL_WRAP__SHIFT 0xd
10405#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_MASK 0x20000
10406#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX__SHIFT 0x11
10407#define SPI_PS_INPUT_CNTL_19__DUP_MASK 0x40000
10408#define SPI_PS_INPUT_CNTL_19__DUP__SHIFT 0x12
10409#define SPI_PS_INPUT_CNTL_19__FP16_INTERP_MODE_MASK 0x80000
10410#define SPI_PS_INPUT_CNTL_19__FP16_INTERP_MODE__SHIFT 0x13
10411#define SPI_PS_INPUT_CNTL_19__USE_DEFAULT_ATTR1_MASK 0x100000
10412#define SPI_PS_INPUT_CNTL_19__USE_DEFAULT_ATTR1__SHIFT 0x14
10413#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_ATTR1_MASK 0x600000
10414#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_ATTR1__SHIFT 0x15
10415#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_ATTR1_MASK 0x800000
10416#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
10417#define SPI_PS_INPUT_CNTL_19__ATTR0_VALID_MASK 0x1000000
10418#define SPI_PS_INPUT_CNTL_19__ATTR0_VALID__SHIFT 0x18
10419#define SPI_PS_INPUT_CNTL_19__ATTR1_VALID_MASK 0x2000000
10420#define SPI_PS_INPUT_CNTL_19__ATTR1_VALID__SHIFT 0x19
10421#define SPI_PS_INPUT_CNTL_20__OFFSET_MASK 0x3f
10422#define SPI_PS_INPUT_CNTL_20__OFFSET__SHIFT 0x0
10423#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_MASK 0x300
10424#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL__SHIFT 0x8
10425#define SPI_PS_INPUT_CNTL_20__FLAT_SHADE_MASK 0x400
10426#define SPI_PS_INPUT_CNTL_20__FLAT_SHADE__SHIFT 0xa
10427#define SPI_PS_INPUT_CNTL_20__DUP_MASK 0x40000
10428#define SPI_PS_INPUT_CNTL_20__DUP__SHIFT 0x12
10429#define SPI_PS_INPUT_CNTL_20__FP16_INTERP_MODE_MASK 0x80000
10430#define SPI_PS_INPUT_CNTL_20__FP16_INTERP_MODE__SHIFT 0x13
10431#define SPI_PS_INPUT_CNTL_20__USE_DEFAULT_ATTR1_MASK 0x100000
10432#define SPI_PS_INPUT_CNTL_20__USE_DEFAULT_ATTR1__SHIFT 0x14
10433#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_ATTR1_MASK 0x600000
10434#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_ATTR1__SHIFT 0x15
10435#define SPI_PS_INPUT_CNTL_20__ATTR0_VALID_MASK 0x1000000
10436#define SPI_PS_INPUT_CNTL_20__ATTR0_VALID__SHIFT 0x18
10437#define SPI_PS_INPUT_CNTL_20__ATTR1_VALID_MASK 0x2000000
10438#define SPI_PS_INPUT_CNTL_20__ATTR1_VALID__SHIFT 0x19
10439#define SPI_PS_INPUT_CNTL_21__OFFSET_MASK 0x3f
10440#define SPI_PS_INPUT_CNTL_21__OFFSET__SHIFT 0x0
10441#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_MASK 0x300
10442#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL__SHIFT 0x8
10443#define SPI_PS_INPUT_CNTL_21__FLAT_SHADE_MASK 0x400
10444#define SPI_PS_INPUT_CNTL_21__FLAT_SHADE__SHIFT 0xa
10445#define SPI_PS_INPUT_CNTL_21__DUP_MASK 0x40000
10446#define SPI_PS_INPUT_CNTL_21__DUP__SHIFT 0x12
10447#define SPI_PS_INPUT_CNTL_21__FP16_INTERP_MODE_MASK 0x80000
10448#define SPI_PS_INPUT_CNTL_21__FP16_INTERP_MODE__SHIFT 0x13
10449#define SPI_PS_INPUT_CNTL_21__USE_DEFAULT_ATTR1_MASK 0x100000
10450#define SPI_PS_INPUT_CNTL_21__USE_DEFAULT_ATTR1__SHIFT 0x14
10451#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_ATTR1_MASK 0x600000
10452#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_ATTR1__SHIFT 0x15
10453#define SPI_PS_INPUT_CNTL_21__ATTR0_VALID_MASK 0x1000000
10454#define SPI_PS_INPUT_CNTL_21__ATTR0_VALID__SHIFT 0x18
10455#define SPI_PS_INPUT_CNTL_21__ATTR1_VALID_MASK 0x2000000
10456#define SPI_PS_INPUT_CNTL_21__ATTR1_VALID__SHIFT 0x19
10457#define SPI_PS_INPUT_CNTL_22__OFFSET_MASK 0x3f
10458#define SPI_PS_INPUT_CNTL_22__OFFSET__SHIFT 0x0
10459#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_MASK 0x300
10460#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL__SHIFT 0x8
10461#define SPI_PS_INPUT_CNTL_22__FLAT_SHADE_MASK 0x400
10462#define SPI_PS_INPUT_CNTL_22__FLAT_SHADE__SHIFT 0xa
10463#define SPI_PS_INPUT_CNTL_22__DUP_MASK 0x40000
10464#define SPI_PS_INPUT_CNTL_22__DUP__SHIFT 0x12
10465#define SPI_PS_INPUT_CNTL_22__FP16_INTERP_MODE_MASK 0x80000
10466#define SPI_PS_INPUT_CNTL_22__FP16_INTERP_MODE__SHIFT 0x13
10467#define SPI_PS_INPUT_CNTL_22__USE_DEFAULT_ATTR1_MASK 0x100000
10468#define SPI_PS_INPUT_CNTL_22__USE_DEFAULT_ATTR1__SHIFT 0x14
10469#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_ATTR1_MASK 0x600000
10470#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_ATTR1__SHIFT 0x15
10471#define SPI_PS_INPUT_CNTL_22__ATTR0_VALID_MASK 0x1000000
10472#define SPI_PS_INPUT_CNTL_22__ATTR0_VALID__SHIFT 0x18
10473#define SPI_PS_INPUT_CNTL_22__ATTR1_VALID_MASK 0x2000000
10474#define SPI_PS_INPUT_CNTL_22__ATTR1_VALID__SHIFT 0x19
10475#define SPI_PS_INPUT_CNTL_23__OFFSET_MASK 0x3f
10476#define SPI_PS_INPUT_CNTL_23__OFFSET__SHIFT 0x0
10477#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_MASK 0x300
10478#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL__SHIFT 0x8
10479#define SPI_PS_INPUT_CNTL_23__FLAT_SHADE_MASK 0x400
10480#define SPI_PS_INPUT_CNTL_23__FLAT_SHADE__SHIFT 0xa
10481#define SPI_PS_INPUT_CNTL_23__DUP_MASK 0x40000
10482#define SPI_PS_INPUT_CNTL_23__DUP__SHIFT 0x12
10483#define SPI_PS_INPUT_CNTL_23__FP16_INTERP_MODE_MASK 0x80000
10484#define SPI_PS_INPUT_CNTL_23__FP16_INTERP_MODE__SHIFT 0x13
10485#define SPI_PS_INPUT_CNTL_23__USE_DEFAULT_ATTR1_MASK 0x100000
10486#define SPI_PS_INPUT_CNTL_23__USE_DEFAULT_ATTR1__SHIFT 0x14
10487#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_ATTR1_MASK 0x600000
10488#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_ATTR1__SHIFT 0x15
10489#define SPI_PS_INPUT_CNTL_23__ATTR0_VALID_MASK 0x1000000
10490#define SPI_PS_INPUT_CNTL_23__ATTR0_VALID__SHIFT 0x18
10491#define SPI_PS_INPUT_CNTL_23__ATTR1_VALID_MASK 0x2000000
10492#define SPI_PS_INPUT_CNTL_23__ATTR1_VALID__SHIFT 0x19
10493#define SPI_PS_INPUT_CNTL_24__OFFSET_MASK 0x3f
10494#define SPI_PS_INPUT_CNTL_24__OFFSET__SHIFT 0x0
10495#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_MASK 0x300
10496#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL__SHIFT 0x8
10497#define SPI_PS_INPUT_CNTL_24__FLAT_SHADE_MASK 0x400
10498#define SPI_PS_INPUT_CNTL_24__FLAT_SHADE__SHIFT 0xa
10499#define SPI_PS_INPUT_CNTL_24__DUP_MASK 0x40000
10500#define SPI_PS_INPUT_CNTL_24__DUP__SHIFT 0x12
10501#define SPI_PS_INPUT_CNTL_24__FP16_INTERP_MODE_MASK 0x80000
10502#define SPI_PS_INPUT_CNTL_24__FP16_INTERP_MODE__SHIFT 0x13
10503#define SPI_PS_INPUT_CNTL_24__USE_DEFAULT_ATTR1_MASK 0x100000
10504#define SPI_PS_INPUT_CNTL_24__USE_DEFAULT_ATTR1__SHIFT 0x14
10505#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_ATTR1_MASK 0x600000
10506#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_ATTR1__SHIFT 0x15
10507#define SPI_PS_INPUT_CNTL_24__ATTR0_VALID_MASK 0x1000000
10508#define SPI_PS_INPUT_CNTL_24__ATTR0_VALID__SHIFT 0x18
10509#define SPI_PS_INPUT_CNTL_24__ATTR1_VALID_MASK 0x2000000
10510#define SPI_PS_INPUT_CNTL_24__ATTR1_VALID__SHIFT 0x19
10511#define SPI_PS_INPUT_CNTL_25__OFFSET_MASK 0x3f
10512#define SPI_PS_INPUT_CNTL_25__OFFSET__SHIFT 0x0
10513#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_MASK 0x300
10514#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL__SHIFT 0x8
10515#define SPI_PS_INPUT_CNTL_25__FLAT_SHADE_MASK 0x400
10516#define SPI_PS_INPUT_CNTL_25__FLAT_SHADE__SHIFT 0xa
10517#define SPI_PS_INPUT_CNTL_25__DUP_MASK 0x40000
10518#define SPI_PS_INPUT_CNTL_25__DUP__SHIFT 0x12
10519#define SPI_PS_INPUT_CNTL_25__FP16_INTERP_MODE_MASK 0x80000
10520#define SPI_PS_INPUT_CNTL_25__FP16_INTERP_MODE__SHIFT 0x13
10521#define SPI_PS_INPUT_CNTL_25__USE_DEFAULT_ATTR1_MASK 0x100000
10522#define SPI_PS_INPUT_CNTL_25__USE_DEFAULT_ATTR1__SHIFT 0x14
10523#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_ATTR1_MASK 0x600000
10524#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_ATTR1__SHIFT 0x15
10525#define SPI_PS_INPUT_CNTL_25__ATTR0_VALID_MASK 0x1000000
10526#define SPI_PS_INPUT_CNTL_25__ATTR0_VALID__SHIFT 0x18
10527#define SPI_PS_INPUT_CNTL_25__ATTR1_VALID_MASK 0x2000000
10528#define SPI_PS_INPUT_CNTL_25__ATTR1_VALID__SHIFT 0x19
10529#define SPI_PS_INPUT_CNTL_26__OFFSET_MASK 0x3f
10530#define SPI_PS_INPUT_CNTL_26__OFFSET__SHIFT 0x0
10531#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_MASK 0x300
10532#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL__SHIFT 0x8
10533#define SPI_PS_INPUT_CNTL_26__FLAT_SHADE_MASK 0x400
10534#define SPI_PS_INPUT_CNTL_26__FLAT_SHADE__SHIFT 0xa
10535#define SPI_PS_INPUT_CNTL_26__DUP_MASK 0x40000
10536#define SPI_PS_INPUT_CNTL_26__DUP__SHIFT 0x12
10537#define SPI_PS_INPUT_CNTL_26__FP16_INTERP_MODE_MASK 0x80000
10538#define SPI_PS_INPUT_CNTL_26__FP16_INTERP_MODE__SHIFT 0x13
10539#define SPI_PS_INPUT_CNTL_26__USE_DEFAULT_ATTR1_MASK 0x100000
10540#define SPI_PS_INPUT_CNTL_26__USE_DEFAULT_ATTR1__SHIFT 0x14
10541#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_ATTR1_MASK 0x600000
10542#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_ATTR1__SHIFT 0x15
10543#define SPI_PS_INPUT_CNTL_26__ATTR0_VALID_MASK 0x1000000
10544#define SPI_PS_INPUT_CNTL_26__ATTR0_VALID__SHIFT 0x18
10545#define SPI_PS_INPUT_CNTL_26__ATTR1_VALID_MASK 0x2000000
10546#define SPI_PS_INPUT_CNTL_26__ATTR1_VALID__SHIFT 0x19
10547#define SPI_PS_INPUT_CNTL_27__OFFSET_MASK 0x3f
10548#define SPI_PS_INPUT_CNTL_27__OFFSET__SHIFT 0x0
10549#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_MASK 0x300
10550#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL__SHIFT 0x8
10551#define SPI_PS_INPUT_CNTL_27__FLAT_SHADE_MASK 0x400
10552#define SPI_PS_INPUT_CNTL_27__FLAT_SHADE__SHIFT 0xa
10553#define SPI_PS_INPUT_CNTL_27__DUP_MASK 0x40000
10554#define SPI_PS_INPUT_CNTL_27__DUP__SHIFT 0x12
10555#define SPI_PS_INPUT_CNTL_27__FP16_INTERP_MODE_MASK 0x80000
10556#define SPI_PS_INPUT_CNTL_27__FP16_INTERP_MODE__SHIFT 0x13
10557#define SPI_PS_INPUT_CNTL_27__USE_DEFAULT_ATTR1_MASK 0x100000
10558#define SPI_PS_INPUT_CNTL_27__USE_DEFAULT_ATTR1__SHIFT 0x14
10559#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_ATTR1_MASK 0x600000
10560#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_ATTR1__SHIFT 0x15
10561#define SPI_PS_INPUT_CNTL_27__ATTR0_VALID_MASK 0x1000000
10562#define SPI_PS_INPUT_CNTL_27__ATTR0_VALID__SHIFT 0x18
10563#define SPI_PS_INPUT_CNTL_27__ATTR1_VALID_MASK 0x2000000
10564#define SPI_PS_INPUT_CNTL_27__ATTR1_VALID__SHIFT 0x19
10565#define SPI_PS_INPUT_CNTL_28__OFFSET_MASK 0x3f
10566#define SPI_PS_INPUT_CNTL_28__OFFSET__SHIFT 0x0
10567#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_MASK 0x300
10568#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL__SHIFT 0x8
10569#define SPI_PS_INPUT_CNTL_28__FLAT_SHADE_MASK 0x400
10570#define SPI_PS_INPUT_CNTL_28__FLAT_SHADE__SHIFT 0xa
10571#define SPI_PS_INPUT_CNTL_28__DUP_MASK 0x40000
10572#define SPI_PS_INPUT_CNTL_28__DUP__SHIFT 0x12
10573#define SPI_PS_INPUT_CNTL_28__FP16_INTERP_MODE_MASK 0x80000
10574#define SPI_PS_INPUT_CNTL_28__FP16_INTERP_MODE__SHIFT 0x13
10575#define SPI_PS_INPUT_CNTL_28__USE_DEFAULT_ATTR1_MASK 0x100000
10576#define SPI_PS_INPUT_CNTL_28__USE_DEFAULT_ATTR1__SHIFT 0x14
10577#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_ATTR1_MASK 0x600000
10578#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_ATTR1__SHIFT 0x15
10579#define SPI_PS_INPUT_CNTL_28__ATTR0_VALID_MASK 0x1000000
10580#define SPI_PS_INPUT_CNTL_28__ATTR0_VALID__SHIFT 0x18
10581#define SPI_PS_INPUT_CNTL_28__ATTR1_VALID_MASK 0x2000000
10582#define SPI_PS_INPUT_CNTL_28__ATTR1_VALID__SHIFT 0x19
10583#define SPI_PS_INPUT_CNTL_29__OFFSET_MASK 0x3f
10584#define SPI_PS_INPUT_CNTL_29__OFFSET__SHIFT 0x0
10585#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_MASK 0x300
10586#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL__SHIFT 0x8
10587#define SPI_PS_INPUT_CNTL_29__FLAT_SHADE_MASK 0x400
10588#define SPI_PS_INPUT_CNTL_29__FLAT_SHADE__SHIFT 0xa
10589#define SPI_PS_INPUT_CNTL_29__DUP_MASK 0x40000
10590#define SPI_PS_INPUT_CNTL_29__DUP__SHIFT 0x12
10591#define SPI_PS_INPUT_CNTL_29__FP16_INTERP_MODE_MASK 0x80000
10592#define SPI_PS_INPUT_CNTL_29__FP16_INTERP_MODE__SHIFT 0x13
10593#define SPI_PS_INPUT_CNTL_29__USE_DEFAULT_ATTR1_MASK 0x100000
10594#define SPI_PS_INPUT_CNTL_29__USE_DEFAULT_ATTR1__SHIFT 0x14
10595#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_ATTR1_MASK 0x600000
10596#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_ATTR1__SHIFT 0x15
10597#define SPI_PS_INPUT_CNTL_29__ATTR0_VALID_MASK 0x1000000
10598#define SPI_PS_INPUT_CNTL_29__ATTR0_VALID__SHIFT 0x18
10599#define SPI_PS_INPUT_CNTL_29__ATTR1_VALID_MASK 0x2000000
10600#define SPI_PS_INPUT_CNTL_29__ATTR1_VALID__SHIFT 0x19
10601#define SPI_PS_INPUT_CNTL_30__OFFSET_MASK 0x3f
10602#define SPI_PS_INPUT_CNTL_30__OFFSET__SHIFT 0x0
10603#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_MASK 0x300
10604#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL__SHIFT 0x8
10605#define SPI_PS_INPUT_CNTL_30__FLAT_SHADE_MASK 0x400
10606#define SPI_PS_INPUT_CNTL_30__FLAT_SHADE__SHIFT 0xa
10607#define SPI_PS_INPUT_CNTL_30__DUP_MASK 0x40000
10608#define SPI_PS_INPUT_CNTL_30__DUP__SHIFT 0x12
10609#define SPI_PS_INPUT_CNTL_30__FP16_INTERP_MODE_MASK 0x80000
10610#define SPI_PS_INPUT_CNTL_30__FP16_INTERP_MODE__SHIFT 0x13
10611#define SPI_PS_INPUT_CNTL_30__USE_DEFAULT_ATTR1_MASK 0x100000
10612#define SPI_PS_INPUT_CNTL_30__USE_DEFAULT_ATTR1__SHIFT 0x14
10613#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_ATTR1_MASK 0x600000
10614#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_ATTR1__SHIFT 0x15
10615#define SPI_PS_INPUT_CNTL_30__ATTR0_VALID_MASK 0x1000000
10616#define SPI_PS_INPUT_CNTL_30__ATTR0_VALID__SHIFT 0x18
10617#define SPI_PS_INPUT_CNTL_30__ATTR1_VALID_MASK 0x2000000
10618#define SPI_PS_INPUT_CNTL_30__ATTR1_VALID__SHIFT 0x19
10619#define SPI_PS_INPUT_CNTL_31__OFFSET_MASK 0x3f
10620#define SPI_PS_INPUT_CNTL_31__OFFSET__SHIFT 0x0
10621#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_MASK 0x300
10622#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL__SHIFT 0x8
10623#define SPI_PS_INPUT_CNTL_31__FLAT_SHADE_MASK 0x400
10624#define SPI_PS_INPUT_CNTL_31__FLAT_SHADE__SHIFT 0xa
10625#define SPI_PS_INPUT_CNTL_31__DUP_MASK 0x40000
10626#define SPI_PS_INPUT_CNTL_31__DUP__SHIFT 0x12
10627#define SPI_PS_INPUT_CNTL_31__FP16_INTERP_MODE_MASK 0x80000
10628#define SPI_PS_INPUT_CNTL_31__FP16_INTERP_MODE__SHIFT 0x13
10629#define SPI_PS_INPUT_CNTL_31__USE_DEFAULT_ATTR1_MASK 0x100000
10630#define SPI_PS_INPUT_CNTL_31__USE_DEFAULT_ATTR1__SHIFT 0x14
10631#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_ATTR1_MASK 0x600000
10632#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_ATTR1__SHIFT 0x15
10633#define SPI_PS_INPUT_CNTL_31__ATTR0_VALID_MASK 0x1000000
10634#define SPI_PS_INPUT_CNTL_31__ATTR0_VALID__SHIFT 0x18
10635#define SPI_PS_INPUT_CNTL_31__ATTR1_VALID_MASK 0x2000000
10636#define SPI_PS_INPUT_CNTL_31__ATTR1_VALID__SHIFT 0x19
10637#define SPI_VS_OUT_CONFIG__VS_EXPORT_COUNT_MASK 0x3e
10638#define SPI_VS_OUT_CONFIG__VS_EXPORT_COUNT__SHIFT 0x1
10639#define SPI_VS_OUT_CONFIG__VS_HALF_PACK_MASK 0x40
10640#define SPI_VS_OUT_CONFIG__VS_HALF_PACK__SHIFT 0x6
10641#define SPI_PS_INPUT_ENA__PERSP_SAMPLE_ENA_MASK 0x1
10642#define SPI_PS_INPUT_ENA__PERSP_SAMPLE_ENA__SHIFT 0x0
10643#define SPI_PS_INPUT_ENA__PERSP_CENTER_ENA_MASK 0x2
10644#define SPI_PS_INPUT_ENA__PERSP_CENTER_ENA__SHIFT 0x1
10645#define SPI_PS_INPUT_ENA__PERSP_CENTROID_ENA_MASK 0x4
10646#define SPI_PS_INPUT_ENA__PERSP_CENTROID_ENA__SHIFT 0x2
10647#define SPI_PS_INPUT_ENA__PERSP_PULL_MODEL_ENA_MASK 0x8
10648#define SPI_PS_INPUT_ENA__PERSP_PULL_MODEL_ENA__SHIFT 0x3
10649#define SPI_PS_INPUT_ENA__LINEAR_SAMPLE_ENA_MASK 0x10
10650#define SPI_PS_INPUT_ENA__LINEAR_SAMPLE_ENA__SHIFT 0x4
10651#define SPI_PS_INPUT_ENA__LINEAR_CENTER_ENA_MASK 0x20
10652#define SPI_PS_INPUT_ENA__LINEAR_CENTER_ENA__SHIFT 0x5
10653#define SPI_PS_INPUT_ENA__LINEAR_CENTROID_ENA_MASK 0x40
10654#define SPI_PS_INPUT_ENA__LINEAR_CENTROID_ENA__SHIFT 0x6
10655#define SPI_PS_INPUT_ENA__LINE_STIPPLE_TEX_ENA_MASK 0x80
10656#define SPI_PS_INPUT_ENA__LINE_STIPPLE_TEX_ENA__SHIFT 0x7
10657#define SPI_PS_INPUT_ENA__POS_X_FLOAT_ENA_MASK 0x100
10658#define SPI_PS_INPUT_ENA__POS_X_FLOAT_ENA__SHIFT 0x8
10659#define SPI_PS_INPUT_ENA__POS_Y_FLOAT_ENA_MASK 0x200
10660#define SPI_PS_INPUT_ENA__POS_Y_FLOAT_ENA__SHIFT 0x9
10661#define SPI_PS_INPUT_ENA__POS_Z_FLOAT_ENA_MASK 0x400
10662#define SPI_PS_INPUT_ENA__POS_Z_FLOAT_ENA__SHIFT 0xa
10663#define SPI_PS_INPUT_ENA__POS_W_FLOAT_ENA_MASK 0x800
10664#define SPI_PS_INPUT_ENA__POS_W_FLOAT_ENA__SHIFT 0xb
10665#define SPI_PS_INPUT_ENA__FRONT_FACE_ENA_MASK 0x1000
10666#define SPI_PS_INPUT_ENA__FRONT_FACE_ENA__SHIFT 0xc
10667#define SPI_PS_INPUT_ENA__ANCILLARY_ENA_MASK 0x2000
10668#define SPI_PS_INPUT_ENA__ANCILLARY_ENA__SHIFT 0xd
10669#define SPI_PS_INPUT_ENA__SAMPLE_COVERAGE_ENA_MASK 0x4000
10670#define SPI_PS_INPUT_ENA__SAMPLE_COVERAGE_ENA__SHIFT 0xe
10671#define SPI_PS_INPUT_ENA__POS_FIXED_PT_ENA_MASK 0x8000
10672#define SPI_PS_INPUT_ENA__POS_FIXED_PT_ENA__SHIFT 0xf
10673#define SPI_PS_INPUT_ADDR__PERSP_SAMPLE_ENA_MASK 0x1
10674#define SPI_PS_INPUT_ADDR__PERSP_SAMPLE_ENA__SHIFT 0x0
10675#define SPI_PS_INPUT_ADDR__PERSP_CENTER_ENA_MASK 0x2
10676#define SPI_PS_INPUT_ADDR__PERSP_CENTER_ENA__SHIFT 0x1
10677#define SPI_PS_INPUT_ADDR__PERSP_CENTROID_ENA_MASK 0x4
10678#define SPI_PS_INPUT_ADDR__PERSP_CENTROID_ENA__SHIFT 0x2
10679#define SPI_PS_INPUT_ADDR__PERSP_PULL_MODEL_ENA_MASK 0x8
10680#define SPI_PS_INPUT_ADDR__PERSP_PULL_MODEL_ENA__SHIFT 0x3
10681#define SPI_PS_INPUT_ADDR__LINEAR_SAMPLE_ENA_MASK 0x10
10682#define SPI_PS_INPUT_ADDR__LINEAR_SAMPLE_ENA__SHIFT 0x4
10683#define SPI_PS_INPUT_ADDR__LINEAR_CENTER_ENA_MASK 0x20
10684#define SPI_PS_INPUT_ADDR__LINEAR_CENTER_ENA__SHIFT 0x5
10685#define SPI_PS_INPUT_ADDR__LINEAR_CENTROID_ENA_MASK 0x40
10686#define SPI_PS_INPUT_ADDR__LINEAR_CENTROID_ENA__SHIFT 0x6
10687#define SPI_PS_INPUT_ADDR__LINE_STIPPLE_TEX_ENA_MASK 0x80
10688#define SPI_PS_INPUT_ADDR__LINE_STIPPLE_TEX_ENA__SHIFT 0x7
10689#define SPI_PS_INPUT_ADDR__POS_X_FLOAT_ENA_MASK 0x100
10690#define SPI_PS_INPUT_ADDR__POS_X_FLOAT_ENA__SHIFT 0x8
10691#define SPI_PS_INPUT_ADDR__POS_Y_FLOAT_ENA_MASK 0x200
10692#define SPI_PS_INPUT_ADDR__POS_Y_FLOAT_ENA__SHIFT 0x9
10693#define SPI_PS_INPUT_ADDR__POS_Z_FLOAT_ENA_MASK 0x400
10694#define SPI_PS_INPUT_ADDR__POS_Z_FLOAT_ENA__SHIFT 0xa
10695#define SPI_PS_INPUT_ADDR__POS_W_FLOAT_ENA_MASK 0x800
10696#define SPI_PS_INPUT_ADDR__POS_W_FLOAT_ENA__SHIFT 0xb
10697#define SPI_PS_INPUT_ADDR__FRONT_FACE_ENA_MASK 0x1000
10698#define SPI_PS_INPUT_ADDR__FRONT_FACE_ENA__SHIFT 0xc
10699#define SPI_PS_INPUT_ADDR__ANCILLARY_ENA_MASK 0x2000
10700#define SPI_PS_INPUT_ADDR__ANCILLARY_ENA__SHIFT 0xd
10701#define SPI_PS_INPUT_ADDR__SAMPLE_COVERAGE_ENA_MASK 0x4000
10702#define SPI_PS_INPUT_ADDR__SAMPLE_COVERAGE_ENA__SHIFT 0xe
10703#define SPI_PS_INPUT_ADDR__POS_FIXED_PT_ENA_MASK 0x8000
10704#define SPI_PS_INPUT_ADDR__POS_FIXED_PT_ENA__SHIFT 0xf
10705#define SPI_INTERP_CONTROL_0__FLAT_SHADE_ENA_MASK 0x1
10706#define SPI_INTERP_CONTROL_0__FLAT_SHADE_ENA__SHIFT 0x0
10707#define SPI_INTERP_CONTROL_0__PNT_SPRITE_ENA_MASK 0x2
10708#define SPI_INTERP_CONTROL_0__PNT_SPRITE_ENA__SHIFT 0x1
10709#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_X_MASK 0x1c
10710#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_X__SHIFT 0x2
10711#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Y_MASK 0xe0
10712#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Y__SHIFT 0x5
10713#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Z_MASK 0x700
10714#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Z__SHIFT 0x8
10715#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_W_MASK 0x3800
10716#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_W__SHIFT 0xb
10717#define SPI_INTERP_CONTROL_0__PNT_SPRITE_TOP_1_MASK 0x4000
10718#define SPI_INTERP_CONTROL_0__PNT_SPRITE_TOP_1__SHIFT 0xe
10719#define SPI_PS_IN_CONTROL__NUM_INTERP_MASK 0x3f
10720#define SPI_PS_IN_CONTROL__NUM_INTERP__SHIFT 0x0
10721#define SPI_PS_IN_CONTROL__PARAM_GEN_MASK 0x40
10722#define SPI_PS_IN_CONTROL__PARAM_GEN__SHIFT 0x6
10723#define SPI_PS_IN_CONTROL__BC_OPTIMIZE_DISABLE_MASK 0x4000
10724#define SPI_PS_IN_CONTROL__BC_OPTIMIZE_DISABLE__SHIFT 0xe
10725#define SPI_BARYC_CNTL__PERSP_CENTER_CNTL_MASK 0x1
10726#define SPI_BARYC_CNTL__PERSP_CENTER_CNTL__SHIFT 0x0
10727#define SPI_BARYC_CNTL__PERSP_CENTROID_CNTL_MASK 0x10
10728#define SPI_BARYC_CNTL__PERSP_CENTROID_CNTL__SHIFT 0x4
10729#define SPI_BARYC_CNTL__LINEAR_CENTER_CNTL_MASK 0x100
10730#define SPI_BARYC_CNTL__LINEAR_CENTER_CNTL__SHIFT 0x8
10731#define SPI_BARYC_CNTL__LINEAR_CENTROID_CNTL_MASK 0x1000
10732#define SPI_BARYC_CNTL__LINEAR_CENTROID_CNTL__SHIFT 0xc
10733#define SPI_BARYC_CNTL__POS_FLOAT_LOCATION_MASK 0x30000
10734#define SPI_BARYC_CNTL__POS_FLOAT_LOCATION__SHIFT 0x10
10735#define SPI_BARYC_CNTL__POS_FLOAT_ULC_MASK 0x100000
10736#define SPI_BARYC_CNTL__POS_FLOAT_ULC__SHIFT 0x14
10737#define SPI_BARYC_CNTL__FRONT_FACE_ALL_BITS_MASK 0x1000000
10738#define SPI_BARYC_CNTL__FRONT_FACE_ALL_BITS__SHIFT 0x18
10739#define SPI_TMPRING_SIZE__WAVES_MASK 0xfff
10740#define SPI_TMPRING_SIZE__WAVES__SHIFT 0x0
10741#define SPI_TMPRING_SIZE__WAVESIZE_MASK 0x1fff000
10742#define SPI_TMPRING_SIZE__WAVESIZE__SHIFT 0xc
10743#define SPI_SHADER_POS_FORMAT__POS0_EXPORT_FORMAT_MASK 0xf
10744#define SPI_SHADER_POS_FORMAT__POS0_EXPORT_FORMAT__SHIFT 0x0
10745#define SPI_SHADER_POS_FORMAT__POS1_EXPORT_FORMAT_MASK 0xf0
10746#define SPI_SHADER_POS_FORMAT__POS1_EXPORT_FORMAT__SHIFT 0x4
10747#define SPI_SHADER_POS_FORMAT__POS2_EXPORT_FORMAT_MASK 0xf00
10748#define SPI_SHADER_POS_FORMAT__POS2_EXPORT_FORMAT__SHIFT 0x8
10749#define SPI_SHADER_POS_FORMAT__POS3_EXPORT_FORMAT_MASK 0xf000
10750#define SPI_SHADER_POS_FORMAT__POS3_EXPORT_FORMAT__SHIFT 0xc
10751#define SPI_SHADER_Z_FORMAT__Z_EXPORT_FORMAT_MASK 0xf
10752#define SPI_SHADER_Z_FORMAT__Z_EXPORT_FORMAT__SHIFT 0x0
10753#define SPI_SHADER_COL_FORMAT__COL0_EXPORT_FORMAT_MASK 0xf
10754#define SPI_SHADER_COL_FORMAT__COL0_EXPORT_FORMAT__SHIFT 0x0
10755#define SPI_SHADER_COL_FORMAT__COL1_EXPORT_FORMAT_MASK 0xf0
10756#define SPI_SHADER_COL_FORMAT__COL1_EXPORT_FORMAT__SHIFT 0x4
10757#define SPI_SHADER_COL_FORMAT__COL2_EXPORT_FORMAT_MASK 0xf00
10758#define SPI_SHADER_COL_FORMAT__COL2_EXPORT_FORMAT__SHIFT 0x8
10759#define SPI_SHADER_COL_FORMAT__COL3_EXPORT_FORMAT_MASK 0xf000
10760#define SPI_SHADER_COL_FORMAT__COL3_EXPORT_FORMAT__SHIFT 0xc
10761#define SPI_SHADER_COL_FORMAT__COL4_EXPORT_FORMAT_MASK 0xf0000
10762#define SPI_SHADER_COL_FORMAT__COL4_EXPORT_FORMAT__SHIFT 0x10
10763#define SPI_SHADER_COL_FORMAT__COL5_EXPORT_FORMAT_MASK 0xf00000
10764#define SPI_SHADER_COL_FORMAT__COL5_EXPORT_FORMAT__SHIFT 0x14
10765#define SPI_SHADER_COL_FORMAT__COL6_EXPORT_FORMAT_MASK 0xf000000
10766#define SPI_SHADER_COL_FORMAT__COL6_EXPORT_FORMAT__SHIFT 0x18
10767#define SPI_SHADER_COL_FORMAT__COL7_EXPORT_FORMAT_MASK 0xf0000000
10768#define SPI_SHADER_COL_FORMAT__COL7_EXPORT_FORMAT__SHIFT 0x1c
10769#define SPI_ARB_PRIORITY__PIPE_ORDER_TS0_MASK 0x7
10770#define SPI_ARB_PRIORITY__PIPE_ORDER_TS0__SHIFT 0x0
10771#define SPI_ARB_PRIORITY__PIPE_ORDER_TS1_MASK 0x38
10772#define SPI_ARB_PRIORITY__PIPE_ORDER_TS1__SHIFT 0x3
10773#define SPI_ARB_PRIORITY__PIPE_ORDER_TS2_MASK 0x1c0
10774#define SPI_ARB_PRIORITY__PIPE_ORDER_TS2__SHIFT 0x6
10775#define SPI_ARB_PRIORITY__PIPE_ORDER_TS3_MASK 0xe00
10776#define SPI_ARB_PRIORITY__PIPE_ORDER_TS3__SHIFT 0x9
10777#define SPI_ARB_PRIORITY__TS0_DUR_MULT_MASK 0x3000
10778#define SPI_ARB_PRIORITY__TS0_DUR_MULT__SHIFT 0xc
10779#define SPI_ARB_PRIORITY__TS1_DUR_MULT_MASK 0xc000
10780#define SPI_ARB_PRIORITY__TS1_DUR_MULT__SHIFT 0xe
10781#define SPI_ARB_PRIORITY__TS2_DUR_MULT_MASK 0x30000
10782#define SPI_ARB_PRIORITY__TS2_DUR_MULT__SHIFT 0x10
10783#define SPI_ARB_PRIORITY__TS3_DUR_MULT_MASK 0xc0000
10784#define SPI_ARB_PRIORITY__TS3_DUR_MULT__SHIFT 0x12
10785#define SPI_ARB_CYCLES_0__TS0_DURATION_MASK 0xffff
10786#define SPI_ARB_CYCLES_0__TS0_DURATION__SHIFT 0x0
10787#define SPI_ARB_CYCLES_0__TS1_DURATION_MASK 0xffff0000
10788#define SPI_ARB_CYCLES_0__TS1_DURATION__SHIFT 0x10
10789#define SPI_ARB_CYCLES_1__TS2_DURATION_MASK 0xffff
10790#define SPI_ARB_CYCLES_1__TS2_DURATION__SHIFT 0x0
10791#define SPI_ARB_CYCLES_1__TS3_DURATION_MASK 0xffff0000
10792#define SPI_ARB_CYCLES_1__TS3_DURATION__SHIFT 0x10
10793#define SPI_CDBG_SYS_GFX__PS_EN_MASK 0x1
10794#define SPI_CDBG_SYS_GFX__PS_EN__SHIFT 0x0
10795#define SPI_CDBG_SYS_GFX__VS_EN_MASK 0x2
10796#define SPI_CDBG_SYS_GFX__VS_EN__SHIFT 0x1
10797#define SPI_CDBG_SYS_GFX__GS_EN_MASK 0x4
10798#define SPI_CDBG_SYS_GFX__GS_EN__SHIFT 0x2
10799#define SPI_CDBG_SYS_GFX__ES_EN_MASK 0x8
10800#define SPI_CDBG_SYS_GFX__ES_EN__SHIFT 0x3
10801#define SPI_CDBG_SYS_GFX__HS_EN_MASK 0x10
10802#define SPI_CDBG_SYS_GFX__HS_EN__SHIFT 0x4
10803#define SPI_CDBG_SYS_GFX__LS_EN_MASK 0x20
10804#define SPI_CDBG_SYS_GFX__LS_EN__SHIFT 0x5
10805#define SPI_CDBG_SYS_GFX__CS_EN_MASK 0x40
10806#define SPI_CDBG_SYS_GFX__CS_EN__SHIFT 0x6
10807#define SPI_CDBG_SYS_HP3D__PS_EN_MASK 0x1
10808#define SPI_CDBG_SYS_HP3D__PS_EN__SHIFT 0x0
10809#define SPI_CDBG_SYS_HP3D__VS_EN_MASK 0x2
10810#define SPI_CDBG_SYS_HP3D__VS_EN__SHIFT 0x1
10811#define SPI_CDBG_SYS_HP3D__GS_EN_MASK 0x4
10812#define SPI_CDBG_SYS_HP3D__GS_EN__SHIFT 0x2
10813#define SPI_CDBG_SYS_HP3D__ES_EN_MASK 0x8
10814#define SPI_CDBG_SYS_HP3D__ES_EN__SHIFT 0x3
10815#define SPI_CDBG_SYS_HP3D__HS_EN_MASK 0x10
10816#define SPI_CDBG_SYS_HP3D__HS_EN__SHIFT 0x4
10817#define SPI_CDBG_SYS_HP3D__LS_EN_MASK 0x20
10818#define SPI_CDBG_SYS_HP3D__LS_EN__SHIFT 0x5
10819#define SPI_CDBG_SYS_CS0__PIPE0_MASK 0xff
10820#define SPI_CDBG_SYS_CS0__PIPE0__SHIFT 0x0
10821#define SPI_CDBG_SYS_CS0__PIPE1_MASK 0xff00
10822#define SPI_CDBG_SYS_CS0__PIPE1__SHIFT 0x8
10823#define SPI_CDBG_SYS_CS0__PIPE2_MASK 0xff0000
10824#define SPI_CDBG_SYS_CS0__PIPE2__SHIFT 0x10
10825#define SPI_CDBG_SYS_CS0__PIPE3_MASK 0xff000000
10826#define SPI_CDBG_SYS_CS0__PIPE3__SHIFT 0x18
10827#define SPI_CDBG_SYS_CS1__PIPE0_MASK 0xff
10828#define SPI_CDBG_SYS_CS1__PIPE0__SHIFT 0x0
10829#define SPI_CDBG_SYS_CS1__PIPE1_MASK 0xff00
10830#define SPI_CDBG_SYS_CS1__PIPE1__SHIFT 0x8
10831#define SPI_CDBG_SYS_CS1__PIPE2_MASK 0xff0000
10832#define SPI_CDBG_SYS_CS1__PIPE2__SHIFT 0x10
10833#define SPI_CDBG_SYS_CS1__PIPE3_MASK 0xff000000
10834#define SPI_CDBG_SYS_CS1__PIPE3__SHIFT 0x18
10835#define SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK 0x7f
10836#define SPI_WCL_PIPE_PERCENT_GFX__VALUE__SHIFT 0x0
10837#define SPI_WCL_PIPE_PERCENT_GFX__LS_GRP_VALUE_MASK 0xf80
10838#define SPI_WCL_PIPE_PERCENT_GFX__LS_GRP_VALUE__SHIFT 0x7
10839#define SPI_WCL_PIPE_PERCENT_GFX__HS_GRP_VALUE_MASK 0x1f000
10840#define SPI_WCL_PIPE_PERCENT_GFX__HS_GRP_VALUE__SHIFT 0xc
10841#define SPI_WCL_PIPE_PERCENT_GFX__ES_GRP_VALUE_MASK 0x3e0000
10842#define SPI_WCL_PIPE_PERCENT_GFX__ES_GRP_VALUE__SHIFT 0x11
10843#define SPI_WCL_PIPE_PERCENT_GFX__GS_GRP_VALUE_MASK 0x7c00000
10844#define SPI_WCL_PIPE_PERCENT_GFX__GS_GRP_VALUE__SHIFT 0x16
10845#define SPI_WCL_PIPE_PERCENT_HP3D__VALUE_MASK 0x7f
10846#define SPI_WCL_PIPE_PERCENT_HP3D__VALUE__SHIFT 0x0
10847#define SPI_WCL_PIPE_PERCENT_HP3D__LS_GRP_VALUE_MASK 0xf80
10848#define SPI_WCL_PIPE_PERCENT_HP3D__LS_GRP_VALUE__SHIFT 0x7
10849#define SPI_WCL_PIPE_PERCENT_HP3D__HS_GRP_VALUE_MASK 0x1f000
10850#define SPI_WCL_PIPE_PERCENT_HP3D__HS_GRP_VALUE__SHIFT 0xc
10851#define SPI_WCL_PIPE_PERCENT_HP3D__ES_GRP_VALUE_MASK 0x3e0000
10852#define SPI_WCL_PIPE_PERCENT_HP3D__ES_GRP_VALUE__SHIFT 0x11
10853#define SPI_WCL_PIPE_PERCENT_HP3D__GS_GRP_VALUE_MASK 0x7c00000
10854#define SPI_WCL_PIPE_PERCENT_HP3D__GS_GRP_VALUE__SHIFT 0x16
10855#define SPI_WCL_PIPE_PERCENT_CS0__VALUE_MASK 0x7f
10856#define SPI_WCL_PIPE_PERCENT_CS0__VALUE__SHIFT 0x0
10857#define SPI_WCL_PIPE_PERCENT_CS1__VALUE_MASK 0x7f
10858#define SPI_WCL_PIPE_PERCENT_CS1__VALUE__SHIFT 0x0
10859#define SPI_WCL_PIPE_PERCENT_CS2__VALUE_MASK 0x7f
10860#define SPI_WCL_PIPE_PERCENT_CS2__VALUE__SHIFT 0x0
10861#define SPI_WCL_PIPE_PERCENT_CS3__VALUE_MASK 0x7f
10862#define SPI_WCL_PIPE_PERCENT_CS3__VALUE__SHIFT 0x0
10863#define SPI_WCL_PIPE_PERCENT_CS4__VALUE_MASK 0x7f
10864#define SPI_WCL_PIPE_PERCENT_CS4__VALUE__SHIFT 0x0
10865#define SPI_WCL_PIPE_PERCENT_CS5__VALUE_MASK 0x7f
10866#define SPI_WCL_PIPE_PERCENT_CS5__VALUE__SHIFT 0x0
10867#define SPI_WCL_PIPE_PERCENT_CS6__VALUE_MASK 0x7f
10868#define SPI_WCL_PIPE_PERCENT_CS6__VALUE__SHIFT 0x0
10869#define SPI_WCL_PIPE_PERCENT_CS7__VALUE_MASK 0x7f
10870#define SPI_WCL_PIPE_PERCENT_CS7__VALUE__SHIFT 0x0
10871#define SPI_GDBG_WAVE_CNTL__STALL_RA_MASK 0x1
10872#define SPI_GDBG_WAVE_CNTL__STALL_RA__SHIFT 0x0
10873#define SPI_GDBG_WAVE_CNTL__STALL_VMID_MASK 0x1fffe
10874#define SPI_GDBG_WAVE_CNTL__STALL_VMID__SHIFT 0x1
10875#define SPI_GDBG_TRAP_CONFIG__ME_SEL_MASK 0x3
10876#define SPI_GDBG_TRAP_CONFIG__ME_SEL__SHIFT 0x0
10877#define SPI_GDBG_TRAP_CONFIG__PIPE_SEL_MASK 0xc
10878#define SPI_GDBG_TRAP_CONFIG__PIPE_SEL__SHIFT 0x2
10879#define SPI_GDBG_TRAP_CONFIG__QUEUE_SEL_MASK 0x70
10880#define SPI_GDBG_TRAP_CONFIG__QUEUE_SEL__SHIFT 0x4
10881#define SPI_GDBG_TRAP_CONFIG__ME_MATCH_MASK 0x80
10882#define SPI_GDBG_TRAP_CONFIG__ME_MATCH__SHIFT 0x7
10883#define SPI_GDBG_TRAP_CONFIG__PIPE_MATCH_MASK 0x100
10884#define SPI_GDBG_TRAP_CONFIG__PIPE_MATCH__SHIFT 0x8
10885#define SPI_GDBG_TRAP_CONFIG__QUEUE_MATCH_MASK 0x200
10886#define SPI_GDBG_TRAP_CONFIG__QUEUE_MATCH__SHIFT 0x9
10887#define SPI_GDBG_TRAP_CONFIG__TRAP_EN_MASK 0x8000
10888#define SPI_GDBG_TRAP_CONFIG__TRAP_EN__SHIFT 0xf
10889#define SPI_GDBG_TRAP_CONFIG__VMID_SEL_MASK 0xffff0000
10890#define SPI_GDBG_TRAP_CONFIG__VMID_SEL__SHIFT 0x10
10891#define SPI_GDBG_TRAP_MASK__EXCP_EN_MASK 0x1ff
10892#define SPI_GDBG_TRAP_MASK__EXCP_EN__SHIFT 0x0
10893#define SPI_GDBG_TRAP_MASK__REPLACE_MASK 0x200
10894#define SPI_GDBG_TRAP_MASK__REPLACE__SHIFT 0x9
10895#define SPI_GDBG_TBA_LO__MEM_BASE_MASK 0xffffffff
10896#define SPI_GDBG_TBA_LO__MEM_BASE__SHIFT 0x0
10897#define SPI_GDBG_TBA_HI__MEM_BASE_MASK 0xff
10898#define SPI_GDBG_TBA_HI__MEM_BASE__SHIFT 0x0
10899#define SPI_GDBG_TMA_LO__MEM_BASE_MASK 0xffffffff
10900#define SPI_GDBG_TMA_LO__MEM_BASE__SHIFT 0x0
10901#define SPI_GDBG_TMA_HI__MEM_BASE_MASK 0xff
10902#define SPI_GDBG_TMA_HI__MEM_BASE__SHIFT 0x0
10903#define SPI_GDBG_TRAP_DATA0__DATA_MASK 0xffffffff
10904#define SPI_GDBG_TRAP_DATA0__DATA__SHIFT 0x0
10905#define SPI_GDBG_TRAP_DATA1__DATA_MASK 0xffffffff
10906#define SPI_GDBG_TRAP_DATA1__DATA__SHIFT 0x0
10907#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_MASK 0x1
10908#define SPI_RESET_DEBUG__DISABLE_GFX_RESET__SHIFT 0x0
10909#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_PER_VMID_MASK 0x2
10910#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_PER_VMID__SHIFT 0x1
10911#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_ALL_VMID_MASK 0x4
10912#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_ALL_VMID__SHIFT 0x2
10913#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_RESOURCE_MASK 0x8
10914#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_RESOURCE__SHIFT 0x3
10915#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_PRIORITY_MASK 0x10
10916#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_PRIORITY__SHIFT 0x4
10917#define SPI_COMPUTE_QUEUE_RESET__RESET_MASK 0x1
10918#define SPI_COMPUTE_QUEUE_RESET__RESET__SHIFT 0x0
10919#define SPI_RESOURCE_RESERVE_CU_0__VGPR_MASK 0xf
10920#define SPI_RESOURCE_RESERVE_CU_0__VGPR__SHIFT 0x0
10921#define SPI_RESOURCE_RESERVE_CU_0__SGPR_MASK 0xf0
10922#define SPI_RESOURCE_RESERVE_CU_0__SGPR__SHIFT 0x4
10923#define SPI_RESOURCE_RESERVE_CU_0__LDS_MASK 0xf00
10924#define SPI_RESOURCE_RESERVE_CU_0__LDS__SHIFT 0x8
10925#define SPI_RESOURCE_RESERVE_CU_0__WAVES_MASK 0x7000
10926#define SPI_RESOURCE_RESERVE_CU_0__WAVES__SHIFT 0xc
10927#define SPI_RESOURCE_RESERVE_CU_0__BARRIERS_MASK 0x78000
10928#define SPI_RESOURCE_RESERVE_CU_0__BARRIERS__SHIFT 0xf
10929#define SPI_RESOURCE_RESERVE_CU_1__VGPR_MASK 0xf
10930#define SPI_RESOURCE_RESERVE_CU_1__VGPR__SHIFT 0x0
10931#define SPI_RESOURCE_RESERVE_CU_1__SGPR_MASK 0xf0
10932#define SPI_RESOURCE_RESERVE_CU_1__SGPR__SHIFT 0x4
10933#define SPI_RESOURCE_RESERVE_CU_1__LDS_MASK 0xf00
10934#define SPI_RESOURCE_RESERVE_CU_1__LDS__SHIFT 0x8
10935#define SPI_RESOURCE_RESERVE_CU_1__WAVES_MASK 0x7000
10936#define SPI_RESOURCE_RESERVE_CU_1__WAVES__SHIFT 0xc
10937#define SPI_RESOURCE_RESERVE_CU_1__BARRIERS_MASK 0x78000
10938#define SPI_RESOURCE_RESERVE_CU_1__BARRIERS__SHIFT 0xf
10939#define SPI_RESOURCE_RESERVE_CU_2__VGPR_MASK 0xf
10940#define SPI_RESOURCE_RESERVE_CU_2__VGPR__SHIFT 0x0
10941#define SPI_RESOURCE_RESERVE_CU_2__SGPR_MASK 0xf0
10942#define SPI_RESOURCE_RESERVE_CU_2__SGPR__SHIFT 0x4
10943#define SPI_RESOURCE_RESERVE_CU_2__LDS_MASK 0xf00
10944#define SPI_RESOURCE_RESERVE_CU_2__LDS__SHIFT 0x8
10945#define SPI_RESOURCE_RESERVE_CU_2__WAVES_MASK 0x7000
10946#define SPI_RESOURCE_RESERVE_CU_2__WAVES__SHIFT 0xc
10947#define SPI_RESOURCE_RESERVE_CU_2__BARRIERS_MASK 0x78000
10948#define SPI_RESOURCE_RESERVE_CU_2__BARRIERS__SHIFT 0xf
10949#define SPI_RESOURCE_RESERVE_CU_3__VGPR_MASK 0xf
10950#define SPI_RESOURCE_RESERVE_CU_3__VGPR__SHIFT 0x0
10951#define SPI_RESOURCE_RESERVE_CU_3__SGPR_MASK 0xf0
10952#define SPI_RESOURCE_RESERVE_CU_3__SGPR__SHIFT 0x4
10953#define SPI_RESOURCE_RESERVE_CU_3__LDS_MASK 0xf00
10954#define SPI_RESOURCE_RESERVE_CU_3__LDS__SHIFT 0x8
10955#define SPI_RESOURCE_RESERVE_CU_3__WAVES_MASK 0x7000
10956#define SPI_RESOURCE_RESERVE_CU_3__WAVES__SHIFT 0xc
10957#define SPI_RESOURCE_RESERVE_CU_3__BARRIERS_MASK 0x78000
10958#define SPI_RESOURCE_RESERVE_CU_3__BARRIERS__SHIFT 0xf
10959#define SPI_RESOURCE_RESERVE_CU_4__VGPR_MASK 0xf
10960#define SPI_RESOURCE_RESERVE_CU_4__VGPR__SHIFT 0x0
10961#define SPI_RESOURCE_RESERVE_CU_4__SGPR_MASK 0xf0
10962#define SPI_RESOURCE_RESERVE_CU_4__SGPR__SHIFT 0x4
10963#define SPI_RESOURCE_RESERVE_CU_4__LDS_MASK 0xf00
10964#define SPI_RESOURCE_RESERVE_CU_4__LDS__SHIFT 0x8
10965#define SPI_RESOURCE_RESERVE_CU_4__WAVES_MASK 0x7000
10966#define SPI_RESOURCE_RESERVE_CU_4__WAVES__SHIFT 0xc
10967#define SPI_RESOURCE_RESERVE_CU_4__BARRIERS_MASK 0x78000
10968#define SPI_RESOURCE_RESERVE_CU_4__BARRIERS__SHIFT 0xf
10969#define SPI_RESOURCE_RESERVE_CU_5__VGPR_MASK 0xf
10970#define SPI_RESOURCE_RESERVE_CU_5__VGPR__SHIFT 0x0
10971#define SPI_RESOURCE_RESERVE_CU_5__SGPR_MASK 0xf0
10972#define SPI_RESOURCE_RESERVE_CU_5__SGPR__SHIFT 0x4
10973#define SPI_RESOURCE_RESERVE_CU_5__LDS_MASK 0xf00
10974#define SPI_RESOURCE_RESERVE_CU_5__LDS__SHIFT 0x8
10975#define SPI_RESOURCE_RESERVE_CU_5__WAVES_MASK 0x7000
10976#define SPI_RESOURCE_RESERVE_CU_5__WAVES__SHIFT 0xc
10977#define SPI_RESOURCE_RESERVE_CU_5__BARRIERS_MASK 0x78000
10978#define SPI_RESOURCE_RESERVE_CU_5__BARRIERS__SHIFT 0xf
10979#define SPI_RESOURCE_RESERVE_CU_6__VGPR_MASK 0xf
10980#define SPI_RESOURCE_RESERVE_CU_6__VGPR__SHIFT 0x0
10981#define SPI_RESOURCE_RESERVE_CU_6__SGPR_MASK 0xf0
10982#define SPI_RESOURCE_RESERVE_CU_6__SGPR__SHIFT 0x4
10983#define SPI_RESOURCE_RESERVE_CU_6__LDS_MASK 0xf00
10984#define SPI_RESOURCE_RESERVE_CU_6__LDS__SHIFT 0x8
10985#define SPI_RESOURCE_RESERVE_CU_6__WAVES_MASK 0x7000
10986#define SPI_RESOURCE_RESERVE_CU_6__WAVES__SHIFT 0xc
10987#define SPI_RESOURCE_RESERVE_CU_6__BARRIERS_MASK 0x78000
10988#define SPI_RESOURCE_RESERVE_CU_6__BARRIERS__SHIFT 0xf
10989#define SPI_RESOURCE_RESERVE_CU_7__VGPR_MASK 0xf
10990#define SPI_RESOURCE_RESERVE_CU_7__VGPR__SHIFT 0x0
10991#define SPI_RESOURCE_RESERVE_CU_7__SGPR_MASK 0xf0
10992#define SPI_RESOURCE_RESERVE_CU_7__SGPR__SHIFT 0x4
10993#define SPI_RESOURCE_RESERVE_CU_7__LDS_MASK 0xf00
10994#define SPI_RESOURCE_RESERVE_CU_7__LDS__SHIFT 0x8
10995#define SPI_RESOURCE_RESERVE_CU_7__WAVES_MASK 0x7000
10996#define SPI_RESOURCE_RESERVE_CU_7__WAVES__SHIFT 0xc
10997#define SPI_RESOURCE_RESERVE_CU_7__BARRIERS_MASK 0x78000
10998#define SPI_RESOURCE_RESERVE_CU_7__BARRIERS__SHIFT 0xf
10999#define SPI_RESOURCE_RESERVE_CU_8__VGPR_MASK 0xf
11000#define SPI_RESOURCE_RESERVE_CU_8__VGPR__SHIFT 0x0
11001#define SPI_RESOURCE_RESERVE_CU_8__SGPR_MASK 0xf0
11002#define SPI_RESOURCE_RESERVE_CU_8__SGPR__SHIFT 0x4
11003#define SPI_RESOURCE_RESERVE_CU_8__LDS_MASK 0xf00
11004#define SPI_RESOURCE_RESERVE_CU_8__LDS__SHIFT 0x8
11005#define SPI_RESOURCE_RESERVE_CU_8__WAVES_MASK 0x7000
11006#define SPI_RESOURCE_RESERVE_CU_8__WAVES__SHIFT 0xc
11007#define SPI_RESOURCE_RESERVE_CU_8__BARRIERS_MASK 0x78000
11008#define SPI_RESOURCE_RESERVE_CU_8__BARRIERS__SHIFT 0xf
11009#define SPI_RESOURCE_RESERVE_CU_9__VGPR_MASK 0xf
11010#define SPI_RESOURCE_RESERVE_CU_9__VGPR__SHIFT 0x0
11011#define SPI_RESOURCE_RESERVE_CU_9__SGPR_MASK 0xf0
11012#define SPI_RESOURCE_RESERVE_CU_9__SGPR__SHIFT 0x4
11013#define SPI_RESOURCE_RESERVE_CU_9__LDS_MASK 0xf00
11014#define SPI_RESOURCE_RESERVE_CU_9__LDS__SHIFT 0x8
11015#define SPI_RESOURCE_RESERVE_CU_9__WAVES_MASK 0x7000
11016#define SPI_RESOURCE_RESERVE_CU_9__WAVES__SHIFT 0xc
11017#define SPI_RESOURCE_RESERVE_CU_9__BARRIERS_MASK 0x78000
11018#define SPI_RESOURCE_RESERVE_CU_9__BARRIERS__SHIFT 0xf
11019#define SPI_RESOURCE_RESERVE_CU_10__VGPR_MASK 0xf
11020#define SPI_RESOURCE_RESERVE_CU_10__VGPR__SHIFT 0x0
11021#define SPI_RESOURCE_RESERVE_CU_10__SGPR_MASK 0xf0
11022#define SPI_RESOURCE_RESERVE_CU_10__SGPR__SHIFT 0x4
11023#define SPI_RESOURCE_RESERVE_CU_10__LDS_MASK 0xf00
11024#define SPI_RESOURCE_RESERVE_CU_10__LDS__SHIFT 0x8
11025#define SPI_RESOURCE_RESERVE_CU_10__WAVES_MASK 0x7000
11026#define SPI_RESOURCE_RESERVE_CU_10__WAVES__SHIFT 0xc
11027#define SPI_RESOURCE_RESERVE_CU_10__BARRIERS_MASK 0x78000
11028#define SPI_RESOURCE_RESERVE_CU_10__BARRIERS__SHIFT 0xf
11029#define SPI_RESOURCE_RESERVE_CU_11__VGPR_MASK 0xf
11030#define SPI_RESOURCE_RESERVE_CU_11__VGPR__SHIFT 0x0
11031#define SPI_RESOURCE_RESERVE_CU_11__SGPR_MASK 0xf0
11032#define SPI_RESOURCE_RESERVE_CU_11__SGPR__SHIFT 0x4
11033#define SPI_RESOURCE_RESERVE_CU_11__LDS_MASK 0xf00
11034#define SPI_RESOURCE_RESERVE_CU_11__LDS__SHIFT 0x8
11035#define SPI_RESOURCE_RESERVE_CU_11__WAVES_MASK 0x7000
11036#define SPI_RESOURCE_RESERVE_CU_11__WAVES__SHIFT 0xc
11037#define SPI_RESOURCE_RESERVE_CU_11__BARRIERS_MASK 0x78000
11038#define SPI_RESOURCE_RESERVE_CU_11__BARRIERS__SHIFT 0xf
11039#define SPI_RESOURCE_RESERVE_CU_12__VGPR_MASK 0xf
11040#define SPI_RESOURCE_RESERVE_CU_12__VGPR__SHIFT 0x0
11041#define SPI_RESOURCE_RESERVE_CU_12__SGPR_MASK 0xf0
11042#define SPI_RESOURCE_RESERVE_CU_12__SGPR__SHIFT 0x4
11043#define SPI_RESOURCE_RESERVE_CU_12__LDS_MASK 0xf00
11044#define SPI_RESOURCE_RESERVE_CU_12__LDS__SHIFT 0x8
11045#define SPI_RESOURCE_RESERVE_CU_12__WAVES_MASK 0x7000
11046#define SPI_RESOURCE_RESERVE_CU_12__WAVES__SHIFT 0xc
11047#define SPI_RESOURCE_RESERVE_CU_12__BARRIERS_MASK 0x78000
11048#define SPI_RESOURCE_RESERVE_CU_12__BARRIERS__SHIFT 0xf
11049#define SPI_RESOURCE_RESERVE_CU_13__VGPR_MASK 0xf
11050#define SPI_RESOURCE_RESERVE_CU_13__VGPR__SHIFT 0x0
11051#define SPI_RESOURCE_RESERVE_CU_13__SGPR_MASK 0xf0
11052#define SPI_RESOURCE_RESERVE_CU_13__SGPR__SHIFT 0x4
11053#define SPI_RESOURCE_RESERVE_CU_13__LDS_MASK 0xf00
11054#define SPI_RESOURCE_RESERVE_CU_13__LDS__SHIFT 0x8
11055#define SPI_RESOURCE_RESERVE_CU_13__WAVES_MASK 0x7000
11056#define SPI_RESOURCE_RESERVE_CU_13__WAVES__SHIFT 0xc
11057#define SPI_RESOURCE_RESERVE_CU_13__BARRIERS_MASK 0x78000
11058#define SPI_RESOURCE_RESERVE_CU_13__BARRIERS__SHIFT 0xf
11059#define SPI_RESOURCE_RESERVE_CU_14__VGPR_MASK 0xf
11060#define SPI_RESOURCE_RESERVE_CU_14__VGPR__SHIFT 0x0
11061#define SPI_RESOURCE_RESERVE_CU_14__SGPR_MASK 0xf0
11062#define SPI_RESOURCE_RESERVE_CU_14__SGPR__SHIFT 0x4
11063#define SPI_RESOURCE_RESERVE_CU_14__LDS_MASK 0xf00
11064#define SPI_RESOURCE_RESERVE_CU_14__LDS__SHIFT 0x8
11065#define SPI_RESOURCE_RESERVE_CU_14__WAVES_MASK 0x7000
11066#define SPI_RESOURCE_RESERVE_CU_14__WAVES__SHIFT 0xc
11067#define SPI_RESOURCE_RESERVE_CU_14__BARRIERS_MASK 0x78000
11068#define SPI_RESOURCE_RESERVE_CU_14__BARRIERS__SHIFT 0xf
11069#define SPI_RESOURCE_RESERVE_CU_15__VGPR_MASK 0xf
11070#define SPI_RESOURCE_RESERVE_CU_15__VGPR__SHIFT 0x0
11071#define SPI_RESOURCE_RESERVE_CU_15__SGPR_MASK 0xf0
11072#define SPI_RESOURCE_RESERVE_CU_15__SGPR__SHIFT 0x4
11073#define SPI_RESOURCE_RESERVE_CU_15__LDS_MASK 0xf00
11074#define SPI_RESOURCE_RESERVE_CU_15__LDS__SHIFT 0x8
11075#define SPI_RESOURCE_RESERVE_CU_15__WAVES_MASK 0x7000
11076#define SPI_RESOURCE_RESERVE_CU_15__WAVES__SHIFT 0xc
11077#define SPI_RESOURCE_RESERVE_CU_15__BARRIERS_MASK 0x78000
11078#define SPI_RESOURCE_RESERVE_CU_15__BARRIERS__SHIFT 0xf
11079#define SPI_RESOURCE_RESERVE_EN_CU_0__EN_MASK 0x1
11080#define SPI_RESOURCE_RESERVE_EN_CU_0__EN__SHIFT 0x0
11081#define SPI_RESOURCE_RESERVE_EN_CU_0__TYPE_MASK_MASK 0xfffe
11082#define SPI_RESOURCE_RESERVE_EN_CU_0__TYPE_MASK__SHIFT 0x1
11083#define SPI_RESOURCE_RESERVE_EN_CU_0__QUEUE_MASK_MASK 0xff0000
11084#define SPI_RESOURCE_RESERVE_EN_CU_0__QUEUE_MASK__SHIFT 0x10
11085#define SPI_RESOURCE_RESERVE_EN_CU_0__RESERVE_SPACE_ONLY_MASK 0x1000000
11086#define SPI_RESOURCE_RESERVE_EN_CU_0__RESERVE_SPACE_ONLY__SHIFT 0x18
11087#define SPI_RESOURCE_RESERVE_EN_CU_1__EN_MASK 0x1
11088#define SPI_RESOURCE_RESERVE_EN_CU_1__EN__SHIFT 0x0
11089#define SPI_RESOURCE_RESERVE_EN_CU_1__TYPE_MASK_MASK 0xfffe
11090#define SPI_RESOURCE_RESERVE_EN_CU_1__TYPE_MASK__SHIFT 0x1
11091#define SPI_RESOURCE_RESERVE_EN_CU_1__QUEUE_MASK_MASK 0xff0000
11092#define SPI_RESOURCE_RESERVE_EN_CU_1__QUEUE_MASK__SHIFT 0x10
11093#define SPI_RESOURCE_RESERVE_EN_CU_1__RESERVE_SPACE_ONLY_MASK 0x1000000
11094#define SPI_RESOURCE_RESERVE_EN_CU_1__RESERVE_SPACE_ONLY__SHIFT 0x18
11095#define SPI_RESOURCE_RESERVE_EN_CU_2__EN_MASK 0x1
11096#define SPI_RESOURCE_RESERVE_EN_CU_2__EN__SHIFT 0x0
11097#define SPI_RESOURCE_RESERVE_EN_CU_2__TYPE_MASK_MASK 0xfffe
11098#define SPI_RESOURCE_RESERVE_EN_CU_2__TYPE_MASK__SHIFT 0x1
11099#define SPI_RESOURCE_RESERVE_EN_CU_2__QUEUE_MASK_MASK 0xff0000
11100#define SPI_RESOURCE_RESERVE_EN_CU_2__QUEUE_MASK__SHIFT 0x10
11101#define SPI_RESOURCE_RESERVE_EN_CU_2__RESERVE_SPACE_ONLY_MASK 0x1000000
11102#define SPI_RESOURCE_RESERVE_EN_CU_2__RESERVE_SPACE_ONLY__SHIFT 0x18
11103#define SPI_RESOURCE_RESERVE_EN_CU_3__EN_MASK 0x1
11104#define SPI_RESOURCE_RESERVE_EN_CU_3__EN__SHIFT 0x0
11105#define SPI_RESOURCE_RESERVE_EN_CU_3__TYPE_MASK_MASK 0xfffe
11106#define SPI_RESOURCE_RESERVE_EN_CU_3__TYPE_MASK__SHIFT 0x1
11107#define SPI_RESOURCE_RESERVE_EN_CU_3__QUEUE_MASK_MASK 0xff0000
11108#define SPI_RESOURCE_RESERVE_EN_CU_3__QUEUE_MASK__SHIFT 0x10
11109#define SPI_RESOURCE_RESERVE_EN_CU_3__RESERVE_SPACE_ONLY_MASK 0x1000000
11110#define SPI_RESOURCE_RESERVE_EN_CU_3__RESERVE_SPACE_ONLY__SHIFT 0x18
11111#define SPI_RESOURCE_RESERVE_EN_CU_4__EN_MASK 0x1
11112#define SPI_RESOURCE_RESERVE_EN_CU_4__EN__SHIFT 0x0
11113#define SPI_RESOURCE_RESERVE_EN_CU_4__TYPE_MASK_MASK 0xfffe
11114#define SPI_RESOURCE_RESERVE_EN_CU_4__TYPE_MASK__SHIFT 0x1
11115#define SPI_RESOURCE_RESERVE_EN_CU_4__QUEUE_MASK_MASK 0xff0000
11116#define SPI_RESOURCE_RESERVE_EN_CU_4__QUEUE_MASK__SHIFT 0x10
11117#define SPI_RESOURCE_RESERVE_EN_CU_4__RESERVE_SPACE_ONLY_MASK 0x1000000
11118#define SPI_RESOURCE_RESERVE_EN_CU_4__RESERVE_SPACE_ONLY__SHIFT 0x18
11119#define SPI_RESOURCE_RESERVE_EN_CU_5__EN_MASK 0x1
11120#define SPI_RESOURCE_RESERVE_EN_CU_5__EN__SHIFT 0x0
11121#define SPI_RESOURCE_RESERVE_EN_CU_5__TYPE_MASK_MASK 0xfffe
11122#define SPI_RESOURCE_RESERVE_EN_CU_5__TYPE_MASK__SHIFT 0x1
11123#define SPI_RESOURCE_RESERVE_EN_CU_5__QUEUE_MASK_MASK 0xff0000
11124#define SPI_RESOURCE_RESERVE_EN_CU_5__QUEUE_MASK__SHIFT 0x10
11125#define SPI_RESOURCE_RESERVE_EN_CU_5__RESERVE_SPACE_ONLY_MASK 0x1000000
11126#define SPI_RESOURCE_RESERVE_EN_CU_5__RESERVE_SPACE_ONLY__SHIFT 0x18
11127#define SPI_RESOURCE_RESERVE_EN_CU_6__EN_MASK 0x1
11128#define SPI_RESOURCE_RESERVE_EN_CU_6__EN__SHIFT 0x0
11129#define SPI_RESOURCE_RESERVE_EN_CU_6__TYPE_MASK_MASK 0xfffe
11130#define SPI_RESOURCE_RESERVE_EN_CU_6__TYPE_MASK__SHIFT 0x1
11131#define SPI_RESOURCE_RESERVE_EN_CU_6__QUEUE_MASK_MASK 0xff0000
11132#define SPI_RESOURCE_RESERVE_EN_CU_6__QUEUE_MASK__SHIFT 0x10
11133#define SPI_RESOURCE_RESERVE_EN_CU_6__RESERVE_SPACE_ONLY_MASK 0x1000000
11134#define SPI_RESOURCE_RESERVE_EN_CU_6__RESERVE_SPACE_ONLY__SHIFT 0x18
11135#define SPI_RESOURCE_RESERVE_EN_CU_7__EN_MASK 0x1
11136#define SPI_RESOURCE_RESERVE_EN_CU_7__EN__SHIFT 0x0
11137#define SPI_RESOURCE_RESERVE_EN_CU_7__TYPE_MASK_MASK 0xfffe
11138#define SPI_RESOURCE_RESERVE_EN_CU_7__TYPE_MASK__SHIFT 0x1
11139#define SPI_RESOURCE_RESERVE_EN_CU_7__QUEUE_MASK_MASK 0xff0000
11140#define SPI_RESOURCE_RESERVE_EN_CU_7__QUEUE_MASK__SHIFT 0x10
11141#define SPI_RESOURCE_RESERVE_EN_CU_7__RESERVE_SPACE_ONLY_MASK 0x1000000
11142#define SPI_RESOURCE_RESERVE_EN_CU_7__RESERVE_SPACE_ONLY__SHIFT 0x18
11143#define SPI_RESOURCE_RESERVE_EN_CU_8__EN_MASK 0x1
11144#define SPI_RESOURCE_RESERVE_EN_CU_8__EN__SHIFT 0x0
11145#define SPI_RESOURCE_RESERVE_EN_CU_8__TYPE_MASK_MASK 0xfffe
11146#define SPI_RESOURCE_RESERVE_EN_CU_8__TYPE_MASK__SHIFT 0x1
11147#define SPI_RESOURCE_RESERVE_EN_CU_8__QUEUE_MASK_MASK 0xff0000
11148#define SPI_RESOURCE_RESERVE_EN_CU_8__QUEUE_MASK__SHIFT 0x10
11149#define SPI_RESOURCE_RESERVE_EN_CU_8__RESERVE_SPACE_ONLY_MASK 0x1000000
11150#define SPI_RESOURCE_RESERVE_EN_CU_8__RESERVE_SPACE_ONLY__SHIFT 0x18
11151#define SPI_RESOURCE_RESERVE_EN_CU_9__EN_MASK 0x1
11152#define SPI_RESOURCE_RESERVE_EN_CU_9__EN__SHIFT 0x0
11153#define SPI_RESOURCE_RESERVE_EN_CU_9__TYPE_MASK_MASK 0xfffe
11154#define SPI_RESOURCE_RESERVE_EN_CU_9__TYPE_MASK__SHIFT 0x1
11155#define SPI_RESOURCE_RESERVE_EN_CU_9__QUEUE_MASK_MASK 0xff0000
11156#define SPI_RESOURCE_RESERVE_EN_CU_9__QUEUE_MASK__SHIFT 0x10
11157#define SPI_RESOURCE_RESERVE_EN_CU_9__RESERVE_SPACE_ONLY_MASK 0x1000000
11158#define SPI_RESOURCE_RESERVE_EN_CU_9__RESERVE_SPACE_ONLY__SHIFT 0x18
11159#define SPI_RESOURCE_RESERVE_EN_CU_10__EN_MASK 0x1
11160#define SPI_RESOURCE_RESERVE_EN_CU_10__EN__SHIFT 0x0
11161#define SPI_RESOURCE_RESERVE_EN_CU_10__TYPE_MASK_MASK 0xfffe
11162#define SPI_RESOURCE_RESERVE_EN_CU_10__TYPE_MASK__SHIFT 0x1
11163#define SPI_RESOURCE_RESERVE_EN_CU_10__QUEUE_MASK_MASK 0xff0000
11164#define SPI_RESOURCE_RESERVE_EN_CU_10__QUEUE_MASK__SHIFT 0x10
11165#define SPI_RESOURCE_RESERVE_EN_CU_10__RESERVE_SPACE_ONLY_MASK 0x1000000
11166#define SPI_RESOURCE_RESERVE_EN_CU_10__RESERVE_SPACE_ONLY__SHIFT 0x18
11167#define SPI_RESOURCE_RESERVE_EN_CU_11__EN_MASK 0x1
11168#define SPI_RESOURCE_RESERVE_EN_CU_11__EN__SHIFT 0x0
11169#define SPI_RESOURCE_RESERVE_EN_CU_11__TYPE_MASK_MASK 0xfffe
11170#define SPI_RESOURCE_RESERVE_EN_CU_11__TYPE_MASK__SHIFT 0x1
11171#define SPI_RESOURCE_RESERVE_EN_CU_11__QUEUE_MASK_MASK 0xff0000
11172#define SPI_RESOURCE_RESERVE_EN_CU_11__QUEUE_MASK__SHIFT 0x10
11173#define SPI_RESOURCE_RESERVE_EN_CU_11__RESERVE_SPACE_ONLY_MASK 0x1000000
11174#define SPI_RESOURCE_RESERVE_EN_CU_11__RESERVE_SPACE_ONLY__SHIFT 0x18
11175#define SPI_RESOURCE_RESERVE_EN_CU_12__EN_MASK 0x1
11176#define SPI_RESOURCE_RESERVE_EN_CU_12__EN__SHIFT 0x0
11177#define SPI_RESOURCE_RESERVE_EN_CU_12__TYPE_MASK_MASK 0xfffe
11178#define SPI_RESOURCE_RESERVE_EN_CU_12__TYPE_MASK__SHIFT 0x1
11179#define SPI_RESOURCE_RESERVE_EN_CU_12__QUEUE_MASK_MASK 0xff0000
11180#define SPI_RESOURCE_RESERVE_EN_CU_12__QUEUE_MASK__SHIFT 0x10
11181#define SPI_RESOURCE_RESERVE_EN_CU_12__RESERVE_SPACE_ONLY_MASK 0x1000000
11182#define SPI_RESOURCE_RESERVE_EN_CU_12__RESERVE_SPACE_ONLY__SHIFT 0x18
11183#define SPI_RESOURCE_RESERVE_EN_CU_13__EN_MASK 0x1
11184#define SPI_RESOURCE_RESERVE_EN_CU_13__EN__SHIFT 0x0
11185#define SPI_RESOURCE_RESERVE_EN_CU_13__TYPE_MASK_MASK 0xfffe
11186#define SPI_RESOURCE_RESERVE_EN_CU_13__TYPE_MASK__SHIFT 0x1
11187#define SPI_RESOURCE_RESERVE_EN_CU_13__QUEUE_MASK_MASK 0xff0000
11188#define SPI_RESOURCE_RESERVE_EN_CU_13__QUEUE_MASK__SHIFT 0x10
11189#define SPI_RESOURCE_RESERVE_EN_CU_13__RESERVE_SPACE_ONLY_MASK 0x1000000
11190#define SPI_RESOURCE_RESERVE_EN_CU_13__RESERVE_SPACE_ONLY__SHIFT 0x18
11191#define SPI_RESOURCE_RESERVE_EN_CU_14__EN_MASK 0x1
11192#define SPI_RESOURCE_RESERVE_EN_CU_14__EN__SHIFT 0x0
11193#define SPI_RESOURCE_RESERVE_EN_CU_14__TYPE_MASK_MASK 0xfffe
11194#define SPI_RESOURCE_RESERVE_EN_CU_14__TYPE_MASK__SHIFT 0x1
11195#define SPI_RESOURCE_RESERVE_EN_CU_14__QUEUE_MASK_MASK 0xff0000
11196#define SPI_RESOURCE_RESERVE_EN_CU_14__QUEUE_MASK__SHIFT 0x10
11197#define SPI_RESOURCE_RESERVE_EN_CU_14__RESERVE_SPACE_ONLY_MASK 0x1000000
11198#define SPI_RESOURCE_RESERVE_EN_CU_14__RESERVE_SPACE_ONLY__SHIFT 0x18
11199#define SPI_RESOURCE_RESERVE_EN_CU_15__EN_MASK 0x1
11200#define SPI_RESOURCE_RESERVE_EN_CU_15__EN__SHIFT 0x0
11201#define SPI_RESOURCE_RESERVE_EN_CU_15__TYPE_MASK_MASK 0xfffe
11202#define SPI_RESOURCE_RESERVE_EN_CU_15__TYPE_MASK__SHIFT 0x1
11203#define SPI_RESOURCE_RESERVE_EN_CU_15__QUEUE_MASK_MASK 0xff0000
11204#define SPI_RESOURCE_RESERVE_EN_CU_15__QUEUE_MASK__SHIFT 0x10
11205#define SPI_RESOURCE_RESERVE_EN_CU_15__RESERVE_SPACE_ONLY_MASK 0x1000000
11206#define SPI_RESOURCE_RESERVE_EN_CU_15__RESERVE_SPACE_ONLY__SHIFT 0x18
11207#define SPI_COMPUTE_WF_CTX_SAVE__INITIATE_MASK 0x1
11208#define SPI_COMPUTE_WF_CTX_SAVE__INITIATE__SHIFT 0x0
11209#define SPI_COMPUTE_WF_CTX_SAVE__GDS_INTERRUPT_EN_MASK 0x2
11210#define SPI_COMPUTE_WF_CTX_SAVE__GDS_INTERRUPT_EN__SHIFT 0x1
11211#define SPI_COMPUTE_WF_CTX_SAVE__DONE_INTERRUPT_EN_MASK 0x4
11212#define SPI_COMPUTE_WF_CTX_SAVE__DONE_INTERRUPT_EN__SHIFT 0x2
11213#define SPI_COMPUTE_WF_CTX_SAVE__GDS_REQ_BUSY_MASK 0x40000000
11214#define SPI_COMPUTE_WF_CTX_SAVE__GDS_REQ_BUSY__SHIFT 0x1e
11215#define SPI_COMPUTE_WF_CTX_SAVE__SAVE_BUSY_MASK 0x80000000
11216#define SPI_COMPUTE_WF_CTX_SAVE__SAVE_BUSY__SHIFT 0x1f
11217#define SPI_PS_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0xfff
11218#define SPI_PS_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
11219#define SPI_START_PHASE__VGPR_START_PHASE_MASK 0x3
11220#define SPI_START_PHASE__VGPR_START_PHASE__SHIFT 0x0
11221#define SPI_START_PHASE__SGPR_START_PHASE_MASK 0xc
11222#define SPI_START_PHASE__SGPR_START_PHASE__SHIFT 0x2
11223#define SPI_START_PHASE__WAVE_START_PHASE_MASK 0x30
11224#define SPI_START_PHASE__WAVE_START_PHASE__SHIFT 0x4
11225#define SPI_GFX_CNTL__RESET_COUNTS_MASK 0x1
11226#define SPI_GFX_CNTL__RESET_COUNTS__SHIFT 0x0
11227#define SPI_CONFIG_CNTL__GPR_WRITE_PRIORITY_MASK 0x1fffff
11228#define SPI_CONFIG_CNTL__GPR_WRITE_PRIORITY__SHIFT 0x0
11229#define SPI_CONFIG_CNTL__EXP_PRIORITY_ORDER_MASK 0xe00000
11230#define SPI_CONFIG_CNTL__EXP_PRIORITY_ORDER__SHIFT 0x15
11231#define SPI_CONFIG_CNTL__ENABLE_SQG_TOP_EVENTS_MASK 0x1000000
11232#define SPI_CONFIG_CNTL__ENABLE_SQG_TOP_EVENTS__SHIFT 0x18
/*
 * Register bitfield definitions (SPI and CGTS register blocks).
 *
 * Naming convention, for each field FOO of register REG:
 *   REG__FOO_MASK   - bit mask of the field within the 32-bit register
 *   REG__FOO__SHIFT - bit position of the field's least-significant bit
 * so that MASK == (field_width_mask << SHIFT) for every pair below.
 *
 * NOTE(review): these values appear auto-generated from the hardware
 * register specification -- do not hand-edit individual values.
 * (Fix applied: stripped stray display line numbers that had been fused
 * onto the front of every directive, which made the lines invalid C.)
 */
#define SPI_CONFIG_CNTL__ENABLE_SQG_BOP_EVENTS_MASK 0x2000000
#define SPI_CONFIG_CNTL__ENABLE_SQG_BOP_EVENTS__SHIFT 0x19
#define SPI_CONFIG_CNTL__RSRC_MGMT_RESET_MASK 0x4000000
#define SPI_CONFIG_CNTL__RSRC_MGMT_RESET__SHIFT 0x1a
#define SPI_CONFIG_CNTL__TTRACE_STALL_ALL_MASK 0x8000000
#define SPI_CONFIG_CNTL__TTRACE_STALL_ALL__SHIFT 0x1b
#define SPI_DEBUG_CNTL__DEBUG_GRBM_OVERRIDE_MASK 0x1
#define SPI_DEBUG_CNTL__DEBUG_GRBM_OVERRIDE__SHIFT 0x0
#define SPI_DEBUG_CNTL__DEBUG_THREAD_TYPE_SEL_MASK 0xe
#define SPI_DEBUG_CNTL__DEBUG_THREAD_TYPE_SEL__SHIFT 0x1
#define SPI_DEBUG_CNTL__DEBUG_GROUP_SEL_MASK 0x3f0
#define SPI_DEBUG_CNTL__DEBUG_GROUP_SEL__SHIFT 0x4
#define SPI_DEBUG_CNTL__DEBUG_SIMD_SEL_MASK 0xfc00
#define SPI_DEBUG_CNTL__DEBUG_SIMD_SEL__SHIFT 0xa
#define SPI_DEBUG_CNTL__DEBUG_SH_SEL_MASK 0x10000
#define SPI_DEBUG_CNTL__DEBUG_SH_SEL__SHIFT 0x10
#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_0_MASK 0x20000
#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_0__SHIFT 0x11
#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_1_MASK 0x40000
#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_1__SHIFT 0x12
#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_2_MASK 0x80000
#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_2__SHIFT 0x13
#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_3_MASK 0x100000
#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_3__SHIFT 0x14
#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_4_MASK 0x200000
#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_4__SHIFT 0x15
#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_5_MASK 0x400000
#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_5__SHIFT 0x16
#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_6_MASK 0x800000
#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_6__SHIFT 0x17
#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_7_MASK 0x1000000
#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_7__SHIFT 0x18
#define SPI_DEBUG_CNTL__DEBUG_PIPE_SEL_MASK 0xe000000
#define SPI_DEBUG_CNTL__DEBUG_PIPE_SEL__SHIFT 0x19
#define SPI_DEBUG_CNTL__DEBUG_REG_EN_MASK 0x80000000
#define SPI_DEBUG_CNTL__DEBUG_REG_EN__SHIFT 0x1f
#define SPI_DEBUG_READ__DATA_MASK 0xffffff
#define SPI_DEBUG_READ__DATA__SHIFT 0x0
#define SPI_DSM_CNTL__Sel_DSM_SPI_Irritator_data0_MASK 0x1
#define SPI_DSM_CNTL__Sel_DSM_SPI_Irritator_data0__SHIFT 0x0
#define SPI_DSM_CNTL__Sel_DSM_SPI_Irritator_data1_MASK 0x2
#define SPI_DSM_CNTL__Sel_DSM_SPI_Irritator_data1__SHIFT 0x1
#define SPI_DSM_CNTL__SPI_Enable_Single_Write_MASK 0x4
#define SPI_DSM_CNTL__SPI_Enable_Single_Write__SHIFT 0x2
#define SPI_DSM_CNTL__UNUSED_MASK 0xfffffff8
#define SPI_DSM_CNTL__UNUSED__SHIFT 0x3
#define SPI_EDC_CNT__SED_MASK 0xff
#define SPI_EDC_CNT__SED__SHIFT 0x0
#define SPI_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x3ff
#define SPI_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
#define SPI_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0xffc00
#define SPI_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
#define SPI_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0xf00000
#define SPI_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
#define SPI_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x3ff
#define SPI_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
#define SPI_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0xffc00
#define SPI_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
#define SPI_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0xf00000
#define SPI_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
#define SPI_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x3ff
#define SPI_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
#define SPI_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0xffc00
#define SPI_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
#define SPI_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0xf00000
#define SPI_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
#define SPI_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x3ff
#define SPI_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
#define SPI_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0xffc00
#define SPI_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
#define SPI_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0xf00000
#define SPI_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x3ff
#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0xffc00
#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x3ff
#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0xffc00
#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x3ff
#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0xffc00
#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x3ff
#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0xffc00
#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
#define SPI_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0xff
#define SPI_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
#define SPI_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0xff
#define SPI_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
#define SPI_PERFCOUNTER_BINS__BIN0_MIN_MASK 0xf
#define SPI_PERFCOUNTER_BINS__BIN0_MIN__SHIFT 0x0
#define SPI_PERFCOUNTER_BINS__BIN0_MAX_MASK 0xf0
#define SPI_PERFCOUNTER_BINS__BIN0_MAX__SHIFT 0x4
#define SPI_PERFCOUNTER_BINS__BIN1_MIN_MASK 0xf00
#define SPI_PERFCOUNTER_BINS__BIN1_MIN__SHIFT 0x8
#define SPI_PERFCOUNTER_BINS__BIN1_MAX_MASK 0xf000
#define SPI_PERFCOUNTER_BINS__BIN1_MAX__SHIFT 0xc
#define SPI_PERFCOUNTER_BINS__BIN2_MIN_MASK 0xf0000
#define SPI_PERFCOUNTER_BINS__BIN2_MIN__SHIFT 0x10
#define SPI_PERFCOUNTER_BINS__BIN2_MAX_MASK 0xf00000
#define SPI_PERFCOUNTER_BINS__BIN2_MAX__SHIFT 0x14
#define SPI_PERFCOUNTER_BINS__BIN3_MIN_MASK 0xf000000
#define SPI_PERFCOUNTER_BINS__BIN3_MIN__SHIFT 0x18
#define SPI_PERFCOUNTER_BINS__BIN3_MAX_MASK 0xf0000000
#define SPI_PERFCOUNTER_BINS__BIN3_MAX__SHIFT 0x1c
#define SPI_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define SPI_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SPI_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define SPI_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SPI_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define SPI_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SPI_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define SPI_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SPI_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define SPI_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SPI_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define SPI_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SPI_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define SPI_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SPI_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define SPI_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SPI_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define SPI_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SPI_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define SPI_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SPI_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define SPI_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SPI_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define SPI_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SPI_CONFIG_CNTL_1__VTX_DONE_DELAY_MASK 0xf
#define SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT 0x0
#define SPI_CONFIG_CNTL_1__INTERP_ONE_PRIM_PER_ROW_MASK 0x10
#define SPI_CONFIG_CNTL_1__INTERP_ONE_PRIM_PER_ROW__SHIFT 0x4
#define SPI_CONFIG_CNTL_1__PC_LIMIT_ENABLE_MASK 0x40
#define SPI_CONFIG_CNTL_1__PC_LIMIT_ENABLE__SHIFT 0x6
#define SPI_CONFIG_CNTL_1__PC_LIMIT_STRICT_MASK 0x80
#define SPI_CONFIG_CNTL_1__PC_LIMIT_STRICT__SHIFT 0x7
#define SPI_CONFIG_CNTL_1__CRC_SIMD_ID_WADDR_DISABLE_MASK 0x100
#define SPI_CONFIG_CNTL_1__CRC_SIMD_ID_WADDR_DISABLE__SHIFT 0x8
#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_MODE_MASK 0x200
#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_MODE__SHIFT 0x9
#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_CNT_MASK 0x3c00
#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_CNT__SHIFT 0xa
#define SPI_CONFIG_CNTL_1__PC_LIMIT_SIZE_MASK 0xffff0000
#define SPI_CONFIG_CNTL_1__PC_LIMIT_SIZE__SHIFT 0x10
#define SPI_DEBUG_BUSY__LS_BUSY_MASK 0x1
#define SPI_DEBUG_BUSY__LS_BUSY__SHIFT 0x0
#define SPI_DEBUG_BUSY__HS_BUSY_MASK 0x2
#define SPI_DEBUG_BUSY__HS_BUSY__SHIFT 0x1
#define SPI_DEBUG_BUSY__ES_BUSY_MASK 0x4
#define SPI_DEBUG_BUSY__ES_BUSY__SHIFT 0x2
#define SPI_DEBUG_BUSY__GS_BUSY_MASK 0x8
#define SPI_DEBUG_BUSY__GS_BUSY__SHIFT 0x3
#define SPI_DEBUG_BUSY__VS_BUSY_MASK 0x10
#define SPI_DEBUG_BUSY__VS_BUSY__SHIFT 0x4
#define SPI_DEBUG_BUSY__PS0_BUSY_MASK 0x20
#define SPI_DEBUG_BUSY__PS0_BUSY__SHIFT 0x5
#define SPI_DEBUG_BUSY__PS1_BUSY_MASK 0x40
#define SPI_DEBUG_BUSY__PS1_BUSY__SHIFT 0x6
#define SPI_DEBUG_BUSY__CSG_BUSY_MASK 0x80
#define SPI_DEBUG_BUSY__CSG_BUSY__SHIFT 0x7
#define SPI_DEBUG_BUSY__CS0_BUSY_MASK 0x100
#define SPI_DEBUG_BUSY__CS0_BUSY__SHIFT 0x8
#define SPI_DEBUG_BUSY__CS1_BUSY_MASK 0x200
#define SPI_DEBUG_BUSY__CS1_BUSY__SHIFT 0x9
#define SPI_DEBUG_BUSY__CS2_BUSY_MASK 0x400
#define SPI_DEBUG_BUSY__CS2_BUSY__SHIFT 0xa
#define SPI_DEBUG_BUSY__CS3_BUSY_MASK 0x800
#define SPI_DEBUG_BUSY__CS3_BUSY__SHIFT 0xb
#define SPI_DEBUG_BUSY__CS4_BUSY_MASK 0x1000
#define SPI_DEBUG_BUSY__CS4_BUSY__SHIFT 0xc
#define SPI_DEBUG_BUSY__CS5_BUSY_MASK 0x2000
#define SPI_DEBUG_BUSY__CS5_BUSY__SHIFT 0xd
#define SPI_DEBUG_BUSY__CS6_BUSY_MASK 0x4000
#define SPI_DEBUG_BUSY__CS6_BUSY__SHIFT 0xe
#define SPI_DEBUG_BUSY__CS7_BUSY_MASK 0x8000
#define SPI_DEBUG_BUSY__CS7_BUSY__SHIFT 0xf
#define SPI_DEBUG_BUSY__LDS_WR_CTL0_BUSY_MASK 0x10000
#define SPI_DEBUG_BUSY__LDS_WR_CTL0_BUSY__SHIFT 0x10
#define SPI_DEBUG_BUSY__LDS_WR_CTL1_BUSY_MASK 0x20000
#define SPI_DEBUG_BUSY__LDS_WR_CTL1_BUSY__SHIFT 0x11
#define SPI_DEBUG_BUSY__RSRC_ALLOC0_BUSY_MASK 0x40000
#define SPI_DEBUG_BUSY__RSRC_ALLOC0_BUSY__SHIFT 0x12
#define SPI_DEBUG_BUSY__RSRC_ALLOC1_BUSY_MASK 0x80000
#define SPI_DEBUG_BUSY__RSRC_ALLOC1_BUSY__SHIFT 0x13
#define SPI_DEBUG_BUSY__PC_DEALLOC_BUSY_MASK 0x100000
#define SPI_DEBUG_BUSY__PC_DEALLOC_BUSY__SHIFT 0x14
#define SPI_DEBUG_BUSY__EVENT_CLCTR_BUSY_MASK 0x200000
#define SPI_DEBUG_BUSY__EVENT_CLCTR_BUSY__SHIFT 0x15
#define SPI_DEBUG_BUSY__GRBM_BUSY_MASK 0x400000
#define SPI_DEBUG_BUSY__GRBM_BUSY__SHIFT 0x16
#define SPI_DEBUG_BUSY__SPIS_BUSY_MASK 0x800000
#define SPI_DEBUG_BUSY__SPIS_BUSY__SHIFT 0x17
#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_REQUEST_CYCLE_OVHD_MASK 0xf
#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_REQUEST_CYCLE_OVHD__SHIFT 0x0
#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_GRANT_CYCLE_OVHD_MASK 0xf0
#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_GRANT_CYCLE_OVHD__SHIFT 0x4
#define CGTS_SM_CTRL_REG__ON_SEQ_DELAY_MASK 0xf
#define CGTS_SM_CTRL_REG__ON_SEQ_DELAY__SHIFT 0x0
#define CGTS_SM_CTRL_REG__OFF_SEQ_DELAY_MASK 0xff0
#define CGTS_SM_CTRL_REG__OFF_SEQ_DELAY__SHIFT 0x4
#define CGTS_SM_CTRL_REG__MGCG_ENABLED_MASK 0x1000
#define CGTS_SM_CTRL_REG__MGCG_ENABLED__SHIFT 0xc
#define CGTS_SM_CTRL_REG__BASE_MODE_MASK 0x10000
#define CGTS_SM_CTRL_REG__BASE_MODE__SHIFT 0x10
#define CGTS_SM_CTRL_REG__SM_MODE_MASK 0xe0000
#define CGTS_SM_CTRL_REG__SM_MODE__SHIFT 0x11
#define CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK 0x100000
#define CGTS_SM_CTRL_REG__SM_MODE_ENABLE__SHIFT 0x14
#define CGTS_SM_CTRL_REG__OVERRIDE_MASK 0x200000
#define CGTS_SM_CTRL_REG__OVERRIDE__SHIFT 0x15
#define CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK 0x400000
#define CGTS_SM_CTRL_REG__LS_OVERRIDE__SHIFT 0x16
#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK 0x800000
#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN__SHIFT 0x17
#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK 0xff000000
#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT 0x18
#define CGTS_RD_CTRL_REG__ROW_MUX_SEL_MASK 0x1f
#define CGTS_RD_CTRL_REG__ROW_MUX_SEL__SHIFT 0x0
#define CGTS_RD_CTRL_REG__REG_MUX_SEL_MASK 0x1f00
#define CGTS_RD_CTRL_REG__REG_MUX_SEL__SHIFT 0x8
#define CGTS_RD_REG__READ_DATA_MASK 0x3fff
#define CGTS_RD_REG__READ_DATA__SHIFT 0x0
#define CGTS_TCC_DISABLE__TCC_DISABLE_MASK 0xffff0000
#define CGTS_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
#define CGTS_USER_TCC_DISABLE__TCC_DISABLE_MASK 0xffff0000
#define CGTS_USER_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
#define CGTS_CU0_SP0_CTRL_REG__SP00_MASK 0x7f
#define CGTS_CU0_SP0_CTRL_REG__SP00__SHIFT 0x0
#define CGTS_CU0_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x80
#define CGTS_CU0_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
#define CGTS_CU0_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU0_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU0_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x400
#define CGTS_CU0_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU0_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU0_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU0_SP0_CTRL_REG__SP01_MASK 0x7f0000
#define CGTS_CU0_SP0_CTRL_REG__SP01__SHIFT 0x10
#define CGTS_CU0_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x800000
#define CGTS_CU0_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
#define CGTS_CU0_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU0_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU0_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU0_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU0_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU0_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_MASK 0x7f
#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x80
#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x400
#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_MASK 0x7f0000
#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x800000
#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU0_TA_SQC_CTRL_REG__TA_MASK 0x7f
#define CGTS_CU0_TA_SQC_CTRL_REG__TA__SHIFT 0x0
#define CGTS_CU0_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x80
#define CGTS_CU0_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
#define CGTS_CU0_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU0_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU0_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x400
#define CGTS_CU0_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU0_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU0_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_MASK 0x7f0000
#define CGTS_CU0_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x800000
#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU0_SP1_CTRL_REG__SP10_MASK 0x7f
#define CGTS_CU0_SP1_CTRL_REG__SP10__SHIFT 0x0
#define CGTS_CU0_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x80
#define CGTS_CU0_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
#define CGTS_CU0_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU0_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU0_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x400
#define CGTS_CU0_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU0_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU0_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU0_SP1_CTRL_REG__SP11_MASK 0x7f0000
#define CGTS_CU0_SP1_CTRL_REG__SP11__SHIFT 0x10
#define CGTS_CU0_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x800000
#define CGTS_CU0_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
#define CGTS_CU0_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU0_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU0_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU0_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU0_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU0_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU0_TD_TCP_CTRL_REG__TD_MASK 0x7f
#define CGTS_CU0_TD_TCP_CTRL_REG__TD__SHIFT 0x0
#define CGTS_CU0_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x80
#define CGTS_CU0_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
#define CGTS_CU0_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU0_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU0_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x400
#define CGTS_CU0_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU0_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU0_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU0_TD_TCP_CTRL_REG__TCP_MASK 0x7f0000
#define CGTS_CU0_TD_TCP_CTRL_REG__TCP__SHIFT 0x10
#define CGTS_CU0_TD_TCP_CTRL_REG__TCP_OVERRIDE_MASK 0x800000
#define CGTS_CU0_TD_TCP_CTRL_REG__TCP_OVERRIDE__SHIFT 0x17
#define CGTS_CU0_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU0_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU0_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU0_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU0_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU0_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU1_SP0_CTRL_REG__SP00_MASK 0x7f
#define CGTS_CU1_SP0_CTRL_REG__SP00__SHIFT 0x0
#define CGTS_CU1_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x80
#define CGTS_CU1_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
#define CGTS_CU1_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU1_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU1_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x400
#define CGTS_CU1_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU1_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU1_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU1_SP0_CTRL_REG__SP01_MASK 0x7f0000
#define CGTS_CU1_SP0_CTRL_REG__SP01__SHIFT 0x10
#define CGTS_CU1_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x800000
#define CGTS_CU1_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
#define CGTS_CU1_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU1_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU1_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU1_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU1_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU1_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_MASK 0x7f
#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x80
#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x400
#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_MASK 0x7f0000
#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x800000
#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU1_TA_CTRL_REG__TA_MASK 0x7f
#define CGTS_CU1_TA_CTRL_REG__TA__SHIFT 0x0
#define CGTS_CU1_TA_CTRL_REG__TA_OVERRIDE_MASK 0x80
#define CGTS_CU1_TA_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
#define CGTS_CU1_TA_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU1_TA_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU1_TA_CTRL_REG__TA_LS_OVERRIDE_MASK 0x400
#define CGTS_CU1_TA_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU1_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU1_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU1_SP1_CTRL_REG__SP10_MASK 0x7f
#define CGTS_CU1_SP1_CTRL_REG__SP10__SHIFT 0x0
#define CGTS_CU1_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x80
#define CGTS_CU1_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
#define CGTS_CU1_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU1_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU1_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x400
#define CGTS_CU1_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU1_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU1_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU1_SP1_CTRL_REG__SP11_MASK 0x7f0000
#define CGTS_CU1_SP1_CTRL_REG__SP11__SHIFT 0x10
#define CGTS_CU1_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x800000
#define CGTS_CU1_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
#define CGTS_CU1_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU1_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU1_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU1_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU1_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU1_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU1_TD_TCP_CTRL_REG__TD_MASK 0x7f
#define CGTS_CU1_TD_TCP_CTRL_REG__TD__SHIFT 0x0
#define CGTS_CU1_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x80
#define CGTS_CU1_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
#define CGTS_CU1_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU1_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU1_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x400
#define CGTS_CU1_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU1_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU1_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU1_TD_TCP_CTRL_REG__TCP_MASK 0x7f0000
#define CGTS_CU1_TD_TCP_CTRL_REG__TCP__SHIFT 0x10
#define CGTS_CU1_TD_TCP_CTRL_REG__TCP_OVERRIDE_MASK 0x800000
#define CGTS_CU1_TD_TCP_CTRL_REG__TCP_OVERRIDE__SHIFT 0x17
#define CGTS_CU1_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU1_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU1_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU1_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU1_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU1_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU2_SP0_CTRL_REG__SP00_MASK 0x7f
#define CGTS_CU2_SP0_CTRL_REG__SP00__SHIFT 0x0
#define CGTS_CU2_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x80
#define CGTS_CU2_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
#define CGTS_CU2_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU2_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU2_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x400
#define CGTS_CU2_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU2_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU2_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU2_SP0_CTRL_REG__SP01_MASK 0x7f0000
#define CGTS_CU2_SP0_CTRL_REG__SP01__SHIFT 0x10
#define CGTS_CU2_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x800000
#define CGTS_CU2_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
#define CGTS_CU2_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU2_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU2_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU2_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU2_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU2_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_MASK 0x7f
#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x80
#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x400
#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_MASK 0x7f0000
#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x800000
#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU2_TA_CTRL_REG__TA_MASK 0x7f
#define CGTS_CU2_TA_CTRL_REG__TA__SHIFT 0x0
#define CGTS_CU2_TA_CTRL_REG__TA_OVERRIDE_MASK 0x80
#define CGTS_CU2_TA_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
#define CGTS_CU2_TA_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU2_TA_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU2_TA_CTRL_REG__TA_LS_OVERRIDE_MASK 0x400
#define CGTS_CU2_TA_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU2_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU2_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU2_SP1_CTRL_REG__SP10_MASK 0x7f
#define CGTS_CU2_SP1_CTRL_REG__SP10__SHIFT 0x0
#define CGTS_CU2_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x80
#define CGTS_CU2_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
#define CGTS_CU2_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU2_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU2_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x400
#define CGTS_CU2_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU2_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU2_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU2_SP1_CTRL_REG__SP11_MASK 0x7f0000
#define CGTS_CU2_SP1_CTRL_REG__SP11__SHIFT 0x10
#define CGTS_CU2_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x800000
#define CGTS_CU2_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
#define CGTS_CU2_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU2_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU2_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU2_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU2_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU2_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU2_TD_TCP_CTRL_REG__TD_MASK 0x7f
#define CGTS_CU2_TD_TCP_CTRL_REG__TD__SHIFT 0x0
#define CGTS_CU2_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x80
#define CGTS_CU2_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
#define CGTS_CU2_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU2_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU2_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x400
#define CGTS_CU2_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU2_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU2_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU2_TD_TCP_CTRL_REG__TCP_MASK 0x7f0000
#define CGTS_CU2_TD_TCP_CTRL_REG__TCP__SHIFT 0x10
#define CGTS_CU2_TD_TCP_CTRL_REG__TCP_OVERRIDE_MASK 0x800000
#define CGTS_CU2_TD_TCP_CTRL_REG__TCP_OVERRIDE__SHIFT 0x17
#define CGTS_CU2_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU2_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU2_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU2_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU2_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU2_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU3_SP0_CTRL_REG__SP00_MASK 0x7f
#define CGTS_CU3_SP0_CTRL_REG__SP00__SHIFT 0x0
#define CGTS_CU3_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x80
#define CGTS_CU3_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
#define CGTS_CU3_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU3_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU3_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x400
#define CGTS_CU3_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU3_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU3_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU3_SP0_CTRL_REG__SP01_MASK 0x7f0000
#define CGTS_CU3_SP0_CTRL_REG__SP01__SHIFT 0x10
#define CGTS_CU3_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x800000
#define CGTS_CU3_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
#define CGTS_CU3_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU3_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU3_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU3_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU3_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU3_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_MASK 0x7f
#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x80
#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x400
#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_MASK 0x7f0000
#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x800000
#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU3_TA_CTRL_REG__TA_MASK 0x7f
#define CGTS_CU3_TA_CTRL_REG__TA__SHIFT 0x0
#define CGTS_CU3_TA_CTRL_REG__TA_OVERRIDE_MASK 0x80
#define CGTS_CU3_TA_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
#define CGTS_CU3_TA_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU3_TA_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU3_TA_CTRL_REG__TA_LS_OVERRIDE_MASK 0x400
#define CGTS_CU3_TA_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU3_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU3_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU3_SP1_CTRL_REG__SP10_MASK 0x7f
#define CGTS_CU3_SP1_CTRL_REG__SP10__SHIFT 0x0
#define CGTS_CU3_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x80
#define CGTS_CU3_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
#define CGTS_CU3_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU3_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU3_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x400
#define CGTS_CU3_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU3_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU3_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU3_SP1_CTRL_REG__SP11_MASK 0x7f0000
#define CGTS_CU3_SP1_CTRL_REG__SP11__SHIFT 0x10
#define CGTS_CU3_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x800000
#define CGTS_CU3_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
#define CGTS_CU3_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU3_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU3_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU3_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU3_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU3_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU3_TD_TCP_CTRL_REG__TD_MASK 0x7f
#define CGTS_CU3_TD_TCP_CTRL_REG__TD__SHIFT 0x0
#define CGTS_CU3_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x80
#define CGTS_CU3_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
#define CGTS_CU3_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU3_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU3_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x400
#define CGTS_CU3_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
11821#define CGTS_CU3_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x800
11822#define CGTS_CU3_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
11823#define CGTS_CU3_TD_TCP_CTRL_REG__TCP_MASK 0x7f0000
11824#define CGTS_CU3_TD_TCP_CTRL_REG__TCP__SHIFT 0x10
11825#define CGTS_CU3_TD_TCP_CTRL_REG__TCP_OVERRIDE_MASK 0x800000
11826#define CGTS_CU3_TD_TCP_CTRL_REG__TCP_OVERRIDE__SHIFT 0x17
11827#define CGTS_CU3_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE_MASK 0x3000000
11828#define CGTS_CU3_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE__SHIFT 0x18
11829#define CGTS_CU3_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE_MASK 0x4000000
11830#define CGTS_CU3_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE__SHIFT 0x1a
11831#define CGTS_CU3_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE_MASK 0x8000000
11832#define CGTS_CU3_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE__SHIFT 0x1b
11833#define CGTS_CU4_SP0_CTRL_REG__SP00_MASK 0x7f
11834#define CGTS_CU4_SP0_CTRL_REG__SP00__SHIFT 0x0
11835#define CGTS_CU4_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x80
11836#define CGTS_CU4_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
11837#define CGTS_CU4_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x300
11838#define CGTS_CU4_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
11839#define CGTS_CU4_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x400
11840#define CGTS_CU4_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
11841#define CGTS_CU4_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x800
11842#define CGTS_CU4_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
11843#define CGTS_CU4_SP0_CTRL_REG__SP01_MASK 0x7f0000
11844#define CGTS_CU4_SP0_CTRL_REG__SP01__SHIFT 0x10
11845#define CGTS_CU4_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x800000
11846#define CGTS_CU4_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
11847#define CGTS_CU4_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x3000000
11848#define CGTS_CU4_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
11849#define CGTS_CU4_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x4000000
11850#define CGTS_CU4_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
11851#define CGTS_CU4_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x8000000
11852#define CGTS_CU4_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
11853#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_MASK 0x7f
11854#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
11855#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x80
11856#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
11857#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x300
11858#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
11859#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x400
11860#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
11861#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x800
11862#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
11863#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_MASK 0x7f0000
11864#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
11865#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x800000
11866#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
11867#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x3000000
11868#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
11869#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x4000000
11870#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
11871#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x8000000
11872#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
11873#define CGTS_CU4_TA_SQC_CTRL_REG__TA_MASK 0x7f
11874#define CGTS_CU4_TA_SQC_CTRL_REG__TA__SHIFT 0x0
11875#define CGTS_CU4_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x80
11876#define CGTS_CU4_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
11877#define CGTS_CU4_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x300
11878#define CGTS_CU4_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
11879#define CGTS_CU4_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x400
11880#define CGTS_CU4_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
11881#define CGTS_CU4_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x800
11882#define CGTS_CU4_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
11883#define CGTS_CU4_TA_SQC_CTRL_REG__SQC_MASK 0x7f0000
11884#define CGTS_CU4_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
11885#define CGTS_CU4_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x800000
11886#define CGTS_CU4_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
11887#define CGTS_CU4_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x3000000
11888#define CGTS_CU4_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
11889#define CGTS_CU4_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x4000000
11890#define CGTS_CU4_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
11891#define CGTS_CU4_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x8000000
11892#define CGTS_CU4_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
11893#define CGTS_CU4_SP1_CTRL_REG__SP10_MASK 0x7f
11894#define CGTS_CU4_SP1_CTRL_REG__SP10__SHIFT 0x0
11895#define CGTS_CU4_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x80
11896#define CGTS_CU4_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
11897#define CGTS_CU4_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x300
11898#define CGTS_CU4_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
11899#define CGTS_CU4_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x400
11900#define CGTS_CU4_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
11901#define CGTS_CU4_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x800
11902#define CGTS_CU4_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
11903#define CGTS_CU4_SP1_CTRL_REG__SP11_MASK 0x7f0000
11904#define CGTS_CU4_SP1_CTRL_REG__SP11__SHIFT 0x10
11905#define CGTS_CU4_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x800000
11906#define CGTS_CU4_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
11907#define CGTS_CU4_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x3000000
11908#define CGTS_CU4_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
11909#define CGTS_CU4_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x4000000
11910#define CGTS_CU4_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
11911#define CGTS_CU4_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x8000000
11912#define CGTS_CU4_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
11913#define CGTS_CU4_TD_TCP_CTRL_REG__TD_MASK 0x7f
11914#define CGTS_CU4_TD_TCP_CTRL_REG__TD__SHIFT 0x0
11915#define CGTS_CU4_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x80
11916#define CGTS_CU4_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
11917#define CGTS_CU4_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x300
11918#define CGTS_CU4_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
11919#define CGTS_CU4_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x400
11920#define CGTS_CU4_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
11921#define CGTS_CU4_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x800
11922#define CGTS_CU4_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
11923#define CGTS_CU4_TD_TCP_CTRL_REG__TCP_MASK 0x7f0000
11924#define CGTS_CU4_TD_TCP_CTRL_REG__TCP__SHIFT 0x10
11925#define CGTS_CU4_TD_TCP_CTRL_REG__TCP_OVERRIDE_MASK 0x800000
11926#define CGTS_CU4_TD_TCP_CTRL_REG__TCP_OVERRIDE__SHIFT 0x17
11927#define CGTS_CU4_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE_MASK 0x3000000
11928#define CGTS_CU4_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE__SHIFT 0x18
11929#define CGTS_CU4_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE_MASK 0x4000000
11930#define CGTS_CU4_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE__SHIFT 0x1a
11931#define CGTS_CU4_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE_MASK 0x8000000
11932#define CGTS_CU4_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE__SHIFT 0x1b
11933#define CGTS_CU5_SP0_CTRL_REG__SP00_MASK 0x7f
11934#define CGTS_CU5_SP0_CTRL_REG__SP00__SHIFT 0x0
11935#define CGTS_CU5_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x80
11936#define CGTS_CU5_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
11937#define CGTS_CU5_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x300
11938#define CGTS_CU5_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
11939#define CGTS_CU5_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x400
11940#define CGTS_CU5_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
11941#define CGTS_CU5_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x800
11942#define CGTS_CU5_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
11943#define CGTS_CU5_SP0_CTRL_REG__SP01_MASK 0x7f0000
11944#define CGTS_CU5_SP0_CTRL_REG__SP01__SHIFT 0x10
11945#define CGTS_CU5_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x800000
11946#define CGTS_CU5_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
11947#define CGTS_CU5_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x3000000
11948#define CGTS_CU5_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
11949#define CGTS_CU5_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x4000000
11950#define CGTS_CU5_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
11951#define CGTS_CU5_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x8000000
11952#define CGTS_CU5_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
11953#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_MASK 0x7f
11954#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
11955#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x80
11956#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
11957#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x300
11958#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
11959#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x400
11960#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
11961#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x800
11962#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
11963#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_MASK 0x7f0000
11964#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
11965#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x800000
11966#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
11967#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x3000000
11968#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
11969#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x4000000
11970#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
11971#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x8000000
11972#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
11973#define CGTS_CU5_TA_CTRL_REG__TA_MASK 0x7f
11974#define CGTS_CU5_TA_CTRL_REG__TA__SHIFT 0x0
11975#define CGTS_CU5_TA_CTRL_REG__TA_OVERRIDE_MASK 0x80
11976#define CGTS_CU5_TA_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
11977#define CGTS_CU5_TA_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x300
11978#define CGTS_CU5_TA_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
11979#define CGTS_CU5_TA_CTRL_REG__TA_LS_OVERRIDE_MASK 0x400
11980#define CGTS_CU5_TA_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
11981#define CGTS_CU5_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x800
11982#define CGTS_CU5_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
11983#define CGTS_CU5_SP1_CTRL_REG__SP10_MASK 0x7f
11984#define CGTS_CU5_SP1_CTRL_REG__SP10__SHIFT 0x0
11985#define CGTS_CU5_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x80
11986#define CGTS_CU5_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
11987#define CGTS_CU5_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x300
11988#define CGTS_CU5_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
11989#define CGTS_CU5_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x400
11990#define CGTS_CU5_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
11991#define CGTS_CU5_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x800
11992#define CGTS_CU5_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
11993#define CGTS_CU5_SP1_CTRL_REG__SP11_MASK 0x7f0000
11994#define CGTS_CU5_SP1_CTRL_REG__SP11__SHIFT 0x10
11995#define CGTS_CU5_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x800000
11996#define CGTS_CU5_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
11997#define CGTS_CU5_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x3000000
11998#define CGTS_CU5_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
11999#define CGTS_CU5_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x4000000
12000#define CGTS_CU5_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
12001#define CGTS_CU5_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x8000000
12002#define CGTS_CU5_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
12003#define CGTS_CU5_TD_TCP_CTRL_REG__TD_MASK 0x7f
12004#define CGTS_CU5_TD_TCP_CTRL_REG__TD__SHIFT 0x0
12005#define CGTS_CU5_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x80
12006#define CGTS_CU5_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
12007#define CGTS_CU5_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x300
12008#define CGTS_CU5_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
12009#define CGTS_CU5_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x400
12010#define CGTS_CU5_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
12011#define CGTS_CU5_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x800
12012#define CGTS_CU5_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
12013#define CGTS_CU5_TD_TCP_CTRL_REG__TCP_MASK 0x7f0000
12014#define CGTS_CU5_TD_TCP_CTRL_REG__TCP__SHIFT 0x10
12015#define CGTS_CU5_TD_TCP_CTRL_REG__TCP_OVERRIDE_MASK 0x800000
12016#define CGTS_CU5_TD_TCP_CTRL_REG__TCP_OVERRIDE__SHIFT 0x17
12017#define CGTS_CU5_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE_MASK 0x3000000
12018#define CGTS_CU5_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE__SHIFT 0x18
12019#define CGTS_CU5_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE_MASK 0x4000000
12020#define CGTS_CU5_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE__SHIFT 0x1a
12021#define CGTS_CU5_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE_MASK 0x8000000
12022#define CGTS_CU5_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE__SHIFT 0x1b
12023#define CGTS_CU6_SP0_CTRL_REG__SP00_MASK 0x7f
12024#define CGTS_CU6_SP0_CTRL_REG__SP00__SHIFT 0x0
12025#define CGTS_CU6_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x80
12026#define CGTS_CU6_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
12027#define CGTS_CU6_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x300
12028#define CGTS_CU6_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
12029#define CGTS_CU6_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x400
12030#define CGTS_CU6_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
12031#define CGTS_CU6_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x800
12032#define CGTS_CU6_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
12033#define CGTS_CU6_SP0_CTRL_REG__SP01_MASK 0x7f0000
12034#define CGTS_CU6_SP0_CTRL_REG__SP01__SHIFT 0x10
12035#define CGTS_CU6_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x800000
12036#define CGTS_CU6_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
12037#define CGTS_CU6_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x3000000
12038#define CGTS_CU6_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
12039#define CGTS_CU6_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x4000000
12040#define CGTS_CU6_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
12041#define CGTS_CU6_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x8000000
12042#define CGTS_CU6_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
12043#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_MASK 0x7f
12044#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
12045#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x80
12046#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
12047#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x300
12048#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
12049#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x400
12050#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
12051#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x800
12052#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
12053#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_MASK 0x7f0000
12054#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
12055#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x800000
12056#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
12057#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x3000000
12058#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
12059#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x4000000
12060#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
12061#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x8000000
12062#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
12063#define CGTS_CU6_TA_CTRL_REG__TA_MASK 0x7f
12064#define CGTS_CU6_TA_CTRL_REG__TA__SHIFT 0x0
12065#define CGTS_CU6_TA_CTRL_REG__TA_OVERRIDE_MASK 0x80
12066#define CGTS_CU6_TA_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
12067#define CGTS_CU6_TA_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x300
12068#define CGTS_CU6_TA_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
12069#define CGTS_CU6_TA_CTRL_REG__TA_LS_OVERRIDE_MASK 0x400
12070#define CGTS_CU6_TA_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
12071#define CGTS_CU6_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x800
12072#define CGTS_CU6_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
12073#define CGTS_CU6_SP1_CTRL_REG__SP10_MASK 0x7f
12074#define CGTS_CU6_SP1_CTRL_REG__SP10__SHIFT 0x0
12075#define CGTS_CU6_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x80
12076#define CGTS_CU6_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
12077#define CGTS_CU6_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x300
12078#define CGTS_CU6_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
12079#define CGTS_CU6_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x400
12080#define CGTS_CU6_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
12081#define CGTS_CU6_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x800
12082#define CGTS_CU6_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
12083#define CGTS_CU6_SP1_CTRL_REG__SP11_MASK 0x7f0000
12084#define CGTS_CU6_SP1_CTRL_REG__SP11__SHIFT 0x10
12085#define CGTS_CU6_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x800000
12086#define CGTS_CU6_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
12087#define CGTS_CU6_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x3000000
12088#define CGTS_CU6_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
12089#define CGTS_CU6_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x4000000
12090#define CGTS_CU6_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
12091#define CGTS_CU6_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x8000000
12092#define CGTS_CU6_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
12093#define CGTS_CU6_TD_TCP_CTRL_REG__TD_MASK 0x7f
12094#define CGTS_CU6_TD_TCP_CTRL_REG__TD__SHIFT 0x0
12095#define CGTS_CU6_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x80
12096#define CGTS_CU6_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
12097#define CGTS_CU6_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x300
12098#define CGTS_CU6_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
12099#define CGTS_CU6_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x400
12100#define CGTS_CU6_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
12101#define CGTS_CU6_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x800
12102#define CGTS_CU6_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
12103#define CGTS_CU6_TD_TCP_CTRL_REG__TCP_MASK 0x7f0000
12104#define CGTS_CU6_TD_TCP_CTRL_REG__TCP__SHIFT 0x10
12105#define CGTS_CU6_TD_TCP_CTRL_REG__TCP_OVERRIDE_MASK 0x800000
12106#define CGTS_CU6_TD_TCP_CTRL_REG__TCP_OVERRIDE__SHIFT 0x17
12107#define CGTS_CU6_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE_MASK 0x3000000
12108#define CGTS_CU6_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE__SHIFT 0x18
12109#define CGTS_CU6_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE_MASK 0x4000000
12110#define CGTS_CU6_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE__SHIFT 0x1a
12111#define CGTS_CU6_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE_MASK 0x8000000
12112#define CGTS_CU6_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE__SHIFT 0x1b
12113#define CGTS_CU7_SP0_CTRL_REG__SP00_MASK 0x7f
12114#define CGTS_CU7_SP0_CTRL_REG__SP00__SHIFT 0x0
12115#define CGTS_CU7_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x80
12116#define CGTS_CU7_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
12117#define CGTS_CU7_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x300
12118#define CGTS_CU7_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
12119#define CGTS_CU7_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x400
12120#define CGTS_CU7_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
12121#define CGTS_CU7_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x800
12122#define CGTS_CU7_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
12123#define CGTS_CU7_SP0_CTRL_REG__SP01_MASK 0x7f0000
12124#define CGTS_CU7_SP0_CTRL_REG__SP01__SHIFT 0x10
12125#define CGTS_CU7_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x800000
12126#define CGTS_CU7_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
12127#define CGTS_CU7_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x3000000
12128#define CGTS_CU7_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
12129#define CGTS_CU7_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x4000000
12130#define CGTS_CU7_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
12131#define CGTS_CU7_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x8000000
12132#define CGTS_CU7_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
12133#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_MASK 0x7f
12134#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
12135#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x80
12136#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
12137#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x300
12138#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
12139#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x400
12140#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
12141#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x800
12142#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
12143#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_MASK 0x7f0000
12144#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
12145#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x800000
12146#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
12147#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x3000000
12148#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
12149#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x4000000
12150#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
12151#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x8000000
12152#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
12153#define CGTS_CU7_TA_CTRL_REG__TA_MASK 0x7f
12154#define CGTS_CU7_TA_CTRL_REG__TA__SHIFT 0x0
12155#define CGTS_CU7_TA_CTRL_REG__TA_OVERRIDE_MASK 0x80
12156#define CGTS_CU7_TA_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
12157#define CGTS_CU7_TA_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x300
12158#define CGTS_CU7_TA_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
12159#define CGTS_CU7_TA_CTRL_REG__TA_LS_OVERRIDE_MASK 0x400
12160#define CGTS_CU7_TA_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
12161#define CGTS_CU7_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x800
12162#define CGTS_CU7_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
12163#define CGTS_CU7_SP1_CTRL_REG__SP10_MASK 0x7f
12164#define CGTS_CU7_SP1_CTRL_REG__SP10__SHIFT 0x0
12165#define CGTS_CU7_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x80
12166#define CGTS_CU7_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
12167#define CGTS_CU7_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x300
12168#define CGTS_CU7_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
12169#define CGTS_CU7_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x400
12170#define CGTS_CU7_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
12171#define CGTS_CU7_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x800
12172#define CGTS_CU7_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
12173#define CGTS_CU7_SP1_CTRL_REG__SP11_MASK 0x7f0000
12174#define CGTS_CU7_SP1_CTRL_REG__SP11__SHIFT 0x10
12175#define CGTS_CU7_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x800000
12176#define CGTS_CU7_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
12177#define CGTS_CU7_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x3000000
12178#define CGTS_CU7_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
12179#define CGTS_CU7_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x4000000
12180#define CGTS_CU7_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
12181#define CGTS_CU7_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x8000000
12182#define CGTS_CU7_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
12183#define CGTS_CU7_TD_TCP_CTRL_REG__TD_MASK 0x7f
12184#define CGTS_CU7_TD_TCP_CTRL_REG__TD__SHIFT 0x0
12185#define CGTS_CU7_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x80
12186#define CGTS_CU7_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
12187#define CGTS_CU7_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x300
12188#define CGTS_CU7_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
12189#define CGTS_CU7_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x400
12190#define CGTS_CU7_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
12191#define CGTS_CU7_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x800
12192#define CGTS_CU7_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
12193#define CGTS_CU7_TD_TCP_CTRL_REG__TCP_MASK 0x7f0000
12194#define CGTS_CU7_TD_TCP_CTRL_REG__TCP__SHIFT 0x10
12195#define CGTS_CU7_TD_TCP_CTRL_REG__TCP_OVERRIDE_MASK 0x800000
12196#define CGTS_CU7_TD_TCP_CTRL_REG__TCP_OVERRIDE__SHIFT 0x17
12197#define CGTS_CU7_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE_MASK 0x3000000
12198#define CGTS_CU7_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE__SHIFT 0x18
12199#define CGTS_CU7_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE_MASK 0x4000000
12200#define CGTS_CU7_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE__SHIFT 0x1a
12201#define CGTS_CU7_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE_MASK 0x8000000
12202#define CGTS_CU7_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE__SHIFT 0x1b
12203#define CGTS_CU8_SP0_CTRL_REG__SP00_MASK 0x7f
12204#define CGTS_CU8_SP0_CTRL_REG__SP00__SHIFT 0x0
12205#define CGTS_CU8_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x80
12206#define CGTS_CU8_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
12207#define CGTS_CU8_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x300
12208#define CGTS_CU8_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
12209#define CGTS_CU8_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x400
12210#define CGTS_CU8_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
12211#define CGTS_CU8_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x800
12212#define CGTS_CU8_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
12213#define CGTS_CU8_SP0_CTRL_REG__SP01_MASK 0x7f0000
12214#define CGTS_CU8_SP0_CTRL_REG__SP01__SHIFT 0x10
12215#define CGTS_CU8_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x800000
12216#define CGTS_CU8_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
12217#define CGTS_CU8_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x3000000
12218#define CGTS_CU8_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
12219#define CGTS_CU8_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x4000000
12220#define CGTS_CU8_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
12221#define CGTS_CU8_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x8000000
12222#define CGTS_CU8_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
12223#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_MASK 0x7f
12224#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
12225#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x80
12226#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
12227#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x300
12228#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
12229#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x400
12230#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
12231#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x800
12232#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
12233#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_MASK 0x7f0000
12234#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
12235#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x800000
12236#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
12237#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x3000000
12238#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
12239#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x4000000
12240#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
12241#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x8000000
12242#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
12243#define CGTS_CU8_TA_SQC_CTRL_REG__TA_MASK 0x7f
12244#define CGTS_CU8_TA_SQC_CTRL_REG__TA__SHIFT 0x0
12245#define CGTS_CU8_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x80
12246#define CGTS_CU8_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
12247#define CGTS_CU8_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x300
12248#define CGTS_CU8_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
12249#define CGTS_CU8_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x400
12250#define CGTS_CU8_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
12251#define CGTS_CU8_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x800
12252#define CGTS_CU8_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
12253#define CGTS_CU8_TA_SQC_CTRL_REG__SQC_MASK 0x7f0000
12254#define CGTS_CU8_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
12255#define CGTS_CU8_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x800000
12256#define CGTS_CU8_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
/*
 * Per-CU (compute unit) clock-gating control register bitfields.
 * Each field pair follows the same layout: a 7-bit value in bits [6:0]
 * (MASK 0x7f, SHIFT 0x0) or bits [22:16] (MASK 0x7f0000, SHIFT 0x10),
 * plus OVERRIDE / BUSY_OVERRIDE / LS_OVERRIDE / SIMDBUSY_OVERRIDE bits.
 * NOTE(review): this header appears to be machine-generated from the
 * hardware register database — do not hand-edit individual values.
 */
#define CGTS_CU8_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU8_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU8_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU8_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU8_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU8_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU8_SP1_CTRL_REG__SP10_MASK 0x7f
#define CGTS_CU8_SP1_CTRL_REG__SP10__SHIFT 0x0
#define CGTS_CU8_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x80
#define CGTS_CU8_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
#define CGTS_CU8_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU8_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU8_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x400
#define CGTS_CU8_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU8_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU8_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU8_SP1_CTRL_REG__SP11_MASK 0x7f0000
#define CGTS_CU8_SP1_CTRL_REG__SP11__SHIFT 0x10
#define CGTS_CU8_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x800000
#define CGTS_CU8_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
#define CGTS_CU8_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU8_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU8_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU8_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU8_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU8_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU8_TD_TCP_CTRL_REG__TD_MASK 0x7f
#define CGTS_CU8_TD_TCP_CTRL_REG__TD__SHIFT 0x0
#define CGTS_CU8_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x80
#define CGTS_CU8_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
#define CGTS_CU8_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU8_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU8_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x400
#define CGTS_CU8_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU8_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU8_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU8_TD_TCP_CTRL_REG__TCP_MASK 0x7f0000
#define CGTS_CU8_TD_TCP_CTRL_REG__TCP__SHIFT 0x10
#define CGTS_CU8_TD_TCP_CTRL_REG__TCP_OVERRIDE_MASK 0x800000
#define CGTS_CU8_TD_TCP_CTRL_REG__TCP_OVERRIDE__SHIFT 0x17
#define CGTS_CU8_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU8_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU8_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU8_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU8_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU8_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU9_SP0_CTRL_REG__SP00_MASK 0x7f
#define CGTS_CU9_SP0_CTRL_REG__SP00__SHIFT 0x0
#define CGTS_CU9_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x80
#define CGTS_CU9_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
#define CGTS_CU9_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU9_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU9_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x400
#define CGTS_CU9_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU9_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU9_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU9_SP0_CTRL_REG__SP01_MASK 0x7f0000
#define CGTS_CU9_SP0_CTRL_REG__SP01__SHIFT 0x10
#define CGTS_CU9_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x800000
#define CGTS_CU9_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
#define CGTS_CU9_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU9_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU9_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU9_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU9_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU9_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_MASK 0x7f
#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x80
#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x400
#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_MASK 0x7f0000
#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x800000
#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU9_TA_CTRL_REG__TA_MASK 0x7f
#define CGTS_CU9_TA_CTRL_REG__TA__SHIFT 0x0
#define CGTS_CU9_TA_CTRL_REG__TA_OVERRIDE_MASK 0x80
#define CGTS_CU9_TA_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
#define CGTS_CU9_TA_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU9_TA_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU9_TA_CTRL_REG__TA_LS_OVERRIDE_MASK 0x400
#define CGTS_CU9_TA_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU9_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU9_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU9_SP1_CTRL_REG__SP10_MASK 0x7f
#define CGTS_CU9_SP1_CTRL_REG__SP10__SHIFT 0x0
#define CGTS_CU9_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x80
#define CGTS_CU9_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
#define CGTS_CU9_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU9_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU9_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x400
#define CGTS_CU9_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU9_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU9_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU9_SP1_CTRL_REG__SP11_MASK 0x7f0000
#define CGTS_CU9_SP1_CTRL_REG__SP11__SHIFT 0x10
#define CGTS_CU9_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x800000
#define CGTS_CU9_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
#define CGTS_CU9_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU9_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU9_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU9_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU9_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU9_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU9_TD_TCP_CTRL_REG__TD_MASK 0x7f
#define CGTS_CU9_TD_TCP_CTRL_REG__TD__SHIFT 0x0
#define CGTS_CU9_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x80
#define CGTS_CU9_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
#define CGTS_CU9_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU9_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU9_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x400
#define CGTS_CU9_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU9_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU9_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU9_TD_TCP_CTRL_REG__TCP_MASK 0x7f0000
#define CGTS_CU9_TD_TCP_CTRL_REG__TCP__SHIFT 0x10
#define CGTS_CU9_TD_TCP_CTRL_REG__TCP_OVERRIDE_MASK 0x800000
#define CGTS_CU9_TD_TCP_CTRL_REG__TCP_OVERRIDE__SHIFT 0x17
#define CGTS_CU9_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU9_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU9_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU9_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU9_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU9_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU10_SP0_CTRL_REG__SP00_MASK 0x7f
#define CGTS_CU10_SP0_CTRL_REG__SP00__SHIFT 0x0
#define CGTS_CU10_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x80
#define CGTS_CU10_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
#define CGTS_CU10_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU10_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU10_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x400
#define CGTS_CU10_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU10_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU10_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU10_SP0_CTRL_REG__SP01_MASK 0x7f0000
#define CGTS_CU10_SP0_CTRL_REG__SP01__SHIFT 0x10
#define CGTS_CU10_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x800000
#define CGTS_CU10_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
#define CGTS_CU10_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU10_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU10_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU10_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU10_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU10_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_MASK 0x7f
#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x80
#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x400
#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_MASK 0x7f0000
#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x800000
#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU10_TA_CTRL_REG__TA_MASK 0x7f
#define CGTS_CU10_TA_CTRL_REG__TA__SHIFT 0x0
#define CGTS_CU10_TA_CTRL_REG__TA_OVERRIDE_MASK 0x80
#define CGTS_CU10_TA_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
#define CGTS_CU10_TA_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU10_TA_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU10_TA_CTRL_REG__TA_LS_OVERRIDE_MASK 0x400
#define CGTS_CU10_TA_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU10_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU10_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU10_SP1_CTRL_REG__SP10_MASK 0x7f
#define CGTS_CU10_SP1_CTRL_REG__SP10__SHIFT 0x0
#define CGTS_CU10_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x80
#define CGTS_CU10_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
#define CGTS_CU10_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU10_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU10_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x400
#define CGTS_CU10_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU10_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU10_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU10_SP1_CTRL_REG__SP11_MASK 0x7f0000
#define CGTS_CU10_SP1_CTRL_REG__SP11__SHIFT 0x10
#define CGTS_CU10_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x800000
#define CGTS_CU10_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
#define CGTS_CU10_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU10_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU10_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU10_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU10_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU10_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU10_TD_TCP_CTRL_REG__TD_MASK 0x7f
#define CGTS_CU10_TD_TCP_CTRL_REG__TD__SHIFT 0x0
#define CGTS_CU10_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x80
#define CGTS_CU10_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
#define CGTS_CU10_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU10_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU10_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x400
#define CGTS_CU10_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU10_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU10_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU10_TD_TCP_CTRL_REG__TCP_MASK 0x7f0000
#define CGTS_CU10_TD_TCP_CTRL_REG__TCP__SHIFT 0x10
#define CGTS_CU10_TD_TCP_CTRL_REG__TCP_OVERRIDE_MASK 0x800000
#define CGTS_CU10_TD_TCP_CTRL_REG__TCP_OVERRIDE__SHIFT 0x17
#define CGTS_CU10_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU10_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU10_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU10_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU10_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU10_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU11_SP0_CTRL_REG__SP00_MASK 0x7f
#define CGTS_CU11_SP0_CTRL_REG__SP00__SHIFT 0x0
#define CGTS_CU11_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x80
#define CGTS_CU11_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
#define CGTS_CU11_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU11_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU11_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x400
#define CGTS_CU11_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU11_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU11_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU11_SP0_CTRL_REG__SP01_MASK 0x7f0000
#define CGTS_CU11_SP0_CTRL_REG__SP01__SHIFT 0x10
#define CGTS_CU11_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x800000
#define CGTS_CU11_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
#define CGTS_CU11_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU11_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU11_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU11_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU11_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU11_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_MASK 0x7f
#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x80
#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x400
#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_MASK 0x7f0000
#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x800000
#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU11_TA_CTRL_REG__TA_MASK 0x7f
#define CGTS_CU11_TA_CTRL_REG__TA__SHIFT 0x0
#define CGTS_CU11_TA_CTRL_REG__TA_OVERRIDE_MASK 0x80
#define CGTS_CU11_TA_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
#define CGTS_CU11_TA_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU11_TA_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU11_TA_CTRL_REG__TA_LS_OVERRIDE_MASK 0x400
#define CGTS_CU11_TA_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU11_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU11_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU11_SP1_CTRL_REG__SP10_MASK 0x7f
#define CGTS_CU11_SP1_CTRL_REG__SP10__SHIFT 0x0
#define CGTS_CU11_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x80
#define CGTS_CU11_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
#define CGTS_CU11_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU11_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU11_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x400
#define CGTS_CU11_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU11_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU11_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU11_SP1_CTRL_REG__SP11_MASK 0x7f0000
#define CGTS_CU11_SP1_CTRL_REG__SP11__SHIFT 0x10
#define CGTS_CU11_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x800000
#define CGTS_CU11_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
#define CGTS_CU11_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU11_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU11_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU11_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU11_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU11_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU11_TD_TCP_CTRL_REG__TD_MASK 0x7f
#define CGTS_CU11_TD_TCP_CTRL_REG__TD__SHIFT 0x0
#define CGTS_CU11_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x80
#define CGTS_CU11_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
#define CGTS_CU11_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU11_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU11_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x400
#define CGTS_CU11_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU11_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU11_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU11_TD_TCP_CTRL_REG__TCP_MASK 0x7f0000
#define CGTS_CU11_TD_TCP_CTRL_REG__TCP__SHIFT 0x10
#define CGTS_CU11_TD_TCP_CTRL_REG__TCP_OVERRIDE_MASK 0x800000
#define CGTS_CU11_TD_TCP_CTRL_REG__TCP_OVERRIDE__SHIFT 0x17
#define CGTS_CU11_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU11_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU11_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU11_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU11_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU11_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU12_SP0_CTRL_REG__SP00_MASK 0x7f
#define CGTS_CU12_SP0_CTRL_REG__SP00__SHIFT 0x0
#define CGTS_CU12_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x80
#define CGTS_CU12_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
#define CGTS_CU12_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU12_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU12_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x400
#define CGTS_CU12_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU12_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU12_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU12_SP0_CTRL_REG__SP01_MASK 0x7f0000
#define CGTS_CU12_SP0_CTRL_REG__SP01__SHIFT 0x10
#define CGTS_CU12_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x800000
#define CGTS_CU12_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
#define CGTS_CU12_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU12_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU12_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU12_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU12_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU12_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_MASK 0x7f
#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x80
#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x400
#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_MASK 0x7f0000
#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x800000
#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU12_TA_SQC_CTRL_REG__TA_MASK 0x7f
#define CGTS_CU12_TA_SQC_CTRL_REG__TA__SHIFT 0x0
#define CGTS_CU12_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x80
#define CGTS_CU12_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
#define CGTS_CU12_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU12_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU12_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x400
#define CGTS_CU12_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU12_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU12_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_MASK 0x7f0000
#define CGTS_CU12_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x800000
#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU12_SP1_CTRL_REG__SP10_MASK 0x7f
#define CGTS_CU12_SP1_CTRL_REG__SP10__SHIFT 0x0
#define CGTS_CU12_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x80
#define CGTS_CU12_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
#define CGTS_CU12_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU12_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU12_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x400
#define CGTS_CU12_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU12_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU12_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU12_SP1_CTRL_REG__SP11_MASK 0x7f0000
#define CGTS_CU12_SP1_CTRL_REG__SP11__SHIFT 0x10
#define CGTS_CU12_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x800000
#define CGTS_CU12_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
#define CGTS_CU12_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU12_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU12_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU12_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU12_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU12_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU12_TD_TCP_CTRL_REG__TD_MASK 0x7f
#define CGTS_CU12_TD_TCP_CTRL_REG__TD__SHIFT 0x0
#define CGTS_CU12_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x80
#define CGTS_CU12_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
#define CGTS_CU12_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU12_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU12_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x400
#define CGTS_CU12_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU12_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU12_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU12_TD_TCP_CTRL_REG__TCP_MASK 0x7f0000
#define CGTS_CU12_TD_TCP_CTRL_REG__TCP__SHIFT 0x10
#define CGTS_CU12_TD_TCP_CTRL_REG__TCP_OVERRIDE_MASK 0x800000
#define CGTS_CU12_TD_TCP_CTRL_REG__TCP_OVERRIDE__SHIFT 0x17
#define CGTS_CU12_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU12_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU12_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU12_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU12_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU12_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU13_SP0_CTRL_REG__SP00_MASK 0x7f
#define CGTS_CU13_SP0_CTRL_REG__SP00__SHIFT 0x0
#define CGTS_CU13_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x80
#define CGTS_CU13_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
#define CGTS_CU13_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU13_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU13_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x400
#define CGTS_CU13_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU13_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU13_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU13_SP0_CTRL_REG__SP01_MASK 0x7f0000
#define CGTS_CU13_SP0_CTRL_REG__SP01__SHIFT 0x10
#define CGTS_CU13_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x800000
#define CGTS_CU13_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
#define CGTS_CU13_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU13_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU13_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU13_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU13_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU13_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_MASK 0x7f
#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x80
#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x400
#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_MASK 0x7f0000
#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x800000
#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU13_TA_CTRL_REG__TA_MASK 0x7f
#define CGTS_CU13_TA_CTRL_REG__TA__SHIFT 0x0
#define CGTS_CU13_TA_CTRL_REG__TA_OVERRIDE_MASK 0x80
#define CGTS_CU13_TA_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
#define CGTS_CU13_TA_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU13_TA_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU13_TA_CTRL_REG__TA_LS_OVERRIDE_MASK 0x400
#define CGTS_CU13_TA_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU13_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU13_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU13_SP1_CTRL_REG__SP10_MASK 0x7f
#define CGTS_CU13_SP1_CTRL_REG__SP10__SHIFT 0x0
#define CGTS_CU13_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x80
#define CGTS_CU13_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
#define CGTS_CU13_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU13_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU13_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x400
#define CGTS_CU13_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU13_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU13_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU13_SP1_CTRL_REG__SP11_MASK 0x7f0000
#define CGTS_CU13_SP1_CTRL_REG__SP11__SHIFT 0x10
#define CGTS_CU13_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x800000
#define CGTS_CU13_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
#define CGTS_CU13_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU13_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU13_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU13_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU13_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU13_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU13_TD_TCP_CTRL_REG__TD_MASK 0x7f
#define CGTS_CU13_TD_TCP_CTRL_REG__TD__SHIFT 0x0
#define CGTS_CU13_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x80
#define CGTS_CU13_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
#define CGTS_CU13_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU13_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU13_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x400
#define CGTS_CU13_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU13_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU13_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU13_TD_TCP_CTRL_REG__TCP_MASK 0x7f0000
#define CGTS_CU13_TD_TCP_CTRL_REG__TCP__SHIFT 0x10
#define CGTS_CU13_TD_TCP_CTRL_REG__TCP_OVERRIDE_MASK 0x800000
#define CGTS_CU13_TD_TCP_CTRL_REG__TCP_OVERRIDE__SHIFT 0x17
#define CGTS_CU13_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU13_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU13_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU13_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU13_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU13_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU14_SP0_CTRL_REG__SP00_MASK 0x7f
#define CGTS_CU14_SP0_CTRL_REG__SP00__SHIFT 0x0
#define CGTS_CU14_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x80
#define CGTS_CU14_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
#define CGTS_CU14_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU14_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU14_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x400
#define CGTS_CU14_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU14_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU14_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU14_SP0_CTRL_REG__SP01_MASK 0x7f0000
#define CGTS_CU14_SP0_CTRL_REG__SP01__SHIFT 0x10
#define CGTS_CU14_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x800000
#define CGTS_CU14_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
#define CGTS_CU14_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU14_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU14_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU14_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU14_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU14_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_MASK 0x7f
#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x80
#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x400
#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_MASK 0x7f0000
#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x800000
#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x3000000
#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x4000000
#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x8000000
#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
#define CGTS_CU14_TA_CTRL_REG__TA_MASK 0x7f
#define CGTS_CU14_TA_CTRL_REG__TA__SHIFT 0x0
#define CGTS_CU14_TA_CTRL_REG__TA_OVERRIDE_MASK 0x80
#define CGTS_CU14_TA_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
#define CGTS_CU14_TA_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x300
#define CGTS_CU14_TA_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
#define CGTS_CU14_TA_CTRL_REG__TA_LS_OVERRIDE_MASK 0x400
#define CGTS_CU14_TA_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
#define CGTS_CU14_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x800
#define CGTS_CU14_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
#define CGTS_CU14_SP1_CTRL_REG__SP10_MASK 0x7f
#define CGTS_CU14_SP1_CTRL_REG__SP10__SHIFT 0x0
12815#define CGTS_CU14_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x80
12816#define CGTS_CU14_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
12817#define CGTS_CU14_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x300
12818#define CGTS_CU14_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
12819#define CGTS_CU14_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x400
12820#define CGTS_CU14_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
12821#define CGTS_CU14_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x800
12822#define CGTS_CU14_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
12823#define CGTS_CU14_SP1_CTRL_REG__SP11_MASK 0x7f0000
12824#define CGTS_CU14_SP1_CTRL_REG__SP11__SHIFT 0x10
12825#define CGTS_CU14_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x800000
12826#define CGTS_CU14_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
12827#define CGTS_CU14_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x3000000
12828#define CGTS_CU14_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
12829#define CGTS_CU14_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x4000000
12830#define CGTS_CU14_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
12831#define CGTS_CU14_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x8000000
12832#define CGTS_CU14_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
12833#define CGTS_CU14_TD_TCP_CTRL_REG__TD_MASK 0x7f
12834#define CGTS_CU14_TD_TCP_CTRL_REG__TD__SHIFT 0x0
12835#define CGTS_CU14_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x80
12836#define CGTS_CU14_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
12837#define CGTS_CU14_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x300
12838#define CGTS_CU14_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
12839#define CGTS_CU14_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x400
12840#define CGTS_CU14_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
12841#define CGTS_CU14_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x800
12842#define CGTS_CU14_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
12843#define CGTS_CU14_TD_TCP_CTRL_REG__TCP_MASK 0x7f0000
12844#define CGTS_CU14_TD_TCP_CTRL_REG__TCP__SHIFT 0x10
12845#define CGTS_CU14_TD_TCP_CTRL_REG__TCP_OVERRIDE_MASK 0x800000
12846#define CGTS_CU14_TD_TCP_CTRL_REG__TCP_OVERRIDE__SHIFT 0x17
12847#define CGTS_CU14_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE_MASK 0x3000000
12848#define CGTS_CU14_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE__SHIFT 0x18
12849#define CGTS_CU14_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE_MASK 0x4000000
12850#define CGTS_CU14_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE__SHIFT 0x1a
12851#define CGTS_CU14_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE_MASK 0x8000000
12852#define CGTS_CU14_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE__SHIFT 0x1b
12853#define CGTS_CU15_SP0_CTRL_REG__SP00_MASK 0x7f
12854#define CGTS_CU15_SP0_CTRL_REG__SP00__SHIFT 0x0
12855#define CGTS_CU15_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x80
12856#define CGTS_CU15_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
12857#define CGTS_CU15_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x300
12858#define CGTS_CU15_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
12859#define CGTS_CU15_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x400
12860#define CGTS_CU15_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
12861#define CGTS_CU15_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x800
12862#define CGTS_CU15_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
12863#define CGTS_CU15_SP0_CTRL_REG__SP01_MASK 0x7f0000
12864#define CGTS_CU15_SP0_CTRL_REG__SP01__SHIFT 0x10
12865#define CGTS_CU15_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x800000
12866#define CGTS_CU15_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
12867#define CGTS_CU15_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x3000000
12868#define CGTS_CU15_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
12869#define CGTS_CU15_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x4000000
12870#define CGTS_CU15_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
12871#define CGTS_CU15_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x8000000
12872#define CGTS_CU15_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
12873#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_MASK 0x7f
12874#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
12875#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x80
12876#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
12877#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x300
12878#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
12879#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x400
12880#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
12881#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x800
12882#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
12883#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_MASK 0x7f0000
12884#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
12885#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x800000
12886#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
12887#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x3000000
12888#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
12889#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x4000000
12890#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
12891#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x8000000
12892#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
12893#define CGTS_CU15_TA_CTRL_REG__TA_MASK 0x7f
12894#define CGTS_CU15_TA_CTRL_REG__TA__SHIFT 0x0
12895#define CGTS_CU15_TA_CTRL_REG__TA_OVERRIDE_MASK 0x80
12896#define CGTS_CU15_TA_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
12897#define CGTS_CU15_TA_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x300
12898#define CGTS_CU15_TA_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
12899#define CGTS_CU15_TA_CTRL_REG__TA_LS_OVERRIDE_MASK 0x400
12900#define CGTS_CU15_TA_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
12901#define CGTS_CU15_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x800
12902#define CGTS_CU15_TA_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
12903#define CGTS_CU15_SP1_CTRL_REG__SP10_MASK 0x7f
12904#define CGTS_CU15_SP1_CTRL_REG__SP10__SHIFT 0x0
12905#define CGTS_CU15_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x80
12906#define CGTS_CU15_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
12907#define CGTS_CU15_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x300
12908#define CGTS_CU15_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
12909#define CGTS_CU15_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x400
12910#define CGTS_CU15_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
12911#define CGTS_CU15_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x800
12912#define CGTS_CU15_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
12913#define CGTS_CU15_SP1_CTRL_REG__SP11_MASK 0x7f0000
12914#define CGTS_CU15_SP1_CTRL_REG__SP11__SHIFT 0x10
12915#define CGTS_CU15_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x800000
12916#define CGTS_CU15_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
12917#define CGTS_CU15_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x3000000
12918#define CGTS_CU15_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
12919#define CGTS_CU15_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x4000000
12920#define CGTS_CU15_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
12921#define CGTS_CU15_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x8000000
12922#define CGTS_CU15_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
12923#define CGTS_CU15_TD_TCP_CTRL_REG__TD_MASK 0x7f
12924#define CGTS_CU15_TD_TCP_CTRL_REG__TD__SHIFT 0x0
12925#define CGTS_CU15_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x80
12926#define CGTS_CU15_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
12927#define CGTS_CU15_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x300
12928#define CGTS_CU15_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
12929#define CGTS_CU15_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x400
12930#define CGTS_CU15_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
12931#define CGTS_CU15_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x800
12932#define CGTS_CU15_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
12933#define CGTS_CU15_TD_TCP_CTRL_REG__TCP_MASK 0x7f0000
12934#define CGTS_CU15_TD_TCP_CTRL_REG__TCP__SHIFT 0x10
12935#define CGTS_CU15_TD_TCP_CTRL_REG__TCP_OVERRIDE_MASK 0x800000
12936#define CGTS_CU15_TD_TCP_CTRL_REG__TCP_OVERRIDE__SHIFT 0x17
12937#define CGTS_CU15_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE_MASK 0x3000000
12938#define CGTS_CU15_TD_TCP_CTRL_REG__TCP_BUSY_OVERRIDE__SHIFT 0x18
12939#define CGTS_CU15_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE_MASK 0x4000000
12940#define CGTS_CU15_TD_TCP_CTRL_REG__TCP_LS_OVERRIDE__SHIFT 0x1a
12941#define CGTS_CU15_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE_MASK 0x8000000
12942#define CGTS_CU15_TD_TCP_CTRL_REG__TCP_SIMDBUSY_OVERRIDE__SHIFT 0x1b
12943#define CGTT_SPI_CLK_CTRL__ON_DELAY_MASK 0xf
12944#define CGTT_SPI_CLK_CTRL__ON_DELAY__SHIFT 0x0
12945#define CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
12946#define CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
12947#define CGTT_SPI_CLK_CTRL__GRP5_CG_OFF_HYST_MASK 0xfc0000
12948#define CGTT_SPI_CLK_CTRL__GRP5_CG_OFF_HYST__SHIFT 0x12
12949#define CGTT_SPI_CLK_CTRL__GRP5_CG_OVERRIDE_MASK 0x1000000
12950#define CGTT_SPI_CLK_CTRL__GRP5_CG_OVERRIDE__SHIFT 0x18
12951#define CGTT_SPI_CLK_CTRL__ALL_CLK_ON_OVERRIDE_MASK 0x4000000
12952#define CGTT_SPI_CLK_CTRL__ALL_CLK_ON_OVERRIDE__SHIFT 0x1a
12953#define CGTT_SPI_CLK_CTRL__GRP3_OVERRIDE_MASK 0x8000000
12954#define CGTT_SPI_CLK_CTRL__GRP3_OVERRIDE__SHIFT 0x1b
12955#define CGTT_SPI_CLK_CTRL__GRP2_OVERRIDE_MASK 0x10000000
12956#define CGTT_SPI_CLK_CTRL__GRP2_OVERRIDE__SHIFT 0x1c
12957#define CGTT_SPI_CLK_CTRL__GRP1_OVERRIDE_MASK 0x20000000
12958#define CGTT_SPI_CLK_CTRL__GRP1_OVERRIDE__SHIFT 0x1d
12959#define CGTT_SPI_CLK_CTRL__GRP0_OVERRIDE_MASK 0x40000000
12960#define CGTT_SPI_CLK_CTRL__GRP0_OVERRIDE__SHIFT 0x1e
12961#define CGTT_SPI_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000
12962#define CGTT_SPI_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
12963#define CGTT_PC_CLK_CTRL__ON_DELAY_MASK 0xf
12964#define CGTT_PC_CLK_CTRL__ON_DELAY__SHIFT 0x0
12965#define CGTT_PC_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
12966#define CGTT_PC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
12967#define CGTT_PC_CLK_CTRL__GRP5_CG_OFF_HYST_MASK 0xfc0000
12968#define CGTT_PC_CLK_CTRL__GRP5_CG_OFF_HYST__SHIFT 0x12
12969#define CGTT_PC_CLK_CTRL__GRP5_CG_OVERRIDE_MASK 0x1000000
12970#define CGTT_PC_CLK_CTRL__GRP5_CG_OVERRIDE__SHIFT 0x18
12971#define CGTT_PC_CLK_CTRL__BACK_CLK_ON_OVERRIDE_MASK 0x2000000
12972#define CGTT_PC_CLK_CTRL__BACK_CLK_ON_OVERRIDE__SHIFT 0x19
12973#define CGTT_PC_CLK_CTRL__FRONT_CLK_ON_OVERRIDE_MASK 0x4000000
12974#define CGTT_PC_CLK_CTRL__FRONT_CLK_ON_OVERRIDE__SHIFT 0x1a
12975#define CGTT_PC_CLK_CTRL__CORE3_OVERRIDE_MASK 0x8000000
12976#define CGTT_PC_CLK_CTRL__CORE3_OVERRIDE__SHIFT 0x1b
12977#define CGTT_PC_CLK_CTRL__CORE2_OVERRIDE_MASK 0x10000000
12978#define CGTT_PC_CLK_CTRL__CORE2_OVERRIDE__SHIFT 0x1c
12979#define CGTT_PC_CLK_CTRL__CORE1_OVERRIDE_MASK 0x20000000
12980#define CGTT_PC_CLK_CTRL__CORE1_OVERRIDE__SHIFT 0x1d
12981#define CGTT_PC_CLK_CTRL__CORE0_OVERRIDE_MASK 0x40000000
12982#define CGTT_PC_CLK_CTRL__CORE0_OVERRIDE__SHIFT 0x1e
12983#define CGTT_PC_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000
12984#define CGTT_PC_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
12985#define CGTT_BCI_CLK_CTRL__ON_DELAY_MASK 0xf
12986#define CGTT_BCI_CLK_CTRL__ON_DELAY__SHIFT 0x0
12987#define CGTT_BCI_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
12988#define CGTT_BCI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
12989#define CGTT_BCI_CLK_CTRL__RESERVED_MASK 0xfff000
12990#define CGTT_BCI_CLK_CTRL__RESERVED__SHIFT 0xc
12991#define CGTT_BCI_CLK_CTRL__CORE6_OVERRIDE_MASK 0x1000000
12992#define CGTT_BCI_CLK_CTRL__CORE6_OVERRIDE__SHIFT 0x18
12993#define CGTT_BCI_CLK_CTRL__CORE5_OVERRIDE_MASK 0x2000000
12994#define CGTT_BCI_CLK_CTRL__CORE5_OVERRIDE__SHIFT 0x19
12995#define CGTT_BCI_CLK_CTRL__CORE4_OVERRIDE_MASK 0x4000000
12996#define CGTT_BCI_CLK_CTRL__CORE4_OVERRIDE__SHIFT 0x1a
12997#define CGTT_BCI_CLK_CTRL__CORE3_OVERRIDE_MASK 0x8000000
12998#define CGTT_BCI_CLK_CTRL__CORE3_OVERRIDE__SHIFT 0x1b
12999#define CGTT_BCI_CLK_CTRL__CORE2_OVERRIDE_MASK 0x10000000
13000#define CGTT_BCI_CLK_CTRL__CORE2_OVERRIDE__SHIFT 0x1c
13001#define CGTT_BCI_CLK_CTRL__CORE1_OVERRIDE_MASK 0x20000000
13002#define CGTT_BCI_CLK_CTRL__CORE1_OVERRIDE__SHIFT 0x1d
13003#define CGTT_BCI_CLK_CTRL__CORE0_OVERRIDE_MASK 0x40000000
13004#define CGTT_BCI_CLK_CTRL__CORE0_OVERRIDE__SHIFT 0x1e
13005#define CGTT_BCI_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000
13006#define CGTT_BCI_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
13007#define SPI_WF_LIFETIME_CNTL__SAMPLE_PERIOD_MASK 0xf
13008#define SPI_WF_LIFETIME_CNTL__SAMPLE_PERIOD__SHIFT 0x0
13009#define SPI_WF_LIFETIME_CNTL__EN_MASK 0x10
13010#define SPI_WF_LIFETIME_CNTL__EN__SHIFT 0x4
13011#define SPI_WF_LIFETIME_LIMIT_0__MAX_CNT_MASK 0x7fffffff
13012#define SPI_WF_LIFETIME_LIMIT_0__MAX_CNT__SHIFT 0x0
13013#define SPI_WF_LIFETIME_LIMIT_0__EN_WARN_MASK 0x80000000
13014#define SPI_WF_LIFETIME_LIMIT_0__EN_WARN__SHIFT 0x1f
13015#define SPI_WF_LIFETIME_LIMIT_1__MAX_CNT_MASK 0x7fffffff
13016#define SPI_WF_LIFETIME_LIMIT_1__MAX_CNT__SHIFT 0x0
13017#define SPI_WF_LIFETIME_LIMIT_1__EN_WARN_MASK 0x80000000
13018#define SPI_WF_LIFETIME_LIMIT_1__EN_WARN__SHIFT 0x1f
13019#define SPI_WF_LIFETIME_LIMIT_2__MAX_CNT_MASK 0x7fffffff
13020#define SPI_WF_LIFETIME_LIMIT_2__MAX_CNT__SHIFT 0x0
13021#define SPI_WF_LIFETIME_LIMIT_2__EN_WARN_MASK 0x80000000
13022#define SPI_WF_LIFETIME_LIMIT_2__EN_WARN__SHIFT 0x1f
13023#define SPI_WF_LIFETIME_LIMIT_3__MAX_CNT_MASK 0x7fffffff
13024#define SPI_WF_LIFETIME_LIMIT_3__MAX_CNT__SHIFT 0x0
13025#define SPI_WF_LIFETIME_LIMIT_3__EN_WARN_MASK 0x80000000
13026#define SPI_WF_LIFETIME_LIMIT_3__EN_WARN__SHIFT 0x1f
13027#define SPI_WF_LIFETIME_LIMIT_4__MAX_CNT_MASK 0x7fffffff
13028#define SPI_WF_LIFETIME_LIMIT_4__MAX_CNT__SHIFT 0x0
13029#define SPI_WF_LIFETIME_LIMIT_4__EN_WARN_MASK 0x80000000
13030#define SPI_WF_LIFETIME_LIMIT_4__EN_WARN__SHIFT 0x1f
13031#define SPI_WF_LIFETIME_LIMIT_5__MAX_CNT_MASK 0x7fffffff
13032#define SPI_WF_LIFETIME_LIMIT_5__MAX_CNT__SHIFT 0x0
13033#define SPI_WF_LIFETIME_LIMIT_5__EN_WARN_MASK 0x80000000
13034#define SPI_WF_LIFETIME_LIMIT_5__EN_WARN__SHIFT 0x1f
13035#define SPI_WF_LIFETIME_LIMIT_6__MAX_CNT_MASK 0x7fffffff
13036#define SPI_WF_LIFETIME_LIMIT_6__MAX_CNT__SHIFT 0x0
13037#define SPI_WF_LIFETIME_LIMIT_6__EN_WARN_MASK 0x80000000
13038#define SPI_WF_LIFETIME_LIMIT_6__EN_WARN__SHIFT 0x1f
13039#define SPI_WF_LIFETIME_LIMIT_7__MAX_CNT_MASK 0x7fffffff
13040#define SPI_WF_LIFETIME_LIMIT_7__MAX_CNT__SHIFT 0x0
13041#define SPI_WF_LIFETIME_LIMIT_7__EN_WARN_MASK 0x80000000
13042#define SPI_WF_LIFETIME_LIMIT_7__EN_WARN__SHIFT 0x1f
13043#define SPI_WF_LIFETIME_LIMIT_8__MAX_CNT_MASK 0x7fffffff
13044#define SPI_WF_LIFETIME_LIMIT_8__MAX_CNT__SHIFT 0x0
13045#define SPI_WF_LIFETIME_LIMIT_8__EN_WARN_MASK 0x80000000
13046#define SPI_WF_LIFETIME_LIMIT_8__EN_WARN__SHIFT 0x1f
13047#define SPI_WF_LIFETIME_LIMIT_9__MAX_CNT_MASK 0x7fffffff
13048#define SPI_WF_LIFETIME_LIMIT_9__MAX_CNT__SHIFT 0x0
13049#define SPI_WF_LIFETIME_LIMIT_9__EN_WARN_MASK 0x80000000
13050#define SPI_WF_LIFETIME_LIMIT_9__EN_WARN__SHIFT 0x1f
13051#define SPI_WF_LIFETIME_STATUS_0__MAX_CNT_MASK 0x7fffffff
13052#define SPI_WF_LIFETIME_STATUS_0__MAX_CNT__SHIFT 0x0
13053#define SPI_WF_LIFETIME_STATUS_0__INT_SENT_MASK 0x80000000
13054#define SPI_WF_LIFETIME_STATUS_0__INT_SENT__SHIFT 0x1f
13055#define SPI_WF_LIFETIME_STATUS_1__MAX_CNT_MASK 0x7fffffff
13056#define SPI_WF_LIFETIME_STATUS_1__MAX_CNT__SHIFT 0x0
13057#define SPI_WF_LIFETIME_STATUS_1__INT_SENT_MASK 0x80000000
13058#define SPI_WF_LIFETIME_STATUS_1__INT_SENT__SHIFT 0x1f
13059#define SPI_WF_LIFETIME_STATUS_2__MAX_CNT_MASK 0x7fffffff
13060#define SPI_WF_LIFETIME_STATUS_2__MAX_CNT__SHIFT 0x0
13061#define SPI_WF_LIFETIME_STATUS_2__INT_SENT_MASK 0x80000000
13062#define SPI_WF_LIFETIME_STATUS_2__INT_SENT__SHIFT 0x1f
13063#define SPI_WF_LIFETIME_STATUS_3__MAX_CNT_MASK 0x7fffffff
13064#define SPI_WF_LIFETIME_STATUS_3__MAX_CNT__SHIFT 0x0
13065#define SPI_WF_LIFETIME_STATUS_3__INT_SENT_MASK 0x80000000
13066#define SPI_WF_LIFETIME_STATUS_3__INT_SENT__SHIFT 0x1f
13067#define SPI_WF_LIFETIME_STATUS_4__MAX_CNT_MASK 0x7fffffff
13068#define SPI_WF_LIFETIME_STATUS_4__MAX_CNT__SHIFT 0x0
13069#define SPI_WF_LIFETIME_STATUS_4__INT_SENT_MASK 0x80000000
13070#define SPI_WF_LIFETIME_STATUS_4__INT_SENT__SHIFT 0x1f
13071#define SPI_WF_LIFETIME_STATUS_5__MAX_CNT_MASK 0x7fffffff
13072#define SPI_WF_LIFETIME_STATUS_5__MAX_CNT__SHIFT 0x0
13073#define SPI_WF_LIFETIME_STATUS_5__INT_SENT_MASK 0x80000000
13074#define SPI_WF_LIFETIME_STATUS_5__INT_SENT__SHIFT 0x1f
13075#define SPI_WF_LIFETIME_STATUS_6__MAX_CNT_MASK 0x7fffffff
13076#define SPI_WF_LIFETIME_STATUS_6__MAX_CNT__SHIFT 0x0
13077#define SPI_WF_LIFETIME_STATUS_6__INT_SENT_MASK 0x80000000
13078#define SPI_WF_LIFETIME_STATUS_6__INT_SENT__SHIFT 0x1f
13079#define SPI_WF_LIFETIME_STATUS_7__MAX_CNT_MASK 0x7fffffff
13080#define SPI_WF_LIFETIME_STATUS_7__MAX_CNT__SHIFT 0x0
13081#define SPI_WF_LIFETIME_STATUS_7__INT_SENT_MASK 0x80000000
13082#define SPI_WF_LIFETIME_STATUS_7__INT_SENT__SHIFT 0x1f
13083#define SPI_WF_LIFETIME_STATUS_8__MAX_CNT_MASK 0x7fffffff
13084#define SPI_WF_LIFETIME_STATUS_8__MAX_CNT__SHIFT 0x0
13085#define SPI_WF_LIFETIME_STATUS_8__INT_SENT_MASK 0x80000000
13086#define SPI_WF_LIFETIME_STATUS_8__INT_SENT__SHIFT 0x1f
13087#define SPI_WF_LIFETIME_STATUS_9__MAX_CNT_MASK 0x7fffffff
13088#define SPI_WF_LIFETIME_STATUS_9__MAX_CNT__SHIFT 0x0
13089#define SPI_WF_LIFETIME_STATUS_9__INT_SENT_MASK 0x80000000
13090#define SPI_WF_LIFETIME_STATUS_9__INT_SENT__SHIFT 0x1f
13091#define SPI_WF_LIFETIME_STATUS_10__MAX_CNT_MASK 0x7fffffff
13092#define SPI_WF_LIFETIME_STATUS_10__MAX_CNT__SHIFT 0x0
13093#define SPI_WF_LIFETIME_STATUS_10__INT_SENT_MASK 0x80000000
13094#define SPI_WF_LIFETIME_STATUS_10__INT_SENT__SHIFT 0x1f
13095#define SPI_WF_LIFETIME_STATUS_11__MAX_CNT_MASK 0x7fffffff
13096#define SPI_WF_LIFETIME_STATUS_11__MAX_CNT__SHIFT 0x0
13097#define SPI_WF_LIFETIME_STATUS_11__INT_SENT_MASK 0x80000000
13098#define SPI_WF_LIFETIME_STATUS_11__INT_SENT__SHIFT 0x1f
13099#define SPI_WF_LIFETIME_STATUS_12__MAX_CNT_MASK 0x7fffffff
13100#define SPI_WF_LIFETIME_STATUS_12__MAX_CNT__SHIFT 0x0
13101#define SPI_WF_LIFETIME_STATUS_12__INT_SENT_MASK 0x80000000
13102#define SPI_WF_LIFETIME_STATUS_12__INT_SENT__SHIFT 0x1f
13103#define SPI_WF_LIFETIME_STATUS_13__MAX_CNT_MASK 0x7fffffff
13104#define SPI_WF_LIFETIME_STATUS_13__MAX_CNT__SHIFT 0x0
13105#define SPI_WF_LIFETIME_STATUS_13__INT_SENT_MASK 0x80000000
13106#define SPI_WF_LIFETIME_STATUS_13__INT_SENT__SHIFT 0x1f
13107#define SPI_WF_LIFETIME_STATUS_14__MAX_CNT_MASK 0x7fffffff
13108#define SPI_WF_LIFETIME_STATUS_14__MAX_CNT__SHIFT 0x0
13109#define SPI_WF_LIFETIME_STATUS_14__INT_SENT_MASK 0x80000000
13110#define SPI_WF_LIFETIME_STATUS_14__INT_SENT__SHIFT 0x1f
13111#define SPI_WF_LIFETIME_STATUS_15__MAX_CNT_MASK 0x7fffffff
13112#define SPI_WF_LIFETIME_STATUS_15__MAX_CNT__SHIFT 0x0
13113#define SPI_WF_LIFETIME_STATUS_15__INT_SENT_MASK 0x80000000
13114#define SPI_WF_LIFETIME_STATUS_15__INT_SENT__SHIFT 0x1f
13115#define SPI_WF_LIFETIME_STATUS_16__MAX_CNT_MASK 0x7fffffff
13116#define SPI_WF_LIFETIME_STATUS_16__MAX_CNT__SHIFT 0x0
13117#define SPI_WF_LIFETIME_STATUS_16__INT_SENT_MASK 0x80000000
13118#define SPI_WF_LIFETIME_STATUS_16__INT_SENT__SHIFT 0x1f
13119#define SPI_WF_LIFETIME_STATUS_17__MAX_CNT_MASK 0x7fffffff
13120#define SPI_WF_LIFETIME_STATUS_17__MAX_CNT__SHIFT 0x0
13121#define SPI_WF_LIFETIME_STATUS_17__INT_SENT_MASK 0x80000000
13122#define SPI_WF_LIFETIME_STATUS_17__INT_SENT__SHIFT 0x1f
13123#define SPI_WF_LIFETIME_STATUS_18__MAX_CNT_MASK 0x7fffffff
13124#define SPI_WF_LIFETIME_STATUS_18__MAX_CNT__SHIFT 0x0
13125#define SPI_WF_LIFETIME_STATUS_18__INT_SENT_MASK 0x80000000
13126#define SPI_WF_LIFETIME_STATUS_18__INT_SENT__SHIFT 0x1f
13127#define SPI_WF_LIFETIME_STATUS_19__MAX_CNT_MASK 0x7fffffff
13128#define SPI_WF_LIFETIME_STATUS_19__MAX_CNT__SHIFT 0x0
13129#define SPI_WF_LIFETIME_STATUS_19__INT_SENT_MASK 0x80000000
13130#define SPI_WF_LIFETIME_STATUS_19__INT_SENT__SHIFT 0x1f
13131#define SPI_WF_LIFETIME_STATUS_20__MAX_CNT_MASK 0x7fffffff
13132#define SPI_WF_LIFETIME_STATUS_20__MAX_CNT__SHIFT 0x0
13133#define SPI_WF_LIFETIME_STATUS_20__INT_SENT_MASK 0x80000000
13134#define SPI_WF_LIFETIME_STATUS_20__INT_SENT__SHIFT 0x1f
13135#define SPI_WF_LIFETIME_DEBUG__START_VALUE_MASK 0x7fffffff
13136#define SPI_WF_LIFETIME_DEBUG__START_VALUE__SHIFT 0x0
13137#define SPI_WF_LIFETIME_DEBUG__OVERRIDE_EN_MASK 0x80000000
13138#define SPI_WF_LIFETIME_DEBUG__OVERRIDE_EN__SHIFT 0x1f
13139#define SPI_SLAVE_DEBUG_BUSY__LS_VTX_BUSY_MASK 0x1
13140#define SPI_SLAVE_DEBUG_BUSY__LS_VTX_BUSY__SHIFT 0x0
13141#define SPI_SLAVE_DEBUG_BUSY__HS_VTX_BUSY_MASK 0x2
13142#define SPI_SLAVE_DEBUG_BUSY__HS_VTX_BUSY__SHIFT 0x1
13143#define SPI_SLAVE_DEBUG_BUSY__ES_VTX_BUSY_MASK 0x4
13144#define SPI_SLAVE_DEBUG_BUSY__ES_VTX_BUSY__SHIFT 0x2
13145#define SPI_SLAVE_DEBUG_BUSY__GS_VTX_BUSY_MASK 0x8
13146#define SPI_SLAVE_DEBUG_BUSY__GS_VTX_BUSY__SHIFT 0x3
13147#define SPI_SLAVE_DEBUG_BUSY__VS_VTX_BUSY_MASK 0x10
13148#define SPI_SLAVE_DEBUG_BUSY__VS_VTX_BUSY__SHIFT 0x4
13149#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC00_BUSY_MASK 0x20
13150#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC00_BUSY__SHIFT 0x5
13151#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC01_BUSY_MASK 0x40
13152#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC01_BUSY__SHIFT 0x6
13153#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC10_BUSY_MASK 0x80
13154#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC10_BUSY__SHIFT 0x7
13155#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC11_BUSY_MASK 0x100
13156#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC11_BUSY__SHIFT 0x8
13157#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC00_BUSY_MASK 0x200
13158#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC00_BUSY__SHIFT 0x9
13159#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC01_BUSY_MASK 0x400
13160#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC01_BUSY__SHIFT 0xa
13161#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC02_BUSY_MASK 0x800
13162#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC02_BUSY__SHIFT 0xb
13163#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC03_BUSY_MASK 0x1000
13164#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC03_BUSY__SHIFT 0xc
13165#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC10_BUSY_MASK 0x2000
13166#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC10_BUSY__SHIFT 0xd
13167#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC11_BUSY_MASK 0x4000
13168#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC11_BUSY__SHIFT 0xe
13169#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC12_BUSY_MASK 0x8000
13170#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC12_BUSY__SHIFT 0xf
13171#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC13_BUSY_MASK 0x10000
13172#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC13_BUSY__SHIFT 0x10
13173#define SPI_SLAVE_DEBUG_BUSY__WAVEBUFFER0_BUSY_MASK 0x20000
13174#define SPI_SLAVE_DEBUG_BUSY__WAVEBUFFER0_BUSY__SHIFT 0x11
13175#define SPI_SLAVE_DEBUG_BUSY__WAVEBUFFER1_BUSY_MASK 0x40000
13176#define SPI_SLAVE_DEBUG_BUSY__WAVEBUFFER1_BUSY__SHIFT 0x12
13177#define SPI_SLAVE_DEBUG_BUSY__WAVE_WC0_BUSY_MASK 0x80000
13178#define SPI_SLAVE_DEBUG_BUSY__WAVE_WC0_BUSY__SHIFT 0x13
13179#define SPI_SLAVE_DEBUG_BUSY__WAVE_WC1_BUSY_MASK 0x100000
13180#define SPI_SLAVE_DEBUG_BUSY__WAVE_WC1_BUSY__SHIFT 0x14
13181#define SPI_SLAVE_DEBUG_BUSY__EVENT_CNTL_BUSY_MASK 0x200000
13182#define SPI_SLAVE_DEBUG_BUSY__EVENT_CNTL_BUSY__SHIFT 0x15
13183#define SPI_SLAVE_DEBUG_BUSY__SAVE_CTX_BUSY_MASK 0x400000
13184#define SPI_SLAVE_DEBUG_BUSY__SAVE_CTX_BUSY__SHIFT 0x16
13185#define SPI_LB_CTR_CTRL__LOAD_MASK 0x1
13186#define SPI_LB_CTR_CTRL__LOAD__SHIFT 0x0
13187#define SPI_LB_CU_MASK__CU_MASK_MASK 0xffff
13188#define SPI_LB_CU_MASK__CU_MASK__SHIFT 0x0
13189#define SPI_LB_DATA_REG__CNT_DATA_MASK 0xffffffff
13190#define SPI_LB_DATA_REG__CNT_DATA__SHIFT 0x0
13191#define SPI_PG_ENABLE_STATIC_CU_MASK__CU_MASK_MASK 0xffff
13192#define SPI_PG_ENABLE_STATIC_CU_MASK__CU_MASK__SHIFT 0x0
13193#define SPI_GDS_CREDITS__DS_DATA_CREDITS_MASK 0xff
13194#define SPI_GDS_CREDITS__DS_DATA_CREDITS__SHIFT 0x0
13195#define SPI_GDS_CREDITS__DS_CMD_CREDITS_MASK 0xff00
13196#define SPI_GDS_CREDITS__DS_CMD_CREDITS__SHIFT 0x8
13197#define SPI_GDS_CREDITS__UNUSED_MASK 0xffff0000
13198#define SPI_GDS_CREDITS__UNUSED__SHIFT 0x10
13199#define SPI_SX_EXPORT_BUFFER_SIZES__COLOR_BUFFER_SIZE_MASK 0xffff
13200#define SPI_SX_EXPORT_BUFFER_SIZES__COLOR_BUFFER_SIZE__SHIFT 0x0
13201#define SPI_SX_EXPORT_BUFFER_SIZES__POSITION_BUFFER_SIZE_MASK 0xffff0000
13202#define SPI_SX_EXPORT_BUFFER_SIZES__POSITION_BUFFER_SIZE__SHIFT 0x10
13203#define SPI_SX_SCOREBOARD_BUFFER_SIZES__COLOR_SCOREBOARD_SIZE_MASK 0xffff
13204#define SPI_SX_SCOREBOARD_BUFFER_SIZES__COLOR_SCOREBOARD_SIZE__SHIFT 0x0
13205#define SPI_SX_SCOREBOARD_BUFFER_SIZES__POSITION_SCOREBOARD_SIZE_MASK 0xffff0000
13206#define SPI_SX_SCOREBOARD_BUFFER_SIZES__POSITION_SCOREBOARD_SIZE__SHIFT 0x10
13207#define SPI_CSQ_WF_ACTIVE_STATUS__ACTIVE_MASK 0xffffffff
13208#define SPI_CSQ_WF_ACTIVE_STATUS__ACTIVE__SHIFT 0x0
13209#define SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK 0x7ff
13210#define SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT__SHIFT 0x0
13211#define SPI_CSQ_WF_ACTIVE_COUNT_1__COUNT_MASK 0x7ff
13212#define SPI_CSQ_WF_ACTIVE_COUNT_1__COUNT__SHIFT 0x0
13213#define SPI_CSQ_WF_ACTIVE_COUNT_2__COUNT_MASK 0x7ff
13214#define SPI_CSQ_WF_ACTIVE_COUNT_2__COUNT__SHIFT 0x0
13215#define SPI_CSQ_WF_ACTIVE_COUNT_3__COUNT_MASK 0x7ff
13216#define SPI_CSQ_WF_ACTIVE_COUNT_3__COUNT__SHIFT 0x0
13217#define SPI_CSQ_WF_ACTIVE_COUNT_4__COUNT_MASK 0x7ff
13218#define SPI_CSQ_WF_ACTIVE_COUNT_4__COUNT__SHIFT 0x0
13219#define SPI_CSQ_WF_ACTIVE_COUNT_5__COUNT_MASK 0x7ff
13220#define SPI_CSQ_WF_ACTIVE_COUNT_5__COUNT__SHIFT 0x0
13221#define SPI_CSQ_WF_ACTIVE_COUNT_6__COUNT_MASK 0x7ff
13222#define SPI_CSQ_WF_ACTIVE_COUNT_6__COUNT__SHIFT 0x0
13223#define SPI_CSQ_WF_ACTIVE_COUNT_7__COUNT_MASK 0x7ff
13224#define SPI_CSQ_WF_ACTIVE_COUNT_7__COUNT__SHIFT 0x0
13225#define BCI_DEBUG_READ__DATA_MASK 0xffffff
13226#define BCI_DEBUG_READ__DATA__SHIFT 0x0
13227#define SPI_P0_TRAP_SCREEN_PSBA_LO__MEM_BASE_MASK 0xffffffff
13228#define SPI_P0_TRAP_SCREEN_PSBA_LO__MEM_BASE__SHIFT 0x0
13229#define SPI_P0_TRAP_SCREEN_PSBA_HI__MEM_BASE_MASK 0xff
13230#define SPI_P0_TRAP_SCREEN_PSBA_HI__MEM_BASE__SHIFT 0x0
13231#define SPI_P0_TRAP_SCREEN_PSMA_LO__MEM_BASE_MASK 0xffffffff
13232#define SPI_P0_TRAP_SCREEN_PSMA_LO__MEM_BASE__SHIFT 0x0
13233#define SPI_P0_TRAP_SCREEN_PSMA_HI__MEM_BASE_MASK 0xff
13234#define SPI_P0_TRAP_SCREEN_PSMA_HI__MEM_BASE__SHIFT 0x0
13235#define SPI_P0_TRAP_SCREEN_GPR_MIN__VGPR_MIN_MASK 0x3f
13236#define SPI_P0_TRAP_SCREEN_GPR_MIN__VGPR_MIN__SHIFT 0x0
13237#define SPI_P0_TRAP_SCREEN_GPR_MIN__SGPR_MIN_MASK 0x3c0
13238#define SPI_P0_TRAP_SCREEN_GPR_MIN__SGPR_MIN__SHIFT 0x6
13239#define SPI_P1_TRAP_SCREEN_PSBA_LO__MEM_BASE_MASK 0xffffffff
13240#define SPI_P1_TRAP_SCREEN_PSBA_LO__MEM_BASE__SHIFT 0x0
13241#define SPI_P1_TRAP_SCREEN_PSBA_HI__MEM_BASE_MASK 0xff
13242#define SPI_P1_TRAP_SCREEN_PSBA_HI__MEM_BASE__SHIFT 0x0
13243#define SPI_P1_TRAP_SCREEN_PSMA_LO__MEM_BASE_MASK 0xffffffff
13244#define SPI_P1_TRAP_SCREEN_PSMA_LO__MEM_BASE__SHIFT 0x0
13245#define SPI_P1_TRAP_SCREEN_PSMA_HI__MEM_BASE_MASK 0xff
13246#define SPI_P1_TRAP_SCREEN_PSMA_HI__MEM_BASE__SHIFT 0x0
13247#define SPI_P1_TRAP_SCREEN_GPR_MIN__VGPR_MIN_MASK 0x3f
13248#define SPI_P1_TRAP_SCREEN_GPR_MIN__VGPR_MIN__SHIFT 0x0
13249#define SPI_P1_TRAP_SCREEN_GPR_MIN__SGPR_MIN_MASK 0x3c0
13250#define SPI_P1_TRAP_SCREEN_GPR_MIN__SGPR_MIN__SHIFT 0x6
13251#define SPI_SHADER_TBA_LO_PS__MEM_BASE_MASK 0xffffffff
13252#define SPI_SHADER_TBA_LO_PS__MEM_BASE__SHIFT 0x0
13253#define SPI_SHADER_TBA_HI_PS__MEM_BASE_MASK 0xff
13254#define SPI_SHADER_TBA_HI_PS__MEM_BASE__SHIFT 0x0
13255#define SPI_SHADER_TMA_LO_PS__MEM_BASE_MASK 0xffffffff
13256#define SPI_SHADER_TMA_LO_PS__MEM_BASE__SHIFT 0x0
13257#define SPI_SHADER_TMA_HI_PS__MEM_BASE_MASK 0xff
13258#define SPI_SHADER_TMA_HI_PS__MEM_BASE__SHIFT 0x0
13259#define SPI_SHADER_PGM_LO_PS__MEM_BASE_MASK 0xffffffff
13260#define SPI_SHADER_PGM_LO_PS__MEM_BASE__SHIFT 0x0
13261#define SPI_SHADER_PGM_HI_PS__MEM_BASE_MASK 0xff
13262#define SPI_SHADER_PGM_HI_PS__MEM_BASE__SHIFT 0x0
13263#define SPI_SHADER_PGM_RSRC1_PS__VGPRS_MASK 0x3f
13264#define SPI_SHADER_PGM_RSRC1_PS__VGPRS__SHIFT 0x0
13265#define SPI_SHADER_PGM_RSRC1_PS__SGPRS_MASK 0x3c0
13266#define SPI_SHADER_PGM_RSRC1_PS__SGPRS__SHIFT 0x6
13267#define SPI_SHADER_PGM_RSRC1_PS__PRIORITY_MASK 0xc00
13268#define SPI_SHADER_PGM_RSRC1_PS__PRIORITY__SHIFT 0xa
/*
 * Per-field MASK/SHIFT definitions for the gfx8 (VI) SPI shader-stage
 * resource registers (PS/VS/GS/ES/HS/LS program address, RSRC1-3 and
 * USER_DATA registers) plus the SQ/SQC config registers.
 *
 * Auto-generated register description data: each register field gets a
 * <REG>__<FIELD>_MASK (bit mask within the 32-bit register) and a
 * matching <REG>__<FIELD>__SHIFT (bit position of the field's LSB).
 * Values are kept as bare hex literals to match the generator's output.
 *
 * NOTE(review): this chunk previously had scrape artifacts (viewer line
 * numbers fused onto each line, e.g. "13269#define"); they are removed
 * here so the header preprocesses again. Field values are unchanged.
 */

/* SPI_SHADER_PGM_RSRC1_PS — pixel-shader resource register 1 */
#define SPI_SHADER_PGM_RSRC1_PS__FLOAT_MODE_MASK 0xff000
#define SPI_SHADER_PGM_RSRC1_PS__FLOAT_MODE__SHIFT 0xc
#define SPI_SHADER_PGM_RSRC1_PS__PRIV_MASK 0x100000
#define SPI_SHADER_PGM_RSRC1_PS__PRIV__SHIFT 0x14
#define SPI_SHADER_PGM_RSRC1_PS__DX10_CLAMP_MASK 0x200000
#define SPI_SHADER_PGM_RSRC1_PS__DX10_CLAMP__SHIFT 0x15
#define SPI_SHADER_PGM_RSRC1_PS__DEBUG_MODE_MASK 0x400000
#define SPI_SHADER_PGM_RSRC1_PS__DEBUG_MODE__SHIFT 0x16
#define SPI_SHADER_PGM_RSRC1_PS__IEEE_MODE_MASK 0x800000
#define SPI_SHADER_PGM_RSRC1_PS__IEEE_MODE__SHIFT 0x17
#define SPI_SHADER_PGM_RSRC1_PS__CU_GROUP_DISABLE_MASK 0x1000000
#define SPI_SHADER_PGM_RSRC1_PS__CU_GROUP_DISABLE__SHIFT 0x18
#define SPI_SHADER_PGM_RSRC1_PS__CACHE_CTL_MASK 0xe000000
#define SPI_SHADER_PGM_RSRC1_PS__CACHE_CTL__SHIFT 0x19
#define SPI_SHADER_PGM_RSRC1_PS__CDBG_USER_MASK 0x10000000
#define SPI_SHADER_PGM_RSRC1_PS__CDBG_USER__SHIFT 0x1c
/* SPI_SHADER_PGM_RSRC2/3_PS */
#define SPI_SHADER_PGM_RSRC2_PS__SCRATCH_EN_MASK 0x1
#define SPI_SHADER_PGM_RSRC2_PS__SCRATCH_EN__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MASK 0x3e
#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR__SHIFT 0x1
#define SPI_SHADER_PGM_RSRC2_PS__TRAP_PRESENT_MASK 0x40
#define SPI_SHADER_PGM_RSRC2_PS__TRAP_PRESENT__SHIFT 0x6
#define SPI_SHADER_PGM_RSRC2_PS__WAVE_CNT_EN_MASK 0x80
#define SPI_SHADER_PGM_RSRC2_PS__WAVE_CNT_EN__SHIFT 0x7
#define SPI_SHADER_PGM_RSRC2_PS__EXTRA_LDS_SIZE_MASK 0xff00
#define SPI_SHADER_PGM_RSRC2_PS__EXTRA_LDS_SIZE__SHIFT 0x8
#define SPI_SHADER_PGM_RSRC2_PS__EXCP_EN_MASK 0x1ff0000
#define SPI_SHADER_PGM_RSRC2_PS__EXCP_EN__SHIFT 0x10
#define SPI_SHADER_PGM_RSRC3_PS__CU_EN_MASK 0xffff
#define SPI_SHADER_PGM_RSRC3_PS__CU_EN__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC3_PS__WAVE_LIMIT_MASK 0x3f0000
#define SPI_SHADER_PGM_RSRC3_PS__WAVE_LIMIT__SHIFT 0x10
#define SPI_SHADER_PGM_RSRC3_PS__LOCK_LOW_THRESHOLD_MASK 0x3c00000
#define SPI_SHADER_PGM_RSRC3_PS__LOCK_LOW_THRESHOLD__SHIFT 0x16
/* SPI_SHADER_USER_DATA_PS_0..15 — full 32-bit user SGPR payloads */
#define SPI_SHADER_USER_DATA_PS_0__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_PS_0__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_PS_1__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_PS_1__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_PS_2__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_PS_2__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_PS_3__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_PS_3__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_PS_4__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_PS_4__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_PS_5__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_PS_5__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_PS_6__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_PS_6__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_PS_7__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_PS_7__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_PS_8__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_PS_8__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_PS_9__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_PS_9__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_PS_10__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_PS_10__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_PS_11__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_PS_11__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_PS_12__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_PS_12__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_PS_13__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_PS_13__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_PS_14__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_PS_14__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_PS_15__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_PS_15__DATA__SHIFT 0x0
/* VS stage: trap/TMA and program base addresses (LO = 32 bits, HI = top 8) */
#define SPI_SHADER_TBA_LO_VS__MEM_BASE_MASK 0xffffffff
#define SPI_SHADER_TBA_LO_VS__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_TBA_HI_VS__MEM_BASE_MASK 0xff
#define SPI_SHADER_TBA_HI_VS__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_TMA_LO_VS__MEM_BASE_MASK 0xffffffff
#define SPI_SHADER_TMA_LO_VS__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_TMA_HI_VS__MEM_BASE_MASK 0xff
#define SPI_SHADER_TMA_HI_VS__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_PGM_LO_VS__MEM_BASE_MASK 0xffffffff
#define SPI_SHADER_PGM_LO_VS__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_PGM_HI_VS__MEM_BASE_MASK 0xff
#define SPI_SHADER_PGM_HI_VS__MEM_BASE__SHIFT 0x0
/* SPI_SHADER_PGM_RSRC1_VS */
#define SPI_SHADER_PGM_RSRC1_VS__VGPRS_MASK 0x3f
#define SPI_SHADER_PGM_RSRC1_VS__VGPRS__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC1_VS__SGPRS_MASK 0x3c0
#define SPI_SHADER_PGM_RSRC1_VS__SGPRS__SHIFT 0x6
#define SPI_SHADER_PGM_RSRC1_VS__PRIORITY_MASK 0xc00
#define SPI_SHADER_PGM_RSRC1_VS__PRIORITY__SHIFT 0xa
#define SPI_SHADER_PGM_RSRC1_VS__FLOAT_MODE_MASK 0xff000
#define SPI_SHADER_PGM_RSRC1_VS__FLOAT_MODE__SHIFT 0xc
#define SPI_SHADER_PGM_RSRC1_VS__PRIV_MASK 0x100000
#define SPI_SHADER_PGM_RSRC1_VS__PRIV__SHIFT 0x14
#define SPI_SHADER_PGM_RSRC1_VS__DX10_CLAMP_MASK 0x200000
#define SPI_SHADER_PGM_RSRC1_VS__DX10_CLAMP__SHIFT 0x15
#define SPI_SHADER_PGM_RSRC1_VS__DEBUG_MODE_MASK 0x400000
#define SPI_SHADER_PGM_RSRC1_VS__DEBUG_MODE__SHIFT 0x16
#define SPI_SHADER_PGM_RSRC1_VS__IEEE_MODE_MASK 0x800000
#define SPI_SHADER_PGM_RSRC1_VS__IEEE_MODE__SHIFT 0x17
#define SPI_SHADER_PGM_RSRC1_VS__VGPR_COMP_CNT_MASK 0x3000000
#define SPI_SHADER_PGM_RSRC1_VS__VGPR_COMP_CNT__SHIFT 0x18
#define SPI_SHADER_PGM_RSRC1_VS__CU_GROUP_ENABLE_MASK 0x4000000
#define SPI_SHADER_PGM_RSRC1_VS__CU_GROUP_ENABLE__SHIFT 0x1a
#define SPI_SHADER_PGM_RSRC1_VS__CACHE_CTL_MASK 0x38000000
#define SPI_SHADER_PGM_RSRC1_VS__CACHE_CTL__SHIFT 0x1b
#define SPI_SHADER_PGM_RSRC1_VS__CDBG_USER_MASK 0x40000000
#define SPI_SHADER_PGM_RSRC1_VS__CDBG_USER__SHIFT 0x1e
/* SPI_SHADER_PGM_RSRC2_VS — includes streamout (SO_*) enables */
#define SPI_SHADER_PGM_RSRC2_VS__SCRATCH_EN_MASK 0x1
#define SPI_SHADER_PGM_RSRC2_VS__SCRATCH_EN__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC2_VS__USER_SGPR_MASK 0x3e
#define SPI_SHADER_PGM_RSRC2_VS__USER_SGPR__SHIFT 0x1
#define SPI_SHADER_PGM_RSRC2_VS__TRAP_PRESENT_MASK 0x40
#define SPI_SHADER_PGM_RSRC2_VS__TRAP_PRESENT__SHIFT 0x6
#define SPI_SHADER_PGM_RSRC2_VS__OC_LDS_EN_MASK 0x80
#define SPI_SHADER_PGM_RSRC2_VS__OC_LDS_EN__SHIFT 0x7
#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE0_EN_MASK 0x100
#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE0_EN__SHIFT 0x8
#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE1_EN_MASK 0x200
#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE1_EN__SHIFT 0x9
#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE2_EN_MASK 0x400
#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE2_EN__SHIFT 0xa
#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE3_EN_MASK 0x800
#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE3_EN__SHIFT 0xb
#define SPI_SHADER_PGM_RSRC2_VS__SO_EN_MASK 0x1000
#define SPI_SHADER_PGM_RSRC2_VS__SO_EN__SHIFT 0xc
#define SPI_SHADER_PGM_RSRC2_VS__EXCP_EN_MASK 0x3fe000
#define SPI_SHADER_PGM_RSRC2_VS__EXCP_EN__SHIFT 0xd
#define SPI_SHADER_PGM_RSRC2_VS__DISPATCH_DRAW_EN_MASK 0x1000000
#define SPI_SHADER_PGM_RSRC2_VS__DISPATCH_DRAW_EN__SHIFT 0x18
#define SPI_SHADER_PGM_RSRC3_VS__CU_EN_MASK 0xffff
#define SPI_SHADER_PGM_RSRC3_VS__CU_EN__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC3_VS__WAVE_LIMIT_MASK 0x3f0000
#define SPI_SHADER_PGM_RSRC3_VS__WAVE_LIMIT__SHIFT 0x10
#define SPI_SHADER_PGM_RSRC3_VS__LOCK_LOW_THRESHOLD_MASK 0x3c00000
#define SPI_SHADER_PGM_RSRC3_VS__LOCK_LOW_THRESHOLD__SHIFT 0x16
#define SPI_SHADER_LATE_ALLOC_VS__LIMIT_MASK 0x3f
#define SPI_SHADER_LATE_ALLOC_VS__LIMIT__SHIFT 0x0
/* SPI_SHADER_USER_DATA_VS_0..15 */
#define SPI_SHADER_USER_DATA_VS_0__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_VS_0__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_VS_1__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_VS_1__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_VS_2__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_VS_2__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_VS_3__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_VS_3__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_VS_4__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_VS_4__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_VS_5__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_VS_5__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_VS_6__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_VS_6__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_VS_7__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_VS_7__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_VS_8__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_VS_8__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_VS_9__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_VS_9__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_VS_10__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_VS_10__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_VS_11__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_VS_11__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_VS_12__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_VS_12__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_VS_13__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_VS_13__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_VS_14__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_VS_14__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_VS_15__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_VS_15__DATA__SHIFT 0x0
/* RSRC2 layouts when the VS stage runs as ES or LS (on-chip GS/tess) */
#define SPI_SHADER_PGM_RSRC2_ES_VS__SCRATCH_EN_MASK 0x1
#define SPI_SHADER_PGM_RSRC2_ES_VS__SCRATCH_EN__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC2_ES_VS__USER_SGPR_MASK 0x3e
#define SPI_SHADER_PGM_RSRC2_ES_VS__USER_SGPR__SHIFT 0x1
#define SPI_SHADER_PGM_RSRC2_ES_VS__TRAP_PRESENT_MASK 0x40
#define SPI_SHADER_PGM_RSRC2_ES_VS__TRAP_PRESENT__SHIFT 0x6
#define SPI_SHADER_PGM_RSRC2_ES_VS__OC_LDS_EN_MASK 0x80
#define SPI_SHADER_PGM_RSRC2_ES_VS__OC_LDS_EN__SHIFT 0x7
#define SPI_SHADER_PGM_RSRC2_ES_VS__EXCP_EN_MASK 0x1ff00
#define SPI_SHADER_PGM_RSRC2_ES_VS__EXCP_EN__SHIFT 0x8
#define SPI_SHADER_PGM_RSRC2_ES_VS__LDS_SIZE_MASK 0x1ff00000
#define SPI_SHADER_PGM_RSRC2_ES_VS__LDS_SIZE__SHIFT 0x14
#define SPI_SHADER_PGM_RSRC2_LS_VS__SCRATCH_EN_MASK 0x1
#define SPI_SHADER_PGM_RSRC2_LS_VS__SCRATCH_EN__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC2_LS_VS__USER_SGPR_MASK 0x3e
#define SPI_SHADER_PGM_RSRC2_LS_VS__USER_SGPR__SHIFT 0x1
#define SPI_SHADER_PGM_RSRC2_LS_VS__TRAP_PRESENT_MASK 0x40
#define SPI_SHADER_PGM_RSRC2_LS_VS__TRAP_PRESENT__SHIFT 0x6
#define SPI_SHADER_PGM_RSRC2_LS_VS__LDS_SIZE_MASK 0xff80
#define SPI_SHADER_PGM_RSRC2_LS_VS__LDS_SIZE__SHIFT 0x7
#define SPI_SHADER_PGM_RSRC2_LS_VS__EXCP_EN_MASK 0x1ff0000
#define SPI_SHADER_PGM_RSRC2_LS_VS__EXCP_EN__SHIFT 0x10
/* GS stage */
#define SPI_SHADER_TBA_LO_GS__MEM_BASE_MASK 0xffffffff
#define SPI_SHADER_TBA_LO_GS__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_TBA_HI_GS__MEM_BASE_MASK 0xff
#define SPI_SHADER_TBA_HI_GS__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_TMA_LO_GS__MEM_BASE_MASK 0xffffffff
#define SPI_SHADER_TMA_LO_GS__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_TMA_HI_GS__MEM_BASE_MASK 0xff
#define SPI_SHADER_TMA_HI_GS__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_PGM_LO_GS__MEM_BASE_MASK 0xffffffff
#define SPI_SHADER_PGM_LO_GS__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_PGM_HI_GS__MEM_BASE_MASK 0xff
#define SPI_SHADER_PGM_HI_GS__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC1_GS__VGPRS_MASK 0x3f
#define SPI_SHADER_PGM_RSRC1_GS__VGPRS__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC1_GS__SGPRS_MASK 0x3c0
#define SPI_SHADER_PGM_RSRC1_GS__SGPRS__SHIFT 0x6
#define SPI_SHADER_PGM_RSRC1_GS__PRIORITY_MASK 0xc00
#define SPI_SHADER_PGM_RSRC1_GS__PRIORITY__SHIFT 0xa
#define SPI_SHADER_PGM_RSRC1_GS__FLOAT_MODE_MASK 0xff000
#define SPI_SHADER_PGM_RSRC1_GS__FLOAT_MODE__SHIFT 0xc
#define SPI_SHADER_PGM_RSRC1_GS__PRIV_MASK 0x100000
#define SPI_SHADER_PGM_RSRC1_GS__PRIV__SHIFT 0x14
#define SPI_SHADER_PGM_RSRC1_GS__DX10_CLAMP_MASK 0x200000
#define SPI_SHADER_PGM_RSRC1_GS__DX10_CLAMP__SHIFT 0x15
#define SPI_SHADER_PGM_RSRC1_GS__DEBUG_MODE_MASK 0x400000
#define SPI_SHADER_PGM_RSRC1_GS__DEBUG_MODE__SHIFT 0x16
#define SPI_SHADER_PGM_RSRC1_GS__IEEE_MODE_MASK 0x800000
#define SPI_SHADER_PGM_RSRC1_GS__IEEE_MODE__SHIFT 0x17
#define SPI_SHADER_PGM_RSRC1_GS__CU_GROUP_ENABLE_MASK 0x1000000
#define SPI_SHADER_PGM_RSRC1_GS__CU_GROUP_ENABLE__SHIFT 0x18
#define SPI_SHADER_PGM_RSRC1_GS__CACHE_CTL_MASK 0xe000000
#define SPI_SHADER_PGM_RSRC1_GS__CACHE_CTL__SHIFT 0x19
#define SPI_SHADER_PGM_RSRC1_GS__CDBG_USER_MASK 0x10000000
#define SPI_SHADER_PGM_RSRC1_GS__CDBG_USER__SHIFT 0x1c
#define SPI_SHADER_PGM_RSRC2_GS__SCRATCH_EN_MASK 0x1
#define SPI_SHADER_PGM_RSRC2_GS__SCRATCH_EN__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MASK 0x3e
#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR__SHIFT 0x1
#define SPI_SHADER_PGM_RSRC2_GS__TRAP_PRESENT_MASK 0x40
#define SPI_SHADER_PGM_RSRC2_GS__TRAP_PRESENT__SHIFT 0x6
#define SPI_SHADER_PGM_RSRC2_GS__EXCP_EN_MASK 0xff80
#define SPI_SHADER_PGM_RSRC2_GS__EXCP_EN__SHIFT 0x7
#define SPI_SHADER_PGM_RSRC3_GS__CU_EN_MASK 0xffff
#define SPI_SHADER_PGM_RSRC3_GS__CU_EN__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC3_GS__WAVE_LIMIT_MASK 0x3f0000
#define SPI_SHADER_PGM_RSRC3_GS__WAVE_LIMIT__SHIFT 0x10
#define SPI_SHADER_PGM_RSRC3_GS__LOCK_LOW_THRESHOLD_MASK 0x3c00000
#define SPI_SHADER_PGM_RSRC3_GS__LOCK_LOW_THRESHOLD__SHIFT 0x16
#define SPI_SHADER_PGM_RSRC3_GS__GROUP_FIFO_DEPTH_MASK 0xfc000000
#define SPI_SHADER_PGM_RSRC3_GS__GROUP_FIFO_DEPTH__SHIFT 0x1a
/* SPI_SHADER_USER_DATA_GS_0..15 */
#define SPI_SHADER_USER_DATA_GS_0__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_GS_0__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_GS_1__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_GS_1__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_GS_2__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_GS_2__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_GS_3__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_GS_3__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_GS_4__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_GS_4__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_GS_5__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_GS_5__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_GS_6__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_GS_6__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_GS_7__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_GS_7__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_GS_8__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_GS_8__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_GS_9__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_GS_9__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_GS_10__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_GS_10__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_GS_11__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_GS_11__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_GS_12__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_GS_12__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_GS_13__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_GS_13__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_GS_14__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_GS_14__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_GS_15__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_GS_15__DATA__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC2_ES_GS__SCRATCH_EN_MASK 0x1
#define SPI_SHADER_PGM_RSRC2_ES_GS__SCRATCH_EN__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC2_ES_GS__USER_SGPR_MASK 0x3e
#define SPI_SHADER_PGM_RSRC2_ES_GS__USER_SGPR__SHIFT 0x1
#define SPI_SHADER_PGM_RSRC2_ES_GS__TRAP_PRESENT_MASK 0x40
#define SPI_SHADER_PGM_RSRC2_ES_GS__TRAP_PRESENT__SHIFT 0x6
#define SPI_SHADER_PGM_RSRC2_ES_GS__OC_LDS_EN_MASK 0x80
#define SPI_SHADER_PGM_RSRC2_ES_GS__OC_LDS_EN__SHIFT 0x7
#define SPI_SHADER_PGM_RSRC2_ES_GS__EXCP_EN_MASK 0x1ff00
#define SPI_SHADER_PGM_RSRC2_ES_GS__EXCP_EN__SHIFT 0x8
#define SPI_SHADER_PGM_RSRC2_ES_GS__LDS_SIZE_MASK 0x1ff00000
#define SPI_SHADER_PGM_RSRC2_ES_GS__LDS_SIZE__SHIFT 0x14
/* ES stage */
#define SPI_SHADER_TBA_LO_ES__MEM_BASE_MASK 0xffffffff
#define SPI_SHADER_TBA_LO_ES__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_TBA_HI_ES__MEM_BASE_MASK 0xff
#define SPI_SHADER_TBA_HI_ES__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_TMA_LO_ES__MEM_BASE_MASK 0xffffffff
#define SPI_SHADER_TMA_LO_ES__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_TMA_HI_ES__MEM_BASE_MASK 0xff
#define SPI_SHADER_TMA_HI_ES__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_PGM_LO_ES__MEM_BASE_MASK 0xffffffff
#define SPI_SHADER_PGM_LO_ES__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_PGM_HI_ES__MEM_BASE_MASK 0xff
#define SPI_SHADER_PGM_HI_ES__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC1_ES__VGPRS_MASK 0x3f
#define SPI_SHADER_PGM_RSRC1_ES__VGPRS__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC1_ES__SGPRS_MASK 0x3c0
#define SPI_SHADER_PGM_RSRC1_ES__SGPRS__SHIFT 0x6
#define SPI_SHADER_PGM_RSRC1_ES__PRIORITY_MASK 0xc00
#define SPI_SHADER_PGM_RSRC1_ES__PRIORITY__SHIFT 0xa
#define SPI_SHADER_PGM_RSRC1_ES__FLOAT_MODE_MASK 0xff000
#define SPI_SHADER_PGM_RSRC1_ES__FLOAT_MODE__SHIFT 0xc
#define SPI_SHADER_PGM_RSRC1_ES__PRIV_MASK 0x100000
#define SPI_SHADER_PGM_RSRC1_ES__PRIV__SHIFT 0x14
#define SPI_SHADER_PGM_RSRC1_ES__DX10_CLAMP_MASK 0x200000
#define SPI_SHADER_PGM_RSRC1_ES__DX10_CLAMP__SHIFT 0x15
#define SPI_SHADER_PGM_RSRC1_ES__DEBUG_MODE_MASK 0x400000
#define SPI_SHADER_PGM_RSRC1_ES__DEBUG_MODE__SHIFT 0x16
#define SPI_SHADER_PGM_RSRC1_ES__IEEE_MODE_MASK 0x800000
#define SPI_SHADER_PGM_RSRC1_ES__IEEE_MODE__SHIFT 0x17
#define SPI_SHADER_PGM_RSRC1_ES__VGPR_COMP_CNT_MASK 0x3000000
#define SPI_SHADER_PGM_RSRC1_ES__VGPR_COMP_CNT__SHIFT 0x18
#define SPI_SHADER_PGM_RSRC1_ES__CU_GROUP_ENABLE_MASK 0x4000000
#define SPI_SHADER_PGM_RSRC1_ES__CU_GROUP_ENABLE__SHIFT 0x1a
#define SPI_SHADER_PGM_RSRC1_ES__CACHE_CTL_MASK 0x38000000
#define SPI_SHADER_PGM_RSRC1_ES__CACHE_CTL__SHIFT 0x1b
#define SPI_SHADER_PGM_RSRC1_ES__CDBG_USER_MASK 0x40000000
#define SPI_SHADER_PGM_RSRC1_ES__CDBG_USER__SHIFT 0x1e
#define SPI_SHADER_PGM_RSRC2_ES__SCRATCH_EN_MASK 0x1
#define SPI_SHADER_PGM_RSRC2_ES__SCRATCH_EN__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC2_ES__USER_SGPR_MASK 0x3e
#define SPI_SHADER_PGM_RSRC2_ES__USER_SGPR__SHIFT 0x1
#define SPI_SHADER_PGM_RSRC2_ES__TRAP_PRESENT_MASK 0x40
#define SPI_SHADER_PGM_RSRC2_ES__TRAP_PRESENT__SHIFT 0x6
#define SPI_SHADER_PGM_RSRC2_ES__OC_LDS_EN_MASK 0x80
#define SPI_SHADER_PGM_RSRC2_ES__OC_LDS_EN__SHIFT 0x7
#define SPI_SHADER_PGM_RSRC2_ES__EXCP_EN_MASK 0x1ff00
#define SPI_SHADER_PGM_RSRC2_ES__EXCP_EN__SHIFT 0x8
#define SPI_SHADER_PGM_RSRC2_ES__LDS_SIZE_MASK 0x1ff00000
#define SPI_SHADER_PGM_RSRC2_ES__LDS_SIZE__SHIFT 0x14
#define SPI_SHADER_PGM_RSRC3_ES__CU_EN_MASK 0xffff
#define SPI_SHADER_PGM_RSRC3_ES__CU_EN__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC3_ES__WAVE_LIMIT_MASK 0x3f0000
#define SPI_SHADER_PGM_RSRC3_ES__WAVE_LIMIT__SHIFT 0x10
#define SPI_SHADER_PGM_RSRC3_ES__LOCK_LOW_THRESHOLD_MASK 0x3c00000
#define SPI_SHADER_PGM_RSRC3_ES__LOCK_LOW_THRESHOLD__SHIFT 0x16
#define SPI_SHADER_PGM_RSRC3_ES__GROUP_FIFO_DEPTH_MASK 0xfc000000
#define SPI_SHADER_PGM_RSRC3_ES__GROUP_FIFO_DEPTH__SHIFT 0x1a
/* SPI_SHADER_USER_DATA_ES_0..15 */
#define SPI_SHADER_USER_DATA_ES_0__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_ES_0__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_ES_1__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_ES_1__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_ES_2__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_ES_2__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_ES_3__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_ES_3__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_ES_4__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_ES_4__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_ES_5__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_ES_5__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_ES_6__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_ES_6__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_ES_7__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_ES_7__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_ES_8__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_ES_8__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_ES_9__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_ES_9__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_ES_10__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_ES_10__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_ES_11__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_ES_11__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_ES_12__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_ES_12__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_ES_13__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_ES_13__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_ES_14__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_ES_14__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_ES_15__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_ES_15__DATA__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC2_LS_ES__SCRATCH_EN_MASK 0x1
#define SPI_SHADER_PGM_RSRC2_LS_ES__SCRATCH_EN__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC2_LS_ES__USER_SGPR_MASK 0x3e
#define SPI_SHADER_PGM_RSRC2_LS_ES__USER_SGPR__SHIFT 0x1
#define SPI_SHADER_PGM_RSRC2_LS_ES__TRAP_PRESENT_MASK 0x40
#define SPI_SHADER_PGM_RSRC2_LS_ES__TRAP_PRESENT__SHIFT 0x6
#define SPI_SHADER_PGM_RSRC2_LS_ES__LDS_SIZE_MASK 0xff80
#define SPI_SHADER_PGM_RSRC2_LS_ES__LDS_SIZE__SHIFT 0x7
#define SPI_SHADER_PGM_RSRC2_LS_ES__EXCP_EN_MASK 0x1ff0000
#define SPI_SHADER_PGM_RSRC2_LS_ES__EXCP_EN__SHIFT 0x10
/* HS stage */
#define SPI_SHADER_TBA_LO_HS__MEM_BASE_MASK 0xffffffff
#define SPI_SHADER_TBA_LO_HS__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_TBA_HI_HS__MEM_BASE_MASK 0xff
#define SPI_SHADER_TBA_HI_HS__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_TMA_LO_HS__MEM_BASE_MASK 0xffffffff
#define SPI_SHADER_TMA_LO_HS__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_TMA_HI_HS__MEM_BASE_MASK 0xff
#define SPI_SHADER_TMA_HI_HS__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_PGM_LO_HS__MEM_BASE_MASK 0xffffffff
#define SPI_SHADER_PGM_LO_HS__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_PGM_HI_HS__MEM_BASE_MASK 0xff
#define SPI_SHADER_PGM_HI_HS__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC1_HS__VGPRS_MASK 0x3f
#define SPI_SHADER_PGM_RSRC1_HS__VGPRS__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC1_HS__SGPRS_MASK 0x3c0
#define SPI_SHADER_PGM_RSRC1_HS__SGPRS__SHIFT 0x6
#define SPI_SHADER_PGM_RSRC1_HS__PRIORITY_MASK 0xc00
#define SPI_SHADER_PGM_RSRC1_HS__PRIORITY__SHIFT 0xa
#define SPI_SHADER_PGM_RSRC1_HS__FLOAT_MODE_MASK 0xff000
#define SPI_SHADER_PGM_RSRC1_HS__FLOAT_MODE__SHIFT 0xc
#define SPI_SHADER_PGM_RSRC1_HS__PRIV_MASK 0x100000
#define SPI_SHADER_PGM_RSRC1_HS__PRIV__SHIFT 0x14
#define SPI_SHADER_PGM_RSRC1_HS__DX10_CLAMP_MASK 0x200000
#define SPI_SHADER_PGM_RSRC1_HS__DX10_CLAMP__SHIFT 0x15
#define SPI_SHADER_PGM_RSRC1_HS__DEBUG_MODE_MASK 0x400000
#define SPI_SHADER_PGM_RSRC1_HS__DEBUG_MODE__SHIFT 0x16
#define SPI_SHADER_PGM_RSRC1_HS__IEEE_MODE_MASK 0x800000
#define SPI_SHADER_PGM_RSRC1_HS__IEEE_MODE__SHIFT 0x17
#define SPI_SHADER_PGM_RSRC1_HS__CACHE_CTL_MASK 0x7000000
#define SPI_SHADER_PGM_RSRC1_HS__CACHE_CTL__SHIFT 0x18
#define SPI_SHADER_PGM_RSRC1_HS__CDBG_USER_MASK 0x8000000
#define SPI_SHADER_PGM_RSRC1_HS__CDBG_USER__SHIFT 0x1b
#define SPI_SHADER_PGM_RSRC2_HS__SCRATCH_EN_MASK 0x1
#define SPI_SHADER_PGM_RSRC2_HS__SCRATCH_EN__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MASK 0x3e
#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR__SHIFT 0x1
#define SPI_SHADER_PGM_RSRC2_HS__TRAP_PRESENT_MASK 0x40
#define SPI_SHADER_PGM_RSRC2_HS__TRAP_PRESENT__SHIFT 0x6
#define SPI_SHADER_PGM_RSRC2_HS__OC_LDS_EN_MASK 0x80
#define SPI_SHADER_PGM_RSRC2_HS__OC_LDS_EN__SHIFT 0x7
#define SPI_SHADER_PGM_RSRC2_HS__TG_SIZE_EN_MASK 0x100
#define SPI_SHADER_PGM_RSRC2_HS__TG_SIZE_EN__SHIFT 0x8
#define SPI_SHADER_PGM_RSRC2_HS__EXCP_EN_MASK 0x3fe00
#define SPI_SHADER_PGM_RSRC2_HS__EXCP_EN__SHIFT 0x9
#define SPI_SHADER_PGM_RSRC3_HS__WAVE_LIMIT_MASK 0x3f
#define SPI_SHADER_PGM_RSRC3_HS__WAVE_LIMIT__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC3_HS__LOCK_LOW_THRESHOLD_MASK 0x3c0
#define SPI_SHADER_PGM_RSRC3_HS__LOCK_LOW_THRESHOLD__SHIFT 0x6
#define SPI_SHADER_PGM_RSRC3_HS__GROUP_FIFO_DEPTH_MASK 0xfc00
#define SPI_SHADER_PGM_RSRC3_HS__GROUP_FIFO_DEPTH__SHIFT 0xa
/* SPI_SHADER_USER_DATA_HS_0..15 */
#define SPI_SHADER_USER_DATA_HS_0__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_HS_0__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_HS_1__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_HS_1__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_HS_2__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_HS_2__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_HS_3__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_HS_3__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_HS_4__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_HS_4__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_HS_5__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_HS_5__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_HS_6__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_HS_6__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_HS_7__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_HS_7__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_HS_8__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_HS_8__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_HS_9__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_HS_9__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_HS_10__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_HS_10__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_HS_11__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_HS_11__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_HS_12__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_HS_12__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_HS_13__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_HS_13__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_HS_14__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_HS_14__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_HS_15__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_HS_15__DATA__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC2_LS_HS__SCRATCH_EN_MASK 0x1
#define SPI_SHADER_PGM_RSRC2_LS_HS__SCRATCH_EN__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC2_LS_HS__USER_SGPR_MASK 0x3e
#define SPI_SHADER_PGM_RSRC2_LS_HS__USER_SGPR__SHIFT 0x1
#define SPI_SHADER_PGM_RSRC2_LS_HS__TRAP_PRESENT_MASK 0x40
#define SPI_SHADER_PGM_RSRC2_LS_HS__TRAP_PRESENT__SHIFT 0x6
#define SPI_SHADER_PGM_RSRC2_LS_HS__LDS_SIZE_MASK 0xff80
#define SPI_SHADER_PGM_RSRC2_LS_HS__LDS_SIZE__SHIFT 0x7
#define SPI_SHADER_PGM_RSRC2_LS_HS__EXCP_EN_MASK 0x1ff0000
#define SPI_SHADER_PGM_RSRC2_LS_HS__EXCP_EN__SHIFT 0x10
/* LS stage */
#define SPI_SHADER_TBA_LO_LS__MEM_BASE_MASK 0xffffffff
#define SPI_SHADER_TBA_LO_LS__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_TBA_HI_LS__MEM_BASE_MASK 0xff
#define SPI_SHADER_TBA_HI_LS__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_TMA_LO_LS__MEM_BASE_MASK 0xffffffff
#define SPI_SHADER_TMA_LO_LS__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_TMA_HI_LS__MEM_BASE_MASK 0xff
#define SPI_SHADER_TMA_HI_LS__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_PGM_LO_LS__MEM_BASE_MASK 0xffffffff
#define SPI_SHADER_PGM_LO_LS__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_PGM_HI_LS__MEM_BASE_MASK 0xff
#define SPI_SHADER_PGM_HI_LS__MEM_BASE__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC1_LS__VGPRS_MASK 0x3f
#define SPI_SHADER_PGM_RSRC1_LS__VGPRS__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC1_LS__SGPRS_MASK 0x3c0
#define SPI_SHADER_PGM_RSRC1_LS__SGPRS__SHIFT 0x6
#define SPI_SHADER_PGM_RSRC1_LS__PRIORITY_MASK 0xc00
#define SPI_SHADER_PGM_RSRC1_LS__PRIORITY__SHIFT 0xa
#define SPI_SHADER_PGM_RSRC1_LS__FLOAT_MODE_MASK 0xff000
#define SPI_SHADER_PGM_RSRC1_LS__FLOAT_MODE__SHIFT 0xc
#define SPI_SHADER_PGM_RSRC1_LS__PRIV_MASK 0x100000
#define SPI_SHADER_PGM_RSRC1_LS__PRIV__SHIFT 0x14
#define SPI_SHADER_PGM_RSRC1_LS__DX10_CLAMP_MASK 0x200000
#define SPI_SHADER_PGM_RSRC1_LS__DX10_CLAMP__SHIFT 0x15
#define SPI_SHADER_PGM_RSRC1_LS__DEBUG_MODE_MASK 0x400000
#define SPI_SHADER_PGM_RSRC1_LS__DEBUG_MODE__SHIFT 0x16
#define SPI_SHADER_PGM_RSRC1_LS__IEEE_MODE_MASK 0x800000
#define SPI_SHADER_PGM_RSRC1_LS__IEEE_MODE__SHIFT 0x17
#define SPI_SHADER_PGM_RSRC1_LS__VGPR_COMP_CNT_MASK 0x3000000
#define SPI_SHADER_PGM_RSRC1_LS__VGPR_COMP_CNT__SHIFT 0x18
#define SPI_SHADER_PGM_RSRC1_LS__CACHE_CTL_MASK 0x1c000000
#define SPI_SHADER_PGM_RSRC1_LS__CACHE_CTL__SHIFT 0x1a
#define SPI_SHADER_PGM_RSRC1_LS__CDBG_USER_MASK 0x20000000
#define SPI_SHADER_PGM_RSRC1_LS__CDBG_USER__SHIFT 0x1d
#define SPI_SHADER_PGM_RSRC2_LS__SCRATCH_EN_MASK 0x1
#define SPI_SHADER_PGM_RSRC2_LS__SCRATCH_EN__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC2_LS__USER_SGPR_MASK 0x3e
#define SPI_SHADER_PGM_RSRC2_LS__USER_SGPR__SHIFT 0x1
#define SPI_SHADER_PGM_RSRC2_LS__TRAP_PRESENT_MASK 0x40
#define SPI_SHADER_PGM_RSRC2_LS__TRAP_PRESENT__SHIFT 0x6
#define SPI_SHADER_PGM_RSRC2_LS__LDS_SIZE_MASK 0xff80
#define SPI_SHADER_PGM_RSRC2_LS__LDS_SIZE__SHIFT 0x7
#define SPI_SHADER_PGM_RSRC2_LS__EXCP_EN_MASK 0x1ff0000
#define SPI_SHADER_PGM_RSRC2_LS__EXCP_EN__SHIFT 0x10
#define SPI_SHADER_PGM_RSRC3_LS__CU_EN_MASK 0xffff
#define SPI_SHADER_PGM_RSRC3_LS__CU_EN__SHIFT 0x0
#define SPI_SHADER_PGM_RSRC3_LS__WAVE_LIMIT_MASK 0x3f0000
#define SPI_SHADER_PGM_RSRC3_LS__WAVE_LIMIT__SHIFT 0x10
#define SPI_SHADER_PGM_RSRC3_LS__LOCK_LOW_THRESHOLD_MASK 0x3c00000
#define SPI_SHADER_PGM_RSRC3_LS__LOCK_LOW_THRESHOLD__SHIFT 0x16
#define SPI_SHADER_PGM_RSRC3_LS__GROUP_FIFO_DEPTH_MASK 0xfc000000
#define SPI_SHADER_PGM_RSRC3_LS__GROUP_FIFO_DEPTH__SHIFT 0x1a
/* SPI_SHADER_USER_DATA_LS_0..15 */
#define SPI_SHADER_USER_DATA_LS_0__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_LS_0__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_LS_1__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_LS_1__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_LS_2__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_LS_2__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_LS_3__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_LS_3__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_LS_4__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_LS_4__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_LS_5__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_LS_5__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_LS_6__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_LS_6__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_LS_7__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_LS_7__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_LS_8__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_LS_8__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_LS_9__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_LS_9__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_LS_10__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_LS_10__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_LS_11__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_LS_11__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_LS_12__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_LS_12__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_LS_13__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_LS_13__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_LS_14__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_LS_14__DATA__SHIFT 0x0
#define SPI_SHADER_USER_DATA_LS_15__DATA_MASK 0xffffffff
#define SPI_SHADER_USER_DATA_LS_15__DATA__SHIFT 0x0
/* SQ_CONFIG — sequencer (SQ) debug/behavior controls */
#define SQ_CONFIG__UNUSED_MASK 0xff
#define SQ_CONFIG__UNUSED__SHIFT 0x0
#define SQ_CONFIG__DEBUG_EN_MASK 0x100
#define SQ_CONFIG__DEBUG_EN__SHIFT 0x8
#define SQ_CONFIG__DEBUG_SINGLE_MEMOP_MASK 0x200
#define SQ_CONFIG__DEBUG_SINGLE_MEMOP__SHIFT 0x9
#define SQ_CONFIG__DEBUG_ONE_INST_CLAUSE_MASK 0x400
#define SQ_CONFIG__DEBUG_ONE_INST_CLAUSE__SHIFT 0xa
#define SQ_CONFIG__EARLY_TA_DONE_DISABLE_MASK 0x1000
#define SQ_CONFIG__EARLY_TA_DONE_DISABLE__SHIFT 0xc
#define SQ_CONFIG__DUA_FLAT_LOCK_ENABLE_MASK 0x2000
#define SQ_CONFIG__DUA_FLAT_LOCK_ENABLE__SHIFT 0xd
#define SQ_CONFIG__DUA_LDS_BYPASS_DISABLE_MASK 0x4000
#define SQ_CONFIG__DUA_LDS_BYPASS_DISABLE__SHIFT 0xe
#define SQ_CONFIG__DUA_FLAT_LDS_PINGPONG_DISABLE_MASK 0x8000
#define SQ_CONFIG__DUA_FLAT_LDS_PINGPONG_DISABLE__SHIFT 0xf
#define SQ_CONFIG__DISABLE_VMEM_SOFT_CLAUSE_MASK 0x10000
#define SQ_CONFIG__DISABLE_VMEM_SOFT_CLAUSE__SHIFT 0x10
#define SQ_CONFIG__DISABLE_SMEM_SOFT_CLAUSE_MASK 0x20000
#define SQ_CONFIG__DISABLE_SMEM_SOFT_CLAUSE__SHIFT 0x11
#define SQ_CONFIG__ENABLE_HIPRIO_ON_EXP_RDY_VS_MASK 0x40000
#define SQ_CONFIG__ENABLE_HIPRIO_ON_EXP_RDY_VS__SHIFT 0x12
#define SQ_CONFIG__PRIO_VAL_ON_EXP_RDY_VS_MASK 0x180000
#define SQ_CONFIG__PRIO_VAL_ON_EXP_RDY_VS__SHIFT 0x13
#define SQ_CONFIG__REPLAY_SLEEP_CNT_MASK 0x1e00000
#define SQ_CONFIG__REPLAY_SLEEP_CNT__SHIFT 0x15
/* SQC_CONFIG — SQ cache (SQC) configuration */
#define SQC_CONFIG__INST_CACHE_SIZE_MASK 0x3
#define SQC_CONFIG__INST_CACHE_SIZE__SHIFT 0x0
#define SQC_CONFIG__DATA_CACHE_SIZE_MASK 0xc
#define SQC_CONFIG__DATA_CACHE_SIZE__SHIFT 0x2
#define SQC_CONFIG__MISS_FIFO_DEPTH_MASK 0x30
#define SQC_CONFIG__MISS_FIFO_DEPTH__SHIFT 0x4
#define SQC_CONFIG__HIT_FIFO_DEPTH_MASK 0x40
#define SQC_CONFIG__HIT_FIFO_DEPTH__SHIFT 0x6
#define SQC_CONFIG__FORCE_ALWAYS_MISS_MASK 0x80
#define SQC_CONFIG__FORCE_ALWAYS_MISS__SHIFT 0x7
#define SQC_CONFIG__FORCE_IN_ORDER_MASK 0x100
#define SQC_CONFIG__FORCE_IN_ORDER__SHIFT 0x8
#define SQC_CONFIG__IDENTITY_HASH_BANK_MASK 0x200
#define SQC_CONFIG__IDENTITY_HASH_BANK__SHIFT 0x9
#define SQC_CONFIG__IDENTITY_HASH_SET_MASK 0x400
#define SQC_CONFIG__IDENTITY_HASH_SET__SHIFT 0xa
#define SQC_CONFIG__PER_VMID_INV_DISABLE_MASK 0x800
#define SQC_CONFIG__PER_VMID_INV_DISABLE__SHIFT 0xb
#define SQC_CONFIG__EVICT_LRU_MASK 0x3000
#define SQC_CONFIG__EVICT_LRU__SHIFT 0xc
#define SQC_CONFIG__FORCE_2_BANK_MASK 0x4000
#define SQC_CONFIG__FORCE_2_BANK__SHIFT 0xe
#define SQC_CONFIG__FORCE_1_BANK_MASK 0x8000
#define SQC_CONFIG__FORCE_1_BANK__SHIFT 0xf
#define SQC_CONFIG__LS_DISABLE_CLOCKS_MASK 0xff0000
#define SQC_CONFIG__LS_DISABLE_CLOCKS__SHIFT 0x10
/* SQC_CACHES — cache invalidation targets (TARGET_DATA__SHIFT follows below) */
#define SQC_CACHES__TARGET_INST_MASK 0x1
#define SQC_CACHES__TARGET_INST__SHIFT 0x0
#define SQC_CACHES__TARGET_DATA_MASK 0x2
13878#define SQC_CACHES__TARGET_DATA__SHIFT 0x1
13879#define SQC_CACHES__INVALIDATE_MASK 0x4
13880#define SQC_CACHES__INVALIDATE__SHIFT 0x2
13881#define SQC_CACHES__WRITEBACK_MASK 0x8
13882#define SQC_CACHES__WRITEBACK__SHIFT 0x3
13883#define SQC_CACHES__VOL_MASK 0x10
13884#define SQC_CACHES__VOL__SHIFT 0x4
13885#define SQC_CACHES__COMPLETE_MASK 0x10000
13886#define SQC_CACHES__COMPLETE__SHIFT 0x10
13887#define SQC_WRITEBACK__DWB_MASK 0x1
13888#define SQC_WRITEBACK__DWB__SHIFT 0x0
13889#define SQC_WRITEBACK__DIRTY_MASK 0x2
13890#define SQC_WRITEBACK__DIRTY__SHIFT 0x1
13891#define SQC_DSM_CNTL__SEL_DATA_ICACHE_BANKA_MASK 0x3
13892#define SQC_DSM_CNTL__SEL_DATA_ICACHE_BANKA__SHIFT 0x0
13893#define SQC_DSM_CNTL__EN_SINGLE_WR_ICACHE_BANKA_MASK 0x4
13894#define SQC_DSM_CNTL__EN_SINGLE_WR_ICACHE_BANKA__SHIFT 0x2
13895#define SQC_DSM_CNTL__SEL_DATA_ICACHE_BANKB_MASK 0x18
13896#define SQC_DSM_CNTL__SEL_DATA_ICACHE_BANKB__SHIFT 0x3
13897#define SQC_DSM_CNTL__EN_SINGLE_WR_ICACHE_BANKB_MASK 0x20
13898#define SQC_DSM_CNTL__EN_SINGLE_WR_ICACHE_BANKB__SHIFT 0x5
13899#define SQC_DSM_CNTL__SEL_DATA_ICACHE_BANKC_MASK 0xc0
13900#define SQC_DSM_CNTL__SEL_DATA_ICACHE_BANKC__SHIFT 0x6
13901#define SQC_DSM_CNTL__EN_SINGLE_WR_ICACHE_BANKC_MASK 0x100
13902#define SQC_DSM_CNTL__EN_SINGLE_WR_ICACHE_BANKC__SHIFT 0x8
13903#define SQC_DSM_CNTL__SEL_DATA_ICACHE_BANKD_MASK 0x600
13904#define SQC_DSM_CNTL__SEL_DATA_ICACHE_BANKD__SHIFT 0x9
13905#define SQC_DSM_CNTL__EN_SINGLE_WR_ICACHE_BANKD_MASK 0x800
13906#define SQC_DSM_CNTL__EN_SINGLE_WR_ICACHE_BANKD__SHIFT 0xb
13907#define SQC_DSM_CNTL__SEL_DATA_ICACHE_GATCL1_MASK 0x3000
13908#define SQC_DSM_CNTL__SEL_DATA_ICACHE_GATCL1__SHIFT 0xc
13909#define SQC_DSM_CNTL__EN_SINGLE_WR_ICACHE_GATCL1_MASK 0x4000
13910#define SQC_DSM_CNTL__EN_SINGLE_WR_ICACHE_GATCL1__SHIFT 0xe
13911#define SQC_DSM_CNTL__SEL_DATA_DCACHE_BANKA_MASK 0x18000
13912#define SQC_DSM_CNTL__SEL_DATA_DCACHE_BANKA__SHIFT 0xf
13913#define SQC_DSM_CNTL__EN_SINGLE_WR_DCACHE_BANKA_MASK 0x20000
13914#define SQC_DSM_CNTL__EN_SINGLE_WR_DCACHE_BANKA__SHIFT 0x11
13915#define SQC_DSM_CNTL__SEL_DATA_DCACHE_BANKB_MASK 0xc0000
13916#define SQC_DSM_CNTL__SEL_DATA_DCACHE_BANKB__SHIFT 0x12
13917#define SQC_DSM_CNTL__EN_SINGLE_WR_DCACHE_BANKB_MASK 0x100000
13918#define SQC_DSM_CNTL__EN_SINGLE_WR_DCACHE_BANKB__SHIFT 0x14
13919#define SQC_DSM_CNTL__SEL_DATA_DCACHE_BANKC_MASK 0x600000
13920#define SQC_DSM_CNTL__SEL_DATA_DCACHE_BANKC__SHIFT 0x15
13921#define SQC_DSM_CNTL__EN_SINGLE_WR_DCACHE_BANKC_MASK 0x800000
13922#define SQC_DSM_CNTL__EN_SINGLE_WR_DCACHE_BANKC__SHIFT 0x17
13923#define SQC_DSM_CNTL__SEL_DATA_DCACHE_BANKD_MASK 0x3000000
13924#define SQC_DSM_CNTL__SEL_DATA_DCACHE_BANKD__SHIFT 0x18
13925#define SQC_DSM_CNTL__EN_SINGLE_WR_DCACHE_BANKD_MASK 0x4000000
13926#define SQC_DSM_CNTL__EN_SINGLE_WR_DCACHE_BANKD__SHIFT 0x1a
13927#define SQC_DSM_CNTL__SEL_DATA_DCACHE_GATCL1_MASK 0x18000000
13928#define SQC_DSM_CNTL__SEL_DATA_DCACHE_GATCL1__SHIFT 0x1b
13929#define SQC_DSM_CNTL__EN_SINGLE_WR_DCACHE_GATCL1_MASK 0x20000000
13930#define SQC_DSM_CNTL__EN_SINGLE_WR_DCACHE_GATCL1__SHIFT 0x1d
13931#define SQ_RANDOM_WAVE_PRI__RET_MASK 0x7f
13932#define SQ_RANDOM_WAVE_PRI__RET__SHIFT 0x0
13933#define SQ_RANDOM_WAVE_PRI__RUI_MASK 0x380
13934#define SQ_RANDOM_WAVE_PRI__RUI__SHIFT 0x7
13935#define SQ_RANDOM_WAVE_PRI__RNG_MASK 0x1ffc00
13936#define SQ_RANDOM_WAVE_PRI__RNG__SHIFT 0xa
13937#define SQ_REG_CREDITS__SRBM_CREDITS_MASK 0x3f
13938#define SQ_REG_CREDITS__SRBM_CREDITS__SHIFT 0x0
13939#define SQ_REG_CREDITS__CMD_CREDITS_MASK 0xf00
13940#define SQ_REG_CREDITS__CMD_CREDITS__SHIFT 0x8
13941#define SQ_REG_CREDITS__REG_BUSY_MASK 0x10000000
13942#define SQ_REG_CREDITS__REG_BUSY__SHIFT 0x1c
13943#define SQ_REG_CREDITS__SRBM_OVERFLOW_MASK 0x20000000
13944#define SQ_REG_CREDITS__SRBM_OVERFLOW__SHIFT 0x1d
13945#define SQ_REG_CREDITS__IMMED_OVERFLOW_MASK 0x40000000
13946#define SQ_REG_CREDITS__IMMED_OVERFLOW__SHIFT 0x1e
13947#define SQ_REG_CREDITS__CMD_OVERFLOW_MASK 0x80000000
13948#define SQ_REG_CREDITS__CMD_OVERFLOW__SHIFT 0x1f
13949#define SQ_FIFO_SIZES__INTERRUPT_FIFO_SIZE_MASK 0xf
13950#define SQ_FIFO_SIZES__INTERRUPT_FIFO_SIZE__SHIFT 0x0
13951#define SQ_FIFO_SIZES__TTRACE_FIFO_SIZE_MASK 0xf00
13952#define SQ_FIFO_SIZES__TTRACE_FIFO_SIZE__SHIFT 0x8
13953#define SQ_FIFO_SIZES__EXPORT_BUF_SIZE_MASK 0x30000
13954#define SQ_FIFO_SIZES__EXPORT_BUF_SIZE__SHIFT 0x10
13955#define SQ_FIFO_SIZES__VMEM_DATA_FIFO_SIZE_MASK 0xc0000
13956#define SQ_FIFO_SIZES__VMEM_DATA_FIFO_SIZE__SHIFT 0x12
13957#define SQ_DSM_CNTL__WAVEFRONT_STALL_0_MASK 0x1
13958#define SQ_DSM_CNTL__WAVEFRONT_STALL_0__SHIFT 0x0
13959#define SQ_DSM_CNTL__WAVEFRONT_STALL_1_MASK 0x2
13960#define SQ_DSM_CNTL__WAVEFRONT_STALL_1__SHIFT 0x1
13961#define SQ_DSM_CNTL__SPI_BACKPRESSURE_0_MASK 0x4
13962#define SQ_DSM_CNTL__SPI_BACKPRESSURE_0__SHIFT 0x2
13963#define SQ_DSM_CNTL__SPI_BACKPRESSURE_1_MASK 0x8
13964#define SQ_DSM_CNTL__SPI_BACKPRESSURE_1__SHIFT 0x3
13965#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA0_MASK 0x100
13966#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA0__SHIFT 0x8
13967#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA1_MASK 0x200
13968#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA1__SHIFT 0x9
13969#define SQ_DSM_CNTL__SGPR_ENABLE_SINGLE_WRITE_MASK 0x400
13970#define SQ_DSM_CNTL__SGPR_ENABLE_SINGLE_WRITE__SHIFT 0xa
13971#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA0_MASK 0x10000
13972#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA0__SHIFT 0x10
13973#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA1_MASK 0x20000
13974#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA1__SHIFT 0x11
13975#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE01_MASK 0x40000
13976#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE01__SHIFT 0x12
13977#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA2_MASK 0x80000
13978#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA2__SHIFT 0x13
13979#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA3_MASK 0x100000
13980#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA3__SHIFT 0x14
13981#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE23_MASK 0x200000
13982#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE23__SHIFT 0x15
13983#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA0_MASK 0x1000000
13984#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA0__SHIFT 0x18
13985#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA1_MASK 0x2000000
13986#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA1__SHIFT 0x19
13987#define SQ_DSM_CNTL__SP_ENABLE_SINGLE_WRITE_MASK 0x4000000
13988#define SQ_DSM_CNTL__SP_ENABLE_SINGLE_WRITE__SHIFT 0x1a
13989#define CC_GC_SHADER_RATE_CONFIG__DPFP_RATE_MASK 0x6
13990#define CC_GC_SHADER_RATE_CONFIG__DPFP_RATE__SHIFT 0x1
13991#define CC_GC_SHADER_RATE_CONFIG__SQC_BALANCE_DISABLE_MASK 0x8
13992#define CC_GC_SHADER_RATE_CONFIG__SQC_BALANCE_DISABLE__SHIFT 0x3
13993#define CC_GC_SHADER_RATE_CONFIG__HALF_LDS_MASK 0x10
13994#define CC_GC_SHADER_RATE_CONFIG__HALF_LDS__SHIFT 0x4
13995#define GC_USER_SHADER_RATE_CONFIG__DPFP_RATE_MASK 0x6
13996#define GC_USER_SHADER_RATE_CONFIG__DPFP_RATE__SHIFT 0x1
13997#define GC_USER_SHADER_RATE_CONFIG__SQC_BALANCE_DISABLE_MASK 0x8
13998#define GC_USER_SHADER_RATE_CONFIG__SQC_BALANCE_DISABLE__SHIFT 0x3
13999#define GC_USER_SHADER_RATE_CONFIG__HALF_LDS_MASK 0x10
14000#define GC_USER_SHADER_RATE_CONFIG__HALF_LDS__SHIFT 0x4
14001#define SQ_INTERRUPT_AUTO_MASK__MASK_MASK 0xffffff
14002#define SQ_INTERRUPT_AUTO_MASK__MASK__SHIFT 0x0
14003#define SQ_INTERRUPT_MSG_CTRL__STALL_MASK 0x1
14004#define SQ_INTERRUPT_MSG_CTRL__STALL__SHIFT 0x0
14005#define SQ_PERFCOUNTER_CTRL__PS_EN_MASK 0x1
14006#define SQ_PERFCOUNTER_CTRL__PS_EN__SHIFT 0x0
14007#define SQ_PERFCOUNTER_CTRL__VS_EN_MASK 0x2
14008#define SQ_PERFCOUNTER_CTRL__VS_EN__SHIFT 0x1
14009#define SQ_PERFCOUNTER_CTRL__GS_EN_MASK 0x4
14010#define SQ_PERFCOUNTER_CTRL__GS_EN__SHIFT 0x2
14011#define SQ_PERFCOUNTER_CTRL__ES_EN_MASK 0x8
14012#define SQ_PERFCOUNTER_CTRL__ES_EN__SHIFT 0x3
14013#define SQ_PERFCOUNTER_CTRL__HS_EN_MASK 0x10
14014#define SQ_PERFCOUNTER_CTRL__HS_EN__SHIFT 0x4
14015#define SQ_PERFCOUNTER_CTRL__LS_EN_MASK 0x20
14016#define SQ_PERFCOUNTER_CTRL__LS_EN__SHIFT 0x5
14017#define SQ_PERFCOUNTER_CTRL__CS_EN_MASK 0x40
14018#define SQ_PERFCOUNTER_CTRL__CS_EN__SHIFT 0x6
14019#define SQ_PERFCOUNTER_CTRL__CNTR_RATE_MASK 0x1f00
14020#define SQ_PERFCOUNTER_CTRL__CNTR_RATE__SHIFT 0x8
14021#define SQ_PERFCOUNTER_CTRL__DISABLE_FLUSH_MASK 0x2000
14022#define SQ_PERFCOUNTER_CTRL__DISABLE_FLUSH__SHIFT 0xd
14023#define SQ_PERFCOUNTER_MASK__SH0_MASK_MASK 0xffff
14024#define SQ_PERFCOUNTER_MASK__SH0_MASK__SHIFT 0x0
14025#define SQ_PERFCOUNTER_MASK__SH1_MASK_MASK 0xffff0000
14026#define SQ_PERFCOUNTER_MASK__SH1_MASK__SHIFT 0x10
14027#define SQ_PERFCOUNTER_CTRL2__FORCE_EN_MASK 0x1
14028#define SQ_PERFCOUNTER_CTRL2__FORCE_EN__SHIFT 0x0
14029#define CC_SQC_BANK_DISABLE__SQC0_BANK_DISABLE_MASK 0xf0000
14030#define CC_SQC_BANK_DISABLE__SQC0_BANK_DISABLE__SHIFT 0x10
14031#define CC_SQC_BANK_DISABLE__SQC1_BANK_DISABLE_MASK 0xf00000
14032#define CC_SQC_BANK_DISABLE__SQC1_BANK_DISABLE__SHIFT 0x14
14033#define CC_SQC_BANK_DISABLE__SQC2_BANK_DISABLE_MASK 0xf000000
14034#define CC_SQC_BANK_DISABLE__SQC2_BANK_DISABLE__SHIFT 0x18
14035#define CC_SQC_BANK_DISABLE__SQC3_BANK_DISABLE_MASK 0xf0000000
14036#define CC_SQC_BANK_DISABLE__SQC3_BANK_DISABLE__SHIFT 0x1c
14037#define USER_SQC_BANK_DISABLE__SQC0_BANK_DISABLE_MASK 0xf0000
14038#define USER_SQC_BANK_DISABLE__SQC0_BANK_DISABLE__SHIFT 0x10
14039#define USER_SQC_BANK_DISABLE__SQC1_BANK_DISABLE_MASK 0xf00000
14040#define USER_SQC_BANK_DISABLE__SQC1_BANK_DISABLE__SHIFT 0x14
14041#define USER_SQC_BANK_DISABLE__SQC2_BANK_DISABLE_MASK 0xf000000
14042#define USER_SQC_BANK_DISABLE__SQC2_BANK_DISABLE__SHIFT 0x18
14043#define USER_SQC_BANK_DISABLE__SQC3_BANK_DISABLE_MASK 0xf0000000
14044#define USER_SQC_BANK_DISABLE__SQC3_BANK_DISABLE__SHIFT 0x1c
14045#define SQ_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffff
14046#define SQ_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
14047#define SQ_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffff
14048#define SQ_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
14049#define SQ_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffff
14050#define SQ_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
14051#define SQ_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffff
14052#define SQ_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
14053#define SQ_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xffffffff
14054#define SQ_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
14055#define SQ_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xffffffff
14056#define SQ_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
14057#define SQ_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xffffffff
14058#define SQ_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x0
14059#define SQ_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xffffffff
14060#define SQ_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x0
14061#define SQ_PERFCOUNTER8_LO__PERFCOUNTER_LO_MASK 0xffffffff
14062#define SQ_PERFCOUNTER8_LO__PERFCOUNTER_LO__SHIFT 0x0
14063#define SQ_PERFCOUNTER9_LO__PERFCOUNTER_LO_MASK 0xffffffff
14064#define SQ_PERFCOUNTER9_LO__PERFCOUNTER_LO__SHIFT 0x0
14065#define SQ_PERFCOUNTER10_LO__PERFCOUNTER_LO_MASK 0xffffffff
14066#define SQ_PERFCOUNTER10_LO__PERFCOUNTER_LO__SHIFT 0x0
14067#define SQ_PERFCOUNTER11_LO__PERFCOUNTER_LO_MASK 0xffffffff
14068#define SQ_PERFCOUNTER11_LO__PERFCOUNTER_LO__SHIFT 0x0
14069#define SQ_PERFCOUNTER12_LO__PERFCOUNTER_LO_MASK 0xffffffff
14070#define SQ_PERFCOUNTER12_LO__PERFCOUNTER_LO__SHIFT 0x0
14071#define SQ_PERFCOUNTER13_LO__PERFCOUNTER_LO_MASK 0xffffffff
14072#define SQ_PERFCOUNTER13_LO__PERFCOUNTER_LO__SHIFT 0x0
14073#define SQ_PERFCOUNTER14_LO__PERFCOUNTER_LO_MASK 0xffffffff
14074#define SQ_PERFCOUNTER14_LO__PERFCOUNTER_LO__SHIFT 0x0
14075#define SQ_PERFCOUNTER15_LO__PERFCOUNTER_LO_MASK 0xffffffff
14076#define SQ_PERFCOUNTER15_LO__PERFCOUNTER_LO__SHIFT 0x0
14077#define SQ_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffff
14078#define SQ_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
14079#define SQ_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffff
14080#define SQ_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
14081#define SQ_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffff
14082#define SQ_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
14083#define SQ_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffff
14084#define SQ_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
14085#define SQ_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xffffffff
14086#define SQ_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
14087#define SQ_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xffffffff
14088#define SQ_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
14089#define SQ_PERFCOUNTER6_HI__PERFCOUNTER_HI_MASK 0xffffffff
14090#define SQ_PERFCOUNTER6_HI__PERFCOUNTER_HI__SHIFT 0x0
14091#define SQ_PERFCOUNTER7_HI__PERFCOUNTER_HI_MASK 0xffffffff
14092#define SQ_PERFCOUNTER7_HI__PERFCOUNTER_HI__SHIFT 0x0
14093#define SQ_PERFCOUNTER8_HI__PERFCOUNTER_HI_MASK 0xffffffff
14094#define SQ_PERFCOUNTER8_HI__PERFCOUNTER_HI__SHIFT 0x0
14095#define SQ_PERFCOUNTER9_HI__PERFCOUNTER_HI_MASK 0xffffffff
14096#define SQ_PERFCOUNTER9_HI__PERFCOUNTER_HI__SHIFT 0x0
14097#define SQ_PERFCOUNTER10_HI__PERFCOUNTER_HI_MASK 0xffffffff
14098#define SQ_PERFCOUNTER10_HI__PERFCOUNTER_HI__SHIFT 0x0
14099#define SQ_PERFCOUNTER11_HI__PERFCOUNTER_HI_MASK 0xffffffff
14100#define SQ_PERFCOUNTER11_HI__PERFCOUNTER_HI__SHIFT 0x0
14101#define SQ_PERFCOUNTER12_HI__PERFCOUNTER_HI_MASK 0xffffffff
14102#define SQ_PERFCOUNTER12_HI__PERFCOUNTER_HI__SHIFT 0x0
14103#define SQ_PERFCOUNTER13_HI__PERFCOUNTER_HI_MASK 0xffffffff
14104#define SQ_PERFCOUNTER13_HI__PERFCOUNTER_HI__SHIFT 0x0
14105#define SQ_PERFCOUNTER14_HI__PERFCOUNTER_HI_MASK 0xffffffff
14106#define SQ_PERFCOUNTER14_HI__PERFCOUNTER_HI__SHIFT 0x0
14107#define SQ_PERFCOUNTER15_HI__PERFCOUNTER_HI_MASK 0xffffffff
14108#define SQ_PERFCOUNTER15_HI__PERFCOUNTER_HI__SHIFT 0x0
14109#define SQ_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x1ff
14110#define SQ_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
14111#define SQ_PERFCOUNTER0_SELECT__SQC_BANK_MASK_MASK 0xf000
14112#define SQ_PERFCOUNTER0_SELECT__SQC_BANK_MASK__SHIFT 0xc
14113#define SQ_PERFCOUNTER0_SELECT__SQC_CLIENT_MASK_MASK 0xf0000
14114#define SQ_PERFCOUNTER0_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
14115#define SQ_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0xf00000
14116#define SQ_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
14117#define SQ_PERFCOUNTER0_SELECT__SIMD_MASK_MASK 0xf000000
14118#define SQ_PERFCOUNTER0_SELECT__SIMD_MASK__SHIFT 0x18
14119#define SQ_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000
14120#define SQ_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
14121#define SQ_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x1ff
14122#define SQ_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
14123#define SQ_PERFCOUNTER1_SELECT__SQC_BANK_MASK_MASK 0xf000
14124#define SQ_PERFCOUNTER1_SELECT__SQC_BANK_MASK__SHIFT 0xc
14125#define SQ_PERFCOUNTER1_SELECT__SQC_CLIENT_MASK_MASK 0xf0000
14126#define SQ_PERFCOUNTER1_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
14127#define SQ_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0xf00000
14128#define SQ_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
14129#define SQ_PERFCOUNTER1_SELECT__SIMD_MASK_MASK 0xf000000
14130#define SQ_PERFCOUNTER1_SELECT__SIMD_MASK__SHIFT 0x18
14131#define SQ_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000
14132#define SQ_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
14133#define SQ_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x1ff
14134#define SQ_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
14135#define SQ_PERFCOUNTER2_SELECT__SQC_BANK_MASK_MASK 0xf000
14136#define SQ_PERFCOUNTER2_SELECT__SQC_BANK_MASK__SHIFT 0xc
14137#define SQ_PERFCOUNTER2_SELECT__SQC_CLIENT_MASK_MASK 0xf0000
14138#define SQ_PERFCOUNTER2_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
14139#define SQ_PERFCOUNTER2_SELECT__SPM_MODE_MASK 0xf00000
14140#define SQ_PERFCOUNTER2_SELECT__SPM_MODE__SHIFT 0x14
14141#define SQ_PERFCOUNTER2_SELECT__SIMD_MASK_MASK 0xf000000
14142#define SQ_PERFCOUNTER2_SELECT__SIMD_MASK__SHIFT 0x18
14143#define SQ_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xf0000000
14144#define SQ_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
14145#define SQ_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x1ff
14146#define SQ_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
14147#define SQ_PERFCOUNTER3_SELECT__SQC_BANK_MASK_MASK 0xf000
14148#define SQ_PERFCOUNTER3_SELECT__SQC_BANK_MASK__SHIFT 0xc
14149#define SQ_PERFCOUNTER3_SELECT__SQC_CLIENT_MASK_MASK 0xf0000
14150#define SQ_PERFCOUNTER3_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
14151#define SQ_PERFCOUNTER3_SELECT__SPM_MODE_MASK 0xf00000
14152#define SQ_PERFCOUNTER3_SELECT__SPM_MODE__SHIFT 0x14
14153#define SQ_PERFCOUNTER3_SELECT__SIMD_MASK_MASK 0xf000000
14154#define SQ_PERFCOUNTER3_SELECT__SIMD_MASK__SHIFT 0x18
14155#define SQ_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xf0000000
14156#define SQ_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
14157#define SQ_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x1ff
14158#define SQ_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
14159#define SQ_PERFCOUNTER4_SELECT__SQC_BANK_MASK_MASK 0xf000
14160#define SQ_PERFCOUNTER4_SELECT__SQC_BANK_MASK__SHIFT 0xc
14161#define SQ_PERFCOUNTER4_SELECT__SQC_CLIENT_MASK_MASK 0xf0000
14162#define SQ_PERFCOUNTER4_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
14163#define SQ_PERFCOUNTER4_SELECT__SPM_MODE_MASK 0xf00000
14164#define SQ_PERFCOUNTER4_SELECT__SPM_MODE__SHIFT 0x14
14165#define SQ_PERFCOUNTER4_SELECT__SIMD_MASK_MASK 0xf000000
14166#define SQ_PERFCOUNTER4_SELECT__SIMD_MASK__SHIFT 0x18
14167#define SQ_PERFCOUNTER4_SELECT__PERF_MODE_MASK 0xf0000000
14168#define SQ_PERFCOUNTER4_SELECT__PERF_MODE__SHIFT 0x1c
14169#define SQ_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x1ff
14170#define SQ_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
14171#define SQ_PERFCOUNTER5_SELECT__SQC_BANK_MASK_MASK 0xf000
14172#define SQ_PERFCOUNTER5_SELECT__SQC_BANK_MASK__SHIFT 0xc
14173#define SQ_PERFCOUNTER5_SELECT__SQC_CLIENT_MASK_MASK 0xf0000
14174#define SQ_PERFCOUNTER5_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
14175#define SQ_PERFCOUNTER5_SELECT__SPM_MODE_MASK 0xf00000
14176#define SQ_PERFCOUNTER5_SELECT__SPM_MODE__SHIFT 0x14
14177#define SQ_PERFCOUNTER5_SELECT__SIMD_MASK_MASK 0xf000000
14178#define SQ_PERFCOUNTER5_SELECT__SIMD_MASK__SHIFT 0x18
14179#define SQ_PERFCOUNTER5_SELECT__PERF_MODE_MASK 0xf0000000
14180#define SQ_PERFCOUNTER5_SELECT__PERF_MODE__SHIFT 0x1c
14181#define SQ_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x1ff
14182#define SQ_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x0
14183#define SQ_PERFCOUNTER6_SELECT__SQC_BANK_MASK_MASK 0xf000
14184#define SQ_PERFCOUNTER6_SELECT__SQC_BANK_MASK__SHIFT 0xc
14185#define SQ_PERFCOUNTER6_SELECT__SQC_CLIENT_MASK_MASK 0xf0000
14186#define SQ_PERFCOUNTER6_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
14187#define SQ_PERFCOUNTER6_SELECT__SPM_MODE_MASK 0xf00000
14188#define SQ_PERFCOUNTER6_SELECT__SPM_MODE__SHIFT 0x14
14189#define SQ_PERFCOUNTER6_SELECT__SIMD_MASK_MASK 0xf000000
14190#define SQ_PERFCOUNTER6_SELECT__SIMD_MASK__SHIFT 0x18
14191#define SQ_PERFCOUNTER6_SELECT__PERF_MODE_MASK 0xf0000000
14192#define SQ_PERFCOUNTER6_SELECT__PERF_MODE__SHIFT 0x1c
14193#define SQ_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x1ff
14194#define SQ_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x0
14195#define SQ_PERFCOUNTER7_SELECT__SQC_BANK_MASK_MASK 0xf000
14196#define SQ_PERFCOUNTER7_SELECT__SQC_BANK_MASK__SHIFT 0xc
14197#define SQ_PERFCOUNTER7_SELECT__SQC_CLIENT_MASK_MASK 0xf0000
14198#define SQ_PERFCOUNTER7_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
14199#define SQ_PERFCOUNTER7_SELECT__SPM_MODE_MASK 0xf00000
14200#define SQ_PERFCOUNTER7_SELECT__SPM_MODE__SHIFT 0x14
14201#define SQ_PERFCOUNTER7_SELECT__SIMD_MASK_MASK 0xf000000
14202#define SQ_PERFCOUNTER7_SELECT__SIMD_MASK__SHIFT 0x18
14203#define SQ_PERFCOUNTER7_SELECT__PERF_MODE_MASK 0xf0000000
14204#define SQ_PERFCOUNTER7_SELECT__PERF_MODE__SHIFT 0x1c
14205#define SQ_PERFCOUNTER8_SELECT__PERF_SEL_MASK 0x1ff
14206#define SQ_PERFCOUNTER8_SELECT__PERF_SEL__SHIFT 0x0
14207#define SQ_PERFCOUNTER8_SELECT__SQC_BANK_MASK_MASK 0xf000
14208#define SQ_PERFCOUNTER8_SELECT__SQC_BANK_MASK__SHIFT 0xc
14209#define SQ_PERFCOUNTER8_SELECT__SQC_CLIENT_MASK_MASK 0xf0000
14210#define SQ_PERFCOUNTER8_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
14211#define SQ_PERFCOUNTER8_SELECT__SPM_MODE_MASK 0xf00000
14212#define SQ_PERFCOUNTER8_SELECT__SPM_MODE__SHIFT 0x14
14213#define SQ_PERFCOUNTER8_SELECT__SIMD_MASK_MASK 0xf000000
14214#define SQ_PERFCOUNTER8_SELECT__SIMD_MASK__SHIFT 0x18
14215#define SQ_PERFCOUNTER8_SELECT__PERF_MODE_MASK 0xf0000000
14216#define SQ_PERFCOUNTER8_SELECT__PERF_MODE__SHIFT 0x1c
14217#define SQ_PERFCOUNTER9_SELECT__PERF_SEL_MASK 0x1ff
14218#define SQ_PERFCOUNTER9_SELECT__PERF_SEL__SHIFT 0x0
14219#define SQ_PERFCOUNTER9_SELECT__SQC_BANK_MASK_MASK 0xf000
14220#define SQ_PERFCOUNTER9_SELECT__SQC_BANK_MASK__SHIFT 0xc
14221#define SQ_PERFCOUNTER9_SELECT__SQC_CLIENT_MASK_MASK 0xf0000
14222#define SQ_PERFCOUNTER9_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
14223#define SQ_PERFCOUNTER9_SELECT__SPM_MODE_MASK 0xf00000
14224#define SQ_PERFCOUNTER9_SELECT__SPM_MODE__SHIFT 0x14
14225#define SQ_PERFCOUNTER9_SELECT__SIMD_MASK_MASK 0xf000000
14226#define SQ_PERFCOUNTER9_SELECT__SIMD_MASK__SHIFT 0x18
14227#define SQ_PERFCOUNTER9_SELECT__PERF_MODE_MASK 0xf0000000
14228#define SQ_PERFCOUNTER9_SELECT__PERF_MODE__SHIFT 0x1c
14229#define SQ_PERFCOUNTER10_SELECT__PERF_SEL_MASK 0x1ff
14230#define SQ_PERFCOUNTER10_SELECT__PERF_SEL__SHIFT 0x0
14231#define SQ_PERFCOUNTER10_SELECT__SQC_BANK_MASK_MASK 0xf000
14232#define SQ_PERFCOUNTER10_SELECT__SQC_BANK_MASK__SHIFT 0xc
14233#define SQ_PERFCOUNTER10_SELECT__SQC_CLIENT_MASK_MASK 0xf0000
14234#define SQ_PERFCOUNTER10_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
14235#define SQ_PERFCOUNTER10_SELECT__SPM_MODE_MASK 0xf00000
14236#define SQ_PERFCOUNTER10_SELECT__SPM_MODE__SHIFT 0x14
14237#define SQ_PERFCOUNTER10_SELECT__SIMD_MASK_MASK 0xf000000
14238#define SQ_PERFCOUNTER10_SELECT__SIMD_MASK__SHIFT 0x18
14239#define SQ_PERFCOUNTER10_SELECT__PERF_MODE_MASK 0xf0000000
14240#define SQ_PERFCOUNTER10_SELECT__PERF_MODE__SHIFT 0x1c
14241#define SQ_PERFCOUNTER11_SELECT__PERF_SEL_MASK 0x1ff
14242#define SQ_PERFCOUNTER11_SELECT__PERF_SEL__SHIFT 0x0
14243#define SQ_PERFCOUNTER11_SELECT__SQC_BANK_MASK_MASK 0xf000
14244#define SQ_PERFCOUNTER11_SELECT__SQC_BANK_MASK__SHIFT 0xc
14245#define SQ_PERFCOUNTER11_SELECT__SQC_CLIENT_MASK_MASK 0xf0000
14246#define SQ_PERFCOUNTER11_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
14247#define SQ_PERFCOUNTER11_SELECT__SPM_MODE_MASK 0xf00000
14248#define SQ_PERFCOUNTER11_SELECT__SPM_MODE__SHIFT 0x14
14249#define SQ_PERFCOUNTER11_SELECT__SIMD_MASK_MASK 0xf000000
14250#define SQ_PERFCOUNTER11_SELECT__SIMD_MASK__SHIFT 0x18
14251#define SQ_PERFCOUNTER11_SELECT__PERF_MODE_MASK 0xf0000000
14252#define SQ_PERFCOUNTER11_SELECT__PERF_MODE__SHIFT 0x1c
14253#define SQ_PERFCOUNTER12_SELECT__PERF_SEL_MASK 0x1ff
14254#define SQ_PERFCOUNTER12_SELECT__PERF_SEL__SHIFT 0x0
14255#define SQ_PERFCOUNTER12_SELECT__SQC_BANK_MASK_MASK 0xf000
14256#define SQ_PERFCOUNTER12_SELECT__SQC_BANK_MASK__SHIFT 0xc
14257#define SQ_PERFCOUNTER12_SELECT__SQC_CLIENT_MASK_MASK 0xf0000
14258#define SQ_PERFCOUNTER12_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
14259#define SQ_PERFCOUNTER12_SELECT__SPM_MODE_MASK 0xf00000
14260#define SQ_PERFCOUNTER12_SELECT__SPM_MODE__SHIFT 0x14
14261#define SQ_PERFCOUNTER12_SELECT__SIMD_MASK_MASK 0xf000000
14262#define SQ_PERFCOUNTER12_SELECT__SIMD_MASK__SHIFT 0x18
14263#define SQ_PERFCOUNTER12_SELECT__PERF_MODE_MASK 0xf0000000
14264#define SQ_PERFCOUNTER12_SELECT__PERF_MODE__SHIFT 0x1c
14265#define SQ_PERFCOUNTER13_SELECT__PERF_SEL_MASK 0x1ff
14266#define SQ_PERFCOUNTER13_SELECT__PERF_SEL__SHIFT 0x0
14267#define SQ_PERFCOUNTER13_SELECT__SQC_BANK_MASK_MASK 0xf000
14268#define SQ_PERFCOUNTER13_SELECT__SQC_BANK_MASK__SHIFT 0xc
14269#define SQ_PERFCOUNTER13_SELECT__SQC_CLIENT_MASK_MASK 0xf0000
14270#define SQ_PERFCOUNTER13_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
14271#define SQ_PERFCOUNTER13_SELECT__SPM_MODE_MASK 0xf00000
14272#define SQ_PERFCOUNTER13_SELECT__SPM_MODE__SHIFT 0x14
14273#define SQ_PERFCOUNTER13_SELECT__SIMD_MASK_MASK 0xf000000
14274#define SQ_PERFCOUNTER13_SELECT__SIMD_MASK__SHIFT 0x18
14275#define SQ_PERFCOUNTER13_SELECT__PERF_MODE_MASK 0xf0000000
14276#define SQ_PERFCOUNTER13_SELECT__PERF_MODE__SHIFT 0x1c
14277#define SQ_PERFCOUNTER14_SELECT__PERF_SEL_MASK 0x1ff
14278#define SQ_PERFCOUNTER14_SELECT__PERF_SEL__SHIFT 0x0
14279#define SQ_PERFCOUNTER14_SELECT__SQC_BANK_MASK_MASK 0xf000
14280#define SQ_PERFCOUNTER14_SELECT__SQC_BANK_MASK__SHIFT 0xc
14281#define SQ_PERFCOUNTER14_SELECT__SQC_CLIENT_MASK_MASK 0xf0000
14282#define SQ_PERFCOUNTER14_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
14283#define SQ_PERFCOUNTER14_SELECT__SPM_MODE_MASK 0xf00000
14284#define SQ_PERFCOUNTER14_SELECT__SPM_MODE__SHIFT 0x14
14285#define SQ_PERFCOUNTER14_SELECT__SIMD_MASK_MASK 0xf000000
14286#define SQ_PERFCOUNTER14_SELECT__SIMD_MASK__SHIFT 0x18
14287#define SQ_PERFCOUNTER14_SELECT__PERF_MODE_MASK 0xf0000000
14288#define SQ_PERFCOUNTER14_SELECT__PERF_MODE__SHIFT 0x1c
14289#define SQ_PERFCOUNTER15_SELECT__PERF_SEL_MASK 0x1ff
14290#define SQ_PERFCOUNTER15_SELECT__PERF_SEL__SHIFT 0x0
14291#define SQ_PERFCOUNTER15_SELECT__SQC_BANK_MASK_MASK 0xf000
14292#define SQ_PERFCOUNTER15_SELECT__SQC_BANK_MASK__SHIFT 0xc
14293#define SQ_PERFCOUNTER15_SELECT__SQC_CLIENT_MASK_MASK 0xf0000
14294#define SQ_PERFCOUNTER15_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
14295#define SQ_PERFCOUNTER15_SELECT__SPM_MODE_MASK 0xf00000
14296#define SQ_PERFCOUNTER15_SELECT__SPM_MODE__SHIFT 0x14
14297#define SQ_PERFCOUNTER15_SELECT__SIMD_MASK_MASK 0xf000000
14298#define SQ_PERFCOUNTER15_SELECT__SIMD_MASK__SHIFT 0x18
14299#define SQ_PERFCOUNTER15_SELECT__PERF_MODE_MASK 0xf0000000
14300#define SQ_PERFCOUNTER15_SELECT__PERF_MODE__SHIFT 0x1c
14301#define CGTT_SQ_CLK_CTRL__ON_DELAY_MASK 0xf
14302#define CGTT_SQ_CLK_CTRL__ON_DELAY__SHIFT 0x0
14303#define CGTT_SQ_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
14304#define CGTT_SQ_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
14305#define CGTT_SQ_CLK_CTRL__PERFMON_OVERRIDE_MASK 0x20000000
14306#define CGTT_SQ_CLK_CTRL__PERFMON_OVERRIDE__SHIFT 0x1d
14307#define CGTT_SQ_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000
14308#define CGTT_SQ_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
14309#define CGTT_SQ_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000
14310#define CGTT_SQ_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
14311#define CGTT_SQG_CLK_CTRL__ON_DELAY_MASK 0xf
14312#define CGTT_SQG_CLK_CTRL__ON_DELAY__SHIFT 0x0
14313#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
14314#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
14315#define CGTT_SQG_CLK_CTRL__TTRACE_OVERRIDE_MASK 0x10000000
14316#define CGTT_SQG_CLK_CTRL__TTRACE_OVERRIDE__SHIFT 0x1c
14317#define CGTT_SQG_CLK_CTRL__PERFMON_OVERRIDE_MASK 0x20000000
14318#define CGTT_SQG_CLK_CTRL__PERFMON_OVERRIDE__SHIFT 0x1d
14319#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000
14320#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
14321#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000
14322#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
14323#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH0_MASK 0xffff
14324#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH0__SHIFT 0x0
14325#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH1_MASK 0xffff0000
14326#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH1__SHIFT 0x10
14327#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH0_MASK 0xffff
14328#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH0__SHIFT 0x0
14329#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH1_MASK 0xffff0000
14330#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH1__SHIFT 0x10
14331#define SQ_LDS_CLK_CTRL__FORCE_CU_ON_SH0_MASK 0xffff
14332#define SQ_LDS_CLK_CTRL__FORCE_CU_ON_SH0__SHIFT 0x0
14333#define SQ_LDS_CLK_CTRL__FORCE_CU_ON_SH1_MASK 0xffff0000
14334#define SQ_LDS_CLK_CTRL__FORCE_CU_ON_SH1__SHIFT 0x10
14335#define SQ_POWER_THROTTLE__MIN_POWER_MASK 0x3fff
14336#define SQ_POWER_THROTTLE__MIN_POWER__SHIFT 0x0
14337#define SQ_POWER_THROTTLE__MAX_POWER_MASK 0x3fff0000
14338#define SQ_POWER_THROTTLE__MAX_POWER__SHIFT 0x10
14339#define SQ_POWER_THROTTLE__PHASE_OFFSET_MASK 0xc0000000
14340#define SQ_POWER_THROTTLE__PHASE_OFFSET__SHIFT 0x1e
14341#define SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK 0x3fff
14342#define SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT 0x0
14343#define SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK 0x3ff0000
14344#define SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
14345#define SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000
14346#define SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
14347#define SQ_POWER_THROTTLE2__USE_REF_CLOCK_MASK 0x80000000
14348#define SQ_POWER_THROTTLE2__USE_REF_CLOCK__SHIFT 0x1f
14349#define SQ_TIME_HI__TIME_MASK 0xffffffff
14350#define SQ_TIME_HI__TIME__SHIFT 0x0
14351#define SQ_TIME_LO__TIME_MASK 0xffffffff
14352#define SQ_TIME_LO__TIME__SHIFT 0x0
14353#define SQ_THREAD_TRACE_BASE__ADDR_MASK 0xffffffff
14354#define SQ_THREAD_TRACE_BASE__ADDR__SHIFT 0x0
14355#define SQ_THREAD_TRACE_BASE2__ADDR_HI_MASK 0xf
14356#define SQ_THREAD_TRACE_BASE2__ADDR_HI__SHIFT 0x0
14357#define SQ_THREAD_TRACE_SIZE__SIZE_MASK 0x3fffff
14358#define SQ_THREAD_TRACE_SIZE__SIZE__SHIFT 0x0
14359#define SQ_THREAD_TRACE_MASK__CU_SEL_MASK 0x1f
14360#define SQ_THREAD_TRACE_MASK__CU_SEL__SHIFT 0x0
14361#define SQ_THREAD_TRACE_MASK__SH_SEL_MASK 0x20
14362#define SQ_THREAD_TRACE_MASK__SH_SEL__SHIFT 0x5
14363#define SQ_THREAD_TRACE_MASK__REG_STALL_EN_MASK 0x80
14364#define SQ_THREAD_TRACE_MASK__REG_STALL_EN__SHIFT 0x7
14365#define SQ_THREAD_TRACE_MASK__SIMD_EN_MASK 0xf00
14366#define SQ_THREAD_TRACE_MASK__SIMD_EN__SHIFT 0x8
14367#define SQ_THREAD_TRACE_MASK__VM_ID_MASK_MASK 0x3000
14368#define SQ_THREAD_TRACE_MASK__VM_ID_MASK__SHIFT 0xc
14369#define SQ_THREAD_TRACE_MASK__SPI_STALL_EN_MASK 0x4000
14370#define SQ_THREAD_TRACE_MASK__SPI_STALL_EN__SHIFT 0xe
14371#define SQ_THREAD_TRACE_MASK__SQ_STALL_EN_MASK 0x8000
14372#define SQ_THREAD_TRACE_MASK__SQ_STALL_EN__SHIFT 0xf
14373#define SQ_THREAD_TRACE_USERDATA_0__DATA_MASK 0xffffffff
14374#define SQ_THREAD_TRACE_USERDATA_0__DATA__SHIFT 0x0
14375#define SQ_THREAD_TRACE_USERDATA_1__DATA_MASK 0xffffffff
14376#define SQ_THREAD_TRACE_USERDATA_1__DATA__SHIFT 0x0
14377#define SQ_THREAD_TRACE_USERDATA_2__DATA_MASK 0xffffffff
14378#define SQ_THREAD_TRACE_USERDATA_2__DATA__SHIFT 0x0
14379#define SQ_THREAD_TRACE_USERDATA_3__DATA_MASK 0xffffffff
14380#define SQ_THREAD_TRACE_USERDATA_3__DATA__SHIFT 0x0
14381#define SQ_THREAD_TRACE_MODE__MASK_PS_MASK 0x7
14382#define SQ_THREAD_TRACE_MODE__MASK_PS__SHIFT 0x0
14383#define SQ_THREAD_TRACE_MODE__MASK_VS_MASK 0x38
14384#define SQ_THREAD_TRACE_MODE__MASK_VS__SHIFT 0x3
14385#define SQ_THREAD_TRACE_MODE__MASK_GS_MASK 0x1c0
14386#define SQ_THREAD_TRACE_MODE__MASK_GS__SHIFT 0x6
14387#define SQ_THREAD_TRACE_MODE__MASK_ES_MASK 0xe00
14388#define SQ_THREAD_TRACE_MODE__MASK_ES__SHIFT 0x9
14389#define SQ_THREAD_TRACE_MODE__MASK_HS_MASK 0x7000
14390#define SQ_THREAD_TRACE_MODE__MASK_HS__SHIFT 0xc
14391#define SQ_THREAD_TRACE_MODE__MASK_LS_MASK 0x38000
14392#define SQ_THREAD_TRACE_MODE__MASK_LS__SHIFT 0xf
14393#define SQ_THREAD_TRACE_MODE__MASK_CS_MASK 0x1c0000
14394#define SQ_THREAD_TRACE_MODE__MASK_CS__SHIFT 0x12
14395#define SQ_THREAD_TRACE_MODE__MODE_MASK 0x600000
14396#define SQ_THREAD_TRACE_MODE__MODE__SHIFT 0x15
14397#define SQ_THREAD_TRACE_MODE__CAPTURE_MODE_MASK 0x1800000
14398#define SQ_THREAD_TRACE_MODE__CAPTURE_MODE__SHIFT 0x17
14399#define SQ_THREAD_TRACE_MODE__AUTOFLUSH_EN_MASK 0x2000000
14400#define SQ_THREAD_TRACE_MODE__AUTOFLUSH_EN__SHIFT 0x19
14401#define SQ_THREAD_TRACE_MODE__PRIV_MASK 0x4000000
14402#define SQ_THREAD_TRACE_MODE__PRIV__SHIFT 0x1a
14403#define SQ_THREAD_TRACE_MODE__ISSUE_MASK_MASK 0x18000000
14404#define SQ_THREAD_TRACE_MODE__ISSUE_MASK__SHIFT 0x1b
14405#define SQ_THREAD_TRACE_MODE__TEST_MODE_MASK 0x20000000
14406#define SQ_THREAD_TRACE_MODE__TEST_MODE__SHIFT 0x1d
14407#define SQ_THREAD_TRACE_MODE__INTERRUPT_EN_MASK 0x40000000
14408#define SQ_THREAD_TRACE_MODE__INTERRUPT_EN__SHIFT 0x1e
14409#define SQ_THREAD_TRACE_MODE__WRAP_MASK 0x80000000
14410#define SQ_THREAD_TRACE_MODE__WRAP__SHIFT 0x1f
14411#define SQ_THREAD_TRACE_CTRL__RESET_BUFFER_MASK 0x80000000
14412#define SQ_THREAD_TRACE_CTRL__RESET_BUFFER__SHIFT 0x1f
14413#define SQ_THREAD_TRACE_TOKEN_MASK__TOKEN_MASK_MASK 0xffff
14414#define SQ_THREAD_TRACE_TOKEN_MASK__TOKEN_MASK__SHIFT 0x0
14415#define SQ_THREAD_TRACE_TOKEN_MASK__REG_MASK_MASK 0xff0000
14416#define SQ_THREAD_TRACE_TOKEN_MASK__REG_MASK__SHIFT 0x10
14417#define SQ_THREAD_TRACE_TOKEN_MASK__REG_DROP_ON_STALL_MASK 0x1000000
14418#define SQ_THREAD_TRACE_TOKEN_MASK__REG_DROP_ON_STALL__SHIFT 0x18
14419#define SQ_THREAD_TRACE_TOKEN_MASK2__INST_MASK_MASK 0xffffffff
14420#define SQ_THREAD_TRACE_TOKEN_MASK2__INST_MASK__SHIFT 0x0
14421#define SQ_THREAD_TRACE_PERF_MASK__SH0_MASK_MASK 0xffff
14422#define SQ_THREAD_TRACE_PERF_MASK__SH0_MASK__SHIFT 0x0
14423#define SQ_THREAD_TRACE_PERF_MASK__SH1_MASK_MASK 0xffff0000
14424#define SQ_THREAD_TRACE_PERF_MASK__SH1_MASK__SHIFT 0x10
14425#define SQ_THREAD_TRACE_WPTR__WPTR_MASK 0x3fffffff
14426#define SQ_THREAD_TRACE_WPTR__WPTR__SHIFT 0x0
14427#define SQ_THREAD_TRACE_WPTR__READ_OFFSET_MASK 0xc0000000
14428#define SQ_THREAD_TRACE_WPTR__READ_OFFSET__SHIFT 0x1e
14429#define SQ_THREAD_TRACE_STATUS__FINISH_PENDING_MASK 0x3ff
14430#define SQ_THREAD_TRACE_STATUS__FINISH_PENDING__SHIFT 0x0
14431#define SQ_THREAD_TRACE_STATUS__FINISH_DONE_MASK 0x3ff0000
14432#define SQ_THREAD_TRACE_STATUS__FINISH_DONE__SHIFT 0x10
14433#define SQ_THREAD_TRACE_STATUS__NEW_BUF_MASK 0x20000000
14434#define SQ_THREAD_TRACE_STATUS__NEW_BUF__SHIFT 0x1d
14435#define SQ_THREAD_TRACE_STATUS__BUSY_MASK 0x40000000
14436#define SQ_THREAD_TRACE_STATUS__BUSY__SHIFT 0x1e
14437#define SQ_THREAD_TRACE_STATUS__FULL_MASK 0x80000000
14438#define SQ_THREAD_TRACE_STATUS__FULL__SHIFT 0x1f
14439#define SQ_THREAD_TRACE_CNTR__CNTR_MASK 0xffffffff
14440#define SQ_THREAD_TRACE_CNTR__CNTR__SHIFT 0x0
14441#define SQ_THREAD_TRACE_HIWATER__HIWATER_MASK 0x7
14442#define SQ_THREAD_TRACE_HIWATER__HIWATER__SHIFT 0x0
14443#define SQ_LB_CTR_CTRL__START_MASK 0x1
14444#define SQ_LB_CTR_CTRL__START__SHIFT 0x0
14445#define SQ_LB_CTR_CTRL__LOAD_MASK 0x2
14446#define SQ_LB_CTR_CTRL__LOAD__SHIFT 0x1
14447#define SQ_LB_CTR_CTRL__CLEAR_MASK 0x4
14448#define SQ_LB_CTR_CTRL__CLEAR__SHIFT 0x2
14449#define SQ_LB_DATA_ALU_CYCLES__DATA_MASK 0xffffffff
14450#define SQ_LB_DATA_ALU_CYCLES__DATA__SHIFT 0x0
14451#define SQ_LB_DATA_TEX_CYCLES__DATA_MASK 0xffffffff
14452#define SQ_LB_DATA_TEX_CYCLES__DATA__SHIFT 0x0
14453#define SQ_LB_DATA_ALU_STALLS__DATA_MASK 0xffffffff
14454#define SQ_LB_DATA_ALU_STALLS__DATA__SHIFT 0x0
14455#define SQ_LB_DATA_TEX_STALLS__DATA_MASK 0xffffffff
14456#define SQ_LB_DATA_TEX_STALLS__DATA__SHIFT 0x0
14457#define SQC_EDC_CNT__INST_SEC_MASK 0xff
14458#define SQC_EDC_CNT__INST_SEC__SHIFT 0x0
14459#define SQC_EDC_CNT__INST_DED_MASK 0xff00
14460#define SQC_EDC_CNT__INST_DED__SHIFT 0x8
14461#define SQC_EDC_CNT__DATA_SEC_MASK 0xff0000
14462#define SQC_EDC_CNT__DATA_SEC__SHIFT 0x10
14463#define SQC_EDC_CNT__DATA_DED_MASK 0xff000000
14464#define SQC_EDC_CNT__DATA_DED__SHIFT 0x18
14465#define SQ_EDC_SEC_CNT__LDS_SEC_MASK 0xff
14466#define SQ_EDC_SEC_CNT__LDS_SEC__SHIFT 0x0
14467#define SQ_EDC_SEC_CNT__SGPR_SEC_MASK 0xff00
14468#define SQ_EDC_SEC_CNT__SGPR_SEC__SHIFT 0x8
14469#define SQ_EDC_SEC_CNT__VGPR_SEC_MASK 0xff0000
14470#define SQ_EDC_SEC_CNT__VGPR_SEC__SHIFT 0x10
14471#define SQ_EDC_DED_CNT__LDS_DED_MASK 0xff
14472#define SQ_EDC_DED_CNT__LDS_DED__SHIFT 0x0
14473#define SQ_EDC_DED_CNT__SGPR_DED_MASK 0xff00
14474#define SQ_EDC_DED_CNT__SGPR_DED__SHIFT 0x8
14475#define SQ_EDC_DED_CNT__VGPR_DED_MASK 0xff0000
14476#define SQ_EDC_DED_CNT__VGPR_DED__SHIFT 0x10
14477#define SQ_EDC_INFO__WAVE_ID_MASK 0xf
14478#define SQ_EDC_INFO__WAVE_ID__SHIFT 0x0
14479#define SQ_EDC_INFO__SIMD_ID_MASK 0x30
14480#define SQ_EDC_INFO__SIMD_ID__SHIFT 0x4
14481#define SQ_EDC_INFO__SOURCE_MASK 0x1c0
14482#define SQ_EDC_INFO__SOURCE__SHIFT 0x6
14483#define SQ_EDC_INFO__VM_ID_MASK 0x1e00
14484#define SQ_EDC_INFO__VM_ID__SHIFT 0x9
14485#define SQ_BUF_RSRC_WORD0__BASE_ADDRESS_MASK 0xffffffff
14486#define SQ_BUF_RSRC_WORD0__BASE_ADDRESS__SHIFT 0x0
14487#define SQ_BUF_RSRC_WORD1__BASE_ADDRESS_HI_MASK 0xffff
14488#define SQ_BUF_RSRC_WORD1__BASE_ADDRESS_HI__SHIFT 0x0
14489#define SQ_BUF_RSRC_WORD1__STRIDE_MASK 0x3fff0000
14490#define SQ_BUF_RSRC_WORD1__STRIDE__SHIFT 0x10
14491#define SQ_BUF_RSRC_WORD1__CACHE_SWIZZLE_MASK 0x40000000
14492#define SQ_BUF_RSRC_WORD1__CACHE_SWIZZLE__SHIFT 0x1e
14493#define SQ_BUF_RSRC_WORD1__SWIZZLE_ENABLE_MASK 0x80000000
14494#define SQ_BUF_RSRC_WORD1__SWIZZLE_ENABLE__SHIFT 0x1f
14495#define SQ_BUF_RSRC_WORD2__NUM_RECORDS_MASK 0xffffffff
14496#define SQ_BUF_RSRC_WORD2__NUM_RECORDS__SHIFT 0x0
14497#define SQ_BUF_RSRC_WORD3__DST_SEL_X_MASK 0x7
14498#define SQ_BUF_RSRC_WORD3__DST_SEL_X__SHIFT 0x0
14499#define SQ_BUF_RSRC_WORD3__DST_SEL_Y_MASK 0x38
14500#define SQ_BUF_RSRC_WORD3__DST_SEL_Y__SHIFT 0x3
14501#define SQ_BUF_RSRC_WORD3__DST_SEL_Z_MASK 0x1c0
14502#define SQ_BUF_RSRC_WORD3__DST_SEL_Z__SHIFT 0x6
14503#define SQ_BUF_RSRC_WORD3__DST_SEL_W_MASK 0xe00
14504#define SQ_BUF_RSRC_WORD3__DST_SEL_W__SHIFT 0x9
14505#define SQ_BUF_RSRC_WORD3__NUM_FORMAT_MASK 0x7000
14506#define SQ_BUF_RSRC_WORD3__NUM_FORMAT__SHIFT 0xc
14507#define SQ_BUF_RSRC_WORD3__DATA_FORMAT_MASK 0x78000
14508#define SQ_BUF_RSRC_WORD3__DATA_FORMAT__SHIFT 0xf
14509#define SQ_BUF_RSRC_WORD3__ELEMENT_SIZE_MASK 0x180000
14510#define SQ_BUF_RSRC_WORD3__ELEMENT_SIZE__SHIFT 0x13
14511#define SQ_BUF_RSRC_WORD3__INDEX_STRIDE_MASK 0x600000
14512#define SQ_BUF_RSRC_WORD3__INDEX_STRIDE__SHIFT 0x15
14513#define SQ_BUF_RSRC_WORD3__ADD_TID_ENABLE_MASK 0x800000
14514#define SQ_BUF_RSRC_WORD3__ADD_TID_ENABLE__SHIFT 0x17
14515#define SQ_BUF_RSRC_WORD3__ATC_MASK 0x1000000
14516#define SQ_BUF_RSRC_WORD3__ATC__SHIFT 0x18
14517#define SQ_BUF_RSRC_WORD3__HASH_ENABLE_MASK 0x2000000
14518#define SQ_BUF_RSRC_WORD3__HASH_ENABLE__SHIFT 0x19
14519#define SQ_BUF_RSRC_WORD3__HEAP_MASK 0x4000000
14520#define SQ_BUF_RSRC_WORD3__HEAP__SHIFT 0x1a
14521#define SQ_BUF_RSRC_WORD3__MTYPE_MASK 0x38000000
14522#define SQ_BUF_RSRC_WORD3__MTYPE__SHIFT 0x1b
14523#define SQ_BUF_RSRC_WORD3__TYPE_MASK 0xc0000000
14524#define SQ_BUF_RSRC_WORD3__TYPE__SHIFT 0x1e
14525#define SQ_IMG_RSRC_WORD0__BASE_ADDRESS_MASK 0xffffffff
14526#define SQ_IMG_RSRC_WORD0__BASE_ADDRESS__SHIFT 0x0
14527#define SQ_IMG_RSRC_WORD1__BASE_ADDRESS_HI_MASK 0xff
14528#define SQ_IMG_RSRC_WORD1__BASE_ADDRESS_HI__SHIFT 0x0
14529#define SQ_IMG_RSRC_WORD1__MIN_LOD_MASK 0xfff00
14530#define SQ_IMG_RSRC_WORD1__MIN_LOD__SHIFT 0x8
14531#define SQ_IMG_RSRC_WORD1__DATA_FORMAT_MASK 0x3f00000
14532#define SQ_IMG_RSRC_WORD1__DATA_FORMAT__SHIFT 0x14
14533#define SQ_IMG_RSRC_WORD1__NUM_FORMAT_MASK 0x3c000000
14534#define SQ_IMG_RSRC_WORD1__NUM_FORMAT__SHIFT 0x1a
14535#define SQ_IMG_RSRC_WORD1__MTYPE_MASK 0xc0000000
14536#define SQ_IMG_RSRC_WORD1__MTYPE__SHIFT 0x1e
14537#define SQ_IMG_RSRC_WORD2__WIDTH_MASK 0x3fff
14538#define SQ_IMG_RSRC_WORD2__WIDTH__SHIFT 0x0
14539#define SQ_IMG_RSRC_WORD2__HEIGHT_MASK 0xfffc000
14540#define SQ_IMG_RSRC_WORD2__HEIGHT__SHIFT 0xe
14541#define SQ_IMG_RSRC_WORD2__PERF_MOD_MASK 0x70000000
14542#define SQ_IMG_RSRC_WORD2__PERF_MOD__SHIFT 0x1c
14543#define SQ_IMG_RSRC_WORD2__INTERLACED_MASK 0x80000000
14544#define SQ_IMG_RSRC_WORD2__INTERLACED__SHIFT 0x1f
14545#define SQ_IMG_RSRC_WORD3__DST_SEL_X_MASK 0x7
14546#define SQ_IMG_RSRC_WORD3__DST_SEL_X__SHIFT 0x0
14547#define SQ_IMG_RSRC_WORD3__DST_SEL_Y_MASK 0x38
14548#define SQ_IMG_RSRC_WORD3__DST_SEL_Y__SHIFT 0x3
14549#define SQ_IMG_RSRC_WORD3__DST_SEL_Z_MASK 0x1c0
14550#define SQ_IMG_RSRC_WORD3__DST_SEL_Z__SHIFT 0x6
14551#define SQ_IMG_RSRC_WORD3__DST_SEL_W_MASK 0xe00
14552#define SQ_IMG_RSRC_WORD3__DST_SEL_W__SHIFT 0x9
14553#define SQ_IMG_RSRC_WORD3__BASE_LEVEL_MASK 0xf000
14554#define SQ_IMG_RSRC_WORD3__BASE_LEVEL__SHIFT 0xc
14555#define SQ_IMG_RSRC_WORD3__LAST_LEVEL_MASK 0xf0000
14556#define SQ_IMG_RSRC_WORD3__LAST_LEVEL__SHIFT 0x10
14557#define SQ_IMG_RSRC_WORD3__TILING_INDEX_MASK 0x1f00000
14558#define SQ_IMG_RSRC_WORD3__TILING_INDEX__SHIFT 0x14
14559#define SQ_IMG_RSRC_WORD3__POW2_PAD_MASK 0x2000000
14560#define SQ_IMG_RSRC_WORD3__POW2_PAD__SHIFT 0x19
14561#define SQ_IMG_RSRC_WORD3__MTYPE_MASK 0x4000000
14562#define SQ_IMG_RSRC_WORD3__MTYPE__SHIFT 0x1a
14563#define SQ_IMG_RSRC_WORD3__ATC_MASK 0x8000000
14564#define SQ_IMG_RSRC_WORD3__ATC__SHIFT 0x1b
14565#define SQ_IMG_RSRC_WORD3__TYPE_MASK 0xf0000000
14566#define SQ_IMG_RSRC_WORD3__TYPE__SHIFT 0x1c
14567#define SQ_IMG_RSRC_WORD4__DEPTH_MASK 0x1fff
14568#define SQ_IMG_RSRC_WORD4__DEPTH__SHIFT 0x0
14569#define SQ_IMG_RSRC_WORD4__PITCH_MASK 0x7ffe000
14570#define SQ_IMG_RSRC_WORD4__PITCH__SHIFT 0xd
14571#define SQ_IMG_RSRC_WORD5__BASE_ARRAY_MASK 0x1fff
14572#define SQ_IMG_RSRC_WORD5__BASE_ARRAY__SHIFT 0x0
14573#define SQ_IMG_RSRC_WORD5__LAST_ARRAY_MASK 0x3ffe000
14574#define SQ_IMG_RSRC_WORD5__LAST_ARRAY__SHIFT 0xd
14575#define SQ_IMG_RSRC_WORD6__MIN_LOD_WARN_MASK 0xfff
14576#define SQ_IMG_RSRC_WORD6__MIN_LOD_WARN__SHIFT 0x0
14577#define SQ_IMG_RSRC_WORD6__COUNTER_BANK_ID_MASK 0xff000
14578#define SQ_IMG_RSRC_WORD6__COUNTER_BANK_ID__SHIFT 0xc
14579#define SQ_IMG_RSRC_WORD6__LOD_HDW_CNT_EN_MASK 0x100000
14580#define SQ_IMG_RSRC_WORD6__LOD_HDW_CNT_EN__SHIFT 0x14
14581#define SQ_IMG_RSRC_WORD6__COMPRESSION_EN_MASK 0x200000
14582#define SQ_IMG_RSRC_WORD6__COMPRESSION_EN__SHIFT 0x15
14583#define SQ_IMG_RSRC_WORD6__ALPHA_IS_ON_MSB_MASK 0x400000
14584#define SQ_IMG_RSRC_WORD6__ALPHA_IS_ON_MSB__SHIFT 0x16
14585#define SQ_IMG_RSRC_WORD6__COLOR_TRANSFORM_MASK 0x800000
14586#define SQ_IMG_RSRC_WORD6__COLOR_TRANSFORM__SHIFT 0x17
14587#define SQ_IMG_RSRC_WORD6__LOST_ALPHA_BITS_MASK 0xf000000
14588#define SQ_IMG_RSRC_WORD6__LOST_ALPHA_BITS__SHIFT 0x18
14589#define SQ_IMG_RSRC_WORD6__LOST_COLOR_BITS_MASK 0xf0000000
14590#define SQ_IMG_RSRC_WORD6__LOST_COLOR_BITS__SHIFT 0x1c
14591#define SQ_IMG_RSRC_WORD7__META_DATA_ADDRESS_MASK 0xffffffff
14592#define SQ_IMG_RSRC_WORD7__META_DATA_ADDRESS__SHIFT 0x0
14593#define SQ_IMG_SAMP_WORD0__CLAMP_X_MASK 0x7
14594#define SQ_IMG_SAMP_WORD0__CLAMP_X__SHIFT 0x0
14595#define SQ_IMG_SAMP_WORD0__CLAMP_Y_MASK 0x38
14596#define SQ_IMG_SAMP_WORD0__CLAMP_Y__SHIFT 0x3
14597#define SQ_IMG_SAMP_WORD0__CLAMP_Z_MASK 0x1c0
14598#define SQ_IMG_SAMP_WORD0__CLAMP_Z__SHIFT 0x6
14599#define SQ_IMG_SAMP_WORD0__MAX_ANISO_RATIO_MASK 0xe00
14600#define SQ_IMG_SAMP_WORD0__MAX_ANISO_RATIO__SHIFT 0x9
14601#define SQ_IMG_SAMP_WORD0__DEPTH_COMPARE_FUNC_MASK 0x7000
14602#define SQ_IMG_SAMP_WORD0__DEPTH_COMPARE_FUNC__SHIFT 0xc
14603#define SQ_IMG_SAMP_WORD0__FORCE_UNNORMALIZED_MASK 0x8000
14604#define SQ_IMG_SAMP_WORD0__FORCE_UNNORMALIZED__SHIFT 0xf
14605#define SQ_IMG_SAMP_WORD0__ANISO_THRESHOLD_MASK 0x70000
14606#define SQ_IMG_SAMP_WORD0__ANISO_THRESHOLD__SHIFT 0x10
14607#define SQ_IMG_SAMP_WORD0__MC_COORD_TRUNC_MASK 0x80000
14608#define SQ_IMG_SAMP_WORD0__MC_COORD_TRUNC__SHIFT 0x13
14609#define SQ_IMG_SAMP_WORD0__FORCE_DEGAMMA_MASK 0x100000
14610#define SQ_IMG_SAMP_WORD0__FORCE_DEGAMMA__SHIFT 0x14
14611#define SQ_IMG_SAMP_WORD0__ANISO_BIAS_MASK 0x7e00000
14612#define SQ_IMG_SAMP_WORD0__ANISO_BIAS__SHIFT 0x15
14613#define SQ_IMG_SAMP_WORD0__TRUNC_COORD_MASK 0x8000000
14614#define SQ_IMG_SAMP_WORD0__TRUNC_COORD__SHIFT 0x1b
14615#define SQ_IMG_SAMP_WORD0__DISABLE_CUBE_WRAP_MASK 0x10000000
14616#define SQ_IMG_SAMP_WORD0__DISABLE_CUBE_WRAP__SHIFT 0x1c
14617#define SQ_IMG_SAMP_WORD0__FILTER_MODE_MASK 0x60000000
14618#define SQ_IMG_SAMP_WORD0__FILTER_MODE__SHIFT 0x1d
14619#define SQ_IMG_SAMP_WORD0__COMPAT_MODE_MASK 0x80000000
14620#define SQ_IMG_SAMP_WORD0__COMPAT_MODE__SHIFT 0x1f
14621#define SQ_IMG_SAMP_WORD1__MIN_LOD_MASK 0xfff
14622#define SQ_IMG_SAMP_WORD1__MIN_LOD__SHIFT 0x0
14623#define SQ_IMG_SAMP_WORD1__MAX_LOD_MASK 0xfff000
14624#define SQ_IMG_SAMP_WORD1__MAX_LOD__SHIFT 0xc
14625#define SQ_IMG_SAMP_WORD1__PERF_MIP_MASK 0xf000000
14626#define SQ_IMG_SAMP_WORD1__PERF_MIP__SHIFT 0x18
14627#define SQ_IMG_SAMP_WORD1__PERF_Z_MASK 0xf0000000
14628#define SQ_IMG_SAMP_WORD1__PERF_Z__SHIFT 0x1c
14629#define SQ_IMG_SAMP_WORD2__LOD_BIAS_MASK 0x3fff
14630#define SQ_IMG_SAMP_WORD2__LOD_BIAS__SHIFT 0x0
14631#define SQ_IMG_SAMP_WORD2__LOD_BIAS_SEC_MASK 0xfc000
14632#define SQ_IMG_SAMP_WORD2__LOD_BIAS_SEC__SHIFT 0xe
14633#define SQ_IMG_SAMP_WORD2__XY_MAG_FILTER_MASK 0x300000
14634#define SQ_IMG_SAMP_WORD2__XY_MAG_FILTER__SHIFT 0x14
14635#define SQ_IMG_SAMP_WORD2__XY_MIN_FILTER_MASK 0xc00000
14636#define SQ_IMG_SAMP_WORD2__XY_MIN_FILTER__SHIFT 0x16
14637#define SQ_IMG_SAMP_WORD2__Z_FILTER_MASK 0x3000000
14638#define SQ_IMG_SAMP_WORD2__Z_FILTER__SHIFT 0x18
14639#define SQ_IMG_SAMP_WORD2__MIP_FILTER_MASK 0xc000000
14640#define SQ_IMG_SAMP_WORD2__MIP_FILTER__SHIFT 0x1a
14641#define SQ_IMG_SAMP_WORD2__MIP_POINT_PRECLAMP_MASK 0x10000000
14642#define SQ_IMG_SAMP_WORD2__MIP_POINT_PRECLAMP__SHIFT 0x1c
14643#define SQ_IMG_SAMP_WORD2__DISABLE_LSB_CEIL_MASK 0x20000000
14644#define SQ_IMG_SAMP_WORD2__DISABLE_LSB_CEIL__SHIFT 0x1d
14645#define SQ_IMG_SAMP_WORD2__FILTER_PREC_FIX_MASK 0x40000000
14646#define SQ_IMG_SAMP_WORD2__FILTER_PREC_FIX__SHIFT 0x1e
14647#define SQ_IMG_SAMP_WORD2__ANISO_OVERRIDE_MASK 0x80000000
14648#define SQ_IMG_SAMP_WORD2__ANISO_OVERRIDE__SHIFT 0x1f
14649#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_PTR_MASK 0xfff
14650#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_PTR__SHIFT 0x0
14651#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_TYPE_MASK 0xc0000000
14652#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_TYPE__SHIFT 0x1e
14653#define SQ_FLAT_SCRATCH_WORD0__SIZE_MASK 0x7ffff
14654#define SQ_FLAT_SCRATCH_WORD0__SIZE__SHIFT 0x0
14655#define SQ_FLAT_SCRATCH_WORD1__OFFSET_MASK 0xffffff
14656#define SQ_FLAT_SCRATCH_WORD1__OFFSET__SHIFT 0x0
14657#define SQ_M0_GPR_IDX_WORD__INDEX_MASK 0xff
14658#define SQ_M0_GPR_IDX_WORD__INDEX__SHIFT 0x0
14659#define SQ_M0_GPR_IDX_WORD__VSRC0_REL_MASK 0x1000
14660#define SQ_M0_GPR_IDX_WORD__VSRC0_REL__SHIFT 0xc
14661#define SQ_M0_GPR_IDX_WORD__VSRC1_REL_MASK 0x2000
14662#define SQ_M0_GPR_IDX_WORD__VSRC1_REL__SHIFT 0xd
14663#define SQ_M0_GPR_IDX_WORD__VSRC2_REL_MASK 0x4000
14664#define SQ_M0_GPR_IDX_WORD__VSRC2_REL__SHIFT 0xe
14665#define SQ_M0_GPR_IDX_WORD__VDST_REL_MASK 0x8000
14666#define SQ_M0_GPR_IDX_WORD__VDST_REL__SHIFT 0xf
14667#define SQ_IND_INDEX__WAVE_ID_MASK 0xf
14668#define SQ_IND_INDEX__WAVE_ID__SHIFT 0x0
14669#define SQ_IND_INDEX__SIMD_ID_MASK 0x30
14670#define SQ_IND_INDEX__SIMD_ID__SHIFT 0x4
14671#define SQ_IND_INDEX__THREAD_ID_MASK 0xfc0
14672#define SQ_IND_INDEX__THREAD_ID__SHIFT 0x6
14673#define SQ_IND_INDEX__AUTO_INCR_MASK 0x1000
14674#define SQ_IND_INDEX__AUTO_INCR__SHIFT 0xc
14675#define SQ_IND_INDEX__FORCE_READ_MASK 0x2000
14676#define SQ_IND_INDEX__FORCE_READ__SHIFT 0xd
14677#define SQ_IND_INDEX__READ_TIMEOUT_MASK 0x4000
14678#define SQ_IND_INDEX__READ_TIMEOUT__SHIFT 0xe
14679#define SQ_IND_INDEX__UNINDEXED_MASK 0x8000
14680#define SQ_IND_INDEX__UNINDEXED__SHIFT 0xf
14681#define SQ_IND_INDEX__INDEX_MASK 0xffff0000
14682#define SQ_IND_INDEX__INDEX__SHIFT 0x10
14683#define SQ_CMD__CMD_MASK 0x7
14684#define SQ_CMD__CMD__SHIFT 0x0
14685#define SQ_CMD__MODE_MASK 0x70
14686#define SQ_CMD__MODE__SHIFT 0x4
14687#define SQ_CMD__CHECK_VMID_MASK 0x80
14688#define SQ_CMD__CHECK_VMID__SHIFT 0x7
14689#define SQ_CMD__DATA_MASK 0x700
14690#define SQ_CMD__DATA__SHIFT 0x8
14691#define SQ_CMD__WAVE_ID_MASK 0xf0000
14692#define SQ_CMD__WAVE_ID__SHIFT 0x10
14693#define SQ_CMD__SIMD_ID_MASK 0x300000
14694#define SQ_CMD__SIMD_ID__SHIFT 0x14
14695#define SQ_CMD__QUEUE_ID_MASK 0x7000000
14696#define SQ_CMD__QUEUE_ID__SHIFT 0x18
14697#define SQ_CMD__VM_ID_MASK 0xf0000000
14698#define SQ_CMD__VM_ID__SHIFT 0x1c
14699#define SQ_IND_DATA__DATA_MASK 0xffffffff
14700#define SQ_IND_DATA__DATA__SHIFT 0x0
14701#define SQ_REG_TIMESTAMP__TIMESTAMP_MASK 0xff
14702#define SQ_REG_TIMESTAMP__TIMESTAMP__SHIFT 0x0
14703#define SQ_CMD_TIMESTAMP__TIMESTAMP_MASK 0xff
14704#define SQ_CMD_TIMESTAMP__TIMESTAMP__SHIFT 0x0
14705#define SQ_HV_VMID_CTRL__DEFAULT_VMID_MASK 0xf
14706#define SQ_HV_VMID_CTRL__DEFAULT_VMID__SHIFT 0x0
14707#define SQ_HV_VMID_CTRL__ALLOWED_VMID_MASK_MASK 0xffff0
14708#define SQ_HV_VMID_CTRL__ALLOWED_VMID_MASK__SHIFT 0x4
14709#define SQ_WAVE_INST_DW0__INST_DW0_MASK 0xffffffff
14710#define SQ_WAVE_INST_DW0__INST_DW0__SHIFT 0x0
14711#define SQ_WAVE_INST_DW1__INST_DW1_MASK 0xffffffff
14712#define SQ_WAVE_INST_DW1__INST_DW1__SHIFT 0x0
14713#define SQ_WAVE_PC_LO__PC_LO_MASK 0xffffffff
14714#define SQ_WAVE_PC_LO__PC_LO__SHIFT 0x0
14715#define SQ_WAVE_PC_HI__PC_HI_MASK 0xffff
14716#define SQ_WAVE_PC_HI__PC_HI__SHIFT 0x0
14717#define SQ_WAVE_IB_DBG0__IBUF_ST_MASK 0x7
14718#define SQ_WAVE_IB_DBG0__IBUF_ST__SHIFT 0x0
14719#define SQ_WAVE_IB_DBG0__PC_INVALID_MASK 0x8
14720#define SQ_WAVE_IB_DBG0__PC_INVALID__SHIFT 0x3
14721#define SQ_WAVE_IB_DBG0__NEED_NEXT_DW_MASK 0x10
14722#define SQ_WAVE_IB_DBG0__NEED_NEXT_DW__SHIFT 0x4
14723#define SQ_WAVE_IB_DBG0__NO_PREFETCH_CNT_MASK 0xe0
14724#define SQ_WAVE_IB_DBG0__NO_PREFETCH_CNT__SHIFT 0x5
14725#define SQ_WAVE_IB_DBG0__IBUF_RPTR_MASK 0x300
14726#define SQ_WAVE_IB_DBG0__IBUF_RPTR__SHIFT 0x8
14727#define SQ_WAVE_IB_DBG0__IBUF_WPTR_MASK 0xc00
14728#define SQ_WAVE_IB_DBG0__IBUF_WPTR__SHIFT 0xa
14729#define SQ_WAVE_IB_DBG0__INST_STR_ST_MASK 0xf0000
14730#define SQ_WAVE_IB_DBG0__INST_STR_ST__SHIFT 0x10
14731#define SQ_WAVE_IB_DBG0__MISC_CNT_MASK 0xf00000
14732#define SQ_WAVE_IB_DBG0__MISC_CNT__SHIFT 0x14
14733#define SQ_WAVE_IB_DBG0__ECC_ST_MASK 0x3000000
14734#define SQ_WAVE_IB_DBG0__ECC_ST__SHIFT 0x18
14735#define SQ_WAVE_IB_DBG0__IS_HYB_MASK 0x4000000
14736#define SQ_WAVE_IB_DBG0__IS_HYB__SHIFT 0x1a
14737#define SQ_WAVE_IB_DBG0__HYB_CNT_MASK 0x18000000
14738#define SQ_WAVE_IB_DBG0__HYB_CNT__SHIFT 0x1b
14739#define SQ_WAVE_IB_DBG0__KILL_MASK 0x20000000
14740#define SQ_WAVE_IB_DBG0__KILL__SHIFT 0x1d
14741#define SQ_WAVE_IB_DBG0__NEED_KILL_IFETCH_MASK 0x40000000
14742#define SQ_WAVE_IB_DBG0__NEED_KILL_IFETCH__SHIFT 0x1e
14743#define SQ_WAVE_IB_DBG1__IXNACK_MASK 0x1
14744#define SQ_WAVE_IB_DBG1__IXNACK__SHIFT 0x0
14745#define SQ_WAVE_IB_DBG1__XNACK_MASK 0x2
14746#define SQ_WAVE_IB_DBG1__XNACK__SHIFT 0x1
14747#define SQ_WAVE_IB_DBG1__TA_NEED_RESET_MASK 0x4
14748#define SQ_WAVE_IB_DBG1__TA_NEED_RESET__SHIFT 0x2
14749#define SQ_WAVE_IB_DBG1__XCNT_MASK 0xf0
14750#define SQ_WAVE_IB_DBG1__XCNT__SHIFT 0x4
14751#define SQ_WAVE_IB_DBG1__QCNT_MASK 0xf00
14752#define SQ_WAVE_IB_DBG1__QCNT__SHIFT 0x8
14753#define SQ_WAVE_EXEC_LO__EXEC_LO_MASK 0xffffffff
14754#define SQ_WAVE_EXEC_LO__EXEC_LO__SHIFT 0x0
14755#define SQ_WAVE_EXEC_HI__EXEC_HI_MASK 0xffffffff
14756#define SQ_WAVE_EXEC_HI__EXEC_HI__SHIFT 0x0
14757#define SQ_WAVE_STATUS__SCC_MASK 0x1
14758#define SQ_WAVE_STATUS__SCC__SHIFT 0x0
14759#define SQ_WAVE_STATUS__SPI_PRIO_MASK 0x6
14760#define SQ_WAVE_STATUS__SPI_PRIO__SHIFT 0x1
14761#define SQ_WAVE_STATUS__USER_PRIO_MASK 0x18
14762#define SQ_WAVE_STATUS__USER_PRIO__SHIFT 0x3
14763#define SQ_WAVE_STATUS__PRIV_MASK 0x20
14764#define SQ_WAVE_STATUS__PRIV__SHIFT 0x5
14765#define SQ_WAVE_STATUS__TRAP_EN_MASK 0x40
14766#define SQ_WAVE_STATUS__TRAP_EN__SHIFT 0x6
14767#define SQ_WAVE_STATUS__TTRACE_EN_MASK 0x80
14768#define SQ_WAVE_STATUS__TTRACE_EN__SHIFT 0x7
14769#define SQ_WAVE_STATUS__EXPORT_RDY_MASK 0x100
14770#define SQ_WAVE_STATUS__EXPORT_RDY__SHIFT 0x8
14771#define SQ_WAVE_STATUS__EXECZ_MASK 0x200
14772#define SQ_WAVE_STATUS__EXECZ__SHIFT 0x9
14773#define SQ_WAVE_STATUS__VCCZ_MASK 0x400
14774#define SQ_WAVE_STATUS__VCCZ__SHIFT 0xa
14775#define SQ_WAVE_STATUS__IN_TG_MASK 0x800
14776#define SQ_WAVE_STATUS__IN_TG__SHIFT 0xb
14777#define SQ_WAVE_STATUS__IN_BARRIER_MASK 0x1000
14778#define SQ_WAVE_STATUS__IN_BARRIER__SHIFT 0xc
14779#define SQ_WAVE_STATUS__HALT_MASK 0x2000
14780#define SQ_WAVE_STATUS__HALT__SHIFT 0xd
14781#define SQ_WAVE_STATUS__TRAP_MASK 0x4000
14782#define SQ_WAVE_STATUS__TRAP__SHIFT 0xe
14783#define SQ_WAVE_STATUS__TTRACE_CU_EN_MASK 0x8000
14784#define SQ_WAVE_STATUS__TTRACE_CU_EN__SHIFT 0xf
14785#define SQ_WAVE_STATUS__VALID_MASK 0x10000
14786#define SQ_WAVE_STATUS__VALID__SHIFT 0x10
14787#define SQ_WAVE_STATUS__ECC_ERR_MASK 0x20000
14788#define SQ_WAVE_STATUS__ECC_ERR__SHIFT 0x11
14789#define SQ_WAVE_STATUS__SKIP_EXPORT_MASK 0x40000
14790#define SQ_WAVE_STATUS__SKIP_EXPORT__SHIFT 0x12
14791#define SQ_WAVE_STATUS__PERF_EN_MASK 0x80000
14792#define SQ_WAVE_STATUS__PERF_EN__SHIFT 0x13
14793#define SQ_WAVE_STATUS__COND_DBG_USER_MASK 0x100000
14794#define SQ_WAVE_STATUS__COND_DBG_USER__SHIFT 0x14
14795#define SQ_WAVE_STATUS__COND_DBG_SYS_MASK 0x200000
14796#define SQ_WAVE_STATUS__COND_DBG_SYS__SHIFT 0x15
14797#define SQ_WAVE_STATUS__ALLOW_REPLAY_MASK 0x400000
14798#define SQ_WAVE_STATUS__ALLOW_REPLAY__SHIFT 0x16
14799#define SQ_WAVE_STATUS__INST_ATC_MASK 0x800000
14800#define SQ_WAVE_STATUS__INST_ATC__SHIFT 0x17
14801#define SQ_WAVE_STATUS__MUST_EXPORT_MASK 0x8000000
14802#define SQ_WAVE_STATUS__MUST_EXPORT__SHIFT 0x1b
14803#define SQ_WAVE_MODE__FP_ROUND_MASK 0xf
14804#define SQ_WAVE_MODE__FP_ROUND__SHIFT 0x0
14805#define SQ_WAVE_MODE__FP_DENORM_MASK 0xf0
14806#define SQ_WAVE_MODE__FP_DENORM__SHIFT 0x4
14807#define SQ_WAVE_MODE__DX10_CLAMP_MASK 0x100
14808#define SQ_WAVE_MODE__DX10_CLAMP__SHIFT 0x8
14809#define SQ_WAVE_MODE__IEEE_MASK 0x200
14810#define SQ_WAVE_MODE__IEEE__SHIFT 0x9
14811#define SQ_WAVE_MODE__LOD_CLAMPED_MASK 0x400
14812#define SQ_WAVE_MODE__LOD_CLAMPED__SHIFT 0xa
14813#define SQ_WAVE_MODE__DEBUG_EN_MASK 0x800
14814#define SQ_WAVE_MODE__DEBUG_EN__SHIFT 0xb
14815#define SQ_WAVE_MODE__EXCP_EN_MASK 0x1ff000
14816#define SQ_WAVE_MODE__EXCP_EN__SHIFT 0xc
14817#define SQ_WAVE_MODE__GPR_IDX_EN_MASK 0x8000000
14818#define SQ_WAVE_MODE__GPR_IDX_EN__SHIFT 0x1b
14819#define SQ_WAVE_MODE__VSKIP_MASK 0x10000000
14820#define SQ_WAVE_MODE__VSKIP__SHIFT 0x1c
14821#define SQ_WAVE_MODE__CSP_MASK 0xe0000000
14822#define SQ_WAVE_MODE__CSP__SHIFT 0x1d
14823#define SQ_WAVE_TRAPSTS__EXCP_MASK 0x1ff
14824#define SQ_WAVE_TRAPSTS__EXCP__SHIFT 0x0
14825#define SQ_WAVE_TRAPSTS__SAVECTX_MASK 0x400
14826#define SQ_WAVE_TRAPSTS__SAVECTX__SHIFT 0xa
14827#define SQ_WAVE_TRAPSTS__EXCP_CYCLE_MASK 0x3f0000
14828#define SQ_WAVE_TRAPSTS__EXCP_CYCLE__SHIFT 0x10
14829#define SQ_WAVE_TRAPSTS__DP_RATE_MASK 0xe0000000
14830#define SQ_WAVE_TRAPSTS__DP_RATE__SHIFT 0x1d
14831#define SQ_WAVE_HW_ID__WAVE_ID_MASK 0xf
14832#define SQ_WAVE_HW_ID__WAVE_ID__SHIFT 0x0
14833#define SQ_WAVE_HW_ID__SIMD_ID_MASK 0x30
14834#define SQ_WAVE_HW_ID__SIMD_ID__SHIFT 0x4
14835#define SQ_WAVE_HW_ID__PIPE_ID_MASK 0xc0
14836#define SQ_WAVE_HW_ID__PIPE_ID__SHIFT 0x6
14837#define SQ_WAVE_HW_ID__CU_ID_MASK 0xf00
14838#define SQ_WAVE_HW_ID__CU_ID__SHIFT 0x8
14839#define SQ_WAVE_HW_ID__SH_ID_MASK 0x1000
14840#define SQ_WAVE_HW_ID__SH_ID__SHIFT 0xc
14841#define SQ_WAVE_HW_ID__SE_ID_MASK 0x6000
14842#define SQ_WAVE_HW_ID__SE_ID__SHIFT 0xd
14843#define SQ_WAVE_HW_ID__TG_ID_MASK 0xf0000
14844#define SQ_WAVE_HW_ID__TG_ID__SHIFT 0x10
14845#define SQ_WAVE_HW_ID__VM_ID_MASK 0xf00000
14846#define SQ_WAVE_HW_ID__VM_ID__SHIFT 0x14
14847#define SQ_WAVE_HW_ID__QUEUE_ID_MASK 0x7000000
14848#define SQ_WAVE_HW_ID__QUEUE_ID__SHIFT 0x18
14849#define SQ_WAVE_HW_ID__STATE_ID_MASK 0x38000000
14850#define SQ_WAVE_HW_ID__STATE_ID__SHIFT 0x1b
14851#define SQ_WAVE_HW_ID__ME_ID_MASK 0xc0000000
14852#define SQ_WAVE_HW_ID__ME_ID__SHIFT 0x1e
14853#define SQ_WAVE_GPR_ALLOC__VGPR_BASE_MASK 0x3f
14854#define SQ_WAVE_GPR_ALLOC__VGPR_BASE__SHIFT 0x0
14855#define SQ_WAVE_GPR_ALLOC__VGPR_SIZE_MASK 0x3f00
14856#define SQ_WAVE_GPR_ALLOC__VGPR_SIZE__SHIFT 0x8
14857#define SQ_WAVE_GPR_ALLOC__SGPR_BASE_MASK 0x3f0000
14858#define SQ_WAVE_GPR_ALLOC__SGPR_BASE__SHIFT 0x10
14859#define SQ_WAVE_GPR_ALLOC__SGPR_SIZE_MASK 0xf000000
14860#define SQ_WAVE_GPR_ALLOC__SGPR_SIZE__SHIFT 0x18
14861#define SQ_WAVE_LDS_ALLOC__LDS_BASE_MASK 0xff
14862#define SQ_WAVE_LDS_ALLOC__LDS_BASE__SHIFT 0x0
14863#define SQ_WAVE_LDS_ALLOC__LDS_SIZE_MASK 0x1ff000
14864#define SQ_WAVE_LDS_ALLOC__LDS_SIZE__SHIFT 0xc
14865#define SQ_WAVE_IB_STS__VM_CNT_MASK 0xf
14866#define SQ_WAVE_IB_STS__VM_CNT__SHIFT 0x0
14867#define SQ_WAVE_IB_STS__EXP_CNT_MASK 0x70
14868#define SQ_WAVE_IB_STS__EXP_CNT__SHIFT 0x4
14869#define SQ_WAVE_IB_STS__LGKM_CNT_MASK 0xf00
14870#define SQ_WAVE_IB_STS__LGKM_CNT__SHIFT 0x8
14871#define SQ_WAVE_IB_STS__VALU_CNT_MASK 0x7000
14872#define SQ_WAVE_IB_STS__VALU_CNT__SHIFT 0xc
14873#define SQ_WAVE_IB_STS__FIRST_REPLAY_MASK 0x8000
14874#define SQ_WAVE_IB_STS__FIRST_REPLAY__SHIFT 0xf
14875#define SQ_WAVE_IB_STS__RCNT_MASK 0xf0000
14876#define SQ_WAVE_IB_STS__RCNT__SHIFT 0x10
14877#define SQ_WAVE_M0__M0_MASK 0xffffffff
14878#define SQ_WAVE_M0__M0__SHIFT 0x0
14879#define SQ_WAVE_TBA_LO__ADDR_LO_MASK 0xffffffff
14880#define SQ_WAVE_TBA_LO__ADDR_LO__SHIFT 0x0
14881#define SQ_WAVE_TBA_HI__ADDR_HI_MASK 0xff
14882#define SQ_WAVE_TBA_HI__ADDR_HI__SHIFT 0x0
14883#define SQ_WAVE_TMA_LO__ADDR_LO_MASK 0xffffffff
14884#define SQ_WAVE_TMA_LO__ADDR_LO__SHIFT 0x0
14885#define SQ_WAVE_TMA_HI__ADDR_HI_MASK 0xff
14886#define SQ_WAVE_TMA_HI__ADDR_HI__SHIFT 0x0
14887#define SQ_WAVE_TTMP0__DATA_MASK 0xffffffff
14888#define SQ_WAVE_TTMP0__DATA__SHIFT 0x0
14889#define SQ_WAVE_TTMP1__DATA_MASK 0xffffffff
14890#define SQ_WAVE_TTMP1__DATA__SHIFT 0x0
14891#define SQ_WAVE_TTMP2__DATA_MASK 0xffffffff
14892#define SQ_WAVE_TTMP2__DATA__SHIFT 0x0
14893#define SQ_WAVE_TTMP3__DATA_MASK 0xffffffff
14894#define SQ_WAVE_TTMP3__DATA__SHIFT 0x0
14895#define SQ_WAVE_TTMP4__DATA_MASK 0xffffffff
14896#define SQ_WAVE_TTMP4__DATA__SHIFT 0x0
14897#define SQ_WAVE_TTMP5__DATA_MASK 0xffffffff
14898#define SQ_WAVE_TTMP5__DATA__SHIFT 0x0
14899#define SQ_WAVE_TTMP6__DATA_MASK 0xffffffff
14900#define SQ_WAVE_TTMP6__DATA__SHIFT 0x0
14901#define SQ_WAVE_TTMP7__DATA_MASK 0xffffffff
14902#define SQ_WAVE_TTMP7__DATA__SHIFT 0x0
14903#define SQ_WAVE_TTMP8__DATA_MASK 0xffffffff
14904#define SQ_WAVE_TTMP8__DATA__SHIFT 0x0
14905#define SQ_WAVE_TTMP9__DATA_MASK 0xffffffff
14906#define SQ_WAVE_TTMP9__DATA__SHIFT 0x0
14907#define SQ_WAVE_TTMP10__DATA_MASK 0xffffffff
14908#define SQ_WAVE_TTMP10__DATA__SHIFT 0x0
14909#define SQ_WAVE_TTMP11__DATA_MASK 0xffffffff
14910#define SQ_WAVE_TTMP11__DATA__SHIFT 0x0
14911#define SQ_DEBUG_STS_GLOBAL__BUSY_MASK 0x1
14912#define SQ_DEBUG_STS_GLOBAL__BUSY__SHIFT 0x0
14913#define SQ_DEBUG_STS_GLOBAL__INTERRUPT_MSG_BUSY_MASK 0x2
14914#define SQ_DEBUG_STS_GLOBAL__INTERRUPT_MSG_BUSY__SHIFT 0x1
14915#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SH0_MASK 0xfff0
14916#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SH0__SHIFT 0x4
14917#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SH1_MASK 0xfff0000
14918#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SH1__SHIFT 0x10
14919#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_GFX0_MASK 0xff
14920#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_GFX0__SHIFT 0x0
14921#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_GFX1_MASK 0xff00
14922#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_GFX1__SHIFT 0x8
14923#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_IMMED_MASK 0xff0000
14924#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_IMMED__SHIFT 0x10
14925#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_HOST_MASK 0xff000000
14926#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_HOST__SHIFT 0x18
14927#define SQ_DEBUG_STS_GLOBAL3__FIFO_LEVEL_HOST_CMD_MASK 0xf
14928#define SQ_DEBUG_STS_GLOBAL3__FIFO_LEVEL_HOST_CMD__SHIFT 0x0
14929#define SQ_DEBUG_STS_GLOBAL3__FIFO_LEVEL_HOST_REG_MASK 0x3f0
14930#define SQ_DEBUG_STS_GLOBAL3__FIFO_LEVEL_HOST_REG__SHIFT 0x4
14931#define SQ_DEBUG_STS_LOCAL__BUSY_MASK 0x1
14932#define SQ_DEBUG_STS_LOCAL__BUSY__SHIFT 0x0
14933#define SQ_DEBUG_STS_LOCAL__WAVE_LEVEL_MASK 0x3f0
14934#define SQ_DEBUG_STS_LOCAL__WAVE_LEVEL__SHIFT 0x4
14935#define SQ_DEBUG_CTRL_LOCAL__UNUSED_MASK 0xff
14936#define SQ_DEBUG_CTRL_LOCAL__UNUSED__SHIFT 0x0
14937#define SH_MEM_BASES__PRIVATE_BASE_MASK 0xffff
14938#define SH_MEM_BASES__PRIVATE_BASE__SHIFT 0x0
14939#define SH_MEM_BASES__SHARED_BASE_MASK 0xffff0000
14940#define SH_MEM_BASES__SHARED_BASE__SHIFT 0x10
14941#define SH_MEM_APE1_BASE__BASE_MASK 0xffffffff
14942#define SH_MEM_APE1_BASE__BASE__SHIFT 0x0
14943#define SH_MEM_APE1_LIMIT__LIMIT_MASK 0xffffffff
14944#define SH_MEM_APE1_LIMIT__LIMIT__SHIFT 0x0
14945#define SH_MEM_CONFIG__ADDRESS_MODE_MASK 0x3
14946#define SH_MEM_CONFIG__ADDRESS_MODE__SHIFT 0x0
14947#define SH_MEM_CONFIG__PRIVATE_ATC_MASK 0x4
14948#define SH_MEM_CONFIG__PRIVATE_ATC__SHIFT 0x2
14949#define SH_MEM_CONFIG__ALIGNMENT_MODE_MASK 0x18
14950#define SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT 0x3
14951#define SH_MEM_CONFIG__DEFAULT_MTYPE_MASK 0xe0
14952#define SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT 0x5
14953#define SH_MEM_CONFIG__APE1_MTYPE_MASK 0x700
14954#define SH_MEM_CONFIG__APE1_MTYPE__SHIFT 0x8
14955#define SH_MEM_CONFIG__APE1_ATC_MASK 0x800
14956#define SH_MEM_CONFIG__APE1_ATC__SHIFT 0xb
14957#define SQ_THREAD_TRACE_WORD_CMN__TOKEN_TYPE_MASK 0xf
14958#define SQ_THREAD_TRACE_WORD_CMN__TOKEN_TYPE__SHIFT 0x0
14959#define SQ_THREAD_TRACE_WORD_CMN__TIME_DELTA_MASK 0x10
14960#define SQ_THREAD_TRACE_WORD_CMN__TIME_DELTA__SHIFT 0x4
14961#define SQ_THREAD_TRACE_WORD_INST__TOKEN_TYPE_MASK 0xf
14962#define SQ_THREAD_TRACE_WORD_INST__TOKEN_TYPE__SHIFT 0x0
14963#define SQ_THREAD_TRACE_WORD_INST__TIME_DELTA_MASK 0x10
14964#define SQ_THREAD_TRACE_WORD_INST__TIME_DELTA__SHIFT 0x4
14965#define SQ_THREAD_TRACE_WORD_INST__WAVE_ID_MASK 0x1e0
14966#define SQ_THREAD_TRACE_WORD_INST__WAVE_ID__SHIFT 0x5
14967#define SQ_THREAD_TRACE_WORD_INST__SIMD_ID_MASK 0x600
14968#define SQ_THREAD_TRACE_WORD_INST__SIMD_ID__SHIFT 0x9
14969#define SQ_THREAD_TRACE_WORD_INST__INST_TYPE_MASK 0xf800
14970#define SQ_THREAD_TRACE_WORD_INST__INST_TYPE__SHIFT 0xb
14971#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TOKEN_TYPE_MASK 0xf
14972#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TOKEN_TYPE__SHIFT 0x0
14973#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TIME_DELTA_MASK 0x10
14974#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TIME_DELTA__SHIFT 0x4
14975#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__WAVE_ID_MASK 0x1e0
14976#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__WAVE_ID__SHIFT 0x5
14977#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__SIMD_ID_MASK 0x600
14978#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__SIMD_ID__SHIFT 0x9
14979#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__PC_LO_MASK 0xffff0000
14980#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__PC_LO__SHIFT 0x10
14981#define SQ_THREAD_TRACE_WORD_INST_PC_2_OF_2__PC_HI_MASK 0xffffff
14982#define SQ_THREAD_TRACE_WORD_INST_PC_2_OF_2__PC_HI__SHIFT 0x0
14983#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TOKEN_TYPE_MASK 0xf
14984#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TOKEN_TYPE__SHIFT 0x0
14985#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TIME_DELTA_MASK 0x10
14986#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TIME_DELTA__SHIFT 0x4
14987#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SH_ID_MASK 0x20
14988#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SH_ID__SHIFT 0x5
14989#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__CU_ID_MASK 0x3c0
14990#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__CU_ID__SHIFT 0x6
14991#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__WAVE_ID_MASK 0x3c00
14992#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__WAVE_ID__SHIFT 0xa
14993#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SIMD_ID_MASK 0xc000
14994#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SIMD_ID__SHIFT 0xe
14995#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__DATA_LO_MASK 0xffff0000
14996#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__DATA_LO__SHIFT 0x10
14997#define SQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2__DATA_HI_MASK 0xffff
14998#define SQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2__DATA_HI__SHIFT 0x0
14999#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TOKEN_TYPE_MASK 0xf
15000#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TOKEN_TYPE__SHIFT 0x0
15001#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TIME_LO_MASK 0xffff0000
15002#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TIME_LO__SHIFT 0x10
15003#define SQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2__TIME_HI_MASK 0xffffffff
15004#define SQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2__TIME_HI__SHIFT 0x0
15005#define SQ_THREAD_TRACE_WORD_WAVE__TOKEN_TYPE_MASK 0xf
15006#define SQ_THREAD_TRACE_WORD_WAVE__TOKEN_TYPE__SHIFT 0x0
15007#define SQ_THREAD_TRACE_WORD_WAVE__TIME_DELTA_MASK 0x10
15008#define SQ_THREAD_TRACE_WORD_WAVE__TIME_DELTA__SHIFT 0x4
15009#define SQ_THREAD_TRACE_WORD_WAVE__SH_ID_MASK 0x20
15010#define SQ_THREAD_TRACE_WORD_WAVE__SH_ID__SHIFT 0x5
15011#define SQ_THREAD_TRACE_WORD_WAVE__CU_ID_MASK 0x3c0
15012#define SQ_THREAD_TRACE_WORD_WAVE__CU_ID__SHIFT 0x6
15013#define SQ_THREAD_TRACE_WORD_WAVE__WAVE_ID_MASK 0x3c00
15014#define SQ_THREAD_TRACE_WORD_WAVE__WAVE_ID__SHIFT 0xa
15015#define SQ_THREAD_TRACE_WORD_WAVE__SIMD_ID_MASK 0xc000
15016#define SQ_THREAD_TRACE_WORD_WAVE__SIMD_ID__SHIFT 0xe
15017#define SQ_THREAD_TRACE_WORD_MISC__TOKEN_TYPE_MASK 0xf
15018#define SQ_THREAD_TRACE_WORD_MISC__TOKEN_TYPE__SHIFT 0x0
15019#define SQ_THREAD_TRACE_WORD_MISC__TIME_DELTA_MASK 0xff0
15020#define SQ_THREAD_TRACE_WORD_MISC__TIME_DELTA__SHIFT 0x4
15021#define SQ_THREAD_TRACE_WORD_MISC__SH_ID_MASK 0x1000
15022#define SQ_THREAD_TRACE_WORD_MISC__SH_ID__SHIFT 0xc
15023#define SQ_THREAD_TRACE_WORD_MISC__MISC_TOKEN_TYPE_MASK 0xe000
15024#define SQ_THREAD_TRACE_WORD_MISC__MISC_TOKEN_TYPE__SHIFT 0xd
15025#define SQ_THREAD_TRACE_WORD_WAVE_START__TOKEN_TYPE_MASK 0xf
15026#define SQ_THREAD_TRACE_WORD_WAVE_START__TOKEN_TYPE__SHIFT 0x0
15027#define SQ_THREAD_TRACE_WORD_WAVE_START__TIME_DELTA_MASK 0x10
15028#define SQ_THREAD_TRACE_WORD_WAVE_START__TIME_DELTA__SHIFT 0x4
15029#define SQ_THREAD_TRACE_WORD_WAVE_START__SH_ID_MASK 0x20
15030#define SQ_THREAD_TRACE_WORD_WAVE_START__SH_ID__SHIFT 0x5
15031#define SQ_THREAD_TRACE_WORD_WAVE_START__CU_ID_MASK 0x3c0
15032#define SQ_THREAD_TRACE_WORD_WAVE_START__CU_ID__SHIFT 0x6
15033#define SQ_THREAD_TRACE_WORD_WAVE_START__WAVE_ID_MASK 0x3c00
15034#define SQ_THREAD_TRACE_WORD_WAVE_START__WAVE_ID__SHIFT 0xa
15035#define SQ_THREAD_TRACE_WORD_WAVE_START__SIMD_ID_MASK 0xc000
15036#define SQ_THREAD_TRACE_WORD_WAVE_START__SIMD_ID__SHIFT 0xe
15037#define SQ_THREAD_TRACE_WORD_WAVE_START__DISPATCHER_MASK 0x1f0000
15038#define SQ_THREAD_TRACE_WORD_WAVE_START__DISPATCHER__SHIFT 0x10
15039#define SQ_THREAD_TRACE_WORD_WAVE_START__VS_NO_ALLOC_OR_GROUPED_MASK 0x200000
15040#define SQ_THREAD_TRACE_WORD_WAVE_START__VS_NO_ALLOC_OR_GROUPED__SHIFT 0x15
15041#define SQ_THREAD_TRACE_WORD_WAVE_START__COUNT_MASK 0x1fc00000
15042#define SQ_THREAD_TRACE_WORD_WAVE_START__COUNT__SHIFT 0x16
15043#define SQ_THREAD_TRACE_WORD_WAVE_START__TG_ID_MASK 0xe0000000
15044#define SQ_THREAD_TRACE_WORD_WAVE_START__TG_ID__SHIFT 0x1d
15045#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TOKEN_TYPE_MASK 0xf
15046#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TOKEN_TYPE__SHIFT 0x0
15047#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TIME_DELTA_MASK 0x10
15048#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TIME_DELTA__SHIFT 0x4
15049#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__PIPE_ID_MASK 0x60
15050#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__PIPE_ID__SHIFT 0x5
15051#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__ME_ID_MASK 0x180
15052#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__ME_ID__SHIFT 0x7
15053#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_DROPPED_PREV_MASK 0x200
15054#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_DROPPED_PREV__SHIFT 0x9
15055#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_TYPE_MASK 0x1c00
15056#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_TYPE__SHIFT 0xa
15057#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_PRIV_MASK 0x4000
15058#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_PRIV__SHIFT 0xe
15059#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_OP_MASK 0x8000
15060#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_OP__SHIFT 0xf
15061#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_ADDR_MASK 0xffff0000
15062#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_ADDR__SHIFT 0x10
15063#define SQ_THREAD_TRACE_WORD_REG_2_OF_2__DATA_MASK 0xffffffff
15064#define SQ_THREAD_TRACE_WORD_REG_2_OF_2__DATA__SHIFT 0x0
15065#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__TOKEN_TYPE_MASK 0xf
15066#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__TOKEN_TYPE__SHIFT 0x0
15067#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__TIME_DELTA_MASK 0x10
15068#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__TIME_DELTA__SHIFT 0x4
15069#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__PIPE_ID_MASK 0x60
15070#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__PIPE_ID__SHIFT 0x5
15071#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__ME_ID_MASK 0x180
15072#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__ME_ID__SHIFT 0x7
15073#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__REG_ADDR_MASK 0xfe00
15074#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__REG_ADDR__SHIFT 0x9
15075#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__DATA_LO_MASK 0xffff0000
15076#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__DATA_LO__SHIFT 0x10
15077#define SQ_THREAD_TRACE_WORD_REG_CS_2_OF_2__DATA_HI_MASK 0xffff
15078#define SQ_THREAD_TRACE_WORD_REG_CS_2_OF_2__DATA_HI__SHIFT 0x0
15079#define SQ_THREAD_TRACE_WORD_EVENT__TOKEN_TYPE_MASK 0xf
15080#define SQ_THREAD_TRACE_WORD_EVENT__TOKEN_TYPE__SHIFT 0x0
15081#define SQ_THREAD_TRACE_WORD_EVENT__TIME_DELTA_MASK 0x10
15082#define SQ_THREAD_TRACE_WORD_EVENT__TIME_DELTA__SHIFT 0x4
15083#define SQ_THREAD_TRACE_WORD_EVENT__SH_ID_MASK 0x20
15084#define SQ_THREAD_TRACE_WORD_EVENT__SH_ID__SHIFT 0x5
15085#define SQ_THREAD_TRACE_WORD_EVENT__STAGE_MASK 0x1c0
15086#define SQ_THREAD_TRACE_WORD_EVENT__STAGE__SHIFT 0x6
15087#define SQ_THREAD_TRACE_WORD_EVENT__EVENT_TYPE_MASK 0xfc00
15088#define SQ_THREAD_TRACE_WORD_EVENT__EVENT_TYPE__SHIFT 0xa
15089#define SQ_THREAD_TRACE_WORD_ISSUE__TOKEN_TYPE_MASK 0xf
15090#define SQ_THREAD_TRACE_WORD_ISSUE__TOKEN_TYPE__SHIFT 0x0
15091#define SQ_THREAD_TRACE_WORD_ISSUE__TIME_DELTA_MASK 0x10
15092#define SQ_THREAD_TRACE_WORD_ISSUE__TIME_DELTA__SHIFT 0x4
15093#define SQ_THREAD_TRACE_WORD_ISSUE__SIMD_ID_MASK 0x60
15094#define SQ_THREAD_TRACE_WORD_ISSUE__SIMD_ID__SHIFT 0x5
15095#define SQ_THREAD_TRACE_WORD_ISSUE__INST0_MASK 0x300
15096#define SQ_THREAD_TRACE_WORD_ISSUE__INST0__SHIFT 0x8
15097#define SQ_THREAD_TRACE_WORD_ISSUE__INST1_MASK 0xc00
15098#define SQ_THREAD_TRACE_WORD_ISSUE__INST1__SHIFT 0xa
15099#define SQ_THREAD_TRACE_WORD_ISSUE__INST2_MASK 0x3000
15100#define SQ_THREAD_TRACE_WORD_ISSUE__INST2__SHIFT 0xc
15101#define SQ_THREAD_TRACE_WORD_ISSUE__INST3_MASK 0xc000
15102#define SQ_THREAD_TRACE_WORD_ISSUE__INST3__SHIFT 0xe
15103#define SQ_THREAD_TRACE_WORD_ISSUE__INST4_MASK 0x30000
15104#define SQ_THREAD_TRACE_WORD_ISSUE__INST4__SHIFT 0x10
15105#define SQ_THREAD_TRACE_WORD_ISSUE__INST5_MASK 0xc0000
15106#define SQ_THREAD_TRACE_WORD_ISSUE__INST5__SHIFT 0x12
15107#define SQ_THREAD_TRACE_WORD_ISSUE__INST6_MASK 0x300000
15108#define SQ_THREAD_TRACE_WORD_ISSUE__INST6__SHIFT 0x14
15109#define SQ_THREAD_TRACE_WORD_ISSUE__INST7_MASK 0xc00000
15110#define SQ_THREAD_TRACE_WORD_ISSUE__INST7__SHIFT 0x16
15111#define SQ_THREAD_TRACE_WORD_ISSUE__INST8_MASK 0x3000000
15112#define SQ_THREAD_TRACE_WORD_ISSUE__INST8__SHIFT 0x18
15113#define SQ_THREAD_TRACE_WORD_ISSUE__INST9_MASK 0xc000000
15114#define SQ_THREAD_TRACE_WORD_ISSUE__INST9__SHIFT 0x1a
15115#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TOKEN_TYPE_MASK 0xf
15116#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TOKEN_TYPE__SHIFT 0x0
15117#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TIME_DELTA_MASK 0x10
15118#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TIME_DELTA__SHIFT 0x4
15119#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__SH_ID_MASK 0x20
15120#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__SH_ID__SHIFT 0x5
15121#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CU_ID_MASK 0x3c0
15122#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CU_ID__SHIFT 0x6
15123#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR_BANK_MASK 0xc00
15124#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR_BANK__SHIFT 0xa
15125#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR0_MASK 0x1fff000
15126#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR0__SHIFT 0xc
15127#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR1_LO_MASK 0xfe000000
15128#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR1_LO__SHIFT 0x19
15129#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR1_HI_MASK 0x3f
15130#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR1_HI__SHIFT 0x0
15131#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR2_MASK 0x7ffc0
15132#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR2__SHIFT 0x6
15133#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR3_MASK 0xfff80000
15134#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR3__SHIFT 0x13
15135#define SQ_WREXEC_EXEC_LO__ADDR_LO_MASK 0xffffffff
15136#define SQ_WREXEC_EXEC_LO__ADDR_LO__SHIFT 0x0
15137#define SQ_WREXEC_EXEC_HI__ADDR_HI_MASK 0xffff
15138#define SQ_WREXEC_EXEC_HI__ADDR_HI__SHIFT 0x0
15139#define SQ_WREXEC_EXEC_HI__FIRST_WAVE_MASK 0x4000000
15140#define SQ_WREXEC_EXEC_HI__FIRST_WAVE__SHIFT 0x1a
15141#define SQ_WREXEC_EXEC_HI__ATC_MASK 0x8000000
15142#define SQ_WREXEC_EXEC_HI__ATC__SHIFT 0x1b
15143#define SQ_WREXEC_EXEC_HI__MTYPE_MASK 0x70000000
15144#define SQ_WREXEC_EXEC_HI__MTYPE__SHIFT 0x1c
15145#define SQ_WREXEC_EXEC_HI__MSB_MASK 0x80000000
15146#define SQ_WREXEC_EXEC_HI__MSB__SHIFT 0x1f
15147#define SQC_GATCL1_CNTL__RESERVED_MASK 0x3ffff
15148#define SQC_GATCL1_CNTL__RESERVED__SHIFT 0x0
15149#define SQC_GATCL1_CNTL__DCACHE_INVALIDATE_ALL_VMID_MASK 0x40000
15150#define SQC_GATCL1_CNTL__DCACHE_INVALIDATE_ALL_VMID__SHIFT 0x12
15151#define SQC_GATCL1_CNTL__DCACHE_FORCE_MISS_MASK 0x80000
15152#define SQC_GATCL1_CNTL__DCACHE_FORCE_MISS__SHIFT 0x13
15153#define SQC_GATCL1_CNTL__DCACHE_FORCE_IN_ORDER_MASK 0x100000
15154#define SQC_GATCL1_CNTL__DCACHE_FORCE_IN_ORDER__SHIFT 0x14
15155#define SQC_GATCL1_CNTL__DCACHE_REDUCE_FIFO_DEPTH_BY_2_MASK 0x600000
15156#define SQC_GATCL1_CNTL__DCACHE_REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x15
15157#define SQC_GATCL1_CNTL__DCACHE_REDUCE_CACHE_SIZE_BY_2_MASK 0x1800000
15158#define SQC_GATCL1_CNTL__DCACHE_REDUCE_CACHE_SIZE_BY_2__SHIFT 0x17
15159#define SQC_GATCL1_CNTL__ICACHE_INVALIDATE_ALL_VMID_MASK 0x2000000
15160#define SQC_GATCL1_CNTL__ICACHE_INVALIDATE_ALL_VMID__SHIFT 0x19
15161#define SQC_GATCL1_CNTL__ICACHE_FORCE_MISS_MASK 0x4000000
15162#define SQC_GATCL1_CNTL__ICACHE_FORCE_MISS__SHIFT 0x1a
15163#define SQC_GATCL1_CNTL__ICACHE_FORCE_IN_ORDER_MASK 0x8000000
15164#define SQC_GATCL1_CNTL__ICACHE_FORCE_IN_ORDER__SHIFT 0x1b
15165#define SQC_GATCL1_CNTL__ICACHE_REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000
15166#define SQC_GATCL1_CNTL__ICACHE_REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
15167#define SQC_GATCL1_CNTL__ICACHE_REDUCE_CACHE_SIZE_BY_2_MASK 0xc0000000
15168#define SQC_GATCL1_CNTL__ICACHE_REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
15169#define SQC_ATC_EDC_GATCL1_CNT__ICACHE_DATA_SEC_MASK 0xff
15170#define SQC_ATC_EDC_GATCL1_CNT__ICACHE_DATA_SEC__SHIFT 0x0
15171#define SQC_ATC_EDC_GATCL1_CNT__DCACHE_DATA_SEC_MASK 0xff0000
15172#define SQC_ATC_EDC_GATCL1_CNT__DCACHE_DATA_SEC__SHIFT 0x10
15173#define SQ_INTERRUPT_WORD_CMN__SE_ID_MASK 0x3000000
15174#define SQ_INTERRUPT_WORD_CMN__SE_ID__SHIFT 0x18
15175#define SQ_INTERRUPT_WORD_CMN__ENCODING_MASK 0xc000000
15176#define SQ_INTERRUPT_WORD_CMN__ENCODING__SHIFT 0x1a
15177#define SQ_INTERRUPT_WORD_AUTO__THREAD_TRACE_MASK 0x1
15178#define SQ_INTERRUPT_WORD_AUTO__THREAD_TRACE__SHIFT 0x0
15179#define SQ_INTERRUPT_WORD_AUTO__WLT_MASK 0x2
15180#define SQ_INTERRUPT_WORD_AUTO__WLT__SHIFT 0x1
15181#define SQ_INTERRUPT_WORD_AUTO__THREAD_TRACE_BUF_FULL_MASK 0x4
15182#define SQ_INTERRUPT_WORD_AUTO__THREAD_TRACE_BUF_FULL__SHIFT 0x2
15183#define SQ_INTERRUPT_WORD_AUTO__REG_TIMESTAMP_MASK 0x8
15184#define SQ_INTERRUPT_WORD_AUTO__REG_TIMESTAMP__SHIFT 0x3
15185#define SQ_INTERRUPT_WORD_AUTO__CMD_TIMESTAMP_MASK 0x10
15186#define SQ_INTERRUPT_WORD_AUTO__CMD_TIMESTAMP__SHIFT 0x4
15187#define SQ_INTERRUPT_WORD_AUTO__HOST_CMD_OVERFLOW_MASK 0x20
15188#define SQ_INTERRUPT_WORD_AUTO__HOST_CMD_OVERFLOW__SHIFT 0x5
15189#define SQ_INTERRUPT_WORD_AUTO__HOST_REG_OVERFLOW_MASK 0x40
15190#define SQ_INTERRUPT_WORD_AUTO__HOST_REG_OVERFLOW__SHIFT 0x6
15191#define SQ_INTERRUPT_WORD_AUTO__IMMED_OVERFLOW_MASK 0x80
15192#define SQ_INTERRUPT_WORD_AUTO__IMMED_OVERFLOW__SHIFT 0x7
15193#define SQ_INTERRUPT_WORD_AUTO__SE_ID_MASK 0x3000000
15194#define SQ_INTERRUPT_WORD_AUTO__SE_ID__SHIFT 0x18
15195#define SQ_INTERRUPT_WORD_AUTO__ENCODING_MASK 0xc000000
15196#define SQ_INTERRUPT_WORD_AUTO__ENCODING__SHIFT 0x1a
15197#define SQ_INTERRUPT_WORD_WAVE__DATA_MASK 0xff
15198#define SQ_INTERRUPT_WORD_WAVE__DATA__SHIFT 0x0
15199#define SQ_INTERRUPT_WORD_WAVE__SH_ID_MASK 0x100
15200#define SQ_INTERRUPT_WORD_WAVE__SH_ID__SHIFT 0x8
15201#define SQ_INTERRUPT_WORD_WAVE__PRIV_MASK 0x200
15202#define SQ_INTERRUPT_WORD_WAVE__PRIV__SHIFT 0x9
15203#define SQ_INTERRUPT_WORD_WAVE__VM_ID_MASK 0x3c00
15204#define SQ_INTERRUPT_WORD_WAVE__VM_ID__SHIFT 0xa
15205#define SQ_INTERRUPT_WORD_WAVE__WAVE_ID_MASK 0x3c000
15206#define SQ_INTERRUPT_WORD_WAVE__WAVE_ID__SHIFT 0xe
15207#define SQ_INTERRUPT_WORD_WAVE__SIMD_ID_MASK 0xc0000
15208#define SQ_INTERRUPT_WORD_WAVE__SIMD_ID__SHIFT 0x12
15209#define SQ_INTERRUPT_WORD_WAVE__CU_ID_MASK 0xf00000
15210#define SQ_INTERRUPT_WORD_WAVE__CU_ID__SHIFT 0x14
15211#define SQ_INTERRUPT_WORD_WAVE__SE_ID_MASK 0x3000000
15212#define SQ_INTERRUPT_WORD_WAVE__SE_ID__SHIFT 0x18
15213#define SQ_INTERRUPT_WORD_WAVE__ENCODING_MASK 0xc000000
15214#define SQ_INTERRUPT_WORD_WAVE__ENCODING__SHIFT 0x1a
15215#define SQ_SOP2__SSRC0_MASK 0xff
15216#define SQ_SOP2__SSRC0__SHIFT 0x0
15217#define SQ_SOP2__SSRC1_MASK 0xff00
15218#define SQ_SOP2__SSRC1__SHIFT 0x8
15219#define SQ_SOP2__SDST_MASK 0x7f0000
15220#define SQ_SOP2__SDST__SHIFT 0x10
15221#define SQ_SOP2__OP_MASK 0x3f800000
15222#define SQ_SOP2__OP__SHIFT 0x17
15223#define SQ_SOP2__ENCODING_MASK 0xc0000000
15224#define SQ_SOP2__ENCODING__SHIFT 0x1e
15225#define SQ_VOP1__SRC0_MASK 0x1ff
15226#define SQ_VOP1__SRC0__SHIFT 0x0
15227#define SQ_VOP1__OP_MASK 0x1fe00
15228#define SQ_VOP1__OP__SHIFT 0x9
15229#define SQ_VOP1__VDST_MASK 0x1fe0000
15230#define SQ_VOP1__VDST__SHIFT 0x11
15231#define SQ_VOP1__ENCODING_MASK 0xfe000000
15232#define SQ_VOP1__ENCODING__SHIFT 0x19
15233#define SQ_MTBUF_1__VADDR_MASK 0xff
15234#define SQ_MTBUF_1__VADDR__SHIFT 0x0
15235#define SQ_MTBUF_1__VDATA_MASK 0xff00
15236#define SQ_MTBUF_1__VDATA__SHIFT 0x8
15237#define SQ_MTBUF_1__SRSRC_MASK 0x1f0000
15238#define SQ_MTBUF_1__SRSRC__SHIFT 0x10
15239#define SQ_MTBUF_1__SLC_MASK 0x400000
15240#define SQ_MTBUF_1__SLC__SHIFT 0x16
15241#define SQ_MTBUF_1__TFE_MASK 0x800000
15242#define SQ_MTBUF_1__TFE__SHIFT 0x17
15243#define SQ_MTBUF_1__SOFFSET_MASK 0xff000000
15244#define SQ_MTBUF_1__SOFFSET__SHIFT 0x18
15245#define SQ_EXP_1__VSRC0_MASK 0xff
15246#define SQ_EXP_1__VSRC0__SHIFT 0x0
15247#define SQ_EXP_1__VSRC1_MASK 0xff00
15248#define SQ_EXP_1__VSRC1__SHIFT 0x8
15249#define SQ_EXP_1__VSRC2_MASK 0xff0000
15250#define SQ_EXP_1__VSRC2__SHIFT 0x10
15251#define SQ_EXP_1__VSRC3_MASK 0xff000000
15252#define SQ_EXP_1__VSRC3__SHIFT 0x18
15253#define SQ_MUBUF_1__VADDR_MASK 0xff
15254#define SQ_MUBUF_1__VADDR__SHIFT 0x0
15255#define SQ_MUBUF_1__VDATA_MASK 0xff00
15256#define SQ_MUBUF_1__VDATA__SHIFT 0x8
15257#define SQ_MUBUF_1__SRSRC_MASK 0x1f0000
15258#define SQ_MUBUF_1__SRSRC__SHIFT 0x10
15259#define SQ_MUBUF_1__TFE_MASK 0x800000
15260#define SQ_MUBUF_1__TFE__SHIFT 0x17
15261#define SQ_MUBUF_1__SOFFSET_MASK 0xff000000
15262#define SQ_MUBUF_1__SOFFSET__SHIFT 0x18
15263#define SQ_SMEM_1__OFFSET_MASK 0xfffff
15264#define SQ_SMEM_1__OFFSET__SHIFT 0x0
15265#define SQ_INST__ENCODING_MASK 0xffffffff
15266#define SQ_INST__ENCODING__SHIFT 0x0
15267#define SQ_EXP_0__EN_MASK 0xf
15268#define SQ_EXP_0__EN__SHIFT 0x0
15269#define SQ_EXP_0__TGT_MASK 0x3f0
15270#define SQ_EXP_0__TGT__SHIFT 0x4
15271#define SQ_EXP_0__COMPR_MASK 0x400
15272#define SQ_EXP_0__COMPR__SHIFT 0xa
15273#define SQ_EXP_0__DONE_MASK 0x800
15274#define SQ_EXP_0__DONE__SHIFT 0xb
15275#define SQ_EXP_0__VM_MASK 0x1000
15276#define SQ_EXP_0__VM__SHIFT 0xc
15277#define SQ_EXP_0__ENCODING_MASK 0xfc000000
15278#define SQ_EXP_0__ENCODING__SHIFT 0x1a
15279#define SQ_MUBUF_0__OFFSET_MASK 0xfff
15280#define SQ_MUBUF_0__OFFSET__SHIFT 0x0
15281#define SQ_MUBUF_0__OFFEN_MASK 0x1000
15282#define SQ_MUBUF_0__OFFEN__SHIFT 0xc
15283#define SQ_MUBUF_0__IDXEN_MASK 0x2000
15284#define SQ_MUBUF_0__IDXEN__SHIFT 0xd
15285#define SQ_MUBUF_0__GLC_MASK 0x4000
15286#define SQ_MUBUF_0__GLC__SHIFT 0xe
15287#define SQ_MUBUF_0__LDS_MASK 0x10000
15288#define SQ_MUBUF_0__LDS__SHIFT 0x10
15289#define SQ_MUBUF_0__SLC_MASK 0x20000
15290#define SQ_MUBUF_0__SLC__SHIFT 0x11
15291#define SQ_MUBUF_0__OP_MASK 0x1fc0000
15292#define SQ_MUBUF_0__OP__SHIFT 0x12
15293#define SQ_MUBUF_0__ENCODING_MASK 0xfc000000
15294#define SQ_MUBUF_0__ENCODING__SHIFT 0x1a
15295#define SQ_VOP_SDWA__SRC0_MASK 0xff
15296#define SQ_VOP_SDWA__SRC0__SHIFT 0x0
15297#define SQ_VOP_SDWA__DST_SEL_MASK 0x700
15298#define SQ_VOP_SDWA__DST_SEL__SHIFT 0x8
15299#define SQ_VOP_SDWA__DST_UNUSED_MASK 0x1800
15300#define SQ_VOP_SDWA__DST_UNUSED__SHIFT 0xb
15301#define SQ_VOP_SDWA__CLAMP_MASK 0x2000
15302#define SQ_VOP_SDWA__CLAMP__SHIFT 0xd
15303#define SQ_VOP_SDWA__SRC0_SEL_MASK 0x70000
15304#define SQ_VOP_SDWA__SRC0_SEL__SHIFT 0x10
15305#define SQ_VOP_SDWA__SRC0_SEXT_MASK 0x80000
15306#define SQ_VOP_SDWA__SRC0_SEXT__SHIFT 0x13
15307#define SQ_VOP_SDWA__SRC0_NEG_MASK 0x100000
15308#define SQ_VOP_SDWA__SRC0_NEG__SHIFT 0x14
15309#define SQ_VOP_SDWA__SRC0_ABS_MASK 0x200000
15310#define SQ_VOP_SDWA__SRC0_ABS__SHIFT 0x15
15311#define SQ_VOP_SDWA__SRC1_SEL_MASK 0x7000000
15312#define SQ_VOP_SDWA__SRC1_SEL__SHIFT 0x18
15313#define SQ_VOP_SDWA__SRC1_SEXT_MASK 0x8000000
15314#define SQ_VOP_SDWA__SRC1_SEXT__SHIFT 0x1b
15315#define SQ_VOP_SDWA__SRC1_NEG_MASK 0x10000000
15316#define SQ_VOP_SDWA__SRC1_NEG__SHIFT 0x1c
15317#define SQ_VOP_SDWA__SRC1_ABS_MASK 0x20000000
15318#define SQ_VOP_SDWA__SRC1_ABS__SHIFT 0x1d
15319#define SQ_VOP3_0__VDST_MASK 0xff
15320#define SQ_VOP3_0__VDST__SHIFT 0x0
15321#define SQ_VOP3_0__ABS_MASK 0x700
15322#define SQ_VOP3_0__ABS__SHIFT 0x8
15323#define SQ_VOP3_0__CLAMP_MASK 0x8000
15324#define SQ_VOP3_0__CLAMP__SHIFT 0xf
15325#define SQ_VOP3_0__OP_MASK 0x3ff0000
15326#define SQ_VOP3_0__OP__SHIFT 0x10
15327#define SQ_VOP3_0__ENCODING_MASK 0xfc000000
15328#define SQ_VOP3_0__ENCODING__SHIFT 0x1a
15329#define SQ_VOP2__SRC0_MASK 0x1ff
15330#define SQ_VOP2__SRC0__SHIFT 0x0
15331#define SQ_VOP2__VSRC1_MASK 0x1fe00
15332#define SQ_VOP2__VSRC1__SHIFT 0x9
15333#define SQ_VOP2__VDST_MASK 0x1fe0000
15334#define SQ_VOP2__VDST__SHIFT 0x11
15335#define SQ_VOP2__OP_MASK 0x7e000000
15336#define SQ_VOP2__OP__SHIFT 0x19
15337#define SQ_VOP2__ENCODING_MASK 0x80000000
15338#define SQ_VOP2__ENCODING__SHIFT 0x1f
15339#define SQ_MTBUF_0__OFFSET_MASK 0xfff
15340#define SQ_MTBUF_0__OFFSET__SHIFT 0x0
15341#define SQ_MTBUF_0__OFFEN_MASK 0x1000
15342#define SQ_MTBUF_0__OFFEN__SHIFT 0xc
15343#define SQ_MTBUF_0__IDXEN_MASK 0x2000
15344#define SQ_MTBUF_0__IDXEN__SHIFT 0xd
15345#define SQ_MTBUF_0__GLC_MASK 0x4000
15346#define SQ_MTBUF_0__GLC__SHIFT 0xe
15347#define SQ_MTBUF_0__OP_MASK 0x78000
15348#define SQ_MTBUF_0__OP__SHIFT 0xf
15349#define SQ_MTBUF_0__DFMT_MASK 0x780000
15350#define SQ_MTBUF_0__DFMT__SHIFT 0x13
15351#define SQ_MTBUF_0__NFMT_MASK 0x3800000
15352#define SQ_MTBUF_0__NFMT__SHIFT 0x17
15353#define SQ_MTBUF_0__ENCODING_MASK 0xfc000000
15354#define SQ_MTBUF_0__ENCODING__SHIFT 0x1a
15355#define SQ_SOPP__SIMM16_MASK 0xffff
15356#define SQ_SOPP__SIMM16__SHIFT 0x0
15357#define SQ_SOPP__OP_MASK 0x7f0000
15358#define SQ_SOPP__OP__SHIFT 0x10
15359#define SQ_SOPP__ENCODING_MASK 0xff800000
15360#define SQ_SOPP__ENCODING__SHIFT 0x17
15361#define SQ_FLAT_0__GLC_MASK 0x10000
15362#define SQ_FLAT_0__GLC__SHIFT 0x10
15363#define SQ_FLAT_0__SLC_MASK 0x20000
15364#define SQ_FLAT_0__SLC__SHIFT 0x11
15365#define SQ_FLAT_0__OP_MASK 0x1fc0000
15366#define SQ_FLAT_0__OP__SHIFT 0x12
15367#define SQ_FLAT_0__ENCODING_MASK 0xfc000000
15368#define SQ_FLAT_0__ENCODING__SHIFT 0x1a
15369#define SQ_VOP3_0_SDST_ENC__VDST_MASK 0xff
15370#define SQ_VOP3_0_SDST_ENC__VDST__SHIFT 0x0
15371#define SQ_VOP3_0_SDST_ENC__SDST_MASK 0x7f00
15372#define SQ_VOP3_0_SDST_ENC__SDST__SHIFT 0x8
15373#define SQ_VOP3_0_SDST_ENC__CLAMP_MASK 0x8000
15374#define SQ_VOP3_0_SDST_ENC__CLAMP__SHIFT 0xf
15375#define SQ_VOP3_0_SDST_ENC__OP_MASK 0x3ff0000
15376#define SQ_VOP3_0_SDST_ENC__OP__SHIFT 0x10
15377#define SQ_VOP3_0_SDST_ENC__ENCODING_MASK 0xfc000000
15378#define SQ_VOP3_0_SDST_ENC__ENCODING__SHIFT 0x1a
15379#define SQ_MIMG_1__VADDR_MASK 0xff
15380#define SQ_MIMG_1__VADDR__SHIFT 0x0
15381#define SQ_MIMG_1__VDATA_MASK 0xff00
15382#define SQ_MIMG_1__VDATA__SHIFT 0x8
15383#define SQ_MIMG_1__SRSRC_MASK 0x1f0000
15384#define SQ_MIMG_1__SRSRC__SHIFT 0x10
15385#define SQ_MIMG_1__SSAMP_MASK 0x3e00000
15386#define SQ_MIMG_1__SSAMP__SHIFT 0x15
15387#define SQ_MIMG_1__D16_MASK 0x80000000
15388#define SQ_MIMG_1__D16__SHIFT 0x1f
15389#define SQ_SOP1__SSRC0_MASK 0xff
15390#define SQ_SOP1__SSRC0__SHIFT 0x0
15391#define SQ_SOP1__OP_MASK 0xff00
15392#define SQ_SOP1__OP__SHIFT 0x8
15393#define SQ_SOP1__SDST_MASK 0x7f0000
15394#define SQ_SOP1__SDST__SHIFT 0x10
15395#define SQ_SOP1__ENCODING_MASK 0xff800000
15396#define SQ_SOP1__ENCODING__SHIFT 0x17
15397#define SQ_SOPC__SSRC0_MASK 0xff
15398#define SQ_SOPC__SSRC0__SHIFT 0x0
15399#define SQ_SOPC__SSRC1_MASK 0xff00
15400#define SQ_SOPC__SSRC1__SHIFT 0x8
15401#define SQ_SOPC__OP_MASK 0x7f0000
15402#define SQ_SOPC__OP__SHIFT 0x10
15403#define SQ_SOPC__ENCODING_MASK 0xff800000
15404#define SQ_SOPC__ENCODING__SHIFT 0x17
15405#define SQ_FLAT_1__ADDR_MASK 0xff
15406#define SQ_FLAT_1__ADDR__SHIFT 0x0
15407#define SQ_FLAT_1__DATA_MASK 0xff00
15408#define SQ_FLAT_1__DATA__SHIFT 0x8
15409#define SQ_FLAT_1__TFE_MASK 0x800000
15410#define SQ_FLAT_1__TFE__SHIFT 0x17
15411#define SQ_FLAT_1__VDST_MASK 0xff000000
15412#define SQ_FLAT_1__VDST__SHIFT 0x18
15413#define SQ_DS_1__ADDR_MASK 0xff
15414#define SQ_DS_1__ADDR__SHIFT 0x0
15415#define SQ_DS_1__DATA0_MASK 0xff00
15416#define SQ_DS_1__DATA0__SHIFT 0x8
15417#define SQ_DS_1__DATA1_MASK 0xff0000
15418#define SQ_DS_1__DATA1__SHIFT 0x10
15419#define SQ_DS_1__VDST_MASK 0xff000000
15420#define SQ_DS_1__VDST__SHIFT 0x18
15421#define SQ_VOP3_1__SRC0_MASK 0x1ff
15422#define SQ_VOP3_1__SRC0__SHIFT 0x0
15423#define SQ_VOP3_1__SRC1_MASK 0x3fe00
15424#define SQ_VOP3_1__SRC1__SHIFT 0x9
15425#define SQ_VOP3_1__SRC2_MASK 0x7fc0000
15426#define SQ_VOP3_1__SRC2__SHIFT 0x12
15427#define SQ_VOP3_1__OMOD_MASK 0x18000000
15428#define SQ_VOP3_1__OMOD__SHIFT 0x1b
15429#define SQ_VOP3_1__NEG_MASK 0xe0000000
15430#define SQ_VOP3_1__NEG__SHIFT 0x1d
15431#define SQ_SMEM_0__SBASE_MASK 0x3f
15432#define SQ_SMEM_0__SBASE__SHIFT 0x0
15433#define SQ_SMEM_0__SDATA_MASK 0x1fc0
15434#define SQ_SMEM_0__SDATA__SHIFT 0x6
15435#define SQ_SMEM_0__GLC_MASK 0x10000
15436#define SQ_SMEM_0__GLC__SHIFT 0x10
15437#define SQ_SMEM_0__IMM_MASK 0x20000
15438#define SQ_SMEM_0__IMM__SHIFT 0x11
15439#define SQ_SMEM_0__OP_MASK 0x3fc0000
15440#define SQ_SMEM_0__OP__SHIFT 0x12
15441#define SQ_SMEM_0__ENCODING_MASK 0xfc000000
15442#define SQ_SMEM_0__ENCODING__SHIFT 0x1a
15443#define SQ_MIMG_0__DMASK_MASK 0xf00
15444#define SQ_MIMG_0__DMASK__SHIFT 0x8
15445#define SQ_MIMG_0__UNORM_MASK 0x1000
15446#define SQ_MIMG_0__UNORM__SHIFT 0xc
15447#define SQ_MIMG_0__GLC_MASK 0x2000
15448#define SQ_MIMG_0__GLC__SHIFT 0xd
15449#define SQ_MIMG_0__DA_MASK 0x4000
15450#define SQ_MIMG_0__DA__SHIFT 0xe
15451#define SQ_MIMG_0__R128_MASK 0x8000
15452#define SQ_MIMG_0__R128__SHIFT 0xf
15453#define SQ_MIMG_0__TFE_MASK 0x10000
15454#define SQ_MIMG_0__TFE__SHIFT 0x10
15455#define SQ_MIMG_0__LWE_MASK 0x20000
15456#define SQ_MIMG_0__LWE__SHIFT 0x11
15457#define SQ_MIMG_0__OP_MASK 0x1fc0000
15458#define SQ_MIMG_0__OP__SHIFT 0x12
15459#define SQ_MIMG_0__SLC_MASK 0x2000000
15460#define SQ_MIMG_0__SLC__SHIFT 0x19
15461#define SQ_MIMG_0__ENCODING_MASK 0xfc000000
15462#define SQ_MIMG_0__ENCODING__SHIFT 0x1a
15463#define SQ_SOPK__SIMM16_MASK 0xffff
15464#define SQ_SOPK__SIMM16__SHIFT 0x0
15465#define SQ_SOPK__SDST_MASK 0x7f0000
15466#define SQ_SOPK__SDST__SHIFT 0x10
15467#define SQ_SOPK__OP_MASK 0xf800000
15468#define SQ_SOPK__OP__SHIFT 0x17
15469#define SQ_SOPK__ENCODING_MASK 0xf0000000
15470#define SQ_SOPK__ENCODING__SHIFT 0x1c
15471#define SQ_DS_0__OFFSET0_MASK 0xff
15472#define SQ_DS_0__OFFSET0__SHIFT 0x0
15473#define SQ_DS_0__OFFSET1_MASK 0xff00
15474#define SQ_DS_0__OFFSET1__SHIFT 0x8
15475#define SQ_DS_0__GDS_MASK 0x10000
15476#define SQ_DS_0__GDS__SHIFT 0x10
15477#define SQ_DS_0__OP_MASK 0x1fe0000
15478#define SQ_DS_0__OP__SHIFT 0x11
15479#define SQ_DS_0__ENCODING_MASK 0xfc000000
15480#define SQ_DS_0__ENCODING__SHIFT 0x1a
15481#define SQ_VOP_DPP__SRC0_MASK 0xff
15482#define SQ_VOP_DPP__SRC0__SHIFT 0x0
15483#define SQ_VOP_DPP__DPP_CTRL_MASK 0x1ff00
15484#define SQ_VOP_DPP__DPP_CTRL__SHIFT 0x8
15485#define SQ_VOP_DPP__BOUND_CTRL_MASK 0x80000
15486#define SQ_VOP_DPP__BOUND_CTRL__SHIFT 0x13
15487#define SQ_VOP_DPP__SRC0_NEG_MASK 0x100000
15488#define SQ_VOP_DPP__SRC0_NEG__SHIFT 0x14
15489#define SQ_VOP_DPP__SRC0_ABS_MASK 0x200000
15490#define SQ_VOP_DPP__SRC0_ABS__SHIFT 0x15
15491#define SQ_VOP_DPP__SRC1_NEG_MASK 0x400000
15492#define SQ_VOP_DPP__SRC1_NEG__SHIFT 0x16
15493#define SQ_VOP_DPP__SRC1_ABS_MASK 0x800000
15494#define SQ_VOP_DPP__SRC1_ABS__SHIFT 0x17
15495#define SQ_VOP_DPP__BANK_MASK_MASK 0xf000000
15496#define SQ_VOP_DPP__BANK_MASK__SHIFT 0x18
15497#define SQ_VOP_DPP__ROW_MASK_MASK 0xf0000000
15498#define SQ_VOP_DPP__ROW_MASK__SHIFT 0x1c
15499#define SQ_VOPC__SRC0_MASK 0x1ff
15500#define SQ_VOPC__SRC0__SHIFT 0x0
15501#define SQ_VOPC__VSRC1_MASK 0x1fe00
15502#define SQ_VOPC__VSRC1__SHIFT 0x9
15503#define SQ_VOPC__OP_MASK 0x1fe0000
15504#define SQ_VOPC__OP__SHIFT 0x11
15505#define SQ_VOPC__ENCODING_MASK 0xfe000000
15506#define SQ_VOPC__ENCODING__SHIFT 0x19
15507#define SQ_VINTRP__VSRC_MASK 0xff
15508#define SQ_VINTRP__VSRC__SHIFT 0x0
15509#define SQ_VINTRP__ATTRCHAN_MASK 0x300
15510#define SQ_VINTRP__ATTRCHAN__SHIFT 0x8
15511#define SQ_VINTRP__ATTR_MASK 0xfc00
15512#define SQ_VINTRP__ATTR__SHIFT 0xa
15513#define SQ_VINTRP__OP_MASK 0x30000
15514#define SQ_VINTRP__OP__SHIFT 0x10
15515#define SQ_VINTRP__VDST_MASK 0x3fc0000
15516#define SQ_VINTRP__VDST__SHIFT 0x12
15517#define SQ_VINTRP__ENCODING_MASK 0xfc000000
15518#define SQ_VINTRP__ENCODING__SHIFT 0x1a
15519#define CGTT_SX_CLK_CTRL0__ON_DELAY_MASK 0xf
15520#define CGTT_SX_CLK_CTRL0__ON_DELAY__SHIFT 0x0
15521#define CGTT_SX_CLK_CTRL0__OFF_HYSTERESIS_MASK 0xff0
15522#define CGTT_SX_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
15523#define CGTT_SX_CLK_CTRL0__RESERVED_MASK 0xfff000
15524#define CGTT_SX_CLK_CTRL0__RESERVED__SHIFT 0xc
15525#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE7_MASK 0x1000000
15526#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE7__SHIFT 0x18
15527#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE6_MASK 0x2000000
15528#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE6__SHIFT 0x19
15529#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x4000000
15530#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x1a
15531#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x8000000
15532#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1b
15533#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x10000000
15534#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1c
15535#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x20000000
15536#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1d
15537#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000
15538#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e
15539#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000
15540#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f
15541#define CGTT_SX_CLK_CTRL1__ON_DELAY_MASK 0xf
15542#define CGTT_SX_CLK_CTRL1__ON_DELAY__SHIFT 0x0
15543#define CGTT_SX_CLK_CTRL1__OFF_HYSTERESIS_MASK 0xff0
15544#define CGTT_SX_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x4
15545#define CGTT_SX_CLK_CTRL1__RESERVED_MASK 0xfff000
15546#define CGTT_SX_CLK_CTRL1__RESERVED__SHIFT 0xc
15547#define CGTT_SX_CLK_CTRL1__DBG_EN_MASK 0x1000000
15548#define CGTT_SX_CLK_CTRL1__DBG_EN__SHIFT 0x18
15549#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE6_MASK 0x2000000
15550#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE6__SHIFT 0x19
15551#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE5_MASK 0x4000000
15552#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE5__SHIFT 0x1a
15553#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE4_MASK 0x8000000
15554#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE4__SHIFT 0x1b
/* Auto-generated register bit-field definitions (mask/shift pairs).
 * Each *_MASK is the contiguous bit-field mask of a register field and the
 * matching *__SHIFT is the field's starting bit position, i.e. the field
 * value is extracted as (reg & FIELD_MASK) >> FIELD__SHIFT.
 * Values are hardware-defined; do not edit by hand. */

/* CGTT_SX_CLK_CTRL1 — SX clock-gating control (tail of register; earlier fields precede this chunk) */
#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE3_MASK 0x10000000
#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE3__SHIFT 0x1c
#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE2_MASK 0x20000000
#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE2__SHIFT 0x1d
#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE1_MASK 0x40000000
#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE1__SHIFT 0x1e
#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE0_MASK 0x80000000
#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE0__SHIFT 0x1f
/* CGTT_SX_CLK_CTRL2 — SX clock-gating control 2 */
#define CGTT_SX_CLK_CTRL2__ON_DELAY_MASK 0xf
#define CGTT_SX_CLK_CTRL2__ON_DELAY__SHIFT 0x0
#define CGTT_SX_CLK_CTRL2__OFF_HYSTERESIS_MASK 0xff0
#define CGTT_SX_CLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x4
#define CGTT_SX_CLK_CTRL2__RESERVED_MASK 0xfff000
#define CGTT_SX_CLK_CTRL2__RESERVED__SHIFT 0xc
#define CGTT_SX_CLK_CTRL2__DBG_EN_MASK 0x1000000
#define CGTT_SX_CLK_CTRL2__DBG_EN__SHIFT 0x18
#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE6_MASK 0x2000000
#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE6__SHIFT 0x19
#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE5_MASK 0x4000000
#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE5__SHIFT 0x1a
#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE4_MASK 0x8000000
#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE4__SHIFT 0x1b
#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE3_MASK 0x10000000
#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE3__SHIFT 0x1c
#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE2_MASK 0x20000000
#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE2__SHIFT 0x1d
#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE1_MASK 0x40000000
#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE1__SHIFT 0x1e
#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE0_MASK 0x80000000
#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE0__SHIFT 0x1f
/* CGTT_SX_CLK_CTRL3 — SX clock-gating control 3 */
#define CGTT_SX_CLK_CTRL3__ON_DELAY_MASK 0xf
#define CGTT_SX_CLK_CTRL3__ON_DELAY__SHIFT 0x0
#define CGTT_SX_CLK_CTRL3__OFF_HYSTERESIS_MASK 0xff0
#define CGTT_SX_CLK_CTRL3__OFF_HYSTERESIS__SHIFT 0x4
#define CGTT_SX_CLK_CTRL3__RESERVED_MASK 0xfff000
#define CGTT_SX_CLK_CTRL3__RESERVED__SHIFT 0xc
#define CGTT_SX_CLK_CTRL3__DBG_EN_MASK 0x1000000
#define CGTT_SX_CLK_CTRL3__DBG_EN__SHIFT 0x18
#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE6_MASK 0x2000000
#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE6__SHIFT 0x19
#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE5_MASK 0x4000000
#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE5__SHIFT 0x1a
#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE4_MASK 0x8000000
#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE4__SHIFT 0x1b
#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE3_MASK 0x10000000
#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE3__SHIFT 0x1c
#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE2_MASK 0x20000000
#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE2__SHIFT 0x1d
#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE1_MASK 0x40000000
#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE1__SHIFT 0x1e
#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE0_MASK 0x80000000
#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE0__SHIFT 0x1f
/* CGTT_SX_CLK_CTRL4 — SX clock-gating control 4 */
#define CGTT_SX_CLK_CTRL4__ON_DELAY_MASK 0xf
#define CGTT_SX_CLK_CTRL4__ON_DELAY__SHIFT 0x0
#define CGTT_SX_CLK_CTRL4__OFF_HYSTERESIS_MASK 0xff0
#define CGTT_SX_CLK_CTRL4__OFF_HYSTERESIS__SHIFT 0x4
#define CGTT_SX_CLK_CTRL4__RESERVED_MASK 0xfff000
#define CGTT_SX_CLK_CTRL4__RESERVED__SHIFT 0xc
#define CGTT_SX_CLK_CTRL4__DBG_EN_MASK 0x1000000
#define CGTT_SX_CLK_CTRL4__DBG_EN__SHIFT 0x18
#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE6_MASK 0x2000000
#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE6__SHIFT 0x19
#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE5_MASK 0x4000000
#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE5__SHIFT 0x1a
#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE4_MASK 0x8000000
#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE4__SHIFT 0x1b
#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE3_MASK 0x10000000
#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE3__SHIFT 0x1c
#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE2_MASK 0x20000000
#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE2__SHIFT 0x1d
#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE1_MASK 0x40000000
#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE1__SHIFT 0x1e
#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE0_MASK 0x80000000
#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE0__SHIFT 0x1f
/* SX_DEBUG_BUSY — single-bit busy/valid status flags */
#define SX_DEBUG_BUSY__POS_FREE_OR_VALIDS_MASK 0x1
#define SX_DEBUG_BUSY__POS_FREE_OR_VALIDS__SHIFT 0x0
#define SX_DEBUG_BUSY__POS_REQUESTER_BUSY_MASK 0x2
#define SX_DEBUG_BUSY__POS_REQUESTER_BUSY__SHIFT 0x1
#define SX_DEBUG_BUSY__PA_SX_BUSY_MASK 0x4
#define SX_DEBUG_BUSY__PA_SX_BUSY__SHIFT 0x2
#define SX_DEBUG_BUSY__POS_SCBD_BUSY_MASK 0x8
#define SX_DEBUG_BUSY__POS_SCBD_BUSY__SHIFT 0x3
#define SX_DEBUG_BUSY__POS_BANK3VAL3_BUSY_MASK 0x10
#define SX_DEBUG_BUSY__POS_BANK3VAL3_BUSY__SHIFT 0x4
#define SX_DEBUG_BUSY__POS_BANK3VAL2_BUSY_MASK 0x20
#define SX_DEBUG_BUSY__POS_BANK3VAL2_BUSY__SHIFT 0x5
#define SX_DEBUG_BUSY__POS_BANK3VAL1_BUSY_MASK 0x40
#define SX_DEBUG_BUSY__POS_BANK3VAL1_BUSY__SHIFT 0x6
#define SX_DEBUG_BUSY__POS_BANK3VAL0_BUSY_MASK 0x80
#define SX_DEBUG_BUSY__POS_BANK3VAL0_BUSY__SHIFT 0x7
#define SX_DEBUG_BUSY__POS_BANK2VAL3_BUSY_MASK 0x100
#define SX_DEBUG_BUSY__POS_BANK2VAL3_BUSY__SHIFT 0x8
#define SX_DEBUG_BUSY__POS_BANK2VAL2_BUSY_MASK 0x200
#define SX_DEBUG_BUSY__POS_BANK2VAL2_BUSY__SHIFT 0x9
#define SX_DEBUG_BUSY__POS_BANK2VAL1_BUSY_MASK 0x400
#define SX_DEBUG_BUSY__POS_BANK2VAL1_BUSY__SHIFT 0xa
#define SX_DEBUG_BUSY__POS_BANK2VAL0_BUSY_MASK 0x800
#define SX_DEBUG_BUSY__POS_BANK2VAL0_BUSY__SHIFT 0xb
#define SX_DEBUG_BUSY__POS_BANK1VAL3_BUSY_MASK 0x1000
#define SX_DEBUG_BUSY__POS_BANK1VAL3_BUSY__SHIFT 0xc
#define SX_DEBUG_BUSY__POS_BANK1VAL2_BUSY_MASK 0x2000
#define SX_DEBUG_BUSY__POS_BANK1VAL2_BUSY__SHIFT 0xd
#define SX_DEBUG_BUSY__POS_BANK1VAL1_BUSY_MASK 0x4000
#define SX_DEBUG_BUSY__POS_BANK1VAL1_BUSY__SHIFT 0xe
#define SX_DEBUG_BUSY__POS_BANK1VAL0_BUSY_MASK 0x8000
#define SX_DEBUG_BUSY__POS_BANK1VAL0_BUSY__SHIFT 0xf
#define SX_DEBUG_BUSY__POS_BANK0VAL3_BUSY_MASK 0x10000
#define SX_DEBUG_BUSY__POS_BANK0VAL3_BUSY__SHIFT 0x10
#define SX_DEBUG_BUSY__POS_BANK0VAL2_BUSY_MASK 0x20000
#define SX_DEBUG_BUSY__POS_BANK0VAL2_BUSY__SHIFT 0x11
#define SX_DEBUG_BUSY__POS_BANK0VAL1_BUSY_MASK 0x40000
#define SX_DEBUG_BUSY__POS_BANK0VAL1_BUSY__SHIFT 0x12
#define SX_DEBUG_BUSY__POS_BANK0VAL0_BUSY_MASK 0x80000
#define SX_DEBUG_BUSY__POS_BANK0VAL0_BUSY__SHIFT 0x13
#define SX_DEBUG_BUSY__POS_INMUX_VALID_MASK 0x100000
#define SX_DEBUG_BUSY__POS_INMUX_VALID__SHIFT 0x14
#define SX_DEBUG_BUSY__WRCTRL1_VALIDQ3_MASK 0x200000
#define SX_DEBUG_BUSY__WRCTRL1_VALIDQ3__SHIFT 0x15
#define SX_DEBUG_BUSY__WRCTRL1_VALIDQ2_MASK 0x400000
#define SX_DEBUG_BUSY__WRCTRL1_VALIDQ2__SHIFT 0x16
#define SX_DEBUG_BUSY__WRCTRL1_VALIDQ1_MASK 0x800000
#define SX_DEBUG_BUSY__WRCTRL1_VALIDQ1__SHIFT 0x17
#define SX_DEBUG_BUSY__WRCTRL0_VALIDQ3_MASK 0x1000000
#define SX_DEBUG_BUSY__WRCTRL0_VALIDQ3__SHIFT 0x18
#define SX_DEBUG_BUSY__WRCTRL0_VALIDQ2_MASK 0x2000000
#define SX_DEBUG_BUSY__WRCTRL0_VALIDQ2__SHIFT 0x19
#define SX_DEBUG_BUSY__WRCTRL0_VALIDQ1_MASK 0x4000000
#define SX_DEBUG_BUSY__WRCTRL0_VALIDQ1__SHIFT 0x1a
#define SX_DEBUG_BUSY__PCCMD_VALID_MASK 0x8000000
#define SX_DEBUG_BUSY__PCCMD_VALID__SHIFT 0x1b
#define SX_DEBUG_BUSY__VDATA1_VALID_MASK 0x10000000
#define SX_DEBUG_BUSY__VDATA1_VALID__SHIFT 0x1c
#define SX_DEBUG_BUSY__VDATA0_VALID_MASK 0x20000000
#define SX_DEBUG_BUSY__VDATA0_VALID__SHIFT 0x1d
#define SX_DEBUG_BUSY__CMD_BUSYORVAL_MASK 0x40000000
#define SX_DEBUG_BUSY__CMD_BUSYORVAL__SHIFT 0x1e
#define SX_DEBUG_BUSY__ADDR_BUSYORVAL_MASK 0x80000000
#define SX_DEBUG_BUSY__ADDR_BUSYORVAL__SHIFT 0x1f
/* SX_DEBUG_BUSY_2 — color-buffer scoreboard/request/DBIF busy flags */
#define SX_DEBUG_BUSY_2__COL_SCBD_BUSY_MASK 0x1
#define SX_DEBUG_BUSY_2__COL_SCBD_BUSY__SHIFT 0x0
#define SX_DEBUG_BUSY_2__COL_REQ3_FREECNT_NE0_MASK 0x2
#define SX_DEBUG_BUSY_2__COL_REQ3_FREECNT_NE0__SHIFT 0x1
#define SX_DEBUG_BUSY_2__COL_REQ3_IDLE_MASK 0x4
#define SX_DEBUG_BUSY_2__COL_REQ3_IDLE__SHIFT 0x2
#define SX_DEBUG_BUSY_2__COL_REQ3_BUSY_MASK 0x8
#define SX_DEBUG_BUSY_2__COL_REQ3_BUSY__SHIFT 0x3
#define SX_DEBUG_BUSY_2__COL_REQ2_FREECNT_NE0_MASK 0x10
#define SX_DEBUG_BUSY_2__COL_REQ2_FREECNT_NE0__SHIFT 0x4
#define SX_DEBUG_BUSY_2__COL_REQ2_IDLE_MASK 0x20
#define SX_DEBUG_BUSY_2__COL_REQ2_IDLE__SHIFT 0x5
#define SX_DEBUG_BUSY_2__COL_REQ2_BUSY_MASK 0x40
#define SX_DEBUG_BUSY_2__COL_REQ2_BUSY__SHIFT 0x6
#define SX_DEBUG_BUSY_2__COL_REQ1_FREECNT_NE0_MASK 0x80
#define SX_DEBUG_BUSY_2__COL_REQ1_FREECNT_NE0__SHIFT 0x7
#define SX_DEBUG_BUSY_2__COL_REQ1_IDLE_MASK 0x100
#define SX_DEBUG_BUSY_2__COL_REQ1_IDLE__SHIFT 0x8
#define SX_DEBUG_BUSY_2__COL_REQ1_BUSY_MASK 0x200
#define SX_DEBUG_BUSY_2__COL_REQ1_BUSY__SHIFT 0x9
#define SX_DEBUG_BUSY_2__COL_REQ0_FREECNT_NE0_MASK 0x400
#define SX_DEBUG_BUSY_2__COL_REQ0_FREECNT_NE0__SHIFT 0xa
#define SX_DEBUG_BUSY_2__COL_REQ0_IDLE_MASK 0x800
#define SX_DEBUG_BUSY_2__COL_REQ0_IDLE__SHIFT 0xb
#define SX_DEBUG_BUSY_2__COL_REQ0_BUSY_MASK 0x1000
#define SX_DEBUG_BUSY_2__COL_REQ0_BUSY__SHIFT 0xc
#define SX_DEBUG_BUSY_2__COL_DBIF3_SENDFREE_BUSY_MASK 0x2000
#define SX_DEBUG_BUSY_2__COL_DBIF3_SENDFREE_BUSY__SHIFT 0xd
#define SX_DEBUG_BUSY_2__COL_DBIF3_FIFO_BUSY_MASK 0x4000
#define SX_DEBUG_BUSY_2__COL_DBIF3_FIFO_BUSY__SHIFT 0xe
#define SX_DEBUG_BUSY_2__COL_DBIF3_READ_VALID_MASK 0x8000
#define SX_DEBUG_BUSY_2__COL_DBIF3_READ_VALID__SHIFT 0xf
#define SX_DEBUG_BUSY_2__COL_DBIF2_SENDFREE_BUSY_MASK 0x10000
#define SX_DEBUG_BUSY_2__COL_DBIF2_SENDFREE_BUSY__SHIFT 0x10
#define SX_DEBUG_BUSY_2__COL_DBIF2_FIFO_BUSY_MASK 0x20000
#define SX_DEBUG_BUSY_2__COL_DBIF2_FIFO_BUSY__SHIFT 0x11
#define SX_DEBUG_BUSY_2__COL_DBIF2_READ_VALID_MASK 0x40000
#define SX_DEBUG_BUSY_2__COL_DBIF2_READ_VALID__SHIFT 0x12
#define SX_DEBUG_BUSY_2__COL_DBIF1_SENDFREE_BUSY_MASK 0x80000
#define SX_DEBUG_BUSY_2__COL_DBIF1_SENDFREE_BUSY__SHIFT 0x13
#define SX_DEBUG_BUSY_2__COL_DBIF1_FIFO_BUSY_MASK 0x100000
#define SX_DEBUG_BUSY_2__COL_DBIF1_FIFO_BUSY__SHIFT 0x14
#define SX_DEBUG_BUSY_2__COL_DBIF1_READ_VALID_MASK 0x200000
#define SX_DEBUG_BUSY_2__COL_DBIF1_READ_VALID__SHIFT 0x15
#define SX_DEBUG_BUSY_2__COL_DBIF0_SENDFREE_BUSY_MASK 0x400000
#define SX_DEBUG_BUSY_2__COL_DBIF0_SENDFREE_BUSY__SHIFT 0x16
#define SX_DEBUG_BUSY_2__COL_DBIF0_FIFO_BUSY_MASK 0x800000
#define SX_DEBUG_BUSY_2__COL_DBIF0_FIFO_BUSY__SHIFT 0x17
#define SX_DEBUG_BUSY_2__COL_DBIF0_READ_VALID_MASK 0x1000000
#define SX_DEBUG_BUSY_2__COL_DBIF0_READ_VALID__SHIFT 0x18
#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL3_BUSY_MASK 0x2000000
#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL3_BUSY__SHIFT 0x19
#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL2_BUSY_MASK 0x4000000
#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL2_BUSY__SHIFT 0x1a
#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL1_BUSY_MASK 0x8000000
#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL1_BUSY__SHIFT 0x1b
#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL0_BUSY_MASK 0x10000000
#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL0_BUSY__SHIFT 0x1c
#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL3_BUSY_MASK 0x20000000
#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL3_BUSY__SHIFT 0x1d
#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL2_BUSY_MASK 0x40000000
#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL2_BUSY__SHIFT 0x1e
#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL1_BUSY_MASK 0x80000000
#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL1_BUSY__SHIFT 0x1f
/* SX_DEBUG_BUSY_3 — color buffer bank/valid busy flags (continues BUFF3/BUFF2/BUFF1) */
#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK2_VAL0_BUSY_MASK 0x1
#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK2_VAL0_BUSY__SHIFT 0x0
#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL3_BUSY_MASK 0x2
#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL3_BUSY__SHIFT 0x1
#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL2_BUSY_MASK 0x4
#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL2_BUSY__SHIFT 0x2
#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL1_BUSY_MASK 0x8
#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL1_BUSY__SHIFT 0x3
#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL0_BUSY_MASK 0x10
#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL0_BUSY__SHIFT 0x4
#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL3_BUSY_MASK 0x20
#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL3_BUSY__SHIFT 0x5
#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL2_BUSY_MASK 0x40
#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL2_BUSY__SHIFT 0x6
#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL1_BUSY_MASK 0x80
#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL1_BUSY__SHIFT 0x7
#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL0_BUSY_MASK 0x100
#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL0_BUSY__SHIFT 0x8
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL3_BUSY_MASK 0x200
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL3_BUSY__SHIFT 0x9
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL2_BUSY_MASK 0x400
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL2_BUSY__SHIFT 0xa
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL1_BUSY_MASK 0x800
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL1_BUSY__SHIFT 0xb
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL0_BUSY_MASK 0x1000
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL0_BUSY__SHIFT 0xc
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL3_BUSY_MASK 0x2000
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL3_BUSY__SHIFT 0xd
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL2_BUSY_MASK 0x4000
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL2_BUSY__SHIFT 0xe
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL1_BUSY_MASK 0x8000
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL1_BUSY__SHIFT 0xf
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL0_BUSY_MASK 0x10000
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL0_BUSY__SHIFT 0x10
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL3_BUSY_MASK 0x20000
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL3_BUSY__SHIFT 0x11
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL2_BUSY_MASK 0x40000
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL2_BUSY__SHIFT 0x12
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL1_BUSY_MASK 0x80000
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL1_BUSY__SHIFT 0x13
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL0_BUSY_MASK 0x100000
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL0_BUSY__SHIFT 0x14
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL3_BUSY_MASK 0x200000
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL3_BUSY__SHIFT 0x15
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL2_BUSY_MASK 0x400000
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL2_BUSY__SHIFT 0x16
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL1_BUSY_MASK 0x800000
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL1_BUSY__SHIFT 0x17
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL0_BUSY_MASK 0x1000000
#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL0_BUSY__SHIFT 0x18
#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL3_BUSY_MASK 0x2000000
#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL3_BUSY__SHIFT 0x19
#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL2_BUSY_MASK 0x4000000
#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL2_BUSY__SHIFT 0x1a
#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL1_BUSY_MASK 0x8000000
#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL1_BUSY__SHIFT 0x1b
#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL0_BUSY_MASK 0x10000000
#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL0_BUSY__SHIFT 0x1c
#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL3_BUSY_MASK 0x20000000
#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL3_BUSY__SHIFT 0x1d
#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL2_BUSY_MASK 0x40000000
#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL2_BUSY__SHIFT 0x1e
#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL1_BUSY_MASK 0x80000000
#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL1_BUSY__SHIFT 0x1f
/* SX_DEBUG_BUSY_4 — color buffer bank/valid busy flags (BUFF1/BUFF0) */
#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK2_VAL0_BUSY_MASK 0x1
#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK2_VAL0_BUSY__SHIFT 0x0
#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL3_BUSY_MASK 0x2
#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL3_BUSY__SHIFT 0x1
#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL2_BUSY_MASK 0x4
#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL2_BUSY__SHIFT 0x2
#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL1_BUSY_MASK 0x8
#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL1_BUSY__SHIFT 0x3
#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL0_BUSY_MASK 0x10
#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL0_BUSY__SHIFT 0x4
#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL3_BUSY_MASK 0x20
#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL3_BUSY__SHIFT 0x5
#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL2_BUSY_MASK 0x40
#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL2_BUSY__SHIFT 0x6
#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL1_BUSY_MASK 0x80
#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL1_BUSY__SHIFT 0x7
#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL0_BUSY_MASK 0x100
#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL0_BUSY__SHIFT 0x8
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL3_BUSY_MASK 0x200
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL3_BUSY__SHIFT 0x9
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL2_BUSY_MASK 0x400
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL2_BUSY__SHIFT 0xa
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL1_BUSY_MASK 0x800
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL1_BUSY__SHIFT 0xb
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL0_BUSY_MASK 0x1000
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL0_BUSY__SHIFT 0xc
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL3_BUSY_MASK 0x2000
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL3_BUSY__SHIFT 0xd
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL2_BUSY_MASK 0x4000
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL2_BUSY__SHIFT 0xe
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL1_BUSY_MASK 0x8000
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL1_BUSY__SHIFT 0xf
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL0_BUSY_MASK 0x10000
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL0_BUSY__SHIFT 0x10
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL3_BUSY_MASK 0x20000
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL3_BUSY__SHIFT 0x11
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL2_BUSY_MASK 0x40000
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL2_BUSY__SHIFT 0x12
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL1_BUSY_MASK 0x80000
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL1_BUSY__SHIFT 0x13
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL0_BUSY_MASK 0x100000
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL0_BUSY__SHIFT 0x14
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL3_BUSY_MASK 0x200000
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL3_BUSY__SHIFT 0x15
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL2_BUSY_MASK 0x400000
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL2_BUSY__SHIFT 0x16
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL1_BUSY_MASK 0x800000
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL1_BUSY__SHIFT 0x17
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL0_BUSY_MASK 0x1000000
#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL0_BUSY__SHIFT 0x18
#define SX_DEBUG_BUSY_4__RESERVED_MASK 0xfe000000
#define SX_DEBUG_BUSY_4__RESERVED__SHIFT 0x19
/* SX_DEBUG_1 — debug control and data */
#define SX_DEBUG_1__SX_DB_QUAD_CREDIT_MASK 0x7f
#define SX_DEBUG_1__SX_DB_QUAD_CREDIT__SHIFT 0x0
#define SX_DEBUG_1__DISABLE_BLEND_OPT_DONT_RD_DST_MASK 0x100
#define SX_DEBUG_1__DISABLE_BLEND_OPT_DONT_RD_DST__SHIFT 0x8
#define SX_DEBUG_1__DISABLE_BLEND_OPT_BYPASS_MASK 0x200
#define SX_DEBUG_1__DISABLE_BLEND_OPT_BYPASS__SHIFT 0x9
#define SX_DEBUG_1__DISABLE_BLEND_OPT_DISCARD_PIXEL_MASK 0x400
#define SX_DEBUG_1__DISABLE_BLEND_OPT_DISCARD_PIXEL__SHIFT 0xa
#define SX_DEBUG_1__DISABLE_QUAD_PAIR_OPT_MASK 0x800
#define SX_DEBUG_1__DISABLE_QUAD_PAIR_OPT__SHIFT 0xb
#define SX_DEBUG_1__DISABLE_PIX_EN_ZERO_OPT_MASK 0x1000
#define SX_DEBUG_1__DISABLE_PIX_EN_ZERO_OPT__SHIFT 0xc
#define SX_DEBUG_1__DEBUG_DATA_MASK 0xffffe000
#define SX_DEBUG_1__DEBUG_DATA__SHIFT 0xd
/* SX_PERFCOUNTER[0-3]_SELECT — performance counter event selects */
#define SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT_MASK 0x3ff
#define SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
#define SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT1_MASK 0xffc00
#define SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
#define SX_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0xf00000
#define SX_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
#define SX_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT_MASK 0x3ff
#define SX_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
#define SX_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT1_MASK 0xffc00
#define SX_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
#define SX_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0xf00000
#define SX_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
#define SX_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT_MASK 0x3ff
#define SX_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
#define SX_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT1_MASK 0xffc00
#define SX_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
#define SX_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0xf00000
#define SX_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
#define SX_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT_MASK 0x3ff
#define SX_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
#define SX_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT1_MASK 0xffc00
#define SX_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
#define SX_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0xf00000
#define SX_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
#define SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT2_MASK 0x3ff
#define SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT2__SHIFT 0x0
#define SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT3_MASK 0xffc00
#define SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT3__SHIFT 0xa
#define SX_PERFCOUNTER1_SELECT1__PERFCOUNTER_SELECT2_MASK 0x3ff
#define SX_PERFCOUNTER1_SELECT1__PERFCOUNTER_SELECT2__SHIFT 0x0
#define SX_PERFCOUNTER1_SELECT1__PERFCOUNTER_SELECT3_MASK 0xffc00
#define SX_PERFCOUNTER1_SELECT1__PERFCOUNTER_SELECT3__SHIFT 0xa
/* SX_PERFCOUNTER[0-3]_LO/HI — 64-bit counter value halves */
#define SX_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define SX_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SX_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define SX_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SX_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define SX_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SX_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define SX_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SX_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define SX_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SX_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define SX_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
#define SX_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define SX_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
#define SX_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define SX_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
/* SX_PS_DOWNCONVERT — per-MRT (render target) 4-bit fields */
#define SX_PS_DOWNCONVERT__MRT0_MASK 0xf
#define SX_PS_DOWNCONVERT__MRT0__SHIFT 0x0
#define SX_PS_DOWNCONVERT__MRT1_MASK 0xf0
#define SX_PS_DOWNCONVERT__MRT1__SHIFT 0x4
#define SX_PS_DOWNCONVERT__MRT2_MASK 0xf00
#define SX_PS_DOWNCONVERT__MRT2__SHIFT 0x8
#define SX_PS_DOWNCONVERT__MRT3_MASK 0xf000
#define SX_PS_DOWNCONVERT__MRT3__SHIFT 0xc
#define SX_PS_DOWNCONVERT__MRT4_MASK 0xf0000
#define SX_PS_DOWNCONVERT__MRT4__SHIFT 0x10
#define SX_PS_DOWNCONVERT__MRT5_MASK 0xf00000
#define SX_PS_DOWNCONVERT__MRT5__SHIFT 0x14
#define SX_PS_DOWNCONVERT__MRT6_MASK 0xf000000
#define SX_PS_DOWNCONVERT__MRT6__SHIFT 0x18
#define SX_PS_DOWNCONVERT__MRT7_MASK 0xf0000000
#define SX_PS_DOWNCONVERT__MRT7__SHIFT 0x1c
/* SX_BLEND_OPT_EPSILON — per-MRT blend-optimization epsilon */
#define SX_BLEND_OPT_EPSILON__MRT0_EPSILON_MASK 0xf
#define SX_BLEND_OPT_EPSILON__MRT0_EPSILON__SHIFT 0x0
#define SX_BLEND_OPT_EPSILON__MRT1_EPSILON_MASK 0xf0
#define SX_BLEND_OPT_EPSILON__MRT1_EPSILON__SHIFT 0x4
#define SX_BLEND_OPT_EPSILON__MRT2_EPSILON_MASK 0xf00
#define SX_BLEND_OPT_EPSILON__MRT2_EPSILON__SHIFT 0x8
#define SX_BLEND_OPT_EPSILON__MRT3_EPSILON_MASK 0xf000
#define SX_BLEND_OPT_EPSILON__MRT3_EPSILON__SHIFT 0xc
#define SX_BLEND_OPT_EPSILON__MRT4_EPSILON_MASK 0xf0000
#define SX_BLEND_OPT_EPSILON__MRT4_EPSILON__SHIFT 0x10
#define SX_BLEND_OPT_EPSILON__MRT5_EPSILON_MASK 0xf00000
#define SX_BLEND_OPT_EPSILON__MRT5_EPSILON__SHIFT 0x14
#define SX_BLEND_OPT_EPSILON__MRT6_EPSILON_MASK 0xf000000
#define SX_BLEND_OPT_EPSILON__MRT6_EPSILON__SHIFT 0x18
#define SX_BLEND_OPT_EPSILON__MRT7_EPSILON_MASK 0xf0000000
#define SX_BLEND_OPT_EPSILON__MRT7_EPSILON__SHIFT 0x1c
/* SX_BLEND_OPT_CONTROL — per-MRT color/alpha opt-disable bits */
#define SX_BLEND_OPT_CONTROL__MRT0_COLOR_OPT_DISABLE_MASK 0x1
#define SX_BLEND_OPT_CONTROL__MRT0_COLOR_OPT_DISABLE__SHIFT 0x0
#define SX_BLEND_OPT_CONTROL__MRT0_ALPHA_OPT_DISABLE_MASK 0x2
#define SX_BLEND_OPT_CONTROL__MRT0_ALPHA_OPT_DISABLE__SHIFT 0x1
#define SX_BLEND_OPT_CONTROL__MRT1_COLOR_OPT_DISABLE_MASK 0x10
#define SX_BLEND_OPT_CONTROL__MRT1_COLOR_OPT_DISABLE__SHIFT 0x4
#define SX_BLEND_OPT_CONTROL__MRT1_ALPHA_OPT_DISABLE_MASK 0x20
#define SX_BLEND_OPT_CONTROL__MRT1_ALPHA_OPT_DISABLE__SHIFT 0x5
#define SX_BLEND_OPT_CONTROL__MRT2_COLOR_OPT_DISABLE_MASK 0x100
#define SX_BLEND_OPT_CONTROL__MRT2_COLOR_OPT_DISABLE__SHIFT 0x8
#define SX_BLEND_OPT_CONTROL__MRT2_ALPHA_OPT_DISABLE_MASK 0x200
#define SX_BLEND_OPT_CONTROL__MRT2_ALPHA_OPT_DISABLE__SHIFT 0x9
#define SX_BLEND_OPT_CONTROL__MRT3_COLOR_OPT_DISABLE_MASK 0x1000
#define SX_BLEND_OPT_CONTROL__MRT3_COLOR_OPT_DISABLE__SHIFT 0xc
#define SX_BLEND_OPT_CONTROL__MRT3_ALPHA_OPT_DISABLE_MASK 0x2000
#define SX_BLEND_OPT_CONTROL__MRT3_ALPHA_OPT_DISABLE__SHIFT 0xd
#define SX_BLEND_OPT_CONTROL__MRT4_COLOR_OPT_DISABLE_MASK 0x10000
#define SX_BLEND_OPT_CONTROL__MRT4_COLOR_OPT_DISABLE__SHIFT 0x10
#define SX_BLEND_OPT_CONTROL__MRT4_ALPHA_OPT_DISABLE_MASK 0x20000
#define SX_BLEND_OPT_CONTROL__MRT4_ALPHA_OPT_DISABLE__SHIFT 0x11
#define SX_BLEND_OPT_CONTROL__MRT5_COLOR_OPT_DISABLE_MASK 0x100000
#define SX_BLEND_OPT_CONTROL__MRT5_COLOR_OPT_DISABLE__SHIFT 0x14
#define SX_BLEND_OPT_CONTROL__MRT5_ALPHA_OPT_DISABLE_MASK 0x200000
#define SX_BLEND_OPT_CONTROL__MRT5_ALPHA_OPT_DISABLE__SHIFT 0x15
#define SX_BLEND_OPT_CONTROL__MRT6_COLOR_OPT_DISABLE_MASK 0x1000000
#define SX_BLEND_OPT_CONTROL__MRT6_COLOR_OPT_DISABLE__SHIFT 0x18
#define SX_BLEND_OPT_CONTROL__MRT6_ALPHA_OPT_DISABLE_MASK 0x2000000
#define SX_BLEND_OPT_CONTROL__MRT6_ALPHA_OPT_DISABLE__SHIFT 0x19
#define SX_BLEND_OPT_CONTROL__MRT7_COLOR_OPT_DISABLE_MASK 0x10000000
#define SX_BLEND_OPT_CONTROL__MRT7_COLOR_OPT_DISABLE__SHIFT 0x1c
#define SX_BLEND_OPT_CONTROL__MRT7_ALPHA_OPT_DISABLE_MASK 0x20000000
#define SX_BLEND_OPT_CONTROL__MRT7_ALPHA_OPT_DISABLE__SHIFT 0x1d
#define SX_BLEND_OPT_CONTROL__PIXEN_ZERO_OPT_DISABLE_MASK 0x80000000
#define SX_BLEND_OPT_CONTROL__PIXEN_ZERO_OPT_DISABLE__SHIFT 0x1f
/* SX_MRT[0-7]_BLEND_OPT — per-MRT blend-optimization src/dst/combine fields */
#define SX_MRT0_BLEND_OPT__COLOR_SRC_OPT_MASK 0x7
#define SX_MRT0_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
#define SX_MRT0_BLEND_OPT__COLOR_DST_OPT_MASK 0x70
#define SX_MRT0_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
#define SX_MRT0_BLEND_OPT__COLOR_COMB_FCN_MASK 0x700
#define SX_MRT0_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
#define SX_MRT0_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x70000
#define SX_MRT0_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
#define SX_MRT0_BLEND_OPT__ALPHA_DST_OPT_MASK 0x700000
#define SX_MRT0_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
#define SX_MRT0_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x7000000
#define SX_MRT0_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
#define SX_MRT1_BLEND_OPT__COLOR_SRC_OPT_MASK 0x7
#define SX_MRT1_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
#define SX_MRT1_BLEND_OPT__COLOR_DST_OPT_MASK 0x70
#define SX_MRT1_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
#define SX_MRT1_BLEND_OPT__COLOR_COMB_FCN_MASK 0x700
#define SX_MRT1_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
#define SX_MRT1_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x70000
#define SX_MRT1_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
#define SX_MRT1_BLEND_OPT__ALPHA_DST_OPT_MASK 0x700000
#define SX_MRT1_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
#define SX_MRT1_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x7000000
#define SX_MRT1_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
#define SX_MRT2_BLEND_OPT__COLOR_SRC_OPT_MASK 0x7
#define SX_MRT2_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
#define SX_MRT2_BLEND_OPT__COLOR_DST_OPT_MASK 0x70
#define SX_MRT2_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
#define SX_MRT2_BLEND_OPT__COLOR_COMB_FCN_MASK 0x700
#define SX_MRT2_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
#define SX_MRT2_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x70000
#define SX_MRT2_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
#define SX_MRT2_BLEND_OPT__ALPHA_DST_OPT_MASK 0x700000
#define SX_MRT2_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
#define SX_MRT2_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x7000000
#define SX_MRT2_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
#define SX_MRT3_BLEND_OPT__COLOR_SRC_OPT_MASK 0x7
#define SX_MRT3_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
#define SX_MRT3_BLEND_OPT__COLOR_DST_OPT_MASK 0x70
#define SX_MRT3_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
#define SX_MRT3_BLEND_OPT__COLOR_COMB_FCN_MASK 0x700
#define SX_MRT3_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
#define SX_MRT3_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x70000
#define SX_MRT3_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
#define SX_MRT3_BLEND_OPT__ALPHA_DST_OPT_MASK 0x700000
#define SX_MRT3_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
#define SX_MRT3_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x7000000
#define SX_MRT3_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
#define SX_MRT4_BLEND_OPT__COLOR_SRC_OPT_MASK 0x7
#define SX_MRT4_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
#define SX_MRT4_BLEND_OPT__COLOR_DST_OPT_MASK 0x70
#define SX_MRT4_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
#define SX_MRT4_BLEND_OPT__COLOR_COMB_FCN_MASK 0x700
#define SX_MRT4_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
#define SX_MRT4_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x70000
#define SX_MRT4_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
#define SX_MRT4_BLEND_OPT__ALPHA_DST_OPT_MASK 0x700000
#define SX_MRT4_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
#define SX_MRT4_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x7000000
#define SX_MRT4_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
#define SX_MRT5_BLEND_OPT__COLOR_SRC_OPT_MASK 0x7
#define SX_MRT5_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
#define SX_MRT5_BLEND_OPT__COLOR_DST_OPT_MASK 0x70
#define SX_MRT5_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
#define SX_MRT5_BLEND_OPT__COLOR_COMB_FCN_MASK 0x700
#define SX_MRT5_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
#define SX_MRT5_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x70000
#define SX_MRT5_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
#define SX_MRT5_BLEND_OPT__ALPHA_DST_OPT_MASK 0x700000
#define SX_MRT5_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
#define SX_MRT5_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x7000000
#define SX_MRT5_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
#define SX_MRT6_BLEND_OPT__COLOR_SRC_OPT_MASK 0x7
#define SX_MRT6_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
#define SX_MRT6_BLEND_OPT__COLOR_DST_OPT_MASK 0x70
#define SX_MRT6_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
#define SX_MRT6_BLEND_OPT__COLOR_COMB_FCN_MASK 0x700
#define SX_MRT6_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
#define SX_MRT6_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x70000
#define SX_MRT6_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
#define SX_MRT6_BLEND_OPT__ALPHA_DST_OPT_MASK 0x700000
#define SX_MRT6_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
#define SX_MRT6_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x7000000
#define SX_MRT6_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
#define SX_MRT7_BLEND_OPT__COLOR_SRC_OPT_MASK 0x7
#define SX_MRT7_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
#define SX_MRT7_BLEND_OPT__COLOR_DST_OPT_MASK 0x70
#define SX_MRT7_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
#define SX_MRT7_BLEND_OPT__COLOR_COMB_FCN_MASK 0x700
#define SX_MRT7_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
#define SX_MRT7_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x70000
#define SX_MRT7_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
#define SX_MRT7_BLEND_OPT__ALPHA_DST_OPT_MASK 0x700000
#define SX_MRT7_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
#define SX_MRT7_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x7000000
#define SX_MRT7_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
/* TCC_CTRL — texture cache (TCC) configuration */
#define TCC_CTRL__CACHE_SIZE_MASK 0x3
#define TCC_CTRL__CACHE_SIZE__SHIFT 0x0
#define TCC_CTRL__RATE_MASK 0xc
#define TCC_CTRL__RATE__SHIFT 0x2
#define TCC_CTRL__WRITEBACK_MARGIN_MASK 0xf0
#define TCC_CTRL__WRITEBACK_MARGIN__SHIFT 0x4
#define TCC_CTRL__METADATA_LATENCY_FIFO_SIZE_MASK 0xf00
#define TCC_CTRL__METADATA_LATENCY_FIFO_SIZE__SHIFT 0x8
#define TCC_CTRL__SRC_FIFO_SIZE_MASK 0xf000
#define TCC_CTRL__SRC_FIFO_SIZE__SHIFT 0xc
#define TCC_CTRL__LATENCY_FIFO_SIZE_MASK 0xf0000
#define TCC_CTRL__LATENCY_FIFO_SIZE__SHIFT 0x10
#define TCC_CTRL__WB_OR_INV_ALL_VMIDS_MASK 0x100000
#define TCC_CTRL__WB_OR_INV_ALL_VMIDS__SHIFT 0x14
#define TCC_CTRL__MDC_SIZE_MASK 0x3000000
#define TCC_CTRL__MDC_SIZE__SHIFT 0x18
#define TCC_CTRL__MDC_SECTOR_SIZE_MASK 0xc000000
#define TCC_CTRL__MDC_SECTOR_SIZE__SHIFT 0x1a
#define TCC_CTRL__MDC_SIDEBAND_FIFO_SIZE_MASK 0xf0000000
#define TCC_CTRL__MDC_SIDEBAND_FIFO_SIZE__SHIFT 0x1c
/* TCC_EDC_CNT — ECC single/double error counts */
#define TCC_EDC_CNT__SEC_COUNT_MASK 0xff
#define TCC_EDC_CNT__SEC_COUNT__SHIFT 0x0
#define TCC_EDC_CNT__DED_COUNT_MASK 0xff0000
#define TCC_EDC_CNT__DED_COUNT__SHIFT 0x10
#define TCC_REDUNDANCY__MC_SEL0_MASK 0x1
#define TCC_REDUNDANCY__MC_SEL0__SHIFT 0x0
#define TCC_REDUNDANCY__MC_SEL1_MASK 0x2
#define TCC_REDUNDANCY__MC_SEL1__SHIFT 0x1
#define TCC_EXE_DISABLE__EXE_DISABLE_MASK 0x2
#define TCC_EXE_DISABLE__EXE_DISABLE__SHIFT 0x1
#define TCC_DSM_CNTL__CACHE_RAM_IRRITATOR_DATA_SEL_MASK 0x3
#define TCC_DSM_CNTL__CACHE_RAM_IRRITATOR_DATA_SEL__SHIFT 0x0
#define TCC_DSM_CNTL__CACHE_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x4
#define TCC_DSM_CNTL__CACHE_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0x2
/* TCC_CGTT_SCLK_CTRL — TCC clock-gating control (continues past this chunk) */
#define TCC_CGTT_SCLK_CTRL__ON_DELAY_MASK 0xf
#define TCC_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
#define TCC_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
#define TCC_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x1000000
#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x2000000
#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x4000000
#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x8000000
#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000
#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
16145#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000
16146#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
16147#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000
16148#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
16149#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000
16150#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
16151#define TCA_CGTT_SCLK_CTRL__ON_DELAY_MASK 0xf
16152#define TCA_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
16153#define TCA_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
16154#define TCA_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
16155#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x1000000
16156#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
16157#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x2000000
16158#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
16159#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x4000000
16160#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
16161#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x8000000
16162#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
16163#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000
16164#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
16165#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000
16166#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
16167#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000
16168#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
16169#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000
16170#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
16171#define TCC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x3ff
16172#define TCC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
16173#define TCC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0xffc00
16174#define TCC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
16175#define TCC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0xf00000
16176#define TCC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
16177#define TCC_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0xf000000
16178#define TCC_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
16179#define TCC_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000
16180#define TCC_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
16181#define TCC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x3ff
16182#define TCC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
16183#define TCC_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0xffc00
16184#define TCC_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
16185#define TCC_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0xf00000
16186#define TCC_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
16187#define TCC_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0xf000000
16188#define TCC_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
16189#define TCC_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000
16190#define TCC_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
16191#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x3ff
16192#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
16193#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0xffc00
16194#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
16195#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xf000000
16196#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
16197#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xf0000000
16198#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
16199#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x3ff
16200#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
16201#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0xffc00
16202#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
16203#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xf000000
16204#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x18
16205#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0xf0000000
16206#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x1c
16207#define TCC_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x3ff
16208#define TCC_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
16209#define TCC_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0xf00000
16210#define TCC_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
16211#define TCC_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xf0000000
16212#define TCC_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
16213#define TCC_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x3ff
16214#define TCC_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
16215#define TCC_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0xf00000
16216#define TCC_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
16217#define TCC_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xf0000000
16218#define TCC_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
16219#define TCC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffff
16220#define TCC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
16221#define TCC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffff
16222#define TCC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
16223#define TCC_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffff
16224#define TCC_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
16225#define TCC_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffff
16226#define TCC_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
16227#define TCC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffff
16228#define TCC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
16229#define TCC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffff
16230#define TCC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
16231#define TCC_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffff
16232#define TCC_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
16233#define TCC_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffff
16234#define TCC_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
16235#define TCA_CTRL__HOLE_TIMEOUT_MASK 0xf
16236#define TCA_CTRL__HOLE_TIMEOUT__SHIFT 0x0
16237#define TCA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x3ff
16238#define TCA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
16239#define TCA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0xffc00
16240#define TCA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
16241#define TCA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0xf00000
16242#define TCA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
16243#define TCA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0xf000000
16244#define TCA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
16245#define TCA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000
16246#define TCA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
16247#define TCA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x3ff
16248#define TCA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
16249#define TCA_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0xffc00
16250#define TCA_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
16251#define TCA_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0xf00000
16252#define TCA_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
16253#define TCA_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0xf000000
16254#define TCA_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
16255#define TCA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000
16256#define TCA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
16257#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x3ff
16258#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
16259#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0xffc00
16260#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
16261#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xf000000
16262#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
16263#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xf0000000
16264#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
16265#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x3ff
16266#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
16267#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0xffc00
16268#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
16269#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xf000000
16270#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x18
16271#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0xf0000000
16272#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x1c
16273#define TCA_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x3ff
16274#define TCA_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
16275#define TCA_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0xf00000
16276#define TCA_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
16277#define TCA_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xf0000000
16278#define TCA_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
16279#define TCA_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x3ff
16280#define TCA_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
16281#define TCA_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0xf00000
16282#define TCA_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
16283#define TCA_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xf0000000
16284#define TCA_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
16285#define TCA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffff
16286#define TCA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
16287#define TCA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffff
16288#define TCA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
16289#define TCA_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffff
16290#define TCA_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
16291#define TCA_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffff
16292#define TCA_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
16293#define TCA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffff
16294#define TCA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
16295#define TCA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffff
16296#define TCA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
16297#define TCA_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffff
16298#define TCA_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
16299#define TCA_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffff
16300#define TCA_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
16301#define TA_BC_BASE_ADDR__ADDRESS_MASK 0xffffffff
16302#define TA_BC_BASE_ADDR__ADDRESS__SHIFT 0x0
16303#define TA_BC_BASE_ADDR_HI__ADDRESS_MASK 0xff
16304#define TA_BC_BASE_ADDR_HI__ADDRESS__SHIFT 0x0
16305#define TD_CNTL__SYNC_PHASE_SH_MASK 0x3
16306#define TD_CNTL__SYNC_PHASE_SH__SHIFT 0x0
16307#define TD_CNTL__SYNC_PHASE_VC_SMX_MASK 0x30
16308#define TD_CNTL__SYNC_PHASE_VC_SMX__SHIFT 0x4
16309#define TD_CNTL__PAD_STALL_EN_MASK 0x100
16310#define TD_CNTL__PAD_STALL_EN__SHIFT 0x8
16311#define TD_CNTL__EXTEND_LDS_STALL_MASK 0x600
16312#define TD_CNTL__EXTEND_LDS_STALL__SHIFT 0x9
16313#define TD_CNTL__LDS_STALL_PHASE_ADJUST_MASK 0x1800
16314#define TD_CNTL__LDS_STALL_PHASE_ADJUST__SHIFT 0xb
16315#define TD_CNTL__PRECISION_COMPATIBILITY_MASK 0x8000
16316#define TD_CNTL__PRECISION_COMPATIBILITY__SHIFT 0xf
16317#define TD_CNTL__GATHER4_FLOAT_MODE_MASK 0x10000
16318#define TD_CNTL__GATHER4_FLOAT_MODE__SHIFT 0x10
16319#define TD_CNTL__LD_FLOAT_MODE_MASK 0x40000
16320#define TD_CNTL__LD_FLOAT_MODE__SHIFT 0x12
16321#define TD_CNTL__GATHER4_DX9_MODE_MASK 0x80000
16322#define TD_CNTL__GATHER4_DX9_MODE__SHIFT 0x13
16323#define TD_CNTL__DISABLE_POWER_THROTTLE_MASK 0x100000
16324#define TD_CNTL__DISABLE_POWER_THROTTLE__SHIFT 0x14
16325#define TD_CNTL__ENABLE_ROUND_TO_ZERO_MASK 0x200000
16326#define TD_CNTL__ENABLE_ROUND_TO_ZERO__SHIFT 0x15
16327#define TD_CNTL__DISABLE_D16_PACKING_MASK 0x400000
16328#define TD_CNTL__DISABLE_D16_PACKING__SHIFT 0x16
16329#define TD_CNTL__DISABLE_2BIT_SIGNED_FORMAT_MASK 0x800000
16330#define TD_CNTL__DISABLE_2BIT_SIGNED_FORMAT__SHIFT 0x17
16331#define TD_STATUS__BUSY_MASK 0x80000000
16332#define TD_STATUS__BUSY__SHIFT 0x1f
16333#define TD_DEBUG_INDEX__INDEX_MASK 0x1f
16334#define TD_DEBUG_INDEX__INDEX__SHIFT 0x0
16335#define TD_DEBUG_DATA__DATA_MASK 0xffffffff
16336#define TD_DEBUG_DATA__DATA__SHIFT 0x0
16337#define TD_DSM_CNTL__FORCE_SEDB_0_MASK 0x1
16338#define TD_DSM_CNTL__FORCE_SEDB_0__SHIFT 0x0
16339#define TD_DSM_CNTL__FORCE_SEDB_1_MASK 0x2
16340#define TD_DSM_CNTL__FORCE_SEDB_1__SHIFT 0x1
16341#define TD_DSM_CNTL__EN_SINGLE_WR_SEDB_MASK 0x4
16342#define TD_DSM_CNTL__EN_SINGLE_WR_SEDB__SHIFT 0x2
16343#define TD_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0xff
16344#define TD_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
16345#define TD_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x3fc00
16346#define TD_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
16347#define TD_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0xf00000
16348#define TD_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
16349#define TD_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0xf000000
16350#define TD_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
16351#define TD_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000
16352#define TD_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
16353#define TD_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0xff
16354#define TD_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
16355#define TD_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x3fc00
16356#define TD_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
16357#define TD_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0xf00000
16358#define TD_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
16359#define TD_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0xf000000
16360#define TD_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
16361#define TD_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000
16362#define TD_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
16363#define TD_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0xff
16364#define TD_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
16365#define TD_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x3fc00
16366#define TD_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
16367#define TD_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xf000000
16368#define TD_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
16369#define TD_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xf0000000
16370#define TD_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
16371#define TD_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffff
16372#define TD_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
16373#define TD_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffff
16374#define TD_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
16375#define TD_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffff
16376#define TD_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
16377#define TD_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffff
16378#define TD_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
16379#define TD_SCRATCH__SCRATCH_MASK 0xffffffff
16380#define TD_SCRATCH__SCRATCH__SHIFT 0x0
16381#define TA_CNTL__FX_XNACK_CREDIT_MASK 0x7f
16382#define TA_CNTL__FX_XNACK_CREDIT__SHIFT 0x0
16383#define TA_CNTL__SQ_XNACK_CREDIT_MASK 0x1e00
16384#define TA_CNTL__SQ_XNACK_CREDIT__SHIFT 0x9
16385#define TA_CNTL__TC_DATA_CREDIT_MASK 0xe000
16386#define TA_CNTL__TC_DATA_CREDIT__SHIFT 0xd
16387#define TA_CNTL__ALIGNER_CREDIT_MASK 0x1f0000
16388#define TA_CNTL__ALIGNER_CREDIT__SHIFT 0x10
16389#define TA_CNTL__TD_FIFO_CREDIT_MASK 0xffc00000
16390#define TA_CNTL__TD_FIFO_CREDIT__SHIFT 0x16
16391#define TA_CNTL_AUX__SCOAL_DSWIZZLE_N_MASK 0x1
16392#define TA_CNTL_AUX__SCOAL_DSWIZZLE_N__SHIFT 0x0
16393#define TA_CNTL_AUX__RESERVED_MASK 0xe
16394#define TA_CNTL_AUX__RESERVED__SHIFT 0x1
16395#define TA_CNTL_AUX__D16_PACK_DISABLE_MASK 0x10
16396#define TA_CNTL_AUX__D16_PACK_DISABLE__SHIFT 0x4
16397#define TA_CNTL_AUX__ANISO_WEIGHT_MODE_MASK 0x10000
16398#define TA_CNTL_AUX__ANISO_WEIGHT_MODE__SHIFT 0x10
16399#define TA_CNTL_AUX__ANISO_RATIO_LUT_MASK 0x20000
16400#define TA_CNTL_AUX__ANISO_RATIO_LUT__SHIFT 0x11
16401#define TA_CNTL_AUX__ANISO_TAP_MASK 0x40000
16402#define TA_CNTL_AUX__ANISO_TAP__SHIFT 0x12
16403#define TA_CNTL_AUX__ANISO_MIP_ADJ_MODE_MASK 0x80000
16404#define TA_CNTL_AUX__ANISO_MIP_ADJ_MODE__SHIFT 0x13
16405#define TA_RESERVED_010C__Unused_MASK 0xffffffff
16406#define TA_RESERVED_010C__Unused__SHIFT 0x0
16407#define TA_CS_BC_BASE_ADDR__ADDRESS_MASK 0xffffffff
16408#define TA_CS_BC_BASE_ADDR__ADDRESS__SHIFT 0x0
16409#define TA_CS_BC_BASE_ADDR_HI__ADDRESS_MASK 0xff
16410#define TA_CS_BC_BASE_ADDR_HI__ADDRESS__SHIFT 0x0
16411#define TA_STATUS__FG_PFIFO_EMPTYB_MASK 0x1000
16412#define TA_STATUS__FG_PFIFO_EMPTYB__SHIFT 0xc
16413#define TA_STATUS__FG_LFIFO_EMPTYB_MASK 0x2000
16414#define TA_STATUS__FG_LFIFO_EMPTYB__SHIFT 0xd
16415#define TA_STATUS__FG_SFIFO_EMPTYB_MASK 0x4000
16416#define TA_STATUS__FG_SFIFO_EMPTYB__SHIFT 0xe
16417#define TA_STATUS__FL_PFIFO_EMPTYB_MASK 0x10000
16418#define TA_STATUS__FL_PFIFO_EMPTYB__SHIFT 0x10
16419#define TA_STATUS__FL_LFIFO_EMPTYB_MASK 0x20000
16420#define TA_STATUS__FL_LFIFO_EMPTYB__SHIFT 0x11
16421#define TA_STATUS__FL_SFIFO_EMPTYB_MASK 0x40000
16422#define TA_STATUS__FL_SFIFO_EMPTYB__SHIFT 0x12
16423#define TA_STATUS__FA_PFIFO_EMPTYB_MASK 0x100000
16424#define TA_STATUS__FA_PFIFO_EMPTYB__SHIFT 0x14
16425#define TA_STATUS__FA_LFIFO_EMPTYB_MASK 0x200000
16426#define TA_STATUS__FA_LFIFO_EMPTYB__SHIFT 0x15
16427#define TA_STATUS__FA_SFIFO_EMPTYB_MASK 0x400000
16428#define TA_STATUS__FA_SFIFO_EMPTYB__SHIFT 0x16
16429#define TA_STATUS__IN_BUSY_MASK 0x1000000
16430#define TA_STATUS__IN_BUSY__SHIFT 0x18
16431#define TA_STATUS__FG_BUSY_MASK 0x2000000
16432#define TA_STATUS__FG_BUSY__SHIFT 0x19
16433#define TA_STATUS__LA_BUSY_MASK 0x4000000
16434#define TA_STATUS__LA_BUSY__SHIFT 0x1a
16435#define TA_STATUS__FL_BUSY_MASK 0x8000000
16436#define TA_STATUS__FL_BUSY__SHIFT 0x1b
16437#define TA_STATUS__TA_BUSY_MASK 0x10000000
16438#define TA_STATUS__TA_BUSY__SHIFT 0x1c
16439#define TA_STATUS__FA_BUSY_MASK 0x20000000
16440#define TA_STATUS__FA_BUSY__SHIFT 0x1d
16441#define TA_STATUS__AL_BUSY_MASK 0x40000000
16442#define TA_STATUS__AL_BUSY__SHIFT 0x1e
16443#define TA_STATUS__BUSY_MASK 0x80000000
16444#define TA_STATUS__BUSY__SHIFT 0x1f
16445#define TA_DEBUG_INDEX__INDEX_MASK 0x1f
16446#define TA_DEBUG_INDEX__INDEX__SHIFT 0x0
16447#define TA_DEBUG_DATA__DATA_MASK 0xffffffff
16448#define TA_DEBUG_DATA__DATA__SHIFT 0x0
16449#define TA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0xff
16450#define TA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
16451#define TA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x3fc00
16452#define TA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
16453#define TA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0xf00000
16454#define TA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
16455#define TA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0xf000000
16456#define TA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
16457#define TA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000
16458#define TA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
16459#define TA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0xff
16460#define TA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
16461#define TA_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x3fc00
16462#define TA_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
16463#define TA_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0xf00000
16464#define TA_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
16465#define TA_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0xf000000
16466#define TA_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
16467#define TA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000
16468#define TA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
16469#define TA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0xff
16470#define TA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
16471#define TA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x3fc00
16472#define TA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
16473#define TA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xf000000
16474#define TA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
16475#define TA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xf0000000
16476#define TA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
16477#define TA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffff
16478#define TA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
16479#define TA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffff
16480#define TA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
16481#define TA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffff
16482#define TA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
16483#define TA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffff
16484#define TA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
16485#define TA_SCRATCH__SCRATCH_MASK 0xffffffff
16486#define TA_SCRATCH__SCRATCH__SHIFT 0x0
16487#define SH_HIDDEN_PRIVATE_BASE_VMID__ADDRESS_MASK 0xffffffff
16488#define SH_HIDDEN_PRIVATE_BASE_VMID__ADDRESS__SHIFT 0x0
16489#define SH_STATIC_MEM_CONFIG__SWIZZLE_ENABLE_MASK 0x1
16490#define SH_STATIC_MEM_CONFIG__SWIZZLE_ENABLE__SHIFT 0x0
16491#define SH_STATIC_MEM_CONFIG__ELEMENT_SIZE_MASK 0x6
16492#define SH_STATIC_MEM_CONFIG__ELEMENT_SIZE__SHIFT 0x1
16493#define SH_STATIC_MEM_CONFIG__INDEX_STRIDE_MASK 0x18
16494#define SH_STATIC_MEM_CONFIG__INDEX_STRIDE__SHIFT 0x3
16495#define SH_STATIC_MEM_CONFIG__PRIVATE_MTYPE_MASK 0xe0
16496#define SH_STATIC_MEM_CONFIG__PRIVATE_MTYPE__SHIFT 0x5
16497#define SH_STATIC_MEM_CONFIG__READ_ONLY_CNTL_MASK 0xff00
16498#define SH_STATIC_MEM_CONFIG__READ_ONLY_CNTL__SHIFT 0x8
16499#define TCP_INVALIDATE__START_MASK 0x1
16500#define TCP_INVALIDATE__START__SHIFT 0x0
16501#define TCP_STATUS__TCP_BUSY_MASK 0x1
16502#define TCP_STATUS__TCP_BUSY__SHIFT 0x0
16503#define TCP_STATUS__INPUT_BUSY_MASK 0x2
16504#define TCP_STATUS__INPUT_BUSY__SHIFT 0x1
16505#define TCP_STATUS__ADRS_BUSY_MASK 0x4
16506#define TCP_STATUS__ADRS_BUSY__SHIFT 0x2
16507#define TCP_STATUS__TAGRAMS_BUSY_MASK 0x8
16508#define TCP_STATUS__TAGRAMS_BUSY__SHIFT 0x3
16509#define TCP_STATUS__CNTRL_BUSY_MASK 0x10
16510#define TCP_STATUS__CNTRL_BUSY__SHIFT 0x4
16511#define TCP_STATUS__LFIFO_BUSY_MASK 0x20
16512#define TCP_STATUS__LFIFO_BUSY__SHIFT 0x5
16513#define TCP_STATUS__READ_BUSY_MASK 0x40
16514#define TCP_STATUS__READ_BUSY__SHIFT 0x6
16515#define TCP_STATUS__FORMAT_BUSY_MASK 0x80
16516#define TCP_STATUS__FORMAT_BUSY__SHIFT 0x7
16517#define TCP_CNTL__FORCE_HIT_MASK 0x1
16518#define TCP_CNTL__FORCE_HIT__SHIFT 0x0
16519#define TCP_CNTL__FORCE_MISS_MASK 0x2
16520#define TCP_CNTL__FORCE_MISS__SHIFT 0x1
16521#define TCP_CNTL__L1_SIZE_MASK 0xc
16522#define TCP_CNTL__L1_SIZE__SHIFT 0x2
16523#define TCP_CNTL__FLAT_BUF_HASH_ENABLE_MASK 0x10
16524#define TCP_CNTL__FLAT_BUF_HASH_ENABLE__SHIFT 0x4
16525#define TCP_CNTL__FLAT_BUF_CACHE_SWIZZLE_MASK 0x20
16526#define TCP_CNTL__FLAT_BUF_CACHE_SWIZZLE__SHIFT 0x5
16527#define TCP_CNTL__FORCE_EOW_TOTAL_CNT_MASK 0x1f8000
16528#define TCP_CNTL__FORCE_EOW_TOTAL_CNT__SHIFT 0xf
16529#define TCP_CNTL__FORCE_EOW_TAGRAM_CNT_MASK 0xfc00000
16530#define TCP_CNTL__FORCE_EOW_TAGRAM_CNT__SHIFT 0x16
16531#define TCP_CNTL__DISABLE_Z_MAP_MASK 0x10000000
16532#define TCP_CNTL__DISABLE_Z_MAP__SHIFT 0x1c
16533#define TCP_CNTL__INV_ALL_VMIDS_MASK 0x20000000
16534#define TCP_CNTL__INV_ALL_VMIDS__SHIFT 0x1d
16535#define TCP_CHAN_STEER_LO__CHAN0_MASK 0xf
16536#define TCP_CHAN_STEER_LO__CHAN0__SHIFT 0x0
16537#define TCP_CHAN_STEER_LO__CHAN1_MASK 0xf0
16538#define TCP_CHAN_STEER_LO__CHAN1__SHIFT 0x4
16539#define TCP_CHAN_STEER_LO__CHAN2_MASK 0xf00
16540#define TCP_CHAN_STEER_LO__CHAN2__SHIFT 0x8
16541#define TCP_CHAN_STEER_LO__CHAN3_MASK 0xf000
16542#define TCP_CHAN_STEER_LO__CHAN3__SHIFT 0xc
16543#define TCP_CHAN_STEER_LO__CHAN4_MASK 0xf0000
16544#define TCP_CHAN_STEER_LO__CHAN4__SHIFT 0x10
16545#define TCP_CHAN_STEER_LO__CHAN5_MASK 0xf00000
16546#define TCP_CHAN_STEER_LO__CHAN5__SHIFT 0x14
16547#define TCP_CHAN_STEER_LO__CHAN6_MASK 0xf000000
16548#define TCP_CHAN_STEER_LO__CHAN6__SHIFT 0x18
16549#define TCP_CHAN_STEER_LO__CHAN7_MASK 0xf0000000
16550#define TCP_CHAN_STEER_LO__CHAN7__SHIFT 0x1c
16551#define TCP_CHAN_STEER_HI__CHAN8_MASK 0xf
16552#define TCP_CHAN_STEER_HI__CHAN8__SHIFT 0x0
16553#define TCP_CHAN_STEER_HI__CHAN9_MASK 0xf0
16554#define TCP_CHAN_STEER_HI__CHAN9__SHIFT 0x4
16555#define TCP_CHAN_STEER_HI__CHANA_MASK 0xf00
16556#define TCP_CHAN_STEER_HI__CHANA__SHIFT 0x8
16557#define TCP_CHAN_STEER_HI__CHANB_MASK 0xf000
16558#define TCP_CHAN_STEER_HI__CHANB__SHIFT 0xc
16559#define TCP_CHAN_STEER_HI__CHANC_MASK 0xf0000
16560#define TCP_CHAN_STEER_HI__CHANC__SHIFT 0x10
16561#define TCP_CHAN_STEER_HI__CHAND_MASK 0xf00000
16562#define TCP_CHAN_STEER_HI__CHAND__SHIFT 0x14
16563#define TCP_CHAN_STEER_HI__CHANE_MASK 0xf000000
16564#define TCP_CHAN_STEER_HI__CHANE__SHIFT 0x18
16565#define TCP_CHAN_STEER_HI__CHANF_MASK 0xf0000000
16566#define TCP_CHAN_STEER_HI__CHANF__SHIFT 0x1c
16567#define TCP_ADDR_CONFIG__NUM_TCC_BANKS_MASK 0xf
16568#define TCP_ADDR_CONFIG__NUM_TCC_BANKS__SHIFT 0x0
16569#define TCP_ADDR_CONFIG__NUM_BANKS_MASK 0x30
16570#define TCP_ADDR_CONFIG__NUM_BANKS__SHIFT 0x4
16571#define TCP_ADDR_CONFIG__COLHI_WIDTH_MASK 0x1c0
16572#define TCP_ADDR_CONFIG__COLHI_WIDTH__SHIFT 0x6
16573#define TCP_ADDR_CONFIG__RB_SPLIT_COLHI_MASK 0x200
16574#define TCP_ADDR_CONFIG__RB_SPLIT_COLHI__SHIFT 0x9
16575#define TCP_CREDIT__LFIFO_CREDIT_MASK 0x3ff
16576#define TCP_CREDIT__LFIFO_CREDIT__SHIFT 0x0
16577#define TCP_CREDIT__REQ_FIFO_CREDIT_MASK 0x7f0000
16578#define TCP_CREDIT__REQ_FIFO_CREDIT__SHIFT 0x10
16579#define TCP_CREDIT__TD_CREDIT_MASK 0xe0000000
16580#define TCP_CREDIT__TD_CREDIT__SHIFT 0x1d
16581#define TCP_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x3ff
16582#define TCP_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
16583#define TCP_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0xffc00
16584#define TCP_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
16585#define TCP_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0xf00000
16586#define TCP_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
16587#define TCP_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0xf000000
16588#define TCP_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
16589#define TCP_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000
16590#define TCP_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
16591#define TCP_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x3ff
16592#define TCP_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
16593#define TCP_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0xffc00
16594#define TCP_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
16595#define TCP_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0xf00000
16596#define TCP_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
16597#define TCP_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0xf000000
16598#define TCP_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
16599#define TCP_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000
16600#define TCP_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
16601#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x3ff
16602#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
16603#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0xffc00
16604#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
16605#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xf000000
16606#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
16607#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xf0000000
16608#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
16609#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x3ff
16610#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
16611#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0xffc00
16612#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
16613#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0xf000000
16614#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
16615#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xf0000000
16616#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
16617#define TCP_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x3ff
16618#define TCP_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
16619#define TCP_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0xf00000
16620#define TCP_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
16621#define TCP_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xf0000000
16622#define TCP_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
16623#define TCP_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x3ff
16624#define TCP_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
16625#define TCP_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0xf00000
16626#define TCP_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
16627#define TCP_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xf0000000
16628#define TCP_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
16629#define TCP_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffff
16630#define TCP_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
16631#define TCP_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffff
16632#define TCP_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
16633#define TCP_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffff
16634#define TCP_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
16635#define TCP_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffff
16636#define TCP_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
16637#define TCP_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffff
16638#define TCP_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
16639#define TCP_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffff
16640#define TCP_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
16641#define TCP_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffff
16642#define TCP_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
16643#define TCP_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffff
16644#define TCP_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
16645#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_BITS_MASK 0x7
16646#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_BITS__SHIFT 0x0
16647#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_BITS_MASK 0x700
16648#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_BITS__SHIFT 0x8
16649#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_XOR_COUNT_MASK 0x70000
16650#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_XOR_COUNT__SHIFT 0x10
16651#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_XOR_COUNT_MASK 0x7000000
16652#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_XOR_COUNT__SHIFT 0x18
16653#define TCP_EDC_CNT__SEC_COUNT_MASK 0xff
16654#define TCP_EDC_CNT__SEC_COUNT__SHIFT 0x0
16655#define TCP_EDC_CNT__LFIFO_SED_COUNT_MASK 0xff00
16656#define TCP_EDC_CNT__LFIFO_SED_COUNT__SHIFT 0x8
16657#define TCP_EDC_CNT__DED_COUNT_MASK 0xff0000
16658#define TCP_EDC_CNT__DED_COUNT__SHIFT 0x10
16659#define TCP_EDC_CNT__UNUSED_MASK 0xff000000
16660#define TCP_EDC_CNT__UNUSED__SHIFT 0x18
16661#define TC_CFG_L1_LOAD_POLICY0__POLICY_0_MASK 0x3
16662#define TC_CFG_L1_LOAD_POLICY0__POLICY_0__SHIFT 0x0
16663#define TC_CFG_L1_LOAD_POLICY0__POLICY_1_MASK 0xc
16664#define TC_CFG_L1_LOAD_POLICY0__POLICY_1__SHIFT 0x2
16665#define TC_CFG_L1_LOAD_POLICY0__POLICY_2_MASK 0x30
16666#define TC_CFG_L1_LOAD_POLICY0__POLICY_2__SHIFT 0x4
16667#define TC_CFG_L1_LOAD_POLICY0__POLICY_3_MASK 0xc0
16668#define TC_CFG_L1_LOAD_POLICY0__POLICY_3__SHIFT 0x6
16669#define TC_CFG_L1_LOAD_POLICY0__POLICY_4_MASK 0x300
16670#define TC_CFG_L1_LOAD_POLICY0__POLICY_4__SHIFT 0x8
16671#define TC_CFG_L1_LOAD_POLICY0__POLICY_5_MASK 0xc00
16672#define TC_CFG_L1_LOAD_POLICY0__POLICY_5__SHIFT 0xa
16673#define TC_CFG_L1_LOAD_POLICY0__POLICY_6_MASK 0x3000
16674#define TC_CFG_L1_LOAD_POLICY0__POLICY_6__SHIFT 0xc
16675#define TC_CFG_L1_LOAD_POLICY0__POLICY_7_MASK 0xc000
16676#define TC_CFG_L1_LOAD_POLICY0__POLICY_7__SHIFT 0xe
16677#define TC_CFG_L1_LOAD_POLICY0__POLICY_8_MASK 0x30000
16678#define TC_CFG_L1_LOAD_POLICY0__POLICY_8__SHIFT 0x10
16679#define TC_CFG_L1_LOAD_POLICY0__POLICY_9_MASK 0xc0000
16680#define TC_CFG_L1_LOAD_POLICY0__POLICY_9__SHIFT 0x12
16681#define TC_CFG_L1_LOAD_POLICY0__POLICY_10_MASK 0x300000
16682#define TC_CFG_L1_LOAD_POLICY0__POLICY_10__SHIFT 0x14
16683#define TC_CFG_L1_LOAD_POLICY0__POLICY_11_MASK 0xc00000
16684#define TC_CFG_L1_LOAD_POLICY0__POLICY_11__SHIFT 0x16
16685#define TC_CFG_L1_LOAD_POLICY0__POLICY_12_MASK 0x3000000
16686#define TC_CFG_L1_LOAD_POLICY0__POLICY_12__SHIFT 0x18
16687#define TC_CFG_L1_LOAD_POLICY0__POLICY_13_MASK 0xc000000
16688#define TC_CFG_L1_LOAD_POLICY0__POLICY_13__SHIFT 0x1a
16689#define TC_CFG_L1_LOAD_POLICY0__POLICY_14_MASK 0x30000000
16690#define TC_CFG_L1_LOAD_POLICY0__POLICY_14__SHIFT 0x1c
16691#define TC_CFG_L1_LOAD_POLICY0__POLICY_15_MASK 0xc0000000
16692#define TC_CFG_L1_LOAD_POLICY0__POLICY_15__SHIFT 0x1e
16693#define TC_CFG_L1_LOAD_POLICY1__POLICY_16_MASK 0x3
16694#define TC_CFG_L1_LOAD_POLICY1__POLICY_16__SHIFT 0x0
16695#define TC_CFG_L1_LOAD_POLICY1__POLICY_17_MASK 0xc
16696#define TC_CFG_L1_LOAD_POLICY1__POLICY_17__SHIFT 0x2
16697#define TC_CFG_L1_LOAD_POLICY1__POLICY_18_MASK 0x30
16698#define TC_CFG_L1_LOAD_POLICY1__POLICY_18__SHIFT 0x4
16699#define TC_CFG_L1_LOAD_POLICY1__POLICY_19_MASK 0xc0
16700#define TC_CFG_L1_LOAD_POLICY1__POLICY_19__SHIFT 0x6
16701#define TC_CFG_L1_LOAD_POLICY1__POLICY_20_MASK 0x300
16702#define TC_CFG_L1_LOAD_POLICY1__POLICY_20__SHIFT 0x8
16703#define TC_CFG_L1_LOAD_POLICY1__POLICY_21_MASK 0xc00
16704#define TC_CFG_L1_LOAD_POLICY1__POLICY_21__SHIFT 0xa
16705#define TC_CFG_L1_LOAD_POLICY1__POLICY_22_MASK 0x3000
16706#define TC_CFG_L1_LOAD_POLICY1__POLICY_22__SHIFT 0xc
16707#define TC_CFG_L1_LOAD_POLICY1__POLICY_23_MASK 0xc000
16708#define TC_CFG_L1_LOAD_POLICY1__POLICY_23__SHIFT 0xe
16709#define TC_CFG_L1_LOAD_POLICY1__POLICY_24_MASK 0x30000
16710#define TC_CFG_L1_LOAD_POLICY1__POLICY_24__SHIFT 0x10
16711#define TC_CFG_L1_LOAD_POLICY1__POLICY_25_MASK 0xc0000
16712#define TC_CFG_L1_LOAD_POLICY1__POLICY_25__SHIFT 0x12
16713#define TC_CFG_L1_LOAD_POLICY1__POLICY_26_MASK 0x300000
16714#define TC_CFG_L1_LOAD_POLICY1__POLICY_26__SHIFT 0x14
16715#define TC_CFG_L1_LOAD_POLICY1__POLICY_27_MASK 0xc00000
16716#define TC_CFG_L1_LOAD_POLICY1__POLICY_27__SHIFT 0x16
16717#define TC_CFG_L1_LOAD_POLICY1__POLICY_28_MASK 0x3000000
16718#define TC_CFG_L1_LOAD_POLICY1__POLICY_28__SHIFT 0x18
16719#define TC_CFG_L1_LOAD_POLICY1__POLICY_29_MASK 0xc000000
16720#define TC_CFG_L1_LOAD_POLICY1__POLICY_29__SHIFT 0x1a
16721#define TC_CFG_L1_LOAD_POLICY1__POLICY_30_MASK 0x30000000
16722#define TC_CFG_L1_LOAD_POLICY1__POLICY_30__SHIFT 0x1c
16723#define TC_CFG_L1_LOAD_POLICY1__POLICY_31_MASK 0xc0000000
16724#define TC_CFG_L1_LOAD_POLICY1__POLICY_31__SHIFT 0x1e
16725#define TC_CFG_L1_STORE_POLICY__POLICY_0_MASK 0x1
16726#define TC_CFG_L1_STORE_POLICY__POLICY_0__SHIFT 0x0
16727#define TC_CFG_L1_STORE_POLICY__POLICY_1_MASK 0x2
16728#define TC_CFG_L1_STORE_POLICY__POLICY_1__SHIFT 0x1
16729#define TC_CFG_L1_STORE_POLICY__POLICY_2_MASK 0x4
16730#define TC_CFG_L1_STORE_POLICY__POLICY_2__SHIFT 0x2
16731#define TC_CFG_L1_STORE_POLICY__POLICY_3_MASK 0x8
16732#define TC_CFG_L1_STORE_POLICY__POLICY_3__SHIFT 0x3
16733#define TC_CFG_L1_STORE_POLICY__POLICY_4_MASK 0x10
16734#define TC_CFG_L1_STORE_POLICY__POLICY_4__SHIFT 0x4
16735#define TC_CFG_L1_STORE_POLICY__POLICY_5_MASK 0x20
16736#define TC_CFG_L1_STORE_POLICY__POLICY_5__SHIFT 0x5
16737#define TC_CFG_L1_STORE_POLICY__POLICY_6_MASK 0x40
16738#define TC_CFG_L1_STORE_POLICY__POLICY_6__SHIFT 0x6
16739#define TC_CFG_L1_STORE_POLICY__POLICY_7_MASK 0x80
16740#define TC_CFG_L1_STORE_POLICY__POLICY_7__SHIFT 0x7
16741#define TC_CFG_L1_STORE_POLICY__POLICY_8_MASK 0x100
16742#define TC_CFG_L1_STORE_POLICY__POLICY_8__SHIFT 0x8
16743#define TC_CFG_L1_STORE_POLICY__POLICY_9_MASK 0x200
16744#define TC_CFG_L1_STORE_POLICY__POLICY_9__SHIFT 0x9
16745#define TC_CFG_L1_STORE_POLICY__POLICY_10_MASK 0x400
16746#define TC_CFG_L1_STORE_POLICY__POLICY_10__SHIFT 0xa
16747#define TC_CFG_L1_STORE_POLICY__POLICY_11_MASK 0x800
16748#define TC_CFG_L1_STORE_POLICY__POLICY_11__SHIFT 0xb
16749#define TC_CFG_L1_STORE_POLICY__POLICY_12_MASK 0x1000
16750#define TC_CFG_L1_STORE_POLICY__POLICY_12__SHIFT 0xc
16751#define TC_CFG_L1_STORE_POLICY__POLICY_13_MASK 0x2000
16752#define TC_CFG_L1_STORE_POLICY__POLICY_13__SHIFT 0xd
16753#define TC_CFG_L1_STORE_POLICY__POLICY_14_MASK 0x4000
16754#define TC_CFG_L1_STORE_POLICY__POLICY_14__SHIFT 0xe
16755#define TC_CFG_L1_STORE_POLICY__POLICY_15_MASK 0x8000
16756#define TC_CFG_L1_STORE_POLICY__POLICY_15__SHIFT 0xf
16757#define TC_CFG_L1_STORE_POLICY__POLICY_16_MASK 0x10000
16758#define TC_CFG_L1_STORE_POLICY__POLICY_16__SHIFT 0x10
16759#define TC_CFG_L1_STORE_POLICY__POLICY_17_MASK 0x20000
16760#define TC_CFG_L1_STORE_POLICY__POLICY_17__SHIFT 0x11
16761#define TC_CFG_L1_STORE_POLICY__POLICY_18_MASK 0x40000
16762#define TC_CFG_L1_STORE_POLICY__POLICY_18__SHIFT 0x12
16763#define TC_CFG_L1_STORE_POLICY__POLICY_19_MASK 0x80000
16764#define TC_CFG_L1_STORE_POLICY__POLICY_19__SHIFT 0x13
16765#define TC_CFG_L1_STORE_POLICY__POLICY_20_MASK 0x100000
16766#define TC_CFG_L1_STORE_POLICY__POLICY_20__SHIFT 0x14
16767#define TC_CFG_L1_STORE_POLICY__POLICY_21_MASK 0x200000
16768#define TC_CFG_L1_STORE_POLICY__POLICY_21__SHIFT 0x15
16769#define TC_CFG_L1_STORE_POLICY__POLICY_22_MASK 0x400000
16770#define TC_CFG_L1_STORE_POLICY__POLICY_22__SHIFT 0x16
16771#define TC_CFG_L1_STORE_POLICY__POLICY_23_MASK 0x800000
16772#define TC_CFG_L1_STORE_POLICY__POLICY_23__SHIFT 0x17
16773#define TC_CFG_L1_STORE_POLICY__POLICY_24_MASK 0x1000000
16774#define TC_CFG_L1_STORE_POLICY__POLICY_24__SHIFT 0x18
16775#define TC_CFG_L1_STORE_POLICY__POLICY_25_MASK 0x2000000
16776#define TC_CFG_L1_STORE_POLICY__POLICY_25__SHIFT 0x19
16777#define TC_CFG_L1_STORE_POLICY__POLICY_26_MASK 0x4000000
16778#define TC_CFG_L1_STORE_POLICY__POLICY_26__SHIFT 0x1a
16779#define TC_CFG_L1_STORE_POLICY__POLICY_27_MASK 0x8000000
16780#define TC_CFG_L1_STORE_POLICY__POLICY_27__SHIFT 0x1b
16781#define TC_CFG_L1_STORE_POLICY__POLICY_28_MASK 0x10000000
16782#define TC_CFG_L1_STORE_POLICY__POLICY_28__SHIFT 0x1c
16783#define TC_CFG_L1_STORE_POLICY__POLICY_29_MASK 0x20000000
16784#define TC_CFG_L1_STORE_POLICY__POLICY_29__SHIFT 0x1d
16785#define TC_CFG_L1_STORE_POLICY__POLICY_30_MASK 0x40000000
16786#define TC_CFG_L1_STORE_POLICY__POLICY_30__SHIFT 0x1e
16787#define TC_CFG_L1_STORE_POLICY__POLICY_31_MASK 0x80000000
16788#define TC_CFG_L1_STORE_POLICY__POLICY_31__SHIFT 0x1f
16789#define TC_CFG_L2_LOAD_POLICY0__POLICY_0_MASK 0x3
16790#define TC_CFG_L2_LOAD_POLICY0__POLICY_0__SHIFT 0x0
16791#define TC_CFG_L2_LOAD_POLICY0__POLICY_1_MASK 0xc
16792#define TC_CFG_L2_LOAD_POLICY0__POLICY_1__SHIFT 0x2
16793#define TC_CFG_L2_LOAD_POLICY0__POLICY_2_MASK 0x30
16794#define TC_CFG_L2_LOAD_POLICY0__POLICY_2__SHIFT 0x4
16795#define TC_CFG_L2_LOAD_POLICY0__POLICY_3_MASK 0xc0
16796#define TC_CFG_L2_LOAD_POLICY0__POLICY_3__SHIFT 0x6
16797#define TC_CFG_L2_LOAD_POLICY0__POLICY_4_MASK 0x300
16798#define TC_CFG_L2_LOAD_POLICY0__POLICY_4__SHIFT 0x8
16799#define TC_CFG_L2_LOAD_POLICY0__POLICY_5_MASK 0xc00
16800#define TC_CFG_L2_LOAD_POLICY0__POLICY_5__SHIFT 0xa
16801#define TC_CFG_L2_LOAD_POLICY0__POLICY_6_MASK 0x3000
16802#define TC_CFG_L2_LOAD_POLICY0__POLICY_6__SHIFT 0xc
16803#define TC_CFG_L2_LOAD_POLICY0__POLICY_7_MASK 0xc000
16804#define TC_CFG_L2_LOAD_POLICY0__POLICY_7__SHIFT 0xe
16805#define TC_CFG_L2_LOAD_POLICY0__POLICY_8_MASK 0x30000
16806#define TC_CFG_L2_LOAD_POLICY0__POLICY_8__SHIFT 0x10
16807#define TC_CFG_L2_LOAD_POLICY0__POLICY_9_MASK 0xc0000
16808#define TC_CFG_L2_LOAD_POLICY0__POLICY_9__SHIFT 0x12
16809#define TC_CFG_L2_LOAD_POLICY0__POLICY_10_MASK 0x300000
16810#define TC_CFG_L2_LOAD_POLICY0__POLICY_10__SHIFT 0x14
16811#define TC_CFG_L2_LOAD_POLICY0__POLICY_11_MASK 0xc00000
16812#define TC_CFG_L2_LOAD_POLICY0__POLICY_11__SHIFT 0x16
16813#define TC_CFG_L2_LOAD_POLICY0__POLICY_12_MASK 0x3000000
16814#define TC_CFG_L2_LOAD_POLICY0__POLICY_12__SHIFT 0x18
16815#define TC_CFG_L2_LOAD_POLICY0__POLICY_13_MASK 0xc000000
16816#define TC_CFG_L2_LOAD_POLICY0__POLICY_13__SHIFT 0x1a
16817#define TC_CFG_L2_LOAD_POLICY0__POLICY_14_MASK 0x30000000
16818#define TC_CFG_L2_LOAD_POLICY0__POLICY_14__SHIFT 0x1c
16819#define TC_CFG_L2_LOAD_POLICY0__POLICY_15_MASK 0xc0000000
16820#define TC_CFG_L2_LOAD_POLICY0__POLICY_15__SHIFT 0x1e
16821#define TC_CFG_L2_LOAD_POLICY1__POLICY_16_MASK 0x3
16822#define TC_CFG_L2_LOAD_POLICY1__POLICY_16__SHIFT 0x0
16823#define TC_CFG_L2_LOAD_POLICY1__POLICY_17_MASK 0xc
16824#define TC_CFG_L2_LOAD_POLICY1__POLICY_17__SHIFT 0x2
16825#define TC_CFG_L2_LOAD_POLICY1__POLICY_18_MASK 0x30
16826#define TC_CFG_L2_LOAD_POLICY1__POLICY_18__SHIFT 0x4
16827#define TC_CFG_L2_LOAD_POLICY1__POLICY_19_MASK 0xc0
16828#define TC_CFG_L2_LOAD_POLICY1__POLICY_19__SHIFT 0x6
16829#define TC_CFG_L2_LOAD_POLICY1__POLICY_20_MASK 0x300
16830#define TC_CFG_L2_LOAD_POLICY1__POLICY_20__SHIFT 0x8
16831#define TC_CFG_L2_LOAD_POLICY1__POLICY_21_MASK 0xc00
16832#define TC_CFG_L2_LOAD_POLICY1__POLICY_21__SHIFT 0xa
16833#define TC_CFG_L2_LOAD_POLICY1__POLICY_22_MASK 0x3000
16834#define TC_CFG_L2_LOAD_POLICY1__POLICY_22__SHIFT 0xc
16835#define TC_CFG_L2_LOAD_POLICY1__POLICY_23_MASK 0xc000
16836#define TC_CFG_L2_LOAD_POLICY1__POLICY_23__SHIFT 0xe
16837#define TC_CFG_L2_LOAD_POLICY1__POLICY_24_MASK 0x30000
16838#define TC_CFG_L2_LOAD_POLICY1__POLICY_24__SHIFT 0x10
16839#define TC_CFG_L2_LOAD_POLICY1__POLICY_25_MASK 0xc0000
16840#define TC_CFG_L2_LOAD_POLICY1__POLICY_25__SHIFT 0x12
16841#define TC_CFG_L2_LOAD_POLICY1__POLICY_26_MASK 0x300000
16842#define TC_CFG_L2_LOAD_POLICY1__POLICY_26__SHIFT 0x14
16843#define TC_CFG_L2_LOAD_POLICY1__POLICY_27_MASK 0xc00000
16844#define TC_CFG_L2_LOAD_POLICY1__POLICY_27__SHIFT 0x16
16845#define TC_CFG_L2_LOAD_POLICY1__POLICY_28_MASK 0x3000000
16846#define TC_CFG_L2_LOAD_POLICY1__POLICY_28__SHIFT 0x18
16847#define TC_CFG_L2_LOAD_POLICY1__POLICY_29_MASK 0xc000000
16848#define TC_CFG_L2_LOAD_POLICY1__POLICY_29__SHIFT 0x1a
16849#define TC_CFG_L2_LOAD_POLICY1__POLICY_30_MASK 0x30000000
16850#define TC_CFG_L2_LOAD_POLICY1__POLICY_30__SHIFT 0x1c
16851#define TC_CFG_L2_LOAD_POLICY1__POLICY_31_MASK 0xc0000000
16852#define TC_CFG_L2_LOAD_POLICY1__POLICY_31__SHIFT 0x1e
16853#define TC_CFG_L2_STORE_POLICY0__POLICY_0_MASK 0x3
16854#define TC_CFG_L2_STORE_POLICY0__POLICY_0__SHIFT 0x0
16855#define TC_CFG_L2_STORE_POLICY0__POLICY_1_MASK 0xc
16856#define TC_CFG_L2_STORE_POLICY0__POLICY_1__SHIFT 0x2
16857#define TC_CFG_L2_STORE_POLICY0__POLICY_2_MASK 0x30
16858#define TC_CFG_L2_STORE_POLICY0__POLICY_2__SHIFT 0x4
16859#define TC_CFG_L2_STORE_POLICY0__POLICY_3_MASK 0xc0
16860#define TC_CFG_L2_STORE_POLICY0__POLICY_3__SHIFT 0x6
16861#define TC_CFG_L2_STORE_POLICY0__POLICY_4_MASK 0x300
16862#define TC_CFG_L2_STORE_POLICY0__POLICY_4__SHIFT 0x8
16863#define TC_CFG_L2_STORE_POLICY0__POLICY_5_MASK 0xc00
16864#define TC_CFG_L2_STORE_POLICY0__POLICY_5__SHIFT 0xa
16865#define TC_CFG_L2_STORE_POLICY0__POLICY_6_MASK 0x3000
16866#define TC_CFG_L2_STORE_POLICY0__POLICY_6__SHIFT 0xc
16867#define TC_CFG_L2_STORE_POLICY0__POLICY_7_MASK 0xc000
16868#define TC_CFG_L2_STORE_POLICY0__POLICY_7__SHIFT 0xe
16869#define TC_CFG_L2_STORE_POLICY0__POLICY_8_MASK 0x30000
16870#define TC_CFG_L2_STORE_POLICY0__POLICY_8__SHIFT 0x10
16871#define TC_CFG_L2_STORE_POLICY0__POLICY_9_MASK 0xc0000
16872#define TC_CFG_L2_STORE_POLICY0__POLICY_9__SHIFT 0x12
16873#define TC_CFG_L2_STORE_POLICY0__POLICY_10_MASK 0x300000
16874#define TC_CFG_L2_STORE_POLICY0__POLICY_10__SHIFT 0x14
16875#define TC_CFG_L2_STORE_POLICY0__POLICY_11_MASK 0xc00000
16876#define TC_CFG_L2_STORE_POLICY0__POLICY_11__SHIFT 0x16
16877#define TC_CFG_L2_STORE_POLICY0__POLICY_12_MASK 0x3000000
16878#define TC_CFG_L2_STORE_POLICY0__POLICY_12__SHIFT 0x18
16879#define TC_CFG_L2_STORE_POLICY0__POLICY_13_MASK 0xc000000
16880#define TC_CFG_L2_STORE_POLICY0__POLICY_13__SHIFT 0x1a
16881#define TC_CFG_L2_STORE_POLICY0__POLICY_14_MASK 0x30000000
16882#define TC_CFG_L2_STORE_POLICY0__POLICY_14__SHIFT 0x1c
16883#define TC_CFG_L2_STORE_POLICY0__POLICY_15_MASK 0xc0000000
16884#define TC_CFG_L2_STORE_POLICY0__POLICY_15__SHIFT 0x1e
16885#define TC_CFG_L2_STORE_POLICY1__POLICY_16_MASK 0x3
16886#define TC_CFG_L2_STORE_POLICY1__POLICY_16__SHIFT 0x0
16887#define TC_CFG_L2_STORE_POLICY1__POLICY_17_MASK 0xc
16888#define TC_CFG_L2_STORE_POLICY1__POLICY_17__SHIFT 0x2
16889#define TC_CFG_L2_STORE_POLICY1__POLICY_18_MASK 0x30
16890#define TC_CFG_L2_STORE_POLICY1__POLICY_18__SHIFT 0x4
16891#define TC_CFG_L2_STORE_POLICY1__POLICY_19_MASK 0xc0
16892#define TC_CFG_L2_STORE_POLICY1__POLICY_19__SHIFT 0x6
16893#define TC_CFG_L2_STORE_POLICY1__POLICY_20_MASK 0x300
16894#define TC_CFG_L2_STORE_POLICY1__POLICY_20__SHIFT 0x8
16895#define TC_CFG_L2_STORE_POLICY1__POLICY_21_MASK 0xc00
16896#define TC_CFG_L2_STORE_POLICY1__POLICY_21__SHIFT 0xa
16897#define TC_CFG_L2_STORE_POLICY1__POLICY_22_MASK 0x3000
16898#define TC_CFG_L2_STORE_POLICY1__POLICY_22__SHIFT 0xc
16899#define TC_CFG_L2_STORE_POLICY1__POLICY_23_MASK 0xc000
16900#define TC_CFG_L2_STORE_POLICY1__POLICY_23__SHIFT 0xe
16901#define TC_CFG_L2_STORE_POLICY1__POLICY_24_MASK 0x30000
16902#define TC_CFG_L2_STORE_POLICY1__POLICY_24__SHIFT 0x10
16903#define TC_CFG_L2_STORE_POLICY1__POLICY_25_MASK 0xc0000
16904#define TC_CFG_L2_STORE_POLICY1__POLICY_25__SHIFT 0x12
16905#define TC_CFG_L2_STORE_POLICY1__POLICY_26_MASK 0x300000
16906#define TC_CFG_L2_STORE_POLICY1__POLICY_26__SHIFT 0x14
16907#define TC_CFG_L2_STORE_POLICY1__POLICY_27_MASK 0xc00000
16908#define TC_CFG_L2_STORE_POLICY1__POLICY_27__SHIFT 0x16
16909#define TC_CFG_L2_STORE_POLICY1__POLICY_28_MASK 0x3000000
16910#define TC_CFG_L2_STORE_POLICY1__POLICY_28__SHIFT 0x18
16911#define TC_CFG_L2_STORE_POLICY1__POLICY_29_MASK 0xc000000
16912#define TC_CFG_L2_STORE_POLICY1__POLICY_29__SHIFT 0x1a
16913#define TC_CFG_L2_STORE_POLICY1__POLICY_30_MASK 0x30000000
16914#define TC_CFG_L2_STORE_POLICY1__POLICY_30__SHIFT 0x1c
16915#define TC_CFG_L2_STORE_POLICY1__POLICY_31_MASK 0xc0000000
16916#define TC_CFG_L2_STORE_POLICY1__POLICY_31__SHIFT 0x1e
16917#define TC_CFG_L2_ATOMIC_POLICY__POLICY_0_MASK 0x3
16918#define TC_CFG_L2_ATOMIC_POLICY__POLICY_0__SHIFT 0x0
16919#define TC_CFG_L2_ATOMIC_POLICY__POLICY_1_MASK 0xc
16920#define TC_CFG_L2_ATOMIC_POLICY__POLICY_1__SHIFT 0x2
16921#define TC_CFG_L2_ATOMIC_POLICY__POLICY_2_MASK 0x30
16922#define TC_CFG_L2_ATOMIC_POLICY__POLICY_2__SHIFT 0x4
16923#define TC_CFG_L2_ATOMIC_POLICY__POLICY_3_MASK 0xc0
16924#define TC_CFG_L2_ATOMIC_POLICY__POLICY_3__SHIFT 0x6
16925#define TC_CFG_L2_ATOMIC_POLICY__POLICY_4_MASK 0x300
16926#define TC_CFG_L2_ATOMIC_POLICY__POLICY_4__SHIFT 0x8
16927#define TC_CFG_L2_ATOMIC_POLICY__POLICY_5_MASK 0xc00
16928#define TC_CFG_L2_ATOMIC_POLICY__POLICY_5__SHIFT 0xa
16929#define TC_CFG_L2_ATOMIC_POLICY__POLICY_6_MASK 0x3000
16930#define TC_CFG_L2_ATOMIC_POLICY__POLICY_6__SHIFT 0xc
16931#define TC_CFG_L2_ATOMIC_POLICY__POLICY_7_MASK 0xc000
16932#define TC_CFG_L2_ATOMIC_POLICY__POLICY_7__SHIFT 0xe
16933#define TC_CFG_L2_ATOMIC_POLICY__POLICY_8_MASK 0x30000
16934#define TC_CFG_L2_ATOMIC_POLICY__POLICY_8__SHIFT 0x10
16935#define TC_CFG_L2_ATOMIC_POLICY__POLICY_9_MASK 0xc0000
16936#define TC_CFG_L2_ATOMIC_POLICY__POLICY_9__SHIFT 0x12
16937#define TC_CFG_L2_ATOMIC_POLICY__POLICY_10_MASK 0x300000
16938#define TC_CFG_L2_ATOMIC_POLICY__POLICY_10__SHIFT 0x14
16939#define TC_CFG_L2_ATOMIC_POLICY__POLICY_11_MASK 0xc00000
16940#define TC_CFG_L2_ATOMIC_POLICY__POLICY_11__SHIFT 0x16
16941#define TC_CFG_L2_ATOMIC_POLICY__POLICY_12_MASK 0x3000000
16942#define TC_CFG_L2_ATOMIC_POLICY__POLICY_12__SHIFT 0x18
16943#define TC_CFG_L2_ATOMIC_POLICY__POLICY_13_MASK 0xc000000
16944#define TC_CFG_L2_ATOMIC_POLICY__POLICY_13__SHIFT 0x1a
16945#define TC_CFG_L2_ATOMIC_POLICY__POLICY_14_MASK 0x30000000
16946#define TC_CFG_L2_ATOMIC_POLICY__POLICY_14__SHIFT 0x1c
16947#define TC_CFG_L2_ATOMIC_POLICY__POLICY_15_MASK 0xc0000000
16948#define TC_CFG_L2_ATOMIC_POLICY__POLICY_15__SHIFT 0x1e
16949#define TC_CFG_L1_VOLATILE__VOL_MASK 0xf
16950#define TC_CFG_L1_VOLATILE__VOL__SHIFT 0x0
16951#define TC_CFG_L2_VOLATILE__VOL_MASK 0xf
16952#define TC_CFG_L2_VOLATILE__VOL__SHIFT 0x0
16953#define TCP_WATCH0_ADDR_H__ADDR_MASK 0xffff
16954#define TCP_WATCH0_ADDR_H__ADDR__SHIFT 0x0
16955#define TCP_WATCH1_ADDR_H__ADDR_MASK 0xffff
16956#define TCP_WATCH1_ADDR_H__ADDR__SHIFT 0x0
16957#define TCP_WATCH2_ADDR_H__ADDR_MASK 0xffff
16958#define TCP_WATCH2_ADDR_H__ADDR__SHIFT 0x0
16959#define TCP_WATCH3_ADDR_H__ADDR_MASK 0xffff
16960#define TCP_WATCH3_ADDR_H__ADDR__SHIFT 0x0
16961#define TCP_WATCH0_ADDR_L__ADDR_MASK 0xffffffc0
16962#define TCP_WATCH0_ADDR_L__ADDR__SHIFT 0x6
16963#define TCP_WATCH1_ADDR_L__ADDR_MASK 0xffffffc0
16964#define TCP_WATCH1_ADDR_L__ADDR__SHIFT 0x6
16965#define TCP_WATCH2_ADDR_L__ADDR_MASK 0xffffffc0
16966#define TCP_WATCH2_ADDR_L__ADDR__SHIFT 0x6
16967#define TCP_WATCH3_ADDR_L__ADDR_MASK 0xffffffc0
16968#define TCP_WATCH3_ADDR_L__ADDR__SHIFT 0x6
16969#define TCP_WATCH0_CNTL__MASK_MASK 0xffffff
16970#define TCP_WATCH0_CNTL__MASK__SHIFT 0x0
16971#define TCP_WATCH0_CNTL__VMID_MASK 0xf000000
16972#define TCP_WATCH0_CNTL__VMID__SHIFT 0x18
16973#define TCP_WATCH0_CNTL__ATC_MASK 0x10000000
16974#define TCP_WATCH0_CNTL__ATC__SHIFT 0x1c
16975#define TCP_WATCH0_CNTL__MODE_MASK 0x60000000
16976#define TCP_WATCH0_CNTL__MODE__SHIFT 0x1d
16977#define TCP_WATCH0_CNTL__VALID_MASK 0x80000000
16978#define TCP_WATCH0_CNTL__VALID__SHIFT 0x1f
16979#define TCP_WATCH1_CNTL__MASK_MASK 0xffffff
16980#define TCP_WATCH1_CNTL__MASK__SHIFT 0x0
16981#define TCP_WATCH1_CNTL__VMID_MASK 0xf000000
16982#define TCP_WATCH1_CNTL__VMID__SHIFT 0x18
16983#define TCP_WATCH1_CNTL__ATC_MASK 0x10000000
16984#define TCP_WATCH1_CNTL__ATC__SHIFT 0x1c
16985#define TCP_WATCH1_CNTL__MODE_MASK 0x60000000
16986#define TCP_WATCH1_CNTL__MODE__SHIFT 0x1d
16987#define TCP_WATCH1_CNTL__VALID_MASK 0x80000000
16988#define TCP_WATCH1_CNTL__VALID__SHIFT 0x1f
16989#define TCP_WATCH2_CNTL__MASK_MASK 0xffffff
16990#define TCP_WATCH2_CNTL__MASK__SHIFT 0x0
16991#define TCP_WATCH2_CNTL__VMID_MASK 0xf000000
16992#define TCP_WATCH2_CNTL__VMID__SHIFT 0x18
16993#define TCP_WATCH2_CNTL__ATC_MASK 0x10000000
16994#define TCP_WATCH2_CNTL__ATC__SHIFT 0x1c
16995#define TCP_WATCH2_CNTL__MODE_MASK 0x60000000
16996#define TCP_WATCH2_CNTL__MODE__SHIFT 0x1d
16997#define TCP_WATCH2_CNTL__VALID_MASK 0x80000000
16998#define TCP_WATCH2_CNTL__VALID__SHIFT 0x1f
16999#define TCP_WATCH3_CNTL__MASK_MASK 0xffffff
17000#define TCP_WATCH3_CNTL__MASK__SHIFT 0x0
17001#define TCP_WATCH3_CNTL__VMID_MASK 0xf000000
17002#define TCP_WATCH3_CNTL__VMID__SHIFT 0x18
17003#define TCP_WATCH3_CNTL__ATC_MASK 0x10000000
17004#define TCP_WATCH3_CNTL__ATC__SHIFT 0x1c
17005#define TCP_WATCH3_CNTL__MODE_MASK 0x60000000
17006#define TCP_WATCH3_CNTL__MODE__SHIFT 0x1d
17007#define TCP_WATCH3_CNTL__VALID_MASK 0x80000000
17008#define TCP_WATCH3_CNTL__VALID__SHIFT 0x1f
17009#define TCP_GATCL1_CNTL__INVALIDATE_ALL_VMID_MASK 0x2000000
17010#define TCP_GATCL1_CNTL__INVALIDATE_ALL_VMID__SHIFT 0x19
17011#define TCP_GATCL1_CNTL__FORCE_MISS_MASK 0x4000000
17012#define TCP_GATCL1_CNTL__FORCE_MISS__SHIFT 0x1a
17013#define TCP_GATCL1_CNTL__FORCE_IN_ORDER_MASK 0x8000000
17014#define TCP_GATCL1_CNTL__FORCE_IN_ORDER__SHIFT 0x1b
17015#define TCP_GATCL1_CNTL__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000
17016#define TCP_GATCL1_CNTL__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
17017#define TCP_GATCL1_CNTL__REDUCE_CACHE_SIZE_BY_2_MASK 0xc0000000
17018#define TCP_GATCL1_CNTL__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
17019#define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC_MASK 0xff
17020#define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC__SHIFT 0x0
17021#define TCP_GATCL1_DSM_CNTL__SEL_DSM_TCP_GATCL1_IRRITATOR_DATA_A0_MASK 0x1
17022#define TCP_GATCL1_DSM_CNTL__SEL_DSM_TCP_GATCL1_IRRITATOR_DATA_A0__SHIFT 0x0
17023#define TCP_GATCL1_DSM_CNTL__SEL_DSM_TCP_GATCL1_IRRITATOR_DATA_A1_MASK 0x2
17024#define TCP_GATCL1_DSM_CNTL__SEL_DSM_TCP_GATCL1_IRRITATOR_DATA_A1__SHIFT 0x1
17025#define TCP_GATCL1_DSM_CNTL__TCP_GATCL1_ENABLE_SINGLE_WRITE_A_MASK 0x4
17026#define TCP_GATCL1_DSM_CNTL__TCP_GATCL1_ENABLE_SINGLE_WRITE_A__SHIFT 0x2
17027#define TCP_DSM_CNTL__CACHE_RAM_IRRITATOR_DATA_SEL_MASK 0x3
17028#define TCP_DSM_CNTL__CACHE_RAM_IRRITATOR_DATA_SEL__SHIFT 0x0
17029#define TCP_DSM_CNTL__CACHE_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x4
17030#define TCP_DSM_CNTL__CACHE_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0x2
17031#define TCP_DSM_CNTL__LFIFO_RAM_IRRITATOR_DATA_SEL_MASK 0x18
17032#define TCP_DSM_CNTL__LFIFO_RAM_IRRITATOR_DATA_SEL__SHIFT 0x3
17033#define TCP_DSM_CNTL__LFIFO_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x20
17034#define TCP_DSM_CNTL__LFIFO_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0x5
17035#define TCP_CNTL2__LS_DISABLE_CLOCKS_MASK 0xff
17036#define TCP_CNTL2__LS_DISABLE_CLOCKS__SHIFT 0x0
17037#define TD_CGTT_CTRL__ON_DELAY_MASK 0xf
17038#define TD_CGTT_CTRL__ON_DELAY__SHIFT 0x0
17039#define TD_CGTT_CTRL__OFF_HYSTERESIS_MASK 0xff0
17040#define TD_CGTT_CTRL__OFF_HYSTERESIS__SHIFT 0x4
17041#define TD_CGTT_CTRL__SOFT_OVERRIDE7_MASK 0x1000000
17042#define TD_CGTT_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
17043#define TD_CGTT_CTRL__SOFT_OVERRIDE6_MASK 0x2000000
17044#define TD_CGTT_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
17045#define TD_CGTT_CTRL__SOFT_OVERRIDE5_MASK 0x4000000
17046#define TD_CGTT_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
17047#define TD_CGTT_CTRL__SOFT_OVERRIDE4_MASK 0x8000000
17048#define TD_CGTT_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
17049#define TD_CGTT_CTRL__SOFT_OVERRIDE3_MASK 0x10000000
17050#define TD_CGTT_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
17051#define TD_CGTT_CTRL__SOFT_OVERRIDE2_MASK 0x20000000
17052#define TD_CGTT_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
17053#define TD_CGTT_CTRL__SOFT_OVERRIDE1_MASK 0x40000000
17054#define TD_CGTT_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
17055#define TD_CGTT_CTRL__SOFT_OVERRIDE0_MASK 0x80000000
17056#define TD_CGTT_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
17057#define TA_CGTT_CTRL__ON_DELAY_MASK 0xf
17058#define TA_CGTT_CTRL__ON_DELAY__SHIFT 0x0
17059#define TA_CGTT_CTRL__OFF_HYSTERESIS_MASK 0xff0
17060#define TA_CGTT_CTRL__OFF_HYSTERESIS__SHIFT 0x4
17061#define TA_CGTT_CTRL__SOFT_OVERRIDE7_MASK 0x1000000
17062#define TA_CGTT_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
17063#define TA_CGTT_CTRL__SOFT_OVERRIDE6_MASK 0x2000000
17064#define TA_CGTT_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
17065#define TA_CGTT_CTRL__SOFT_OVERRIDE5_MASK 0x4000000
17066#define TA_CGTT_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
17067#define TA_CGTT_CTRL__SOFT_OVERRIDE4_MASK 0x8000000
17068#define TA_CGTT_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
17069#define TA_CGTT_CTRL__SOFT_OVERRIDE3_MASK 0x10000000
17070#define TA_CGTT_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
17071#define TA_CGTT_CTRL__SOFT_OVERRIDE2_MASK 0x20000000
17072#define TA_CGTT_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
17073#define TA_CGTT_CTRL__SOFT_OVERRIDE1_MASK 0x40000000
17074#define TA_CGTT_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
17075#define TA_CGTT_CTRL__SOFT_OVERRIDE0_MASK 0x80000000
17076#define TA_CGTT_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
17077#define CGTT_TCP_CLK_CTRL__ON_DELAY_MASK 0xf
17078#define CGTT_TCP_CLK_CTRL__ON_DELAY__SHIFT 0x0
17079#define CGTT_TCP_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
17080#define CGTT_TCP_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
17081#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x1000000
17082#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
17083#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x2000000
17084#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
17085#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x4000000
17086#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
17087#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x8000000
17088#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
17089#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000
17090#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
17091#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000
17092#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
17093#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000
17094#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
17095#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000
17096#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
17097#define CGTT_TCI_CLK_CTRL__ON_DELAY_MASK 0xf
17098#define CGTT_TCI_CLK_CTRL__ON_DELAY__SHIFT 0x0
17099#define CGTT_TCI_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
17100#define CGTT_TCI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
17101#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x1000000
17102#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
17103#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x2000000
17104#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
17105#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x4000000
17106#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
17107#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x8000000
17108#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
17109#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000
17110#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
17111#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000
17112#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
17113#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000
17114#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
17115#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000
17116#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
17117#define TCI_STATUS__TCI_BUSY_MASK 0x1
17118#define TCI_STATUS__TCI_BUSY__SHIFT 0x0
17119#define TCI_CNTL_1__WBINVL1_NUM_CYCLES_MASK 0xffff
17120#define TCI_CNTL_1__WBINVL1_NUM_CYCLES__SHIFT 0x0
17121#define TCI_CNTL_1__REQ_FIFO_DEPTH_MASK 0xff0000
17122#define TCI_CNTL_1__REQ_FIFO_DEPTH__SHIFT 0x10
17123#define TCI_CNTL_1__WDATA_RAM_DEPTH_MASK 0xff000000
17124#define TCI_CNTL_1__WDATA_RAM_DEPTH__SHIFT 0x18
17125#define TCI_CNTL_2__L1_INVAL_ON_WBINVL2_MASK 0x1
17126#define TCI_CNTL_2__L1_INVAL_ON_WBINVL2__SHIFT 0x0
17127#define TCI_CNTL_2__TCA_MAX_CREDIT_MASK 0x1fe
17128#define TCI_CNTL_2__TCA_MAX_CREDIT__SHIFT 0x1
17129#define GDS_CONFIG__SH0_GPR_PHASE_SEL_MASK 0x6
17130#define GDS_CONFIG__SH0_GPR_PHASE_SEL__SHIFT 0x1
17131#define GDS_CONFIG__SH1_GPR_PHASE_SEL_MASK 0x18
17132#define GDS_CONFIG__SH1_GPR_PHASE_SEL__SHIFT 0x3
17133#define GDS_CONFIG__SH2_GPR_PHASE_SEL_MASK 0x60
17134#define GDS_CONFIG__SH2_GPR_PHASE_SEL__SHIFT 0x5
17135#define GDS_CONFIG__SH3_GPR_PHASE_SEL_MASK 0x180
17136#define GDS_CONFIG__SH3_GPR_PHASE_SEL__SHIFT 0x7
17137#define GDS_CNTL_STATUS__GDS_BUSY_MASK 0x1
17138#define GDS_CNTL_STATUS__GDS_BUSY__SHIFT 0x0
17139#define GDS_CNTL_STATUS__GRBM_WBUF_BUSY_MASK 0x2
17140#define GDS_CNTL_STATUS__GRBM_WBUF_BUSY__SHIFT 0x1
17141#define GDS_CNTL_STATUS__ORD_APP_BUSY_MASK 0x4
17142#define GDS_CNTL_STATUS__ORD_APP_BUSY__SHIFT 0x2
17143#define GDS_CNTL_STATUS__DS_BANK_CONFLICT_MASK 0x8
17144#define GDS_CNTL_STATUS__DS_BANK_CONFLICT__SHIFT 0x3
17145#define GDS_CNTL_STATUS__DS_ADDR_CONFLICT_MASK 0x10
17146#define GDS_CNTL_STATUS__DS_ADDR_CONFLICT__SHIFT 0x4
17147#define GDS_CNTL_STATUS__DS_WR_CLAMP_MASK 0x20
17148#define GDS_CNTL_STATUS__DS_WR_CLAMP__SHIFT 0x5
17149#define GDS_CNTL_STATUS__DS_RD_CLAMP_MASK 0x40
17150#define GDS_CNTL_STATUS__DS_RD_CLAMP__SHIFT 0x6
17151#define GDS_CNTL_STATUS__GRBM_RBUF_BUSY_MASK 0x80
17152#define GDS_CNTL_STATUS__GRBM_RBUF_BUSY__SHIFT 0x7
17153#define GDS_CNTL_STATUS__DS_BUSY_MASK 0x100
17154#define GDS_CNTL_STATUS__DS_BUSY__SHIFT 0x8
17155#define GDS_CNTL_STATUS__GWS_BUSY_MASK 0x200
17156#define GDS_CNTL_STATUS__GWS_BUSY__SHIFT 0x9
17157#define GDS_CNTL_STATUS__ORD_FIFO_BUSY_MASK 0x400
17158#define GDS_CNTL_STATUS__ORD_FIFO_BUSY__SHIFT 0xa
17159#define GDS_CNTL_STATUS__CREDIT_BUSY0_MASK 0x800
17160#define GDS_CNTL_STATUS__CREDIT_BUSY0__SHIFT 0xb
17161#define GDS_CNTL_STATUS__CREDIT_BUSY1_MASK 0x1000
17162#define GDS_CNTL_STATUS__CREDIT_BUSY1__SHIFT 0xc
17163#define GDS_CNTL_STATUS__CREDIT_BUSY2_MASK 0x2000
17164#define GDS_CNTL_STATUS__CREDIT_BUSY2__SHIFT 0xd
17165#define GDS_CNTL_STATUS__CREDIT_BUSY3_MASK 0x4000
17166#define GDS_CNTL_STATUS__CREDIT_BUSY3__SHIFT 0xe
17167#define GDS_ENHANCE2__MISC_MASK 0xffff
17168#define GDS_ENHANCE2__MISC__SHIFT 0x0
17169#define GDS_ENHANCE2__UNUSED_MASK 0xffff0000
17170#define GDS_ENHANCE2__UNUSED__SHIFT 0x10
17171#define GDS_PROTECTION_FAULT__WRITE_DIS_MASK 0x1
17172#define GDS_PROTECTION_FAULT__WRITE_DIS__SHIFT 0x0
17173#define GDS_PROTECTION_FAULT__FAULT_DETECTED_MASK 0x2
17174#define GDS_PROTECTION_FAULT__FAULT_DETECTED__SHIFT 0x1
17175#define GDS_PROTECTION_FAULT__GRBM_MASK 0x4
17176#define GDS_PROTECTION_FAULT__GRBM__SHIFT 0x2
17177#define GDS_PROTECTION_FAULT__SH_ID_MASK 0x38
17178#define GDS_PROTECTION_FAULT__SH_ID__SHIFT 0x3
17179#define GDS_PROTECTION_FAULT__CU_ID_MASK 0x3c0
17180#define GDS_PROTECTION_FAULT__CU_ID__SHIFT 0x6
17181#define GDS_PROTECTION_FAULT__SIMD_ID_MASK 0xc00
17182#define GDS_PROTECTION_FAULT__SIMD_ID__SHIFT 0xa
17183#define GDS_PROTECTION_FAULT__WAVE_ID_MASK 0xf000
17184#define GDS_PROTECTION_FAULT__WAVE_ID__SHIFT 0xc
17185#define GDS_PROTECTION_FAULT__ADDRESS_MASK 0xffff0000
17186#define GDS_PROTECTION_FAULT__ADDRESS__SHIFT 0x10
17187#define GDS_VM_PROTECTION_FAULT__WRITE_DIS_MASK 0x1
17188#define GDS_VM_PROTECTION_FAULT__WRITE_DIS__SHIFT 0x0
17189#define GDS_VM_PROTECTION_FAULT__FAULT_DETECTED_MASK 0x2
17190#define GDS_VM_PROTECTION_FAULT__FAULT_DETECTED__SHIFT 0x1
17191#define GDS_VM_PROTECTION_FAULT__GWS_MASK 0x4
17192#define GDS_VM_PROTECTION_FAULT__GWS__SHIFT 0x2
17193#define GDS_VM_PROTECTION_FAULT__OA_MASK 0x8
17194#define GDS_VM_PROTECTION_FAULT__OA__SHIFT 0x3
17195#define GDS_VM_PROTECTION_FAULT__GRBM_MASK 0x10
17196#define GDS_VM_PROTECTION_FAULT__GRBM__SHIFT 0x4
17197#define GDS_VM_PROTECTION_FAULT__VMID_MASK 0xf00
17198#define GDS_VM_PROTECTION_FAULT__VMID__SHIFT 0x8
/*
 * GDS (Global Data Share) register bitfield definitions.
 * For each register field there is a pair of macros:
 *   <REG>__<FIELD>_MASK   - bitmask selecting the field within the 32-bit register
 *   <REG>__<FIELD>__SHIFT - bit offset of the field's LSB
 * Auto-generated register-header content; values must not be edited by hand.
 * NOTE(review): web-scrape line numbers that had been fused onto each line
 * (breaking compilation) have been stripped; no values were changed.
 */
#define GDS_VM_PROTECTION_FAULT__ADDRESS_MASK 0xffff0000
#define GDS_VM_PROTECTION_FAULT__ADDRESS__SHIFT 0x10
#define GDS_EDC_CNT__DED_MASK 0xff
#define GDS_EDC_CNT__DED__SHIFT 0x0
#define GDS_EDC_CNT__SED_MASK 0xff00
#define GDS_EDC_CNT__SED__SHIFT 0x8
#define GDS_EDC_CNT__SEC_MASK 0xff0000
#define GDS_EDC_CNT__SEC__SHIFT 0x10
#define GDS_EDC_GRBM_CNT__DED_MASK 0xff
#define GDS_EDC_GRBM_CNT__DED__SHIFT 0x0
#define GDS_EDC_GRBM_CNT__SEC_MASK 0xff0000
#define GDS_EDC_GRBM_CNT__SEC__SHIFT 0x10
#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED_MASK 0x1
#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED__SHIFT 0x0
#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED_MASK 0x2
#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED__SHIFT 0x1
#define GDS_EDC_OA_DED__ME0_CS_DED_MASK 0x4
#define GDS_EDC_OA_DED__ME0_CS_DED__SHIFT 0x2
#define GDS_EDC_OA_DED__UNUSED0_MASK 0x8
#define GDS_EDC_OA_DED__UNUSED0__SHIFT 0x3
#define GDS_EDC_OA_DED__ME1_PIPE0_DED_MASK 0x10
#define GDS_EDC_OA_DED__ME1_PIPE0_DED__SHIFT 0x4
#define GDS_EDC_OA_DED__ME1_PIPE1_DED_MASK 0x20
#define GDS_EDC_OA_DED__ME1_PIPE1_DED__SHIFT 0x5
#define GDS_EDC_OA_DED__ME1_PIPE2_DED_MASK 0x40
#define GDS_EDC_OA_DED__ME1_PIPE2_DED__SHIFT 0x6
#define GDS_EDC_OA_DED__ME1_PIPE3_DED_MASK 0x80
#define GDS_EDC_OA_DED__ME1_PIPE3_DED__SHIFT 0x7
#define GDS_EDC_OA_DED__ME2_PIPE0_DED_MASK 0x100
#define GDS_EDC_OA_DED__ME2_PIPE0_DED__SHIFT 0x8
#define GDS_EDC_OA_DED__ME2_PIPE1_DED_MASK 0x200
#define GDS_EDC_OA_DED__ME2_PIPE1_DED__SHIFT 0x9
#define GDS_EDC_OA_DED__ME2_PIPE2_DED_MASK 0x400
#define GDS_EDC_OA_DED__ME2_PIPE2_DED__SHIFT 0xa
#define GDS_EDC_OA_DED__ME2_PIPE3_DED_MASK 0x800
#define GDS_EDC_OA_DED__ME2_PIPE3_DED__SHIFT 0xb
#define GDS_EDC_OA_DED__UNUSED1_MASK 0xfffff000
#define GDS_EDC_OA_DED__UNUSED1__SHIFT 0xc
#define GDS_DEBUG_CNTL__GDS_DEBUG_INDX_MASK 0x1f
#define GDS_DEBUG_CNTL__GDS_DEBUG_INDX__SHIFT 0x0
#define GDS_DEBUG_CNTL__UNUSED_MASK 0xffffffe0
#define GDS_DEBUG_CNTL__UNUSED__SHIFT 0x5
#define GDS_DEBUG_DATA__DATA_MASK 0xffffffff
#define GDS_DEBUG_DATA__DATA__SHIFT 0x0
#define GDS_DSM_CNTL__SEL_DSM_GDS_IRRITATOR_DATA_A_0_MASK 0x1
#define GDS_DSM_CNTL__SEL_DSM_GDS_IRRITATOR_DATA_A_0__SHIFT 0x0
#define GDS_DSM_CNTL__SEL_DSM_GDS_IRRITATOR_DATA_A_1_MASK 0x2
#define GDS_DSM_CNTL__SEL_DSM_GDS_IRRITATOR_DATA_A_1__SHIFT 0x1
#define GDS_DSM_CNTL__GDS_ENABLE_SINGLE_WRITE_A_MASK 0x4
#define GDS_DSM_CNTL__GDS_ENABLE_SINGLE_WRITE_A__SHIFT 0x2
#define GDS_DSM_CNTL__SEL_DSM_GDS_IRRITATOR_DATA_B_0_MASK 0x8
#define GDS_DSM_CNTL__SEL_DSM_GDS_IRRITATOR_DATA_B_0__SHIFT 0x3
#define GDS_DSM_CNTL__SEL_DSM_GDS_IRRITATOR_DATA_B_1_MASK 0x10
#define GDS_DSM_CNTL__SEL_DSM_GDS_IRRITATOR_DATA_B_1__SHIFT 0x4
#define GDS_DSM_CNTL__GDS_ENABLE_SINGLE_WRITE_B_MASK 0x20
#define GDS_DSM_CNTL__GDS_ENABLE_SINGLE_WRITE_B__SHIFT 0x5
#define GDS_DSM_CNTL__UNUSED_MASK 0xffffffc0
#define GDS_DSM_CNTL__UNUSED__SHIFT 0x6
#define CGTT_GDS_CLK_CTRL__ON_DELAY_MASK 0xf
#define CGTT_GDS_CLK_CTRL__ON_DELAY__SHIFT 0x0
#define CGTT_GDS_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
#define CGTT_GDS_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x1000000
#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x2000000
#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x4000000
#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x8000000
#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000
#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000
#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000
#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000
#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
#define GDS_RD_ADDR__READ_ADDR_MASK 0xffffffff
#define GDS_RD_ADDR__READ_ADDR__SHIFT 0x0
#define GDS_RD_DATA__READ_DATA_MASK 0xffffffff
#define GDS_RD_DATA__READ_DATA__SHIFT 0x0
#define GDS_RD_BURST_ADDR__BURST_ADDR_MASK 0xffffffff
#define GDS_RD_BURST_ADDR__BURST_ADDR__SHIFT 0x0
#define GDS_RD_BURST_COUNT__BURST_COUNT_MASK 0xffffffff
#define GDS_RD_BURST_COUNT__BURST_COUNT__SHIFT 0x0
#define GDS_RD_BURST_DATA__BURST_DATA_MASK 0xffffffff
#define GDS_RD_BURST_DATA__BURST_DATA__SHIFT 0x0
#define GDS_WR_ADDR__WRITE_ADDR_MASK 0xffffffff
#define GDS_WR_ADDR__WRITE_ADDR__SHIFT 0x0
#define GDS_WR_DATA__WRITE_DATA_MASK 0xffffffff
#define GDS_WR_DATA__WRITE_DATA__SHIFT 0x0
#define GDS_WR_BURST_ADDR__WRITE_ADDR_MASK 0xffffffff
#define GDS_WR_BURST_ADDR__WRITE_ADDR__SHIFT 0x0
#define GDS_WR_BURST_DATA__WRITE_DATA_MASK 0xffffffff
#define GDS_WR_BURST_DATA__WRITE_DATA__SHIFT 0x0
#define GDS_WRITE_COMPLETE__WRITE_COMPLETE_MASK 0xffffffff
#define GDS_WRITE_COMPLETE__WRITE_COMPLETE__SHIFT 0x0
#define GDS_ATOM_CNTL__AINC_MASK 0x3f
#define GDS_ATOM_CNTL__AINC__SHIFT 0x0
#define GDS_ATOM_CNTL__UNUSED1_MASK 0xc0
#define GDS_ATOM_CNTL__UNUSED1__SHIFT 0x6
#define GDS_ATOM_CNTL__DMODE_MASK 0x300
#define GDS_ATOM_CNTL__DMODE__SHIFT 0x8
#define GDS_ATOM_CNTL__UNUSED2_MASK 0xfffffc00
#define GDS_ATOM_CNTL__UNUSED2__SHIFT 0xa
#define GDS_ATOM_COMPLETE__COMPLETE_MASK 0x1
#define GDS_ATOM_COMPLETE__COMPLETE__SHIFT 0x0
#define GDS_ATOM_COMPLETE__UNUSED_MASK 0xfffffffe
#define GDS_ATOM_COMPLETE__UNUSED__SHIFT 0x1
#define GDS_ATOM_BASE__BASE_MASK 0xffff
#define GDS_ATOM_BASE__BASE__SHIFT 0x0
#define GDS_ATOM_BASE__UNUSED_MASK 0xffff0000
#define GDS_ATOM_BASE__UNUSED__SHIFT 0x10
#define GDS_ATOM_SIZE__SIZE_MASK 0xffff
#define GDS_ATOM_SIZE__SIZE__SHIFT 0x0
#define GDS_ATOM_SIZE__UNUSED_MASK 0xffff0000
#define GDS_ATOM_SIZE__UNUSED__SHIFT 0x10
#define GDS_ATOM_OFFSET0__OFFSET0_MASK 0xff
#define GDS_ATOM_OFFSET0__OFFSET0__SHIFT 0x0
#define GDS_ATOM_OFFSET0__UNUSED_MASK 0xffffff00
#define GDS_ATOM_OFFSET0__UNUSED__SHIFT 0x8
#define GDS_ATOM_OFFSET1__OFFSET1_MASK 0xff
#define GDS_ATOM_OFFSET1__OFFSET1__SHIFT 0x0
#define GDS_ATOM_OFFSET1__UNUSED_MASK 0xffffff00
#define GDS_ATOM_OFFSET1__UNUSED__SHIFT 0x8
#define GDS_ATOM_DST__DST_MASK 0xffffffff
#define GDS_ATOM_DST__DST__SHIFT 0x0
#define GDS_ATOM_OP__OP_MASK 0xff
#define GDS_ATOM_OP__OP__SHIFT 0x0
#define GDS_ATOM_OP__UNUSED_MASK 0xffffff00
#define GDS_ATOM_OP__UNUSED__SHIFT 0x8
#define GDS_ATOM_SRC0__DATA_MASK 0xffffffff
#define GDS_ATOM_SRC0__DATA__SHIFT 0x0
#define GDS_ATOM_SRC0_U__DATA_MASK 0xffffffff
#define GDS_ATOM_SRC0_U__DATA__SHIFT 0x0
#define GDS_ATOM_SRC1__DATA_MASK 0xffffffff
#define GDS_ATOM_SRC1__DATA__SHIFT 0x0
#define GDS_ATOM_SRC1_U__DATA_MASK 0xffffffff
#define GDS_ATOM_SRC1_U__DATA__SHIFT 0x0
#define GDS_ATOM_READ0__DATA_MASK 0xffffffff
#define GDS_ATOM_READ0__DATA__SHIFT 0x0
#define GDS_ATOM_READ0_U__DATA_MASK 0xffffffff
#define GDS_ATOM_READ0_U__DATA__SHIFT 0x0
#define GDS_ATOM_READ1__DATA_MASK 0xffffffff
#define GDS_ATOM_READ1__DATA__SHIFT 0x0
#define GDS_ATOM_READ1_U__DATA_MASK 0xffffffff
#define GDS_ATOM_READ1_U__DATA__SHIFT 0x0
#define GDS_GWS_RESOURCE_CNTL__INDEX_MASK 0x3f
#define GDS_GWS_RESOURCE_CNTL__INDEX__SHIFT 0x0
#define GDS_GWS_RESOURCE_CNTL__UNUSED_MASK 0xffffffc0
#define GDS_GWS_RESOURCE_CNTL__UNUSED__SHIFT 0x6
#define GDS_GWS_RESOURCE__FLAG_MASK 0x1
#define GDS_GWS_RESOURCE__FLAG__SHIFT 0x0
#define GDS_GWS_RESOURCE__COUNTER_MASK 0x1ffe
#define GDS_GWS_RESOURCE__COUNTER__SHIFT 0x1
#define GDS_GWS_RESOURCE__TYPE_MASK 0x2000
#define GDS_GWS_RESOURCE__TYPE__SHIFT 0xd
#define GDS_GWS_RESOURCE__DED_MASK 0x4000
#define GDS_GWS_RESOURCE__DED__SHIFT 0xe
#define GDS_GWS_RESOURCE__RELEASE_ALL_MASK 0x8000
#define GDS_GWS_RESOURCE__RELEASE_ALL__SHIFT 0xf
#define GDS_GWS_RESOURCE__HEAD_QUEUE_MASK 0xfff0000
#define GDS_GWS_RESOURCE__HEAD_QUEUE__SHIFT 0x10
#define GDS_GWS_RESOURCE__HEAD_VALID_MASK 0x10000000
#define GDS_GWS_RESOURCE__HEAD_VALID__SHIFT 0x1c
#define GDS_GWS_RESOURCE__HEAD_FLAG_MASK 0x20000000
#define GDS_GWS_RESOURCE__HEAD_FLAG__SHIFT 0x1d
#define GDS_GWS_RESOURCE__UNUSED1_MASK 0xc0000000
#define GDS_GWS_RESOURCE__UNUSED1__SHIFT 0x1e
#define GDS_GWS_RESOURCE_CNT__RESOURCE_CNT_MASK 0xffff
#define GDS_GWS_RESOURCE_CNT__RESOURCE_CNT__SHIFT 0x0
#define GDS_GWS_RESOURCE_CNT__UNUSED_MASK 0xffff0000
#define GDS_GWS_RESOURCE_CNT__UNUSED__SHIFT 0x10
#define GDS_OA_CNTL__INDEX_MASK 0xf
#define GDS_OA_CNTL__INDEX__SHIFT 0x0
#define GDS_OA_CNTL__UNUSED_MASK 0xfffffff0
#define GDS_OA_CNTL__UNUSED__SHIFT 0x4
#define GDS_OA_COUNTER__SPACE_AVAILABLE_MASK 0xffffffff
#define GDS_OA_COUNTER__SPACE_AVAILABLE__SHIFT 0x0
#define GDS_OA_ADDRESS__DS_ADDRESS_MASK 0xffff
#define GDS_OA_ADDRESS__DS_ADDRESS__SHIFT 0x0
#define GDS_OA_ADDRESS__CRAWLER_MASK 0xf0000
#define GDS_OA_ADDRESS__CRAWLER__SHIFT 0x10
#define GDS_OA_ADDRESS__CRAWLER_TYPE_MASK 0x300000
#define GDS_OA_ADDRESS__CRAWLER_TYPE__SHIFT 0x14
#define GDS_OA_ADDRESS__UNUSED_MASK 0x3fc00000
#define GDS_OA_ADDRESS__UNUSED__SHIFT 0x16
#define GDS_OA_ADDRESS__NO_ALLOC_MASK 0x40000000
#define GDS_OA_ADDRESS__NO_ALLOC__SHIFT 0x1e
#define GDS_OA_ADDRESS__ENABLE_MASK 0x80000000
#define GDS_OA_ADDRESS__ENABLE__SHIFT 0x1f
#define GDS_OA_INCDEC__VALUE_MASK 0x7fffffff
#define GDS_OA_INCDEC__VALUE__SHIFT 0x0
#define GDS_OA_INCDEC__INCDEC_MASK 0x80000000
#define GDS_OA_INCDEC__INCDEC__SHIFT 0x1f
#define GDS_OA_RING_SIZE__RING_SIZE_MASK 0xffffffff
#define GDS_OA_RING_SIZE__RING_SIZE__SHIFT 0x0
#define GDS_DEBUG_REG0__spare1_MASK 0x3f
#define GDS_DEBUG_REG0__spare1__SHIFT 0x0
#define GDS_DEBUG_REG0__write_buff_valid_MASK 0x40
#define GDS_DEBUG_REG0__write_buff_valid__SHIFT 0x6
#define GDS_DEBUG_REG0__wr_pixel_nxt_ptr_MASK 0xf80
#define GDS_DEBUG_REG0__wr_pixel_nxt_ptr__SHIFT 0x7
#define GDS_DEBUG_REG0__last_pixel_ptr_MASK 0x1000
#define GDS_DEBUG_REG0__last_pixel_ptr__SHIFT 0xc
#define GDS_DEBUG_REG0__cstate_MASK 0x1e000
#define GDS_DEBUG_REG0__cstate__SHIFT 0xd
#define GDS_DEBUG_REG0__buff_write_MASK 0x20000
#define GDS_DEBUG_REG0__buff_write__SHIFT 0x11
#define GDS_DEBUG_REG0__flush_request_MASK 0x40000
#define GDS_DEBUG_REG0__flush_request__SHIFT 0x12
#define GDS_DEBUG_REG0__wr_buffer_wr_complete_MASK 0x80000
#define GDS_DEBUG_REG0__wr_buffer_wr_complete__SHIFT 0x13
#define GDS_DEBUG_REG0__wbuf_fifo_empty_MASK 0x100000
#define GDS_DEBUG_REG0__wbuf_fifo_empty__SHIFT 0x14
#define GDS_DEBUG_REG0__wbuf_fifo_full_MASK 0x200000
#define GDS_DEBUG_REG0__wbuf_fifo_full__SHIFT 0x15
#define GDS_DEBUG_REG0__spare_MASK 0xffc00000
#define GDS_DEBUG_REG0__spare__SHIFT 0x16
#define GDS_DEBUG_REG1__tag_hit_MASK 0x1
#define GDS_DEBUG_REG1__tag_hit__SHIFT 0x0
#define GDS_DEBUG_REG1__tag_miss_MASK 0x2
#define GDS_DEBUG_REG1__tag_miss__SHIFT 0x1
#define GDS_DEBUG_REG1__pixel_addr_MASK 0x1fffc
#define GDS_DEBUG_REG1__pixel_addr__SHIFT 0x2
#define GDS_DEBUG_REG1__pixel_vld_MASK 0x20000
#define GDS_DEBUG_REG1__pixel_vld__SHIFT 0x11
#define GDS_DEBUG_REG1__data_ready_MASK 0x40000
#define GDS_DEBUG_REG1__data_ready__SHIFT 0x12
#define GDS_DEBUG_REG1__awaiting_data_MASK 0x80000
#define GDS_DEBUG_REG1__awaiting_data__SHIFT 0x13
#define GDS_DEBUG_REG1__addr_fifo_full_MASK 0x100000
#define GDS_DEBUG_REG1__addr_fifo_full__SHIFT 0x14
#define GDS_DEBUG_REG1__addr_fifo_empty_MASK 0x200000
#define GDS_DEBUG_REG1__addr_fifo_empty__SHIFT 0x15
#define GDS_DEBUG_REG1__buffer_loaded_MASK 0x400000
#define GDS_DEBUG_REG1__buffer_loaded__SHIFT 0x16
#define GDS_DEBUG_REG1__buffer_invalid_MASK 0x800000
#define GDS_DEBUG_REG1__buffer_invalid__SHIFT 0x17
#define GDS_DEBUG_REG1__spare_MASK 0xff000000
#define GDS_DEBUG_REG1__spare__SHIFT 0x18
#define GDS_DEBUG_REG2__ds_full_MASK 0x1
#define GDS_DEBUG_REG2__ds_full__SHIFT 0x0
#define GDS_DEBUG_REG2__ds_credit_avail_MASK 0x2
#define GDS_DEBUG_REG2__ds_credit_avail__SHIFT 0x1
#define GDS_DEBUG_REG2__ord_idx_free_MASK 0x4
#define GDS_DEBUG_REG2__ord_idx_free__SHIFT 0x2
#define GDS_DEBUG_REG2__cmd_write_MASK 0x8
#define GDS_DEBUG_REG2__cmd_write__SHIFT 0x3
#define GDS_DEBUG_REG2__app_sel_MASK 0xf0
#define GDS_DEBUG_REG2__app_sel__SHIFT 0x4
#define GDS_DEBUG_REG2__req_MASK 0x7fff00
#define GDS_DEBUG_REG2__req__SHIFT 0x8
#define GDS_DEBUG_REG2__spare_MASK 0xff800000
#define GDS_DEBUG_REG2__spare__SHIFT 0x17
#define GDS_DEBUG_REG3__pipe_num_busy_MASK 0x7ff
#define GDS_DEBUG_REG3__pipe_num_busy__SHIFT 0x0
#define GDS_DEBUG_REG3__pipe0_busy_num_MASK 0x7800
#define GDS_DEBUG_REG3__pipe0_busy_num__SHIFT 0xb
#define GDS_DEBUG_REG3__spare_MASK 0xffff8000
#define GDS_DEBUG_REG3__spare__SHIFT 0xf
#define GDS_DEBUG_REG4__gws_busy_MASK 0x1
#define GDS_DEBUG_REG4__gws_busy__SHIFT 0x0
#define GDS_DEBUG_REG4__gws_req_MASK 0x2
#define GDS_DEBUG_REG4__gws_req__SHIFT 0x1
#define GDS_DEBUG_REG4__gws_out_stall_MASK 0x4
#define GDS_DEBUG_REG4__gws_out_stall__SHIFT 0x2
#define GDS_DEBUG_REG4__cur_reso_MASK 0x1f8
#define GDS_DEBUG_REG4__cur_reso__SHIFT 0x3
#define GDS_DEBUG_REG4__cur_reso_head_valid_MASK 0x200
#define GDS_DEBUG_REG4__cur_reso_head_valid__SHIFT 0x9
#define GDS_DEBUG_REG4__cur_reso_head_dirty_MASK 0x400
#define GDS_DEBUG_REG4__cur_reso_head_dirty__SHIFT 0xa
#define GDS_DEBUG_REG4__cur_reso_head_flag_MASK 0x800
#define GDS_DEBUG_REG4__cur_reso_head_flag__SHIFT 0xb
#define GDS_DEBUG_REG4__cur_reso_fed_MASK 0x1000
#define GDS_DEBUG_REG4__cur_reso_fed__SHIFT 0xc
#define GDS_DEBUG_REG4__cur_reso_barrier_MASK 0x2000
#define GDS_DEBUG_REG4__cur_reso_barrier__SHIFT 0xd
#define GDS_DEBUG_REG4__cur_reso_flag_MASK 0x4000
#define GDS_DEBUG_REG4__cur_reso_flag__SHIFT 0xe
#define GDS_DEBUG_REG4__cur_reso_cnt_gt0_MASK 0x8000
#define GDS_DEBUG_REG4__cur_reso_cnt_gt0__SHIFT 0xf
#define GDS_DEBUG_REG4__credit_cnt_gt0_MASK 0x10000
#define GDS_DEBUG_REG4__credit_cnt_gt0__SHIFT 0x10
#define GDS_DEBUG_REG4__cmd_write_MASK 0x20000
#define GDS_DEBUG_REG4__cmd_write__SHIFT 0x11
#define GDS_DEBUG_REG4__grbm_gws_reso_wr_MASK 0x40000
#define GDS_DEBUG_REG4__grbm_gws_reso_wr__SHIFT 0x12
#define GDS_DEBUG_REG4__grbm_gws_reso_rd_MASK 0x80000
#define GDS_DEBUG_REG4__grbm_gws_reso_rd__SHIFT 0x13
#define GDS_DEBUG_REG4__ram_read_busy_MASK 0x100000
#define GDS_DEBUG_REG4__ram_read_busy__SHIFT 0x14
#define GDS_DEBUG_REG4__gws_bulkfree_MASK 0x200000
#define GDS_DEBUG_REG4__gws_bulkfree__SHIFT 0x15
#define GDS_DEBUG_REG4__ram_gws_re_MASK 0x400000
#define GDS_DEBUG_REG4__ram_gws_re__SHIFT 0x16
#define GDS_DEBUG_REG4__ram_gws_we_MASK 0x800000
#define GDS_DEBUG_REG4__ram_gws_we__SHIFT 0x17
#define GDS_DEBUG_REG4__spare_MASK 0xff000000
#define GDS_DEBUG_REG4__spare__SHIFT 0x18
#define GDS_DEBUG_REG5__write_dis_MASK 0x1
#define GDS_DEBUG_REG5__write_dis__SHIFT 0x0
#define GDS_DEBUG_REG5__dec_error_MASK 0x2
#define GDS_DEBUG_REG5__dec_error__SHIFT 0x1
#define GDS_DEBUG_REG5__alloc_opco_error_MASK 0x4
#define GDS_DEBUG_REG5__alloc_opco_error__SHIFT 0x2
#define GDS_DEBUG_REG5__dealloc_opco_error_MASK 0x8
#define GDS_DEBUG_REG5__dealloc_opco_error__SHIFT 0x3
#define GDS_DEBUG_REG5__wrap_opco_error_MASK 0x10
#define GDS_DEBUG_REG5__wrap_opco_error__SHIFT 0x4
#define GDS_DEBUG_REG5__spare_MASK 0xe0
#define GDS_DEBUG_REG5__spare__SHIFT 0x5
#define GDS_DEBUG_REG5__error_ds_address_MASK 0x3fff00
#define GDS_DEBUG_REG5__error_ds_address__SHIFT 0x8
#define GDS_DEBUG_REG5__spare1_MASK 0xffc00000
#define GDS_DEBUG_REG5__spare1__SHIFT 0x16
#define GDS_DEBUG_REG6__oa_busy_MASK 0x1
#define GDS_DEBUG_REG6__oa_busy__SHIFT 0x0
#define GDS_DEBUG_REG6__counters_enabled_MASK 0x1e
#define GDS_DEBUG_REG6__counters_enabled__SHIFT 0x1
#define GDS_DEBUG_REG6__counters_busy_MASK 0x1fffe0
#define GDS_DEBUG_REG6__counters_busy__SHIFT 0x5
#define GDS_DEBUG_REG6__spare_MASK 0xffe00000
#define GDS_DEBUG_REG6__spare__SHIFT 0x15
#define GDS_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT_MASK 0x3ff
#define GDS_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
#define GDS_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT1_MASK 0xffc00
#define GDS_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
#define GDS_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0xf00000
#define GDS_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
#define GDS_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT_MASK 0x3ff
#define GDS_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
#define GDS_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT1_MASK 0xffc00
#define GDS_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
#define GDS_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0xf00000
#define GDS_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
#define GDS_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT_MASK 0x3ff
#define GDS_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
#define GDS_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT1_MASK 0xffc00
#define GDS_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
#define GDS_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0xf00000
#define GDS_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
#define GDS_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT_MASK 0x3ff
#define GDS_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
#define GDS_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT1_MASK 0xffc00
#define GDS_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
#define GDS_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0xf00000
#define GDS_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
#define GDS_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define GDS_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
#define GDS_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define GDS_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
#define GDS_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define GDS_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
#define GDS_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffff
#define GDS_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
#define GDS_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define GDS_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
#define GDS_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define GDS_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
#define GDS_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define GDS_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
#define GDS_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffff
#define GDS_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
#define GDS_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT2_MASK 0x3ff
#define GDS_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT2__SHIFT 0x0
#define GDS_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT3_MASK 0xffc00
#define GDS_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT3__SHIFT 0xa
#define GDS_VMID0_BASE__BASE_MASK 0xffff
#define GDS_VMID0_BASE__BASE__SHIFT 0x0
#define GDS_VMID1_BASE__BASE_MASK 0xffff
#define GDS_VMID1_BASE__BASE__SHIFT 0x0
#define GDS_VMID2_BASE__BASE_MASK 0xffff
#define GDS_VMID2_BASE__BASE__SHIFT 0x0
#define GDS_VMID3_BASE__BASE_MASK 0xffff
#define GDS_VMID3_BASE__BASE__SHIFT 0x0
#define GDS_VMID4_BASE__BASE_MASK 0xffff
#define GDS_VMID4_BASE__BASE__SHIFT 0x0
#define GDS_VMID5_BASE__BASE_MASK 0xffff
#define GDS_VMID5_BASE__BASE__SHIFT 0x0
#define GDS_VMID6_BASE__BASE_MASK 0xffff
#define GDS_VMID6_BASE__BASE__SHIFT 0x0
#define GDS_VMID7_BASE__BASE_MASK 0xffff
#define GDS_VMID7_BASE__BASE__SHIFT 0x0
#define GDS_VMID8_BASE__BASE_MASK 0xffff
#define GDS_VMID8_BASE__BASE__SHIFT 0x0
#define GDS_VMID9_BASE__BASE_MASK 0xffff
#define GDS_VMID9_BASE__BASE__SHIFT 0x0
#define GDS_VMID10_BASE__BASE_MASK 0xffff
#define GDS_VMID10_BASE__BASE__SHIFT 0x0
#define GDS_VMID11_BASE__BASE_MASK 0xffff
#define GDS_VMID11_BASE__BASE__SHIFT 0x0
#define GDS_VMID12_BASE__BASE_MASK 0xffff
#define GDS_VMID12_BASE__BASE__SHIFT 0x0
#define GDS_VMID13_BASE__BASE_MASK 0xffff
#define GDS_VMID13_BASE__BASE__SHIFT 0x0
#define GDS_VMID14_BASE__BASE_MASK 0xffff
#define GDS_VMID14_BASE__BASE__SHIFT 0x0
#define GDS_VMID15_BASE__BASE_MASK 0xffff
#define GDS_VMID15_BASE__BASE__SHIFT 0x0
#define GDS_VMID0_SIZE__SIZE_MASK 0x1ffff
#define GDS_VMID0_SIZE__SIZE__SHIFT 0x0
#define GDS_VMID1_SIZE__SIZE_MASK 0x1ffff
#define GDS_VMID1_SIZE__SIZE__SHIFT 0x0
#define GDS_VMID2_SIZE__SIZE_MASK 0x1ffff
#define GDS_VMID2_SIZE__SIZE__SHIFT 0x0
#define GDS_VMID3_SIZE__SIZE_MASK 0x1ffff
#define GDS_VMID3_SIZE__SIZE__SHIFT 0x0
#define GDS_VMID4_SIZE__SIZE_MASK 0x1ffff
#define GDS_VMID4_SIZE__SIZE__SHIFT 0x0
#define GDS_VMID5_SIZE__SIZE_MASK 0x1ffff
#define GDS_VMID5_SIZE__SIZE__SHIFT 0x0
#define GDS_VMID6_SIZE__SIZE_MASK 0x1ffff
#define GDS_VMID6_SIZE__SIZE__SHIFT 0x0
#define GDS_VMID7_SIZE__SIZE_MASK 0x1ffff
#define GDS_VMID7_SIZE__SIZE__SHIFT 0x0
#define GDS_VMID8_SIZE__SIZE_MASK 0x1ffff
#define GDS_VMID8_SIZE__SIZE__SHIFT 0x0
#define GDS_VMID9_SIZE__SIZE_MASK 0x1ffff
#define GDS_VMID9_SIZE__SIZE__SHIFT 0x0
#define GDS_VMID10_SIZE__SIZE_MASK 0x1ffff
#define GDS_VMID10_SIZE__SIZE__SHIFT 0x0
#define GDS_VMID11_SIZE__SIZE_MASK 0x1ffff
#define GDS_VMID11_SIZE__SIZE__SHIFT 0x0
#define GDS_VMID12_SIZE__SIZE_MASK 0x1ffff
#define GDS_VMID12_SIZE__SIZE__SHIFT 0x0
#define GDS_VMID13_SIZE__SIZE_MASK 0x1ffff
#define GDS_VMID13_SIZE__SIZE__SHIFT 0x0
#define GDS_VMID14_SIZE__SIZE_MASK 0x1ffff
#define GDS_VMID14_SIZE__SIZE__SHIFT 0x0
#define GDS_VMID15_SIZE__SIZE_MASK 0x1ffff
#define GDS_VMID15_SIZE__SIZE__SHIFT 0x0
#define GDS_GWS_VMID0__BASE_MASK 0x3f
#define GDS_GWS_VMID0__BASE__SHIFT 0x0
#define GDS_GWS_VMID0__SIZE_MASK 0x7f0000
#define GDS_GWS_VMID0__SIZE__SHIFT 0x10
#define GDS_GWS_VMID1__BASE_MASK 0x3f
#define GDS_GWS_VMID1__BASE__SHIFT 0x0
#define GDS_GWS_VMID1__SIZE_MASK 0x7f0000
#define GDS_GWS_VMID1__SIZE__SHIFT 0x10
#define GDS_GWS_VMID2__BASE_MASK 0x3f
#define GDS_GWS_VMID2__BASE__SHIFT 0x0
#define GDS_GWS_VMID2__SIZE_MASK 0x7f0000
#define GDS_GWS_VMID2__SIZE__SHIFT 0x10
#define GDS_GWS_VMID3__BASE_MASK 0x3f
#define GDS_GWS_VMID3__BASE__SHIFT 0x0
#define GDS_GWS_VMID3__SIZE_MASK 0x7f0000
#define GDS_GWS_VMID3__SIZE__SHIFT 0x10
#define GDS_GWS_VMID4__BASE_MASK 0x3f
#define GDS_GWS_VMID4__BASE__SHIFT 0x0
#define GDS_GWS_VMID4__SIZE_MASK 0x7f0000
#define GDS_GWS_VMID4__SIZE__SHIFT 0x10
#define GDS_GWS_VMID5__BASE_MASK 0x3f
#define GDS_GWS_VMID5__BASE__SHIFT 0x0
#define GDS_GWS_VMID5__SIZE_MASK 0x7f0000
#define GDS_GWS_VMID5__SIZE__SHIFT 0x10
#define GDS_GWS_VMID6__BASE_MASK 0x3f
#define GDS_GWS_VMID6__BASE__SHIFT 0x0
#define GDS_GWS_VMID6__SIZE_MASK 0x7f0000
#define GDS_GWS_VMID6__SIZE__SHIFT 0x10
#define GDS_GWS_VMID7__BASE_MASK 0x3f
#define GDS_GWS_VMID7__BASE__SHIFT 0x0
#define GDS_GWS_VMID7__SIZE_MASK 0x7f0000
#define GDS_GWS_VMID7__SIZE__SHIFT 0x10
#define GDS_GWS_VMID8__BASE_MASK 0x3f
#define GDS_GWS_VMID8__BASE__SHIFT 0x0
#define GDS_GWS_VMID8__SIZE_MASK 0x7f0000
#define GDS_GWS_VMID8__SIZE__SHIFT 0x10
#define GDS_GWS_VMID9__BASE_MASK 0x3f
#define GDS_GWS_VMID9__BASE__SHIFT 0x0
#define GDS_GWS_VMID9__SIZE_MASK 0x7f0000
#define GDS_GWS_VMID9__SIZE__SHIFT 0x10
#define GDS_GWS_VMID10__BASE_MASK 0x3f
#define GDS_GWS_VMID10__BASE__SHIFT 0x0
#define GDS_GWS_VMID10__SIZE_MASK 0x7f0000
#define GDS_GWS_VMID10__SIZE__SHIFT 0x10
#define GDS_GWS_VMID11__BASE_MASK 0x3f
#define GDS_GWS_VMID11__BASE__SHIFT 0x0
#define GDS_GWS_VMID11__SIZE_MASK 0x7f0000
#define GDS_GWS_VMID11__SIZE__SHIFT 0x10
#define GDS_GWS_VMID12__BASE_MASK 0x3f
#define GDS_GWS_VMID12__BASE__SHIFT 0x0
#define GDS_GWS_VMID12__SIZE_MASK 0x7f0000
#define GDS_GWS_VMID12__SIZE__SHIFT 0x10
#define GDS_GWS_VMID13__BASE_MASK 0x3f
#define GDS_GWS_VMID13__BASE__SHIFT 0x0
#define GDS_GWS_VMID13__SIZE_MASK 0x7f0000
#define GDS_GWS_VMID13__SIZE__SHIFT 0x10
#define GDS_GWS_VMID14__BASE_MASK 0x3f
#define GDS_GWS_VMID14__BASE__SHIFT 0x0
#define GDS_GWS_VMID14__SIZE_MASK 0x7f0000
#define GDS_GWS_VMID14__SIZE__SHIFT 0x10
#define GDS_GWS_VMID15__BASE_MASK 0x3f
#define GDS_GWS_VMID15__BASE__SHIFT 0x0
#define GDS_GWS_VMID15__SIZE_MASK 0x7f0000
#define GDS_GWS_VMID15__SIZE__SHIFT 0x10
#define GDS_OA_VMID0__MASK_MASK 0xffff
#define GDS_OA_VMID0__MASK__SHIFT 0x0
#define GDS_OA_VMID0__UNUSED_MASK 0xffff0000
#define GDS_OA_VMID0__UNUSED__SHIFT 0x10
#define GDS_OA_VMID1__MASK_MASK 0xffff
#define GDS_OA_VMID1__MASK__SHIFT 0x0
#define GDS_OA_VMID1__UNUSED_MASK 0xffff0000
#define GDS_OA_VMID1__UNUSED__SHIFT 0x10
#define GDS_OA_VMID2__MASK_MASK 0xffff
#define GDS_OA_VMID2__MASK__SHIFT 0x0
#define GDS_OA_VMID2__UNUSED_MASK 0xffff0000
#define GDS_OA_VMID2__UNUSED__SHIFT 0x10
#define GDS_OA_VMID3__MASK_MASK 0xffff
#define GDS_OA_VMID3__MASK__SHIFT 0x0
#define GDS_OA_VMID3__UNUSED_MASK 0xffff0000
#define GDS_OA_VMID3__UNUSED__SHIFT 0x10
#define GDS_OA_VMID4__MASK_MASK 0xffff
#define GDS_OA_VMID4__MASK__SHIFT 0x0
#define GDS_OA_VMID4__UNUSED_MASK 0xffff0000
#define GDS_OA_VMID4__UNUSED__SHIFT 0x10
#define GDS_OA_VMID5__MASK_MASK 0xffff
#define GDS_OA_VMID5__MASK__SHIFT 0x0
#define GDS_OA_VMID5__UNUSED_MASK 0xffff0000
#define GDS_OA_VMID5__UNUSED__SHIFT 0x10
#define GDS_OA_VMID6__MASK_MASK 0xffff
#define GDS_OA_VMID6__MASK__SHIFT 0x0
#define GDS_OA_VMID6__UNUSED_MASK 0xffff0000
#define GDS_OA_VMID6__UNUSED__SHIFT 0x10
#define GDS_OA_VMID7__MASK_MASK 0xffff
#define GDS_OA_VMID7__MASK__SHIFT 0x0
#define GDS_OA_VMID7__UNUSED_MASK 0xffff0000
#define GDS_OA_VMID7__UNUSED__SHIFT 0x10
#define GDS_OA_VMID8__MASK_MASK 0xffff
#define GDS_OA_VMID8__MASK__SHIFT 0x0
#define GDS_OA_VMID8__UNUSED_MASK 0xffff0000
#define GDS_OA_VMID8__UNUSED__SHIFT 0x10
#define GDS_OA_VMID9__MASK_MASK 0xffff
#define GDS_OA_VMID9__MASK__SHIFT 0x0
#define GDS_OA_VMID9__UNUSED_MASK 0xffff0000
#define GDS_OA_VMID9__UNUSED__SHIFT 0x10
#define GDS_OA_VMID10__MASK_MASK 0xffff
#define GDS_OA_VMID10__MASK__SHIFT 0x0
#define GDS_OA_VMID10__UNUSED_MASK 0xffff0000
#define GDS_OA_VMID10__UNUSED__SHIFT 0x10
#define GDS_OA_VMID11__MASK_MASK 0xffff
#define GDS_OA_VMID11__MASK__SHIFT 0x0
#define GDS_OA_VMID11__UNUSED_MASK 0xffff0000
#define GDS_OA_VMID11__UNUSED__SHIFT 0x10
#define GDS_OA_VMID12__MASK_MASK 0xffff
#define GDS_OA_VMID12__MASK__SHIFT 0x0
#define GDS_OA_VMID12__UNUSED_MASK 0xffff0000
#define GDS_OA_VMID12__UNUSED__SHIFT 0x10
#define GDS_OA_VMID13__MASK_MASK 0xffff
#define GDS_OA_VMID13__MASK__SHIFT 0x0
#define GDS_OA_VMID13__UNUSED_MASK 0xffff0000
#define GDS_OA_VMID13__UNUSED__SHIFT 0x10
#define GDS_OA_VMID14__MASK_MASK 0xffff
#define GDS_OA_VMID14__MASK__SHIFT 0x0
#define GDS_OA_VMID14__UNUSED_MASK 0xffff0000
#define GDS_OA_VMID14__UNUSED__SHIFT 0x10
#define GDS_OA_VMID15__MASK_MASK 0xffff
#define GDS_OA_VMID15__MASK__SHIFT 0x0
#define GDS_OA_VMID15__UNUSED_MASK 0xffff0000
#define GDS_OA_VMID15__UNUSED__SHIFT 0x10
#define GDS_GWS_RESET0__RESOURCE0_RESET_MASK 0x1
#define GDS_GWS_RESET0__RESOURCE0_RESET__SHIFT 0x0
#define GDS_GWS_RESET0__RESOURCE1_RESET_MASK 0x2
#define GDS_GWS_RESET0__RESOURCE1_RESET__SHIFT 0x1
#define GDS_GWS_RESET0__RESOURCE2_RESET_MASK 0x4
#define GDS_GWS_RESET0__RESOURCE2_RESET__SHIFT 0x2
#define GDS_GWS_RESET0__RESOURCE3_RESET_MASK 0x8
#define GDS_GWS_RESET0__RESOURCE3_RESET__SHIFT 0x3
#define GDS_GWS_RESET0__RESOURCE4_RESET_MASK 0x10
#define GDS_GWS_RESET0__RESOURCE4_RESET__SHIFT 0x4
#define GDS_GWS_RESET0__RESOURCE5_RESET_MASK 0x20
#define GDS_GWS_RESET0__RESOURCE5_RESET__SHIFT 0x5
#define GDS_GWS_RESET0__RESOURCE6_RESET_MASK 0x40
#define GDS_GWS_RESET0__RESOURCE6_RESET__SHIFT 0x6
#define GDS_GWS_RESET0__RESOURCE7_RESET_MASK 0x80
#define GDS_GWS_RESET0__RESOURCE7_RESET__SHIFT 0x7
#define GDS_GWS_RESET0__RESOURCE8_RESET_MASK 0x100
#define GDS_GWS_RESET0__RESOURCE8_RESET__SHIFT 0x8
#define GDS_GWS_RESET0__RESOURCE9_RESET_MASK 0x200
#define GDS_GWS_RESET0__RESOURCE9_RESET__SHIFT 0x9
#define GDS_GWS_RESET0__RESOURCE10_RESET_MASK 0x400
#define GDS_GWS_RESET0__RESOURCE10_RESET__SHIFT 0xa
#define GDS_GWS_RESET0__RESOURCE11_RESET_MASK 0x800
#define GDS_GWS_RESET0__RESOURCE11_RESET__SHIFT 0xb
#define GDS_GWS_RESET0__RESOURCE12_RESET_MASK 0x1000
#define GDS_GWS_RESET0__RESOURCE12_RESET__SHIFT 0xc
#define GDS_GWS_RESET0__RESOURCE13_RESET_MASK 0x2000
#define GDS_GWS_RESET0__RESOURCE13_RESET__SHIFT 0xd
#define GDS_GWS_RESET0__RESOURCE14_RESET_MASK 0x4000
#define GDS_GWS_RESET0__RESOURCE14_RESET__SHIFT 0xe
#define GDS_GWS_RESET0__RESOURCE15_RESET_MASK 0x8000
#define GDS_GWS_RESET0__RESOURCE15_RESET__SHIFT 0xf
#define GDS_GWS_RESET0__RESOURCE16_RESET_MASK 0x10000
#define GDS_GWS_RESET0__RESOURCE16_RESET__SHIFT 0x10
#define GDS_GWS_RESET0__RESOURCE17_RESET_MASK 0x20000
#define GDS_GWS_RESET0__RESOURCE17_RESET__SHIFT 0x11
#define GDS_GWS_RESET0__RESOURCE18_RESET_MASK 0x40000
#define GDS_GWS_RESET0__RESOURCE18_RESET__SHIFT 0x12
#define GDS_GWS_RESET0__RESOURCE19_RESET_MASK 0x80000
#define GDS_GWS_RESET0__RESOURCE19_RESET__SHIFT 0x13
#define GDS_GWS_RESET0__RESOURCE20_RESET_MASK 0x100000
#define GDS_GWS_RESET0__RESOURCE20_RESET__SHIFT 0x14
#define GDS_GWS_RESET0__RESOURCE21_RESET_MASK 0x200000
#define GDS_GWS_RESET0__RESOURCE21_RESET__SHIFT 0x15
#define GDS_GWS_RESET0__RESOURCE22_RESET_MASK 0x400000
#define GDS_GWS_RESET0__RESOURCE22_RESET__SHIFT 0x16
#define GDS_GWS_RESET0__RESOURCE23_RESET_MASK 0x800000
#define GDS_GWS_RESET0__RESOURCE23_RESET__SHIFT 0x17
#define GDS_GWS_RESET0__RESOURCE24_RESET_MASK 0x1000000
#define GDS_GWS_RESET0__RESOURCE24_RESET__SHIFT 0x18
#define GDS_GWS_RESET0__RESOURCE25_RESET_MASK 0x2000000
#define GDS_GWS_RESET0__RESOURCE25_RESET__SHIFT 0x19
#define GDS_GWS_RESET0__RESOURCE26_RESET_MASK 0x4000000
#define GDS_GWS_RESET0__RESOURCE26_RESET__SHIFT 0x1a
#define GDS_GWS_RESET0__RESOURCE27_RESET_MASK 0x8000000
#define GDS_GWS_RESET0__RESOURCE27_RESET__SHIFT 0x1b
#define GDS_GWS_RESET0__RESOURCE28_RESET_MASK 0x10000000
#define GDS_GWS_RESET0__RESOURCE28_RESET__SHIFT 0x1c
#define GDS_GWS_RESET0__RESOURCE29_RESET_MASK 0x20000000
#define GDS_GWS_RESET0__RESOURCE29_RESET__SHIFT 0x1d
#define GDS_GWS_RESET0__RESOURCE30_RESET_MASK 0x40000000
#define GDS_GWS_RESET0__RESOURCE30_RESET__SHIFT 0x1e
#define GDS_GWS_RESET0__RESOURCE31_RESET_MASK 0x80000000
#define GDS_GWS_RESET0__RESOURCE31_RESET__SHIFT 0x1f
#define GDS_GWS_RESET1__RESOURCE32_RESET_MASK 0x1
#define GDS_GWS_RESET1__RESOURCE32_RESET__SHIFT 0x0
#define GDS_GWS_RESET1__RESOURCE33_RESET_MASK 0x2
#define GDS_GWS_RESET1__RESOURCE33_RESET__SHIFT 0x1
#define GDS_GWS_RESET1__RESOURCE34_RESET_MASK 0x4
#define GDS_GWS_RESET1__RESOURCE34_RESET__SHIFT 0x2
#define GDS_GWS_RESET1__RESOURCE35_RESET_MASK 0x8
#define GDS_GWS_RESET1__RESOURCE35_RESET__SHIFT 0x3
#define GDS_GWS_RESET1__RESOURCE36_RESET_MASK 0x10
#define GDS_GWS_RESET1__RESOURCE36_RESET__SHIFT 0x4
#define GDS_GWS_RESET1__RESOURCE37_RESET_MASK 0x20
#define GDS_GWS_RESET1__RESOURCE37_RESET__SHIFT 0x5
#define GDS_GWS_RESET1__RESOURCE38_RESET_MASK 0x40
#define GDS_GWS_RESET1__RESOURCE38_RESET__SHIFT 0x6
#define GDS_GWS_RESET1__RESOURCE39_RESET_MASK 0x80
#define GDS_GWS_RESET1__RESOURCE39_RESET__SHIFT 0x7
#define GDS_GWS_RESET1__RESOURCE40_RESET_MASK 0x100
#define GDS_GWS_RESET1__RESOURCE40_RESET__SHIFT 0x8
#define GDS_GWS_RESET1__RESOURCE41_RESET_MASK 0x200
#define GDS_GWS_RESET1__RESOURCE41_RESET__SHIFT 0x9
#define GDS_GWS_RESET1__RESOURCE42_RESET_MASK 0x400
#define GDS_GWS_RESET1__RESOURCE42_RESET__SHIFT 0xa
#define GDS_GWS_RESET1__RESOURCE43_RESET_MASK 0x800
#define GDS_GWS_RESET1__RESOURCE43_RESET__SHIFT 0xb
#define GDS_GWS_RESET1__RESOURCE44_RESET_MASK 0x1000
#define GDS_GWS_RESET1__RESOURCE44_RESET__SHIFT 0xc
#define GDS_GWS_RESET1__RESOURCE45_RESET_MASK 0x2000
#define GDS_GWS_RESET1__RESOURCE45_RESET__SHIFT 0xd
#define GDS_GWS_RESET1__RESOURCE46_RESET_MASK 0x4000
#define GDS_GWS_RESET1__RESOURCE46_RESET__SHIFT 0xe
#define GDS_GWS_RESET1__RESOURCE47_RESET_MASK 0x8000
#define GDS_GWS_RESET1__RESOURCE47_RESET__SHIFT 0xf
#define GDS_GWS_RESET1__RESOURCE48_RESET_MASK 0x10000
#define GDS_GWS_RESET1__RESOURCE48_RESET__SHIFT 0x10
#define GDS_GWS_RESET1__RESOURCE49_RESET_MASK 0x20000
#define GDS_GWS_RESET1__RESOURCE49_RESET__SHIFT 0x11
#define GDS_GWS_RESET1__RESOURCE50_RESET_MASK 0x40000
#define GDS_GWS_RESET1__RESOURCE50_RESET__SHIFT 0x12
#define GDS_GWS_RESET1__RESOURCE51_RESET_MASK 0x80000
#define GDS_GWS_RESET1__RESOURCE51_RESET__SHIFT 0x13
#define GDS_GWS_RESET1__RESOURCE52_RESET_MASK 0x100000
#define GDS_GWS_RESET1__RESOURCE52_RESET__SHIFT 0x14
#define GDS_GWS_RESET1__RESOURCE53_RESET_MASK 0x200000
#define GDS_GWS_RESET1__RESOURCE53_RESET__SHIFT 0x15
#define GDS_GWS_RESET1__RESOURCE54_RESET_MASK 0x400000
#define GDS_GWS_RESET1__RESOURCE54_RESET__SHIFT 0x16
#define GDS_GWS_RESET1__RESOURCE55_RESET_MASK 0x800000
#define GDS_GWS_RESET1__RESOURCE55_RESET__SHIFT 0x17
#define GDS_GWS_RESET1__RESOURCE56_RESET_MASK 0x1000000
#define GDS_GWS_RESET1__RESOURCE56_RESET__SHIFT 0x18
#define GDS_GWS_RESET1__RESOURCE57_RESET_MASK 0x2000000
#define GDS_GWS_RESET1__RESOURCE57_RESET__SHIFT 0x19
#define GDS_GWS_RESET1__RESOURCE58_RESET_MASK 0x4000000
#define GDS_GWS_RESET1__RESOURCE58_RESET__SHIFT 0x1a
#define GDS_GWS_RESET1__RESOURCE59_RESET_MASK 0x8000000
#define GDS_GWS_RESET1__RESOURCE59_RESET__SHIFT 0x1b
#define GDS_GWS_RESET1__RESOURCE60_RESET_MASK 0x10000000
#define GDS_GWS_RESET1__RESOURCE60_RESET__SHIFT 0x1c
#define GDS_GWS_RESET1__RESOURCE61_RESET_MASK 0x20000000
#define GDS_GWS_RESET1__RESOURCE61_RESET__SHIFT 0x1d
#define GDS_GWS_RESET1__RESOURCE62_RESET_MASK 0x40000000
17886#define GDS_GWS_RESET1__RESOURCE62_RESET__SHIFT 0x1e
17887#define GDS_GWS_RESET1__RESOURCE63_RESET_MASK 0x80000000
17888#define GDS_GWS_RESET1__RESOURCE63_RESET__SHIFT 0x1f
17889#define GDS_GWS_RESOURCE_RESET__RESET_MASK 0x1
17890#define GDS_GWS_RESOURCE_RESET__RESET__SHIFT 0x0
17891#define GDS_GWS_RESOURCE_RESET__RESOURCE_ID_MASK 0xff00
17892#define GDS_GWS_RESOURCE_RESET__RESOURCE_ID__SHIFT 0x8
17893#define GDS_COMPUTE_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0xfff
17894#define GDS_COMPUTE_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
17895#define GDS_OA_RESET_MASK__ME0_GFXHP3D_PIX_RESET_MASK 0x1
17896#define GDS_OA_RESET_MASK__ME0_GFXHP3D_PIX_RESET__SHIFT 0x0
17897#define GDS_OA_RESET_MASK__ME0_GFXHP3D_VTX_RESET_MASK 0x2
17898#define GDS_OA_RESET_MASK__ME0_GFXHP3D_VTX_RESET__SHIFT 0x1
17899#define GDS_OA_RESET_MASK__ME0_CS_RESET_MASK 0x4
17900#define GDS_OA_RESET_MASK__ME0_CS_RESET__SHIFT 0x2
17901#define GDS_OA_RESET_MASK__UNUSED0_MASK 0x8
17902#define GDS_OA_RESET_MASK__UNUSED0__SHIFT 0x3
17903#define GDS_OA_RESET_MASK__ME1_PIPE0_RESET_MASK 0x10
17904#define GDS_OA_RESET_MASK__ME1_PIPE0_RESET__SHIFT 0x4
17905#define GDS_OA_RESET_MASK__ME1_PIPE1_RESET_MASK 0x20
17906#define GDS_OA_RESET_MASK__ME1_PIPE1_RESET__SHIFT 0x5
17907#define GDS_OA_RESET_MASK__ME1_PIPE2_RESET_MASK 0x40
17908#define GDS_OA_RESET_MASK__ME1_PIPE2_RESET__SHIFT 0x6
17909#define GDS_OA_RESET_MASK__ME1_PIPE3_RESET_MASK 0x80
17910#define GDS_OA_RESET_MASK__ME1_PIPE3_RESET__SHIFT 0x7
17911#define GDS_OA_RESET_MASK__ME2_PIPE0_RESET_MASK 0x100
17912#define GDS_OA_RESET_MASK__ME2_PIPE0_RESET__SHIFT 0x8
17913#define GDS_OA_RESET_MASK__ME2_PIPE1_RESET_MASK 0x200
17914#define GDS_OA_RESET_MASK__ME2_PIPE1_RESET__SHIFT 0x9
17915#define GDS_OA_RESET_MASK__ME2_PIPE2_RESET_MASK 0x400
17916#define GDS_OA_RESET_MASK__ME2_PIPE2_RESET__SHIFT 0xa
17917#define GDS_OA_RESET_MASK__ME2_PIPE3_RESET_MASK 0x800
17918#define GDS_OA_RESET_MASK__ME2_PIPE3_RESET__SHIFT 0xb
17919#define GDS_OA_RESET_MASK__UNUSED1_MASK 0xfffff000
17920#define GDS_OA_RESET_MASK__UNUSED1__SHIFT 0xc
17921#define GDS_OA_RESET__RESET_MASK 0x1
17922#define GDS_OA_RESET__RESET__SHIFT 0x0
17923#define GDS_OA_RESET__PIPE_ID_MASK 0xff00
17924#define GDS_OA_RESET__PIPE_ID__SHIFT 0x8
17925#define GDS_ENHANCE__MISC_MASK 0xffff
17926#define GDS_ENHANCE__MISC__SHIFT 0x0
17927#define GDS_ENHANCE__AUTO_INC_INDEX_MASK 0x10000
17928#define GDS_ENHANCE__AUTO_INC_INDEX__SHIFT 0x10
17929#define GDS_ENHANCE__CGPG_RESTORE_MASK 0x20000
17930#define GDS_ENHANCE__CGPG_RESTORE__SHIFT 0x11
17931#define GDS_ENHANCE__UNUSED_MASK 0xfffc0000
17932#define GDS_ENHANCE__UNUSED__SHIFT 0x12
17933#define GDS_OA_CGPG_RESTORE__VMID_MASK 0xff
17934#define GDS_OA_CGPG_RESTORE__VMID__SHIFT 0x0
17935#define GDS_OA_CGPG_RESTORE__MEID_MASK 0xf00
17936#define GDS_OA_CGPG_RESTORE__MEID__SHIFT 0x8
17937#define GDS_OA_CGPG_RESTORE__PIPEID_MASK 0xf000
17938#define GDS_OA_CGPG_RESTORE__PIPEID__SHIFT 0xc
17939#define GDS_OA_CGPG_RESTORE__QUEUEID_MASK 0xf0000
17940#define GDS_OA_CGPG_RESTORE__QUEUEID__SHIFT 0x10
17941#define GDS_OA_CGPG_RESTORE__UNUSED_MASK 0xfff00000
17942#define GDS_OA_CGPG_RESTORE__UNUSED__SHIFT 0x14
17943#define GDS_CS_CTXSW_STATUS__R_MASK 0x1
17944#define GDS_CS_CTXSW_STATUS__R__SHIFT 0x0
17945#define GDS_CS_CTXSW_STATUS__W_MASK 0x2
17946#define GDS_CS_CTXSW_STATUS__W__SHIFT 0x1
17947#define GDS_CS_CTXSW_STATUS__UNUSED_MASK 0xfffffffc
17948#define GDS_CS_CTXSW_STATUS__UNUSED__SHIFT 0x2
17949#define GDS_CS_CTXSW_CNT0__UPDN_MASK 0xffff
17950#define GDS_CS_CTXSW_CNT0__UPDN__SHIFT 0x0
17951#define GDS_CS_CTXSW_CNT0__PTR_MASK 0xffff0000
17952#define GDS_CS_CTXSW_CNT0__PTR__SHIFT 0x10
17953#define GDS_CS_CTXSW_CNT1__UPDN_MASK 0xffff
17954#define GDS_CS_CTXSW_CNT1__UPDN__SHIFT 0x0
17955#define GDS_CS_CTXSW_CNT1__PTR_MASK 0xffff0000
17956#define GDS_CS_CTXSW_CNT1__PTR__SHIFT 0x10
17957#define GDS_CS_CTXSW_CNT2__UPDN_MASK 0xffff
17958#define GDS_CS_CTXSW_CNT2__UPDN__SHIFT 0x0
17959#define GDS_CS_CTXSW_CNT2__PTR_MASK 0xffff0000
17960#define GDS_CS_CTXSW_CNT2__PTR__SHIFT 0x10
17961#define GDS_CS_CTXSW_CNT3__UPDN_MASK 0xffff
17962#define GDS_CS_CTXSW_CNT3__UPDN__SHIFT 0x0
17963#define GDS_CS_CTXSW_CNT3__PTR_MASK 0xffff0000
17964#define GDS_CS_CTXSW_CNT3__PTR__SHIFT 0x10
17965#define GDS_GFX_CTXSW_STATUS__R_MASK 0x1
17966#define GDS_GFX_CTXSW_STATUS__R__SHIFT 0x0
17967#define GDS_GFX_CTXSW_STATUS__W_MASK 0x2
17968#define GDS_GFX_CTXSW_STATUS__W__SHIFT 0x1
17969#define GDS_GFX_CTXSW_STATUS__UNUSED_MASK 0xfffffffc
17970#define GDS_GFX_CTXSW_STATUS__UNUSED__SHIFT 0x2
17971#define GDS_VS_CTXSW_CNT0__UPDN_MASK 0xffff
17972#define GDS_VS_CTXSW_CNT0__UPDN__SHIFT 0x0
17973#define GDS_VS_CTXSW_CNT0__PTR_MASK 0xffff0000
17974#define GDS_VS_CTXSW_CNT0__PTR__SHIFT 0x10
17975#define GDS_VS_CTXSW_CNT1__UPDN_MASK 0xffff
17976#define GDS_VS_CTXSW_CNT1__UPDN__SHIFT 0x0
17977#define GDS_VS_CTXSW_CNT1__PTR_MASK 0xffff0000
17978#define GDS_VS_CTXSW_CNT1__PTR__SHIFT 0x10
17979#define GDS_VS_CTXSW_CNT2__UPDN_MASK 0xffff
17980#define GDS_VS_CTXSW_CNT2__UPDN__SHIFT 0x0
17981#define GDS_VS_CTXSW_CNT2__PTR_MASK 0xffff0000
17982#define GDS_VS_CTXSW_CNT2__PTR__SHIFT 0x10
17983#define GDS_VS_CTXSW_CNT3__UPDN_MASK 0xffff
17984#define GDS_VS_CTXSW_CNT3__UPDN__SHIFT 0x0
17985#define GDS_VS_CTXSW_CNT3__PTR_MASK 0xffff0000
17986#define GDS_VS_CTXSW_CNT3__PTR__SHIFT 0x10
17987#define GDS_PS0_CTXSW_CNT0__UPDN_MASK 0xffff
17988#define GDS_PS0_CTXSW_CNT0__UPDN__SHIFT 0x0
17989#define GDS_PS0_CTXSW_CNT0__PTR_MASK 0xffff0000
17990#define GDS_PS0_CTXSW_CNT0__PTR__SHIFT 0x10
17991#define GDS_PS1_CTXSW_CNT0__UPDN_MASK 0xffff
17992#define GDS_PS1_CTXSW_CNT0__UPDN__SHIFT 0x0
17993#define GDS_PS1_CTXSW_CNT0__PTR_MASK 0xffff0000
17994#define GDS_PS1_CTXSW_CNT0__PTR__SHIFT 0x10
17995#define GDS_PS2_CTXSW_CNT0__UPDN_MASK 0xffff
17996#define GDS_PS2_CTXSW_CNT0__UPDN__SHIFT 0x0
17997#define GDS_PS2_CTXSW_CNT0__PTR_MASK 0xffff0000
17998#define GDS_PS2_CTXSW_CNT0__PTR__SHIFT 0x10
17999#define GDS_PS3_CTXSW_CNT0__UPDN_MASK 0xffff
18000#define GDS_PS3_CTXSW_CNT0__UPDN__SHIFT 0x0
18001#define GDS_PS3_CTXSW_CNT0__PTR_MASK 0xffff0000
18002#define GDS_PS3_CTXSW_CNT0__PTR__SHIFT 0x10
18003#define GDS_PS4_CTXSW_CNT0__UPDN_MASK 0xffff
18004#define GDS_PS4_CTXSW_CNT0__UPDN__SHIFT 0x0
18005#define GDS_PS4_CTXSW_CNT0__PTR_MASK 0xffff0000
18006#define GDS_PS4_CTXSW_CNT0__PTR__SHIFT 0x10
18007#define GDS_PS5_CTXSW_CNT0__UPDN_MASK 0xffff
18008#define GDS_PS5_CTXSW_CNT0__UPDN__SHIFT 0x0
18009#define GDS_PS5_CTXSW_CNT0__PTR_MASK 0xffff0000
18010#define GDS_PS5_CTXSW_CNT0__PTR__SHIFT 0x10
18011#define GDS_PS6_CTXSW_CNT0__UPDN_MASK 0xffff
18012#define GDS_PS6_CTXSW_CNT0__UPDN__SHIFT 0x0
18013#define GDS_PS6_CTXSW_CNT0__PTR_MASK 0xffff0000
18014#define GDS_PS6_CTXSW_CNT0__PTR__SHIFT 0x10
18015#define GDS_PS7_CTXSW_CNT0__UPDN_MASK 0xffff
18016#define GDS_PS7_CTXSW_CNT0__UPDN__SHIFT 0x0
18017#define GDS_PS7_CTXSW_CNT0__PTR_MASK 0xffff0000
18018#define GDS_PS7_CTXSW_CNT0__PTR__SHIFT 0x10
18019#define GDS_PS0_CTXSW_CNT1__UPDN_MASK 0xffff
18020#define GDS_PS0_CTXSW_CNT1__UPDN__SHIFT 0x0
18021#define GDS_PS0_CTXSW_CNT1__PTR_MASK 0xffff0000
18022#define GDS_PS0_CTXSW_CNT1__PTR__SHIFT 0x10
18023#define GDS_PS1_CTXSW_CNT1__UPDN_MASK 0xffff
18024#define GDS_PS1_CTXSW_CNT1__UPDN__SHIFT 0x0
18025#define GDS_PS1_CTXSW_CNT1__PTR_MASK 0xffff0000
18026#define GDS_PS1_CTXSW_CNT1__PTR__SHIFT 0x10
18027#define GDS_PS2_CTXSW_CNT1__UPDN_MASK 0xffff
18028#define GDS_PS2_CTXSW_CNT1__UPDN__SHIFT 0x0
18029#define GDS_PS2_CTXSW_CNT1__PTR_MASK 0xffff0000
18030#define GDS_PS2_CTXSW_CNT1__PTR__SHIFT 0x10
18031#define GDS_PS3_CTXSW_CNT1__UPDN_MASK 0xffff
18032#define GDS_PS3_CTXSW_CNT1__UPDN__SHIFT 0x0
18033#define GDS_PS3_CTXSW_CNT1__PTR_MASK 0xffff0000
18034#define GDS_PS3_CTXSW_CNT1__PTR__SHIFT 0x10
18035#define GDS_PS4_CTXSW_CNT1__UPDN_MASK 0xffff
18036#define GDS_PS4_CTXSW_CNT1__UPDN__SHIFT 0x0
18037#define GDS_PS4_CTXSW_CNT1__PTR_MASK 0xffff0000
18038#define GDS_PS4_CTXSW_CNT1__PTR__SHIFT 0x10
18039#define GDS_PS5_CTXSW_CNT1__UPDN_MASK 0xffff
18040#define GDS_PS5_CTXSW_CNT1__UPDN__SHIFT 0x0
18041#define GDS_PS5_CTXSW_CNT1__PTR_MASK 0xffff0000
18042#define GDS_PS5_CTXSW_CNT1__PTR__SHIFT 0x10
18043#define GDS_PS6_CTXSW_CNT1__UPDN_MASK 0xffff
18044#define GDS_PS6_CTXSW_CNT1__UPDN__SHIFT 0x0
18045#define GDS_PS6_CTXSW_CNT1__PTR_MASK 0xffff0000
18046#define GDS_PS6_CTXSW_CNT1__PTR__SHIFT 0x10
18047#define GDS_PS7_CTXSW_CNT1__UPDN_MASK 0xffff
18048#define GDS_PS7_CTXSW_CNT1__UPDN__SHIFT 0x0
18049#define GDS_PS7_CTXSW_CNT1__PTR_MASK 0xffff0000
18050#define GDS_PS7_CTXSW_CNT1__PTR__SHIFT 0x10
18051#define GDS_PS0_CTXSW_CNT2__UPDN_MASK 0xffff
18052#define GDS_PS0_CTXSW_CNT2__UPDN__SHIFT 0x0
18053#define GDS_PS0_CTXSW_CNT2__PTR_MASK 0xffff0000
18054#define GDS_PS0_CTXSW_CNT2__PTR__SHIFT 0x10
18055#define GDS_PS1_CTXSW_CNT2__UPDN_MASK 0xffff
18056#define GDS_PS1_CTXSW_CNT2__UPDN__SHIFT 0x0
18057#define GDS_PS1_CTXSW_CNT2__PTR_MASK 0xffff0000
18058#define GDS_PS1_CTXSW_CNT2__PTR__SHIFT 0x10
18059#define GDS_PS2_CTXSW_CNT2__UPDN_MASK 0xffff
18060#define GDS_PS2_CTXSW_CNT2__UPDN__SHIFT 0x0
18061#define GDS_PS2_CTXSW_CNT2__PTR_MASK 0xffff0000
18062#define GDS_PS2_CTXSW_CNT2__PTR__SHIFT 0x10
18063#define GDS_PS3_CTXSW_CNT2__UPDN_MASK 0xffff
18064#define GDS_PS3_CTXSW_CNT2__UPDN__SHIFT 0x0
18065#define GDS_PS3_CTXSW_CNT2__PTR_MASK 0xffff0000
18066#define GDS_PS3_CTXSW_CNT2__PTR__SHIFT 0x10
18067#define GDS_PS4_CTXSW_CNT2__UPDN_MASK 0xffff
18068#define GDS_PS4_CTXSW_CNT2__UPDN__SHIFT 0x0
18069#define GDS_PS4_CTXSW_CNT2__PTR_MASK 0xffff0000
18070#define GDS_PS4_CTXSW_CNT2__PTR__SHIFT 0x10
18071#define GDS_PS5_CTXSW_CNT2__UPDN_MASK 0xffff
18072#define GDS_PS5_CTXSW_CNT2__UPDN__SHIFT 0x0
18073#define GDS_PS5_CTXSW_CNT2__PTR_MASK 0xffff0000
18074#define GDS_PS5_CTXSW_CNT2__PTR__SHIFT 0x10
18075#define GDS_PS6_CTXSW_CNT2__UPDN_MASK 0xffff
18076#define GDS_PS6_CTXSW_CNT2__UPDN__SHIFT 0x0
18077#define GDS_PS6_CTXSW_CNT2__PTR_MASK 0xffff0000
18078#define GDS_PS6_CTXSW_CNT2__PTR__SHIFT 0x10
18079#define GDS_PS7_CTXSW_CNT2__UPDN_MASK 0xffff
18080#define GDS_PS7_CTXSW_CNT2__UPDN__SHIFT 0x0
18081#define GDS_PS7_CTXSW_CNT2__PTR_MASK 0xffff0000
18082#define GDS_PS7_CTXSW_CNT2__PTR__SHIFT 0x10
18083#define GDS_PS0_CTXSW_CNT3__UPDN_MASK 0xffff
18084#define GDS_PS0_CTXSW_CNT3__UPDN__SHIFT 0x0
18085#define GDS_PS0_CTXSW_CNT3__PTR_MASK 0xffff0000
18086#define GDS_PS0_CTXSW_CNT3__PTR__SHIFT 0x10
18087#define GDS_PS1_CTXSW_CNT3__UPDN_MASK 0xffff
18088#define GDS_PS1_CTXSW_CNT3__UPDN__SHIFT 0x0
18089#define GDS_PS1_CTXSW_CNT3__PTR_MASK 0xffff0000
18090#define GDS_PS1_CTXSW_CNT3__PTR__SHIFT 0x10
18091#define GDS_PS2_CTXSW_CNT3__UPDN_MASK 0xffff
18092#define GDS_PS2_CTXSW_CNT3__UPDN__SHIFT 0x0
18093#define GDS_PS2_CTXSW_CNT3__PTR_MASK 0xffff0000
18094#define GDS_PS2_CTXSW_CNT3__PTR__SHIFT 0x10
18095#define GDS_PS3_CTXSW_CNT3__UPDN_MASK 0xffff
18096#define GDS_PS3_CTXSW_CNT3__UPDN__SHIFT 0x0
18097#define GDS_PS3_CTXSW_CNT3__PTR_MASK 0xffff0000
18098#define GDS_PS3_CTXSW_CNT3__PTR__SHIFT 0x10
18099#define GDS_PS4_CTXSW_CNT3__UPDN_MASK 0xffff
18100#define GDS_PS4_CTXSW_CNT3__UPDN__SHIFT 0x0
18101#define GDS_PS4_CTXSW_CNT3__PTR_MASK 0xffff0000
18102#define GDS_PS4_CTXSW_CNT3__PTR__SHIFT 0x10
18103#define GDS_PS5_CTXSW_CNT3__UPDN_MASK 0xffff
18104#define GDS_PS5_CTXSW_CNT3__UPDN__SHIFT 0x0
18105#define GDS_PS5_CTXSW_CNT3__PTR_MASK 0xffff0000
18106#define GDS_PS5_CTXSW_CNT3__PTR__SHIFT 0x10
18107#define GDS_PS6_CTXSW_CNT3__UPDN_MASK 0xffff
18108#define GDS_PS6_CTXSW_CNT3__UPDN__SHIFT 0x0
18109#define GDS_PS6_CTXSW_CNT3__PTR_MASK 0xffff0000
18110#define GDS_PS6_CTXSW_CNT3__PTR__SHIFT 0x10
18111#define GDS_PS7_CTXSW_CNT3__UPDN_MASK 0xffff
18112#define GDS_PS7_CTXSW_CNT3__UPDN__SHIFT 0x0
18113#define GDS_PS7_CTXSW_CNT3__PTR_MASK 0xffff0000
18114#define GDS_PS7_CTXSW_CNT3__PTR__SHIFT 0x10
18115#define CS_COPY_STATE__SRC_STATE_ID_MASK 0x7
18116#define CS_COPY_STATE__SRC_STATE_ID__SHIFT 0x0
18117#define GFX_COPY_STATE__SRC_STATE_ID_MASK 0x7
18118#define GFX_COPY_STATE__SRC_STATE_ID__SHIFT 0x0
18119#define VGT_DRAW_INITIATOR__SOURCE_SELECT_MASK 0x3
18120#define VGT_DRAW_INITIATOR__SOURCE_SELECT__SHIFT 0x0
18121#define VGT_DRAW_INITIATOR__MAJOR_MODE_MASK 0xc
18122#define VGT_DRAW_INITIATOR__MAJOR_MODE__SHIFT 0x2
18123#define VGT_DRAW_INITIATOR__SPRITE_EN_R6XX_MASK 0x10
18124#define VGT_DRAW_INITIATOR__SPRITE_EN_R6XX__SHIFT 0x4
18125#define VGT_DRAW_INITIATOR__NOT_EOP_MASK 0x20
18126#define VGT_DRAW_INITIATOR__NOT_EOP__SHIFT 0x5
18127#define VGT_DRAW_INITIATOR__USE_OPAQUE_MASK 0x40
18128#define VGT_DRAW_INITIATOR__USE_OPAQUE__SHIFT 0x6
18129#define VGT_EVENT_INITIATOR__EVENT_TYPE_MASK 0x3f
18130#define VGT_EVENT_INITIATOR__EVENT_TYPE__SHIFT 0x0
18131#define VGT_EVENT_INITIATOR__ADDRESS_HI_MASK 0x7fc0000
18132#define VGT_EVENT_INITIATOR__ADDRESS_HI__SHIFT 0x12
18133#define VGT_EVENT_INITIATOR__EXTENDED_EVENT_MASK 0x8000000
18134#define VGT_EVENT_INITIATOR__EXTENDED_EVENT__SHIFT 0x1b
18135#define VGT_EVENT_ADDRESS_REG__ADDRESS_LOW_MASK 0xfffffff
18136#define VGT_EVENT_ADDRESS_REG__ADDRESS_LOW__SHIFT 0x0
18137#define VGT_DMA_BASE_HI__BASE_ADDR_MASK 0xff
18138#define VGT_DMA_BASE_HI__BASE_ADDR__SHIFT 0x0
18139#define VGT_DMA_BASE__BASE_ADDR_MASK 0xffffffff
18140#define VGT_DMA_BASE__BASE_ADDR__SHIFT 0x0
18141#define VGT_DMA_INDEX_TYPE__INDEX_TYPE_MASK 0x3
18142#define VGT_DMA_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
18143#define VGT_DMA_INDEX_TYPE__SWAP_MODE_MASK 0xc
18144#define VGT_DMA_INDEX_TYPE__SWAP_MODE__SHIFT 0x2
18145#define VGT_DMA_INDEX_TYPE__BUF_TYPE_MASK 0x30
18146#define VGT_DMA_INDEX_TYPE__BUF_TYPE__SHIFT 0x4
18147#define VGT_DMA_INDEX_TYPE__RDREQ_POLICY_MASK 0x40
18148#define VGT_DMA_INDEX_TYPE__RDREQ_POLICY__SHIFT 0x6
18149#define VGT_DMA_INDEX_TYPE__NOT_EOP_MASK 0x200
18150#define VGT_DMA_INDEX_TYPE__NOT_EOP__SHIFT 0x9
18151#define VGT_DMA_INDEX_TYPE__REQ_PATH_MASK 0x400
18152#define VGT_DMA_INDEX_TYPE__REQ_PATH__SHIFT 0xa
18153#define VGT_DMA_INDEX_TYPE__MTYPE_MASK 0x1800
18154#define VGT_DMA_INDEX_TYPE__MTYPE__SHIFT 0xb
18155#define VGT_DMA_NUM_INSTANCES__NUM_INSTANCES_MASK 0xffffffff
18156#define VGT_DMA_NUM_INSTANCES__NUM_INSTANCES__SHIFT 0x0
18157#define IA_ENHANCE__MISC_MASK 0xffffffff
18158#define IA_ENHANCE__MISC__SHIFT 0x0
18159#define VGT_DMA_SIZE__NUM_INDICES_MASK 0xffffffff
18160#define VGT_DMA_SIZE__NUM_INDICES__SHIFT 0x0
18161#define VGT_DMA_MAX_SIZE__MAX_SIZE_MASK 0xffffffff
18162#define VGT_DMA_MAX_SIZE__MAX_SIZE__SHIFT 0x0
18163#define VGT_DMA_PRIMITIVE_TYPE__PRIM_TYPE_MASK 0x3f
18164#define VGT_DMA_PRIMITIVE_TYPE__PRIM_TYPE__SHIFT 0x0
18165#define VGT_DMA_CONTROL__PRIMGROUP_SIZE_MASK 0xffff
18166#define VGT_DMA_CONTROL__PRIMGROUP_SIZE__SHIFT 0x0
18167#define VGT_DMA_CONTROL__IA_SWITCH_ON_EOP_MASK 0x20000
18168#define VGT_DMA_CONTROL__IA_SWITCH_ON_EOP__SHIFT 0x11
18169#define VGT_DMA_CONTROL__WD_SWITCH_ON_EOP_MASK 0x100000
18170#define VGT_DMA_CONTROL__WD_SWITCH_ON_EOP__SHIFT 0x14
18171#define VGT_IMMED_DATA__DATA_MASK 0xffffffff
18172#define VGT_IMMED_DATA__DATA__SHIFT 0x0
18173#define VGT_INDEX_TYPE__INDEX_TYPE_MASK 0x3
18174#define VGT_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
18175#define VGT_NUM_INDICES__NUM_INDICES_MASK 0xffffffff
18176#define VGT_NUM_INDICES__NUM_INDICES__SHIFT 0x0
18177#define VGT_NUM_INSTANCES__NUM_INSTANCES_MASK 0xffffffff
18178#define VGT_NUM_INSTANCES__NUM_INSTANCES__SHIFT 0x0
18179#define VGT_PRIMITIVE_TYPE__PRIM_TYPE_MASK 0x3f
18180#define VGT_PRIMITIVE_TYPE__PRIM_TYPE__SHIFT 0x0
18181#define VGT_PRIMITIVEID_EN__PRIMITIVEID_EN_MASK 0x1
18182#define VGT_PRIMITIVEID_EN__PRIMITIVEID_EN__SHIFT 0x0
18183#define VGT_PRIMITIVEID_EN__DISABLE_RESET_ON_EOI_MASK 0x2
18184#define VGT_PRIMITIVEID_EN__DISABLE_RESET_ON_EOI__SHIFT 0x1
18185#define VGT_PRIMITIVEID_RESET__VALUE_MASK 0xffffffff
18186#define VGT_PRIMITIVEID_RESET__VALUE__SHIFT 0x0
18187#define VGT_VTX_CNT_EN__VTX_CNT_EN_MASK 0x1
18188#define VGT_VTX_CNT_EN__VTX_CNT_EN__SHIFT 0x0
18189#define VGT_REUSE_OFF__REUSE_OFF_MASK 0x1
18190#define VGT_REUSE_OFF__REUSE_OFF__SHIFT 0x0
18191#define VGT_INSTANCE_STEP_RATE_0__STEP_RATE_MASK 0xffffffff
18192#define VGT_INSTANCE_STEP_RATE_0__STEP_RATE__SHIFT 0x0
18193#define VGT_INSTANCE_STEP_RATE_1__STEP_RATE_MASK 0xffffffff
18194#define VGT_INSTANCE_STEP_RATE_1__STEP_RATE__SHIFT 0x0
18195#define VGT_MAX_VTX_INDX__MAX_INDX_MASK 0xffffffff
18196#define VGT_MAX_VTX_INDX__MAX_INDX__SHIFT 0x0
18197#define VGT_MIN_VTX_INDX__MIN_INDX_MASK 0xffffffff
18198#define VGT_MIN_VTX_INDX__MIN_INDX__SHIFT 0x0
18199#define VGT_INDX_OFFSET__INDX_OFFSET_MASK 0xffffffff
18200#define VGT_INDX_OFFSET__INDX_OFFSET__SHIFT 0x0
18201#define VGT_VERTEX_REUSE_BLOCK_CNTL__VTX_REUSE_DEPTH_MASK 0xff
18202#define VGT_VERTEX_REUSE_BLOCK_CNTL__VTX_REUSE_DEPTH__SHIFT 0x0
18203#define VGT_OUT_DEALLOC_CNTL__DEALLOC_DIST_MASK 0x7f
18204#define VGT_OUT_DEALLOC_CNTL__DEALLOC_DIST__SHIFT 0x0
18205#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX_MASK 0xffffffff
18206#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX__SHIFT 0x0
18207#define VGT_MULTI_PRIM_IB_RESET_EN__RESET_EN_MASK 0x1
18208#define VGT_MULTI_PRIM_IB_RESET_EN__RESET_EN__SHIFT 0x0
18209#define VGT_ENHANCE__MISC_MASK 0xffffffff
18210#define VGT_ENHANCE__MISC__SHIFT 0x0
18211#define VGT_OUTPUT_PATH_CNTL__PATH_SELECT_MASK 0x7
18212#define VGT_OUTPUT_PATH_CNTL__PATH_SELECT__SHIFT 0x0
18213#define VGT_HOS_CNTL__TESS_MODE_MASK 0x3
18214#define VGT_HOS_CNTL__TESS_MODE__SHIFT 0x0
18215#define VGT_HOS_MAX_TESS_LEVEL__MAX_TESS_MASK 0xffffffff
18216#define VGT_HOS_MAX_TESS_LEVEL__MAX_TESS__SHIFT 0x0
18217#define VGT_HOS_MIN_TESS_LEVEL__MIN_TESS_MASK 0xffffffff
18218#define VGT_HOS_MIN_TESS_LEVEL__MIN_TESS__SHIFT 0x0
18219#define VGT_HOS_REUSE_DEPTH__REUSE_DEPTH_MASK 0xff
18220#define VGT_HOS_REUSE_DEPTH__REUSE_DEPTH__SHIFT 0x0
18221#define VGT_GROUP_PRIM_TYPE__PRIM_TYPE_MASK 0x1f
18222#define VGT_GROUP_PRIM_TYPE__PRIM_TYPE__SHIFT 0x0
18223#define VGT_GROUP_PRIM_TYPE__RETAIN_ORDER_MASK 0x4000
18224#define VGT_GROUP_PRIM_TYPE__RETAIN_ORDER__SHIFT 0xe
18225#define VGT_GROUP_PRIM_TYPE__RETAIN_QUADS_MASK 0x8000
18226#define VGT_GROUP_PRIM_TYPE__RETAIN_QUADS__SHIFT 0xf
18227#define VGT_GROUP_PRIM_TYPE__PRIM_ORDER_MASK 0x70000
18228#define VGT_GROUP_PRIM_TYPE__PRIM_ORDER__SHIFT 0x10
18229#define VGT_GROUP_FIRST_DECR__FIRST_DECR_MASK 0xf
18230#define VGT_GROUP_FIRST_DECR__FIRST_DECR__SHIFT 0x0
18231#define VGT_GROUP_DECR__DECR_MASK 0xf
18232#define VGT_GROUP_DECR__DECR__SHIFT 0x0
18233#define VGT_GROUP_VECT_0_CNTL__COMP_X_EN_MASK 0x1
18234#define VGT_GROUP_VECT_0_CNTL__COMP_X_EN__SHIFT 0x0
18235#define VGT_GROUP_VECT_0_CNTL__COMP_Y_EN_MASK 0x2
18236#define VGT_GROUP_VECT_0_CNTL__COMP_Y_EN__SHIFT 0x1
18237#define VGT_GROUP_VECT_0_CNTL__COMP_Z_EN_MASK 0x4
18238#define VGT_GROUP_VECT_0_CNTL__COMP_Z_EN__SHIFT 0x2
18239#define VGT_GROUP_VECT_0_CNTL__COMP_W_EN_MASK 0x8
18240#define VGT_GROUP_VECT_0_CNTL__COMP_W_EN__SHIFT 0x3
18241#define VGT_GROUP_VECT_0_CNTL__STRIDE_MASK 0xff00
18242#define VGT_GROUP_VECT_0_CNTL__STRIDE__SHIFT 0x8
18243#define VGT_GROUP_VECT_0_CNTL__SHIFT_MASK 0xff0000
18244#define VGT_GROUP_VECT_0_CNTL__SHIFT__SHIFT 0x10
18245#define VGT_GROUP_VECT_1_CNTL__COMP_X_EN_MASK 0x1
18246#define VGT_GROUP_VECT_1_CNTL__COMP_X_EN__SHIFT 0x0
18247#define VGT_GROUP_VECT_1_CNTL__COMP_Y_EN_MASK 0x2
18248#define VGT_GROUP_VECT_1_CNTL__COMP_Y_EN__SHIFT 0x1
18249#define VGT_GROUP_VECT_1_CNTL__COMP_Z_EN_MASK 0x4
18250#define VGT_GROUP_VECT_1_CNTL__COMP_Z_EN__SHIFT 0x2
18251#define VGT_GROUP_VECT_1_CNTL__COMP_W_EN_MASK 0x8
18252#define VGT_GROUP_VECT_1_CNTL__COMP_W_EN__SHIFT 0x3
18253#define VGT_GROUP_VECT_1_CNTL__STRIDE_MASK 0xff00
18254#define VGT_GROUP_VECT_1_CNTL__STRIDE__SHIFT 0x8
18255#define VGT_GROUP_VECT_1_CNTL__SHIFT_MASK 0xff0000
18256#define VGT_GROUP_VECT_1_CNTL__SHIFT__SHIFT 0x10
18257#define VGT_GROUP_VECT_0_FMT_CNTL__X_CONV_MASK 0xf
18258#define VGT_GROUP_VECT_0_FMT_CNTL__X_CONV__SHIFT 0x0
18259#define VGT_GROUP_VECT_0_FMT_CNTL__X_OFFSET_MASK 0xf0
18260#define VGT_GROUP_VECT_0_FMT_CNTL__X_OFFSET__SHIFT 0x4
18261#define VGT_GROUP_VECT_0_FMT_CNTL__Y_CONV_MASK 0xf00
18262#define VGT_GROUP_VECT_0_FMT_CNTL__Y_CONV__SHIFT 0x8
18263#define VGT_GROUP_VECT_0_FMT_CNTL__Y_OFFSET_MASK 0xf000
18264#define VGT_GROUP_VECT_0_FMT_CNTL__Y_OFFSET__SHIFT 0xc
18265#define VGT_GROUP_VECT_0_FMT_CNTL__Z_CONV_MASK 0xf0000
18266#define VGT_GROUP_VECT_0_FMT_CNTL__Z_CONV__SHIFT 0x10
18267#define VGT_GROUP_VECT_0_FMT_CNTL__Z_OFFSET_MASK 0xf00000
18268#define VGT_GROUP_VECT_0_FMT_CNTL__Z_OFFSET__SHIFT 0x14
18269#define VGT_GROUP_VECT_0_FMT_CNTL__W_CONV_MASK 0xf000000
18270#define VGT_GROUP_VECT_0_FMT_CNTL__W_CONV__SHIFT 0x18
18271#define VGT_GROUP_VECT_0_FMT_CNTL__W_OFFSET_MASK 0xf0000000
18272#define VGT_GROUP_VECT_0_FMT_CNTL__W_OFFSET__SHIFT 0x1c
18273#define VGT_GROUP_VECT_1_FMT_CNTL__X_CONV_MASK 0xf
18274#define VGT_GROUP_VECT_1_FMT_CNTL__X_CONV__SHIFT 0x0
18275#define VGT_GROUP_VECT_1_FMT_CNTL__X_OFFSET_MASK 0xf0
18276#define VGT_GROUP_VECT_1_FMT_CNTL__X_OFFSET__SHIFT 0x4
18277#define VGT_GROUP_VECT_1_FMT_CNTL__Y_CONV_MASK 0xf00
18278#define VGT_GROUP_VECT_1_FMT_CNTL__Y_CONV__SHIFT 0x8
18279#define VGT_GROUP_VECT_1_FMT_CNTL__Y_OFFSET_MASK 0xf000
18280#define VGT_GROUP_VECT_1_FMT_CNTL__Y_OFFSET__SHIFT 0xc
18281#define VGT_GROUP_VECT_1_FMT_CNTL__Z_CONV_MASK 0xf0000
18282#define VGT_GROUP_VECT_1_FMT_CNTL__Z_CONV__SHIFT 0x10
18283#define VGT_GROUP_VECT_1_FMT_CNTL__Z_OFFSET_MASK 0xf00000
18284#define VGT_GROUP_VECT_1_FMT_CNTL__Z_OFFSET__SHIFT 0x14
18285#define VGT_GROUP_VECT_1_FMT_CNTL__W_CONV_MASK 0xf000000
18286#define VGT_GROUP_VECT_1_FMT_CNTL__W_CONV__SHIFT 0x18
18287#define VGT_GROUP_VECT_1_FMT_CNTL__W_OFFSET_MASK 0xf0000000
18288#define VGT_GROUP_VECT_1_FMT_CNTL__W_OFFSET__SHIFT 0x1c
18289#define VGT_VTX_VECT_EJECT_REG__PRIM_COUNT_MASK 0x3ff
18290#define VGT_VTX_VECT_EJECT_REG__PRIM_COUNT__SHIFT 0x0
18291#define VGT_DMA_DATA_FIFO_DEPTH__DMA_DATA_FIFO_DEPTH_MASK 0x1ff
18292#define VGT_DMA_DATA_FIFO_DEPTH__DMA_DATA_FIFO_DEPTH__SHIFT 0x0
18293#define VGT_DMA_DATA_FIFO_DEPTH__DMA2DRAW_FIFO_DEPTH_MASK 0x7fe00
18294#define VGT_DMA_DATA_FIFO_DEPTH__DMA2DRAW_FIFO_DEPTH__SHIFT 0x9
18295#define VGT_DMA_REQ_FIFO_DEPTH__DMA_REQ_FIFO_DEPTH_MASK 0x3f
18296#define VGT_DMA_REQ_FIFO_DEPTH__DMA_REQ_FIFO_DEPTH__SHIFT 0x0
18297#define VGT_DRAW_INIT_FIFO_DEPTH__DRAW_INIT_FIFO_DEPTH_MASK 0x3f
18298#define VGT_DRAW_INIT_FIFO_DEPTH__DRAW_INIT_FIFO_DEPTH__SHIFT 0x0
18299#define VGT_LAST_COPY_STATE__SRC_STATE_ID_MASK 0x7
18300#define VGT_LAST_COPY_STATE__SRC_STATE_ID__SHIFT 0x0
18301#define VGT_LAST_COPY_STATE__DST_STATE_ID_MASK 0x70000
18302#define VGT_LAST_COPY_STATE__DST_STATE_ID__SHIFT 0x10
18303#define CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK 0xffff0000
18304#define CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT 0x10
18305#define GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK 0xffff0000
18306#define GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT 0x10
18307#define VGT_GS_MODE__MODE_MASK 0x7
18308#define VGT_GS_MODE__MODE__SHIFT 0x0
18309#define VGT_GS_MODE__RESERVED_0_MASK 0x8
18310#define VGT_GS_MODE__RESERVED_0__SHIFT 0x3
18311#define VGT_GS_MODE__CUT_MODE_MASK 0x30
18312#define VGT_GS_MODE__CUT_MODE__SHIFT 0x4
18313#define VGT_GS_MODE__RESERVED_1_MASK 0x7c0
18314#define VGT_GS_MODE__RESERVED_1__SHIFT 0x6
18315#define VGT_GS_MODE__GS_C_PACK_EN_MASK 0x800
18316#define VGT_GS_MODE__GS_C_PACK_EN__SHIFT 0xb
18317#define VGT_GS_MODE__RESERVED_2_MASK 0x1000
18318#define VGT_GS_MODE__RESERVED_2__SHIFT 0xc
18319#define VGT_GS_MODE__ES_PASSTHRU_MASK 0x2000
18320#define VGT_GS_MODE__ES_PASSTHRU__SHIFT 0xd
18321#define VGT_GS_MODE__RESERVED_3_MASK 0x4000
18322#define VGT_GS_MODE__RESERVED_3__SHIFT 0xe
18323#define VGT_GS_MODE__RESERVED_4_MASK 0x8000
18324#define VGT_GS_MODE__RESERVED_4__SHIFT 0xf
18325#define VGT_GS_MODE__RESERVED_5_MASK 0x10000
18326#define VGT_GS_MODE__RESERVED_5__SHIFT 0x10
18327#define VGT_GS_MODE__PARTIAL_THD_AT_EOI_MASK 0x20000
18328#define VGT_GS_MODE__PARTIAL_THD_AT_EOI__SHIFT 0x11
18329#define VGT_GS_MODE__SUPPRESS_CUTS_MASK 0x40000
18330#define VGT_GS_MODE__SUPPRESS_CUTS__SHIFT 0x12
18331#define VGT_GS_MODE__ES_WRITE_OPTIMIZE_MASK 0x80000
18332#define VGT_GS_MODE__ES_WRITE_OPTIMIZE__SHIFT 0x13
18333#define VGT_GS_MODE__GS_WRITE_OPTIMIZE_MASK 0x100000
18334#define VGT_GS_MODE__GS_WRITE_OPTIMIZE__SHIFT 0x14
18335#define VGT_GS_MODE__ONCHIP_MASK 0x600000
18336#define VGT_GS_MODE__ONCHIP__SHIFT 0x15
18337#define VGT_GS_ONCHIP_CNTL__ES_VERTS_PER_SUBGRP_MASK 0x7ff
18338#define VGT_GS_ONCHIP_CNTL__ES_VERTS_PER_SUBGRP__SHIFT 0x0
18339#define VGT_GS_ONCHIP_CNTL__GS_PRIMS_PER_SUBGRP_MASK 0x3ff800
18340#define VGT_GS_ONCHIP_CNTL__GS_PRIMS_PER_SUBGRP__SHIFT 0xb
18341#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_MASK 0x3f
18342#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE__SHIFT 0x0
18343#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_1_MASK 0x3f00
18344#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_1__SHIFT 0x8
18345#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_2_MASK 0x3f0000
18346#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_2__SHIFT 0x10
18347#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_3_MASK 0xfc00000
18348#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_3__SHIFT 0x16
18349#define VGT_GS_OUT_PRIM_TYPE__UNIQUE_TYPE_PER_STREAM_MASK 0x80000000
18350#define VGT_GS_OUT_PRIM_TYPE__UNIQUE_TYPE_PER_STREAM__SHIFT 0x1f
18351#define VGT_CACHE_INVALIDATION__CACHE_INVALIDATION_MASK 0x3
18352#define VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT 0x0
18353#define VGT_CACHE_INVALIDATION__DIS_INSTANCING_OPT_MASK 0x10
18354#define VGT_CACHE_INVALIDATION__DIS_INSTANCING_OPT__SHIFT 0x4
18355#define VGT_CACHE_INVALIDATION__VS_NO_EXTRA_BUFFER_MASK 0x20
18356#define VGT_CACHE_INVALIDATION__VS_NO_EXTRA_BUFFER__SHIFT 0x5
18357#define VGT_CACHE_INVALIDATION__AUTO_INVLD_EN_MASK 0xc0
18358#define VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT 0x6
18359#define VGT_CACHE_INVALIDATION__USE_GS_DONE_MASK 0x200
18360#define VGT_CACHE_INVALIDATION__USE_GS_DONE__SHIFT 0x9
18361#define VGT_CACHE_INVALIDATION__DIS_RANGE_FULL_INVLD_MASK 0x800
18362#define VGT_CACHE_INVALIDATION__DIS_RANGE_FULL_INVLD__SHIFT 0xb
18363#define VGT_CACHE_INVALIDATION__GS_LATE_ALLOC_EN_MASK 0x1000
18364#define VGT_CACHE_INVALIDATION__GS_LATE_ALLOC_EN__SHIFT 0xc
18365#define VGT_CACHE_INVALIDATION__STREAMOUT_FULL_FLUSH_MASK 0x2000
18366#define VGT_CACHE_INVALIDATION__STREAMOUT_FULL_FLUSH__SHIFT 0xd
18367#define VGT_CACHE_INVALIDATION__ES_LIMIT_MASK 0x1f0000
18368#define VGT_CACHE_INVALIDATION__ES_LIMIT__SHIFT 0x10
18369#define VGT_RESET_DEBUG__GS_DISABLE_MASK 0x1
18370#define VGT_RESET_DEBUG__GS_DISABLE__SHIFT 0x0
18371#define VGT_RESET_DEBUG__TESS_DISABLE_MASK 0x2
18372#define VGT_RESET_DEBUG__TESS_DISABLE__SHIFT 0x1
18373#define VGT_RESET_DEBUG__WD_DISABLE_MASK 0x4
18374#define VGT_RESET_DEBUG__WD_DISABLE__SHIFT 0x2
18375#define VGT_STRMOUT_DELAY__SKIP_DELAY_MASK 0xff
18376#define VGT_STRMOUT_DELAY__SKIP_DELAY__SHIFT 0x0
18377#define VGT_STRMOUT_DELAY__SE0_WD_DELAY_MASK 0x700
18378#define VGT_STRMOUT_DELAY__SE0_WD_DELAY__SHIFT 0x8
18379#define VGT_STRMOUT_DELAY__SE1_WD_DELAY_MASK 0x3800
18380#define VGT_STRMOUT_DELAY__SE1_WD_DELAY__SHIFT 0xb
18381#define VGT_STRMOUT_DELAY__SE2_WD_DELAY_MASK 0x1c000
18382#define VGT_STRMOUT_DELAY__SE2_WD_DELAY__SHIFT 0xe
18383#define VGT_STRMOUT_DELAY__SE3_WD_DELAY_MASK 0xe0000
18384#define VGT_STRMOUT_DELAY__SE3_WD_DELAY__SHIFT 0x11
18385#define VGT_FIFO_DEPTHS__VS_DEALLOC_TBL_DEPTH_MASK 0x7f
18386#define VGT_FIFO_DEPTHS__VS_DEALLOC_TBL_DEPTH__SHIFT 0x0
18387#define VGT_FIFO_DEPTHS__RESERVED_0_MASK 0x80
18388#define VGT_FIFO_DEPTHS__RESERVED_0__SHIFT 0x7
18389#define VGT_FIFO_DEPTHS__CLIPP_FIFO_DEPTH_MASK 0x3fff00
18390#define VGT_FIFO_DEPTHS__CLIPP_FIFO_DEPTH__SHIFT 0x8
18391#define VGT_FIFO_DEPTHS__HSINPUT_FIFO_DEPTH_MASK 0xfc00000
18392#define VGT_FIFO_DEPTHS__HSINPUT_FIFO_DEPTH__SHIFT 0x16
18393#define VGT_GS_PER_ES__GS_PER_ES_MASK 0x7ff
18394#define VGT_GS_PER_ES__GS_PER_ES__SHIFT 0x0
18395#define VGT_ES_PER_GS__ES_PER_GS_MASK 0x7ff
18396#define VGT_ES_PER_GS__ES_PER_GS__SHIFT 0x0
18397#define VGT_GS_PER_VS__GS_PER_VS_MASK 0xf
18398#define VGT_GS_PER_VS__GS_PER_VS__SHIFT 0x0
18399#define VGT_GS_VERTEX_REUSE__VERT_REUSE_MASK 0x1f
18400#define VGT_GS_VERTEX_REUSE__VERT_REUSE__SHIFT 0x0
/*
 * Auto-generated AMD GCN register field definitions (excerpt).
 * For each register field there is a pair of object-like macros:
 *   <REG>__<FIELD>_MASK   - bit mask of the field within the 32-bit register
 *   <REG>__<FIELD>__SHIFT - bit position of the field's LSB
 * so a field is extracted as (reg & MASK) >> SHIFT.
 * NOTE(review): the scraped text had the viewer's line numbers fused onto
 * every line (e.g. "18401#define ..."); they are stripped here to restore
 * valid C. Values are unchanged.
 */
#define VGT_MC_LAT_CNTL__MC_TIME_STAMP_RES_MASK 0x3
#define VGT_MC_LAT_CNTL__MC_TIME_STAMP_RES__SHIFT 0x0
#define IA_CNTL_STATUS__IA_BUSY_MASK 0x1
#define IA_CNTL_STATUS__IA_BUSY__SHIFT 0x0
#define IA_CNTL_STATUS__IA_DMA_BUSY_MASK 0x2
#define IA_CNTL_STATUS__IA_DMA_BUSY__SHIFT 0x1
#define IA_CNTL_STATUS__IA_DMA_REQ_BUSY_MASK 0x4
#define IA_CNTL_STATUS__IA_DMA_REQ_BUSY__SHIFT 0x2
#define IA_CNTL_STATUS__IA_GRP_BUSY_MASK 0x8
#define IA_CNTL_STATUS__IA_GRP_BUSY__SHIFT 0x3
#define IA_CNTL_STATUS__IA_ADC_BUSY_MASK 0x10
#define IA_CNTL_STATUS__IA_ADC_BUSY__SHIFT 0x4
#define VGT_STRMOUT_CONFIG__STREAMOUT_0_EN_MASK 0x1
#define VGT_STRMOUT_CONFIG__STREAMOUT_0_EN__SHIFT 0x0
#define VGT_STRMOUT_CONFIG__STREAMOUT_1_EN_MASK 0x2
#define VGT_STRMOUT_CONFIG__STREAMOUT_1_EN__SHIFT 0x1
#define VGT_STRMOUT_CONFIG__STREAMOUT_2_EN_MASK 0x4
#define VGT_STRMOUT_CONFIG__STREAMOUT_2_EN__SHIFT 0x2
#define VGT_STRMOUT_CONFIG__STREAMOUT_3_EN_MASK 0x8
#define VGT_STRMOUT_CONFIG__STREAMOUT_3_EN__SHIFT 0x3
#define VGT_STRMOUT_CONFIG__RAST_STREAM_MASK 0x70
#define VGT_STRMOUT_CONFIG__RAST_STREAM__SHIFT 0x4
#define VGT_STRMOUT_CONFIG__RAST_STREAM_MASK_MASK 0xf00
#define VGT_STRMOUT_CONFIG__RAST_STREAM_MASK__SHIFT 0x8
#define VGT_STRMOUT_CONFIG__USE_RAST_STREAM_MASK_MASK 0x80000000
#define VGT_STRMOUT_CONFIG__USE_RAST_STREAM_MASK__SHIFT 0x1f
#define VGT_STRMOUT_BUFFER_SIZE_0__SIZE_MASK 0xffffffff
#define VGT_STRMOUT_BUFFER_SIZE_0__SIZE__SHIFT 0x0
#define VGT_STRMOUT_BUFFER_SIZE_1__SIZE_MASK 0xffffffff
#define VGT_STRMOUT_BUFFER_SIZE_1__SIZE__SHIFT 0x0
#define VGT_STRMOUT_BUFFER_SIZE_2__SIZE_MASK 0xffffffff
#define VGT_STRMOUT_BUFFER_SIZE_2__SIZE__SHIFT 0x0
#define VGT_STRMOUT_BUFFER_SIZE_3__SIZE_MASK 0xffffffff
#define VGT_STRMOUT_BUFFER_SIZE_3__SIZE__SHIFT 0x0
#define VGT_STRMOUT_BUFFER_OFFSET_0__OFFSET_MASK 0xffffffff
#define VGT_STRMOUT_BUFFER_OFFSET_0__OFFSET__SHIFT 0x0
#define VGT_STRMOUT_BUFFER_OFFSET_1__OFFSET_MASK 0xffffffff
#define VGT_STRMOUT_BUFFER_OFFSET_1__OFFSET__SHIFT 0x0
#define VGT_STRMOUT_BUFFER_OFFSET_2__OFFSET_MASK 0xffffffff
#define VGT_STRMOUT_BUFFER_OFFSET_2__OFFSET__SHIFT 0x0
#define VGT_STRMOUT_BUFFER_OFFSET_3__OFFSET_MASK 0xffffffff
#define VGT_STRMOUT_BUFFER_OFFSET_3__OFFSET__SHIFT 0x0
#define VGT_STRMOUT_VTX_STRIDE_0__STRIDE_MASK 0x3ff
#define VGT_STRMOUT_VTX_STRIDE_0__STRIDE__SHIFT 0x0
#define VGT_STRMOUT_VTX_STRIDE_1__STRIDE_MASK 0x3ff
#define VGT_STRMOUT_VTX_STRIDE_1__STRIDE__SHIFT 0x0
#define VGT_STRMOUT_VTX_STRIDE_2__STRIDE_MASK 0x3ff
#define VGT_STRMOUT_VTX_STRIDE_2__STRIDE__SHIFT 0x0
#define VGT_STRMOUT_VTX_STRIDE_3__STRIDE_MASK 0x3ff
#define VGT_STRMOUT_VTX_STRIDE_3__STRIDE__SHIFT 0x0
#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_0_BUFFER_EN_MASK 0xf
#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_0_BUFFER_EN__SHIFT 0x0
#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_1_BUFFER_EN_MASK 0xf0
#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_1_BUFFER_EN__SHIFT 0x4
#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_2_BUFFER_EN_MASK 0xf00
#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_2_BUFFER_EN__SHIFT 0x8
#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_3_BUFFER_EN_MASK 0xf000
#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_3_BUFFER_EN__SHIFT 0xc
#define VGT_STRMOUT_BUFFER_FILLED_SIZE_0__SIZE_MASK 0xffffffff
#define VGT_STRMOUT_BUFFER_FILLED_SIZE_0__SIZE__SHIFT 0x0
#define VGT_STRMOUT_BUFFER_FILLED_SIZE_1__SIZE_MASK 0xffffffff
#define VGT_STRMOUT_BUFFER_FILLED_SIZE_1__SIZE__SHIFT 0x0
#define VGT_STRMOUT_BUFFER_FILLED_SIZE_2__SIZE_MASK 0xffffffff
#define VGT_STRMOUT_BUFFER_FILLED_SIZE_2__SIZE__SHIFT 0x0
#define VGT_STRMOUT_BUFFER_FILLED_SIZE_3__SIZE_MASK 0xffffffff
#define VGT_STRMOUT_BUFFER_FILLED_SIZE_3__SIZE__SHIFT 0x0
#define VGT_STRMOUT_DRAW_OPAQUE_OFFSET__OFFSET_MASK 0xffffffff
#define VGT_STRMOUT_DRAW_OPAQUE_OFFSET__OFFSET__SHIFT 0x0
#define VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE__SIZE_MASK 0xffffffff
#define VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE__SIZE__SHIFT 0x0
#define VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE__VERTEX_STRIDE_MASK 0x1ff
#define VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE__VERTEX_STRIDE__SHIFT 0x0
#define VGT_GS_MAX_VERT_OUT__MAX_VERT_OUT_MASK 0x7ff
#define VGT_GS_MAX_VERT_OUT__MAX_VERT_OUT__SHIFT 0x0
#define VGT_SHADER_STAGES_EN__LS_EN_MASK 0x3
#define VGT_SHADER_STAGES_EN__LS_EN__SHIFT 0x0
#define VGT_SHADER_STAGES_EN__HS_EN_MASK 0x4
#define VGT_SHADER_STAGES_EN__HS_EN__SHIFT 0x2
#define VGT_SHADER_STAGES_EN__ES_EN_MASK 0x18
#define VGT_SHADER_STAGES_EN__ES_EN__SHIFT 0x3
#define VGT_SHADER_STAGES_EN__GS_EN_MASK 0x20
#define VGT_SHADER_STAGES_EN__GS_EN__SHIFT 0x5
#define VGT_SHADER_STAGES_EN__VS_EN_MASK 0xc0
#define VGT_SHADER_STAGES_EN__VS_EN__SHIFT 0x6
#define VGT_SHADER_STAGES_EN__DYNAMIC_HS_MASK 0x100
#define VGT_SHADER_STAGES_EN__DYNAMIC_HS__SHIFT 0x8
#define VGT_SHADER_STAGES_EN__DISPATCH_DRAW_EN_MASK 0x200
#define VGT_SHADER_STAGES_EN__DISPATCH_DRAW_EN__SHIFT 0x9
#define VGT_SHADER_STAGES_EN__DIS_DEALLOC_ACCUM_0_MASK 0x400
#define VGT_SHADER_STAGES_EN__DIS_DEALLOC_ACCUM_0__SHIFT 0xa
#define VGT_SHADER_STAGES_EN__DIS_DEALLOC_ACCUM_1_MASK 0x800
#define VGT_SHADER_STAGES_EN__DIS_DEALLOC_ACCUM_1__SHIFT 0xb
#define VGT_SHADER_STAGES_EN__VS_WAVE_ID_EN_MASK 0x1000
#define VGT_SHADER_STAGES_EN__VS_WAVE_ID_EN__SHIFT 0xc
#define VGT_DISPATCH_DRAW_INDEX__MATCH_INDEX_MASK 0xffffffff
#define VGT_DISPATCH_DRAW_INDEX__MATCH_INDEX__SHIFT 0x0
#define VGT_LS_HS_CONFIG__NUM_PATCHES_MASK 0xff
#define VGT_LS_HS_CONFIG__NUM_PATCHES__SHIFT 0x0
#define VGT_LS_HS_CONFIG__HS_NUM_INPUT_CP_MASK 0x3f00
#define VGT_LS_HS_CONFIG__HS_NUM_INPUT_CP__SHIFT 0x8
#define VGT_LS_HS_CONFIG__HS_NUM_OUTPUT_CP_MASK 0xfc000
#define VGT_LS_HS_CONFIG__HS_NUM_OUTPUT_CP__SHIFT 0xe
#define VGT_DMA_LS_HS_CONFIG__HS_NUM_INPUT_CP_MASK 0x3f00
#define VGT_DMA_LS_HS_CONFIG__HS_NUM_INPUT_CP__SHIFT 0x8
#define VGT_TF_PARAM__TYPE_MASK 0x3
#define VGT_TF_PARAM__TYPE__SHIFT 0x0
#define VGT_TF_PARAM__PARTITIONING_MASK 0x1c
#define VGT_TF_PARAM__PARTITIONING__SHIFT 0x2
#define VGT_TF_PARAM__TOPOLOGY_MASK 0xe0
#define VGT_TF_PARAM__TOPOLOGY__SHIFT 0x5
#define VGT_TF_PARAM__RESERVED_REDUC_AXIS_MASK 0x100
#define VGT_TF_PARAM__RESERVED_REDUC_AXIS__SHIFT 0x8
#define VGT_TF_PARAM__DEPRECATED_MASK 0x200
#define VGT_TF_PARAM__DEPRECATED__SHIFT 0x9
#define VGT_TF_PARAM__NUM_DS_WAVES_PER_SIMD_MASK 0x3c00
#define VGT_TF_PARAM__NUM_DS_WAVES_PER_SIMD__SHIFT 0xa
#define VGT_TF_PARAM__DISABLE_DONUTS_MASK 0x4000
#define VGT_TF_PARAM__DISABLE_DONUTS__SHIFT 0xe
#define VGT_TF_PARAM__RDREQ_POLICY_MASK 0x8000
#define VGT_TF_PARAM__RDREQ_POLICY__SHIFT 0xf
#define VGT_TF_PARAM__DISTRIBUTION_MODE_MASK 0x60000
#define VGT_TF_PARAM__DISTRIBUTION_MODE__SHIFT 0x11
#define VGT_TF_PARAM__MTYPE_MASK 0x180000
#define VGT_TF_PARAM__MTYPE__SHIFT 0x13
#define VGT_TESS_DISTRIBUTION__ACCUM_ISOLINE_MASK 0xff
#define VGT_TESS_DISTRIBUTION__ACCUM_ISOLINE__SHIFT 0x0
#define VGT_TESS_DISTRIBUTION__ACCUM_TRI_MASK 0xff00
#define VGT_TESS_DISTRIBUTION__ACCUM_TRI__SHIFT 0x8
#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD_MASK 0xff0000
#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD__SHIFT 0x10
#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT_MASK 0xff000000
#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT__SHIFT 0x18
#define VGT_TF_RING_SIZE__SIZE_MASK 0xffff
#define VGT_TF_RING_SIZE__SIZE__SHIFT 0x0
#define VGT_SYS_CONFIG__DUAL_CORE_EN_MASK 0x1
#define VGT_SYS_CONFIG__DUAL_CORE_EN__SHIFT 0x0
#define VGT_SYS_CONFIG__MAX_LS_HS_THDGRP_MASK 0x7e
#define VGT_SYS_CONFIG__MAX_LS_HS_THDGRP__SHIFT 0x1
#define VGT_SYS_CONFIG__ADC_EVENT_FILTER_DISABLE_MASK 0x80
#define VGT_SYS_CONFIG__ADC_EVENT_FILTER_DISABLE__SHIFT 0x7
#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_BUFFERING_MASK 0x1ff
#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_BUFFERING__SHIFT 0x0
#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_GRANULARITY_MASK 0x600
#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_GRANULARITY__SHIFT 0x9
#define VGT_TF_MEMORY_BASE__BASE_MASK 0xffffffff
#define VGT_TF_MEMORY_BASE__BASE__SHIFT 0x0
#define VGT_GS_INSTANCE_CNT__ENABLE_MASK 0x1
#define VGT_GS_INSTANCE_CNT__ENABLE__SHIFT 0x0
#define VGT_GS_INSTANCE_CNT__CNT_MASK 0x1fc
#define VGT_GS_INSTANCE_CNT__CNT__SHIFT 0x2
#define IA_MULTI_VGT_PARAM__PRIMGROUP_SIZE_MASK 0xffff
#define IA_MULTI_VGT_PARAM__PRIMGROUP_SIZE__SHIFT 0x0
#define IA_MULTI_VGT_PARAM__PARTIAL_VS_WAVE_ON_MASK 0x10000
#define IA_MULTI_VGT_PARAM__PARTIAL_VS_WAVE_ON__SHIFT 0x10
#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOP_MASK 0x20000
#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOP__SHIFT 0x11
#define IA_MULTI_VGT_PARAM__PARTIAL_ES_WAVE_ON_MASK 0x40000
#define IA_MULTI_VGT_PARAM__PARTIAL_ES_WAVE_ON__SHIFT 0x12
#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOI_MASK 0x80000
#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOI__SHIFT 0x13
#define IA_MULTI_VGT_PARAM__WD_SWITCH_ON_EOP_MASK 0x100000
#define IA_MULTI_VGT_PARAM__WD_SWITCH_ON_EOP__SHIFT 0x14
#define IA_MULTI_VGT_PARAM__MAX_PRIMGRP_IN_WAVE_MASK 0xf0000000
#define IA_MULTI_VGT_PARAM__MAX_PRIMGRP_IN_WAVE__SHIFT 0x1c
#define VGT_VS_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0xfff
#define VGT_VS_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
#define VGT_ESGS_RING_SIZE__MEM_SIZE_MASK 0xffffffff
#define VGT_ESGS_RING_SIZE__MEM_SIZE__SHIFT 0x0
#define VGT_GSVS_RING_SIZE__MEM_SIZE_MASK 0xffffffff
#define VGT_GSVS_RING_SIZE__MEM_SIZE__SHIFT 0x0
#define VGT_GSVS_RING_OFFSET_1__OFFSET_MASK 0x7fff
#define VGT_GSVS_RING_OFFSET_1__OFFSET__SHIFT 0x0
#define VGT_GSVS_RING_OFFSET_2__OFFSET_MASK 0x7fff
#define VGT_GSVS_RING_OFFSET_2__OFFSET__SHIFT 0x0
#define VGT_GSVS_RING_OFFSET_3__OFFSET_MASK 0x7fff
#define VGT_GSVS_RING_OFFSET_3__OFFSET__SHIFT 0x0
#define VGT_ESGS_RING_ITEMSIZE__ITEMSIZE_MASK 0x7fff
#define VGT_ESGS_RING_ITEMSIZE__ITEMSIZE__SHIFT 0x0
#define VGT_GSVS_RING_ITEMSIZE__ITEMSIZE_MASK 0x7fff
#define VGT_GSVS_RING_ITEMSIZE__ITEMSIZE__SHIFT 0x0
#define VGT_GS_VERT_ITEMSIZE__ITEMSIZE_MASK 0x7fff
#define VGT_GS_VERT_ITEMSIZE__ITEMSIZE__SHIFT 0x0
#define VGT_GS_VERT_ITEMSIZE_1__ITEMSIZE_MASK 0x7fff
#define VGT_GS_VERT_ITEMSIZE_1__ITEMSIZE__SHIFT 0x0
#define VGT_GS_VERT_ITEMSIZE_2__ITEMSIZE_MASK 0x7fff
#define VGT_GS_VERT_ITEMSIZE_2__ITEMSIZE__SHIFT 0x0
#define VGT_GS_VERT_ITEMSIZE_3__ITEMSIZE_MASK 0x7fff
#define VGT_GS_VERT_ITEMSIZE_3__ITEMSIZE__SHIFT 0x0
#define WD_CNTL_STATUS__WD_BUSY_MASK 0x1
#define WD_CNTL_STATUS__WD_BUSY__SHIFT 0x0
#define WD_CNTL_STATUS__WD_SPL_DMA_BUSY_MASK 0x2
#define WD_CNTL_STATUS__WD_SPL_DMA_BUSY__SHIFT 0x1
#define WD_CNTL_STATUS__WD_SPL_DI_BUSY_MASK 0x4
#define WD_CNTL_STATUS__WD_SPL_DI_BUSY__SHIFT 0x2
#define WD_CNTL_STATUS__WD_ADC_BUSY_MASK 0x8
#define WD_CNTL_STATUS__WD_ADC_BUSY__SHIFT 0x3
#define WD_ENHANCE__MISC_MASK 0xffffffff
#define WD_ENHANCE__MISC__SHIFT 0x0
#define GFX_PIPE_CONTROL__HYSTERESIS_CNT_MASK 0x1fff
#define GFX_PIPE_CONTROL__HYSTERESIS_CNT__SHIFT 0x0
#define GFX_PIPE_CONTROL__RESERVED_MASK 0xe000
#define GFX_PIPE_CONTROL__RESERVED__SHIFT 0xd
#define GFX_PIPE_CONTROL__CONTEXT_SUSPEND_EN_MASK 0x10000
#define GFX_PIPE_CONTROL__CONTEXT_SUSPEND_EN__SHIFT 0x10
#define CGTT_VGT_CLK_CTRL__ON_DELAY_MASK 0xf
#define CGTT_VGT_CLK_CTRL__ON_DELAY__SHIFT 0x0
#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x1000000
#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
#define CGTT_VGT_CLK_CTRL__PERF_ENABLE_MASK 0x2000000
#define CGTT_VGT_CLK_CTRL__PERF_ENABLE__SHIFT 0x19
#define CGTT_VGT_CLK_CTRL__DBG_ENABLE_MASK 0x4000000
#define CGTT_VGT_CLK_CTRL__DBG_ENABLE__SHIFT 0x1a
#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x8000000
#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
#define CGTT_VGT_CLK_CTRL__TESS_OVERRIDE_MASK 0x10000000
#define CGTT_VGT_CLK_CTRL__TESS_OVERRIDE__SHIFT 0x1c
#define CGTT_VGT_CLK_CTRL__GS_OVERRIDE_MASK 0x20000000
#define CGTT_VGT_CLK_CTRL__GS_OVERRIDE__SHIFT 0x1d
#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000
#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000
#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
#define CGTT_IA_CLK_CTRL__ON_DELAY_MASK 0xf
#define CGTT_IA_CLK_CTRL__ON_DELAY__SHIFT 0x0
#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x1000000
#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
#define CGTT_IA_CLK_CTRL__PERF_ENABLE_MASK 0x2000000
#define CGTT_IA_CLK_CTRL__PERF_ENABLE__SHIFT 0x19
#define CGTT_IA_CLK_CTRL__DBG_ENABLE_MASK 0x4000000
#define CGTT_IA_CLK_CTRL__DBG_ENABLE__SHIFT 0x1a
#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x8000000
#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000
#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000
#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000
#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
#define CGTT_IA_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000
#define CGTT_IA_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
#define CGTT_WD_CLK_CTRL__ON_DELAY_MASK 0xf
#define CGTT_WD_CLK_CTRL__ON_DELAY__SHIFT 0x0
#define CGTT_WD_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
#define CGTT_WD_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x1000000
#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
#define CGTT_WD_CLK_CTRL__PERF_ENABLE_MASK 0x2000000
#define CGTT_WD_CLK_CTRL__PERF_ENABLE__SHIFT 0x19
#define CGTT_WD_CLK_CTRL__DBG_ENABLE_MASK 0x4000000
#define CGTT_WD_CLK_CTRL__DBG_ENABLE__SHIFT 0x1a
#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x8000000
#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
#define CGTT_WD_CLK_CTRL__TESS_OVERRIDE_MASK 0x10000000
#define CGTT_WD_CLK_CTRL__TESS_OVERRIDE__SHIFT 0x1c
#define CGTT_WD_CLK_CTRL__CORE_OVERRIDE_MASK 0x20000000
#define CGTT_WD_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1d
#define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE_MASK 0x40000000
#define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE__SHIFT 0x1e
#define CGTT_WD_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000
#define CGTT_WD_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
#define VGT_DEBUG_CNTL__VGT_DEBUG_INDX_MASK 0x3f
#define VGT_DEBUG_CNTL__VGT_DEBUG_INDX__SHIFT 0x0
#define VGT_DEBUG_CNTL__VGT_DEBUG_SEL_BUS_B_MASK 0x40
#define VGT_DEBUG_CNTL__VGT_DEBUG_SEL_BUS_B__SHIFT 0x6
#define VGT_DEBUG_DATA__DATA_MASK 0xffffffff
#define VGT_DEBUG_DATA__DATA__SHIFT 0x0
#define IA_DEBUG_CNTL__IA_DEBUG_INDX_MASK 0x3f
#define IA_DEBUG_CNTL__IA_DEBUG_INDX__SHIFT 0x0
#define IA_DEBUG_CNTL__IA_DEBUG_SEL_BUS_B_MASK 0x40
#define IA_DEBUG_CNTL__IA_DEBUG_SEL_BUS_B__SHIFT 0x6
#define IA_DEBUG_DATA__DATA_MASK 0xffffffff
#define IA_DEBUG_DATA__DATA__SHIFT 0x0
#define VGT_CNTL_STATUS__VGT_BUSY_MASK 0x1
#define VGT_CNTL_STATUS__VGT_BUSY__SHIFT 0x0
#define VGT_CNTL_STATUS__VGT_OUT_INDX_BUSY_MASK 0x2
#define VGT_CNTL_STATUS__VGT_OUT_INDX_BUSY__SHIFT 0x1
#define VGT_CNTL_STATUS__VGT_OUT_BUSY_MASK 0x4
#define VGT_CNTL_STATUS__VGT_OUT_BUSY__SHIFT 0x2
#define VGT_CNTL_STATUS__VGT_PT_BUSY_MASK 0x8
#define VGT_CNTL_STATUS__VGT_PT_BUSY__SHIFT 0x3
#define VGT_CNTL_STATUS__VGT_TE_BUSY_MASK 0x10
#define VGT_CNTL_STATUS__VGT_TE_BUSY__SHIFT 0x4
#define VGT_CNTL_STATUS__VGT_VR_BUSY_MASK 0x20
#define VGT_CNTL_STATUS__VGT_VR_BUSY__SHIFT 0x5
#define VGT_CNTL_STATUS__VGT_PI_BUSY_MASK 0x40
#define VGT_CNTL_STATUS__VGT_PI_BUSY__SHIFT 0x6
#define VGT_CNTL_STATUS__VGT_GS_BUSY_MASK 0x80
#define VGT_CNTL_STATUS__VGT_GS_BUSY__SHIFT 0x7
#define VGT_CNTL_STATUS__VGT_HS_BUSY_MASK 0x100
#define VGT_CNTL_STATUS__VGT_HS_BUSY__SHIFT 0x8
#define VGT_CNTL_STATUS__VGT_TE11_BUSY_MASK 0x200
#define VGT_CNTL_STATUS__VGT_TE11_BUSY__SHIFT 0x9
#define WD_DEBUG_CNTL__WD_DEBUG_INDX_MASK 0x3f
#define WD_DEBUG_CNTL__WD_DEBUG_INDX__SHIFT 0x0
#define WD_DEBUG_CNTL__WD_DEBUG_SEL_BUS_B_MASK 0x40
#define WD_DEBUG_CNTL__WD_DEBUG_SEL_BUS_B__SHIFT 0x6
#define WD_DEBUG_DATA__DATA_MASK 0xffffffff
#define WD_DEBUG_DATA__DATA__SHIFT 0x0
#define WD_QOS__DRAW_STALL_MASK 0x1
#define WD_QOS__DRAW_STALL__SHIFT 0x0
#define CC_GC_PRIM_CONFIG__INACTIVE_IA_MASK 0x30000
#define CC_GC_PRIM_CONFIG__INACTIVE_IA__SHIFT 0x10
#define CC_GC_PRIM_CONFIG__INACTIVE_VGT_PA_MASK 0xf000000
#define CC_GC_PRIM_CONFIG__INACTIVE_VGT_PA__SHIFT 0x18
#define GC_USER_PRIM_CONFIG__INACTIVE_IA_MASK 0x30000
#define GC_USER_PRIM_CONFIG__INACTIVE_IA__SHIFT 0x10
#define GC_USER_PRIM_CONFIG__INACTIVE_VGT_PA_MASK 0xf000000
#define GC_USER_PRIM_CONFIG__INACTIVE_VGT_PA__SHIFT 0x18
#define WD_DEBUG_REG0__wd_busy_extended_MASK 0x1
#define WD_DEBUG_REG0__wd_busy_extended__SHIFT 0x0
#define WD_DEBUG_REG0__wd_nodma_busy_extended_MASK 0x2
#define WD_DEBUG_REG0__wd_nodma_busy_extended__SHIFT 0x1
#define WD_DEBUG_REG0__wd_busy_MASK 0x4
#define WD_DEBUG_REG0__wd_busy__SHIFT 0x2
#define WD_DEBUG_REG0__wd_nodma_busy_MASK 0x8
#define WD_DEBUG_REG0__wd_nodma_busy__SHIFT 0x3
#define WD_DEBUG_REG0__rbiu_busy_MASK 0x10
#define WD_DEBUG_REG0__rbiu_busy__SHIFT 0x4
#define WD_DEBUG_REG0__spl_dma_busy_MASK 0x20
#define WD_DEBUG_REG0__spl_dma_busy__SHIFT 0x5
#define WD_DEBUG_REG0__spl_di_busy_MASK 0x40
#define WD_DEBUG_REG0__spl_di_busy__SHIFT 0x6
#define WD_DEBUG_REG0__vgt0_active_q_MASK 0x80
#define WD_DEBUG_REG0__vgt0_active_q__SHIFT 0x7
#define WD_DEBUG_REG0__vgt1_active_q_MASK 0x100
#define WD_DEBUG_REG0__vgt1_active_q__SHIFT 0x8
#define WD_DEBUG_REG0__spl_dma_p1_busy_MASK 0x200
#define WD_DEBUG_REG0__spl_dma_p1_busy__SHIFT 0x9
#define WD_DEBUG_REG0__rbiu_dr_p1_fifo_busy_MASK 0x400
#define WD_DEBUG_REG0__rbiu_dr_p1_fifo_busy__SHIFT 0xa
#define WD_DEBUG_REG0__rbiu_di_p1_fifo_busy_MASK 0x800
#define WD_DEBUG_REG0__rbiu_di_p1_fifo_busy__SHIFT 0xb
#define WD_DEBUG_REG0__SPARE2_MASK 0x1000
#define WD_DEBUG_REG0__SPARE2__SHIFT 0xc
#define WD_DEBUG_REG0__rbiu_dr_fifo_busy_MASK 0x2000
#define WD_DEBUG_REG0__rbiu_dr_fifo_busy__SHIFT 0xd
#define WD_DEBUG_REG0__rbiu_spl_dr_valid_MASK 0x4000
#define WD_DEBUG_REG0__rbiu_spl_dr_valid__SHIFT 0xe
#define WD_DEBUG_REG0__spl_rbiu_dr_read_MASK 0x8000
#define WD_DEBUG_REG0__spl_rbiu_dr_read__SHIFT 0xf
#define WD_DEBUG_REG0__SPARE3_MASK 0x10000
#define WD_DEBUG_REG0__SPARE3__SHIFT 0x10
#define WD_DEBUG_REG0__rbiu_di_fifo_busy_MASK 0x20000
#define WD_DEBUG_REG0__rbiu_di_fifo_busy__SHIFT 0x11
#define WD_DEBUG_REG0__rbiu_spl_di_valid_MASK 0x40000
#define WD_DEBUG_REG0__rbiu_spl_di_valid__SHIFT 0x12
#define WD_DEBUG_REG0__spl_rbiu_di_read_MASK 0x80000
#define WD_DEBUG_REG0__spl_rbiu_di_read__SHIFT 0x13
#define WD_DEBUG_REG0__se0_synced_q_MASK 0x100000
#define WD_DEBUG_REG0__se0_synced_q__SHIFT 0x14
#define WD_DEBUG_REG0__se1_synced_q_MASK 0x200000
#define WD_DEBUG_REG0__se1_synced_q__SHIFT 0x15
#define WD_DEBUG_REG0__se2_synced_q_MASK 0x400000
#define WD_DEBUG_REG0__se2_synced_q__SHIFT 0x16
#define WD_DEBUG_REG0__se3_synced_q_MASK 0x800000
#define WD_DEBUG_REG0__se3_synced_q__SHIFT 0x17
#define WD_DEBUG_REG0__reg_clk_busy_MASK 0x1000000
#define WD_DEBUG_REG0__reg_clk_busy__SHIFT 0x18
#define WD_DEBUG_REG0__input_clk_busy_MASK 0x2000000
#define WD_DEBUG_REG0__input_clk_busy__SHIFT 0x19
#define WD_DEBUG_REG0__core_clk_busy_MASK 0x4000000
#define WD_DEBUG_REG0__core_clk_busy__SHIFT 0x1a
#define WD_DEBUG_REG0__vgt2_active_q_MASK 0x8000000
#define WD_DEBUG_REG0__vgt2_active_q__SHIFT 0x1b
#define WD_DEBUG_REG0__sclk_reg_vld_MASK 0x10000000
#define WD_DEBUG_REG0__sclk_reg_vld__SHIFT 0x1c
#define WD_DEBUG_REG0__sclk_input_vld_MASK 0x20000000
#define WD_DEBUG_REG0__sclk_input_vld__SHIFT 0x1d
#define WD_DEBUG_REG0__sclk_core_vld_MASK 0x40000000
#define WD_DEBUG_REG0__sclk_core_vld__SHIFT 0x1e
#define WD_DEBUG_REG0__vgt3_active_q_MASK 0x80000000
#define WD_DEBUG_REG0__vgt3_active_q__SHIFT 0x1f
#define WD_DEBUG_REG1__grbm_fifo_empty_MASK 0x1
#define WD_DEBUG_REG1__grbm_fifo_empty__SHIFT 0x0
#define WD_DEBUG_REG1__grbm_fifo_full_MASK 0x2
#define WD_DEBUG_REG1__grbm_fifo_full__SHIFT 0x1
#define WD_DEBUG_REG1__grbm_fifo_we_MASK 0x4
#define WD_DEBUG_REG1__grbm_fifo_we__SHIFT 0x2
#define WD_DEBUG_REG1__grbm_fifo_re_MASK 0x8
#define WD_DEBUG_REG1__grbm_fifo_re__SHIFT 0x3
#define WD_DEBUG_REG1__draw_initiator_valid_q_MASK 0x10
#define WD_DEBUG_REG1__draw_initiator_valid_q__SHIFT 0x4
#define WD_DEBUG_REG1__event_initiator_valid_q_MASK 0x20
#define WD_DEBUG_REG1__event_initiator_valid_q__SHIFT 0x5
#define WD_DEBUG_REG1__event_addr_valid_q_MASK 0x40
#define WD_DEBUG_REG1__event_addr_valid_q__SHIFT 0x6
#define WD_DEBUG_REG1__dma_request_valid_q_MASK 0x80
#define WD_DEBUG_REG1__dma_request_valid_q__SHIFT 0x7
#define WD_DEBUG_REG1__SPARE0_MASK 0x100
#define WD_DEBUG_REG1__SPARE0__SHIFT 0x8
#define WD_DEBUG_REG1__min_indx_valid_q_MASK 0x200
#define WD_DEBUG_REG1__min_indx_valid_q__SHIFT 0x9
#define WD_DEBUG_REG1__max_indx_valid_q_MASK 0x400
#define WD_DEBUG_REG1__max_indx_valid_q__SHIFT 0xa
#define WD_DEBUG_REG1__indx_offset_valid_q_MASK 0x800
#define WD_DEBUG_REG1__indx_offset_valid_q__SHIFT 0xb
#define WD_DEBUG_REG1__grbm_fifo_rdata_reg_id_MASK 0x1f000
#define WD_DEBUG_REG1__grbm_fifo_rdata_reg_id__SHIFT 0xc
#define WD_DEBUG_REG1__grbm_fifo_rdata_state_MASK 0xe0000
#define WD_DEBUG_REG1__grbm_fifo_rdata_state__SHIFT 0x11
#define WD_DEBUG_REG1__free_cnt_q_MASK 0x3f00000
#define WD_DEBUG_REG1__free_cnt_q__SHIFT 0x14
#define WD_DEBUG_REG1__rbiu_di_fifo_we_MASK 0x4000000
#define WD_DEBUG_REG1__rbiu_di_fifo_we__SHIFT 0x1a
#define WD_DEBUG_REG1__rbiu_dr_fifo_we_MASK 0x8000000
#define WD_DEBUG_REG1__rbiu_dr_fifo_we__SHIFT 0x1b
#define WD_DEBUG_REG1__rbiu_di_fifo_empty_MASK 0x10000000
#define WD_DEBUG_REG1__rbiu_di_fifo_empty__SHIFT 0x1c
#define WD_DEBUG_REG1__rbiu_di_fifo_full_MASK 0x20000000
#define WD_DEBUG_REG1__rbiu_di_fifo_full__SHIFT 0x1d
#define WD_DEBUG_REG1__rbiu_dr_fifo_empty_MASK 0x40000000
#define WD_DEBUG_REG1__rbiu_dr_fifo_empty__SHIFT 0x1e
#define WD_DEBUG_REG1__rbiu_dr_fifo_full_MASK 0x80000000
#define WD_DEBUG_REG1__rbiu_dr_fifo_full__SHIFT 0x1f
#define WD_DEBUG_REG2__p1_grbm_fifo_empty_MASK 0x1
#define WD_DEBUG_REG2__p1_grbm_fifo_empty__SHIFT 0x0
#define WD_DEBUG_REG2__p1_grbm_fifo_full_MASK 0x2
#define WD_DEBUG_REG2__p1_grbm_fifo_full__SHIFT 0x1
#define WD_DEBUG_REG2__p1_grbm_fifo_we_MASK 0x4
#define WD_DEBUG_REG2__p1_grbm_fifo_we__SHIFT 0x2
#define WD_DEBUG_REG2__p1_grbm_fifo_re_MASK 0x8
#define WD_DEBUG_REG2__p1_grbm_fifo_re__SHIFT 0x3
#define WD_DEBUG_REG2__p1_draw_initiator_valid_q_MASK 0x10
#define WD_DEBUG_REG2__p1_draw_initiator_valid_q__SHIFT 0x4
#define WD_DEBUG_REG2__p1_event_initiator_valid_q_MASK 0x20
#define WD_DEBUG_REG2__p1_event_initiator_valid_q__SHIFT 0x5
#define WD_DEBUG_REG2__p1_event_addr_valid_q_MASK 0x40
#define WD_DEBUG_REG2__p1_event_addr_valid_q__SHIFT 0x6
#define WD_DEBUG_REG2__p1_dma_request_valid_q_MASK 0x80
#define WD_DEBUG_REG2__p1_dma_request_valid_q__SHIFT 0x7
#define WD_DEBUG_REG2__SPARE0_MASK 0x100
#define WD_DEBUG_REG2__SPARE0__SHIFT 0x8
#define WD_DEBUG_REG2__p1_min_indx_valid_q_MASK 0x200
#define WD_DEBUG_REG2__p1_min_indx_valid_q__SHIFT 0x9
#define WD_DEBUG_REG2__p1_max_indx_valid_q_MASK 0x400
#define WD_DEBUG_REG2__p1_max_indx_valid_q__SHIFT 0xa
#define WD_DEBUG_REG2__p1_indx_offset_valid_q_MASK 0x800
#define WD_DEBUG_REG2__p1_indx_offset_valid_q__SHIFT 0xb
#define WD_DEBUG_REG2__p1_grbm_fifo_rdata_reg_id_MASK 0x1f000
#define WD_DEBUG_REG2__p1_grbm_fifo_rdata_reg_id__SHIFT 0xc
#define WD_DEBUG_REG2__p1_grbm_fifo_rdata_state_MASK 0xe0000
#define WD_DEBUG_REG2__p1_grbm_fifo_rdata_state__SHIFT 0x11
#define WD_DEBUG_REG2__p1_free_cnt_q_MASK 0x3f00000
#define WD_DEBUG_REG2__p1_free_cnt_q__SHIFT 0x14
#define WD_DEBUG_REG2__p1_rbiu_di_fifo_we_MASK 0x4000000
#define WD_DEBUG_REG2__p1_rbiu_di_fifo_we__SHIFT 0x1a
#define WD_DEBUG_REG2__p1_rbiu_dr_fifo_we_MASK 0x8000000
#define WD_DEBUG_REG2__p1_rbiu_dr_fifo_we__SHIFT 0x1b
#define WD_DEBUG_REG2__p1_rbiu_di_fifo_empty_MASK 0x10000000
#define WD_DEBUG_REG2__p1_rbiu_di_fifo_empty__SHIFT 0x1c
#define WD_DEBUG_REG2__p1_rbiu_di_fifo_full_MASK 0x20000000
#define WD_DEBUG_REG2__p1_rbiu_di_fifo_full__SHIFT 0x1d
#define WD_DEBUG_REG2__p1_rbiu_dr_fifo_empty_MASK 0x40000000
#define WD_DEBUG_REG2__p1_rbiu_dr_fifo_empty__SHIFT 0x1e
#define WD_DEBUG_REG2__p1_rbiu_dr_fifo_full_MASK 0x80000000
#define WD_DEBUG_REG2__p1_rbiu_dr_fifo_full__SHIFT 0x1f
#define WD_DEBUG_REG3__rbiu_spl_dr_valid_MASK 0x1
#define WD_DEBUG_REG3__rbiu_spl_dr_valid__SHIFT 0x0
#define WD_DEBUG_REG3__SPARE0_MASK 0x2
#define WD_DEBUG_REG3__SPARE0__SHIFT 0x1
#define WD_DEBUG_REG3__pipe0_dr_MASK 0x4
#define WD_DEBUG_REG3__pipe0_dr__SHIFT 0x2
#define WD_DEBUG_REG3__pipe0_rtr_MASK 0x8
#define WD_DEBUG_REG3__pipe0_rtr__SHIFT 0x3
#define WD_DEBUG_REG3__pipe1_dr_MASK 0x10
#define WD_DEBUG_REG3__pipe1_dr__SHIFT 0x4
#define WD_DEBUG_REG3__pipe1_rtr_MASK 0x20
#define WD_DEBUG_REG3__pipe1_rtr__SHIFT 0x5
#define WD_DEBUG_REG3__wd_subdma_fifo_empty_MASK 0x40
#define WD_DEBUG_REG3__wd_subdma_fifo_empty__SHIFT 0x6
#define WD_DEBUG_REG3__wd_subdma_fifo_full_MASK 0x80
#define WD_DEBUG_REG3__wd_subdma_fifo_full__SHIFT 0x7
#define WD_DEBUG_REG3__dma_buf_type_p0_q_MASK 0x300
#define WD_DEBUG_REG3__dma_buf_type_p0_q__SHIFT 0x8
#define WD_DEBUG_REG3__dma_zero_indices_p0_q_MASK 0x400
#define WD_DEBUG_REG3__dma_zero_indices_p0_q__SHIFT 0xa
#define WD_DEBUG_REG3__dma_req_path_p3_q_MASK 0x800
#define WD_DEBUG_REG3__dma_req_path_p3_q__SHIFT 0xb
#define WD_DEBUG_REG3__dma_not_eop_p1_q_MASK 0x1000
#define WD_DEBUG_REG3__dma_not_eop_p1_q__SHIFT 0xc
#define WD_DEBUG_REG3__out_of_range_p4_MASK 0x2000
#define WD_DEBUG_REG3__out_of_range_p4__SHIFT 0xd
#define WD_DEBUG_REG3__last_sub_dma_p3_q_MASK 0x4000
#define WD_DEBUG_REG3__last_sub_dma_p3_q__SHIFT 0xe
#define WD_DEBUG_REG3__last_rdreq_of_sub_dma_p4_MASK 0x8000
#define WD_DEBUG_REG3__last_rdreq_of_sub_dma_p4__SHIFT 0xf
#define WD_DEBUG_REG3__WD_IA_dma_send_d_MASK 0x10000
#define WD_DEBUG_REG3__WD_IA_dma_send_d__SHIFT 0x10
#define WD_DEBUG_REG3__WD_IA_dma_rtr_MASK 0x20000
#define WD_DEBUG_REG3__WD_IA_dma_rtr__SHIFT 0x11
#define WD_DEBUG_REG3__WD_IA1_dma_send_d_MASK 0x40000
#define WD_DEBUG_REG3__WD_IA1_dma_send_d__SHIFT 0x12
#define WD_DEBUG_REG3__WD_IA1_dma_rtr_MASK 0x80000
#define WD_DEBUG_REG3__WD_IA1_dma_rtr__SHIFT 0x13
#define WD_DEBUG_REG3__last_inst_of_dma_p2_MASK 0x100000
#define WD_DEBUG_REG3__last_inst_of_dma_p2__SHIFT 0x14
#define WD_DEBUG_REG3__last_sd_of_inst_p2_MASK 0x200000
#define WD_DEBUG_REG3__last_sd_of_inst_p2__SHIFT 0x15
#define WD_DEBUG_REG3__last_sd_of_dma_p2_MASK 0x400000
#define WD_DEBUG_REG3__last_sd_of_dma_p2__SHIFT 0x16
#define WD_DEBUG_REG3__SPARE1_MASK 0x800000
#define WD_DEBUG_REG3__SPARE1__SHIFT 0x17
#define WD_DEBUG_REG3__WD_IA_dma_busy_MASK 0x1000000
#define WD_DEBUG_REG3__WD_IA_dma_busy__SHIFT 0x18
#define WD_DEBUG_REG3__WD_IA1_dma_busy_MASK 0x2000000
#define WD_DEBUG_REG3__WD_IA1_dma_busy__SHIFT 0x19
#define WD_DEBUG_REG3__send_to_ia1_p3_q_MASK 0x4000000
#define WD_DEBUG_REG3__send_to_ia1_p3_q__SHIFT 0x1a
#define WD_DEBUG_REG3__dma_wd_switch_on_eop_p3_q_MASK 0x8000000
#define WD_DEBUG_REG3__dma_wd_switch_on_eop_p3_q__SHIFT 0x1b
#define WD_DEBUG_REG3__pipe3_dr_MASK 0x10000000
#define WD_DEBUG_REG3__pipe3_dr__SHIFT 0x1c
#define WD_DEBUG_REG3__pipe3_rtr_MASK 0x20000000
#define WD_DEBUG_REG3__pipe3_rtr__SHIFT 0x1d
#define WD_DEBUG_REG3__wd_dma2draw_fifo_empty_MASK 0x40000000
#define WD_DEBUG_REG3__wd_dma2draw_fifo_empty__SHIFT 0x1e
#define WD_DEBUG_REG3__wd_dma2draw_fifo_full_MASK 0x80000000
#define WD_DEBUG_REG3__wd_dma2draw_fifo_full__SHIFT 0x1f
#define WD_DEBUG_REG4__rbiu_spl_di_valid_MASK 0x1
#define WD_DEBUG_REG4__rbiu_spl_di_valid__SHIFT 0x0
#define WD_DEBUG_REG4__spl_rbiu_di_read_MASK 0x2
#define WD_DEBUG_REG4__spl_rbiu_di_read__SHIFT 0x1
#define WD_DEBUG_REG4__rbiu_spl_p1_di_valid_MASK 0x4
#define WD_DEBUG_REG4__rbiu_spl_p1_di_valid__SHIFT 0x2
#define WD_DEBUG_REG4__spl_rbiu_p1_di_read_MASK 0x8
#define WD_DEBUG_REG4__spl_rbiu_p1_di_read__SHIFT 0x3
#define WD_DEBUG_REG4__pipe0_dr_MASK 0x10
#define WD_DEBUG_REG4__pipe0_dr__SHIFT 0x4
#define WD_DEBUG_REG4__pipe0_rtr_MASK 0x20
#define WD_DEBUG_REG4__pipe0_rtr__SHIFT 0x5
#define WD_DEBUG_REG4__pipe1_dr_MASK 0x40
#define WD_DEBUG_REG4__pipe1_dr__SHIFT 0x6
#define WD_DEBUG_REG4__pipe1_rtr_MASK 0x80
#define WD_DEBUG_REG4__pipe1_rtr__SHIFT 0x7
#define WD_DEBUG_REG4__pipe2_dr_MASK 0x100
#define WD_DEBUG_REG4__pipe2_dr__SHIFT 0x8
#define WD_DEBUG_REG4__pipe2_rtr_MASK 0x200
#define WD_DEBUG_REG4__pipe2_rtr__SHIFT 0x9
#define WD_DEBUG_REG4__pipe3_ld_MASK 0x400
#define WD_DEBUG_REG4__pipe3_ld__SHIFT 0xa
#define WD_DEBUG_REG4__pipe3_rtr_MASK 0x800
#define WD_DEBUG_REG4__pipe3_rtr__SHIFT 0xb
#define WD_DEBUG_REG4__WD_IA_draw_send_d_MASK 0x1000
#define WD_DEBUG_REG4__WD_IA_draw_send_d__SHIFT 0xc
#define WD_DEBUG_REG4__WD_IA_draw_rtr_MASK 0x2000
#define WD_DEBUG_REG4__WD_IA_draw_rtr__SHIFT 0xd
#define WD_DEBUG_REG4__di_type_p0_MASK 0xc000
#define WD_DEBUG_REG4__di_type_p0__SHIFT 0xe
#define WD_DEBUG_REG4__di_state_sel_p1_q_MASK 0x70000
#define WD_DEBUG_REG4__di_state_sel_p1_q__SHIFT 0x10
#define WD_DEBUG_REG4__di_wd_switch_on_eop_p1_q_MASK 0x80000
#define WD_DEBUG_REG4__di_wd_switch_on_eop_p1_q__SHIFT 0x13
#define WD_DEBUG_REG4__rbiu_spl_pipe0_lockout_MASK 0x100000
#define WD_DEBUG_REG4__rbiu_spl_pipe0_lockout__SHIFT 0x14
#define WD_DEBUG_REG4__last_inst_of_di_p2_MASK 0x200000
#define WD_DEBUG_REG4__last_inst_of_di_p2__SHIFT 0x15
#define WD_DEBUG_REG4__last_sd_of_inst_p2_MASK 0x400000
#define WD_DEBUG_REG4__last_sd_of_inst_p2__SHIFT 0x16
#define WD_DEBUG_REG4__last_sd_of_di_p2_MASK 0x800000
#define WD_DEBUG_REG4__last_sd_of_di_p2__SHIFT 0x17
#define WD_DEBUG_REG4__not_eop_wait_p1_q_MASK 0x1000000
#define WD_DEBUG_REG4__not_eop_wait_p1_q__SHIFT 0x18
#define WD_DEBUG_REG4__not_eop_wait_q_MASK 0x2000000
#define WD_DEBUG_REG4__not_eop_wait_q__SHIFT 0x19
#define WD_DEBUG_REG4__ext_event_wait_p1_q_MASK 0x4000000
#define WD_DEBUG_REG4__ext_event_wait_p1_q__SHIFT 0x1a
#define WD_DEBUG_REG4__ext_event_wait_q_MASK 0x8000000
#define WD_DEBUG_REG4__ext_event_wait_q__SHIFT 0x1b
#define WD_DEBUG_REG4__WD_IA1_draw_send_d_MASK 0x10000000
#define WD_DEBUG_REG4__WD_IA1_draw_send_d__SHIFT 0x1c
#define WD_DEBUG_REG4__WD_IA1_draw_rtr_MASK 0x20000000
#define WD_DEBUG_REG4__WD_IA1_draw_rtr__SHIFT 0x1d
#define WD_DEBUG_REG4__send_to_ia1_q_MASK 0x40000000
#define WD_DEBUG_REG4__send_to_ia1_q__SHIFT 0x1e
#define WD_DEBUG_REG4__dual_ia_mode_MASK 0x80000000
#define WD_DEBUG_REG4__dual_ia_mode__SHIFT 0x1f
#define WD_DEBUG_REG5__p1_rbiu_spl_dr_valid_MASK 0x1
#define WD_DEBUG_REG5__p1_rbiu_spl_dr_valid__SHIFT 0x0
#define WD_DEBUG_REG5__SPARE0_MASK 0x2
#define WD_DEBUG_REG5__SPARE0__SHIFT 0x1
#define WD_DEBUG_REG5__p1_pipe0_dr_MASK 0x4
#define WD_DEBUG_REG5__p1_pipe0_dr__SHIFT 0x2
#define WD_DEBUG_REG5__p1_pipe0_rtr_MASK 0x8
#define WD_DEBUG_REG5__p1_pipe0_rtr__SHIFT 0x3
#define WD_DEBUG_REG5__p1_pipe1_dr_MASK 0x10
#define WD_DEBUG_REG5__p1_pipe1_dr__SHIFT 0x4
#define WD_DEBUG_REG5__p1_pipe1_rtr_MASK 0x20
#define WD_DEBUG_REG5__p1_pipe1_rtr__SHIFT 0x5
#define WD_DEBUG_REG5__p1_wd_subdma_fifo_empty_MASK 0x40
#define WD_DEBUG_REG5__p1_wd_subdma_fifo_empty__SHIFT 0x6
#define WD_DEBUG_REG5__p1_wd_subdma_fifo_full_MASK 0x80
#define WD_DEBUG_REG5__p1_wd_subdma_fifo_full__SHIFT 0x7
#define WD_DEBUG_REG5__p1_dma_buf_type_p0_q_MASK 0x300
#define WD_DEBUG_REG5__p1_dma_buf_type_p0_q__SHIFT 0x8
#define WD_DEBUG_REG5__p1_dma_zero_indices_p0_q_MASK 0x400
#define WD_DEBUG_REG5__p1_dma_zero_indices_p0_q__SHIFT 0xa
#define WD_DEBUG_REG5__p1_dma_req_path_p3_q_MASK 0x800
#define WD_DEBUG_REG5__p1_dma_req_path_p3_q__SHIFT 0xb
#define WD_DEBUG_REG5__p1_dma_not_eop_p1_q_MASK 0x1000
#define WD_DEBUG_REG5__p1_dma_not_eop_p1_q__SHIFT 0xc
#define WD_DEBUG_REG5__p1_out_of_range_p4_MASK 0x2000
#define WD_DEBUG_REG5__p1_out_of_range_p4__SHIFT 0xd
#define WD_DEBUG_REG5__p1_last_sub_dma_p3_q_MASK 0x4000
#define WD_DEBUG_REG5__p1_last_sub_dma_p3_q__SHIFT 0xe
#define WD_DEBUG_REG5__p1_last_rdreq_of_sub_dma_p4_MASK 0x8000
#define WD_DEBUG_REG5__p1_last_rdreq_of_sub_dma_p4__SHIFT 0xf
#define WD_DEBUG_REG5__p1_WD_IA_dma_send_d_MASK 0x10000
#define WD_DEBUG_REG5__p1_WD_IA_dma_send_d__SHIFT 0x10
#define WD_DEBUG_REG5__p1_WD_IA_dma_rtr_MASK 0x20000
#define WD_DEBUG_REG5__p1_WD_IA_dma_rtr__SHIFT 0x11
#define WD_DEBUG_REG5__p1_WD_IA1_dma_send_d_MASK 0x40000
#define WD_DEBUG_REG5__p1_WD_IA1_dma_send_d__SHIFT 0x12
#define WD_DEBUG_REG5__p1_WD_IA1_dma_rtr_MASK 0x80000
#define WD_DEBUG_REG5__p1_WD_IA1_dma_rtr__SHIFT 0x13
#define WD_DEBUG_REG5__p1_last_inst_of_dma_p2_MASK 0x100000
#define WD_DEBUG_REG5__p1_last_inst_of_dma_p2__SHIFT 0x14
#define WD_DEBUG_REG5__p1_last_sd_of_inst_p2_MASK 0x200000
#define WD_DEBUG_REG5__p1_last_sd_of_inst_p2__SHIFT 0x15
#define WD_DEBUG_REG5__p1_last_sd_of_dma_p2_MASK 0x400000
#define WD_DEBUG_REG5__p1_last_sd_of_dma_p2__SHIFT 0x16
#define WD_DEBUG_REG5__SPARE1_MASK 0x800000
#define WD_DEBUG_REG5__SPARE1__SHIFT 0x17
#define WD_DEBUG_REG5__p1_WD_IA_dma_busy_MASK 0x1000000
#define WD_DEBUG_REG5__p1_WD_IA_dma_busy__SHIFT 0x18
#define WD_DEBUG_REG5__p1_WD_IA1_dma_busy_MASK 0x2000000
#define WD_DEBUG_REG5__p1_WD_IA1_dma_busy__SHIFT 0x19
#define WD_DEBUG_REG5__p1_send_to_ia1_p3_q_MASK 0x4000000
#define WD_DEBUG_REG5__p1_send_to_ia1_p3_q__SHIFT 0x1a
#define WD_DEBUG_REG5__p1_dma_wd_switch_on_eop_p3_q_MASK 0x8000000
#define WD_DEBUG_REG5__p1_dma_wd_switch_on_eop_p3_q__SHIFT 0x1b
19035#define WD_DEBUG_REG5__p1_pipe3_dr_MASK 0x10000000
19036#define WD_DEBUG_REG5__p1_pipe3_dr__SHIFT 0x1c
19037#define WD_DEBUG_REG5__p1_pipe3_rtr_MASK 0x20000000
19038#define WD_DEBUG_REG5__p1_pipe3_rtr__SHIFT 0x1d
19039#define WD_DEBUG_REG5__p1_wd_dma2draw_fifo_empty_MASK 0x40000000
19040#define WD_DEBUG_REG5__p1_wd_dma2draw_fifo_empty__SHIFT 0x1e
19041#define WD_DEBUG_REG5__p1_wd_dma2draw_fifo_full_MASK 0x80000000
19042#define WD_DEBUG_REG5__p1_wd_dma2draw_fifo_full__SHIFT 0x1f
19043#define WD_DEBUG_REG6__WD_IA_draw_eop_MASK 0xffffffff
19044#define WD_DEBUG_REG6__WD_IA_draw_eop__SHIFT 0x0
19045#define WD_DEBUG_REG7__SE0VGT_WD_thdgrp_send_in_MASK 0x1
19046#define WD_DEBUG_REG7__SE0VGT_WD_thdgrp_send_in__SHIFT 0x0
19047#define WD_DEBUG_REG7__wd_arb_se0_input_fifo_re_MASK 0x2
19048#define WD_DEBUG_REG7__wd_arb_se0_input_fifo_re__SHIFT 0x1
19049#define WD_DEBUG_REG7__wd_arb_se0_input_fifo_empty_MASK 0x4
19050#define WD_DEBUG_REG7__wd_arb_se0_input_fifo_empty__SHIFT 0x2
19051#define WD_DEBUG_REG7__wd_arb_se0_input_fifo_full_MASK 0x8
19052#define WD_DEBUG_REG7__wd_arb_se0_input_fifo_full__SHIFT 0x3
19053#define WD_DEBUG_REG7__SE1VGT_WD_thdgrp_send_in_MASK 0x10
19054#define WD_DEBUG_REG7__SE1VGT_WD_thdgrp_send_in__SHIFT 0x4
19055#define WD_DEBUG_REG7__wd_arb_se1_input_fifo_re_MASK 0x20
19056#define WD_DEBUG_REG7__wd_arb_se1_input_fifo_re__SHIFT 0x5
19057#define WD_DEBUG_REG7__wd_arb_se1_input_fifo_empty_MASK 0x40
19058#define WD_DEBUG_REG7__wd_arb_se1_input_fifo_empty__SHIFT 0x6
19059#define WD_DEBUG_REG7__wd_arb_se1_input_fifo_full_MASK 0x80
19060#define WD_DEBUG_REG7__wd_arb_se1_input_fifo_full__SHIFT 0x7
19061#define WD_DEBUG_REG7__SPARE1_MASK 0xf00
19062#define WD_DEBUG_REG7__SPARE1__SHIFT 0x8
19063#define WD_DEBUG_REG7__SPARE2_MASK 0xf000
19064#define WD_DEBUG_REG7__SPARE2__SHIFT 0xc
19065#define WD_DEBUG_REG7__te11_arb_state_q_MASK 0x70000
19066#define WD_DEBUG_REG7__te11_arb_state_q__SHIFT 0x10
19067#define WD_DEBUG_REG7__SPARE5_MASK 0x80000
19068#define WD_DEBUG_REG7__SPARE5__SHIFT 0x13
19069#define WD_DEBUG_REG7__se0_thdgrp_is_event_MASK 0x100000
19070#define WD_DEBUG_REG7__se0_thdgrp_is_event__SHIFT 0x14
19071#define WD_DEBUG_REG7__se0_thdgrp_eop_MASK 0x200000
19072#define WD_DEBUG_REG7__se0_thdgrp_eop__SHIFT 0x15
19073#define WD_DEBUG_REG7__se1_thdgrp_is_event_MASK 0x400000
19074#define WD_DEBUG_REG7__se1_thdgrp_is_event__SHIFT 0x16
19075#define WD_DEBUG_REG7__se1_thdgrp_eop_MASK 0x800000
19076#define WD_DEBUG_REG7__se1_thdgrp_eop__SHIFT 0x17
19077#define WD_DEBUG_REG7__SPARE6_MASK 0xf000000
19078#define WD_DEBUG_REG7__SPARE6__SHIFT 0x18
19079#define WD_DEBUG_REG7__tfreq_arb_tgroup_rtr_MASK 0x10000000
19080#define WD_DEBUG_REG7__tfreq_arb_tgroup_rtr__SHIFT 0x1c
19081#define WD_DEBUG_REG7__arb_tfreq_tgroup_rts_MASK 0x20000000
19082#define WD_DEBUG_REG7__arb_tfreq_tgroup_rts__SHIFT 0x1d
19083#define WD_DEBUG_REG7__arb_tfreq_tgroup_event_MASK 0x40000000
19084#define WD_DEBUG_REG7__arb_tfreq_tgroup_event__SHIFT 0x1e
19085#define WD_DEBUG_REG7__te11_arb_busy_MASK 0x80000000
19086#define WD_DEBUG_REG7__te11_arb_busy__SHIFT 0x1f
19087#define WD_DEBUG_REG8__pipe0_dr_MASK 0x1
19088#define WD_DEBUG_REG8__pipe0_dr__SHIFT 0x0
19089#define WD_DEBUG_REG8__pipe1_dr_MASK 0x2
19090#define WD_DEBUG_REG8__pipe1_dr__SHIFT 0x1
19091#define WD_DEBUG_REG8__pipe0_rtr_MASK 0x4
19092#define WD_DEBUG_REG8__pipe0_rtr__SHIFT 0x2
19093#define WD_DEBUG_REG8__pipe1_rtr_MASK 0x8
19094#define WD_DEBUG_REG8__pipe1_rtr__SHIFT 0x3
19095#define WD_DEBUG_REG8__tfreq_tg_fifo_empty_MASK 0x10
19096#define WD_DEBUG_REG8__tfreq_tg_fifo_empty__SHIFT 0x4
19097#define WD_DEBUG_REG8__tfreq_tg_fifo_full_MASK 0x20
19098#define WD_DEBUG_REG8__tfreq_tg_fifo_full__SHIFT 0x5
19099#define WD_DEBUG_REG8__tf_data_fifo_busy_q_MASK 0x40
19100#define WD_DEBUG_REG8__tf_data_fifo_busy_q__SHIFT 0x6
19101#define WD_DEBUG_REG8__tf_data_fifo_rtr_q_MASK 0x80
19102#define WD_DEBUG_REG8__tf_data_fifo_rtr_q__SHIFT 0x7
19103#define WD_DEBUG_REG8__tf_skid_fifo_empty_MASK 0x100
19104#define WD_DEBUG_REG8__tf_skid_fifo_empty__SHIFT 0x8
19105#define WD_DEBUG_REG8__tf_skid_fifo_full_MASK 0x200
19106#define WD_DEBUG_REG8__tf_skid_fifo_full__SHIFT 0x9
19107#define WD_DEBUG_REG8__wd_tc_rdreq_rtr_q_MASK 0x400
19108#define WD_DEBUG_REG8__wd_tc_rdreq_rtr_q__SHIFT 0xa
19109#define WD_DEBUG_REG8__last_req_of_tg_p2_MASK 0x800
19110#define WD_DEBUG_REG8__last_req_of_tg_p2__SHIFT 0xb
19111#define WD_DEBUG_REG8__se0spi_wd_hs_done_cnt_q_MASK 0x3f000
19112#define WD_DEBUG_REG8__se0spi_wd_hs_done_cnt_q__SHIFT 0xc
19113#define WD_DEBUG_REG8__event_flag_p1_q_MASK 0x40000
19114#define WD_DEBUG_REG8__event_flag_p1_q__SHIFT 0x12
19115#define WD_DEBUG_REG8__null_flag_p1_q_MASK 0x80000
19116#define WD_DEBUG_REG8__null_flag_p1_q__SHIFT 0x13
19117#define WD_DEBUG_REG8__tf_data_fifo_cnt_q_MASK 0x7f00000
19118#define WD_DEBUG_REG8__tf_data_fifo_cnt_q__SHIFT 0x14
19119#define WD_DEBUG_REG8__second_tf_ret_data_q_MASK 0x8000000
19120#define WD_DEBUG_REG8__second_tf_ret_data_q__SHIFT 0x1b
19121#define WD_DEBUG_REG8__first_req_of_tg_p1_q_MASK 0x10000000
19122#define WD_DEBUG_REG8__first_req_of_tg_p1_q__SHIFT 0x1c
19123#define WD_DEBUG_REG8__WD_TC_rdreq_send_out_MASK 0x20000000
19124#define WD_DEBUG_REG8__WD_TC_rdreq_send_out__SHIFT 0x1d
19125#define WD_DEBUG_REG8__WD_TC_rdnfo_stall_out_MASK 0x40000000
19126#define WD_DEBUG_REG8__WD_TC_rdnfo_stall_out__SHIFT 0x1e
19127#define WD_DEBUG_REG8__TC_WD_rdret_valid_in_MASK 0x80000000
19128#define WD_DEBUG_REG8__TC_WD_rdret_valid_in__SHIFT 0x1f
19129#define WD_DEBUG_REG9__pipe0_dr_MASK 0x1
19130#define WD_DEBUG_REG9__pipe0_dr__SHIFT 0x0
19131#define WD_DEBUG_REG9__pipec_tf_dr_MASK 0x2
19132#define WD_DEBUG_REG9__pipec_tf_dr__SHIFT 0x1
19133#define WD_DEBUG_REG9__pipe2_dr_MASK 0x4
19134#define WD_DEBUG_REG9__pipe2_dr__SHIFT 0x2
19135#define WD_DEBUG_REG9__event_or_null_flags_p0_q_MASK 0x8
19136#define WD_DEBUG_REG9__event_or_null_flags_p0_q__SHIFT 0x3
19137#define WD_DEBUG_REG9__pipe0_rtr_MASK 0x10
19138#define WD_DEBUG_REG9__pipe0_rtr__SHIFT 0x4
19139#define WD_DEBUG_REG9__pipe1_rtr_MASK 0x20
19140#define WD_DEBUG_REG9__pipe1_rtr__SHIFT 0x5
19141#define WD_DEBUG_REG9__pipec_tf_rtr_MASK 0x40
19142#define WD_DEBUG_REG9__pipec_tf_rtr__SHIFT 0x6
19143#define WD_DEBUG_REG9__pipe2_rtr_MASK 0x80
19144#define WD_DEBUG_REG9__pipe2_rtr__SHIFT 0x7
19145#define WD_DEBUG_REG9__ttp_patch_fifo_full_MASK 0x100
19146#define WD_DEBUG_REG9__ttp_patch_fifo_full__SHIFT 0x8
19147#define WD_DEBUG_REG9__ttp_patch_fifo_empty_MASK 0x200
19148#define WD_DEBUG_REG9__ttp_patch_fifo_empty__SHIFT 0x9
19149#define WD_DEBUG_REG9__ttp_tf_fifo_empty_MASK 0x400
19150#define WD_DEBUG_REG9__ttp_tf_fifo_empty__SHIFT 0xa
19151#define WD_DEBUG_REG9__SPARE0_MASK 0xf800
19152#define WD_DEBUG_REG9__SPARE0__SHIFT 0xb
19153#define WD_DEBUG_REG9__tf_fetch_state_q_MASK 0x70000
19154#define WD_DEBUG_REG9__tf_fetch_state_q__SHIFT 0x10
19155#define WD_DEBUG_REG9__last_patch_of_tg_MASK 0x80000
19156#define WD_DEBUG_REG9__last_patch_of_tg__SHIFT 0x13
19157#define WD_DEBUG_REG9__tf_pointer_p0_q_MASK 0xf00000
19158#define WD_DEBUG_REG9__tf_pointer_p0_q__SHIFT 0x14
19159#define WD_DEBUG_REG9__dynamic_hs_p0_q_MASK 0x1000000
19160#define WD_DEBUG_REG9__dynamic_hs_p0_q__SHIFT 0x18
19161#define WD_DEBUG_REG9__first_fetch_of_tg_p0_q_MASK 0x2000000
19162#define WD_DEBUG_REG9__first_fetch_of_tg_p0_q__SHIFT 0x19
19163#define WD_DEBUG_REG9__mem_is_even_MASK 0x4000000
19164#define WD_DEBUG_REG9__mem_is_even__SHIFT 0x1a
19165#define WD_DEBUG_REG9__SPARE1_MASK 0x8000000
19166#define WD_DEBUG_REG9__SPARE1__SHIFT 0x1b
19167#define WD_DEBUG_REG9__SPARE2_MASK 0x30000000
19168#define WD_DEBUG_REG9__SPARE2__SHIFT 0x1c
19169#define WD_DEBUG_REG9__pipe4_dr_MASK 0x40000000
19170#define WD_DEBUG_REG9__pipe4_dr__SHIFT 0x1e
19171#define WD_DEBUG_REG9__pipe4_rtr_MASK 0x80000000
19172#define WD_DEBUG_REG9__pipe4_rtr__SHIFT 0x1f
19173#define WD_DEBUG_REG10__ttp_pd_patch_rts_MASK 0x1
19174#define WD_DEBUG_REG10__ttp_pd_patch_rts__SHIFT 0x0
19175#define WD_DEBUG_REG10__ttp_pd_is_event_MASK 0x2
19176#define WD_DEBUG_REG10__ttp_pd_is_event__SHIFT 0x1
19177#define WD_DEBUG_REG10__ttp_pd_eopg_MASK 0x4
19178#define WD_DEBUG_REG10__ttp_pd_eopg__SHIFT 0x2
19179#define WD_DEBUG_REG10__ttp_pd_eop_MASK 0x8
19180#define WD_DEBUG_REG10__ttp_pd_eop__SHIFT 0x3
19181#define WD_DEBUG_REG10__pipe0_dr_MASK 0x10
19182#define WD_DEBUG_REG10__pipe0_dr__SHIFT 0x4
19183#define WD_DEBUG_REG10__pipe1_dr_MASK 0x20
19184#define WD_DEBUG_REG10__pipe1_dr__SHIFT 0x5
19185#define WD_DEBUG_REG10__pipe0_rtr_MASK 0x40
19186#define WD_DEBUG_REG10__pipe0_rtr__SHIFT 0x6
19187#define WD_DEBUG_REG10__pipe1_rtr_MASK 0x80
19188#define WD_DEBUG_REG10__pipe1_rtr__SHIFT 0x7
19189#define WD_DEBUG_REG10__donut_en_p1_q_MASK 0x100
19190#define WD_DEBUG_REG10__donut_en_p1_q__SHIFT 0x8
19191#define WD_DEBUG_REG10__donut_se_switch_p2_MASK 0x200
19192#define WD_DEBUG_REG10__donut_se_switch_p2__SHIFT 0x9
19193#define WD_DEBUG_REG10__patch_se_switch_p2_MASK 0x400
19194#define WD_DEBUG_REG10__patch_se_switch_p2__SHIFT 0xa
19195#define WD_DEBUG_REG10__last_donut_switch_p2_MASK 0x800
19196#define WD_DEBUG_REG10__last_donut_switch_p2__SHIFT 0xb
19197#define WD_DEBUG_REG10__last_donut_of_patch_p2_MASK 0x1000
19198#define WD_DEBUG_REG10__last_donut_of_patch_p2__SHIFT 0xc
19199#define WD_DEBUG_REG10__is_event_p1_q_MASK 0x2000
19200#define WD_DEBUG_REG10__is_event_p1_q__SHIFT 0xd
19201#define WD_DEBUG_REG10__eopg_p1_q_MASK 0x4000
19202#define WD_DEBUG_REG10__eopg_p1_q__SHIFT 0xe
19203#define WD_DEBUG_REG10__eop_p1_q_MASK 0x8000
19204#define WD_DEBUG_REG10__eop_p1_q__SHIFT 0xf
19205#define WD_DEBUG_REG10__patch_accum_q_MASK 0xff0000
19206#define WD_DEBUG_REG10__patch_accum_q__SHIFT 0x10
19207#define WD_DEBUG_REG10__wd_te11_out_se0_fifo_full_MASK 0x1000000
19208#define WD_DEBUG_REG10__wd_te11_out_se0_fifo_full__SHIFT 0x18
19209#define WD_DEBUG_REG10__wd_te11_out_se0_fifo_empty_MASK 0x2000000
19210#define WD_DEBUG_REG10__wd_te11_out_se0_fifo_empty__SHIFT 0x19
19211#define WD_DEBUG_REG10__wd_te11_out_se1_fifo_full_MASK 0x4000000
19212#define WD_DEBUG_REG10__wd_te11_out_se1_fifo_full__SHIFT 0x1a
19213#define WD_DEBUG_REG10__wd_te11_out_se1_fifo_empty_MASK 0x8000000
19214#define WD_DEBUG_REG10__wd_te11_out_se1_fifo_empty__SHIFT 0x1b
19215#define WD_DEBUG_REG10__wd_te11_out_se2_fifo_full_MASK 0x10000000
19216#define WD_DEBUG_REG10__wd_te11_out_se2_fifo_full__SHIFT 0x1c
19217#define WD_DEBUG_REG10__wd_te11_out_se2_fifo_empty_MASK 0x20000000
19218#define WD_DEBUG_REG10__wd_te11_out_se2_fifo_empty__SHIFT 0x1d
19219#define WD_DEBUG_REG10__wd_te11_out_se3_fifo_full_MASK 0x40000000
19220#define WD_DEBUG_REG10__wd_te11_out_se3_fifo_full__SHIFT 0x1e
19221#define WD_DEBUG_REG10__wd_te11_out_se3_fifo_empty_MASK 0x80000000
19222#define WD_DEBUG_REG10__wd_te11_out_se3_fifo_empty__SHIFT 0x1f
19223#define IA_DEBUG_REG0__ia_busy_extended_MASK 0x1
19224#define IA_DEBUG_REG0__ia_busy_extended__SHIFT 0x0
19225#define IA_DEBUG_REG0__ia_nodma_busy_extended_MASK 0x2
19226#define IA_DEBUG_REG0__ia_nodma_busy_extended__SHIFT 0x1
19227#define IA_DEBUG_REG0__ia_busy_MASK 0x4
19228#define IA_DEBUG_REG0__ia_busy__SHIFT 0x2
19229#define IA_DEBUG_REG0__ia_nodma_busy_MASK 0x8
19230#define IA_DEBUG_REG0__ia_nodma_busy__SHIFT 0x3
19231#define IA_DEBUG_REG0__SPARE0_MASK 0x10
19232#define IA_DEBUG_REG0__SPARE0__SHIFT 0x4
19233#define IA_DEBUG_REG0__dma_req_busy_MASK 0x20
19234#define IA_DEBUG_REG0__dma_req_busy__SHIFT 0x5
19235#define IA_DEBUG_REG0__dma_busy_MASK 0x40
19236#define IA_DEBUG_REG0__dma_busy__SHIFT 0x6
19237#define IA_DEBUG_REG0__mc_xl8r_busy_MASK 0x80
19238#define IA_DEBUG_REG0__mc_xl8r_busy__SHIFT 0x7
19239#define IA_DEBUG_REG0__grp_busy_MASK 0x100
19240#define IA_DEBUG_REG0__grp_busy__SHIFT 0x8
19241#define IA_DEBUG_REG0__SPARE1_MASK 0x200
19242#define IA_DEBUG_REG0__SPARE1__SHIFT 0x9
19243#define IA_DEBUG_REG0__dma_grp_valid_MASK 0x400
19244#define IA_DEBUG_REG0__dma_grp_valid__SHIFT 0xa
19245#define IA_DEBUG_REG0__grp_dma_read_MASK 0x800
19246#define IA_DEBUG_REG0__grp_dma_read__SHIFT 0xb
19247#define IA_DEBUG_REG0__dma_grp_hp_valid_MASK 0x1000
19248#define IA_DEBUG_REG0__dma_grp_hp_valid__SHIFT 0xc
19249#define IA_DEBUG_REG0__grp_dma_hp_read_MASK 0x2000
19250#define IA_DEBUG_REG0__grp_dma_hp_read__SHIFT 0xd
19251#define IA_DEBUG_REG0__SPARE2_MASK 0xffc000
19252#define IA_DEBUG_REG0__SPARE2__SHIFT 0xe
19253#define IA_DEBUG_REG0__reg_clk_busy_MASK 0x1000000
19254#define IA_DEBUG_REG0__reg_clk_busy__SHIFT 0x18
19255#define IA_DEBUG_REG0__core_clk_busy_MASK 0x2000000
19256#define IA_DEBUG_REG0__core_clk_busy__SHIFT 0x19
19257#define IA_DEBUG_REG0__SPARE3_MASK 0x4000000
19258#define IA_DEBUG_REG0__SPARE3__SHIFT 0x1a
19259#define IA_DEBUG_REG0__SPARE4_MASK 0x8000000
19260#define IA_DEBUG_REG0__SPARE4__SHIFT 0x1b
19261#define IA_DEBUG_REG0__sclk_reg_vld_MASK 0x10000000
19262#define IA_DEBUG_REG0__sclk_reg_vld__SHIFT 0x1c
19263#define IA_DEBUG_REG0__sclk_core_vld_MASK 0x20000000
19264#define IA_DEBUG_REG0__sclk_core_vld__SHIFT 0x1d
19265#define IA_DEBUG_REG0__SPARE5_MASK 0x40000000
19266#define IA_DEBUG_REG0__SPARE5__SHIFT 0x1e
19267#define IA_DEBUG_REG0__SPARE6_MASK 0x80000000
19268#define IA_DEBUG_REG0__SPARE6__SHIFT 0x1f
19269#define IA_DEBUG_REG1__dma_input_fifo_empty_MASK 0x1
19270#define IA_DEBUG_REG1__dma_input_fifo_empty__SHIFT 0x0
19271#define IA_DEBUG_REG1__dma_input_fifo_full_MASK 0x2
19272#define IA_DEBUG_REG1__dma_input_fifo_full__SHIFT 0x1
19273#define IA_DEBUG_REG1__start_new_packet_MASK 0x4
19274#define IA_DEBUG_REG1__start_new_packet__SHIFT 0x2
19275#define IA_DEBUG_REG1__dma_rdreq_dr_q_MASK 0x8
19276#define IA_DEBUG_REG1__dma_rdreq_dr_q__SHIFT 0x3
19277#define IA_DEBUG_REG1__dma_zero_indices_q_MASK 0x10
19278#define IA_DEBUG_REG1__dma_zero_indices_q__SHIFT 0x4
19279#define IA_DEBUG_REG1__dma_buf_type_q_MASK 0x60
19280#define IA_DEBUG_REG1__dma_buf_type_q__SHIFT 0x5
19281#define IA_DEBUG_REG1__dma_req_path_q_MASK 0x80
19282#define IA_DEBUG_REG1__dma_req_path_q__SHIFT 0x7
19283#define IA_DEBUG_REG1__discard_1st_chunk_MASK 0x100
19284#define IA_DEBUG_REG1__discard_1st_chunk__SHIFT 0x8
19285#define IA_DEBUG_REG1__discard_2nd_chunk_MASK 0x200
19286#define IA_DEBUG_REG1__discard_2nd_chunk__SHIFT 0x9
19287#define IA_DEBUG_REG1__second_tc_ret_data_q_MASK 0x400
19288#define IA_DEBUG_REG1__second_tc_ret_data_q__SHIFT 0xa
19289#define IA_DEBUG_REG1__dma_tc_ret_sel_q_MASK 0x800
19290#define IA_DEBUG_REG1__dma_tc_ret_sel_q__SHIFT 0xb
19291#define IA_DEBUG_REG1__last_rdreq_in_dma_op_MASK 0x1000
19292#define IA_DEBUG_REG1__last_rdreq_in_dma_op__SHIFT 0xc
19293#define IA_DEBUG_REG1__dma_mask_fifo_empty_MASK 0x2000
19294#define IA_DEBUG_REG1__dma_mask_fifo_empty__SHIFT 0xd
19295#define IA_DEBUG_REG1__dma_data_fifo_empty_q_MASK 0x4000
19296#define IA_DEBUG_REG1__dma_data_fifo_empty_q__SHIFT 0xe
19297#define IA_DEBUG_REG1__dma_data_fifo_full_MASK 0x8000
19298#define IA_DEBUG_REG1__dma_data_fifo_full__SHIFT 0xf
19299#define IA_DEBUG_REG1__dma_req_fifo_empty_MASK 0x10000
19300#define IA_DEBUG_REG1__dma_req_fifo_empty__SHIFT 0x10
19301#define IA_DEBUG_REG1__dma_req_fifo_full_MASK 0x20000
19302#define IA_DEBUG_REG1__dma_req_fifo_full__SHIFT 0x11
19303#define IA_DEBUG_REG1__stage2_dr_MASK 0x40000
19304#define IA_DEBUG_REG1__stage2_dr__SHIFT 0x12
19305#define IA_DEBUG_REG1__stage2_rtr_MASK 0x80000
19306#define IA_DEBUG_REG1__stage2_rtr__SHIFT 0x13
19307#define IA_DEBUG_REG1__stage3_dr_MASK 0x100000
19308#define IA_DEBUG_REG1__stage3_dr__SHIFT 0x14
19309#define IA_DEBUG_REG1__stage3_rtr_MASK 0x200000
19310#define IA_DEBUG_REG1__stage3_rtr__SHIFT 0x15
19311#define IA_DEBUG_REG1__stage4_dr_MASK 0x400000
19312#define IA_DEBUG_REG1__stage4_dr__SHIFT 0x16
19313#define IA_DEBUG_REG1__stage4_rtr_MASK 0x800000
19314#define IA_DEBUG_REG1__stage4_rtr__SHIFT 0x17
19315#define IA_DEBUG_REG1__dma_skid_fifo_empty_MASK 0x1000000
19316#define IA_DEBUG_REG1__dma_skid_fifo_empty__SHIFT 0x18
19317#define IA_DEBUG_REG1__dma_skid_fifo_full_MASK 0x2000000
19318#define IA_DEBUG_REG1__dma_skid_fifo_full__SHIFT 0x19
19319#define IA_DEBUG_REG1__dma_grp_valid_MASK 0x4000000
19320#define IA_DEBUG_REG1__dma_grp_valid__SHIFT 0x1a
19321#define IA_DEBUG_REG1__grp_dma_read_MASK 0x8000000
19322#define IA_DEBUG_REG1__grp_dma_read__SHIFT 0x1b
19323#define IA_DEBUG_REG1__current_data_valid_MASK 0x10000000
19324#define IA_DEBUG_REG1__current_data_valid__SHIFT 0x1c
19325#define IA_DEBUG_REG1__out_of_range_r2_q_MASK 0x20000000
19326#define IA_DEBUG_REG1__out_of_range_r2_q__SHIFT 0x1d
19327#define IA_DEBUG_REG1__dma_mask_fifo_we_MASK 0x40000000
19328#define IA_DEBUG_REG1__dma_mask_fifo_we__SHIFT 0x1e
19329#define IA_DEBUG_REG1__dma_ret_data_we_q_MASK 0x80000000
19330#define IA_DEBUG_REG1__dma_ret_data_we_q__SHIFT 0x1f
19331#define IA_DEBUG_REG2__hp_dma_input_fifo_empty_MASK 0x1
19332#define IA_DEBUG_REG2__hp_dma_input_fifo_empty__SHIFT 0x0
19333#define IA_DEBUG_REG2__hp_dma_input_fifo_full_MASK 0x2
19334#define IA_DEBUG_REG2__hp_dma_input_fifo_full__SHIFT 0x1
19335#define IA_DEBUG_REG2__hp_start_new_packet_MASK 0x4
19336#define IA_DEBUG_REG2__hp_start_new_packet__SHIFT 0x2
19337#define IA_DEBUG_REG2__hp_dma_rdreq_dr_q_MASK 0x8
19338#define IA_DEBUG_REG2__hp_dma_rdreq_dr_q__SHIFT 0x3
19339#define IA_DEBUG_REG2__hp_dma_zero_indices_q_MASK 0x10
19340#define IA_DEBUG_REG2__hp_dma_zero_indices_q__SHIFT 0x4
19341#define IA_DEBUG_REG2__hp_dma_buf_type_q_MASK 0x60
19342#define IA_DEBUG_REG2__hp_dma_buf_type_q__SHIFT 0x5
19343#define IA_DEBUG_REG2__hp_dma_req_path_q_MASK 0x80
19344#define IA_DEBUG_REG2__hp_dma_req_path_q__SHIFT 0x7
19345#define IA_DEBUG_REG2__hp_discard_1st_chunk_MASK 0x100
19346#define IA_DEBUG_REG2__hp_discard_1st_chunk__SHIFT 0x8
19347#define IA_DEBUG_REG2__hp_discard_2nd_chunk_MASK 0x200
19348#define IA_DEBUG_REG2__hp_discard_2nd_chunk__SHIFT 0x9
19349#define IA_DEBUG_REG2__hp_second_tc_ret_data_q_MASK 0x400
19350#define IA_DEBUG_REG2__hp_second_tc_ret_data_q__SHIFT 0xa
19351#define IA_DEBUG_REG2__hp_dma_tc_ret_sel_q_MASK 0x800
19352#define IA_DEBUG_REG2__hp_dma_tc_ret_sel_q__SHIFT 0xb
19353#define IA_DEBUG_REG2__hp_last_rdreq_in_dma_op_MASK 0x1000
19354#define IA_DEBUG_REG2__hp_last_rdreq_in_dma_op__SHIFT 0xc
19355#define IA_DEBUG_REG2__hp_dma_mask_fifo_empty_MASK 0x2000
19356#define IA_DEBUG_REG2__hp_dma_mask_fifo_empty__SHIFT 0xd
19357#define IA_DEBUG_REG2__hp_dma_data_fifo_empty_q_MASK 0x4000
19358#define IA_DEBUG_REG2__hp_dma_data_fifo_empty_q__SHIFT 0xe
19359#define IA_DEBUG_REG2__hp_dma_data_fifo_full_MASK 0x8000
19360#define IA_DEBUG_REG2__hp_dma_data_fifo_full__SHIFT 0xf
19361#define IA_DEBUG_REG2__hp_dma_req_fifo_empty_MASK 0x10000
19362#define IA_DEBUG_REG2__hp_dma_req_fifo_empty__SHIFT 0x10
19363#define IA_DEBUG_REG2__hp_dma_req_fifo_full_MASK 0x20000
19364#define IA_DEBUG_REG2__hp_dma_req_fifo_full__SHIFT 0x11
19365#define IA_DEBUG_REG2__hp_stage2_dr_MASK 0x40000
19366#define IA_DEBUG_REG2__hp_stage2_dr__SHIFT 0x12
19367#define IA_DEBUG_REG2__hp_stage2_rtr_MASK 0x80000
19368#define IA_DEBUG_REG2__hp_stage2_rtr__SHIFT 0x13
19369#define IA_DEBUG_REG2__hp_stage3_dr_MASK 0x100000
19370#define IA_DEBUG_REG2__hp_stage3_dr__SHIFT 0x14
19371#define IA_DEBUG_REG2__hp_stage3_rtr_MASK 0x200000
19372#define IA_DEBUG_REG2__hp_stage3_rtr__SHIFT 0x15
19373#define IA_DEBUG_REG2__hp_stage4_dr_MASK 0x400000
19374#define IA_DEBUG_REG2__hp_stage4_dr__SHIFT 0x16
19375#define IA_DEBUG_REG2__hp_stage4_rtr_MASK 0x800000
19376#define IA_DEBUG_REG2__hp_stage4_rtr__SHIFT 0x17
19377#define IA_DEBUG_REG2__hp_dma_skid_fifo_empty_MASK 0x1000000
19378#define IA_DEBUG_REG2__hp_dma_skid_fifo_empty__SHIFT 0x18
19379#define IA_DEBUG_REG2__hp_dma_skid_fifo_full_MASK 0x2000000
19380#define IA_DEBUG_REG2__hp_dma_skid_fifo_full__SHIFT 0x19
19381#define IA_DEBUG_REG2__hp_dma_grp_valid_MASK 0x4000000
19382#define IA_DEBUG_REG2__hp_dma_grp_valid__SHIFT 0x1a
19383#define IA_DEBUG_REG2__hp_grp_dma_read_MASK 0x8000000
19384#define IA_DEBUG_REG2__hp_grp_dma_read__SHIFT 0x1b
19385#define IA_DEBUG_REG2__hp_current_data_valid_MASK 0x10000000
19386#define IA_DEBUG_REG2__hp_current_data_valid__SHIFT 0x1c
19387#define IA_DEBUG_REG2__hp_out_of_range_r2_q_MASK 0x20000000
19388#define IA_DEBUG_REG2__hp_out_of_range_r2_q__SHIFT 0x1d
19389#define IA_DEBUG_REG2__hp_dma_mask_fifo_we_MASK 0x40000000
19390#define IA_DEBUG_REG2__hp_dma_mask_fifo_we__SHIFT 0x1e
19391#define IA_DEBUG_REG2__hp_dma_ret_data_we_q_MASK 0x80000000
19392#define IA_DEBUG_REG2__hp_dma_ret_data_we_q__SHIFT 0x1f
19393#define IA_DEBUG_REG3__dma_pipe0_rdreq_valid_MASK 0x1
19394#define IA_DEBUG_REG3__dma_pipe0_rdreq_valid__SHIFT 0x0
19395#define IA_DEBUG_REG3__dma_pipe0_rdreq_read_MASK 0x2
19396#define IA_DEBUG_REG3__dma_pipe0_rdreq_read__SHIFT 0x1
19397#define IA_DEBUG_REG3__dma_pipe0_rdreq_null_out_MASK 0x4
19398#define IA_DEBUG_REG3__dma_pipe0_rdreq_null_out__SHIFT 0x2
19399#define IA_DEBUG_REG3__dma_pipe0_rdreq_eop_out_MASK 0x8
19400#define IA_DEBUG_REG3__dma_pipe0_rdreq_eop_out__SHIFT 0x3
19401#define IA_DEBUG_REG3__dma_pipe0_rdreq_use_tc_out_MASK 0x10
19402#define IA_DEBUG_REG3__dma_pipe0_rdreq_use_tc_out__SHIFT 0x4
19403#define IA_DEBUG_REG3__grp_dma_draw_is_pipe0_MASK 0x20
19404#define IA_DEBUG_REG3__grp_dma_draw_is_pipe0__SHIFT 0x5
19405#define IA_DEBUG_REG3__must_service_pipe0_req_MASK 0x40
19406#define IA_DEBUG_REG3__must_service_pipe0_req__SHIFT 0x6
19407#define IA_DEBUG_REG3__send_pipe1_req_MASK 0x80
19408#define IA_DEBUG_REG3__send_pipe1_req__SHIFT 0x7
19409#define IA_DEBUG_REG3__dma_pipe1_rdreq_valid_MASK 0x100
19410#define IA_DEBUG_REG3__dma_pipe1_rdreq_valid__SHIFT 0x8
19411#define IA_DEBUG_REG3__dma_pipe1_rdreq_read_MASK 0x200
19412#define IA_DEBUG_REG3__dma_pipe1_rdreq_read__SHIFT 0x9
19413#define IA_DEBUG_REG3__dma_pipe1_rdreq_null_out_MASK 0x400
19414#define IA_DEBUG_REG3__dma_pipe1_rdreq_null_out__SHIFT 0xa
19415#define IA_DEBUG_REG3__dma_pipe1_rdreq_eop_out_MASK 0x800
19416#define IA_DEBUG_REG3__dma_pipe1_rdreq_eop_out__SHIFT 0xb
19417#define IA_DEBUG_REG3__dma_pipe1_rdreq_use_tc_out_MASK 0x1000
19418#define IA_DEBUG_REG3__dma_pipe1_rdreq_use_tc_out__SHIFT 0xc
19419#define IA_DEBUG_REG3__ia_mc_rdreq_rtr_q_MASK 0x2000
19420#define IA_DEBUG_REG3__ia_mc_rdreq_rtr_q__SHIFT 0xd
19421#define IA_DEBUG_REG3__mc_out_rtr_MASK 0x4000
19422#define IA_DEBUG_REG3__mc_out_rtr__SHIFT 0xe
19423#define IA_DEBUG_REG3__dma_rdreq_send_out_MASK 0x8000
19424#define IA_DEBUG_REG3__dma_rdreq_send_out__SHIFT 0xf
19425#define IA_DEBUG_REG3__pipe0_dr_MASK 0x10000
19426#define IA_DEBUG_REG3__pipe0_dr__SHIFT 0x10
19427#define IA_DEBUG_REG3__pipe0_rtr_MASK 0x20000
19428#define IA_DEBUG_REG3__pipe0_rtr__SHIFT 0x11
19429#define IA_DEBUG_REG3__ia_tc_rdreq_rtr_q_MASK 0x40000
19430#define IA_DEBUG_REG3__ia_tc_rdreq_rtr_q__SHIFT 0x12
19431#define IA_DEBUG_REG3__tc_out_rtr_MASK 0x80000
19432#define IA_DEBUG_REG3__tc_out_rtr__SHIFT 0x13
19433#define IA_DEBUG_REG3__pair0_valid_p1_MASK 0x100000
19434#define IA_DEBUG_REG3__pair0_valid_p1__SHIFT 0x14
19435#define IA_DEBUG_REG3__pair1_valid_p1_MASK 0x200000
19436#define IA_DEBUG_REG3__pair1_valid_p1__SHIFT 0x15
19437#define IA_DEBUG_REG3__pair2_valid_p1_MASK 0x400000
19438#define IA_DEBUG_REG3__pair2_valid_p1__SHIFT 0x16
19439#define IA_DEBUG_REG3__pair3_valid_p1_MASK 0x800000
19440#define IA_DEBUG_REG3__pair3_valid_p1__SHIFT 0x17
19441#define IA_DEBUG_REG3__tc_req_count_q_MASK 0x3000000
19442#define IA_DEBUG_REG3__tc_req_count_q__SHIFT 0x18
19443#define IA_DEBUG_REG3__discard_1st_chunk_MASK 0x4000000
19444#define IA_DEBUG_REG3__discard_1st_chunk__SHIFT 0x1a
19445#define IA_DEBUG_REG3__discard_2nd_chunk_MASK 0x8000000
19446#define IA_DEBUG_REG3__discard_2nd_chunk__SHIFT 0x1b
19447#define IA_DEBUG_REG3__last_tc_req_p1_MASK 0x10000000
19448#define IA_DEBUG_REG3__last_tc_req_p1__SHIFT 0x1c
19449#define IA_DEBUG_REG3__IA_TC_rdreq_send_out_MASK 0x20000000
19450#define IA_DEBUG_REG3__IA_TC_rdreq_send_out__SHIFT 0x1d
19451#define IA_DEBUG_REG3__TC_IA_rdret_valid_in_MASK 0x40000000
19452#define IA_DEBUG_REG3__TC_IA_rdret_valid_in__SHIFT 0x1e
19453#define IA_DEBUG_REG3__TAP_IA_rdret_vld_in_MASK 0x80000000
19454#define IA_DEBUG_REG3__TAP_IA_rdret_vld_in__SHIFT 0x1f
19455#define IA_DEBUG_REG4__pipe0_dr_MASK 0x1
19456#define IA_DEBUG_REG4__pipe0_dr__SHIFT 0x0
19457#define IA_DEBUG_REG4__pipe1_dr_MASK 0x2
19458#define IA_DEBUG_REG4__pipe1_dr__SHIFT 0x1
19459#define IA_DEBUG_REG4__pipe2_dr_MASK 0x4
19460#define IA_DEBUG_REG4__pipe2_dr__SHIFT 0x2
19461#define IA_DEBUG_REG4__pipe3_dr_MASK 0x8
19462#define IA_DEBUG_REG4__pipe3_dr__SHIFT 0x3
19463#define IA_DEBUG_REG4__pipe4_dr_MASK 0x10
19464#define IA_DEBUG_REG4__pipe4_dr__SHIFT 0x4
19465#define IA_DEBUG_REG4__pipe5_dr_MASK 0x20
19466#define IA_DEBUG_REG4__pipe5_dr__SHIFT 0x5
19467#define IA_DEBUG_REG4__grp_se0_fifo_empty_MASK 0x40
19468#define IA_DEBUG_REG4__grp_se0_fifo_empty__SHIFT 0x6
19469#define IA_DEBUG_REG4__grp_se0_fifo_full_MASK 0x80
19470#define IA_DEBUG_REG4__grp_se0_fifo_full__SHIFT 0x7
19471#define IA_DEBUG_REG4__pipe0_rtr_MASK 0x100
19472#define IA_DEBUG_REG4__pipe0_rtr__SHIFT 0x8
19473#define IA_DEBUG_REG4__pipe1_rtr_MASK 0x200
19474#define IA_DEBUG_REG4__pipe1_rtr__SHIFT 0x9
19475#define IA_DEBUG_REG4__pipe2_rtr_MASK 0x400
19476#define IA_DEBUG_REG4__pipe2_rtr__SHIFT 0xa
19477#define IA_DEBUG_REG4__pipe3_rtr_MASK 0x800
19478#define IA_DEBUG_REG4__pipe3_rtr__SHIFT 0xb
19479#define IA_DEBUG_REG4__pipe4_rtr_MASK 0x1000
19480#define IA_DEBUG_REG4__pipe4_rtr__SHIFT 0xc
19481#define IA_DEBUG_REG4__pipe5_rtr_MASK 0x2000
19482#define IA_DEBUG_REG4__pipe5_rtr__SHIFT 0xd
19483#define IA_DEBUG_REG4__ia_vgt_prim_rtr_q_MASK 0x4000
19484#define IA_DEBUG_REG4__ia_vgt_prim_rtr_q__SHIFT 0xe
19485#define IA_DEBUG_REG4__ia_se1vgt_prim_rtr_q_MASK 0x8000
19486#define IA_DEBUG_REG4__ia_se1vgt_prim_rtr_q__SHIFT 0xf
19487#define IA_DEBUG_REG4__di_major_mode_p1_q_MASK 0x10000
19488#define IA_DEBUG_REG4__di_major_mode_p1_q__SHIFT 0x10
19489#define IA_DEBUG_REG4__gs_mode_p1_q_MASK 0xe0000
19490#define IA_DEBUG_REG4__gs_mode_p1_q__SHIFT 0x11
19491#define IA_DEBUG_REG4__di_event_flag_p1_q_MASK 0x100000
19492#define IA_DEBUG_REG4__di_event_flag_p1_q__SHIFT 0x14
19493#define IA_DEBUG_REG4__di_state_sel_p1_q_MASK 0xe00000
19494#define IA_DEBUG_REG4__di_state_sel_p1_q__SHIFT 0x15
19495#define IA_DEBUG_REG4__draw_opaq_en_p1_q_MASK 0x1000000
19496#define IA_DEBUG_REG4__draw_opaq_en_p1_q__SHIFT 0x18
19497#define IA_DEBUG_REG4__draw_opaq_active_q_MASK 0x2000000
19498#define IA_DEBUG_REG4__draw_opaq_active_q__SHIFT 0x19
19499#define IA_DEBUG_REG4__di_source_select_p1_q_MASK 0xc000000
19500#define IA_DEBUG_REG4__di_source_select_p1_q__SHIFT 0x1a
19501#define IA_DEBUG_REG4__ready_to_read_di_MASK 0x10000000
19502#define IA_DEBUG_REG4__ready_to_read_di__SHIFT 0x1c
19503#define IA_DEBUG_REG4__di_first_group_of_draw_q_MASK 0x20000000
19504#define IA_DEBUG_REG4__di_first_group_of_draw_q__SHIFT 0x1d
19505#define IA_DEBUG_REG4__last_shift_of_draw_MASK 0x40000000
19506#define IA_DEBUG_REG4__last_shift_of_draw__SHIFT 0x1e
19507#define IA_DEBUG_REG4__current_shift_is_vect1_q_MASK 0x80000000
19508#define IA_DEBUG_REG4__current_shift_is_vect1_q__SHIFT 0x1f
19509#define IA_DEBUG_REG5__di_index_counter_q_15_0_MASK 0xffff
19510#define IA_DEBUG_REG5__di_index_counter_q_15_0__SHIFT 0x0
19511#define IA_DEBUG_REG5__instanceid_13_0_MASK 0x3fff0000
19512#define IA_DEBUG_REG5__instanceid_13_0__SHIFT 0x10
19513#define IA_DEBUG_REG5__draw_input_fifo_full_MASK 0x40000000
19514#define IA_DEBUG_REG5__draw_input_fifo_full__SHIFT 0x1e
19515#define IA_DEBUG_REG5__draw_input_fifo_empty_MASK 0x80000000
19516#define IA_DEBUG_REG5__draw_input_fifo_empty__SHIFT 0x1f
19517#define IA_DEBUG_REG6__current_shift_q_MASK 0xf
19518#define IA_DEBUG_REG6__current_shift_q__SHIFT 0x0
19519#define IA_DEBUG_REG6__current_stride_pre_MASK 0xf0
19520#define IA_DEBUG_REG6__current_stride_pre__SHIFT 0x4
19521#define IA_DEBUG_REG6__current_stride_q_MASK 0x1f00
19522#define IA_DEBUG_REG6__current_stride_q__SHIFT 0x8
19523#define IA_DEBUG_REG6__first_group_partial_MASK 0x2000
19524#define IA_DEBUG_REG6__first_group_partial__SHIFT 0xd
19525#define IA_DEBUG_REG6__second_group_partial_MASK 0x4000
19526#define IA_DEBUG_REG6__second_group_partial__SHIFT 0xe
19527#define IA_DEBUG_REG6__curr_prim_partial_MASK 0x8000
19528#define IA_DEBUG_REG6__curr_prim_partial__SHIFT 0xf
19529#define IA_DEBUG_REG6__next_stride_q_MASK 0x1f0000
19530#define IA_DEBUG_REG6__next_stride_q__SHIFT 0x10
19531#define IA_DEBUG_REG6__next_group_partial_MASK 0x200000
19532#define IA_DEBUG_REG6__next_group_partial__SHIFT 0x15
19533#define IA_DEBUG_REG6__after_group_partial_MASK 0x400000
19534#define IA_DEBUG_REG6__after_group_partial__SHIFT 0x16
19535#define IA_DEBUG_REG6__extract_group_MASK 0x800000
19536#define IA_DEBUG_REG6__extract_group__SHIFT 0x17
19537#define IA_DEBUG_REG6__grp_shift_debug_data_MASK 0xff000000
19538#define IA_DEBUG_REG6__grp_shift_debug_data__SHIFT 0x18
19539#define IA_DEBUG_REG7__reset_indx_state_q_MASK 0xf
19540#define IA_DEBUG_REG7__reset_indx_state_q__SHIFT 0x0
19541#define IA_DEBUG_REG7__shift_vect_valid_p2_q_MASK 0xf0
19542#define IA_DEBUG_REG7__shift_vect_valid_p2_q__SHIFT 0x4
19543#define IA_DEBUG_REG7__shift_vect1_valid_p2_q_MASK 0xf00
19544#define IA_DEBUG_REG7__shift_vect1_valid_p2_q__SHIFT 0x8
19545#define IA_DEBUG_REG7__shift_vect0_reset_match_p2_q_MASK 0xf000
19546#define IA_DEBUG_REG7__shift_vect0_reset_match_p2_q__SHIFT 0xc
19547#define IA_DEBUG_REG7__shift_vect1_reset_match_p2_q_MASK 0xf0000
19548#define IA_DEBUG_REG7__shift_vect1_reset_match_p2_q__SHIFT 0x10
19549#define IA_DEBUG_REG7__num_indx_in_group_p2_q_MASK 0x700000
19550#define IA_DEBUG_REG7__num_indx_in_group_p2_q__SHIFT 0x14
19551#define IA_DEBUG_REG7__last_group_of_draw_p2_q_MASK 0x800000
19552#define IA_DEBUG_REG7__last_group_of_draw_p2_q__SHIFT 0x17
19553#define IA_DEBUG_REG7__shift_event_flag_p2_q_MASK 0x1000000
19554#define IA_DEBUG_REG7__shift_event_flag_p2_q__SHIFT 0x18
19555#define IA_DEBUG_REG7__indx_shift_is_one_p2_q_MASK 0x2000000
19556#define IA_DEBUG_REG7__indx_shift_is_one_p2_q__SHIFT 0x19
19557#define IA_DEBUG_REG7__indx_shift_is_two_p2_q_MASK 0x4000000
19558#define IA_DEBUG_REG7__indx_shift_is_two_p2_q__SHIFT 0x1a
19559#define IA_DEBUG_REG7__indx_stride_is_four_p2_q_MASK 0x8000000
19560#define IA_DEBUG_REG7__indx_stride_is_four_p2_q__SHIFT 0x1b
19561#define IA_DEBUG_REG7__shift_prim1_reset_p3_q_MASK 0x10000000
19562#define IA_DEBUG_REG7__shift_prim1_reset_p3_q__SHIFT 0x1c
19563#define IA_DEBUG_REG7__shift_prim1_partial_p3_q_MASK 0x20000000
19564#define IA_DEBUG_REG7__shift_prim1_partial_p3_q__SHIFT 0x1d
19565#define IA_DEBUG_REG7__shift_prim0_reset_p3_q_MASK 0x40000000
19566#define IA_DEBUG_REG7__shift_prim0_reset_p3_q__SHIFT 0x1e
19567#define IA_DEBUG_REG7__shift_prim0_partial_p3_q_MASK 0x80000000
19568#define IA_DEBUG_REG7__shift_prim0_partial_p3_q__SHIFT 0x1f
19569#define IA_DEBUG_REG8__di_prim_type_p1_q_MASK 0x1f
19570#define IA_DEBUG_REG8__di_prim_type_p1_q__SHIFT 0x0
19571#define IA_DEBUG_REG8__two_cycle_xfer_p1_q_MASK 0x20
19572#define IA_DEBUG_REG8__two_cycle_xfer_p1_q__SHIFT 0x5
19573#define IA_DEBUG_REG8__two_prim_input_p1_q_MASK 0x40
19574#define IA_DEBUG_REG8__two_prim_input_p1_q__SHIFT 0x6
19575#define IA_DEBUG_REG8__shift_vect_end_of_packet_p5_q_MASK 0x80
19576#define IA_DEBUG_REG8__shift_vect_end_of_packet_p5_q__SHIFT 0x7
19577#define IA_DEBUG_REG8__last_group_of_inst_p5_q_MASK 0x100
19578#define IA_DEBUG_REG8__last_group_of_inst_p5_q__SHIFT 0x8
19579#define IA_DEBUG_REG8__shift_prim1_null_flag_p5_q_MASK 0x200
19580#define IA_DEBUG_REG8__shift_prim1_null_flag_p5_q__SHIFT 0x9
19581#define IA_DEBUG_REG8__shift_prim0_null_flag_p5_q_MASK 0x400
19582#define IA_DEBUG_REG8__shift_prim0_null_flag_p5_q__SHIFT 0xa
19583#define IA_DEBUG_REG8__grp_continued_MASK 0x800
19584#define IA_DEBUG_REG8__grp_continued__SHIFT 0xb
19585#define IA_DEBUG_REG8__grp_state_sel_MASK 0x7000
19586#define IA_DEBUG_REG8__grp_state_sel__SHIFT 0xc
19587#define IA_DEBUG_REG8__grp_sub_prim_type_MASK 0x1f8000
19588#define IA_DEBUG_REG8__grp_sub_prim_type__SHIFT 0xf
19589#define IA_DEBUG_REG8__grp_output_path_MASK 0xe00000
19590#define IA_DEBUG_REG8__grp_output_path__SHIFT 0x15
19591#define IA_DEBUG_REG8__grp_null_primitive_MASK 0x1000000
19592#define IA_DEBUG_REG8__grp_null_primitive__SHIFT 0x18
19593#define IA_DEBUG_REG8__grp_eop_MASK 0x2000000
19594#define IA_DEBUG_REG8__grp_eop__SHIFT 0x19
19595#define IA_DEBUG_REG8__grp_eopg_MASK 0x4000000
19596#define IA_DEBUG_REG8__grp_eopg__SHIFT 0x1a
19597#define IA_DEBUG_REG8__grp_event_flag_MASK 0x8000000
19598#define IA_DEBUG_REG8__grp_event_flag__SHIFT 0x1b
19599#define IA_DEBUG_REG8__grp_components_valid_MASK 0xf0000000
19600#define IA_DEBUG_REG8__grp_components_valid__SHIFT 0x1c
19601#define IA_DEBUG_REG9__send_to_se1_p6_MASK 0x1
19602#define IA_DEBUG_REG9__send_to_se1_p6__SHIFT 0x0
19603#define IA_DEBUG_REG9__gfx_se_switch_p6_MASK 0x2
19604#define IA_DEBUG_REG9__gfx_se_switch_p6__SHIFT 0x1
19605#define IA_DEBUG_REG9__null_eoi_xfer_prim1_p6_MASK 0x4
19606#define IA_DEBUG_REG9__null_eoi_xfer_prim1_p6__SHIFT 0x2
19607#define IA_DEBUG_REG9__null_eoi_xfer_prim0_p6_MASK 0x8
19608#define IA_DEBUG_REG9__null_eoi_xfer_prim0_p6__SHIFT 0x3
19609#define IA_DEBUG_REG9__prim1_eoi_p6_MASK 0x10
19610#define IA_DEBUG_REG9__prim1_eoi_p6__SHIFT 0x4
19611#define IA_DEBUG_REG9__prim0_eoi_p6_MASK 0x20
19612#define IA_DEBUG_REG9__prim0_eoi_p6__SHIFT 0x5
19613#define IA_DEBUG_REG9__prim1_valid_eopg_p6_MASK 0x40
19614#define IA_DEBUG_REG9__prim1_valid_eopg_p6__SHIFT 0x6
19615#define IA_DEBUG_REG9__prim0_valid_eopg_p6_MASK 0x80
19616#define IA_DEBUG_REG9__prim0_valid_eopg_p6__SHIFT 0x7
19617#define IA_DEBUG_REG9__prim1_to_other_se_p6_MASK 0x100
19618#define IA_DEBUG_REG9__prim1_to_other_se_p6__SHIFT 0x8
19619#define IA_DEBUG_REG9__eopg_on_last_prim_p6_MASK 0x200
19620#define IA_DEBUG_REG9__eopg_on_last_prim_p6__SHIFT 0x9
19621#define IA_DEBUG_REG9__eopg_between_prims_p6_MASK 0x400
19622#define IA_DEBUG_REG9__eopg_between_prims_p6__SHIFT 0xa
19623#define IA_DEBUG_REG9__prim_count_eq_group_size_p6_MASK 0x800
19624#define IA_DEBUG_REG9__prim_count_eq_group_size_p6__SHIFT 0xb
19625#define IA_DEBUG_REG9__prim_count_gt_group_size_p6_MASK 0x1000
19626#define IA_DEBUG_REG9__prim_count_gt_group_size_p6__SHIFT 0xc
19627#define IA_DEBUG_REG9__two_prim_output_p5_q_MASK 0x2000
19628#define IA_DEBUG_REG9__two_prim_output_p5_q__SHIFT 0xd
19629#define IA_DEBUG_REG9__SPARE0_MASK 0x4000
19630#define IA_DEBUG_REG9__SPARE0__SHIFT 0xe
19631#define IA_DEBUG_REG9__SPARE1_MASK 0x8000
19632#define IA_DEBUG_REG9__SPARE1__SHIFT 0xf
19633#define IA_DEBUG_REG9__shift_vect_end_of_packet_p5_q_MASK 0x10000
19634#define IA_DEBUG_REG9__shift_vect_end_of_packet_p5_q__SHIFT 0x10
19635#define IA_DEBUG_REG9__prim1_xfer_p6_MASK 0x20000
19636#define IA_DEBUG_REG9__prim1_xfer_p6__SHIFT 0x11
19637#define IA_DEBUG_REG9__grp_se1_fifo_empty_MASK 0x40000
19638#define IA_DEBUG_REG9__grp_se1_fifo_empty__SHIFT 0x12
19639#define IA_DEBUG_REG9__grp_se1_fifo_full_MASK 0x80000
19640#define IA_DEBUG_REG9__grp_se1_fifo_full__SHIFT 0x13
19641#define IA_DEBUG_REG9__prim_counter_q_MASK 0xfff00000
19642#define IA_DEBUG_REG9__prim_counter_q__SHIFT 0x14
19643#define VGT_DEBUG_REG0__vgt_busy_extended_MASK 0x1
19644#define VGT_DEBUG_REG0__vgt_busy_extended__SHIFT 0x0
19645#define VGT_DEBUG_REG0__SPARE9_MASK 0x2
19646#define VGT_DEBUG_REG0__SPARE9__SHIFT 0x1
19647#define VGT_DEBUG_REG0__vgt_busy_MASK 0x4
19648#define VGT_DEBUG_REG0__vgt_busy__SHIFT 0x2
19649#define VGT_DEBUG_REG0__SPARE8_MASK 0x8
19650#define VGT_DEBUG_REG0__SPARE8__SHIFT 0x3
19651#define VGT_DEBUG_REG0__SPARE7_MASK 0x10
19652#define VGT_DEBUG_REG0__SPARE7__SHIFT 0x4
19653#define VGT_DEBUG_REG0__SPARE6_MASK 0x20
19654#define VGT_DEBUG_REG0__SPARE6__SHIFT 0x5
19655#define VGT_DEBUG_REG0__SPARE5_MASK 0x40
19656#define VGT_DEBUG_REG0__SPARE5__SHIFT 0x6
19657#define VGT_DEBUG_REG0__SPARE4_MASK 0x80
19658#define VGT_DEBUG_REG0__SPARE4__SHIFT 0x7
19659#define VGT_DEBUG_REG0__pi_busy_MASK 0x100
19660#define VGT_DEBUG_REG0__pi_busy__SHIFT 0x8
19661#define VGT_DEBUG_REG0__vr_pi_busy_MASK 0x200
19662#define VGT_DEBUG_REG0__vr_pi_busy__SHIFT 0x9
19663#define VGT_DEBUG_REG0__pt_pi_busy_MASK 0x400
19664#define VGT_DEBUG_REG0__pt_pi_busy__SHIFT 0xa
19665#define VGT_DEBUG_REG0__te_pi_busy_MASK 0x800
19666#define VGT_DEBUG_REG0__te_pi_busy__SHIFT 0xb
19667#define VGT_DEBUG_REG0__gs_busy_MASK 0x1000
19668#define VGT_DEBUG_REG0__gs_busy__SHIFT 0xc
19669#define VGT_DEBUG_REG0__rcm_busy_MASK 0x2000
19670#define VGT_DEBUG_REG0__rcm_busy__SHIFT 0xd
19671#define VGT_DEBUG_REG0__tm_busy_MASK 0x4000
19672#define VGT_DEBUG_REG0__tm_busy__SHIFT 0xe
19673#define VGT_DEBUG_REG0__cm_busy_MASK 0x8000
19674#define VGT_DEBUG_REG0__cm_busy__SHIFT 0xf
19675#define VGT_DEBUG_REG0__gog_busy_MASK 0x10000
19676#define VGT_DEBUG_REG0__gog_busy__SHIFT 0x10
19677#define VGT_DEBUG_REG0__frmt_busy_MASK 0x20000
19678#define VGT_DEBUG_REG0__frmt_busy__SHIFT 0x11
19679#define VGT_DEBUG_REG0__SPARE10_MASK 0x40000
19680#define VGT_DEBUG_REG0__SPARE10__SHIFT 0x12
19681#define VGT_DEBUG_REG0__te11_pi_busy_MASK 0x80000
19682#define VGT_DEBUG_REG0__te11_pi_busy__SHIFT 0x13
19683#define VGT_DEBUG_REG0__SPARE3_MASK 0x100000
19684#define VGT_DEBUG_REG0__SPARE3__SHIFT 0x14
19685#define VGT_DEBUG_REG0__combined_out_busy_MASK 0x200000
19686#define VGT_DEBUG_REG0__combined_out_busy__SHIFT 0x15
19687#define VGT_DEBUG_REG0__spi_vs_interfaces_busy_MASK 0x400000
19688#define VGT_DEBUG_REG0__spi_vs_interfaces_busy__SHIFT 0x16
19689#define VGT_DEBUG_REG0__pa_interfaces_busy_MASK 0x800000
19690#define VGT_DEBUG_REG0__pa_interfaces_busy__SHIFT 0x17
19691#define VGT_DEBUG_REG0__reg_clk_busy_MASK 0x1000000
19692#define VGT_DEBUG_REG0__reg_clk_busy__SHIFT 0x18
19693#define VGT_DEBUG_REG0__SPARE2_MASK 0x2000000
19694#define VGT_DEBUG_REG0__SPARE2__SHIFT 0x19
19695#define VGT_DEBUG_REG0__core_clk_busy_MASK 0x4000000
19696#define VGT_DEBUG_REG0__core_clk_busy__SHIFT 0x1a
19697#define VGT_DEBUG_REG0__gs_clk_busy_MASK 0x8000000
19698#define VGT_DEBUG_REG0__gs_clk_busy__SHIFT 0x1b
19699#define VGT_DEBUG_REG0__SPARE1_MASK 0x10000000
19700#define VGT_DEBUG_REG0__SPARE1__SHIFT 0x1c
19701#define VGT_DEBUG_REG0__sclk_core_vld_MASK 0x20000000
19702#define VGT_DEBUG_REG0__sclk_core_vld__SHIFT 0x1d
19703#define VGT_DEBUG_REG0__sclk_gs_vld_MASK 0x40000000
19704#define VGT_DEBUG_REG0__sclk_gs_vld__SHIFT 0x1e
19705#define VGT_DEBUG_REG0__SPARE0_MASK 0x80000000
19706#define VGT_DEBUG_REG0__SPARE0__SHIFT 0x1f
19707#define VGT_DEBUG_REG1__SPARE9_MASK 0x1
19708#define VGT_DEBUG_REG1__SPARE9__SHIFT 0x0
19709#define VGT_DEBUG_REG1__SPARE8_MASK 0x2
19710#define VGT_DEBUG_REG1__SPARE8__SHIFT 0x1
19711#define VGT_DEBUG_REG1__SPARE7_MASK 0x4
19712#define VGT_DEBUG_REG1__SPARE7__SHIFT 0x2
19713#define VGT_DEBUG_REG1__SPARE6_MASK 0x8
19714#define VGT_DEBUG_REG1__SPARE6__SHIFT 0x3
19715#define VGT_DEBUG_REG1__SPARE5_MASK 0x10
19716#define VGT_DEBUG_REG1__SPARE5__SHIFT 0x4
19717#define VGT_DEBUG_REG1__SPARE4_MASK 0x20
19718#define VGT_DEBUG_REG1__SPARE4__SHIFT 0x5
19719#define VGT_DEBUG_REG1__SPARE3_MASK 0x40
19720#define VGT_DEBUG_REG1__SPARE3__SHIFT 0x6
19721#define VGT_DEBUG_REG1__SPARE2_MASK 0x80
19722#define VGT_DEBUG_REG1__SPARE2__SHIFT 0x7
19723#define VGT_DEBUG_REG1__SPARE1_MASK 0x100
19724#define VGT_DEBUG_REG1__SPARE1__SHIFT 0x8
19725#define VGT_DEBUG_REG1__SPARE0_MASK 0x200
19726#define VGT_DEBUG_REG1__SPARE0__SHIFT 0x9
19727#define VGT_DEBUG_REG1__pi_vr_valid_MASK 0x400
19728#define VGT_DEBUG_REG1__pi_vr_valid__SHIFT 0xa
19729#define VGT_DEBUG_REG1__vr_pi_read_MASK 0x800
19730#define VGT_DEBUG_REG1__vr_pi_read__SHIFT 0xb
19731#define VGT_DEBUG_REG1__pi_pt_valid_MASK 0x1000
19732#define VGT_DEBUG_REG1__pi_pt_valid__SHIFT 0xc
19733#define VGT_DEBUG_REG1__pt_pi_read_MASK 0x2000
19734#define VGT_DEBUG_REG1__pt_pi_read__SHIFT 0xd
19735#define VGT_DEBUG_REG1__pi_te_valid_MASK 0x4000
19736#define VGT_DEBUG_REG1__pi_te_valid__SHIFT 0xe
19737#define VGT_DEBUG_REG1__te_grp_read_MASK 0x8000
19738#define VGT_DEBUG_REG1__te_grp_read__SHIFT 0xf
19739#define VGT_DEBUG_REG1__vr_out_indx_valid_MASK 0x10000
19740#define VGT_DEBUG_REG1__vr_out_indx_valid__SHIFT 0x10
19741#define VGT_DEBUG_REG1__SPARE12_MASK 0x20000
19742#define VGT_DEBUG_REG1__SPARE12__SHIFT 0x11
19743#define VGT_DEBUG_REG1__vr_out_prim_valid_MASK 0x40000
19744#define VGT_DEBUG_REG1__vr_out_prim_valid__SHIFT 0x12
19745#define VGT_DEBUG_REG1__SPARE11_MASK 0x80000
19746#define VGT_DEBUG_REG1__SPARE11__SHIFT 0x13
19747#define VGT_DEBUG_REG1__pt_out_indx_valid_MASK 0x100000
19748#define VGT_DEBUG_REG1__pt_out_indx_valid__SHIFT 0x14
19749#define VGT_DEBUG_REG1__SPARE10_MASK 0x200000
19750#define VGT_DEBUG_REG1__SPARE10__SHIFT 0x15
19751#define VGT_DEBUG_REG1__pt_out_prim_valid_MASK 0x400000
19752#define VGT_DEBUG_REG1__pt_out_prim_valid__SHIFT 0x16
19753#define VGT_DEBUG_REG1__SPARE23_MASK 0x800000
19754#define VGT_DEBUG_REG1__SPARE23__SHIFT 0x17
19755#define VGT_DEBUG_REG1__te_out_data_valid_MASK 0x1000000
19756#define VGT_DEBUG_REG1__te_out_data_valid__SHIFT 0x18
19757#define VGT_DEBUG_REG1__SPARE25_MASK 0x2000000
19758#define VGT_DEBUG_REG1__SPARE25__SHIFT 0x19
19759#define VGT_DEBUG_REG1__pi_gs_valid_MASK 0x4000000
19760#define VGT_DEBUG_REG1__pi_gs_valid__SHIFT 0x1a
19761#define VGT_DEBUG_REG1__gs_pi_read_MASK 0x8000000
19762#define VGT_DEBUG_REG1__gs_pi_read__SHIFT 0x1b
19763#define VGT_DEBUG_REG1__gog_out_indx_valid_MASK 0x10000000
19764#define VGT_DEBUG_REG1__gog_out_indx_valid__SHIFT 0x1c
19765#define VGT_DEBUG_REG1__out_indx_read_MASK 0x20000000
19766#define VGT_DEBUG_REG1__out_indx_read__SHIFT 0x1d
19767#define VGT_DEBUG_REG1__gog_out_prim_valid_MASK 0x40000000
19768#define VGT_DEBUG_REG1__gog_out_prim_valid__SHIFT 0x1e
19769#define VGT_DEBUG_REG1__out_prim_read_MASK 0x80000000
19770#define VGT_DEBUG_REG1__out_prim_read__SHIFT 0x1f
19771#define VGT_DEBUG_REG2__hs_grp_busy_MASK 0x1
19772#define VGT_DEBUG_REG2__hs_grp_busy__SHIFT 0x0
19773#define VGT_DEBUG_REG2__hs_noif_busy_MASK 0x2
19774#define VGT_DEBUG_REG2__hs_noif_busy__SHIFT 0x1
19775#define VGT_DEBUG_REG2__tfmmIsBusy_MASK 0x4
19776#define VGT_DEBUG_REG2__tfmmIsBusy__SHIFT 0x2
19777#define VGT_DEBUG_REG2__lsVertIfBusy_0_MASK 0x8
19778#define VGT_DEBUG_REG2__lsVertIfBusy_0__SHIFT 0x3
19779#define VGT_DEBUG_REG2__te11_hs_tess_input_rtr_MASK 0x10
19780#define VGT_DEBUG_REG2__te11_hs_tess_input_rtr__SHIFT 0x4
19781#define VGT_DEBUG_REG2__lsWaveIfBusy_0_MASK 0x20
19782#define VGT_DEBUG_REG2__lsWaveIfBusy_0__SHIFT 0x5
19783#define VGT_DEBUG_REG2__hs_te11_tess_input_rts_MASK 0x40
19784#define VGT_DEBUG_REG2__hs_te11_tess_input_rts__SHIFT 0x6
19785#define VGT_DEBUG_REG2__grpModBusy_MASK 0x80
19786#define VGT_DEBUG_REG2__grpModBusy__SHIFT 0x7
19787#define VGT_DEBUG_REG2__lsVertFifoEmpty_MASK 0x100
19788#define VGT_DEBUG_REG2__lsVertFifoEmpty__SHIFT 0x8
19789#define VGT_DEBUG_REG2__lsWaveFifoEmpty_MASK 0x200
19790#define VGT_DEBUG_REG2__lsWaveFifoEmpty__SHIFT 0x9
19791#define VGT_DEBUG_REG2__hsVertFifoEmpty_MASK 0x400
19792#define VGT_DEBUG_REG2__hsVertFifoEmpty__SHIFT 0xa
19793#define VGT_DEBUG_REG2__hsWaveFifoEmpty_MASK 0x800
19794#define VGT_DEBUG_REG2__hsWaveFifoEmpty__SHIFT 0xb
19795#define VGT_DEBUG_REG2__hsInputFifoEmpty_MASK 0x1000
19796#define VGT_DEBUG_REG2__hsInputFifoEmpty__SHIFT 0xc
19797#define VGT_DEBUG_REG2__hsTifFifoEmpty_MASK 0x2000
19798#define VGT_DEBUG_REG2__hsTifFifoEmpty__SHIFT 0xd
19799#define VGT_DEBUG_REG2__lsVertFifoFull_MASK 0x4000
19800#define VGT_DEBUG_REG2__lsVertFifoFull__SHIFT 0xe
19801#define VGT_DEBUG_REG2__lsWaveFifoFull_MASK 0x8000
19802#define VGT_DEBUG_REG2__lsWaveFifoFull__SHIFT 0xf
19803#define VGT_DEBUG_REG2__hsVertFifoFull_MASK 0x10000
19804#define VGT_DEBUG_REG2__hsVertFifoFull__SHIFT 0x10
19805#define VGT_DEBUG_REG2__hsWaveFifoFull_MASK 0x20000
19806#define VGT_DEBUG_REG2__hsWaveFifoFull__SHIFT 0x11
19807#define VGT_DEBUG_REG2__hsInputFifoFull_MASK 0x40000
19808#define VGT_DEBUG_REG2__hsInputFifoFull__SHIFT 0x12
19809#define VGT_DEBUG_REG2__hsTifFifoFull_MASK 0x80000
19810#define VGT_DEBUG_REG2__hsTifFifoFull__SHIFT 0x13
19811#define VGT_DEBUG_REG2__p0_rtr_MASK 0x100000
19812#define VGT_DEBUG_REG2__p0_rtr__SHIFT 0x14
19813#define VGT_DEBUG_REG2__p1_rtr_MASK 0x200000
19814#define VGT_DEBUG_REG2__p1_rtr__SHIFT 0x15
19815#define VGT_DEBUG_REG2__p0_dr_MASK 0x400000
19816#define VGT_DEBUG_REG2__p0_dr__SHIFT 0x16
19817#define VGT_DEBUG_REG2__p1_dr_MASK 0x800000
19818#define VGT_DEBUG_REG2__p1_dr__SHIFT 0x17
19819#define VGT_DEBUG_REG2__p0_rts_MASK 0x1000000
19820#define VGT_DEBUG_REG2__p0_rts__SHIFT 0x18
19821#define VGT_DEBUG_REG2__p1_rts_MASK 0x2000000
19822#define VGT_DEBUG_REG2__p1_rts__SHIFT 0x19
19823#define VGT_DEBUG_REG2__ls_sh_id_MASK 0x4000000
19824#define VGT_DEBUG_REG2__ls_sh_id__SHIFT 0x1a
19825#define VGT_DEBUG_REG2__lsFwaveFlag_MASK 0x8000000
19826#define VGT_DEBUG_REG2__lsFwaveFlag__SHIFT 0x1b
19827#define VGT_DEBUG_REG2__lsWaveSendFlush_MASK 0x10000000
19828#define VGT_DEBUG_REG2__lsWaveSendFlush__SHIFT 0x1c
19829#define VGT_DEBUG_REG2__SPARE_MASK 0xe0000000
19830#define VGT_DEBUG_REG2__SPARE__SHIFT 0x1d
19831#define VGT_DEBUG_REG3__lsTgRelInd_MASK 0xfff
19832#define VGT_DEBUG_REG3__lsTgRelInd__SHIFT 0x0
19833#define VGT_DEBUG_REG3__lsWaveRelInd_MASK 0x3f000
19834#define VGT_DEBUG_REG3__lsWaveRelInd__SHIFT 0xc
19835#define VGT_DEBUG_REG3__lsPatchCnt_MASK 0x3fc0000
19836#define VGT_DEBUG_REG3__lsPatchCnt__SHIFT 0x12
19837#define VGT_DEBUG_REG3__hsWaveRelInd_MASK 0xfc000000
19838#define VGT_DEBUG_REG3__hsWaveRelInd__SHIFT 0x1a
19839#define VGT_DEBUG_REG4__hsPatchCnt_MASK 0xff
19840#define VGT_DEBUG_REG4__hsPatchCnt__SHIFT 0x0
19841#define VGT_DEBUG_REG4__hsPrimId_15_0_MASK 0xffff00
19842#define VGT_DEBUG_REG4__hsPrimId_15_0__SHIFT 0x8
19843#define VGT_DEBUG_REG4__hsCpCnt_MASK 0x1f000000
19844#define VGT_DEBUG_REG4__hsCpCnt__SHIFT 0x18
19845#define VGT_DEBUG_REG4__hsWaveSendFlush_MASK 0x20000000
19846#define VGT_DEBUG_REG4__hsWaveSendFlush__SHIFT 0x1d
19847#define VGT_DEBUG_REG4__hsFwaveFlag_MASK 0x40000000
19848#define VGT_DEBUG_REG4__hsFwaveFlag__SHIFT 0x1e
19849#define VGT_DEBUG_REG4__SPARE_MASK 0x80000000
19850#define VGT_DEBUG_REG4__SPARE__SHIFT 0x1f
19851#define VGT_DEBUG_REG5__SPARE4_MASK 0x7
19852#define VGT_DEBUG_REG5__SPARE4__SHIFT 0x0
19853#define VGT_DEBUG_REG5__hsWaveCreditCnt_0_MASK 0xf8
19854#define VGT_DEBUG_REG5__hsWaveCreditCnt_0__SHIFT 0x3
19855#define VGT_DEBUG_REG5__SPARE3_MASK 0x700
19856#define VGT_DEBUG_REG5__SPARE3__SHIFT 0x8
19857#define VGT_DEBUG_REG5__hsVertCreditCnt_0_MASK 0xf800
19858#define VGT_DEBUG_REG5__hsVertCreditCnt_0__SHIFT 0xb
19859#define VGT_DEBUG_REG5__SPARE2_MASK 0x70000
19860#define VGT_DEBUG_REG5__SPARE2__SHIFT 0x10
19861#define VGT_DEBUG_REG5__lsWaveCreditCnt_0_MASK 0xf80000
19862#define VGT_DEBUG_REG5__lsWaveCreditCnt_0__SHIFT 0x13
19863#define VGT_DEBUG_REG5__SPARE1_MASK 0x7000000
19864#define VGT_DEBUG_REG5__SPARE1__SHIFT 0x18
19865#define VGT_DEBUG_REG5__lsVertCreditCnt_0_MASK 0xf8000000
19866#define VGT_DEBUG_REG5__lsVertCreditCnt_0__SHIFT 0x1b
19867#define VGT_DEBUG_REG6__debug_BASE_MASK 0xffff
19868#define VGT_DEBUG_REG6__debug_BASE__SHIFT 0x0
19869#define VGT_DEBUG_REG6__debug_SIZE_MASK 0xffff0000
19870#define VGT_DEBUG_REG6__debug_SIZE__SHIFT 0x10
19871#define VGT_DEBUG_REG7__debug_tfmmFifoEmpty_MASK 0x1
19872#define VGT_DEBUG_REG7__debug_tfmmFifoEmpty__SHIFT 0x0
19873#define VGT_DEBUG_REG7__debug_tfmmFifoFull_MASK 0x2
19874#define VGT_DEBUG_REG7__debug_tfmmFifoFull__SHIFT 0x1
19875#define VGT_DEBUG_REG7__hs_pipe0_dr_MASK 0x4
19876#define VGT_DEBUG_REG7__hs_pipe0_dr__SHIFT 0x2
19877#define VGT_DEBUG_REG7__hs_pipe0_rtr_MASK 0x8
19878#define VGT_DEBUG_REG7__hs_pipe0_rtr__SHIFT 0x3
19879#define VGT_DEBUG_REG7__hs_pipe1_rtr_MASK 0x10
19880#define VGT_DEBUG_REG7__hs_pipe1_rtr__SHIFT 0x4
19881#define VGT_DEBUG_REG7__SPARE_MASK 0xffe0
19882#define VGT_DEBUG_REG7__SPARE__SHIFT 0x5
19883#define VGT_DEBUG_REG7__TF_addr_MASK 0xffff0000
19884#define VGT_DEBUG_REG7__TF_addr__SHIFT 0x10
19885#define VGT_DEBUG_REG8__rcm_busy_q_MASK 0x1
19886#define VGT_DEBUG_REG8__rcm_busy_q__SHIFT 0x0
19887#define VGT_DEBUG_REG8__rcm_noif_busy_q_MASK 0x2
19888#define VGT_DEBUG_REG8__rcm_noif_busy_q__SHIFT 0x1
19889#define VGT_DEBUG_REG8__r1_inst_rtr_MASK 0x4
19890#define VGT_DEBUG_REG8__r1_inst_rtr__SHIFT 0x2
19891#define VGT_DEBUG_REG8__spi_gsprim_fifo_busy_q_MASK 0x8
19892#define VGT_DEBUG_REG8__spi_gsprim_fifo_busy_q__SHIFT 0x3
19893#define VGT_DEBUG_REG8__spi_esvert_fifo_busy_q_MASK 0x10
19894#define VGT_DEBUG_REG8__spi_esvert_fifo_busy_q__SHIFT 0x4
19895#define VGT_DEBUG_REG8__gs_tbl_valid_r3_q_MASK 0x20
19896#define VGT_DEBUG_REG8__gs_tbl_valid_r3_q__SHIFT 0x5
19897#define VGT_DEBUG_REG8__valid_r0_q_MASK 0x40
19898#define VGT_DEBUG_REG8__valid_r0_q__SHIFT 0x6
19899#define VGT_DEBUG_REG8__valid_r1_q_MASK 0x80
19900#define VGT_DEBUG_REG8__valid_r1_q__SHIFT 0x7
19901#define VGT_DEBUG_REG8__valid_r2_MASK 0x100
19902#define VGT_DEBUG_REG8__valid_r2__SHIFT 0x8
19903#define VGT_DEBUG_REG8__valid_r2_q_MASK 0x200
19904#define VGT_DEBUG_REG8__valid_r2_q__SHIFT 0x9
19905#define VGT_DEBUG_REG8__r0_rtr_MASK 0x400
19906#define VGT_DEBUG_REG8__r0_rtr__SHIFT 0xa
19907#define VGT_DEBUG_REG8__r1_rtr_MASK 0x800
19908#define VGT_DEBUG_REG8__r1_rtr__SHIFT 0xb
19909#define VGT_DEBUG_REG8__r2_indx_rtr_MASK 0x1000
19910#define VGT_DEBUG_REG8__r2_indx_rtr__SHIFT 0xc
19911#define VGT_DEBUG_REG8__r2_rtr_MASK 0x2000
19912#define VGT_DEBUG_REG8__r2_rtr__SHIFT 0xd
19913#define VGT_DEBUG_REG8__es_gs_rtr_MASK 0x4000
19914#define VGT_DEBUG_REG8__es_gs_rtr__SHIFT 0xe
19915#define VGT_DEBUG_REG8__gs_event_fifo_rtr_MASK 0x8000
19916#define VGT_DEBUG_REG8__gs_event_fifo_rtr__SHIFT 0xf
19917#define VGT_DEBUG_REG8__tm_rcm_gs_event_rtr_MASK 0x10000
19918#define VGT_DEBUG_REG8__tm_rcm_gs_event_rtr__SHIFT 0x10
19919#define VGT_DEBUG_REG8__gs_tbl_r3_rtr_MASK 0x20000
19920#define VGT_DEBUG_REG8__gs_tbl_r3_rtr__SHIFT 0x11
19921#define VGT_DEBUG_REG8__prim_skid_fifo_empty_MASK 0x40000
19922#define VGT_DEBUG_REG8__prim_skid_fifo_empty__SHIFT 0x12
19923#define VGT_DEBUG_REG8__VGT_SPI_gsprim_rtr_q_MASK 0x80000
19924#define VGT_DEBUG_REG8__VGT_SPI_gsprim_rtr_q__SHIFT 0x13
19925#define VGT_DEBUG_REG8__tm_rcm_gs_tbl_rtr_MASK 0x100000
19926#define VGT_DEBUG_REG8__tm_rcm_gs_tbl_rtr__SHIFT 0x14
19927#define VGT_DEBUG_REG8__tm_rcm_es_tbl_rtr_MASK 0x200000
19928#define VGT_DEBUG_REG8__tm_rcm_es_tbl_rtr__SHIFT 0x15
19929#define VGT_DEBUG_REG8__VGT_SPI_esvert_rtr_q_MASK 0x400000
19930#define VGT_DEBUG_REG8__VGT_SPI_esvert_rtr_q__SHIFT 0x16
19931#define VGT_DEBUG_REG8__r2_no_bp_rtr_MASK 0x800000
19932#define VGT_DEBUG_REG8__r2_no_bp_rtr__SHIFT 0x17
19933#define VGT_DEBUG_REG8__hold_for_es_flush_MASK 0x1000000
19934#define VGT_DEBUG_REG8__hold_for_es_flush__SHIFT 0x18
19935#define VGT_DEBUG_REG8__gs_event_fifo_empty_MASK 0x2000000
19936#define VGT_DEBUG_REG8__gs_event_fifo_empty__SHIFT 0x19
19937#define VGT_DEBUG_REG8__gsprim_buff_empty_q_MASK 0x4000000
19938#define VGT_DEBUG_REG8__gsprim_buff_empty_q__SHIFT 0x1a
19939#define VGT_DEBUG_REG8__gsprim_buff_full_q_MASK 0x8000000
19940#define VGT_DEBUG_REG8__gsprim_buff_full_q__SHIFT 0x1b
19941#define VGT_DEBUG_REG8__te_prim_fifo_empty_MASK 0x10000000
19942#define VGT_DEBUG_REG8__te_prim_fifo_empty__SHIFT 0x1c
19943#define VGT_DEBUG_REG8__te_prim_fifo_full_MASK 0x20000000
19944#define VGT_DEBUG_REG8__te_prim_fifo_full__SHIFT 0x1d
19945#define VGT_DEBUG_REG8__te_vert_fifo_empty_MASK 0x40000000
19946#define VGT_DEBUG_REG8__te_vert_fifo_empty__SHIFT 0x1e
19947#define VGT_DEBUG_REG8__te_vert_fifo_full_MASK 0x80000000
19948#define VGT_DEBUG_REG8__te_vert_fifo_full__SHIFT 0x1f
19949#define VGT_DEBUG_REG9__indices_to_send_r2_q_MASK 0x3
19950#define VGT_DEBUG_REG9__indices_to_send_r2_q__SHIFT 0x0
19951#define VGT_DEBUG_REG9__valid_indices_r3_MASK 0x4
19952#define VGT_DEBUG_REG9__valid_indices_r3__SHIFT 0x2
19953#define VGT_DEBUG_REG9__gs_eov_r3_MASK 0x8
19954#define VGT_DEBUG_REG9__gs_eov_r3__SHIFT 0x3
19955#define VGT_DEBUG_REG9__eop_indx_r3_MASK 0x10
19956#define VGT_DEBUG_REG9__eop_indx_r3__SHIFT 0x4
19957#define VGT_DEBUG_REG9__eop_prim_r3_MASK 0x20
19958#define VGT_DEBUG_REG9__eop_prim_r3__SHIFT 0x5
19959#define VGT_DEBUG_REG9__es_eov_r3_MASK 0x40
19960#define VGT_DEBUG_REG9__es_eov_r3__SHIFT 0x6
19961#define VGT_DEBUG_REG9__es_tbl_state_r3_q_0_MASK 0x80
19962#define VGT_DEBUG_REG9__es_tbl_state_r3_q_0__SHIFT 0x7
19963#define VGT_DEBUG_REG9__pending_es_send_r3_q_MASK 0x100
19964#define VGT_DEBUG_REG9__pending_es_send_r3_q__SHIFT 0x8
19965#define VGT_DEBUG_REG9__pending_es_flush_r3_MASK 0x200
19966#define VGT_DEBUG_REG9__pending_es_flush_r3__SHIFT 0x9
19967#define VGT_DEBUG_REG9__gs_tbl_num_es_per_gs_r3_q_not_0_MASK 0x400
19968#define VGT_DEBUG_REG9__gs_tbl_num_es_per_gs_r3_q_not_0__SHIFT 0xa
19969#define VGT_DEBUG_REG9__gs_tbl_prim_cnt_r3_q_MASK 0x3f800
19970#define VGT_DEBUG_REG9__gs_tbl_prim_cnt_r3_q__SHIFT 0xb
19971#define VGT_DEBUG_REG9__gs_tbl_eop_r3_q_MASK 0x40000
19972#define VGT_DEBUG_REG9__gs_tbl_eop_r3_q__SHIFT 0x12
19973#define VGT_DEBUG_REG9__gs_tbl_state_r3_q_MASK 0x380000
19974#define VGT_DEBUG_REG9__gs_tbl_state_r3_q__SHIFT 0x13
19975#define VGT_DEBUG_REG9__gs_pending_state_r3_q_MASK 0x400000
19976#define VGT_DEBUG_REG9__gs_pending_state_r3_q__SHIFT 0x16
19977#define VGT_DEBUG_REG9__invalidate_rb_roll_over_q_MASK 0x800000
19978#define VGT_DEBUG_REG9__invalidate_rb_roll_over_q__SHIFT 0x17
19979#define VGT_DEBUG_REG9__gs_instancing_state_q_MASK 0x1000000
19980#define VGT_DEBUG_REG9__gs_instancing_state_q__SHIFT 0x18
19981#define VGT_DEBUG_REG9__es_per_gs_vert_cnt_r3_q_not_0_MASK 0x2000000
19982#define VGT_DEBUG_REG9__es_per_gs_vert_cnt_r3_q_not_0__SHIFT 0x19
19983#define VGT_DEBUG_REG9__gs_prim_per_es_ctr_r3_q_not_0_MASK 0x4000000
19984#define VGT_DEBUG_REG9__gs_prim_per_es_ctr_r3_q_not_0__SHIFT 0x1a
19985#define VGT_DEBUG_REG9__pre_r0_rtr_MASK 0x8000000
19986#define VGT_DEBUG_REG9__pre_r0_rtr__SHIFT 0x1b
19987#define VGT_DEBUG_REG9__valid_r3_q_MASK 0x10000000
19988#define VGT_DEBUG_REG9__valid_r3_q__SHIFT 0x1c
19989#define VGT_DEBUG_REG9__valid_pre_r0_q_MASK 0x20000000
19990#define VGT_DEBUG_REG9__valid_pre_r0_q__SHIFT 0x1d
19991#define VGT_DEBUG_REG9__SPARE0_MASK 0x40000000
19992#define VGT_DEBUG_REG9__SPARE0__SHIFT 0x1e
19993#define VGT_DEBUG_REG9__off_chip_hs_r2_q_MASK 0x80000000
19994#define VGT_DEBUG_REG9__off_chip_hs_r2_q__SHIFT 0x1f
19995#define VGT_DEBUG_REG10__index_buffer_depth_r1_q_MASK 0x1f
19996#define VGT_DEBUG_REG10__index_buffer_depth_r1_q__SHIFT 0x0
19997#define VGT_DEBUG_REG10__eopg_r2_q_MASK 0x20
19998#define VGT_DEBUG_REG10__eopg_r2_q__SHIFT 0x5
19999#define VGT_DEBUG_REG10__eotg_r2_q_MASK 0x40
20000#define VGT_DEBUG_REG10__eotg_r2_q__SHIFT 0x6
20001#define VGT_DEBUG_REG10__onchip_gs_en_r0_q_MASK 0x180
20002#define VGT_DEBUG_REG10__onchip_gs_en_r0_q__SHIFT 0x7
20003#define VGT_DEBUG_REG10__SPARE2_MASK 0x600
20004#define VGT_DEBUG_REG10__SPARE2__SHIFT 0x9
20005#define VGT_DEBUG_REG10__rcm_mem_gsprim_re_qq_MASK 0x800
20006#define VGT_DEBUG_REG10__rcm_mem_gsprim_re_qq__SHIFT 0xb
20007#define VGT_DEBUG_REG10__rcm_mem_gsprim_re_q_MASK 0x1000
20008#define VGT_DEBUG_REG10__rcm_mem_gsprim_re_q__SHIFT 0xc
20009#define VGT_DEBUG_REG10__gs_rb_space_avail_r3_q_9_0_MASK 0x7fe000
20010#define VGT_DEBUG_REG10__gs_rb_space_avail_r3_q_9_0__SHIFT 0xd
20011#define VGT_DEBUG_REG10__es_rb_space_avail_r2_q_8_0_MASK 0xff800000
20012#define VGT_DEBUG_REG10__es_rb_space_avail_r2_q_8_0__SHIFT 0x17
20013#define VGT_DEBUG_REG11__tm_busy_q_MASK 0x1
20014#define VGT_DEBUG_REG11__tm_busy_q__SHIFT 0x0
20015#define VGT_DEBUG_REG11__tm_noif_busy_q_MASK 0x2
20016#define VGT_DEBUG_REG11__tm_noif_busy_q__SHIFT 0x1
20017#define VGT_DEBUG_REG11__tm_out_busy_q_MASK 0x4
20018#define VGT_DEBUG_REG11__tm_out_busy_q__SHIFT 0x2
20019#define VGT_DEBUG_REG11__es_rb_dealloc_fifo_busy_MASK 0x8
20020#define VGT_DEBUG_REG11__es_rb_dealloc_fifo_busy__SHIFT 0x3
20021#define VGT_DEBUG_REG11__vs_dealloc_tbl_busy_MASK 0x10
20022#define VGT_DEBUG_REG11__vs_dealloc_tbl_busy__SHIFT 0x4
20023#define VGT_DEBUG_REG11__SPARE1_MASK 0x20
20024#define VGT_DEBUG_REG11__SPARE1__SHIFT 0x5
20025#define VGT_DEBUG_REG11__spi_gsthread_fifo_busy_MASK 0x40
20026#define VGT_DEBUG_REG11__spi_gsthread_fifo_busy__SHIFT 0x6
20027#define VGT_DEBUG_REG11__spi_esthread_fifo_busy_MASK 0x80
20028#define VGT_DEBUG_REG11__spi_esthread_fifo_busy__SHIFT 0x7
20029#define VGT_DEBUG_REG11__hold_eswave_MASK 0x100
20030#define VGT_DEBUG_REG11__hold_eswave__SHIFT 0x8
20031#define VGT_DEBUG_REG11__es_rb_roll_over_r3_MASK 0x200
20032#define VGT_DEBUG_REG11__es_rb_roll_over_r3__SHIFT 0x9
20033#define VGT_DEBUG_REG11__counters_busy_r0_MASK 0x400
20034#define VGT_DEBUG_REG11__counters_busy_r0__SHIFT 0xa
20035#define VGT_DEBUG_REG11__counters_avail_r0_MASK 0x800
20036#define VGT_DEBUG_REG11__counters_avail_r0__SHIFT 0xb
20037#define VGT_DEBUG_REG11__counters_available_r0_MASK 0x1000
20038#define VGT_DEBUG_REG11__counters_available_r0__SHIFT 0xc
20039#define VGT_DEBUG_REG11__vs_event_fifo_rtr_MASK 0x2000
20040#define VGT_DEBUG_REG11__vs_event_fifo_rtr__SHIFT 0xd
20041#define VGT_DEBUG_REG11__VGT_SPI_gsthread_rtr_q_MASK 0x4000
20042#define VGT_DEBUG_REG11__VGT_SPI_gsthread_rtr_q__SHIFT 0xe
20043#define VGT_DEBUG_REG11__VGT_SPI_esthread_rtr_q_MASK 0x8000
20044#define VGT_DEBUG_REG11__VGT_SPI_esthread_rtr_q__SHIFT 0xf
20045#define VGT_DEBUG_REG11__gs_issue_rtr_MASK 0x10000
20046#define VGT_DEBUG_REG11__gs_issue_rtr__SHIFT 0x10
20047#define VGT_DEBUG_REG11__tm_pt_event_rtr_MASK 0x20000
20048#define VGT_DEBUG_REG11__tm_pt_event_rtr__SHIFT 0x11
20049#define VGT_DEBUG_REG11__SPARE0_MASK 0x40000
20050#define VGT_DEBUG_REG11__SPARE0__SHIFT 0x12
20051#define VGT_DEBUG_REG11__gs_r0_rtr_MASK 0x80000
20052#define VGT_DEBUG_REG11__gs_r0_rtr__SHIFT 0x13
20053#define VGT_DEBUG_REG11__es_r0_rtr_MASK 0x100000
20054#define VGT_DEBUG_REG11__es_r0_rtr__SHIFT 0x14
20055#define VGT_DEBUG_REG11__gog_tm_vs_event_rtr_MASK 0x200000
20056#define VGT_DEBUG_REG11__gog_tm_vs_event_rtr__SHIFT 0x15
20057#define VGT_DEBUG_REG11__tm_rcm_gs_event_rtr_MASK 0x400000
20058#define VGT_DEBUG_REG11__tm_rcm_gs_event_rtr__SHIFT 0x16
20059#define VGT_DEBUG_REG11__tm_rcm_gs_tbl_rtr_MASK 0x800000
20060#define VGT_DEBUG_REG11__tm_rcm_gs_tbl_rtr__SHIFT 0x17
20061#define VGT_DEBUG_REG11__tm_rcm_es_tbl_rtr_MASK 0x1000000
20062#define VGT_DEBUG_REG11__tm_rcm_es_tbl_rtr__SHIFT 0x18
20063#define VGT_DEBUG_REG11__vs_event_fifo_empty_MASK 0x2000000
20064#define VGT_DEBUG_REG11__vs_event_fifo_empty__SHIFT 0x19
20065#define VGT_DEBUG_REG11__vs_event_fifo_full_MASK 0x4000000
20066#define VGT_DEBUG_REG11__vs_event_fifo_full__SHIFT 0x1a
20067#define VGT_DEBUG_REG11__es_rb_dealloc_fifo_full_MASK 0x8000000
20068#define VGT_DEBUG_REG11__es_rb_dealloc_fifo_full__SHIFT 0x1b
20069#define VGT_DEBUG_REG11__vs_dealloc_tbl_full_MASK 0x10000000
20070#define VGT_DEBUG_REG11__vs_dealloc_tbl_full__SHIFT 0x1c
20071#define VGT_DEBUG_REG11__send_event_q_MASK 0x20000000
20072#define VGT_DEBUG_REG11__send_event_q__SHIFT 0x1d
20073#define VGT_DEBUG_REG11__es_tbl_empty_MASK 0x40000000
20074#define VGT_DEBUG_REG11__es_tbl_empty__SHIFT 0x1e
20075#define VGT_DEBUG_REG11__no_active_states_r0_MASK 0x80000000
20076#define VGT_DEBUG_REG11__no_active_states_r0__SHIFT 0x1f
20077#define VGT_DEBUG_REG12__gs_state0_r0_q_MASK 0x7
20078#define VGT_DEBUG_REG12__gs_state0_r0_q__SHIFT 0x0
20079#define VGT_DEBUG_REG12__gs_state1_r0_q_MASK 0x38
20080#define VGT_DEBUG_REG12__gs_state1_r0_q__SHIFT 0x3
20081#define VGT_DEBUG_REG12__gs_state2_r0_q_MASK 0x1c0
20082#define VGT_DEBUG_REG12__gs_state2_r0_q__SHIFT 0x6
20083#define VGT_DEBUG_REG12__gs_state3_r0_q_MASK 0xe00
20084#define VGT_DEBUG_REG12__gs_state3_r0_q__SHIFT 0x9
20085#define VGT_DEBUG_REG12__gs_state4_r0_q_MASK 0x7000
20086#define VGT_DEBUG_REG12__gs_state4_r0_q__SHIFT 0xc
20087#define VGT_DEBUG_REG12__gs_state5_r0_q_MASK 0x38000
20088#define VGT_DEBUG_REG12__gs_state5_r0_q__SHIFT 0xf
20089#define VGT_DEBUG_REG12__gs_state6_r0_q_MASK 0x1c0000
20090#define VGT_DEBUG_REG12__gs_state6_r0_q__SHIFT 0x12
20091#define VGT_DEBUG_REG12__gs_state7_r0_q_MASK 0xe00000
20092#define VGT_DEBUG_REG12__gs_state7_r0_q__SHIFT 0x15
20093#define VGT_DEBUG_REG12__gs_state8_r0_q_MASK 0x7000000
/*
 * VGT (Vertex Grouper/Tessellator) debug register bitfield definitions.
 * Auto-generated MASK/SHIFT pairs: each field FOO in register REGn gets
 * REGn__FOO_MASK (bit mask within the 32-bit register) and REGn__FOO__SHIFT
 * (bit position of the field's LSB). Values are consumed as
 * (reg & MASK) >> SHIFT. Do not hand-edit field values.
 * NOTE(review): stray web-scrape line numbers that had been fused onto every
 * line (breaking the preprocessor) have been stripped; macro text unchanged.
 */
#define VGT_DEBUG_REG12__gs_state8_r0_q__SHIFT 0x18
#define VGT_DEBUG_REG12__gs_state9_r0_q_MASK 0x38000000
#define VGT_DEBUG_REG12__gs_state9_r0_q__SHIFT 0x1b
#define VGT_DEBUG_REG12__hold_eswave_eop_MASK 0x40000000
#define VGT_DEBUG_REG12__hold_eswave_eop__SHIFT 0x1e
#define VGT_DEBUG_REG12__SPARE0_MASK 0x80000000
#define VGT_DEBUG_REG12__SPARE0__SHIFT 0x1f
#define VGT_DEBUG_REG13__gs_state10_r0_q_MASK 0x7
#define VGT_DEBUG_REG13__gs_state10_r0_q__SHIFT 0x0
#define VGT_DEBUG_REG13__gs_state11_r0_q_MASK 0x38
#define VGT_DEBUG_REG13__gs_state11_r0_q__SHIFT 0x3
#define VGT_DEBUG_REG13__gs_state12_r0_q_MASK 0x1c0
#define VGT_DEBUG_REG13__gs_state12_r0_q__SHIFT 0x6
#define VGT_DEBUG_REG13__gs_state13_r0_q_MASK 0xe00
#define VGT_DEBUG_REG13__gs_state13_r0_q__SHIFT 0x9
#define VGT_DEBUG_REG13__gs_state14_r0_q_MASK 0x7000
#define VGT_DEBUG_REG13__gs_state14_r0_q__SHIFT 0xc
#define VGT_DEBUG_REG13__gs_state15_r0_q_MASK 0x38000
#define VGT_DEBUG_REG13__gs_state15_r0_q__SHIFT 0xf
#define VGT_DEBUG_REG13__gs_tbl_wrptr_r0_q_3_0_MASK 0x3c0000
#define VGT_DEBUG_REG13__gs_tbl_wrptr_r0_q_3_0__SHIFT 0x12
#define VGT_DEBUG_REG13__gsfetch_done_fifo_cnt_q_not_0_MASK 0x400000
#define VGT_DEBUG_REG13__gsfetch_done_fifo_cnt_q_not_0__SHIFT 0x16
#define VGT_DEBUG_REG13__gsfetch_done_cnt_q_not_0_MASK 0x800000
#define VGT_DEBUG_REG13__gsfetch_done_cnt_q_not_0__SHIFT 0x17
#define VGT_DEBUG_REG13__es_tbl_full_MASK 0x1000000
#define VGT_DEBUG_REG13__es_tbl_full__SHIFT 0x18
#define VGT_DEBUG_REG13__SPARE1_MASK 0x2000000
#define VGT_DEBUG_REG13__SPARE1__SHIFT 0x19
#define VGT_DEBUG_REG13__SPARE0_MASK 0x4000000
#define VGT_DEBUG_REG13__SPARE0__SHIFT 0x1a
#define VGT_DEBUG_REG13__active_cm_sm_r0_q_MASK 0xf8000000
#define VGT_DEBUG_REG13__active_cm_sm_r0_q__SHIFT 0x1b
#define VGT_DEBUG_REG14__SPARE3_MASK 0xf
#define VGT_DEBUG_REG14__SPARE3__SHIFT 0x0
#define VGT_DEBUG_REG14__gsfetch_done_fifo_full_MASK 0x10
#define VGT_DEBUG_REG14__gsfetch_done_fifo_full__SHIFT 0x4
#define VGT_DEBUG_REG14__gs_rb_space_avail_r0_MASK 0x20
#define VGT_DEBUG_REG14__gs_rb_space_avail_r0__SHIFT 0x5
#define VGT_DEBUG_REG14__smx_es_done_cnt_r0_q_not_0_MASK 0x40
#define VGT_DEBUG_REG14__smx_es_done_cnt_r0_q_not_0__SHIFT 0x6
#define VGT_DEBUG_REG14__SPARE8_MASK 0x180
#define VGT_DEBUG_REG14__SPARE8__SHIFT 0x7
#define VGT_DEBUG_REG14__vs_done_cnt_q_not_0_MASK 0x200
#define VGT_DEBUG_REG14__vs_done_cnt_q_not_0__SHIFT 0x9
#define VGT_DEBUG_REG14__es_flush_cnt_busy_q_MASK 0x400
#define VGT_DEBUG_REG14__es_flush_cnt_busy_q__SHIFT 0xa
#define VGT_DEBUG_REG14__gs_tbl_full_r0_MASK 0x800
#define VGT_DEBUG_REG14__gs_tbl_full_r0__SHIFT 0xb
#define VGT_DEBUG_REG14__SPARE2_MASK 0x1ff000
#define VGT_DEBUG_REG14__SPARE2__SHIFT 0xc
#define VGT_DEBUG_REG14__se1spi_gsthread_fifo_busy_MASK 0x200000
#define VGT_DEBUG_REG14__se1spi_gsthread_fifo_busy__SHIFT 0x15
#define VGT_DEBUG_REG14__SPARE_MASK 0x1c00000
#define VGT_DEBUG_REG14__SPARE__SHIFT 0x16
#define VGT_DEBUG_REG14__VGT_SE1SPI_gsthread_rtr_q_MASK 0x2000000
#define VGT_DEBUG_REG14__VGT_SE1SPI_gsthread_rtr_q__SHIFT 0x19
#define VGT_DEBUG_REG14__smx1_es_done_cnt_r0_q_not_0_MASK 0x4000000
#define VGT_DEBUG_REG14__smx1_es_done_cnt_r0_q_not_0__SHIFT 0x1a
#define VGT_DEBUG_REG14__se1spi_esthread_fifo_busy_MASK 0x8000000
#define VGT_DEBUG_REG14__se1spi_esthread_fifo_busy__SHIFT 0x1b
#define VGT_DEBUG_REG14__SPARE1_MASK 0x10000000
#define VGT_DEBUG_REG14__SPARE1__SHIFT 0x1c
#define VGT_DEBUG_REG14__gsfetch_done_se1_cnt_q_not_0_MASK 0x20000000
#define VGT_DEBUG_REG14__gsfetch_done_se1_cnt_q_not_0__SHIFT 0x1d
#define VGT_DEBUG_REG14__SPARE0_MASK 0x40000000
#define VGT_DEBUG_REG14__SPARE0__SHIFT 0x1e
#define VGT_DEBUG_REG14__VGT_SE1SPI_esthread_rtr_q_MASK 0x80000000
#define VGT_DEBUG_REG14__VGT_SE1SPI_esthread_rtr_q__SHIFT 0x1f
#define VGT_DEBUG_REG15__cm_busy_q_MASK 0x1
#define VGT_DEBUG_REG15__cm_busy_q__SHIFT 0x0
#define VGT_DEBUG_REG15__counters_busy_q_MASK 0x2
#define VGT_DEBUG_REG15__counters_busy_q__SHIFT 0x1
#define VGT_DEBUG_REG15__output_fifo_empty_MASK 0x4
#define VGT_DEBUG_REG15__output_fifo_empty__SHIFT 0x2
#define VGT_DEBUG_REG15__output_fifo_full_MASK 0x8
#define VGT_DEBUG_REG15__output_fifo_full__SHIFT 0x3
#define VGT_DEBUG_REG15__counters_full_MASK 0x10
#define VGT_DEBUG_REG15__counters_full__SHIFT 0x4
#define VGT_DEBUG_REG15__active_sm_q_MASK 0x3e0
#define VGT_DEBUG_REG15__active_sm_q__SHIFT 0x5
#define VGT_DEBUG_REG15__entry_rdptr_q_MASK 0x7c00
#define VGT_DEBUG_REG15__entry_rdptr_q__SHIFT 0xa
#define VGT_DEBUG_REG15__cntr_tbl_wrptr_q_MASK 0xf8000
#define VGT_DEBUG_REG15__cntr_tbl_wrptr_q__SHIFT 0xf
#define VGT_DEBUG_REG15__SPARE25_MASK 0x3f00000
#define VGT_DEBUG_REG15__SPARE25__SHIFT 0x14
#define VGT_DEBUG_REG15__st_cut_mode_q_MASK 0xc000000
#define VGT_DEBUG_REG15__st_cut_mode_q__SHIFT 0x1a
#define VGT_DEBUG_REG15__gs_done_array_q_not_0_MASK 0x10000000
#define VGT_DEBUG_REG15__gs_done_array_q_not_0__SHIFT 0x1c
#define VGT_DEBUG_REG15__SPARE31_MASK 0xe0000000
#define VGT_DEBUG_REG15__SPARE31__SHIFT 0x1d
#define VGT_DEBUG_REG16__gog_busy_MASK 0x1
#define VGT_DEBUG_REG16__gog_busy__SHIFT 0x0
#define VGT_DEBUG_REG16__gog_state_q_MASK 0xe
#define VGT_DEBUG_REG16__gog_state_q__SHIFT 0x1
#define VGT_DEBUG_REG16__r0_rtr_MASK 0x10
#define VGT_DEBUG_REG16__r0_rtr__SHIFT 0x4
#define VGT_DEBUG_REG16__r1_rtr_MASK 0x20
#define VGT_DEBUG_REG16__r1_rtr__SHIFT 0x5
#define VGT_DEBUG_REG16__r1_upstream_rtr_MASK 0x40
#define VGT_DEBUG_REG16__r1_upstream_rtr__SHIFT 0x6
#define VGT_DEBUG_REG16__r2_vs_tbl_rtr_MASK 0x80
#define VGT_DEBUG_REG16__r2_vs_tbl_rtr__SHIFT 0x7
#define VGT_DEBUG_REG16__r2_prim_rtr_MASK 0x100
#define VGT_DEBUG_REG16__r2_prim_rtr__SHIFT 0x8
#define VGT_DEBUG_REG16__r2_indx_rtr_MASK 0x200
#define VGT_DEBUG_REG16__r2_indx_rtr__SHIFT 0x9
#define VGT_DEBUG_REG16__r2_rtr_MASK 0x400
#define VGT_DEBUG_REG16__r2_rtr__SHIFT 0xa
#define VGT_DEBUG_REG16__gog_tm_vs_event_rtr_MASK 0x800
#define VGT_DEBUG_REG16__gog_tm_vs_event_rtr__SHIFT 0xb
#define VGT_DEBUG_REG16__r3_force_vs_tbl_we_rtr_MASK 0x1000
#define VGT_DEBUG_REG16__r3_force_vs_tbl_we_rtr__SHIFT 0xc
#define VGT_DEBUG_REG16__indx_valid_r2_q_MASK 0x2000
#define VGT_DEBUG_REG16__indx_valid_r2_q__SHIFT 0xd
#define VGT_DEBUG_REG16__prim_valid_r2_q_MASK 0x4000
#define VGT_DEBUG_REG16__prim_valid_r2_q__SHIFT 0xe
#define VGT_DEBUG_REG16__valid_r2_q_MASK 0x8000
#define VGT_DEBUG_REG16__valid_r2_q__SHIFT 0xf
#define VGT_DEBUG_REG16__prim_valid_r1_q_MASK 0x10000
#define VGT_DEBUG_REG16__prim_valid_r1_q__SHIFT 0x10
#define VGT_DEBUG_REG16__indx_valid_r1_q_MASK 0x20000
#define VGT_DEBUG_REG16__indx_valid_r1_q__SHIFT 0x11
#define VGT_DEBUG_REG16__valid_r1_q_MASK 0x40000
#define VGT_DEBUG_REG16__valid_r1_q__SHIFT 0x12
#define VGT_DEBUG_REG16__indx_valid_r0_q_MASK 0x80000
#define VGT_DEBUG_REG16__indx_valid_r0_q__SHIFT 0x13
#define VGT_DEBUG_REG16__prim_valid_r0_q_MASK 0x100000
#define VGT_DEBUG_REG16__prim_valid_r0_q__SHIFT 0x14
#define VGT_DEBUG_REG16__valid_r0_q_MASK 0x200000
#define VGT_DEBUG_REG16__valid_r0_q__SHIFT 0x15
#define VGT_DEBUG_REG16__send_event_q_MASK 0x400000
#define VGT_DEBUG_REG16__send_event_q__SHIFT 0x16
#define VGT_DEBUG_REG16__SPARE24_MASK 0x800000
#define VGT_DEBUG_REG16__SPARE24__SHIFT 0x17
#define VGT_DEBUG_REG16__vert_seen_since_sopg_r2_q_MASK 0x1000000
#define VGT_DEBUG_REG16__vert_seen_since_sopg_r2_q__SHIFT 0x18
#define VGT_DEBUG_REG16__gog_out_prim_state_sel_MASK 0xe000000
#define VGT_DEBUG_REG16__gog_out_prim_state_sel__SHIFT 0x19
#define VGT_DEBUG_REG16__multiple_streams_en_r1_q_MASK 0x10000000
#define VGT_DEBUG_REG16__multiple_streams_en_r1_q__SHIFT 0x1c
#define VGT_DEBUG_REG16__vs_vert_count_r2_q_not_0_MASK 0x20000000
#define VGT_DEBUG_REG16__vs_vert_count_r2_q_not_0__SHIFT 0x1d
#define VGT_DEBUG_REG16__num_gs_r2_q_not_0_MASK 0x40000000
#define VGT_DEBUG_REG16__num_gs_r2_q_not_0__SHIFT 0x1e
#define VGT_DEBUG_REG16__new_vs_thread_r2_MASK 0x80000000
#define VGT_DEBUG_REG16__new_vs_thread_r2__SHIFT 0x1f
#define VGT_DEBUG_REG17__gog_out_prim_rel_indx2_5_0_MASK 0x3f
#define VGT_DEBUG_REG17__gog_out_prim_rel_indx2_5_0__SHIFT 0x0
#define VGT_DEBUG_REG17__gog_out_prim_rel_indx1_5_0_MASK 0xfc0
#define VGT_DEBUG_REG17__gog_out_prim_rel_indx1_5_0__SHIFT 0x6
#define VGT_DEBUG_REG17__gog_out_prim_rel_indx0_5_0_MASK 0x3f000
#define VGT_DEBUG_REG17__gog_out_prim_rel_indx0_5_0__SHIFT 0xc
#define VGT_DEBUG_REG17__gog_out_indx_13_0_MASK 0xfffc0000
#define VGT_DEBUG_REG17__gog_out_indx_13_0__SHIFT 0x12
#define VGT_DEBUG_REG18__grp_vr_valid_MASK 0x1
#define VGT_DEBUG_REG18__grp_vr_valid__SHIFT 0x0
#define VGT_DEBUG_REG18__pipe0_dr_MASK 0x2
#define VGT_DEBUG_REG18__pipe0_dr__SHIFT 0x1
#define VGT_DEBUG_REG18__pipe1_dr_MASK 0x4
#define VGT_DEBUG_REG18__pipe1_dr__SHIFT 0x2
#define VGT_DEBUG_REG18__vr_grp_read_MASK 0x8
#define VGT_DEBUG_REG18__vr_grp_read__SHIFT 0x3
#define VGT_DEBUG_REG18__pipe0_rtr_MASK 0x10
#define VGT_DEBUG_REG18__pipe0_rtr__SHIFT 0x4
#define VGT_DEBUG_REG18__pipe1_rtr_MASK 0x20
#define VGT_DEBUG_REG18__pipe1_rtr__SHIFT 0x5
#define VGT_DEBUG_REG18__out_vr_indx_read_MASK 0x40
#define VGT_DEBUG_REG18__out_vr_indx_read__SHIFT 0x6
#define VGT_DEBUG_REG18__out_vr_prim_read_MASK 0x80
#define VGT_DEBUG_REG18__out_vr_prim_read__SHIFT 0x7
#define VGT_DEBUG_REG18__indices_to_send_q_MASK 0x700
#define VGT_DEBUG_REG18__indices_to_send_q__SHIFT 0x8
#define VGT_DEBUG_REG18__valid_indices_MASK 0x800
#define VGT_DEBUG_REG18__valid_indices__SHIFT 0xb
#define VGT_DEBUG_REG18__last_indx_of_prim_MASK 0x1000
#define VGT_DEBUG_REG18__last_indx_of_prim__SHIFT 0xc
#define VGT_DEBUG_REG18__indx0_new_d_MASK 0x2000
#define VGT_DEBUG_REG18__indx0_new_d__SHIFT 0xd
#define VGT_DEBUG_REG18__indx1_new_d_MASK 0x4000
#define VGT_DEBUG_REG18__indx1_new_d__SHIFT 0xe
#define VGT_DEBUG_REG18__indx2_new_d_MASK 0x8000
#define VGT_DEBUG_REG18__indx2_new_d__SHIFT 0xf
#define VGT_DEBUG_REG18__indx2_hit_d_MASK 0x10000
#define VGT_DEBUG_REG18__indx2_hit_d__SHIFT 0x10
#define VGT_DEBUG_REG18__indx1_hit_d_MASK 0x20000
#define VGT_DEBUG_REG18__indx1_hit_d__SHIFT 0x11
#define VGT_DEBUG_REG18__indx0_hit_d_MASK 0x40000
#define VGT_DEBUG_REG18__indx0_hit_d__SHIFT 0x12
#define VGT_DEBUG_REG18__st_vertex_reuse_off_r0_q_MASK 0x80000
#define VGT_DEBUG_REG18__st_vertex_reuse_off_r0_q__SHIFT 0x13
#define VGT_DEBUG_REG18__last_group_of_instance_r0_q_MASK 0x100000
#define VGT_DEBUG_REG18__last_group_of_instance_r0_q__SHIFT 0x14
#define VGT_DEBUG_REG18__null_primitive_r0_q_MASK 0x200000
#define VGT_DEBUG_REG18__null_primitive_r0_q__SHIFT 0x15
#define VGT_DEBUG_REG18__eop_r0_q_MASK 0x400000
#define VGT_DEBUG_REG18__eop_r0_q__SHIFT 0x16
#define VGT_DEBUG_REG18__eject_vtx_vect_r1_d_MASK 0x800000
#define VGT_DEBUG_REG18__eject_vtx_vect_r1_d__SHIFT 0x17
#define VGT_DEBUG_REG18__sub_prim_type_r0_q_MASK 0x7000000
#define VGT_DEBUG_REG18__sub_prim_type_r0_q__SHIFT 0x18
#define VGT_DEBUG_REG18__gs_scenario_a_r0_q_MASK 0x8000000
#define VGT_DEBUG_REG18__gs_scenario_a_r0_q__SHIFT 0x1b
#define VGT_DEBUG_REG18__gs_scenario_b_r0_q_MASK 0x10000000
#define VGT_DEBUG_REG18__gs_scenario_b_r0_q__SHIFT 0x1c
#define VGT_DEBUG_REG18__components_valid_r0_q_MASK 0xe0000000
#define VGT_DEBUG_REG18__components_valid_r0_q__SHIFT 0x1d
#define VGT_DEBUG_REG19__separate_out_busy_q_MASK 0x1
#define VGT_DEBUG_REG19__separate_out_busy_q__SHIFT 0x0
#define VGT_DEBUG_REG19__separate_out_indx_busy_q_MASK 0x2
#define VGT_DEBUG_REG19__separate_out_indx_busy_q__SHIFT 0x1
#define VGT_DEBUG_REG19__prim_buffer_empty_MASK 0x4
#define VGT_DEBUG_REG19__prim_buffer_empty__SHIFT 0x2
#define VGT_DEBUG_REG19__prim_buffer_full_MASK 0x8
#define VGT_DEBUG_REG19__prim_buffer_full__SHIFT 0x3
#define VGT_DEBUG_REG19__pa_clips_fifo_busy_q_MASK 0x10
#define VGT_DEBUG_REG19__pa_clips_fifo_busy_q__SHIFT 0x4
#define VGT_DEBUG_REG19__pa_clipp_fifo_busy_q_MASK 0x20
#define VGT_DEBUG_REG19__pa_clipp_fifo_busy_q__SHIFT 0x5
#define VGT_DEBUG_REG19__VGT_PA_clips_rtr_q_MASK 0x40
#define VGT_DEBUG_REG19__VGT_PA_clips_rtr_q__SHIFT 0x6
#define VGT_DEBUG_REG19__VGT_PA_clipp_rtr_q_MASK 0x80
#define VGT_DEBUG_REG19__VGT_PA_clipp_rtr_q__SHIFT 0x7
#define VGT_DEBUG_REG19__spi_vsthread_fifo_busy_q_MASK 0x100
#define VGT_DEBUG_REG19__spi_vsthread_fifo_busy_q__SHIFT 0x8
#define VGT_DEBUG_REG19__spi_vsvert_fifo_busy_q_MASK 0x200
#define VGT_DEBUG_REG19__spi_vsvert_fifo_busy_q__SHIFT 0x9
#define VGT_DEBUG_REG19__pa_clipv_fifo_busy_q_MASK 0x400
#define VGT_DEBUG_REG19__pa_clipv_fifo_busy_q__SHIFT 0xa
#define VGT_DEBUG_REG19__hold_prim_MASK 0x800
#define VGT_DEBUG_REG19__hold_prim__SHIFT 0xb
#define VGT_DEBUG_REG19__VGT_SPI_vsthread_rtr_q_MASK 0x1000
#define VGT_DEBUG_REG19__VGT_SPI_vsthread_rtr_q__SHIFT 0xc
#define VGT_DEBUG_REG19__VGT_SPI_vsvert_rtr_q_MASK 0x2000
#define VGT_DEBUG_REG19__VGT_SPI_vsvert_rtr_q__SHIFT 0xd
#define VGT_DEBUG_REG19__VGT_PA_clipv_rtr_q_MASK 0x4000
#define VGT_DEBUG_REG19__VGT_PA_clipv_rtr_q__SHIFT 0xe
#define VGT_DEBUG_REG19__new_packet_q_MASK 0x8000
#define VGT_DEBUG_REG19__new_packet_q__SHIFT 0xf
#define VGT_DEBUG_REG19__buffered_prim_event_MASK 0x10000
#define VGT_DEBUG_REG19__buffered_prim_event__SHIFT 0x10
#define VGT_DEBUG_REG19__buffered_prim_null_primitive_MASK 0x20000
#define VGT_DEBUG_REG19__buffered_prim_null_primitive__SHIFT 0x11
#define VGT_DEBUG_REG19__buffered_prim_eop_MASK 0x40000
#define VGT_DEBUG_REG19__buffered_prim_eop__SHIFT 0x12
#define VGT_DEBUG_REG19__buffered_prim_eject_vtx_vect_MASK 0x80000
#define VGT_DEBUG_REG19__buffered_prim_eject_vtx_vect__SHIFT 0x13
#define VGT_DEBUG_REG19__buffered_prim_type_event_MASK 0x3f00000
#define VGT_DEBUG_REG19__buffered_prim_type_event__SHIFT 0x14
#define VGT_DEBUG_REG19__VGT_SE1SPI_vswave_rtr_q_MASK 0x4000000
#define VGT_DEBUG_REG19__VGT_SE1SPI_vswave_rtr_q__SHIFT 0x1a
#define VGT_DEBUG_REG19__VGT_SE1SPI_vsvert_rtr_q_MASK 0x8000000
#define VGT_DEBUG_REG19__VGT_SE1SPI_vsvert_rtr_q__SHIFT 0x1b
#define VGT_DEBUG_REG19__num_new_unique_rel_indx_MASK 0x30000000
#define VGT_DEBUG_REG19__num_new_unique_rel_indx__SHIFT 0x1c
#define VGT_DEBUG_REG19__null_terminate_vtx_vector_MASK 0x40000000
#define VGT_DEBUG_REG19__null_terminate_vtx_vector__SHIFT 0x1e
#define VGT_DEBUG_REG19__filter_event_MASK 0x80000000
#define VGT_DEBUG_REG19__filter_event__SHIFT 0x1f
#define VGT_DEBUG_REG20__dbg_VGT_SPI_vsthread_sovertexindex_MASK 0xffff
#define VGT_DEBUG_REG20__dbg_VGT_SPI_vsthread_sovertexindex__SHIFT 0x0
#define VGT_DEBUG_REG20__dbg_VGT_SPI_vsthread_sovertexcount_not_0_MASK 0x10000
#define VGT_DEBUG_REG20__dbg_VGT_SPI_vsthread_sovertexcount_not_0__SHIFT 0x10
#define VGT_DEBUG_REG20__SPARE17_MASK 0x20000
#define VGT_DEBUG_REG20__SPARE17__SHIFT 0x11
#define VGT_DEBUG_REG20__alloc_counter_q_MASK 0x3c0000
#define VGT_DEBUG_REG20__alloc_counter_q__SHIFT 0x12
#define VGT_DEBUG_REG20__curr_dealloc_distance_q_MASK 0x1fc00000
#define VGT_DEBUG_REG20__curr_dealloc_distance_q__SHIFT 0x16
#define VGT_DEBUG_REG20__new_allocate_q_MASK 0x20000000
#define VGT_DEBUG_REG20__new_allocate_q__SHIFT 0x1d
#define VGT_DEBUG_REG20__curr_slot_in_vtx_vect_q_not_0_MASK 0x40000000
#define VGT_DEBUG_REG20__curr_slot_in_vtx_vect_q_not_0__SHIFT 0x1e
#define VGT_DEBUG_REG20__int_vtx_counter_q_not_0_MASK 0x80000000
#define VGT_DEBUG_REG20__int_vtx_counter_q_not_0__SHIFT 0x1f
#define VGT_DEBUG_REG21__out_indx_fifo_empty_MASK 0x1
#define VGT_DEBUG_REG21__out_indx_fifo_empty__SHIFT 0x0
#define VGT_DEBUG_REG21__indx_side_fifo_empty_MASK 0x2
#define VGT_DEBUG_REG21__indx_side_fifo_empty__SHIFT 0x1
#define VGT_DEBUG_REG21__pipe0_dr_MASK 0x4
#define VGT_DEBUG_REG21__pipe0_dr__SHIFT 0x2
#define VGT_DEBUG_REG21__pipe1_dr_MASK 0x8
#define VGT_DEBUG_REG21__pipe1_dr__SHIFT 0x3
#define VGT_DEBUG_REG21__pipe2_dr_MASK 0x10
#define VGT_DEBUG_REG21__pipe2_dr__SHIFT 0x4
#define VGT_DEBUG_REG21__vsthread_buff_empty_MASK 0x20
#define VGT_DEBUG_REG21__vsthread_buff_empty__SHIFT 0x5
#define VGT_DEBUG_REG21__out_indx_fifo_full_MASK 0x40
#define VGT_DEBUG_REG21__out_indx_fifo_full__SHIFT 0x6
#define VGT_DEBUG_REG21__indx_side_fifo_full_MASK 0x80
#define VGT_DEBUG_REG21__indx_side_fifo_full__SHIFT 0x7
#define VGT_DEBUG_REG21__pipe0_rtr_MASK 0x100
#define VGT_DEBUG_REG21__pipe0_rtr__SHIFT 0x8
#define VGT_DEBUG_REG21__pipe1_rtr_MASK 0x200
#define VGT_DEBUG_REG21__pipe1_rtr__SHIFT 0x9
#define VGT_DEBUG_REG21__pipe2_rtr_MASK 0x400
#define VGT_DEBUG_REG21__pipe2_rtr__SHIFT 0xa
#define VGT_DEBUG_REG21__vsthread_buff_full_MASK 0x800
#define VGT_DEBUG_REG21__vsthread_buff_full__SHIFT 0xb
#define VGT_DEBUG_REG21__interfaces_rtr_MASK 0x1000
#define VGT_DEBUG_REG21__interfaces_rtr__SHIFT 0xc
#define VGT_DEBUG_REG21__indx_count_q_not_0_MASK 0x2000
#define VGT_DEBUG_REG21__indx_count_q_not_0__SHIFT 0xd
#define VGT_DEBUG_REG21__wait_for_external_eopg_q_MASK 0x4000
#define VGT_DEBUG_REG21__wait_for_external_eopg_q__SHIFT 0xe
#define VGT_DEBUG_REG21__full_state_p1_q_MASK 0x8000
#define VGT_DEBUG_REG21__full_state_p1_q__SHIFT 0xf
#define VGT_DEBUG_REG21__indx_side_indx_valid_MASK 0x10000
#define VGT_DEBUG_REG21__indx_side_indx_valid__SHIFT 0x10
#define VGT_DEBUG_REG21__stateid_p0_q_MASK 0xe0000
#define VGT_DEBUG_REG21__stateid_p0_q__SHIFT 0x11
#define VGT_DEBUG_REG21__is_event_p0_q_MASK 0x100000
#define VGT_DEBUG_REG21__is_event_p0_q__SHIFT 0x14
#define VGT_DEBUG_REG21__lshs_dealloc_p1_MASK 0x200000
#define VGT_DEBUG_REG21__lshs_dealloc_p1__SHIFT 0x15
#define VGT_DEBUG_REG21__stream_id_r2_q_MASK 0x400000
#define VGT_DEBUG_REG21__stream_id_r2_q__SHIFT 0x16
#define VGT_DEBUG_REG21__vtx_vect_counter_q_not_0_MASK 0x800000
#define VGT_DEBUG_REG21__vtx_vect_counter_q_not_0__SHIFT 0x17
#define VGT_DEBUG_REG21__buff_full_p1_MASK 0x1000000
#define VGT_DEBUG_REG21__buff_full_p1__SHIFT 0x18
#define VGT_DEBUG_REG21__strmout_valid_p1_MASK 0x2000000
#define VGT_DEBUG_REG21__strmout_valid_p1__SHIFT 0x19
#define VGT_DEBUG_REG21__eotg_r2_q_MASK 0x4000000
#define VGT_DEBUG_REG21__eotg_r2_q__SHIFT 0x1a
#define VGT_DEBUG_REG21__null_r2_q_MASK 0x8000000
#define VGT_DEBUG_REG21__null_r2_q__SHIFT 0x1b
#define VGT_DEBUG_REG21__p0_dr_MASK 0x10000000
#define VGT_DEBUG_REG21__p0_dr__SHIFT 0x1c
#define VGT_DEBUG_REG21__p0_rtr_MASK 0x20000000
#define VGT_DEBUG_REG21__p0_rtr__SHIFT 0x1d
#define VGT_DEBUG_REG21__eopg_p0_q_MASK 0x40000000
#define VGT_DEBUG_REG21__eopg_p0_q__SHIFT 0x1e
#define VGT_DEBUG_REG21__p0_nobp_MASK 0x80000000
#define VGT_DEBUG_REG21__p0_nobp__SHIFT 0x1f
#define VGT_DEBUG_REG22__cm_state16_MASK 0x3
#define VGT_DEBUG_REG22__cm_state16__SHIFT 0x0
#define VGT_DEBUG_REG22__cm_state17_MASK 0xc
#define VGT_DEBUG_REG22__cm_state17__SHIFT 0x2
#define VGT_DEBUG_REG22__cm_state18_MASK 0x30
#define VGT_DEBUG_REG22__cm_state18__SHIFT 0x4
#define VGT_DEBUG_REG22__cm_state19_MASK 0xc0
#define VGT_DEBUG_REG22__cm_state19__SHIFT 0x6
#define VGT_DEBUG_REG22__cm_state20_MASK 0x300
#define VGT_DEBUG_REG22__cm_state20__SHIFT 0x8
#define VGT_DEBUG_REG22__cm_state21_MASK 0xc00
#define VGT_DEBUG_REG22__cm_state21__SHIFT 0xa
#define VGT_DEBUG_REG22__cm_state22_MASK 0x3000
#define VGT_DEBUG_REG22__cm_state22__SHIFT 0xc
#define VGT_DEBUG_REG22__cm_state23_MASK 0xc000
#define VGT_DEBUG_REG22__cm_state23__SHIFT 0xe
#define VGT_DEBUG_REG22__cm_state24_MASK 0x30000
#define VGT_DEBUG_REG22__cm_state24__SHIFT 0x10
#define VGT_DEBUG_REG22__cm_state25_MASK 0xc0000
#define VGT_DEBUG_REG22__cm_state25__SHIFT 0x12
#define VGT_DEBUG_REG22__cm_state26_MASK 0x300000
#define VGT_DEBUG_REG22__cm_state26__SHIFT 0x14
#define VGT_DEBUG_REG22__cm_state27_MASK 0xc00000
#define VGT_DEBUG_REG22__cm_state27__SHIFT 0x16
#define VGT_DEBUG_REG22__cm_state28_MASK 0x3000000
#define VGT_DEBUG_REG22__cm_state28__SHIFT 0x18
#define VGT_DEBUG_REG22__cm_state29_MASK 0xc000000
#define VGT_DEBUG_REG22__cm_state29__SHIFT 0x1a
#define VGT_DEBUG_REG22__cm_state30_MASK 0x30000000
#define VGT_DEBUG_REG22__cm_state30__SHIFT 0x1c
#define VGT_DEBUG_REG22__cm_state31_MASK 0xc0000000
#define VGT_DEBUG_REG22__cm_state31__SHIFT 0x1e
#define VGT_DEBUG_REG23__frmt_busy_MASK 0x1
#define VGT_DEBUG_REG23__frmt_busy__SHIFT 0x0
#define VGT_DEBUG_REG23__rcm_frmt_vert_rtr_MASK 0x2
#define VGT_DEBUG_REG23__rcm_frmt_vert_rtr__SHIFT 0x1
#define VGT_DEBUG_REG23__rcm_frmt_prim_rtr_MASK 0x4
#define VGT_DEBUG_REG23__rcm_frmt_prim_rtr__SHIFT 0x2
#define VGT_DEBUG_REG23__prim_r3_rtr_MASK 0x8
#define VGT_DEBUG_REG23__prim_r3_rtr__SHIFT 0x3
#define VGT_DEBUG_REG23__prim_r2_rtr_MASK 0x10
#define VGT_DEBUG_REG23__prim_r2_rtr__SHIFT 0x4
#define VGT_DEBUG_REG23__vert_r3_rtr_MASK 0x20
#define VGT_DEBUG_REG23__vert_r3_rtr__SHIFT 0x5
#define VGT_DEBUG_REG23__vert_r2_rtr_MASK 0x40
#define VGT_DEBUG_REG23__vert_r2_rtr__SHIFT 0x6
#define VGT_DEBUG_REG23__vert_r1_rtr_MASK 0x80
#define VGT_DEBUG_REG23__vert_r1_rtr__SHIFT 0x7
#define VGT_DEBUG_REG23__vert_r0_rtr_MASK 0x100
#define VGT_DEBUG_REG23__vert_r0_rtr__SHIFT 0x8
#define VGT_DEBUG_REG23__prim_fifo_empty_MASK 0x200
#define VGT_DEBUG_REG23__prim_fifo_empty__SHIFT 0x9
#define VGT_DEBUG_REG23__prim_fifo_full_MASK 0x400
#define VGT_DEBUG_REG23__prim_fifo_full__SHIFT 0xa
#define VGT_DEBUG_REG23__vert_dr_r2_q_MASK 0x800
#define VGT_DEBUG_REG23__vert_dr_r2_q__SHIFT 0xb
#define VGT_DEBUG_REG23__prim_dr_r2_q_MASK 0x1000
#define VGT_DEBUG_REG23__prim_dr_r2_q__SHIFT 0xc
#define VGT_DEBUG_REG23__vert_dr_r1_q_MASK 0x2000
#define VGT_DEBUG_REG23__vert_dr_r1_q__SHIFT 0xd
#define VGT_DEBUG_REG23__vert_dr_r0_q_MASK 0x4000
#define VGT_DEBUG_REG23__vert_dr_r0_q__SHIFT 0xe
#define VGT_DEBUG_REG23__new_verts_r2_q_MASK 0x18000
#define VGT_DEBUG_REG23__new_verts_r2_q__SHIFT 0xf
#define VGT_DEBUG_REG23__verts_sent_r2_q_MASK 0x1e0000
#define VGT_DEBUG_REG23__verts_sent_r2_q__SHIFT 0x11
#define VGT_DEBUG_REG23__prim_state_sel_r2_q_MASK 0xe00000
#define VGT_DEBUG_REG23__prim_state_sel_r2_q__SHIFT 0x15
#define VGT_DEBUG_REG23__SPARE_MASK 0xff000000
#define VGT_DEBUG_REG23__SPARE__SHIFT 0x18
#define VGT_DEBUG_REG24__avail_es_rb_space_r0_q_23_0_MASK 0xffffff
#define VGT_DEBUG_REG24__avail_es_rb_space_r0_q_23_0__SHIFT 0x0
#define VGT_DEBUG_REG24__dependent_st_cut_mode_q_MASK 0x3000000
#define VGT_DEBUG_REG24__dependent_st_cut_mode_q__SHIFT 0x18
#define VGT_DEBUG_REG24__SPARE31_MASK 0xfc000000
#define VGT_DEBUG_REG24__SPARE31__SHIFT 0x1a
#define VGT_DEBUG_REG25__avail_gs_rb_space_r0_q_25_0_MASK 0x3ffffff
#define VGT_DEBUG_REG25__avail_gs_rb_space_r0_q_25_0__SHIFT 0x0
#define VGT_DEBUG_REG25__active_sm_r0_q_MASK 0x3c000000
#define VGT_DEBUG_REG25__active_sm_r0_q__SHIFT 0x1a
#define VGT_DEBUG_REG25__add_gs_rb_space_r1_q_MASK 0x40000000
#define VGT_DEBUG_REG25__add_gs_rb_space_r1_q__SHIFT 0x1e
#define VGT_DEBUG_REG25__add_gs_rb_space_r0_q_MASK 0x80000000
#define VGT_DEBUG_REG25__add_gs_rb_space_r0_q__SHIFT 0x1f
#define VGT_DEBUG_REG26__cm_state0_MASK 0x3
#define VGT_DEBUG_REG26__cm_state0__SHIFT 0x0
#define VGT_DEBUG_REG26__cm_state1_MASK 0xc
#define VGT_DEBUG_REG26__cm_state1__SHIFT 0x2
#define VGT_DEBUG_REG26__cm_state2_MASK 0x30
#define VGT_DEBUG_REG26__cm_state2__SHIFT 0x4
#define VGT_DEBUG_REG26__cm_state3_MASK 0xc0
#define VGT_DEBUG_REG26__cm_state3__SHIFT 0x6
#define VGT_DEBUG_REG26__cm_state4_MASK 0x300
#define VGT_DEBUG_REG26__cm_state4__SHIFT 0x8
#define VGT_DEBUG_REG26__cm_state5_MASK 0xc00
#define VGT_DEBUG_REG26__cm_state5__SHIFT 0xa
#define VGT_DEBUG_REG26__cm_state6_MASK 0x3000
#define VGT_DEBUG_REG26__cm_state6__SHIFT 0xc
#define VGT_DEBUG_REG26__cm_state7_MASK 0xc000
#define VGT_DEBUG_REG26__cm_state7__SHIFT 0xe
#define VGT_DEBUG_REG26__cm_state8_MASK 0x30000
#define VGT_DEBUG_REG26__cm_state8__SHIFT 0x10
#define VGT_DEBUG_REG26__cm_state9_MASK 0xc0000
#define VGT_DEBUG_REG26__cm_state9__SHIFT 0x12
#define VGT_DEBUG_REG26__cm_state10_MASK 0x300000
#define VGT_DEBUG_REG26__cm_state10__SHIFT 0x14
#define VGT_DEBUG_REG26__cm_state11_MASK 0xc00000
#define VGT_DEBUG_REG26__cm_state11__SHIFT 0x16
#define VGT_DEBUG_REG26__cm_state12_MASK 0x3000000
#define VGT_DEBUG_REG26__cm_state12__SHIFT 0x18
#define VGT_DEBUG_REG26__cm_state13_MASK 0xc000000
#define VGT_DEBUG_REG26__cm_state13__SHIFT 0x1a
#define VGT_DEBUG_REG26__cm_state14_MASK 0x30000000
#define VGT_DEBUG_REG26__cm_state14__SHIFT 0x1c
#define VGT_DEBUG_REG26__cm_state15_MASK 0xc0000000
#define VGT_DEBUG_REG26__cm_state15__SHIFT 0x1e
#define VGT_DEBUG_REG27__pipe0_dr_MASK 0x1
#define VGT_DEBUG_REG27__pipe0_dr__SHIFT 0x0
#define VGT_DEBUG_REG27__gsc0_dr_MASK 0x2
#define VGT_DEBUG_REG27__gsc0_dr__SHIFT 0x1
#define VGT_DEBUG_REG27__pipe1_dr_MASK 0x4
#define VGT_DEBUG_REG27__pipe1_dr__SHIFT 0x2
#define VGT_DEBUG_REG27__tm_pt_event_rtr_MASK 0x8
#define VGT_DEBUG_REG27__tm_pt_event_rtr__SHIFT 0x3
#define VGT_DEBUG_REG27__pipe0_rtr_MASK 0x10
#define VGT_DEBUG_REG27__pipe0_rtr__SHIFT 0x4
#define VGT_DEBUG_REG27__gsc0_rtr_MASK 0x20
#define VGT_DEBUG_REG27__gsc0_rtr__SHIFT 0x5
#define VGT_DEBUG_REG27__pipe1_rtr_MASK 0x40
#define VGT_DEBUG_REG27__pipe1_rtr__SHIFT 0x6
#define VGT_DEBUG_REG27__last_indx_of_prim_p1_q_MASK 0x80
#define VGT_DEBUG_REG27__last_indx_of_prim_p1_q__SHIFT 0x7
#define VGT_DEBUG_REG27__indices_to_send_p0_q_MASK 0x300
#define VGT_DEBUG_REG27__indices_to_send_p0_q__SHIFT 0x8
#define VGT_DEBUG_REG27__event_flag_p1_q_MASK 0x400
#define VGT_DEBUG_REG27__event_flag_p1_q__SHIFT 0xa
#define VGT_DEBUG_REG27__eop_p1_q_MASK 0x800
#define VGT_DEBUG_REG27__eop_p1_q__SHIFT 0xb
#define VGT_DEBUG_REG27__gs_out_prim_type_p0_q_MASK 0x3000
#define VGT_DEBUG_REG27__gs_out_prim_type_p0_q__SHIFT 0xc
#define VGT_DEBUG_REG27__gsc_null_primitive_p0_q_MASK 0x4000
#define VGT_DEBUG_REG27__gsc_null_primitive_p0_q__SHIFT 0xe
#define VGT_DEBUG_REG27__gsc_eop_p0_q_MASK 0x8000
#define VGT_DEBUG_REG27__gsc_eop_p0_q__SHIFT 0xf
#define VGT_DEBUG_REG27__gsc_2cycle_output_MASK 0x10000
#define VGT_DEBUG_REG27__gsc_2cycle_output__SHIFT 0x10
#define VGT_DEBUG_REG27__gsc_2nd_cycle_p0_q_MASK 0x20000
#define VGT_DEBUG_REG27__gsc_2nd_cycle_p0_q__SHIFT 0x11
#define VGT_DEBUG_REG27__last_indx_of_vsprim_MASK 0x40000
#define VGT_DEBUG_REG27__last_indx_of_vsprim__SHIFT 0x12
#define VGT_DEBUG_REG27__first_vsprim_of_gsprim_p0_q_MASK 0x80000
#define VGT_DEBUG_REG27__first_vsprim_of_gsprim_p0_q__SHIFT 0x13
#define VGT_DEBUG_REG27__gsc_indx_count_p0_q_MASK 0x7ff00000
#define VGT_DEBUG_REG27__gsc_indx_count_p0_q__SHIFT 0x14
#define VGT_DEBUG_REG27__last_vsprim_of_gsprim_MASK 0x80000000
#define VGT_DEBUG_REG27__last_vsprim_of_gsprim__SHIFT 0x1f
#define VGT_DEBUG_REG28__con_state_q_MASK 0xf
#define VGT_DEBUG_REG28__con_state_q__SHIFT 0x0
#define VGT_DEBUG_REG28__second_cycle_q_MASK 0x10
#define VGT_DEBUG_REG28__second_cycle_q__SHIFT 0x4
#define VGT_DEBUG_REG28__process_tri_middle_p0_q_MASK 0x20
#define VGT_DEBUG_REG28__process_tri_middle_p0_q__SHIFT 0x5
#define VGT_DEBUG_REG28__process_tri_1st_2nd_half_p0_q_MASK 0x40
#define VGT_DEBUG_REG28__process_tri_1st_2nd_half_p0_q__SHIFT 0x6
#define VGT_DEBUG_REG28__process_tri_center_poly_p0_q_MASK 0x80
#define VGT_DEBUG_REG28__process_tri_center_poly_p0_q__SHIFT 0x7
#define VGT_DEBUG_REG28__pipe0_patch_dr_MASK 0x100
#define VGT_DEBUG_REG28__pipe0_patch_dr__SHIFT 0x8
#define VGT_DEBUG_REG28__pipe0_edge_dr_MASK 0x200
#define VGT_DEBUG_REG28__pipe0_edge_dr__SHIFT 0x9
#define VGT_DEBUG_REG28__pipe1_dr_MASK 0x400
#define VGT_DEBUG_REG28__pipe1_dr__SHIFT 0xa
#define VGT_DEBUG_REG28__pipe0_patch_rtr_MASK 0x800
#define VGT_DEBUG_REG28__pipe0_patch_rtr__SHIFT 0xb
#define VGT_DEBUG_REG28__pipe0_edge_rtr_MASK 0x1000
#define VGT_DEBUG_REG28__pipe0_edge_rtr__SHIFT 0xc
#define VGT_DEBUG_REG28__pipe1_rtr_MASK 0x2000
#define VGT_DEBUG_REG28__pipe1_rtr__SHIFT 0xd
#define VGT_DEBUG_REG28__outer_parity_p0_q_MASK 0x4000
#define VGT_DEBUG_REG28__outer_parity_p0_q__SHIFT 0xe
#define VGT_DEBUG_REG28__parallel_parity_p0_q_MASK 0x8000
#define VGT_DEBUG_REG28__parallel_parity_p0_q__SHIFT 0xf
#define VGT_DEBUG_REG28__first_ring_of_patch_p0_q_MASK 0x10000
#define VGT_DEBUG_REG28__first_ring_of_patch_p0_q__SHIFT 0x10
#define VGT_DEBUG_REG28__last_ring_of_patch_p0_q_MASK 0x20000
#define VGT_DEBUG_REG28__last_ring_of_patch_p0_q__SHIFT 0x11
#define VGT_DEBUG_REG28__last_edge_of_outer_ring_p0_q_MASK 0x40000
#define VGT_DEBUG_REG28__last_edge_of_outer_ring_p0_q__SHIFT 0x12
#define VGT_DEBUG_REG28__last_point_of_outer_ring_p1_MASK 0x80000
#define VGT_DEBUG_REG28__last_point_of_outer_ring_p1__SHIFT 0x13
#define VGT_DEBUG_REG28__last_point_of_inner_ring_p1_MASK 0x100000
#define VGT_DEBUG_REG28__last_point_of_inner_ring_p1__SHIFT 0x14
#define VGT_DEBUG_REG28__outer_edge_tf_eq_one_p0_q_MASK 0x200000
#define VGT_DEBUG_REG28__outer_edge_tf_eq_one_p0_q__SHIFT 0x15
#define VGT_DEBUG_REG28__advance_outer_point_p1_MASK 0x400000
#define VGT_DEBUG_REG28__advance_outer_point_p1__SHIFT 0x16
#define VGT_DEBUG_REG28__advance_inner_point_p1_MASK 0x800000
#define VGT_DEBUG_REG28__advance_inner_point_p1__SHIFT 0x17
#define VGT_DEBUG_REG28__next_ring_is_rect_p0_q_MASK 0x1000000
#define VGT_DEBUG_REG28__next_ring_is_rect_p0_q__SHIFT 0x18
#define VGT_DEBUG_REG28__pipe1_outer1_rtr_MASK 0x2000000
#define VGT_DEBUG_REG28__pipe1_outer1_rtr__SHIFT 0x19
#define VGT_DEBUG_REG28__pipe1_outer2_rtr_MASK 0x4000000
#define VGT_DEBUG_REG28__pipe1_outer2_rtr__SHIFT 0x1a
#define VGT_DEBUG_REG28__pipe1_inner1_rtr_MASK 0x8000000
#define VGT_DEBUG_REG28__pipe1_inner1_rtr__SHIFT 0x1b
#define VGT_DEBUG_REG28__pipe1_inner2_rtr_MASK 0x10000000
#define VGT_DEBUG_REG28__pipe1_inner2_rtr__SHIFT 0x1c
#define VGT_DEBUG_REG28__pipe1_patch_rtr_MASK 0x20000000
#define VGT_DEBUG_REG28__pipe1_patch_rtr__SHIFT 0x1d
#define VGT_DEBUG_REG28__pipe1_edge_rtr_MASK 0x40000000
#define VGT_DEBUG_REG28__pipe1_edge_rtr__SHIFT 0x1e
#define VGT_DEBUG_REG28__use_stored_inner_q_ring2_MASK 0x80000000
#define VGT_DEBUG_REG28__use_stored_inner_q_ring2__SHIFT 0x1f
#define VGT_DEBUG_REG29__con_state_q_MASK 0xf
#define VGT_DEBUG_REG29__con_state_q__SHIFT 0x0
#define VGT_DEBUG_REG29__second_cycle_q_MASK 0x10
#define VGT_DEBUG_REG29__second_cycle_q__SHIFT 0x4
#define VGT_DEBUG_REG29__process_tri_middle_p0_q_MASK 0x20
#define VGT_DEBUG_REG29__process_tri_middle_p0_q__SHIFT 0x5
#define VGT_DEBUG_REG29__process_tri_1st_2nd_half_p0_q_MASK 0x40
#define VGT_DEBUG_REG29__process_tri_1st_2nd_half_p0_q__SHIFT 0x6
#define VGT_DEBUG_REG29__process_tri_center_poly_p0_q_MASK 0x80
#define VGT_DEBUG_REG29__process_tri_center_poly_p0_q__SHIFT 0x7
#define VGT_DEBUG_REG29__pipe0_patch_dr_MASK 0x100
#define VGT_DEBUG_REG29__pipe0_patch_dr__SHIFT 0x8
#define VGT_DEBUG_REG29__pipe0_edge_dr_MASK 0x200
#define VGT_DEBUG_REG29__pipe0_edge_dr__SHIFT 0x9
#define VGT_DEBUG_REG29__pipe1_dr_MASK 0x400
#define VGT_DEBUG_REG29__pipe1_dr__SHIFT 0xa
#define VGT_DEBUG_REG29__pipe0_patch_rtr_MASK 0x800
#define VGT_DEBUG_REG29__pipe0_patch_rtr__SHIFT 0xb
#define VGT_DEBUG_REG29__pipe0_edge_rtr_MASK 0x1000
#define VGT_DEBUG_REG29__pipe0_edge_rtr__SHIFT 0xc
#define VGT_DEBUG_REG29__pipe1_rtr_MASK 0x2000
#define VGT_DEBUG_REG29__pipe1_rtr__SHIFT 0xd
#define VGT_DEBUG_REG29__outer_parity_p0_q_MASK 0x4000
#define VGT_DEBUG_REG29__outer_parity_p0_q__SHIFT 0xe
#define VGT_DEBUG_REG29__parallel_parity_p0_q_MASK 0x8000
#define VGT_DEBUG_REG29__parallel_parity_p0_q__SHIFT 0xf
#define VGT_DEBUG_REG29__first_ring_of_patch_p0_q_MASK 0x10000
#define VGT_DEBUG_REG29__first_ring_of_patch_p0_q__SHIFT 0x10
#define VGT_DEBUG_REG29__last_ring_of_patch_p0_q_MASK 0x20000
#define VGT_DEBUG_REG29__last_ring_of_patch_p0_q__SHIFT 0x11
#define VGT_DEBUG_REG29__last_edge_of_outer_ring_p0_q_MASK 0x40000
#define VGT_DEBUG_REG29__last_edge_of_outer_ring_p0_q__SHIFT 0x12
#define VGT_DEBUG_REG29__last_point_of_outer_ring_p1_MASK 0x80000
#define VGT_DEBUG_REG29__last_point_of_outer_ring_p1__SHIFT 0x13
#define VGT_DEBUG_REG29__last_point_of_inner_ring_p1_MASK 0x100000
#define VGT_DEBUG_REG29__last_point_of_inner_ring_p1__SHIFT 0x14
#define VGT_DEBUG_REG29__outer_edge_tf_eq_one_p0_q_MASK 0x200000
#define VGT_DEBUG_REG29__outer_edge_tf_eq_one_p0_q__SHIFT 0x15
#define VGT_DEBUG_REG29__advance_outer_point_p1_MASK 0x400000
#define VGT_DEBUG_REG29__advance_outer_point_p1__SHIFT 0x16
#define VGT_DEBUG_REG29__advance_inner_point_p1_MASK 0x800000
#define VGT_DEBUG_REG29__advance_inner_point_p1__SHIFT 0x17
#define VGT_DEBUG_REG29__next_ring_is_rect_p0_q_MASK 0x1000000
#define VGT_DEBUG_REG29__next_ring_is_rect_p0_q__SHIFT 0x18
#define VGT_DEBUG_REG29__pipe1_outer1_rtr_MASK 0x2000000
#define VGT_DEBUG_REG29__pipe1_outer1_rtr__SHIFT 0x19
#define VGT_DEBUG_REG29__pipe1_outer2_rtr_MASK 0x4000000
#define VGT_DEBUG_REG29__pipe1_outer2_rtr__SHIFT 0x1a
#define VGT_DEBUG_REG29__pipe1_inner1_rtr_MASK 0x8000000
#define VGT_DEBUG_REG29__pipe1_inner1_rtr__SHIFT 0x1b
#define VGT_DEBUG_REG29__pipe1_inner2_rtr_MASK 0x10000000
#define VGT_DEBUG_REG29__pipe1_inner2_rtr__SHIFT 0x1c
#define VGT_DEBUG_REG29__pipe1_patch_rtr_MASK 0x20000000
#define VGT_DEBUG_REG29__pipe1_patch_rtr__SHIFT 0x1d
#define VGT_DEBUG_REG29__pipe1_edge_rtr_MASK 0x40000000
#define VGT_DEBUG_REG29__pipe1_edge_rtr__SHIFT 0x1e
#define VGT_DEBUG_REG29__use_stored_inner_q_ring3_MASK 0x80000000
#define VGT_DEBUG_REG29__use_stored_inner_q_ring3__SHIFT 0x1f
#define VGT_DEBUG_REG31__pipe0_dr_MASK 0x1
#define VGT_DEBUG_REG31__pipe0_dr__SHIFT 0x0
#define VGT_DEBUG_REG31__pipe0_rtr_MASK 0x2
#define VGT_DEBUG_REG31__pipe0_rtr__SHIFT 0x1
#define VGT_DEBUG_REG31__pipe1_outer_dr_MASK 0x4
#define VGT_DEBUG_REG31__pipe1_outer_dr__SHIFT 0x2
#define VGT_DEBUG_REG31__pipe1_inner_dr_MASK 0x8
20710#define VGT_DEBUG_REG31__pipe1_inner_dr__SHIFT 0x3
20711#define VGT_DEBUG_REG31__pipe2_outer_dr_MASK 0x10
20712#define VGT_DEBUG_REG31__pipe2_outer_dr__SHIFT 0x4
20713#define VGT_DEBUG_REG31__pipe2_inner_dr_MASK 0x20
20714#define VGT_DEBUG_REG31__pipe2_inner_dr__SHIFT 0x5
20715#define VGT_DEBUG_REG31__pipe3_outer_dr_MASK 0x40
20716#define VGT_DEBUG_REG31__pipe3_outer_dr__SHIFT 0x6
20717#define VGT_DEBUG_REG31__pipe3_inner_dr_MASK 0x80
20718#define VGT_DEBUG_REG31__pipe3_inner_dr__SHIFT 0x7
20719#define VGT_DEBUG_REG31__pipe4_outer_dr_MASK 0x100
20720#define VGT_DEBUG_REG31__pipe4_outer_dr__SHIFT 0x8
20721#define VGT_DEBUG_REG31__pipe4_inner_dr_MASK 0x200
20722#define VGT_DEBUG_REG31__pipe4_inner_dr__SHIFT 0x9
20723#define VGT_DEBUG_REG31__pipe5_outer_dr_MASK 0x400
20724#define VGT_DEBUG_REG31__pipe5_outer_dr__SHIFT 0xa
20725#define VGT_DEBUG_REG31__pipe5_inner_dr_MASK 0x800
20726#define VGT_DEBUG_REG31__pipe5_inner_dr__SHIFT 0xb
20727#define VGT_DEBUG_REG31__pipe2_outer_rtr_MASK 0x1000
20728#define VGT_DEBUG_REG31__pipe2_outer_rtr__SHIFT 0xc
20729#define VGT_DEBUG_REG31__pipe2_inner_rtr_MASK 0x2000
20730#define VGT_DEBUG_REG31__pipe2_inner_rtr__SHIFT 0xd
20731#define VGT_DEBUG_REG31__pipe3_outer_rtr_MASK 0x4000
20732#define VGT_DEBUG_REG31__pipe3_outer_rtr__SHIFT 0xe
20733#define VGT_DEBUG_REG31__pipe3_inner_rtr_MASK 0x8000
20734#define VGT_DEBUG_REG31__pipe3_inner_rtr__SHIFT 0xf
20735#define VGT_DEBUG_REG31__pipe4_outer_rtr_MASK 0x10000
20736#define VGT_DEBUG_REG31__pipe4_outer_rtr__SHIFT 0x10
20737#define VGT_DEBUG_REG31__pipe4_inner_rtr_MASK 0x20000
20738#define VGT_DEBUG_REG31__pipe4_inner_rtr__SHIFT 0x11
20739#define VGT_DEBUG_REG31__pipe5_outer_rtr_MASK 0x40000
20740#define VGT_DEBUG_REG31__pipe5_outer_rtr__SHIFT 0x12
20741#define VGT_DEBUG_REG31__pipe5_inner_rtr_MASK 0x80000
20742#define VGT_DEBUG_REG31__pipe5_inner_rtr__SHIFT 0x13
20743#define VGT_DEBUG_REG31__pg_con_outer_point1_rts_MASK 0x100000
20744#define VGT_DEBUG_REG31__pg_con_outer_point1_rts__SHIFT 0x14
20745#define VGT_DEBUG_REG31__pg_con_outer_point2_rts_MASK 0x200000
20746#define VGT_DEBUG_REG31__pg_con_outer_point2_rts__SHIFT 0x15
20747#define VGT_DEBUG_REG31__pg_con_inner_point1_rts_MASK 0x400000
20748#define VGT_DEBUG_REG31__pg_con_inner_point1_rts__SHIFT 0x16
20749#define VGT_DEBUG_REG31__pg_con_inner_point2_rts_MASK 0x800000
20750#define VGT_DEBUG_REG31__pg_con_inner_point2_rts__SHIFT 0x17
20751#define VGT_DEBUG_REG31__pg_patch_fifo_empty_MASK 0x1000000
20752#define VGT_DEBUG_REG31__pg_patch_fifo_empty__SHIFT 0x18
20753#define VGT_DEBUG_REG31__pg_edge_fifo_empty_MASK 0x2000000
20754#define VGT_DEBUG_REG31__pg_edge_fifo_empty__SHIFT 0x19
20755#define VGT_DEBUG_REG31__pg_inner3_perp_fifo_empty_MASK 0x4000000
20756#define VGT_DEBUG_REG31__pg_inner3_perp_fifo_empty__SHIFT 0x1a
20757#define VGT_DEBUG_REG31__pg_patch_fifo_full_MASK 0x8000000
20758#define VGT_DEBUG_REG31__pg_patch_fifo_full__SHIFT 0x1b
20759#define VGT_DEBUG_REG31__pg_edge_fifo_full_MASK 0x10000000
20760#define VGT_DEBUG_REG31__pg_edge_fifo_full__SHIFT 0x1c
20761#define VGT_DEBUG_REG31__pg_inner_perp_fifo_full_MASK 0x20000000
20762#define VGT_DEBUG_REG31__pg_inner_perp_fifo_full__SHIFT 0x1d
20763#define VGT_DEBUG_REG31__outer_ring_done_q_MASK 0x40000000
20764#define VGT_DEBUG_REG31__outer_ring_done_q__SHIFT 0x1e
20765#define VGT_DEBUG_REG31__inner_ring_done_q_MASK 0x80000000
20766#define VGT_DEBUG_REG31__inner_ring_done_q__SHIFT 0x1f
20767#define VGT_DEBUG_REG32__first_ring_of_patch_MASK 0x1
20768#define VGT_DEBUG_REG32__first_ring_of_patch__SHIFT 0x0
20769#define VGT_DEBUG_REG32__last_ring_of_patch_MASK 0x2
20770#define VGT_DEBUG_REG32__last_ring_of_patch__SHIFT 0x1
20771#define VGT_DEBUG_REG32__last_edge_of_outer_ring_MASK 0x4
20772#define VGT_DEBUG_REG32__last_edge_of_outer_ring__SHIFT 0x2
20773#define VGT_DEBUG_REG32__last_point_of_outer_edge_MASK 0x8
20774#define VGT_DEBUG_REG32__last_point_of_outer_edge__SHIFT 0x3
20775#define VGT_DEBUG_REG32__last_edge_of_inner_ring_MASK 0x10
20776#define VGT_DEBUG_REG32__last_edge_of_inner_ring__SHIFT 0x4
20777#define VGT_DEBUG_REG32__last_point_of_inner_edge_MASK 0x20
20778#define VGT_DEBUG_REG32__last_point_of_inner_edge__SHIFT 0x5
20779#define VGT_DEBUG_REG32__last_patch_of_tg_p0_q_MASK 0x40
20780#define VGT_DEBUG_REG32__last_patch_of_tg_p0_q__SHIFT 0x6
20781#define VGT_DEBUG_REG32__event_null_special_p0_q_MASK 0x80
20782#define VGT_DEBUG_REG32__event_null_special_p0_q__SHIFT 0x7
20783#define VGT_DEBUG_REG32__event_flag_p5_q_MASK 0x100
20784#define VGT_DEBUG_REG32__event_flag_p5_q__SHIFT 0x8
20785#define VGT_DEBUG_REG32__first_point_of_patch_p5_q_MASK 0x200
20786#define VGT_DEBUG_REG32__first_point_of_patch_p5_q__SHIFT 0x9
20787#define VGT_DEBUG_REG32__first_point_of_edge_p5_q_MASK 0x400
20788#define VGT_DEBUG_REG32__first_point_of_edge_p5_q__SHIFT 0xa
20789#define VGT_DEBUG_REG32__last_patch_of_tg_p5_q_MASK 0x800
20790#define VGT_DEBUG_REG32__last_patch_of_tg_p5_q__SHIFT 0xb
20791#define VGT_DEBUG_REG32__tess_topology_p5_q_MASK 0x3000
20792#define VGT_DEBUG_REG32__tess_topology_p5_q__SHIFT 0xc
20793#define VGT_DEBUG_REG32__pipe5_inner3_rtr_MASK 0x4000
20794#define VGT_DEBUG_REG32__pipe5_inner3_rtr__SHIFT 0xe
20795#define VGT_DEBUG_REG32__pipe5_inner2_rtr_MASK 0x8000
20796#define VGT_DEBUG_REG32__pipe5_inner2_rtr__SHIFT 0xf
20797#define VGT_DEBUG_REG32__pg_edge_fifo3_full_MASK 0x10000
20798#define VGT_DEBUG_REG32__pg_edge_fifo3_full__SHIFT 0x10
20799#define VGT_DEBUG_REG32__pg_edge_fifo2_full_MASK 0x20000
20800#define VGT_DEBUG_REG32__pg_edge_fifo2_full__SHIFT 0x11
20801#define VGT_DEBUG_REG32__pg_inner3_point_fifo_full_MASK 0x40000
20802#define VGT_DEBUG_REG32__pg_inner3_point_fifo_full__SHIFT 0x12
20803#define VGT_DEBUG_REG32__pg_outer3_point_fifo_full_MASK 0x80000
20804#define VGT_DEBUG_REG32__pg_outer3_point_fifo_full__SHIFT 0x13
20805#define VGT_DEBUG_REG32__pg_inner2_point_fifo_full_MASK 0x100000
20806#define VGT_DEBUG_REG32__pg_inner2_point_fifo_full__SHIFT 0x14
20807#define VGT_DEBUG_REG32__pg_outer2_point_fifo_full_MASK 0x200000
20808#define VGT_DEBUG_REG32__pg_outer2_point_fifo_full__SHIFT 0x15
20809#define VGT_DEBUG_REG32__pg_inner_point_fifo_full_MASK 0x400000
20810#define VGT_DEBUG_REG32__pg_inner_point_fifo_full__SHIFT 0x16
20811#define VGT_DEBUG_REG32__pg_outer_point_fifo_full_MASK 0x800000
20812#define VGT_DEBUG_REG32__pg_outer_point_fifo_full__SHIFT 0x17
20813#define VGT_DEBUG_REG32__inner2_fifos_rtr_MASK 0x1000000
20814#define VGT_DEBUG_REG32__inner2_fifos_rtr__SHIFT 0x18
20815#define VGT_DEBUG_REG32__inner_fifos_rtr_MASK 0x2000000
20816#define VGT_DEBUG_REG32__inner_fifos_rtr__SHIFT 0x19
20817#define VGT_DEBUG_REG32__outer_fifos_rtr_MASK 0x4000000
20818#define VGT_DEBUG_REG32__outer_fifos_rtr__SHIFT 0x1a
20819#define VGT_DEBUG_REG32__fifos_rtr_MASK 0x8000000
20820#define VGT_DEBUG_REG32__fifos_rtr__SHIFT 0x1b
20821#define VGT_DEBUG_REG32__SPARE_MASK 0xf0000000
20822#define VGT_DEBUG_REG32__SPARE__SHIFT 0x1c
20823#define VGT_DEBUG_REG33__pipe0_patch_dr_MASK 0x1
20824#define VGT_DEBUG_REG33__pipe0_patch_dr__SHIFT 0x0
20825#define VGT_DEBUG_REG33__ring3_pipe1_dr_MASK 0x2
20826#define VGT_DEBUG_REG33__ring3_pipe1_dr__SHIFT 0x1
20827#define VGT_DEBUG_REG33__pipe1_dr_MASK 0x4
20828#define VGT_DEBUG_REG33__pipe1_dr__SHIFT 0x2
20829#define VGT_DEBUG_REG33__pipe2_dr_MASK 0x8
20830#define VGT_DEBUG_REG33__pipe2_dr__SHIFT 0x3
20831#define VGT_DEBUG_REG33__pipe0_patch_rtr_MASK 0x10
20832#define VGT_DEBUG_REG33__pipe0_patch_rtr__SHIFT 0x4
20833#define VGT_DEBUG_REG33__ring2_pipe1_dr_MASK 0x20
20834#define VGT_DEBUG_REG33__ring2_pipe1_dr__SHIFT 0x5
20835#define VGT_DEBUG_REG33__ring1_pipe1_dr_MASK 0x40
20836#define VGT_DEBUG_REG33__ring1_pipe1_dr__SHIFT 0x6
20837#define VGT_DEBUG_REG33__pipe2_rtr_MASK 0x80
20838#define VGT_DEBUG_REG33__pipe2_rtr__SHIFT 0x7
20839#define VGT_DEBUG_REG33__pipe3_dr_MASK 0x100
20840#define VGT_DEBUG_REG33__pipe3_dr__SHIFT 0x8
20841#define VGT_DEBUG_REG33__pipe3_rtr_MASK 0x200
20842#define VGT_DEBUG_REG33__pipe3_rtr__SHIFT 0x9
20843#define VGT_DEBUG_REG33__ring2_in_sync_q_MASK 0x400
20844#define VGT_DEBUG_REG33__ring2_in_sync_q__SHIFT 0xa
20845#define VGT_DEBUG_REG33__ring1_in_sync_q_MASK 0x800
20846#define VGT_DEBUG_REG33__ring1_in_sync_q__SHIFT 0xb
20847#define VGT_DEBUG_REG33__pipe1_patch_rtr_MASK 0x1000
20848#define VGT_DEBUG_REG33__pipe1_patch_rtr__SHIFT 0xc
20849#define VGT_DEBUG_REG33__ring3_in_sync_q_MASK 0x2000
20850#define VGT_DEBUG_REG33__ring3_in_sync_q__SHIFT 0xd
20851#define VGT_DEBUG_REG33__tm_te11_event_rtr_MASK 0x4000
20852#define VGT_DEBUG_REG33__tm_te11_event_rtr__SHIFT 0xe
20853#define VGT_DEBUG_REG33__first_prim_of_patch_q_MASK 0x8000
20854#define VGT_DEBUG_REG33__first_prim_of_patch_q__SHIFT 0xf
20855#define VGT_DEBUG_REG33__con_prim_fifo_full_MASK 0x10000
20856#define VGT_DEBUG_REG33__con_prim_fifo_full__SHIFT 0x10
20857#define VGT_DEBUG_REG33__con_vert_fifo_full_MASK 0x20000
20858#define VGT_DEBUG_REG33__con_vert_fifo_full__SHIFT 0x11
20859#define VGT_DEBUG_REG33__con_prim_fifo_empty_MASK 0x40000
20860#define VGT_DEBUG_REG33__con_prim_fifo_empty__SHIFT 0x12
20861#define VGT_DEBUG_REG33__con_vert_fifo_empty_MASK 0x80000
20862#define VGT_DEBUG_REG33__con_vert_fifo_empty__SHIFT 0x13
20863#define VGT_DEBUG_REG33__last_patch_of_tg_p0_q_MASK 0x100000
20864#define VGT_DEBUG_REG33__last_patch_of_tg_p0_q__SHIFT 0x14
20865#define VGT_DEBUG_REG33__ring3_valid_p2_MASK 0x200000
20866#define VGT_DEBUG_REG33__ring3_valid_p2__SHIFT 0x15
20867#define VGT_DEBUG_REG33__ring2_valid_p2_MASK 0x400000
20868#define VGT_DEBUG_REG33__ring2_valid_p2__SHIFT 0x16
20869#define VGT_DEBUG_REG33__ring1_valid_p2_MASK 0x800000
20870#define VGT_DEBUG_REG33__ring1_valid_p2__SHIFT 0x17
20871#define VGT_DEBUG_REG33__tess_type_p0_q_MASK 0x3000000
20872#define VGT_DEBUG_REG33__tess_type_p0_q__SHIFT 0x18
20873#define VGT_DEBUG_REG33__tess_topology_p0_q_MASK 0xc000000
20874#define VGT_DEBUG_REG33__tess_topology_p0_q__SHIFT 0x1a
20875#define VGT_DEBUG_REG33__te11_out_vert_gs_en_MASK 0x10000000
20876#define VGT_DEBUG_REG33__te11_out_vert_gs_en__SHIFT 0x1c
20877#define VGT_DEBUG_REG33__con_ring3_busy_MASK 0x20000000
20878#define VGT_DEBUG_REG33__con_ring3_busy__SHIFT 0x1d
20879#define VGT_DEBUG_REG33__con_ring2_busy_MASK 0x40000000
20880#define VGT_DEBUG_REG33__con_ring2_busy__SHIFT 0x1e
20881#define VGT_DEBUG_REG33__con_ring1_busy_MASK 0x80000000
20882#define VGT_DEBUG_REG33__con_ring1_busy__SHIFT 0x1f
20883#define VGT_DEBUG_REG34__con_state_q_MASK 0xf
20884#define VGT_DEBUG_REG34__con_state_q__SHIFT 0x0
20885#define VGT_DEBUG_REG34__second_cycle_q_MASK 0x10
20886#define VGT_DEBUG_REG34__second_cycle_q__SHIFT 0x4
20887#define VGT_DEBUG_REG34__process_tri_middle_p0_q_MASK 0x20
20888#define VGT_DEBUG_REG34__process_tri_middle_p0_q__SHIFT 0x5
20889#define VGT_DEBUG_REG34__process_tri_1st_2nd_half_p0_q_MASK 0x40
20890#define VGT_DEBUG_REG34__process_tri_1st_2nd_half_p0_q__SHIFT 0x6
20891#define VGT_DEBUG_REG34__process_tri_center_poly_p0_q_MASK 0x80
20892#define VGT_DEBUG_REG34__process_tri_center_poly_p0_q__SHIFT 0x7
20893#define VGT_DEBUG_REG34__pipe0_patch_dr_MASK 0x100
20894#define VGT_DEBUG_REG34__pipe0_patch_dr__SHIFT 0x8
20895#define VGT_DEBUG_REG34__pipe0_edge_dr_MASK 0x200
20896#define VGT_DEBUG_REG34__pipe0_edge_dr__SHIFT 0x9
20897#define VGT_DEBUG_REG34__pipe1_dr_MASK 0x400
20898#define VGT_DEBUG_REG34__pipe1_dr__SHIFT 0xa
20899#define VGT_DEBUG_REG34__pipe0_patch_rtr_MASK 0x800
20900#define VGT_DEBUG_REG34__pipe0_patch_rtr__SHIFT 0xb
20901#define VGT_DEBUG_REG34__pipe0_edge_rtr_MASK 0x1000
20902#define VGT_DEBUG_REG34__pipe0_edge_rtr__SHIFT 0xc
20903#define VGT_DEBUG_REG34__pipe1_rtr_MASK 0x2000
20904#define VGT_DEBUG_REG34__pipe1_rtr__SHIFT 0xd
20905#define VGT_DEBUG_REG34__outer_parity_p0_q_MASK 0x4000
20906#define VGT_DEBUG_REG34__outer_parity_p0_q__SHIFT 0xe
20907#define VGT_DEBUG_REG34__parallel_parity_p0_q_MASK 0x8000
20908#define VGT_DEBUG_REG34__parallel_parity_p0_q__SHIFT 0xf
20909#define VGT_DEBUG_REG34__first_ring_of_patch_p0_q_MASK 0x10000
20910#define VGT_DEBUG_REG34__first_ring_of_patch_p0_q__SHIFT 0x10
20911#define VGT_DEBUG_REG34__last_ring_of_patch_p0_q_MASK 0x20000
20912#define VGT_DEBUG_REG34__last_ring_of_patch_p0_q__SHIFT 0x11
20913#define VGT_DEBUG_REG34__last_edge_of_outer_ring_p0_q_MASK 0x40000
20914#define VGT_DEBUG_REG34__last_edge_of_outer_ring_p0_q__SHIFT 0x12
20915#define VGT_DEBUG_REG34__last_point_of_outer_ring_p1_MASK 0x80000
20916#define VGT_DEBUG_REG34__last_point_of_outer_ring_p1__SHIFT 0x13
20917#define VGT_DEBUG_REG34__last_point_of_inner_ring_p1_MASK 0x100000
20918#define VGT_DEBUG_REG34__last_point_of_inner_ring_p1__SHIFT 0x14
20919#define VGT_DEBUG_REG34__outer_edge_tf_eq_one_p0_q_MASK 0x200000
20920#define VGT_DEBUG_REG34__outer_edge_tf_eq_one_p0_q__SHIFT 0x15
20921#define VGT_DEBUG_REG34__advance_outer_point_p1_MASK 0x400000
20922#define VGT_DEBUG_REG34__advance_outer_point_p1__SHIFT 0x16
20923#define VGT_DEBUG_REG34__advance_inner_point_p1_MASK 0x800000
20924#define VGT_DEBUG_REG34__advance_inner_point_p1__SHIFT 0x17
20925#define VGT_DEBUG_REG34__next_ring_is_rect_p0_q_MASK 0x1000000
20926#define VGT_DEBUG_REG34__next_ring_is_rect_p0_q__SHIFT 0x18
20927#define VGT_DEBUG_REG34__pipe1_outer1_rtr_MASK 0x2000000
20928#define VGT_DEBUG_REG34__pipe1_outer1_rtr__SHIFT 0x19
20929#define VGT_DEBUG_REG34__pipe1_outer2_rtr_MASK 0x4000000
20930#define VGT_DEBUG_REG34__pipe1_outer2_rtr__SHIFT 0x1a
20931#define VGT_DEBUG_REG34__pipe1_inner1_rtr_MASK 0x8000000
20932#define VGT_DEBUG_REG34__pipe1_inner1_rtr__SHIFT 0x1b
20933#define VGT_DEBUG_REG34__pipe1_inner2_rtr_MASK 0x10000000
20934#define VGT_DEBUG_REG34__pipe1_inner2_rtr__SHIFT 0x1c
20935#define VGT_DEBUG_REG34__pipe1_patch_rtr_MASK 0x20000000
20936#define VGT_DEBUG_REG34__pipe1_patch_rtr__SHIFT 0x1d
20937#define VGT_DEBUG_REG34__pipe1_edge_rtr_MASK 0x40000000
20938#define VGT_DEBUG_REG34__pipe1_edge_rtr__SHIFT 0x1e
20939#define VGT_DEBUG_REG34__use_stored_inner_q_ring1_MASK 0x80000000
20940#define VGT_DEBUG_REG34__use_stored_inner_q_ring1__SHIFT 0x1f
20941#define VGT_DEBUG_REG36__VGT_PA_clipp_eop_MASK 0xffffffff
20942#define VGT_DEBUG_REG36__VGT_PA_clipp_eop__SHIFT 0x0
20943#define VGT_PERFCOUNTER_SEID_MASK__PERF_SEID_IGNORE_MASK_MASK 0xff
20944#define VGT_PERFCOUNTER_SEID_MASK__PERF_SEID_IGNORE_MASK__SHIFT 0x0
20945#define VGT_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x3ff
20946#define VGT_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
20947#define VGT_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0xffc00
20948#define VGT_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
20949#define VGT_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0xf00000
20950#define VGT_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
20951#define VGT_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0xf000000
20952#define VGT_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
20953#define VGT_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000
20954#define VGT_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
20955#define VGT_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x3ff
20956#define VGT_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
20957#define VGT_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0xffc00
20958#define VGT_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
20959#define VGT_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0xf00000
20960#define VGT_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
20961#define VGT_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0xf000000
20962#define VGT_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
20963#define VGT_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000
20964#define VGT_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
20965#define VGT_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0xff
20966#define VGT_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
20967#define VGT_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xf0000000
20968#define VGT_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
20969#define VGT_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0xff
20970#define VGT_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
20971#define VGT_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xf0000000
20972#define VGT_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
20973#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x3ff
20974#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
20975#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0xffc00
20976#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
20977#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xf000000
20978#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
20979#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xf0000000
20980#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
20981#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x3ff
20982#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
20983#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0xffc00
20984#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
20985#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0xf000000
20986#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
20987#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xf0000000
20988#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
20989#define VGT_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffff
20990#define VGT_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
20991#define VGT_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffff
20992#define VGT_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
20993#define VGT_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffff
20994#define VGT_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
20995#define VGT_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffff
20996#define VGT_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
20997#define VGT_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffff
20998#define VGT_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
20999#define VGT_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffff
21000#define VGT_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
21001#define VGT_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffff
21002#define VGT_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
21003#define VGT_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffff
21004#define VGT_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
21005#define IA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x3ff
21006#define IA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
21007#define IA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0xffc00
21008#define IA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
21009#define IA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0xf00000
21010#define IA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
21011#define IA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0xf000000
21012#define IA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
21013#define IA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000
21014#define IA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
21015#define IA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0xff
21016#define IA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
21017#define IA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000
21018#define IA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
21019#define IA_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0xff
21020#define IA_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
21021#define IA_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xf0000000
21022#define IA_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
21023#define IA_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0xff
21024#define IA_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
21025#define IA_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xf0000000
21026#define IA_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
21027#define IA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x3ff
21028#define IA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
21029#define IA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0xffc00
21030#define IA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
21031#define IA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xf000000
21032#define IA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
21033#define IA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xf0000000
21034#define IA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
21035#define IA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffff
21036#define IA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
21037#define IA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffff
21038#define IA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
21039#define IA_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffff
21040#define IA_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
21041#define IA_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffff
21042#define IA_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
21043#define IA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffff
21044#define IA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
21045#define IA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffff
21046#define IA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
21047#define IA_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffff
21048#define IA_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
21049#define IA_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffff
21050#define IA_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
21051#define WD_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0xff
21052#define WD_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
21053#define WD_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000
21054#define WD_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
21055#define WD_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0xff
21056#define WD_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
21057#define WD_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000
21058#define WD_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
21059#define WD_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0xff
21060#define WD_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
21061#define WD_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xf0000000
21062#define WD_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
21063#define WD_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0xff
21064#define WD_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
21065#define WD_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xf0000000
21066#define WD_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
21067#define WD_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffff
21068#define WD_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
21069#define WD_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffff
21070#define WD_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
21071#define WD_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffff
21072#define WD_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
21073#define WD_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffff
21074#define WD_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
21075#define WD_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffff
21076#define WD_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
21077#define WD_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffff
21078#define WD_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
21079#define WD_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffff
21080#define WD_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
21081#define WD_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffff
21082#define WD_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
21083#define DIDT_IND_INDEX__DIDT_IND_INDEX_MASK 0xffffffff
21084#define DIDT_IND_INDEX__DIDT_IND_INDEX__SHIFT 0x0
21085#define DIDT_IND_DATA__DIDT_IND_DATA_MASK 0xffffffff
21086#define DIDT_IND_DATA__DIDT_IND_DATA__SHIFT 0x0
21087#define DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK 0x1
21088#define DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT 0x0
21089#define DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK 0x2
21090#define DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT 0x1
21091#define DIDT_SQ_CTRL0__PHASE_OFFSET_MASK 0xc
21092#define DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT 0x2
21093#define DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK 0x10
21094#define DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
21095#define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
21096#define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
21097#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0
21098#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6
21099#define DIDT_SQ_CTRL1__MIN_POWER_MASK 0xffff
21100#define DIDT_SQ_CTRL1__MIN_POWER__SHIFT 0x0
21101#define DIDT_SQ_CTRL1__MAX_POWER_MASK 0xffff0000
21102#define DIDT_SQ_CTRL1__MAX_POWER__SHIFT 0x10
21103#define DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK 0x3fff
21104#define DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT 0x0
21105#define DIDT_SQ_CTRL2__UNUSED_0_MASK 0xc000
21106#define DIDT_SQ_CTRL2__UNUSED_0__SHIFT 0xe
21107#define DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK 0x3ff0000
21108#define DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
21109#define DIDT_SQ_CTRL2__UNUSED_1_MASK 0x4000000
21110#define DIDT_SQ_CTRL2__UNUSED_1__SHIFT 0x1a
21111#define DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000
21112#define DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
21113#define DIDT_SQ_CTRL2__UNUSED_2_MASK 0x80000000
21114#define DIDT_SQ_CTRL2__UNUSED_2__SHIFT 0x1f
21115#define DIDT_SQ_CTRL_OCP__UNUSED_0_MASK 0xffff
21116#define DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT 0x0
21117#define DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK 0xffff0000
21118#define DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT 0x10
21119#define DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK 0xff
21120#define DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT 0x0
21121#define DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK 0xff00
21122#define DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT 0x8
21123#define DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK 0xff0000
21124#define DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT 0x10
21125#define DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK 0xff000000
21126#define DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT 0x18
21127#define DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK 0xff
21128#define DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT 0x0
21129#define DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK 0xff00
21130#define DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT 0x8
21131#define DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK 0xff0000
21132#define DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT 0x10
21133#define DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK 0xff000000
21134#define DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT 0x18
21135#define DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK 0xff
21136#define DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT 0x0
21137#define DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK 0xff00
21138#define DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT 0x8
21139#define DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK 0xff0000
21140#define DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT 0x10
21141#define DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK 0xff000000
21142#define DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT 0x18
21143#define DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK 0x1
21144#define DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT 0x0
21145#define DIDT_DB_CTRL0__USE_REF_CLOCK_MASK 0x2
21146#define DIDT_DB_CTRL0__USE_REF_CLOCK__SHIFT 0x1
21147#define DIDT_DB_CTRL0__PHASE_OFFSET_MASK 0xc
21148#define DIDT_DB_CTRL0__PHASE_OFFSET__SHIFT 0x2
21149#define DIDT_DB_CTRL0__DIDT_CTRL_RST_MASK 0x10
21150#define DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
21151#define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
21152#define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
21153#define DIDT_DB_CTRL0__UNUSED_0_MASK 0xffffffc0
21154#define DIDT_DB_CTRL0__UNUSED_0__SHIFT 0x6
21155#define DIDT_DB_CTRL1__MIN_POWER_MASK 0xffff
21156#define DIDT_DB_CTRL1__MIN_POWER__SHIFT 0x0
21157#define DIDT_DB_CTRL1__MAX_POWER_MASK 0xffff0000
21158#define DIDT_DB_CTRL1__MAX_POWER__SHIFT 0x10
21159#define DIDT_DB_CTRL2__MAX_POWER_DELTA_MASK 0x3fff
21160#define DIDT_DB_CTRL2__MAX_POWER_DELTA__SHIFT 0x0
21161#define DIDT_DB_CTRL2__UNUSED_0_MASK 0xc000
21162#define DIDT_DB_CTRL2__UNUSED_0__SHIFT 0xe
21163#define DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK 0x3ff0000
21164#define DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
21165#define DIDT_DB_CTRL2__UNUSED_1_MASK 0x4000000
21166#define DIDT_DB_CTRL2__UNUSED_1__SHIFT 0x1a
21167#define DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000
21168#define DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
21169#define DIDT_DB_CTRL2__UNUSED_2_MASK 0x80000000
21170#define DIDT_DB_CTRL2__UNUSED_2__SHIFT 0x1f
21171#define DIDT_DB_CTRL_OCP__UNUSED_0_MASK 0xffff
21172#define DIDT_DB_CTRL_OCP__UNUSED_0__SHIFT 0x0
21173#define DIDT_DB_CTRL_OCP__OCP_MAX_POWER_MASK 0xffff0000
21174#define DIDT_DB_CTRL_OCP__OCP_MAX_POWER__SHIFT 0x10
21175#define DIDT_DB_WEIGHT0_3__WEIGHT0_MASK 0xff
21176#define DIDT_DB_WEIGHT0_3__WEIGHT0__SHIFT 0x0
21177#define DIDT_DB_WEIGHT0_3__WEIGHT1_MASK 0xff00
21178#define DIDT_DB_WEIGHT0_3__WEIGHT1__SHIFT 0x8
21179#define DIDT_DB_WEIGHT0_3__WEIGHT2_MASK 0xff0000
21180#define DIDT_DB_WEIGHT0_3__WEIGHT2__SHIFT 0x10
21181#define DIDT_DB_WEIGHT0_3__WEIGHT3_MASK 0xff000000
21182#define DIDT_DB_WEIGHT0_3__WEIGHT3__SHIFT 0x18
21183#define DIDT_DB_WEIGHT4_7__WEIGHT4_MASK 0xff
21184#define DIDT_DB_WEIGHT4_7__WEIGHT4__SHIFT 0x0
21185#define DIDT_DB_WEIGHT4_7__WEIGHT5_MASK 0xff00
21186#define DIDT_DB_WEIGHT4_7__WEIGHT5__SHIFT 0x8
21187#define DIDT_DB_WEIGHT4_7__WEIGHT6_MASK 0xff0000
21188#define DIDT_DB_WEIGHT4_7__WEIGHT6__SHIFT 0x10
21189#define DIDT_DB_WEIGHT4_7__WEIGHT7_MASK 0xff000000
21190#define DIDT_DB_WEIGHT4_7__WEIGHT7__SHIFT 0x18
21191#define DIDT_DB_WEIGHT8_11__WEIGHT8_MASK 0xff
21192#define DIDT_DB_WEIGHT8_11__WEIGHT8__SHIFT 0x0
21193#define DIDT_DB_WEIGHT8_11__WEIGHT9_MASK 0xff00
21194#define DIDT_DB_WEIGHT8_11__WEIGHT9__SHIFT 0x8
21195#define DIDT_DB_WEIGHT8_11__WEIGHT10_MASK 0xff0000
21196#define DIDT_DB_WEIGHT8_11__WEIGHT10__SHIFT 0x10
21197#define DIDT_DB_WEIGHT8_11__WEIGHT11_MASK 0xff000000
21198#define DIDT_DB_WEIGHT8_11__WEIGHT11__SHIFT 0x18
21199#define DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK 0x1
21200#define DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT 0x0
21201#define DIDT_TD_CTRL0__USE_REF_CLOCK_MASK 0x2
21202#define DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT 0x1
21203#define DIDT_TD_CTRL0__PHASE_OFFSET_MASK 0xc
21204#define DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT 0x2
21205#define DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK 0x10
21206#define DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
21207#define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
21208#define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
21209#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0
21210#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6
21211#define DIDT_TD_CTRL1__MIN_POWER_MASK 0xffff
21212#define DIDT_TD_CTRL1__MIN_POWER__SHIFT 0x0
21213#define DIDT_TD_CTRL1__MAX_POWER_MASK 0xffff0000
21214#define DIDT_TD_CTRL1__MAX_POWER__SHIFT 0x10
21215#define DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK 0x3fff
21216#define DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT 0x0
21217#define DIDT_TD_CTRL2__UNUSED_0_MASK 0xc000
21218#define DIDT_TD_CTRL2__UNUSED_0__SHIFT 0xe
21219#define DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK 0x3ff0000
21220#define DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
21221#define DIDT_TD_CTRL2__UNUSED_1_MASK 0x4000000
21222#define DIDT_TD_CTRL2__UNUSED_1__SHIFT 0x1a
21223#define DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000
21224#define DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
21225#define DIDT_TD_CTRL2__UNUSED_2_MASK 0x80000000
21226#define DIDT_TD_CTRL2__UNUSED_2__SHIFT 0x1f
21227#define DIDT_TD_CTRL_OCP__UNUSED_0_MASK 0xffff
21228#define DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT 0x0
21229#define DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK 0xffff0000
21230#define DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT 0x10
21231#define DIDT_TD_WEIGHT0_3__WEIGHT0_MASK 0xff
21232#define DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT 0x0
21233#define DIDT_TD_WEIGHT0_3__WEIGHT1_MASK 0xff00
21234#define DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT 0x8
21235#define DIDT_TD_WEIGHT0_3__WEIGHT2_MASK 0xff0000
21236#define DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT 0x10
21237#define DIDT_TD_WEIGHT0_3__WEIGHT3_MASK 0xff000000
21238#define DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT 0x18
21239#define DIDT_TD_WEIGHT4_7__WEIGHT4_MASK 0xff
21240#define DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT 0x0
21241#define DIDT_TD_WEIGHT4_7__WEIGHT5_MASK 0xff00
21242#define DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT 0x8
21243#define DIDT_TD_WEIGHT4_7__WEIGHT6_MASK 0xff0000
21244#define DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT 0x10
21245#define DIDT_TD_WEIGHT4_7__WEIGHT7_MASK 0xff000000
21246#define DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT 0x18
21247#define DIDT_TD_WEIGHT8_11__WEIGHT8_MASK 0xff
21248#define DIDT_TD_WEIGHT8_11__WEIGHT8__SHIFT 0x0
21249#define DIDT_TD_WEIGHT8_11__WEIGHT9_MASK 0xff00
21250#define DIDT_TD_WEIGHT8_11__WEIGHT9__SHIFT 0x8
21251#define DIDT_TD_WEIGHT8_11__WEIGHT10_MASK 0xff0000
21252#define DIDT_TD_WEIGHT8_11__WEIGHT10__SHIFT 0x10
21253#define DIDT_TD_WEIGHT8_11__WEIGHT11_MASK 0xff000000
21254#define DIDT_TD_WEIGHT8_11__WEIGHT11__SHIFT 0x18
21255#define DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK 0x1
21256#define DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT 0x0
21257#define DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK 0x2
21258#define DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT 0x1
21259#define DIDT_TCP_CTRL0__PHASE_OFFSET_MASK 0xc
21260#define DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT 0x2
21261#define DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK 0x10
21262#define DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
21263#define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
21264#define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
21265#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0
21266#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6
21267#define DIDT_TCP_CTRL1__MIN_POWER_MASK 0xffff
21268#define DIDT_TCP_CTRL1__MIN_POWER__SHIFT 0x0
21269#define DIDT_TCP_CTRL1__MAX_POWER_MASK 0xffff0000
21270#define DIDT_TCP_CTRL1__MAX_POWER__SHIFT 0x10
21271#define DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK 0x3fff
21272#define DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT 0x0
21273#define DIDT_TCP_CTRL2__UNUSED_0_MASK 0xc000
21274#define DIDT_TCP_CTRL2__UNUSED_0__SHIFT 0xe
21275#define DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK 0x3ff0000
21276#define DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
21277#define DIDT_TCP_CTRL2__UNUSED_1_MASK 0x4000000
21278#define DIDT_TCP_CTRL2__UNUSED_1__SHIFT 0x1a
21279#define DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000
21280#define DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
21281#define DIDT_TCP_CTRL2__UNUSED_2_MASK 0x80000000
21282#define DIDT_TCP_CTRL2__UNUSED_2__SHIFT 0x1f
21283#define DIDT_TCP_CTRL_OCP__UNUSED_0_MASK 0xffff
21284#define DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT 0x0
21285#define DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK 0xffff0000
21286#define DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT 0x10
21287#define DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK 0xff
21288#define DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT 0x0
21289#define DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK 0xff00
21290#define DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT 0x8
21291#define DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK 0xff0000
21292#define DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT 0x10
21293#define DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK 0xff000000
21294#define DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT 0x18
21295#define DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK 0xff
21296#define DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT 0x0
21297#define DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK 0xff00
21298#define DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT 0x8
21299#define DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK 0xff0000
21300#define DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT 0x10
21301#define DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK 0xff000000
21302#define DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT 0x18
21303#define DIDT_TCP_WEIGHT8_11__WEIGHT8_MASK 0xff
21304#define DIDT_TCP_WEIGHT8_11__WEIGHT8__SHIFT 0x0
21305#define DIDT_TCP_WEIGHT8_11__WEIGHT9_MASK 0xff00
21306#define DIDT_TCP_WEIGHT8_11__WEIGHT9__SHIFT 0x8
21307#define DIDT_TCP_WEIGHT8_11__WEIGHT10_MASK 0xff0000
21308#define DIDT_TCP_WEIGHT8_11__WEIGHT10__SHIFT 0x10
21309#define DIDT_TCP_WEIGHT8_11__WEIGHT11_MASK 0xff000000
21310#define DIDT_TCP_WEIGHT8_11__WEIGHT11__SHIFT 0x18
21311#define DIDT_DBR_CTRL0__DIDT_CTRL_EN_MASK 0x1
21312#define DIDT_DBR_CTRL0__DIDT_CTRL_EN__SHIFT 0x0
21313#define DIDT_DBR_CTRL0__USE_REF_CLOCK_MASK 0x2
21314#define DIDT_DBR_CTRL0__USE_REF_CLOCK__SHIFT 0x1
21315#define DIDT_DBR_CTRL0__PHASE_OFFSET_MASK 0xc
21316#define DIDT_DBR_CTRL0__PHASE_OFFSET__SHIFT 0x2
21317#define DIDT_DBR_CTRL0__DIDT_CTRL_RST_MASK 0x10
21318#define DIDT_DBR_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
21319#define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
21320#define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
21321#define DIDT_DBR_CTRL0__UNUSED_0_MASK 0xffffffc0
21322#define DIDT_DBR_CTRL0__UNUSED_0__SHIFT 0x6
21323#define DIDT_DBR_CTRL1__MIN_POWER_MASK 0xffff
21324#define DIDT_DBR_CTRL1__MIN_POWER__SHIFT 0x0
21325#define DIDT_DBR_CTRL1__MAX_POWER_MASK 0xffff0000
21326#define DIDT_DBR_CTRL1__MAX_POWER__SHIFT 0x10
21327#define DIDT_DBR_CTRL2__MAX_POWER_DELTA_MASK 0x3fff
21328#define DIDT_DBR_CTRL2__MAX_POWER_DELTA__SHIFT 0x0
21329#define DIDT_DBR_CTRL2__UNUSED_0_MASK 0xc000
21330#define DIDT_DBR_CTRL2__UNUSED_0__SHIFT 0xe
21331#define DIDT_DBR_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK 0x3ff0000
21332#define DIDT_DBR_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
21333#define DIDT_DBR_CTRL2__UNUSED_1_MASK 0x4000000
21334#define DIDT_DBR_CTRL2__UNUSED_1__SHIFT 0x1a
21335#define DIDT_DBR_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000
21336#define DIDT_DBR_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
21337#define DIDT_DBR_CTRL2__UNUSED_2_MASK 0x80000000
21338#define DIDT_DBR_CTRL2__UNUSED_2__SHIFT 0x1f
21339#define DIDT_DBR_CTRL_OCP__UNUSED_0_MASK 0xffff
21340#define DIDT_DBR_CTRL_OCP__UNUSED_0__SHIFT 0x0
21341#define DIDT_DBR_CTRL_OCP__OCP_MAX_POWER_MASK 0xffff0000
21342#define DIDT_DBR_CTRL_OCP__OCP_MAX_POWER__SHIFT 0x10
21343#define DIDT_DBR_WEIGHT0_3__WEIGHT0_MASK 0xff
21344#define DIDT_DBR_WEIGHT0_3__WEIGHT0__SHIFT 0x0
21345#define DIDT_DBR_WEIGHT0_3__WEIGHT1_MASK 0xff00
21346#define DIDT_DBR_WEIGHT0_3__WEIGHT1__SHIFT 0x8
21347#define DIDT_DBR_WEIGHT0_3__WEIGHT2_MASK 0xff0000
21348#define DIDT_DBR_WEIGHT0_3__WEIGHT2__SHIFT 0x10
21349#define DIDT_DBR_WEIGHT0_3__WEIGHT3_MASK 0xff000000
21350#define DIDT_DBR_WEIGHT0_3__WEIGHT3__SHIFT 0x18
21351#define DIDT_DBR_WEIGHT4_7__WEIGHT4_MASK 0xff
21352#define DIDT_DBR_WEIGHT4_7__WEIGHT4__SHIFT 0x0
21353#define DIDT_DBR_WEIGHT4_7__WEIGHT5_MASK 0xff00
21354#define DIDT_DBR_WEIGHT4_7__WEIGHT5__SHIFT 0x8
21355#define DIDT_DBR_WEIGHT4_7__WEIGHT6_MASK 0xff0000
21356#define DIDT_DBR_WEIGHT4_7__WEIGHT6__SHIFT 0x10
21357#define DIDT_DBR_WEIGHT4_7__WEIGHT7_MASK 0xff000000
21358#define DIDT_DBR_WEIGHT4_7__WEIGHT7__SHIFT 0x18
21359#define DIDT_DBR_WEIGHT8_11__WEIGHT8_MASK 0xff
21360#define DIDT_DBR_WEIGHT8_11__WEIGHT8__SHIFT 0x0
21361#define DIDT_DBR_WEIGHT8_11__WEIGHT9_MASK 0xff00
21362#define DIDT_DBR_WEIGHT8_11__WEIGHT9__SHIFT 0x8
21363#define DIDT_DBR_WEIGHT8_11__WEIGHT10_MASK 0xff0000
21364#define DIDT_DBR_WEIGHT8_11__WEIGHT10__SHIFT 0x10
21365#define DIDT_DBR_WEIGHT8_11__WEIGHT11_MASK 0xff000000
21366#define DIDT_DBR_WEIGHT8_11__WEIGHT11__SHIFT 0x18
21367
21368#endif /* GFX_8_1_SH_MASK_H */
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index 44c5d4a4d1bf..552622675ace 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -6784,7 +6784,7 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE_V2_1
6784 ULONG ulMCUcodeRomStartAddr; 6784 ULONG ulMCUcodeRomStartAddr;
6785 ULONG ulMCUcodeLength; 6785 ULONG ulMCUcodeLength;
6786 USHORT usMcRegInitTableOffset; // offset of ATOM_REG_INIT_SETTING array for MC core register settings. 6786 USHORT usMcRegInitTableOffset; // offset of ATOM_REG_INIT_SETTING array for MC core register settings.
6787 USHORT usReserved; // offset of ATOM_INIT_REG_BLOCK for MC SEQ/PHY regsiter setting 6787 USHORT usReserved; // offset of ATOM_INIT_REG_BLOCK for MC SEQ/PHY register setting
6788}ATOM_MC_INIT_PARAM_TABLE_V2_1; 6788}ATOM_MC_INIT_PARAM_TABLE_V2_1;
6789 6789
6790 6790
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 3697eeeecf82..89619a5a4289 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -222,6 +222,12 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
222 222
223 while ((entity->dependency = sched->ops->dependency(sched_job))) { 223 while ((entity->dependency = sched->ops->dependency(sched_job))) {
224 224
225 if (entity->dependency->context == entity->fence_context) {
226 /* We can ignore fences from ourself */
227 fence_put(entity->dependency);
228 continue;
229 }
230
225 if (fence_add_callback(entity->dependency, &entity->cb, 231 if (fence_add_callback(entity->dependency, &entity->cb,
226 amd_sched_entity_wakeup)) 232 amd_sched_entity_wakeup))
227 fence_put(entity->dependency); 233 fence_put(entity->dependency);
@@ -327,19 +333,49 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
327 struct amd_sched_fence *s_fence = 333 struct amd_sched_fence *s_fence =
328 container_of(cb, struct amd_sched_fence, cb); 334 container_of(cb, struct amd_sched_fence, cb);
329 struct amd_gpu_scheduler *sched = s_fence->sched; 335 struct amd_gpu_scheduler *sched = s_fence->sched;
336 unsigned long flags;
330 337
331 atomic_dec(&sched->hw_rq_count); 338 atomic_dec(&sched->hw_rq_count);
332 amd_sched_fence_signal(s_fence); 339 amd_sched_fence_signal(s_fence);
340 if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
341 cancel_delayed_work(&s_fence->dwork);
342 spin_lock_irqsave(&sched->fence_list_lock, flags);
343 list_del_init(&s_fence->list);
344 spin_unlock_irqrestore(&sched->fence_list_lock, flags);
345 }
333 fence_put(&s_fence->base); 346 fence_put(&s_fence->base);
334 wake_up_interruptible(&sched->wake_up_worker); 347 wake_up_interruptible(&sched->wake_up_worker);
335} 348}
336 349
350static void amd_sched_fence_work_func(struct work_struct *work)
351{
352 struct amd_sched_fence *s_fence =
353 container_of(work, struct amd_sched_fence, dwork.work);
354 struct amd_gpu_scheduler *sched = s_fence->sched;
355 struct amd_sched_fence *entity, *tmp;
356 unsigned long flags;
357
358 DRM_ERROR("[%s] scheduler is timeout!\n", sched->name);
359
360 /* Clean all pending fences */
361 spin_lock_irqsave(&sched->fence_list_lock, flags);
362 list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
363 DRM_ERROR(" fence no %d\n", entity->base.seqno);
364 cancel_delayed_work(&entity->dwork);
365 list_del_init(&entity->list);
366 fence_put(&entity->base);
367 }
368 spin_unlock_irqrestore(&sched->fence_list_lock, flags);
369}
370
337static int amd_sched_main(void *param) 371static int amd_sched_main(void *param)
338{ 372{
339 struct sched_param sparam = {.sched_priority = 1}; 373 struct sched_param sparam = {.sched_priority = 1};
340 struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param; 374 struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
341 int r, count; 375 int r, count;
342 376
377 spin_lock_init(&sched->fence_list_lock);
378 INIT_LIST_HEAD(&sched->fence_list);
343 sched_setscheduler(current, SCHED_FIFO, &sparam); 379 sched_setscheduler(current, SCHED_FIFO, &sparam);
344 380
345 while (!kthread_should_stop()) { 381 while (!kthread_should_stop()) {
@@ -347,6 +383,7 @@ static int amd_sched_main(void *param)
347 struct amd_sched_fence *s_fence; 383 struct amd_sched_fence *s_fence;
348 struct amd_sched_job *sched_job; 384 struct amd_sched_job *sched_job;
349 struct fence *fence; 385 struct fence *fence;
386 unsigned long flags;
350 387
351 wait_event_interruptible(sched->wake_up_worker, 388 wait_event_interruptible(sched->wake_up_worker,
352 kthread_should_stop() || 389 kthread_should_stop() ||
@@ -357,6 +394,15 @@ static int amd_sched_main(void *param)
357 394
358 entity = sched_job->s_entity; 395 entity = sched_job->s_entity;
359 s_fence = sched_job->s_fence; 396 s_fence = sched_job->s_fence;
397
398 if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
399 INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
400 schedule_delayed_work(&s_fence->dwork, sched->timeout);
401 spin_lock_irqsave(&sched->fence_list_lock, flags);
402 list_add_tail(&s_fence->list, &sched->fence_list);
403 spin_unlock_irqrestore(&sched->fence_list_lock, flags);
404 }
405
360 atomic_inc(&sched->hw_rq_count); 406 atomic_inc(&sched->hw_rq_count);
361 fence = sched->ops->run_job(sched_job); 407 fence = sched->ops->run_job(sched_job);
362 if (fence) { 408 if (fence) {
@@ -392,11 +438,12 @@ static int amd_sched_main(void *param)
392*/ 438*/
393int amd_sched_init(struct amd_gpu_scheduler *sched, 439int amd_sched_init(struct amd_gpu_scheduler *sched,
394 struct amd_sched_backend_ops *ops, 440 struct amd_sched_backend_ops *ops,
395 unsigned hw_submission, const char *name) 441 unsigned hw_submission, long timeout, const char *name)
396{ 442{
397 sched->ops = ops; 443 sched->ops = ops;
398 sched->hw_submission_limit = hw_submission; 444 sched->hw_submission_limit = hw_submission;
399 sched->name = name; 445 sched->name = name;
446 sched->timeout = timeout;
400 amd_sched_rq_init(&sched->sched_rq); 447 amd_sched_rq_init(&sched->sched_rq);
401 amd_sched_rq_init(&sched->kernel_rq); 448 amd_sched_rq_init(&sched->kernel_rq);
402 449
@@ -421,5 +468,6 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
421 */ 468 */
422void amd_sched_fini(struct amd_gpu_scheduler *sched) 469void amd_sched_fini(struct amd_gpu_scheduler *sched)
423{ 470{
424 kthread_stop(sched->thread); 471 if (sched->thread)
472 kthread_stop(sched->thread);
425} 473}
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 80b64dc22214..929e9aced041 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -68,6 +68,8 @@ struct amd_sched_fence {
68 struct amd_gpu_scheduler *sched; 68 struct amd_gpu_scheduler *sched;
69 spinlock_t lock; 69 spinlock_t lock;
70 void *owner; 70 void *owner;
71 struct delayed_work dwork;
72 struct list_head list;
71}; 73};
72 74
73struct amd_sched_job { 75struct amd_sched_job {
@@ -103,18 +105,21 @@ struct amd_sched_backend_ops {
103struct amd_gpu_scheduler { 105struct amd_gpu_scheduler {
104 struct amd_sched_backend_ops *ops; 106 struct amd_sched_backend_ops *ops;
105 uint32_t hw_submission_limit; 107 uint32_t hw_submission_limit;
108 long timeout;
106 const char *name; 109 const char *name;
107 struct amd_sched_rq sched_rq; 110 struct amd_sched_rq sched_rq;
108 struct amd_sched_rq kernel_rq; 111 struct amd_sched_rq kernel_rq;
109 wait_queue_head_t wake_up_worker; 112 wait_queue_head_t wake_up_worker;
110 wait_queue_head_t job_scheduled; 113 wait_queue_head_t job_scheduled;
111 atomic_t hw_rq_count; 114 atomic_t hw_rq_count;
115 struct list_head fence_list;
116 spinlock_t fence_list_lock;
112 struct task_struct *thread; 117 struct task_struct *thread;
113}; 118};
114 119
115int amd_sched_init(struct amd_gpu_scheduler *sched, 120int amd_sched_init(struct amd_gpu_scheduler *sched,
116 struct amd_sched_backend_ops *ops, 121 struct amd_sched_backend_ops *ops,
117 uint32_t hw_submission, const char *name); 122 uint32_t hw_submission, long timeout, const char *name);
118void amd_sched_fini(struct amd_gpu_scheduler *sched); 123void amd_sched_fini(struct amd_gpu_scheduler *sched);
119 124
120int amd_sched_entity_init(struct amd_gpu_scheduler *sched, 125int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig
index 50ae88ad4d76..eb773e9af313 100644
--- a/drivers/gpu/drm/armada/Kconfig
+++ b/drivers/gpu/drm/armada/Kconfig
@@ -14,12 +14,3 @@ config DRM_ARMADA
14 This driver provides no built-in acceleration; acceleration is 14 This driver provides no built-in acceleration; acceleration is
15 performed by other IP found on the SoC. This driver provides 15 performed by other IP found on the SoC. This driver provides
16 kernel mode setting and buffer management to userspace. 16 kernel mode setting and buffer management to userspace.
17
18config DRM_ARMADA_TDA1998X
19 bool "Support TDA1998X HDMI output"
20 depends on DRM_ARMADA != n
21 depends on I2C && DRM_I2C_NXP_TDA998X = y
22 default y
23 help
24 Support the TDA1998x HDMI output device found on the Solid-Run
25 CuBox.
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
index d6f43e06150a..ffd673615772 100644
--- a/drivers/gpu/drm/armada/Makefile
+++ b/drivers/gpu/drm/armada/Makefile
@@ -1,6 +1,5 @@
1armada-y := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \ 1armada-y := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
2 armada_gem.o armada_output.o armada_overlay.o \ 2 armada_gem.o armada_overlay.o
3 armada_slave.o
4armada-y += armada_510.o 3armada-y += armada_510.o
5armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o 4armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
6 5
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 01ffe9bffe38..cebcab560626 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -20,6 +20,7 @@
20#include "armada_hw.h" 20#include "armada_hw.h"
21 21
22struct armada_frame_work { 22struct armada_frame_work {
23 struct armada_plane_work work;
23 struct drm_pending_vblank_event *event; 24 struct drm_pending_vblank_event *event;
24 struct armada_regs regs[4]; 25 struct armada_regs regs[4];
25 struct drm_framebuffer *old_fb; 26 struct drm_framebuffer *old_fb;
@@ -33,6 +34,23 @@ enum csc_mode {
33 CSC_RGB_STUDIO = 2, 34 CSC_RGB_STUDIO = 2,
34}; 35};
35 36
37static const uint32_t armada_primary_formats[] = {
38 DRM_FORMAT_UYVY,
39 DRM_FORMAT_YUYV,
40 DRM_FORMAT_VYUY,
41 DRM_FORMAT_YVYU,
42 DRM_FORMAT_ARGB8888,
43 DRM_FORMAT_ABGR8888,
44 DRM_FORMAT_XRGB8888,
45 DRM_FORMAT_XBGR8888,
46 DRM_FORMAT_RGB888,
47 DRM_FORMAT_BGR888,
48 DRM_FORMAT_ARGB1555,
49 DRM_FORMAT_ABGR1555,
50 DRM_FORMAT_RGB565,
51 DRM_FORMAT_BGR565,
52};
53
36/* 54/*
37 * A note about interlacing. Let's consider HDMI 1920x1080i. 55 * A note about interlacing. Let's consider HDMI 1920x1080i.
38 * The timing parameters we have from X are: 56 * The timing parameters we have from X are:
@@ -173,49 +191,82 @@ static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
173 return i; 191 return i;
174} 192}
175 193
176static int armada_drm_crtc_queue_frame_work(struct armada_crtc *dcrtc, 194static void armada_drm_plane_work_run(struct armada_crtc *dcrtc,
177 struct armada_frame_work *work) 195 struct armada_plane *plane)
196{
197 struct armada_plane_work *work = xchg(&plane->work, NULL);
198
199 /* Handle any pending frame work. */
200 if (work) {
201 work->fn(dcrtc, plane, work);
202 drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
203 }
204
205 wake_up(&plane->frame_wait);
206}
207
208int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
209 struct armada_plane *plane, struct armada_plane_work *work)
178{ 210{
179 struct drm_device *dev = dcrtc->crtc.dev;
180 unsigned long flags;
181 int ret; 211 int ret;
182 212
183 ret = drm_vblank_get(dev, dcrtc->num); 213 ret = drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
184 if (ret) { 214 if (ret) {
185 DRM_ERROR("failed to acquire vblank counter\n"); 215 DRM_ERROR("failed to acquire vblank counter\n");
186 return ret; 216 return ret;
187 } 217 }
188 218
189 spin_lock_irqsave(&dev->event_lock, flags); 219 ret = cmpxchg(&plane->work, NULL, work) ? -EBUSY : 0;
190 if (!dcrtc->frame_work)
191 dcrtc->frame_work = work;
192 else
193 ret = -EBUSY;
194 spin_unlock_irqrestore(&dev->event_lock, flags);
195
196 if (ret) 220 if (ret)
197 drm_vblank_put(dev, dcrtc->num); 221 drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
198 222
199 return ret; 223 return ret;
200} 224}
201 225
202static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc) 226int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout)
203{ 227{
204 struct drm_device *dev = dcrtc->crtc.dev; 228 return wait_event_timeout(plane->frame_wait, !plane->work, timeout);
205 struct armada_frame_work *work = dcrtc->frame_work; 229}
206 230
207 dcrtc->frame_work = NULL; 231struct armada_plane_work *armada_drm_plane_work_cancel(
232 struct armada_crtc *dcrtc, struct armada_plane *plane)
233{
234 struct armada_plane_work *work = xchg(&plane->work, NULL);
208 235
209 armada_drm_crtc_update_regs(dcrtc, work->regs); 236 if (work)
237 drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
210 238
211 if (work->event) 239 return work;
212 drm_send_vblank_event(dev, dcrtc->num, work->event); 240}
213 241
214 drm_vblank_put(dev, dcrtc->num); 242static int armada_drm_crtc_queue_frame_work(struct armada_crtc *dcrtc,
243 struct armada_frame_work *work)
244{
245 struct armada_plane *plane = drm_to_armada_plane(dcrtc->crtc.primary);
246
247 return armada_drm_plane_work_queue(dcrtc, plane, &work->work);
248}
249
250static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc,
251 struct armada_plane *plane, struct armada_plane_work *work)
252{
253 struct armada_frame_work *fwork = container_of(work, struct armada_frame_work, work);
254 struct drm_device *dev = dcrtc->crtc.dev;
255 unsigned long flags;
256
257 spin_lock_irqsave(&dcrtc->irq_lock, flags);
258 armada_drm_crtc_update_regs(dcrtc, fwork->regs);
259 spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
260
261 if (fwork->event) {
262 spin_lock_irqsave(&dev->event_lock, flags);
263 drm_send_vblank_event(dev, dcrtc->num, fwork->event);
264 spin_unlock_irqrestore(&dev->event_lock, flags);
265 }
215 266
216 /* Finally, queue the process-half of the cleanup. */ 267 /* Finally, queue the process-half of the cleanup. */
217 __armada_drm_queue_unref_work(dcrtc->crtc.dev, work->old_fb); 268 __armada_drm_queue_unref_work(dcrtc->crtc.dev, fwork->old_fb);
218 kfree(work); 269 kfree(fwork);
219} 270}
220 271
221static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc, 272static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
@@ -235,6 +286,7 @@ static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
235 work = kmalloc(sizeof(*work), GFP_KERNEL); 286 work = kmalloc(sizeof(*work), GFP_KERNEL);
236 if (work) { 287 if (work) {
237 int i = 0; 288 int i = 0;
289 work->work.fn = armada_drm_crtc_complete_frame_work;
238 work->event = NULL; 290 work->event = NULL;
239 work->old_fb = fb; 291 work->old_fb = fb;
240 armada_reg_queue_end(work->regs, i); 292 armada_reg_queue_end(work->regs, i);
@@ -255,19 +307,14 @@ static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
255 307
256static void armada_drm_vblank_off(struct armada_crtc *dcrtc) 308static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
257{ 309{
258 struct drm_device *dev = dcrtc->crtc.dev; 310 struct armada_plane *plane = drm_to_armada_plane(dcrtc->crtc.primary);
259 311
260 /* 312 /*
261 * Tell the DRM core that vblank IRQs aren't going to happen for 313 * Tell the DRM core that vblank IRQs aren't going to happen for
262 * a while. This cleans up any pending vblank events for us. 314 * a while. This cleans up any pending vblank events for us.
263 */ 315 */
264 drm_crtc_vblank_off(&dcrtc->crtc); 316 drm_crtc_vblank_off(&dcrtc->crtc);
265 317 armada_drm_plane_work_run(dcrtc, plane);
266 /* Handle any pending flip event. */
267 spin_lock_irq(&dev->event_lock);
268 if (dcrtc->frame_work)
269 armada_drm_crtc_complete_frame_work(dcrtc);
270 spin_unlock_irq(&dev->event_lock);
271} 318}
272 319
273void armada_drm_crtc_gamma_set(struct drm_crtc *crtc, u16 r, u16 g, u16 b, 320void armada_drm_crtc_gamma_set(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
@@ -287,7 +334,11 @@ static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
287 334
288 if (dcrtc->dpms != dpms) { 335 if (dcrtc->dpms != dpms) {
289 dcrtc->dpms = dpms; 336 dcrtc->dpms = dpms;
337 if (!IS_ERR(dcrtc->clk) && !dpms_blanked(dpms))
338 WARN_ON(clk_prepare_enable(dcrtc->clk));
290 armada_drm_crtc_update(dcrtc); 339 armada_drm_crtc_update(dcrtc);
340 if (!IS_ERR(dcrtc->clk) && dpms_blanked(dpms))
341 clk_disable_unprepare(dcrtc->clk);
291 if (dpms_blanked(dpms)) 342 if (dpms_blanked(dpms))
292 armada_drm_vblank_off(dcrtc); 343 armada_drm_vblank_off(dcrtc);
293 else 344 else
@@ -310,17 +361,11 @@ static void armada_drm_crtc_prepare(struct drm_crtc *crtc)
310 /* 361 /*
311 * If we have an overlay plane associated with this CRTC, disable 362 * If we have an overlay plane associated with this CRTC, disable
312 * it before the modeset to avoid its coordinates being outside 363 * it before the modeset to avoid its coordinates being outside
313 * the new mode parameters. DRM doesn't provide help with this. 364 * the new mode parameters.
314 */ 365 */
315 plane = dcrtc->plane; 366 plane = dcrtc->plane;
316 if (plane) { 367 if (plane)
317 struct drm_framebuffer *fb = plane->fb; 368 drm_plane_force_disable(plane);
318
319 plane->funcs->disable_plane(plane);
320 plane->fb = NULL;
321 plane->crtc = NULL;
322 drm_framebuffer_unreference(fb);
323 }
324} 369}
325 370
326/* The mode_config.mutex will be held for this call */ 371/* The mode_config.mutex will be held for this call */
@@ -356,8 +401,8 @@ static bool armada_drm_crtc_mode_fixup(struct drm_crtc *crtc,
356 401
357static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat) 402static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
358{ 403{
359 struct armada_vbl_event *e, *n;
360 void __iomem *base = dcrtc->base; 404 void __iomem *base = dcrtc->base;
405 struct drm_plane *ovl_plane;
361 406
362 if (stat & DMA_FF_UNDERFLOW) 407 if (stat & DMA_FF_UNDERFLOW)
363 DRM_ERROR("video underflow on crtc %u\n", dcrtc->num); 408 DRM_ERROR("video underflow on crtc %u\n", dcrtc->num);
@@ -368,11 +413,10 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
368 drm_handle_vblank(dcrtc->crtc.dev, dcrtc->num); 413 drm_handle_vblank(dcrtc->crtc.dev, dcrtc->num);
369 414
370 spin_lock(&dcrtc->irq_lock); 415 spin_lock(&dcrtc->irq_lock);
371 416 ovl_plane = dcrtc->plane;
372 list_for_each_entry_safe(e, n, &dcrtc->vbl_list, node) { 417 if (ovl_plane) {
373 list_del_init(&e->node); 418 struct armada_plane *plane = drm_to_armada_plane(ovl_plane);
374 drm_vblank_put(dcrtc->crtc.dev, dcrtc->num); 419 armada_drm_plane_work_run(dcrtc, plane);
375 e->fn(dcrtc, e->data);
376 } 420 }
377 421
378 if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) { 422 if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
@@ -404,14 +448,8 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
404 spin_unlock(&dcrtc->irq_lock); 448 spin_unlock(&dcrtc->irq_lock);
405 449
406 if (stat & GRA_FRAME_IRQ) { 450 if (stat & GRA_FRAME_IRQ) {
407 struct drm_device *dev = dcrtc->crtc.dev; 451 struct armada_plane *plane = drm_to_armada_plane(dcrtc->crtc.primary);
408 452 armada_drm_plane_work_run(dcrtc, plane);
409 spin_lock(&dev->event_lock);
410 if (dcrtc->frame_work)
411 armada_drm_crtc_complete_frame_work(dcrtc);
412 spin_unlock(&dev->event_lock);
413
414 wake_up(&dcrtc->frame_wait);
415 } 453 }
416} 454}
417 455
@@ -527,7 +565,8 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
527 adj->crtc_vtotal, tm, bm); 565 adj->crtc_vtotal, tm, bm);
528 566
529 /* Wait for pending flips to complete */ 567 /* Wait for pending flips to complete */
530 wait_event(dcrtc->frame_wait, !dcrtc->frame_work); 568 armada_drm_plane_work_wait(drm_to_armada_plane(dcrtc->crtc.primary),
569 MAX_SCHEDULE_TIMEOUT);
531 570
532 drm_crtc_vblank_off(crtc); 571 drm_crtc_vblank_off(crtc);
533 572
@@ -537,6 +576,13 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
537 writel_relaxed(val, dcrtc->base + LCD_SPU_DUMB_CTRL); 576 writel_relaxed(val, dcrtc->base + LCD_SPU_DUMB_CTRL);
538 } 577 }
539 578
579 /*
580 * If we are blanked, we would have disabled the clock. Re-enable
581 * it so that compute_clock() does the right thing.
582 */
583 if (!IS_ERR(dcrtc->clk) && dpms_blanked(dcrtc->dpms))
584 WARN_ON(clk_prepare_enable(dcrtc->clk));
585
540 /* Now compute the divider for real */ 586 /* Now compute the divider for real */
541 dcrtc->variant->compute_clock(dcrtc, adj, &sclk); 587 dcrtc->variant->compute_clock(dcrtc, adj, &sclk);
542 588
@@ -637,7 +683,8 @@ static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
637 armada_reg_queue_end(regs, i); 683 armada_reg_queue_end(regs, i);
638 684
639 /* Wait for pending flips to complete */ 685 /* Wait for pending flips to complete */
640 wait_event(dcrtc->frame_wait, !dcrtc->frame_work); 686 armada_drm_plane_work_wait(drm_to_armada_plane(dcrtc->crtc.primary),
687 MAX_SCHEDULE_TIMEOUT);
641 688
642 /* Take a reference to the new fb as we're using it */ 689 /* Take a reference to the new fb as we're using it */
643 drm_framebuffer_reference(crtc->primary->fb); 690 drm_framebuffer_reference(crtc->primary->fb);
@@ -651,18 +698,47 @@ static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
651 return 0; 698 return 0;
652} 699}
653 700
701void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc,
702 struct drm_plane *plane)
703{
704 u32 sram_para1, dma_ctrl0_mask;
705
706 /*
707 * Drop our reference on any framebuffer attached to this plane.
708 * We don't need to NULL this out as drm_plane_force_disable(),
709 * and __setplane_internal() will do so for an overlay plane, and
710 * __drm_helper_disable_unused_functions() will do so for the
711 * primary plane.
712 */
713 if (plane->fb)
714 drm_framebuffer_unreference(plane->fb);
715
716 /* Power down the Y/U/V FIFOs */
717 sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
718
719 /* Power down most RAMs and FIFOs if this is the primary plane */
720 if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
721 sram_para1 |= CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
722 CFG_PDWN32x32 | CFG_PDWN64x66;
723 dma_ctrl0_mask = CFG_GRA_ENA;
724 } else {
725 dma_ctrl0_mask = CFG_DMA_ENA;
726 }
727
728 spin_lock_irq(&dcrtc->irq_lock);
729 armada_updatel(0, dma_ctrl0_mask, dcrtc->base + LCD_SPU_DMA_CTRL0);
730 spin_unlock_irq(&dcrtc->irq_lock);
731
732 armada_updatel(sram_para1, 0, dcrtc->base + LCD_SPU_SRAM_PARA1);
733}
734
654/* The mode_config.mutex will be held for this call */ 735/* The mode_config.mutex will be held for this call */
655static void armada_drm_crtc_disable(struct drm_crtc *crtc) 736static void armada_drm_crtc_disable(struct drm_crtc *crtc)
656{ 737{
657 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); 738 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
658 739
659 armada_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 740 armada_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
660 armada_drm_crtc_finish_fb(dcrtc, crtc->primary->fb, true); 741 armada_drm_crtc_plane_disable(dcrtc, crtc->primary);
661
662 /* Power down most RAMs and FIFOs */
663 writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
664 CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
665 CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
666} 742}
667 743
668static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = { 744static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = {
@@ -920,8 +996,6 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
920{ 996{
921 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); 997 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
922 struct armada_frame_work *work; 998 struct armada_frame_work *work;
923 struct drm_device *dev = crtc->dev;
924 unsigned long flags;
925 unsigned i; 999 unsigned i;
926 int ret; 1000 int ret;
927 1001
@@ -933,6 +1007,7 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
933 if (!work) 1007 if (!work)
934 return -ENOMEM; 1008 return -ENOMEM;
935 1009
1010 work->work.fn = armada_drm_crtc_complete_frame_work;
936 work->event = event; 1011 work->event = event;
937 work->old_fb = dcrtc->crtc.primary->fb; 1012 work->old_fb = dcrtc->crtc.primary->fb;
938 1013
@@ -966,12 +1041,8 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
966 * Finally, if the display is blanked, we won't receive an 1041 * Finally, if the display is blanked, we won't receive an
967 * interrupt, so complete it now. 1042 * interrupt, so complete it now.
968 */ 1043 */
969 if (dpms_blanked(dcrtc->dpms)) { 1044 if (dpms_blanked(dcrtc->dpms))
970 spin_lock_irqsave(&dev->event_lock, flags); 1045 armada_drm_plane_work_run(dcrtc, drm_to_armada_plane(dcrtc->crtc.primary));
971 if (dcrtc->frame_work)
972 armada_drm_crtc_complete_frame_work(dcrtc);
973 spin_unlock_irqrestore(&dev->event_lock, flags);
974 }
975 1046
976 return 0; 1047 return 0;
977} 1048}
@@ -1012,6 +1083,19 @@ static struct drm_crtc_funcs armada_crtc_funcs = {
1012 .set_property = armada_drm_crtc_set_property, 1083 .set_property = armada_drm_crtc_set_property,
1013}; 1084};
1014 1085
1086static const struct drm_plane_funcs armada_primary_plane_funcs = {
1087 .update_plane = drm_primary_helper_update,
1088 .disable_plane = drm_primary_helper_disable,
1089 .destroy = drm_primary_helper_destroy,
1090};
1091
1092int armada_drm_plane_init(struct armada_plane *plane)
1093{
1094 init_waitqueue_head(&plane->frame_wait);
1095
1096 return 0;
1097}
1098
1015static struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = { 1099static struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = {
1016 { CSC_AUTO, "Auto" }, 1100 { CSC_AUTO, "Auto" },
1017 { CSC_YUV_CCIR601, "CCIR601" }, 1101 { CSC_YUV_CCIR601, "CCIR601" },
@@ -1044,12 +1128,13 @@ static int armada_drm_crtc_create_properties(struct drm_device *dev)
1044 return 0; 1128 return 0;
1045} 1129}
1046 1130
1047int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, 1131static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
1048 struct resource *res, int irq, const struct armada_variant *variant, 1132 struct resource *res, int irq, const struct armada_variant *variant,
1049 struct device_node *port) 1133 struct device_node *port)
1050{ 1134{
1051 struct armada_private *priv = drm->dev_private; 1135 struct armada_private *priv = drm->dev_private;
1052 struct armada_crtc *dcrtc; 1136 struct armada_crtc *dcrtc;
1137 struct armada_plane *primary;
1053 void __iomem *base; 1138 void __iomem *base;
1054 int ret; 1139 int ret;
1055 1140
@@ -1080,8 +1165,6 @@ int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
1080 dcrtc->spu_iopad_ctrl = CFG_VSCALE_LN_EN | CFG_IOPAD_DUMB24; 1165 dcrtc->spu_iopad_ctrl = CFG_VSCALE_LN_EN | CFG_IOPAD_DUMB24;
1081 spin_lock_init(&dcrtc->irq_lock); 1166 spin_lock_init(&dcrtc->irq_lock);
1082 dcrtc->irq_ena = CLEAN_SPU_IRQ_ISR; 1167 dcrtc->irq_ena = CLEAN_SPU_IRQ_ISR;
1083 INIT_LIST_HEAD(&dcrtc->vbl_list);
1084 init_waitqueue_head(&dcrtc->frame_wait);
1085 1168
1086 /* Initialize some registers which we don't otherwise set */ 1169 /* Initialize some registers which we don't otherwise set */
1087 writel_relaxed(0x00000001, dcrtc->base + LCD_CFG_SCLK_DIV); 1170 writel_relaxed(0x00000001, dcrtc->base + LCD_CFG_SCLK_DIV);
@@ -1118,7 +1201,32 @@ int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
1118 priv->dcrtc[dcrtc->num] = dcrtc; 1201 priv->dcrtc[dcrtc->num] = dcrtc;
1119 1202
1120 dcrtc->crtc.port = port; 1203 dcrtc->crtc.port = port;
1121 drm_crtc_init(drm, &dcrtc->crtc, &armada_crtc_funcs); 1204
1205 primary = kzalloc(sizeof(*primary), GFP_KERNEL);
1206 if (!primary)
1207 return -ENOMEM;
1208
1209 ret = armada_drm_plane_init(primary);
1210 if (ret) {
1211 kfree(primary);
1212 return ret;
1213 }
1214
1215 ret = drm_universal_plane_init(drm, &primary->base, 0,
1216 &armada_primary_plane_funcs,
1217 armada_primary_formats,
1218 ARRAY_SIZE(armada_primary_formats),
1219 DRM_PLANE_TYPE_PRIMARY);
1220 if (ret) {
1221 kfree(primary);
1222 return ret;
1223 }
1224
1225 ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL,
1226 &armada_crtc_funcs);
1227 if (ret)
1228 goto err_crtc_init;
1229
1122 drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs); 1230 drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);
1123 1231
1124 drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop, 1232 drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop,
@@ -1127,6 +1235,10 @@ int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
1127 dcrtc->csc_rgb_mode); 1235 dcrtc->csc_rgb_mode);
1128 1236
1129 return armada_overlay_plane_create(drm, 1 << dcrtc->num); 1237 return armada_overlay_plane_create(drm, 1 << dcrtc->num);
1238
1239err_crtc_init:
1240 primary->base.funcs->destroy(&primary->base);
1241 return ret;
1130} 1242}
1131 1243
1132static int 1244static int
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index 98102a5a9af5..04fdd22d483b 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -31,9 +31,30 @@ struct armada_regs {
31#define armada_reg_queue_end(_r, _i) \ 31#define armada_reg_queue_end(_r, _i) \
32 armada_reg_queue_mod(_r, _i, 0, 0, ~0) 32 armada_reg_queue_mod(_r, _i, 0, 0, ~0)
33 33
34struct armada_frame_work; 34struct armada_crtc;
35struct armada_plane;
35struct armada_variant; 36struct armada_variant;
36 37
38struct armada_plane_work {
39 void (*fn)(struct armada_crtc *,
40 struct armada_plane *,
41 struct armada_plane_work *);
42};
43
44struct armada_plane {
45 struct drm_plane base;
46 wait_queue_head_t frame_wait;
47 struct armada_plane_work *work;
48};
49#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)
50
51int armada_drm_plane_init(struct armada_plane *plane);
52int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
53 struct armada_plane *plane, struct armada_plane_work *work);
54int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout);
55struct armada_plane_work *armada_drm_plane_work_cancel(
56 struct armada_crtc *dcrtc, struct armada_plane *plane);
57
37struct armada_crtc { 58struct armada_crtc {
38 struct drm_crtc crtc; 59 struct drm_crtc crtc;
39 const struct armada_variant *variant; 60 const struct armada_variant *variant;
@@ -66,25 +87,20 @@ struct armada_crtc {
66 uint32_t dumb_ctrl; 87 uint32_t dumb_ctrl;
67 uint32_t spu_iopad_ctrl; 88 uint32_t spu_iopad_ctrl;
68 89
69 wait_queue_head_t frame_wait;
70 struct armada_frame_work *frame_work;
71
72 spinlock_t irq_lock; 90 spinlock_t irq_lock;
73 uint32_t irq_ena; 91 uint32_t irq_ena;
74 struct list_head vbl_list;
75}; 92};
76#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc) 93#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)
77 94
78struct device_node;
79int armada_drm_crtc_create(struct drm_device *, struct device *,
80 struct resource *, int, const struct armada_variant *,
81 struct device_node *);
82void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int); 95void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int);
83void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int); 96void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int);
84void armada_drm_crtc_disable_irq(struct armada_crtc *, u32); 97void armada_drm_crtc_disable_irq(struct armada_crtc *, u32);
85void armada_drm_crtc_enable_irq(struct armada_crtc *, u32); 98void armada_drm_crtc_enable_irq(struct armada_crtc *, u32);
86void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *); 99void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
87 100
101void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc,
102 struct drm_plane *plane);
103
88extern struct platform_driver armada_lcd_platform_driver; 104extern struct platform_driver armada_lcd_platform_driver;
89 105
90#endif 106#endif
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index 5f6aef0dca59..4df6f2af2b21 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -37,22 +37,6 @@ static inline uint32_t armada_pitch(uint32_t width, uint32_t bpp)
37 return ALIGN(pitch, 128); 37 return ALIGN(pitch, 128);
38} 38}
39 39
40struct armada_vbl_event {
41 struct list_head node;
42 void *data;
43 void (*fn)(struct armada_crtc *, void *);
44};
45void armada_drm_vbl_event_add(struct armada_crtc *,
46 struct armada_vbl_event *);
47void armada_drm_vbl_event_remove(struct armada_crtc *,
48 struct armada_vbl_event *);
49#define armada_drm_vbl_event_init(_e, _f, _d) do { \
50 struct armada_vbl_event *__e = _e; \
51 INIT_LIST_HEAD(&__e->node); \
52 __e->data = _d; \
53 __e->fn = _f; \
54} while (0)
55
56 40
57struct armada_private; 41struct armada_private;
58 42
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 225034b74cda..77ab93d60125 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -11,6 +11,7 @@
11#include <linux/of_graph.h> 11#include <linux/of_graph.h>
12#include <drm/drmP.h> 12#include <drm/drmP.h>
13#include <drm/drm_crtc_helper.h> 13#include <drm/drm_crtc_helper.h>
14#include <drm/drm_of.h>
14#include "armada_crtc.h" 15#include "armada_crtc.h"
15#include "armada_drm.h" 16#include "armada_drm.h"
16#include "armada_gem.h" 17#include "armada_gem.h"
@@ -18,47 +19,6 @@
18#include <drm/armada_drm.h> 19#include <drm/armada_drm.h>
19#include "armada_ioctlP.h" 20#include "armada_ioctlP.h"
20 21
21#ifdef CONFIG_DRM_ARMADA_TDA1998X
22#include <drm/i2c/tda998x.h>
23#include "armada_slave.h"
24
25static struct tda998x_encoder_params params = {
26 /* With 0x24, there is no translation between vp_out and int_vp
27 FB LCD out Pins VIP Int Vp
28 R:23:16 R:7:0 VPC7:0 7:0 7:0[R]
29 G:15:8 G:15:8 VPB7:0 23:16 23:16[G]
30 B:7:0 B:23:16 VPA7:0 15:8 15:8[B]
31 */
32 .swap_a = 2,
33 .swap_b = 3,
34 .swap_c = 4,
35 .swap_d = 5,
36 .swap_e = 0,
37 .swap_f = 1,
38 .audio_cfg = BIT(2),
39 .audio_frame[1] = 1,
40 .audio_format = AFMT_SPDIF,
41 .audio_sample_rate = 44100,
42};
43
44static const struct armada_drm_slave_config tda19988_config = {
45 .i2c_adapter_id = 0,
46 .crtcs = 1 << 0, /* Only LCD0 at the moment */
47 .polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT,
48 .interlace_allowed = true,
49 .info = {
50 .type = "tda998x",
51 .addr = 0x70,
52 .platform_data = &params,
53 },
54};
55#endif
56
57static bool is_componentized(struct device *dev)
58{
59 return dev->of_node || dev->platform_data;
60}
61
62static void armada_drm_unref_work(struct work_struct *work) 22static void armada_drm_unref_work(struct work_struct *work)
63{ 23{
64 struct armada_private *priv = 24 struct armada_private *priv =
@@ -91,16 +51,11 @@ void armada_drm_queue_unref_work(struct drm_device *dev,
91 51
92static int armada_drm_load(struct drm_device *dev, unsigned long flags) 52static int armada_drm_load(struct drm_device *dev, unsigned long flags)
93{ 53{
94 const struct platform_device_id *id;
95 const struct armada_variant *variant;
96 struct armada_private *priv; 54 struct armada_private *priv;
97 struct resource *res[ARRAY_SIZE(priv->dcrtc)];
98 struct resource *mem = NULL; 55 struct resource *mem = NULL;
99 int ret, n, i; 56 int ret, n;
100
101 memset(res, 0, sizeof(res));
102 57
103 for (n = i = 0; ; n++) { 58 for (n = 0; ; n++) {
104 struct resource *r = platform_get_resource(dev->platformdev, 59 struct resource *r = platform_get_resource(dev->platformdev,
105 IORESOURCE_MEM, n); 60 IORESOURCE_MEM, n);
106 if (!r) 61 if (!r)
@@ -109,8 +64,6 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
109 /* Resources above 64K are graphics memory */ 64 /* Resources above 64K are graphics memory */
110 if (resource_size(r) > SZ_64K) 65 if (resource_size(r) > SZ_64K)
111 mem = r; 66 mem = r;
112 else if (i < ARRAY_SIZE(priv->dcrtc))
113 res[i++] = r;
114 else 67 else
115 return -EINVAL; 68 return -EINVAL;
116 } 69 }
@@ -131,13 +84,6 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
131 platform_set_drvdata(dev->platformdev, dev); 84 platform_set_drvdata(dev->platformdev, dev);
132 dev->dev_private = priv; 85 dev->dev_private = priv;
133 86
134 /* Get the implementation specific driver data. */
135 id = platform_get_device_id(dev->platformdev);
136 if (!id)
137 return -ENXIO;
138
139 variant = (const struct armada_variant *)id->driver_data;
140
141 INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work); 87 INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
142 INIT_KFIFO(priv->fb_unref); 88 INIT_KFIFO(priv->fb_unref);
143 89
@@ -157,34 +103,9 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
157 dev->mode_config.funcs = &armada_drm_mode_config_funcs; 103 dev->mode_config.funcs = &armada_drm_mode_config_funcs;
158 drm_mm_init(&priv->linear, mem->start, resource_size(mem)); 104 drm_mm_init(&priv->linear, mem->start, resource_size(mem));
159 105
160 /* Create all LCD controllers */ 106 ret = component_bind_all(dev->dev, dev);
161 for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) { 107 if (ret)
162 int irq; 108 goto err_kms;
163
164 if (!res[n])
165 break;
166
167 irq = platform_get_irq(dev->platformdev, n);
168 if (irq < 0)
169 goto err_kms;
170
171 ret = armada_drm_crtc_create(dev, dev->dev, res[n], irq,
172 variant, NULL);
173 if (ret)
174 goto err_kms;
175 }
176
177 if (is_componentized(dev->dev)) {
178 ret = component_bind_all(dev->dev, dev);
179 if (ret)
180 goto err_kms;
181 } else {
182#ifdef CONFIG_DRM_ARMADA_TDA1998X
183 ret = armada_drm_connector_slave_create(dev, &tda19988_config);
184 if (ret)
185 goto err_kms;
186#endif
187 }
188 109
189 ret = drm_vblank_init(dev, dev->mode_config.num_crtc); 110 ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
190 if (ret) 111 if (ret)
@@ -202,8 +123,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
202 return 0; 123 return 0;
203 124
204 err_comp: 125 err_comp:
205 if (is_componentized(dev->dev)) 126 component_unbind_all(dev->dev, dev);
206 component_unbind_all(dev->dev, dev);
207 err_kms: 127 err_kms:
208 drm_mode_config_cleanup(dev); 128 drm_mode_config_cleanup(dev);
209 drm_mm_takedown(&priv->linear); 129 drm_mm_takedown(&priv->linear);
@@ -219,8 +139,7 @@ static int armada_drm_unload(struct drm_device *dev)
219 drm_kms_helper_poll_fini(dev); 139 drm_kms_helper_poll_fini(dev);
220 armada_fbdev_fini(dev); 140 armada_fbdev_fini(dev);
221 141
222 if (is_componentized(dev->dev)) 142 component_unbind_all(dev->dev, dev);
223 component_unbind_all(dev->dev, dev);
224 143
225 drm_mode_config_cleanup(dev); 144 drm_mode_config_cleanup(dev);
226 drm_mm_takedown(&priv->linear); 145 drm_mm_takedown(&priv->linear);
@@ -230,50 +149,24 @@ static int armada_drm_unload(struct drm_device *dev)
230 return 0; 149 return 0;
231} 150}
232 151
233void armada_drm_vbl_event_add(struct armada_crtc *dcrtc,
234 struct armada_vbl_event *evt)
235{
236 unsigned long flags;
237
238 spin_lock_irqsave(&dcrtc->irq_lock, flags);
239 if (list_empty(&evt->node)) {
240 list_add_tail(&evt->node, &dcrtc->vbl_list);
241
242 drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
243 }
244 spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
245}
246
247void armada_drm_vbl_event_remove(struct armada_crtc *dcrtc,
248 struct armada_vbl_event *evt)
249{
250 if (!list_empty(&evt->node)) {
251 list_del_init(&evt->node);
252 drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
253 }
254}
255
256/* These are called under the vbl_lock. */ 152/* These are called under the vbl_lock. */
257static int armada_drm_enable_vblank(struct drm_device *dev, int crtc) 153static int armada_drm_enable_vblank(struct drm_device *dev, unsigned int pipe)
258{ 154{
259 struct armada_private *priv = dev->dev_private; 155 struct armada_private *priv = dev->dev_private;
260 armada_drm_crtc_enable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA); 156 armada_drm_crtc_enable_irq(priv->dcrtc[pipe], VSYNC_IRQ_ENA);
261 return 0; 157 return 0;
262} 158}
263 159
264static void armada_drm_disable_vblank(struct drm_device *dev, int crtc) 160static void armada_drm_disable_vblank(struct drm_device *dev, unsigned int pipe)
265{ 161{
266 struct armada_private *priv = dev->dev_private; 162 struct armada_private *priv = dev->dev_private;
267 armada_drm_crtc_disable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA); 163 armada_drm_crtc_disable_irq(priv->dcrtc[pipe], VSYNC_IRQ_ENA);
268} 164}
269 165
270static struct drm_ioctl_desc armada_ioctls[] = { 166static struct drm_ioctl_desc armada_ioctls[] = {
271 DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl, 167 DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,0),
272 DRM_UNLOCKED), 168 DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl, 0),
273 DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl, 169 DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl, 0),
274 DRM_UNLOCKED),
275 DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl,
276 DRM_UNLOCKED),
277}; 170};
278 171
279static void armada_drm_lastclose(struct drm_device *dev) 172static void armada_drm_lastclose(struct drm_device *dev)
@@ -300,7 +193,7 @@ static struct drm_driver armada_drm_driver = {
300 .lastclose = armada_drm_lastclose, 193 .lastclose = armada_drm_lastclose,
301 .unload = armada_drm_unload, 194 .unload = armada_drm_unload,
302 .set_busid = drm_platform_set_busid, 195 .set_busid = drm_platform_set_busid,
303 .get_vblank_counter = drm_vblank_count, 196 .get_vblank_counter = drm_vblank_no_hw_counter,
304 .enable_vblank = armada_drm_enable_vblank, 197 .enable_vblank = armada_drm_enable_vblank,
305 .disable_vblank = armada_drm_disable_vblank, 198 .disable_vblank = armada_drm_disable_vblank,
306#ifdef CONFIG_DEBUG_FS 199#ifdef CONFIG_DEBUG_FS
@@ -370,43 +263,29 @@ static void armada_add_endpoints(struct device *dev,
370 } 263 }
371} 264}
372 265
373static int armada_drm_find_components(struct device *dev, 266static const struct component_master_ops armada_master_ops = {
374 struct component_match **match) 267 .bind = armada_drm_bind,
375{ 268 .unbind = armada_drm_unbind,
376 struct device_node *port; 269};
377 int i;
378
379 if (dev->of_node) {
380 struct device_node *np = dev->of_node;
381
382 for (i = 0; ; i++) {
383 port = of_parse_phandle(np, "ports", i);
384 if (!port)
385 break;
386
387 component_match_add(dev, match, compare_of, port);
388 of_node_put(port);
389 }
390 270
391 if (i == 0) { 271static int armada_drm_probe(struct platform_device *pdev)
392 dev_err(dev, "missing 'ports' property\n"); 272{
393 return -ENODEV; 273 struct component_match *match = NULL;
394 } 274 struct device *dev = &pdev->dev;
275 int ret;
395 276
396 for (i = 0; ; i++) { 277 ret = drm_of_component_probe(dev, compare_dev_name, &armada_master_ops);
397 port = of_parse_phandle(np, "ports", i); 278 if (ret != -EINVAL)
398 if (!port) 279 return ret;
399 break;
400 280
401 armada_add_endpoints(dev, match, port); 281 if (dev->platform_data) {
402 of_node_put(port);
403 }
404 } else if (dev->platform_data) {
405 char **devices = dev->platform_data; 282 char **devices = dev->platform_data;
283 struct device_node *port;
406 struct device *d; 284 struct device *d;
285 int i;
407 286
408 for (i = 0; devices[i]; i++) 287 for (i = 0; devices[i]; i++)
409 component_match_add(dev, match, compare_dev_name, 288 component_match_add(dev, &match, compare_dev_name,
410 devices[i]); 289 devices[i]);
411 290
412 if (i == 0) { 291 if (i == 0) {
@@ -416,56 +295,30 @@ static int armada_drm_find_components(struct device *dev,
416 295
417 for (i = 0; devices[i]; i++) { 296 for (i = 0; devices[i]; i++) {
418 d = bus_find_device_by_name(&platform_bus_type, NULL, 297 d = bus_find_device_by_name(&platform_bus_type, NULL,
419 devices[i]); 298 devices[i]);
420 if (d && d->of_node) { 299 if (d && d->of_node) {
421 for_each_child_of_node(d->of_node, port) 300 for_each_child_of_node(d->of_node, port)
422 armada_add_endpoints(dev, match, port); 301 armada_add_endpoints(dev, &match, port);
423 } 302 }
424 put_device(d); 303 put_device(d);
425 } 304 }
426 } 305 }
427 306
428 return 0; 307 return component_master_add_with_match(&pdev->dev, &armada_master_ops,
429} 308 match);
430
431static const struct component_master_ops armada_master_ops = {
432 .bind = armada_drm_bind,
433 .unbind = armada_drm_unbind,
434};
435
436static int armada_drm_probe(struct platform_device *pdev)
437{
438 if (is_componentized(&pdev->dev)) {
439 struct component_match *match = NULL;
440 int ret;
441
442 ret = armada_drm_find_components(&pdev->dev, &match);
443 if (ret < 0)
444 return ret;
445
446 return component_master_add_with_match(&pdev->dev,
447 &armada_master_ops, match);
448 } else {
449 return drm_platform_init(&armada_drm_driver, pdev);
450 }
451} 309}
452 310
453static int armada_drm_remove(struct platform_device *pdev) 311static int armada_drm_remove(struct platform_device *pdev)
454{ 312{
455 if (is_componentized(&pdev->dev)) 313 component_master_del(&pdev->dev, &armada_master_ops);
456 component_master_del(&pdev->dev, &armada_master_ops);
457 else
458 drm_put_dev(platform_get_drvdata(pdev));
459 return 0; 314 return 0;
460} 315}
461 316
462static const struct platform_device_id armada_drm_platform_ids[] = { 317static const struct platform_device_id armada_drm_platform_ids[] = {
463 { 318 {
464 .name = "armada-drm", 319 .name = "armada-drm",
465 .driver_data = (unsigned long)&armada510_ops,
466 }, { 320 }, {
467 .name = "armada-510-drm", 321 .name = "armada-510-drm",
468 .driver_data = (unsigned long)&armada510_ops,
469 }, 322 },
470 { }, 323 { },
471}; 324};
diff --git a/drivers/gpu/drm/armada/armada_output.c b/drivers/gpu/drm/armada/armada_output.c
deleted file mode 100644
index 5a9823178291..000000000000
--- a/drivers/gpu/drm/armada/armada_output.c
+++ /dev/null
@@ -1,142 +0,0 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#include <drm/drmP.h>
9#include <drm/drm_crtc_helper.h>
10#include <drm/drm_edid.h>
11#include <drm/drm_encoder_slave.h>
12#include "armada_output.h"
13#include "armada_drm.h"
14
15struct armada_connector {
16 struct drm_connector conn;
17 const struct armada_output_type *type;
18};
19
20#define drm_to_armada_conn(c) container_of(c, struct armada_connector, conn)
21
22struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn)
23{
24 struct drm_encoder *enc = conn->encoder;
25
26 return enc ? enc : drm_encoder_find(conn->dev, conn->encoder_ids[0]);
27}
28
29static enum drm_connector_status armada_drm_connector_detect(
30 struct drm_connector *conn, bool force)
31{
32 struct armada_connector *dconn = drm_to_armada_conn(conn);
33 enum drm_connector_status status = connector_status_disconnected;
34
35 if (dconn->type->detect) {
36 status = dconn->type->detect(conn, force);
37 } else {
38 struct drm_encoder *enc = armada_drm_connector_encoder(conn);
39
40 if (enc)
41 status = encoder_helper_funcs(enc)->detect(enc, conn);
42 }
43
44 return status;
45}
46
47static void armada_drm_connector_destroy(struct drm_connector *conn)
48{
49 struct armada_connector *dconn = drm_to_armada_conn(conn);
50
51 drm_connector_unregister(conn);
52 drm_connector_cleanup(conn);
53 kfree(dconn);
54}
55
56static int armada_drm_connector_set_property(struct drm_connector *conn,
57 struct drm_property *property, uint64_t value)
58{
59 struct armada_connector *dconn = drm_to_armada_conn(conn);
60
61 if (!dconn->type->set_property)
62 return -EINVAL;
63
64 return dconn->type->set_property(conn, property, value);
65}
66
67static const struct drm_connector_funcs armada_drm_conn_funcs = {
68 .dpms = drm_helper_connector_dpms,
69 .fill_modes = drm_helper_probe_single_connector_modes,
70 .detect = armada_drm_connector_detect,
71 .destroy = armada_drm_connector_destroy,
72 .set_property = armada_drm_connector_set_property,
73};
74
75/* Shouldn't this be a generic helper function? */
76int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
77 struct drm_display_mode *mode)
78{
79 struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
80 int valid = MODE_BAD;
81
82 if (encoder) {
83 struct drm_encoder_slave *slave = to_encoder_slave(encoder);
84
85 valid = slave->slave_funcs->mode_valid(encoder, mode);
86 }
87 return valid;
88}
89
90int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
91 struct drm_property *property, uint64_t value)
92{
93 struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
94 int rc = -EINVAL;
95
96 if (encoder) {
97 struct drm_encoder_slave *slave = to_encoder_slave(encoder);
98
99 rc = slave->slave_funcs->set_property(encoder, conn, property,
100 value);
101 }
102 return rc;
103}
104
105int armada_output_create(struct drm_device *dev,
106 const struct armada_output_type *type, const void *data)
107{
108 struct armada_connector *dconn;
109 int ret;
110
111 dconn = kzalloc(sizeof(*dconn), GFP_KERNEL);
112 if (!dconn)
113 return -ENOMEM;
114
115 dconn->type = type;
116
117 ret = drm_connector_init(dev, &dconn->conn, &armada_drm_conn_funcs,
118 type->connector_type);
119 if (ret) {
120 DRM_ERROR("unable to init connector\n");
121 goto err_destroy_dconn;
122 }
123
124 ret = type->create(&dconn->conn, data);
125 if (ret)
126 goto err_conn;
127
128 ret = drm_connector_register(&dconn->conn);
129 if (ret)
130 goto err_sysfs;
131
132 return 0;
133
134 err_sysfs:
135 if (dconn->conn.encoder)
136 dconn->conn.encoder->funcs->destroy(dconn->conn.encoder);
137 err_conn:
138 drm_connector_cleanup(&dconn->conn);
139 err_destroy_dconn:
140 kfree(dconn);
141 return ret;
142}
diff --git a/drivers/gpu/drm/armada/armada_output.h b/drivers/gpu/drm/armada/armada_output.h
deleted file mode 100644
index f448785753e8..000000000000
--- a/drivers/gpu/drm/armada/armada_output.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#ifndef ARMADA_CONNETOR_H
9#define ARMADA_CONNETOR_H
10
11#define encoder_helper_funcs(encoder) \
12 ((const struct drm_encoder_helper_funcs *)encoder->helper_private)
13
14struct armada_output_type {
15 int connector_type;
16 enum drm_connector_status (*detect)(struct drm_connector *, bool);
17 int (*create)(struct drm_connector *, const void *);
18 int (*set_property)(struct drm_connector *, struct drm_property *,
19 uint64_t);
20};
21
22struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn);
23
24int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
25 struct drm_display_mode *mode);
26
27int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
28 struct drm_property *property, uint64_t value);
29
30int armada_output_create(struct drm_device *dev,
31 const struct armada_output_type *type, const void *data);
32
33#endif
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index e939faba7fcc..5c22b380f8f3 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -16,7 +16,7 @@
16#include <drm/armada_drm.h> 16#include <drm/armada_drm.h>
17#include "armada_ioctlP.h" 17#include "armada_ioctlP.h"
18 18
19struct armada_plane_properties { 19struct armada_ovl_plane_properties {
20 uint32_t colorkey_yr; 20 uint32_t colorkey_yr;
21 uint32_t colorkey_ug; 21 uint32_t colorkey_ug;
22 uint32_t colorkey_vb; 22 uint32_t colorkey_vb;
@@ -29,26 +29,25 @@ struct armada_plane_properties {
29 uint32_t colorkey_mode; 29 uint32_t colorkey_mode;
30}; 30};
31 31
32struct armada_plane { 32struct armada_ovl_plane {
33 struct drm_plane base; 33 struct armada_plane base;
34 spinlock_t lock;
35 struct drm_framebuffer *old_fb; 34 struct drm_framebuffer *old_fb;
36 uint32_t src_hw; 35 uint32_t src_hw;
37 uint32_t dst_hw; 36 uint32_t dst_hw;
38 uint32_t dst_yx; 37 uint32_t dst_yx;
39 uint32_t ctrl0; 38 uint32_t ctrl0;
40 struct { 39 struct {
41 struct armada_vbl_event update; 40 struct armada_plane_work work;
42 struct armada_regs regs[13]; 41 struct armada_regs regs[13];
43 wait_queue_head_t wait;
44 } vbl; 42 } vbl;
45 struct armada_plane_properties prop; 43 struct armada_ovl_plane_properties prop;
46}; 44};
47#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base) 45#define drm_to_armada_ovl_plane(p) \
46 container_of(p, struct armada_ovl_plane, base.base)
48 47
49 48
50static void 49static void
51armada_ovl_update_attr(struct armada_plane_properties *prop, 50armada_ovl_update_attr(struct armada_ovl_plane_properties *prop,
52 struct armada_crtc *dcrtc) 51 struct armada_crtc *dcrtc)
53{ 52{
54 writel_relaxed(prop->colorkey_yr, dcrtc->base + LCD_SPU_COLORKEY_Y); 53 writel_relaxed(prop->colorkey_yr, dcrtc->base + LCD_SPU_COLORKEY_Y);
@@ -71,32 +70,34 @@ armada_ovl_update_attr(struct armada_plane_properties *prop,
71 spin_unlock_irq(&dcrtc->irq_lock); 70 spin_unlock_irq(&dcrtc->irq_lock);
72} 71}
73 72
74/* === Plane support === */ 73static void armada_ovl_retire_fb(struct armada_ovl_plane *dplane,
75static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data) 74 struct drm_framebuffer *fb)
76{ 75{
77 struct armada_plane *dplane = data; 76 struct drm_framebuffer *old_fb;
78 struct drm_framebuffer *fb;
79 77
80 armada_drm_crtc_update_regs(dcrtc, dplane->vbl.regs); 78 old_fb = xchg(&dplane->old_fb, fb);
81 79
82 spin_lock(&dplane->lock); 80 if (old_fb)
83 fb = dplane->old_fb; 81 armada_drm_queue_unref_work(dplane->base.base.dev, old_fb);
84 dplane->old_fb = NULL; 82}
85 spin_unlock(&dplane->lock);
86 83
87 if (fb) 84/* === Plane support === */
88 armada_drm_queue_unref_work(dcrtc->crtc.dev, fb); 85static void armada_ovl_plane_work(struct armada_crtc *dcrtc,
86 struct armada_plane *plane, struct armada_plane_work *work)
87{
88 struct armada_ovl_plane *dplane = container_of(plane, struct armada_ovl_plane, base);
89 89
90 wake_up(&dplane->vbl.wait); 90 armada_drm_crtc_update_regs(dcrtc, dplane->vbl.regs);
91 armada_ovl_retire_fb(dplane, NULL);
91} 92}
92 93
93static int 94static int
94armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, 95armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
95 struct drm_framebuffer *fb, 96 struct drm_framebuffer *fb,
96 int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h, 97 int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
97 uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h) 98 uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h)
98{ 99{
99 struct armada_plane *dplane = drm_to_armada_plane(plane); 100 struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
100 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); 101 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
101 struct drm_rect src = { 102 struct drm_rect src = {
102 .x1 = src_x, 103 .x1 = src_x,
@@ -160,9 +161,8 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
160 dcrtc->base + LCD_SPU_SRAM_PARA1); 161 dcrtc->base + LCD_SPU_SRAM_PARA1);
161 } 162 }
162 163
163 wait_event_timeout(dplane->vbl.wait, 164 if (armada_drm_plane_work_wait(&dplane->base, HZ / 25) == 0)
164 list_empty(&dplane->vbl.update.node), 165 armada_drm_plane_work_cancel(dcrtc, &dplane->base);
165 HZ/25);
166 166
167 if (plane->fb != fb) { 167 if (plane->fb != fb) {
168 struct armada_gem_object *obj = drm_fb_obj(fb); 168 struct armada_gem_object *obj = drm_fb_obj(fb);
@@ -175,17 +175,8 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
175 */ 175 */
176 drm_framebuffer_reference(fb); 176 drm_framebuffer_reference(fb);
177 177
178 if (plane->fb) { 178 if (plane->fb)
179 struct drm_framebuffer *older_fb; 179 armada_ovl_retire_fb(dplane, plane->fb);
180
181 spin_lock_irq(&dplane->lock);
182 older_fb = dplane->old_fb;
183 dplane->old_fb = plane->fb;
184 spin_unlock_irq(&dplane->lock);
185 if (older_fb)
186 armada_drm_queue_unref_work(dcrtc->crtc.dev,
187 older_fb);
188 }
189 180
190 src_y = src.y1 >> 16; 181 src_y = src.y1 >> 16;
191 src_x = src.x1 >> 16; 182 src_x = src.x1 >> 16;
@@ -262,60 +253,50 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
262 } 253 }
263 if (idx) { 254 if (idx) {
264 armada_reg_queue_end(dplane->vbl.regs, idx); 255 armada_reg_queue_end(dplane->vbl.regs, idx);
265 armada_drm_vbl_event_add(dcrtc, &dplane->vbl.update); 256 armada_drm_plane_work_queue(dcrtc, &dplane->base,
257 &dplane->vbl.work);
266 } 258 }
267 return 0; 259 return 0;
268} 260}
269 261
270static int armada_plane_disable(struct drm_plane *plane) 262static int armada_ovl_plane_disable(struct drm_plane *plane)
271{ 263{
272 struct armada_plane *dplane = drm_to_armada_plane(plane); 264 struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
273 struct drm_framebuffer *fb; 265 struct drm_framebuffer *fb;
274 struct armada_crtc *dcrtc; 266 struct armada_crtc *dcrtc;
275 267
276 if (!dplane->base.crtc) 268 if (!dplane->base.base.crtc)
277 return 0; 269 return 0;
278 270
279 dcrtc = drm_to_armada_crtc(dplane->base.crtc); 271 dcrtc = drm_to_armada_crtc(dplane->base.base.crtc);
280 dcrtc->plane = NULL;
281
282 spin_lock_irq(&dcrtc->irq_lock);
283 armada_drm_vbl_event_remove(dcrtc, &dplane->vbl.update);
284 armada_updatel(0, CFG_DMA_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
285 dplane->ctrl0 = 0;
286 spin_unlock_irq(&dcrtc->irq_lock);
287 272
288 /* Power down the Y/U/V FIFOs */ 273 armada_drm_plane_work_cancel(dcrtc, &dplane->base);
289 armada_updatel(CFG_PDWN16x66 | CFG_PDWN32x66, 0, 274 armada_drm_crtc_plane_disable(dcrtc, plane);
290 dcrtc->base + LCD_SPU_SRAM_PARA1);
291 275
292 if (plane->fb) 276 dcrtc->plane = NULL;
293 drm_framebuffer_unreference(plane->fb); 277 dplane->ctrl0 = 0;
294 278
295 spin_lock_irq(&dplane->lock); 279 fb = xchg(&dplane->old_fb, NULL);
296 fb = dplane->old_fb;
297 dplane->old_fb = NULL;
298 spin_unlock_irq(&dplane->lock);
299 if (fb) 280 if (fb)
300 drm_framebuffer_unreference(fb); 281 drm_framebuffer_unreference(fb);
301 282
302 return 0; 283 return 0;
303} 284}
304 285
305static void armada_plane_destroy(struct drm_plane *plane) 286static void armada_ovl_plane_destroy(struct drm_plane *plane)
306{ 287{
307 struct armada_plane *dplane = drm_to_armada_plane(plane); 288 struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
308 289
309 drm_plane_cleanup(plane); 290 drm_plane_cleanup(plane);
310 291
311 kfree(dplane); 292 kfree(dplane);
312} 293}
313 294
314static int armada_plane_set_property(struct drm_plane *plane, 295static int armada_ovl_plane_set_property(struct drm_plane *plane,
315 struct drm_property *property, uint64_t val) 296 struct drm_property *property, uint64_t val)
316{ 297{
317 struct armada_private *priv = plane->dev->dev_private; 298 struct armada_private *priv = plane->dev->dev_private;
318 struct armada_plane *dplane = drm_to_armada_plane(plane); 299 struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
319 bool update_attr = false; 300 bool update_attr = false;
320 301
321 if (property == priv->colorkey_prop) { 302 if (property == priv->colorkey_prop) {
@@ -372,21 +353,21 @@ static int armada_plane_set_property(struct drm_plane *plane,
372 update_attr = true; 353 update_attr = true;
373 } 354 }
374 355
375 if (update_attr && dplane->base.crtc) 356 if (update_attr && dplane->base.base.crtc)
376 armada_ovl_update_attr(&dplane->prop, 357 armada_ovl_update_attr(&dplane->prop,
377 drm_to_armada_crtc(dplane->base.crtc)); 358 drm_to_armada_crtc(dplane->base.base.crtc));
378 359
379 return 0; 360 return 0;
380} 361}
381 362
382static const struct drm_plane_funcs armada_plane_funcs = { 363static const struct drm_plane_funcs armada_ovl_plane_funcs = {
383 .update_plane = armada_plane_update, 364 .update_plane = armada_ovl_plane_update,
384 .disable_plane = armada_plane_disable, 365 .disable_plane = armada_ovl_plane_disable,
385 .destroy = armada_plane_destroy, 366 .destroy = armada_ovl_plane_destroy,
386 .set_property = armada_plane_set_property, 367 .set_property = armada_ovl_plane_set_property,
387}; 368};
388 369
389static const uint32_t armada_formats[] = { 370static const uint32_t armada_ovl_formats[] = {
390 DRM_FORMAT_UYVY, 371 DRM_FORMAT_UYVY,
391 DRM_FORMAT_YUYV, 372 DRM_FORMAT_YUYV,
392 DRM_FORMAT_YUV420, 373 DRM_FORMAT_YUV420,
@@ -456,7 +437,7 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
456{ 437{
457 struct armada_private *priv = dev->dev_private; 438 struct armada_private *priv = dev->dev_private;
458 struct drm_mode_object *mobj; 439 struct drm_mode_object *mobj;
459 struct armada_plane *dplane; 440 struct armada_ovl_plane *dplane;
460 int ret; 441 int ret;
461 442
462 ret = armada_overlay_create_properties(dev); 443 ret = armada_overlay_create_properties(dev);
@@ -467,13 +448,23 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
467 if (!dplane) 448 if (!dplane)
468 return -ENOMEM; 449 return -ENOMEM;
469 450
470 spin_lock_init(&dplane->lock); 451 ret = armada_drm_plane_init(&dplane->base);
471 init_waitqueue_head(&dplane->vbl.wait); 452 if (ret) {
472 armada_drm_vbl_event_init(&dplane->vbl.update, armada_plane_vbl, 453 kfree(dplane);
473 dplane); 454 return ret;
455 }
456
457 dplane->vbl.work.fn = armada_ovl_plane_work;
474 458
475 drm_plane_init(dev, &dplane->base, crtcs, &armada_plane_funcs, 459 ret = drm_universal_plane_init(dev, &dplane->base.base, crtcs,
476 armada_formats, ARRAY_SIZE(armada_formats), false); 460 &armada_ovl_plane_funcs,
461 armada_ovl_formats,
462 ARRAY_SIZE(armada_ovl_formats),
463 DRM_PLANE_TYPE_OVERLAY);
464 if (ret) {
465 kfree(dplane);
466 return ret;
467 }
477 468
478 dplane->prop.colorkey_yr = 0xfefefe00; 469 dplane->prop.colorkey_yr = 0xfefefe00;
479 dplane->prop.colorkey_ug = 0x01010100; 470 dplane->prop.colorkey_ug = 0x01010100;
@@ -483,7 +474,7 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
483 dplane->prop.contrast = 0x4000; 474 dplane->prop.contrast = 0x4000;
484 dplane->prop.saturation = 0x4000; 475 dplane->prop.saturation = 0x4000;
485 476
486 mobj = &dplane->base.base; 477 mobj = &dplane->base.base.base;
487 drm_object_attach_property(mobj, priv->colorkey_prop, 478 drm_object_attach_property(mobj, priv->colorkey_prop,
488 0x0101fe); 479 0x0101fe);
489 drm_object_attach_property(mobj, priv->colorkey_min_prop, 480 drm_object_attach_property(mobj, priv->colorkey_min_prop,
diff --git a/drivers/gpu/drm/armada/armada_slave.c b/drivers/gpu/drm/armada/armada_slave.c
deleted file mode 100644
index 00d0facb42f3..000000000000
--- a/drivers/gpu/drm/armada/armada_slave.c
+++ /dev/null
@@ -1,139 +0,0 @@
1/*
2 * Copyright (C) 2012 Russell King
3 * Rewritten from the dovefb driver, and Armada510 manuals.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#include <drm/drmP.h>
10#include <drm/drm_crtc_helper.h>
11#include <drm/drm_edid.h>
12#include <drm/drm_encoder_slave.h>
13#include "armada_drm.h"
14#include "armada_output.h"
15#include "armada_slave.h"
16
17static int armada_drm_slave_get_modes(struct drm_connector *conn)
18{
19 struct drm_encoder *enc = armada_drm_connector_encoder(conn);
20 int count = 0;
21
22 if (enc) {
23 struct drm_encoder_slave *slave = to_encoder_slave(enc);
24
25 count = slave->slave_funcs->get_modes(enc, conn);
26 }
27
28 return count;
29}
30
31static void armada_drm_slave_destroy(struct drm_encoder *enc)
32{
33 struct drm_encoder_slave *slave = to_encoder_slave(enc);
34 struct i2c_client *client = drm_i2c_encoder_get_client(enc);
35
36 if (slave->slave_funcs)
37 slave->slave_funcs->destroy(enc);
38 if (client)
39 i2c_put_adapter(client->adapter);
40
41 drm_encoder_cleanup(&slave->base);
42 kfree(slave);
43}
44
45static const struct drm_encoder_funcs armada_drm_slave_encoder_funcs = {
46 .destroy = armada_drm_slave_destroy,
47};
48
49static const struct drm_connector_helper_funcs armada_drm_slave_helper_funcs = {
50 .get_modes = armada_drm_slave_get_modes,
51 .mode_valid = armada_drm_slave_encoder_mode_valid,
52 .best_encoder = armada_drm_connector_encoder,
53};
54
55static const struct drm_encoder_helper_funcs drm_slave_encoder_helpers = {
56 .dpms = drm_i2c_encoder_dpms,
57 .save = drm_i2c_encoder_save,
58 .restore = drm_i2c_encoder_restore,
59 .mode_fixup = drm_i2c_encoder_mode_fixup,
60 .prepare = drm_i2c_encoder_prepare,
61 .commit = drm_i2c_encoder_commit,
62 .mode_set = drm_i2c_encoder_mode_set,
63 .detect = drm_i2c_encoder_detect,
64};
65
66static int
67armada_drm_conn_slave_create(struct drm_connector *conn, const void *data)
68{
69 const struct armada_drm_slave_config *config = data;
70 struct drm_encoder_slave *slave;
71 struct i2c_adapter *adap;
72 int ret;
73
74 conn->interlace_allowed = config->interlace_allowed;
75 conn->doublescan_allowed = config->doublescan_allowed;
76 conn->polled = config->polled;
77
78 drm_connector_helper_add(conn, &armada_drm_slave_helper_funcs);
79
80 slave = kzalloc(sizeof(*slave), GFP_KERNEL);
81 if (!slave)
82 return -ENOMEM;
83
84 slave->base.possible_crtcs = config->crtcs;
85
86 adap = i2c_get_adapter(config->i2c_adapter_id);
87 if (!adap) {
88 kfree(slave);
89 return -EPROBE_DEFER;
90 }
91
92 ret = drm_encoder_init(conn->dev, &slave->base,
93 &armada_drm_slave_encoder_funcs,
94 DRM_MODE_ENCODER_TMDS);
95 if (ret) {
96 DRM_ERROR("unable to init encoder\n");
97 i2c_put_adapter(adap);
98 kfree(slave);
99 return ret;
100 }
101
102 ret = drm_i2c_encoder_init(conn->dev, slave, adap, &config->info);
103 i2c_put_adapter(adap);
104 if (ret) {
105 DRM_ERROR("unable to init encoder slave\n");
106 armada_drm_slave_destroy(&slave->base);
107 return ret;
108 }
109
110 drm_encoder_helper_add(&slave->base, &drm_slave_encoder_helpers);
111
112 ret = slave->slave_funcs->create_resources(&slave->base, conn);
113 if (ret) {
114 armada_drm_slave_destroy(&slave->base);
115 return ret;
116 }
117
118 ret = drm_mode_connector_attach_encoder(conn, &slave->base);
119 if (ret) {
120 armada_drm_slave_destroy(&slave->base);
121 return ret;
122 }
123
124 conn->encoder = &slave->base;
125
126 return ret;
127}
128
129static const struct armada_output_type armada_drm_conn_slave = {
130 .connector_type = DRM_MODE_CONNECTOR_HDMIA,
131 .create = armada_drm_conn_slave_create,
132 .set_property = armada_drm_slave_encoder_set_property,
133};
134
135int armada_drm_connector_slave_create(struct drm_device *dev,
136 const struct armada_drm_slave_config *config)
137{
138 return armada_output_create(dev, &armada_drm_conn_slave, config);
139}
diff --git a/drivers/gpu/drm/armada/armada_slave.h b/drivers/gpu/drm/armada/armada_slave.h
deleted file mode 100644
index bf2374c96fc1..000000000000
--- a/drivers/gpu/drm/armada/armada_slave.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#ifndef ARMADA_SLAVE_H
9#define ARMADA_SLAVE_H
10
11#include <linux/i2c.h>
12#include <drm/drmP.h>
13
14struct armada_drm_slave_config {
15 int i2c_adapter_id;
16 uint32_t crtcs;
17 uint8_t polled;
18 bool interlace_allowed;
19 bool doublescan_allowed;
20 struct i2c_board_info info;
21};
22
23int armada_drm_connector_slave_create(struct drm_device *dev,
24 const struct armada_drm_slave_config *);
25
26#endif
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 8bc62ec407f9..244df0a440b7 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -656,7 +656,8 @@ static void atmel_hlcdc_dc_irq_uninstall(struct drm_device *dev)
656 regmap_read(dc->hlcdc->regmap, ATMEL_HLCDC_ISR, &isr); 656 regmap_read(dc->hlcdc->regmap, ATMEL_HLCDC_ISR, &isr);
657} 657}
658 658
659static int atmel_hlcdc_dc_enable_vblank(struct drm_device *dev, int crtc) 659static int atmel_hlcdc_dc_enable_vblank(struct drm_device *dev,
660 unsigned int pipe)
660{ 661{
661 struct atmel_hlcdc_dc *dc = dev->dev_private; 662 struct atmel_hlcdc_dc *dc = dev->dev_private;
662 663
@@ -666,7 +667,8 @@ static int atmel_hlcdc_dc_enable_vblank(struct drm_device *dev, int crtc)
666 return 0; 667 return 0;
667} 668}
668 669
669static void atmel_hlcdc_dc_disable_vblank(struct drm_device *dev, int crtc) 670static void atmel_hlcdc_dc_disable_vblank(struct drm_device *dev,
671 unsigned int pipe)
670{ 672{
671 struct atmel_hlcdc_dc *dc = dev->dev_private; 673 struct atmel_hlcdc_dc *dc = dev->dev_private;
672 674
@@ -697,7 +699,7 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
697 .irq_preinstall = atmel_hlcdc_dc_irq_uninstall, 699 .irq_preinstall = atmel_hlcdc_dc_irq_uninstall,
698 .irq_postinstall = atmel_hlcdc_dc_irq_postinstall, 700 .irq_postinstall = atmel_hlcdc_dc_irq_postinstall,
699 .irq_uninstall = atmel_hlcdc_dc_irq_uninstall, 701 .irq_uninstall = atmel_hlcdc_dc_irq_uninstall,
700 .get_vblank_counter = drm_vblank_count, 702 .get_vblank_counter = drm_vblank_no_hw_counter,
701 .enable_vblank = atmel_hlcdc_dc_enable_vblank, 703 .enable_vblank = atmel_hlcdc_dc_enable_vblank,
702 .disable_vblank = atmel_hlcdc_dc_disable_vblank, 704 .disable_vblank = atmel_hlcdc_dc_disable_vblank,
703 .gem_free_object = drm_gem_cma_free_object, 705 .gem_free_object = drm_gem_cma_free_object,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index be9fa8220499..d0299aed517e 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -633,7 +633,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
633 if (!state->bpp[i]) 633 if (!state->bpp[i])
634 return -EINVAL; 634 return -EINVAL;
635 635
636 switch (state->base.rotation & 0xf) { 636 switch (state->base.rotation & DRM_ROTATE_MASK) {
637 case BIT(DRM_ROTATE_90): 637 case BIT(DRM_ROTATE_90):
638 offset = ((y_offset + state->src_y + patched_src_w - 1) / 638 offset = ((y_offset + state->src_y + patched_src_w - 1) /
639 ydiv) * fb->pitches[i]; 639 ydiv) * fb->pitches[i];
@@ -712,11 +712,13 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
712} 712}
713 713
714static int atmel_hlcdc_plane_prepare_fb(struct drm_plane *p, 714static int atmel_hlcdc_plane_prepare_fb(struct drm_plane *p,
715 struct drm_framebuffer *fb,
716 const struct drm_plane_state *new_state) 715 const struct drm_plane_state *new_state)
717{ 716{
718 struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p); 717 struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
719 718
719 if (!new_state->fb)
720 return 0;
721
720 return atmel_hlcdc_layer_update_start(&plane->layer); 722 return atmel_hlcdc_layer_update_start(&plane->layer);
721} 723}
722 724
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 2de52a53a803..6dddd392aa42 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -11,6 +11,18 @@ config DRM_DW_HDMI
11 tristate 11 tristate
12 select DRM_KMS_HELPER 12 select DRM_KMS_HELPER
13 13
14config DRM_DW_HDMI_AHB_AUDIO
15 tristate "Synopsis Designware AHB Audio interface"
16 depends on DRM_DW_HDMI && SND
17 select SND_PCM
18 select SND_PCM_ELD
19 select SND_PCM_IEC958
20 help
21 Support the AHB Audio interface which is part of the Synopsis
22 Designware HDMI block. This is used in conjunction with
23 the i.MX6 HDMI driver.
24
25
14config DRM_NXP_PTN3460 26config DRM_NXP_PTN3460
15 tristate "NXP PTN3460 DP/LVDS bridge" 27 tristate "NXP PTN3460 DP/LVDS bridge"
16 depends on OF 28 depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index e2eef1c2f4c3..d4e28beec30e 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,5 +1,6 @@
1ccflags-y := -Iinclude/drm 1ccflags-y := -Iinclude/drm
2 2
3obj-$(CONFIG_DRM_DW_HDMI) += dw_hdmi.o 3obj-$(CONFIG_DRM_DW_HDMI) += dw_hdmi.o
4obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw_hdmi-ahb-audio.o
4obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o 5obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
5obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o 6obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
diff --git a/drivers/gpu/drm/bridge/dw_hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/dw_hdmi-ahb-audio.c
new file mode 100644
index 000000000000..59f630f1c61a
--- /dev/null
+++ b/drivers/gpu/drm/bridge/dw_hdmi-ahb-audio.c
@@ -0,0 +1,653 @@
1/*
2 * DesignWare HDMI audio driver
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Written and tested against the Designware HDMI Tx found in iMX6.
9 */
10#include <linux/io.h>
11#include <linux/interrupt.h>
12#include <linux/module.h>
13#include <linux/platform_device.h>
14#include <drm/bridge/dw_hdmi.h>
15#include <drm/drm_edid.h>
16
17#include <sound/asoundef.h>
18#include <sound/core.h>
19#include <sound/initval.h>
20#include <sound/pcm.h>
21#include <sound/pcm_drm_eld.h>
22#include <sound/pcm_iec958.h>
23
24#include "dw_hdmi-audio.h"
25
26#define DRIVER_NAME "dw-hdmi-ahb-audio"
27
28/* Provide some bits rather than bit offsets */
29enum {
30 HDMI_AHB_DMA_CONF0_SW_FIFO_RST = BIT(7),
31 HDMI_AHB_DMA_CONF0_EN_HLOCK = BIT(3),
32 HDMI_AHB_DMA_START_START = BIT(0),
33 HDMI_AHB_DMA_STOP_STOP = BIT(0),
34 HDMI_IH_MUTE_AHBDMAAUD_STAT0_ERROR = BIT(5),
35 HDMI_IH_MUTE_AHBDMAAUD_STAT0_LOST = BIT(4),
36 HDMI_IH_MUTE_AHBDMAAUD_STAT0_RETRY = BIT(3),
37 HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE = BIT(2),
38 HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFFULL = BIT(1),
39 HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFEMPTY = BIT(0),
40 HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL =
41 HDMI_IH_MUTE_AHBDMAAUD_STAT0_ERROR |
42 HDMI_IH_MUTE_AHBDMAAUD_STAT0_LOST |
43 HDMI_IH_MUTE_AHBDMAAUD_STAT0_RETRY |
44 HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE |
45 HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFFULL |
46 HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFEMPTY,
47 HDMI_IH_AHBDMAAUD_STAT0_ERROR = BIT(5),
48 HDMI_IH_AHBDMAAUD_STAT0_LOST = BIT(4),
49 HDMI_IH_AHBDMAAUD_STAT0_RETRY = BIT(3),
50 HDMI_IH_AHBDMAAUD_STAT0_DONE = BIT(2),
51 HDMI_IH_AHBDMAAUD_STAT0_BUFFFULL = BIT(1),
52 HDMI_IH_AHBDMAAUD_STAT0_BUFFEMPTY = BIT(0),
53 HDMI_IH_AHBDMAAUD_STAT0_ALL =
54 HDMI_IH_AHBDMAAUD_STAT0_ERROR |
55 HDMI_IH_AHBDMAAUD_STAT0_LOST |
56 HDMI_IH_AHBDMAAUD_STAT0_RETRY |
57 HDMI_IH_AHBDMAAUD_STAT0_DONE |
58 HDMI_IH_AHBDMAAUD_STAT0_BUFFFULL |
59 HDMI_IH_AHBDMAAUD_STAT0_BUFFEMPTY,
60 HDMI_AHB_DMA_CONF0_INCR16 = 2 << 1,
61 HDMI_AHB_DMA_CONF0_INCR8 = 1 << 1,
62 HDMI_AHB_DMA_CONF0_INCR4 = 0,
63 HDMI_AHB_DMA_CONF0_BURST_MODE = BIT(0),
64 HDMI_AHB_DMA_MASK_DONE = BIT(7),
65
66 HDMI_REVISION_ID = 0x0001,
67 HDMI_IH_AHBDMAAUD_STAT0 = 0x0109,
68 HDMI_IH_MUTE_AHBDMAAUD_STAT0 = 0x0189,
69 HDMI_FC_AUDICONF2 = 0x1027,
70 HDMI_FC_AUDSCONF = 0x1063,
71 HDMI_FC_AUDSCONF_LAYOUT1 = 1 << 0,
72 HDMI_FC_AUDSCONF_LAYOUT0 = 0 << 0,
73 HDMI_AHB_DMA_CONF0 = 0x3600,
74 HDMI_AHB_DMA_START = 0x3601,
75 HDMI_AHB_DMA_STOP = 0x3602,
76 HDMI_AHB_DMA_THRSLD = 0x3603,
77 HDMI_AHB_DMA_STRADDR0 = 0x3604,
78 HDMI_AHB_DMA_STPADDR0 = 0x3608,
79 HDMI_AHB_DMA_MASK = 0x3614,
80 HDMI_AHB_DMA_POL = 0x3615,
81 HDMI_AHB_DMA_CONF1 = 0x3616,
82 HDMI_AHB_DMA_BUFFPOL = 0x361a,
83};
84
85struct dw_hdmi_channel_conf {
86 u8 conf1;
87 u8 ca;
88};
89
90/*
91 * The default mapping of ALSA channels to HDMI channels and speaker
92 * allocation bits. Note that we can't do channel remapping here -
93 * channels must be in the same order.
94 *
95 * Mappings for alsa-lib pcm/surround*.conf files:
96 *
97 * Front Sur4.0 Sur4.1 Sur5.0 Sur5.1 Sur7.1
98 * Channels 2 4 6 6 6 8
99 *
100 * Our mapping from ALSA channel to CEA686D speaker name and HDMI channel:
101 *
102 * Number of ALSA channels
103 * ALSA Channel 2 3 4 5 6 7 8
104 * 0 FL:0 = = = = = =
105 * 1 FR:1 = = = = = =
106 * 2 FC:3 RL:4 LFE:2 = = =
107 * 3 RR:5 RL:4 FC:3 = =
108 * 4 RR:5 RL:4 = =
109 * 5 RR:5 = =
110 * 6 RC:6 =
111 * 7 RLC/FRC RLC/FRC
112 */
113static struct dw_hdmi_channel_conf default_hdmi_channel_config[7] = {
114 { 0x03, 0x00 }, /* FL,FR */
115 { 0x0b, 0x02 }, /* FL,FR,FC */
116 { 0x33, 0x08 }, /* FL,FR,RL,RR */
117 { 0x37, 0x09 }, /* FL,FR,LFE,RL,RR */
118 { 0x3f, 0x0b }, /* FL,FR,LFE,FC,RL,RR */
119 { 0x7f, 0x0f }, /* FL,FR,LFE,FC,RL,RR,RC */
120 { 0xff, 0x13 }, /* FL,FR,LFE,FC,RL,RR,[FR]RC,[FR]LC */
121};
122
123struct snd_dw_hdmi {
124 struct snd_card *card;
125 struct snd_pcm *pcm;
126 spinlock_t lock;
127 struct dw_hdmi_audio_data data;
128 struct snd_pcm_substream *substream;
129 void (*reformat)(struct snd_dw_hdmi *, size_t, size_t);
130 void *buf_src;
131 void *buf_dst;
132 dma_addr_t buf_addr;
133 unsigned buf_offset;
134 unsigned buf_period;
135 unsigned buf_size;
136 unsigned channels;
137 u8 revision;
138 u8 iec_offset;
139 u8 cs[192][8];
140};
141
142static void dw_hdmi_writel(u32 val, void __iomem *ptr)
143{
144 writeb_relaxed(val, ptr);
145 writeb_relaxed(val >> 8, ptr + 1);
146 writeb_relaxed(val >> 16, ptr + 2);
147 writeb_relaxed(val >> 24, ptr + 3);
148}
149
150/*
151 * Convert to hardware format: The userspace buffer contains IEC958 samples,
152 * with the PCUV bits in bits 31..28 and audio samples in bits 27..4. We
153 * need these to be in bits 27..24, with the IEC B bit in bit 28, and audio
154 * samples in 23..0.
155 *
156 * Default preamble in bits 3..0: 8 = block start, 4 = even 2 = odd
157 *
158 * Ideally, we could do with having the data properly formatted in userspace.
159 */
160static void dw_hdmi_reformat_iec958(struct snd_dw_hdmi *dw,
161 size_t offset, size_t bytes)
162{
163 u32 *src = dw->buf_src + offset;
164 u32 *dst = dw->buf_dst + offset;
165 u32 *end = dw->buf_src + offset + bytes;
166
167 do {
168 u32 b, sample = *src++;
169
170 b = (sample & 8) << (28 - 3);
171
172 sample >>= 4;
173
174 *dst++ = sample | b;
175 } while (src < end);
176}
177
178static u32 parity(u32 sample)
179{
180 sample ^= sample >> 16;
181 sample ^= sample >> 8;
182 sample ^= sample >> 4;
183 sample ^= sample >> 2;
184 sample ^= sample >> 1;
185 return (sample & 1) << 27;
186}
187
188static void dw_hdmi_reformat_s24(struct snd_dw_hdmi *dw,
189 size_t offset, size_t bytes)
190{
191 u32 *src = dw->buf_src + offset;
192 u32 *dst = dw->buf_dst + offset;
193 u32 *end = dw->buf_src + offset + bytes;
194
195 do {
196 unsigned i;
197 u8 *cs;
198
199 cs = dw->cs[dw->iec_offset++];
200 if (dw->iec_offset >= 192)
201 dw->iec_offset = 0;
202
203 i = dw->channels;
204 do {
205 u32 sample = *src++;
206
207 sample &= ~0xff000000;
208 sample |= *cs++ << 24;
209 sample |= parity(sample & ~0xf8000000);
210
211 *dst++ = sample;
212 } while (--i);
213 } while (src < end);
214}
215
216static void dw_hdmi_create_cs(struct snd_dw_hdmi *dw,
217 struct snd_pcm_runtime *runtime)
218{
219 u8 cs[4];
220 unsigned ch, i, j;
221
222 snd_pcm_create_iec958_consumer(runtime, cs, sizeof(cs));
223
224 memset(dw->cs, 0, sizeof(dw->cs));
225
226 for (ch = 0; ch < 8; ch++) {
227 cs[2] &= ~IEC958_AES2_CON_CHANNEL;
228 cs[2] |= (ch + 1) << 4;
229
230 for (i = 0; i < ARRAY_SIZE(cs); i++) {
231 unsigned c = cs[i];
232
233 for (j = 0; j < 8; j++, c >>= 1)
234 dw->cs[i * 8 + j][ch] = (c & 1) << 2;
235 }
236 }
237 dw->cs[0][0] |= BIT(4);
238}
239
240static void dw_hdmi_start_dma(struct snd_dw_hdmi *dw)
241{
242 void __iomem *base = dw->data.base;
243 unsigned offset = dw->buf_offset;
244 unsigned period = dw->buf_period;
245 u32 start, stop;
246
247 dw->reformat(dw, offset, period);
248
249 /* Clear all irqs before enabling irqs and starting DMA */
250 writeb_relaxed(HDMI_IH_AHBDMAAUD_STAT0_ALL,
251 base + HDMI_IH_AHBDMAAUD_STAT0);
252
253 start = dw->buf_addr + offset;
254 stop = start + period - 1;
255
256 /* Setup the hardware start/stop addresses */
257 dw_hdmi_writel(start, base + HDMI_AHB_DMA_STRADDR0);
258 dw_hdmi_writel(stop, base + HDMI_AHB_DMA_STPADDR0);
259
260 writeb_relaxed((u8)~HDMI_AHB_DMA_MASK_DONE, base + HDMI_AHB_DMA_MASK);
261 writeb(HDMI_AHB_DMA_START_START, base + HDMI_AHB_DMA_START);
262
263 offset += period;
264 if (offset >= dw->buf_size)
265 offset = 0;
266 dw->buf_offset = offset;
267}
268
269static void dw_hdmi_stop_dma(struct snd_dw_hdmi *dw)
270{
271 /* Disable interrupts before disabling DMA */
272 writeb_relaxed(~0, dw->data.base + HDMI_AHB_DMA_MASK);
273 writeb_relaxed(HDMI_AHB_DMA_STOP_STOP, dw->data.base + HDMI_AHB_DMA_STOP);
274}
275
276static irqreturn_t snd_dw_hdmi_irq(int irq, void *data)
277{
278 struct snd_dw_hdmi *dw = data;
279 struct snd_pcm_substream *substream;
280 unsigned stat;
281
282 stat = readb_relaxed(dw->data.base + HDMI_IH_AHBDMAAUD_STAT0);
283 if (!stat)
284 return IRQ_NONE;
285
286 writeb_relaxed(stat, dw->data.base + HDMI_IH_AHBDMAAUD_STAT0);
287
288 substream = dw->substream;
289 if (stat & HDMI_IH_AHBDMAAUD_STAT0_DONE && substream) {
290 snd_pcm_period_elapsed(substream);
291
292 spin_lock(&dw->lock);
293 if (dw->substream)
294 dw_hdmi_start_dma(dw);
295 spin_unlock(&dw->lock);
296 }
297
298 return IRQ_HANDLED;
299}
300
301static struct snd_pcm_hardware dw_hdmi_hw = {
302 .info = SNDRV_PCM_INFO_INTERLEAVED |
303 SNDRV_PCM_INFO_BLOCK_TRANSFER |
304 SNDRV_PCM_INFO_MMAP |
305 SNDRV_PCM_INFO_MMAP_VALID,
306 .formats = SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE |
307 SNDRV_PCM_FMTBIT_S24_LE,
308 .rates = SNDRV_PCM_RATE_32000 |
309 SNDRV_PCM_RATE_44100 |
310 SNDRV_PCM_RATE_48000 |
311 SNDRV_PCM_RATE_88200 |
312 SNDRV_PCM_RATE_96000 |
313 SNDRV_PCM_RATE_176400 |
314 SNDRV_PCM_RATE_192000,
315 .channels_min = 2,
316 .channels_max = 8,
317 .buffer_bytes_max = 1024 * 1024,
318 .period_bytes_min = 256,
319 .period_bytes_max = 8192, /* ERR004323: must limit to 8k */
320 .periods_min = 2,
321 .periods_max = 16,
322 .fifo_size = 0,
323};
324
325static int dw_hdmi_open(struct snd_pcm_substream *substream)
326{
327 struct snd_pcm_runtime *runtime = substream->runtime;
328 struct snd_dw_hdmi *dw = substream->private_data;
329 void __iomem *base = dw->data.base;
330 int ret;
331
332 runtime->hw = dw_hdmi_hw;
333
334 ret = snd_pcm_hw_constraint_eld(runtime, dw->data.eld);
335 if (ret < 0)
336 return ret;
337
338 ret = snd_pcm_limit_hw_rates(runtime);
339 if (ret < 0)
340 return ret;
341
342 ret = snd_pcm_hw_constraint_integer(runtime,
343 SNDRV_PCM_HW_PARAM_PERIODS);
344 if (ret < 0)
345 return ret;
346
347 /* Limit the buffer size to the size of the preallocated buffer */
348 ret = snd_pcm_hw_constraint_minmax(runtime,
349 SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
350 0, substream->dma_buffer.bytes);
351 if (ret < 0)
352 return ret;
353
354 /* Clear FIFO */
355 writeb_relaxed(HDMI_AHB_DMA_CONF0_SW_FIFO_RST,
356 base + HDMI_AHB_DMA_CONF0);
357
358 /* Configure interrupt polarities */
359 writeb_relaxed(~0, base + HDMI_AHB_DMA_POL);
360 writeb_relaxed(~0, base + HDMI_AHB_DMA_BUFFPOL);
361
362 /* Keep interrupts masked, and clear any pending */
363 writeb_relaxed(~0, base + HDMI_AHB_DMA_MASK);
364 writeb_relaxed(~0, base + HDMI_IH_AHBDMAAUD_STAT0);
365
366 ret = request_irq(dw->data.irq, snd_dw_hdmi_irq, IRQF_SHARED,
367 "dw-hdmi-audio", dw);
368 if (ret)
369 return ret;
370
371 /* Un-mute done interrupt */
372 writeb_relaxed(HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL &
373 ~HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE,
374 base + HDMI_IH_MUTE_AHBDMAAUD_STAT0);
375
376 return 0;
377}
378
379static int dw_hdmi_close(struct snd_pcm_substream *substream)
380{
381 struct snd_dw_hdmi *dw = substream->private_data;
382
383 /* Mute all interrupts */
384 writeb_relaxed(HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL,
385 dw->data.base + HDMI_IH_MUTE_AHBDMAAUD_STAT0);
386
387 free_irq(dw->data.irq, dw);
388
389 return 0;
390}
391
392static int dw_hdmi_hw_free(struct snd_pcm_substream *substream)
393{
394 return snd_pcm_lib_free_vmalloc_buffer(substream);
395}
396
397static int dw_hdmi_hw_params(struct snd_pcm_substream *substream,
398 struct snd_pcm_hw_params *params)
399{
400 /* Allocate the PCM runtime buffer, which is exposed to userspace. */
401 return snd_pcm_lib_alloc_vmalloc_buffer(substream,
402 params_buffer_bytes(params));
403}
404
405static int dw_hdmi_prepare(struct snd_pcm_substream *substream)
406{
407 struct snd_pcm_runtime *runtime = substream->runtime;
408 struct snd_dw_hdmi *dw = substream->private_data;
409 u8 threshold, conf0, conf1, layout, ca;
410
411 /* Setup as per 3.0.5 FSL 4.1.0 BSP */
412 switch (dw->revision) {
413 case 0x0a:
414 conf0 = HDMI_AHB_DMA_CONF0_BURST_MODE |
415 HDMI_AHB_DMA_CONF0_INCR4;
416 if (runtime->channels == 2)
417 threshold = 126;
418 else
419 threshold = 124;
420 break;
421 case 0x1a:
422 conf0 = HDMI_AHB_DMA_CONF0_BURST_MODE |
423 HDMI_AHB_DMA_CONF0_INCR8;
424 threshold = 128;
425 break;
426 default:
427 /* NOTREACHED */
428 return -EINVAL;
429 }
430
431 dw_hdmi_set_sample_rate(dw->data.hdmi, runtime->rate);
432
433 /* Minimum number of bytes in the fifo. */
434 runtime->hw.fifo_size = threshold * 32;
435
436 conf0 |= HDMI_AHB_DMA_CONF0_EN_HLOCK;
437 conf1 = default_hdmi_channel_config[runtime->channels - 2].conf1;
438 ca = default_hdmi_channel_config[runtime->channels - 2].ca;
439
440 /*
441 * For >2 channel PCM audio, we need to select layout 1
442 * and set an appropriate channel map.
443 */
444 if (runtime->channels > 2)
445 layout = HDMI_FC_AUDSCONF_LAYOUT1;
446 else
447 layout = HDMI_FC_AUDSCONF_LAYOUT0;
448
449 writeb_relaxed(threshold, dw->data.base + HDMI_AHB_DMA_THRSLD);
450 writeb_relaxed(conf0, dw->data.base + HDMI_AHB_DMA_CONF0);
451 writeb_relaxed(conf1, dw->data.base + HDMI_AHB_DMA_CONF1);
452 writeb_relaxed(layout, dw->data.base + HDMI_FC_AUDSCONF);
453 writeb_relaxed(ca, dw->data.base + HDMI_FC_AUDICONF2);
454
455 switch (runtime->format) {
456 case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE:
457 dw->reformat = dw_hdmi_reformat_iec958;
458 break;
459 case SNDRV_PCM_FORMAT_S24_LE:
460 dw_hdmi_create_cs(dw, runtime);
461 dw->reformat = dw_hdmi_reformat_s24;
462 break;
463 }
464 dw->iec_offset = 0;
465 dw->channels = runtime->channels;
466 dw->buf_src = runtime->dma_area;
467 dw->buf_dst = substream->dma_buffer.area;
468 dw->buf_addr = substream->dma_buffer.addr;
469 dw->buf_period = snd_pcm_lib_period_bytes(substream);
470 dw->buf_size = snd_pcm_lib_buffer_bytes(substream);
471
472 return 0;
473}
474
475static int dw_hdmi_trigger(struct snd_pcm_substream *substream, int cmd)
476{
477 struct snd_dw_hdmi *dw = substream->private_data;
478 unsigned long flags;
479 int ret = 0;
480
481 switch (cmd) {
482 case SNDRV_PCM_TRIGGER_START:
483 spin_lock_irqsave(&dw->lock, flags);
484 dw->buf_offset = 0;
485 dw->substream = substream;
486 dw_hdmi_start_dma(dw);
487 dw_hdmi_audio_enable(dw->data.hdmi);
488 spin_unlock_irqrestore(&dw->lock, flags);
489 substream->runtime->delay = substream->runtime->period_size;
490 break;
491
492 case SNDRV_PCM_TRIGGER_STOP:
493 spin_lock_irqsave(&dw->lock, flags);
494 dw->substream = NULL;
495 dw_hdmi_stop_dma(dw);
496 dw_hdmi_audio_disable(dw->data.hdmi);
497 spin_unlock_irqrestore(&dw->lock, flags);
498 break;
499
500 default:
501 ret = -EINVAL;
502 break;
503 }
504
505 return ret;
506}
507
508static snd_pcm_uframes_t dw_hdmi_pointer(struct snd_pcm_substream *substream)
509{
510 struct snd_pcm_runtime *runtime = substream->runtime;
511 struct snd_dw_hdmi *dw = substream->private_data;
512
513 /*
514 * We are unable to report the exact hardware position as
515 * reading the 32-bit DMA position using 8-bit reads is racy.
516 */
517 return bytes_to_frames(runtime, dw->buf_offset);
518}
519
520static struct snd_pcm_ops snd_dw_hdmi_ops = {
521 .open = dw_hdmi_open,
522 .close = dw_hdmi_close,
523 .ioctl = snd_pcm_lib_ioctl,
524 .hw_params = dw_hdmi_hw_params,
525 .hw_free = dw_hdmi_hw_free,
526 .prepare = dw_hdmi_prepare,
527 .trigger = dw_hdmi_trigger,
528 .pointer = dw_hdmi_pointer,
529 .page = snd_pcm_lib_get_vmalloc_page,
530};
531
532static int snd_dw_hdmi_probe(struct platform_device *pdev)
533{
534 const struct dw_hdmi_audio_data *data = pdev->dev.platform_data;
535 struct device *dev = pdev->dev.parent;
536 struct snd_dw_hdmi *dw;
537 struct snd_card *card;
538 struct snd_pcm *pcm;
539 unsigned revision;
540 int ret;
541
542 writeb_relaxed(HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL,
543 data->base + HDMI_IH_MUTE_AHBDMAAUD_STAT0);
544 revision = readb_relaxed(data->base + HDMI_REVISION_ID);
545 if (revision != 0x0a && revision != 0x1a) {
546 dev_err(dev, "dw-hdmi-audio: unknown revision 0x%02x\n",
547 revision);
548 return -ENXIO;
549 }
550
551 ret = snd_card_new(dev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
552 THIS_MODULE, sizeof(struct snd_dw_hdmi), &card);
553 if (ret < 0)
554 return ret;
555
556 strlcpy(card->driver, DRIVER_NAME, sizeof(card->driver));
557 strlcpy(card->shortname, "DW-HDMI", sizeof(card->shortname));
558 snprintf(card->longname, sizeof(card->longname),
559 "%s rev 0x%02x, irq %d", card->shortname, revision,
560 data->irq);
561
562 dw = card->private_data;
563 dw->card = card;
564 dw->data = *data;
565 dw->revision = revision;
566
567 spin_lock_init(&dw->lock);
568
569 ret = snd_pcm_new(card, "DW HDMI", 0, 1, 0, &pcm);
570 if (ret < 0)
571 goto err;
572
573 dw->pcm = pcm;
574 pcm->private_data = dw;
575 strlcpy(pcm->name, DRIVER_NAME, sizeof(pcm->name));
576 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_dw_hdmi_ops);
577
578 /*
579 * To support 8-channel 96kHz audio reliably, we need 512k
580 * to satisfy alsa with our restricted period (ERR004323).
581 */
582 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
583 dev, 128 * 1024, 1024 * 1024);
584
585 ret = snd_card_register(card);
586 if (ret < 0)
587 goto err;
588
589 platform_set_drvdata(pdev, dw);
590
591 return 0;
592
593err:
594 snd_card_free(card);
595 return ret;
596}
597
598static int snd_dw_hdmi_remove(struct platform_device *pdev)
599{
600 struct snd_dw_hdmi *dw = platform_get_drvdata(pdev);
601
602 snd_card_free(dw->card);
603
604 return 0;
605}
606
#if defined(CONFIG_PM_SLEEP) && defined(IS_NOT_BROKEN)
/*
 * This code is fine, but requires implementation in the dw_hdmi_trigger()
 * method which is currently missing as I have no way to test this.
 * (Deliberately compiled out via IS_NOT_BROKEN until then.)
 */
static int snd_dw_hdmi_suspend(struct device *dev)
{
	struct snd_dw_hdmi *dw = dev_get_drvdata(dev);

	/* Mark the card powered down, then suspend all PCM streams. */
	snd_power_change_state(dw->card, SNDRV_CTL_POWER_D3cold);
	snd_pcm_suspend_all(dw->pcm);
	return 0;
}

static int snd_dw_hdmi_resume(struct device *dev)
{
	struct snd_dw_hdmi *dw = dev_get_drvdata(dev);

	snd_power_change_state(dw->card, SNDRV_CTL_POWER_D0);
	return 0;
}

static SIMPLE_DEV_PM_OPS(snd_dw_hdmi_pm, snd_dw_hdmi_suspend,
			 snd_dw_hdmi_resume);
#define PM_OPS &snd_dw_hdmi_pm
#else
#define PM_OPS NULL
#endif
637
638static struct platform_driver snd_dw_hdmi_driver = {
639 .probe = snd_dw_hdmi_probe,
640 .remove = snd_dw_hdmi_remove,
641 .driver = {
642 .name = DRIVER_NAME,
643 .owner = THIS_MODULE,
644 .pm = PM_OPS,
645 },
646};
647
648module_platform_driver(snd_dw_hdmi_driver);
649
650MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
651MODULE_DESCRIPTION("Synopsis Designware HDMI AHB ALSA interface");
652MODULE_LICENSE("GPL v2");
653MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/gpu/drm/bridge/dw_hdmi-audio.h b/drivers/gpu/drm/bridge/dw_hdmi-audio.h
new file mode 100644
index 000000000000..91f631beecc7
--- /dev/null
+++ b/drivers/gpu/drm/bridge/dw_hdmi-audio.h
@@ -0,0 +1,14 @@
1#ifndef DW_HDMI_AUDIO_H
2#define DW_HDMI_AUDIO_H
3
4struct dw_hdmi;
5
6struct dw_hdmi_audio_data {
7 phys_addr_t phys;
8 void __iomem *base;
9 int irq;
10 struct dw_hdmi *hdmi;
11 u8 *eld;
12};
13
14#endif
diff --git a/drivers/gpu/drm/bridge/dw_hdmi.c b/drivers/gpu/drm/bridge/dw_hdmi.c
index 0083d4e7e7e2..56de9f1c95fc 100644
--- a/drivers/gpu/drm/bridge/dw_hdmi.c
+++ b/drivers/gpu/drm/bridge/dw_hdmi.c
@@ -28,6 +28,7 @@
28#include <drm/bridge/dw_hdmi.h> 28#include <drm/bridge/dw_hdmi.h>
29 29
30#include "dw_hdmi.h" 30#include "dw_hdmi.h"
31#include "dw_hdmi-audio.h"
31 32
32#define HDMI_EDID_LEN 512 33#define HDMI_EDID_LEN 512
33 34
@@ -104,6 +105,7 @@ struct dw_hdmi {
104 struct drm_encoder *encoder; 105 struct drm_encoder *encoder;
105 struct drm_bridge *bridge; 106 struct drm_bridge *bridge;
106 107
108 struct platform_device *audio;
107 enum dw_hdmi_devtype dev_type; 109 enum dw_hdmi_devtype dev_type;
108 struct device *dev; 110 struct device *dev;
109 struct clk *isfr_clk; 111 struct clk *isfr_clk;
@@ -126,7 +128,11 @@ struct dw_hdmi {
126 bool sink_has_audio; 128 bool sink_has_audio;
127 129
128 struct mutex mutex; /* for state below and previous_mode */ 130 struct mutex mutex; /* for state below and previous_mode */
131 enum drm_connector_force force; /* mutex-protected force state */
129 bool disabled; /* DRM has disabled our bridge */ 132 bool disabled; /* DRM has disabled our bridge */
133 bool bridge_is_on; /* indicates the bridge is on */
134 bool rxsense; /* rxsense state */
135 u8 phy_mask; /* desired phy int mask settings */
130 136
131 spinlock_t audio_lock; 137 spinlock_t audio_lock;
132 struct mutex audio_mutex; 138 struct mutex audio_mutex;
@@ -134,12 +140,19 @@ struct dw_hdmi {
134 unsigned int audio_cts; 140 unsigned int audio_cts;
135 unsigned int audio_n; 141 unsigned int audio_n;
136 bool audio_enable; 142 bool audio_enable;
137 int ratio;
138 143
139 void (*write)(struct dw_hdmi *hdmi, u8 val, int offset); 144 void (*write)(struct dw_hdmi *hdmi, u8 val, int offset);
140 u8 (*read)(struct dw_hdmi *hdmi, int offset); 145 u8 (*read)(struct dw_hdmi *hdmi, int offset);
141}; 146};
142 147
148#define HDMI_IH_PHY_STAT0_RX_SENSE \
149 (HDMI_IH_PHY_STAT0_RX_SENSE0 | HDMI_IH_PHY_STAT0_RX_SENSE1 | \
150 HDMI_IH_PHY_STAT0_RX_SENSE2 | HDMI_IH_PHY_STAT0_RX_SENSE3)
151
152#define HDMI_PHY_RX_SENSE \
153 (HDMI_PHY_RX_SENSE0 | HDMI_PHY_RX_SENSE1 | \
154 HDMI_PHY_RX_SENSE2 | HDMI_PHY_RX_SENSE3)
155
143static void dw_hdmi_writel(struct dw_hdmi *hdmi, u8 val, int offset) 156static void dw_hdmi_writel(struct dw_hdmi *hdmi, u8 val, int offset)
144{ 157{
145 writel(val, hdmi->regs + (offset << 2)); 158 writel(val, hdmi->regs + (offset << 2));
@@ -203,61 +216,53 @@ static void hdmi_set_cts_n(struct dw_hdmi *hdmi, unsigned int cts,
203 hdmi_writeb(hdmi, n & 0xff, HDMI_AUD_N1); 216 hdmi_writeb(hdmi, n & 0xff, HDMI_AUD_N1);
204} 217}
205 218
206static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk, 219static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk)
207 unsigned int ratio)
208{ 220{
209 unsigned int n = (128 * freq) / 1000; 221 unsigned int n = (128 * freq) / 1000;
222 unsigned int mult = 1;
223
224 while (freq > 48000) {
225 mult *= 2;
226 freq /= 2;
227 }
210 228
211 switch (freq) { 229 switch (freq) {
212 case 32000: 230 case 32000:
213 if (pixel_clk == 25170000) 231 if (pixel_clk == 25175000)
214 n = (ratio == 150) ? 9152 : 4576; 232 n = 4576;
215 else if (pixel_clk == 27020000) 233 else if (pixel_clk == 27027000)
216 n = (ratio == 150) ? 8192 : 4096; 234 n = 4096;
217 else if (pixel_clk == 74170000 || pixel_clk == 148350000) 235 else if (pixel_clk == 74176000 || pixel_clk == 148352000)
218 n = 11648; 236 n = 11648;
219 else 237 else
220 n = 4096; 238 n = 4096;
239 n *= mult;
221 break; 240 break;
222 241
223 case 44100: 242 case 44100:
224 if (pixel_clk == 25170000) 243 if (pixel_clk == 25175000)
225 n = 7007; 244 n = 7007;
226 else if (pixel_clk == 74170000) 245 else if (pixel_clk == 74176000)
227 n = 17836; 246 n = 17836;
228 else if (pixel_clk == 148350000) 247 else if (pixel_clk == 148352000)
229 n = (ratio == 150) ? 17836 : 8918; 248 n = 8918;
230 else 249 else
231 n = 6272; 250 n = 6272;
251 n *= mult;
232 break; 252 break;
233 253
234 case 48000: 254 case 48000:
235 if (pixel_clk == 25170000) 255 if (pixel_clk == 25175000)
236 n = (ratio == 150) ? 9152 : 6864; 256 n = 6864;
237 else if (pixel_clk == 27020000) 257 else if (pixel_clk == 27027000)
238 n = (ratio == 150) ? 8192 : 6144; 258 n = 6144;
239 else if (pixel_clk == 74170000) 259 else if (pixel_clk == 74176000)
240 n = 11648; 260 n = 11648;
241 else if (pixel_clk == 148350000) 261 else if (pixel_clk == 148352000)
242 n = (ratio == 150) ? 11648 : 5824; 262 n = 5824;
243 else 263 else
244 n = 6144; 264 n = 6144;
245 break; 265 n *= mult;
246
247 case 88200:
248 n = hdmi_compute_n(44100, pixel_clk, ratio) * 2;
249 break;
250
251 case 96000:
252 n = hdmi_compute_n(48000, pixel_clk, ratio) * 2;
253 break;
254
255 case 176400:
256 n = hdmi_compute_n(44100, pixel_clk, ratio) * 4;
257 break;
258
259 case 192000:
260 n = hdmi_compute_n(48000, pixel_clk, ratio) * 4;
261 break; 266 break;
262 267
263 default: 268 default:
@@ -267,93 +272,29 @@ static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk,
267 return n; 272 return n;
268} 273}
269 274
270static unsigned int hdmi_compute_cts(unsigned int freq, unsigned long pixel_clk,
271 unsigned int ratio)
272{
273 unsigned int cts = 0;
274
275 pr_debug("%s: freq: %d pixel_clk: %ld ratio: %d\n", __func__, freq,
276 pixel_clk, ratio);
277
278 switch (freq) {
279 case 32000:
280 if (pixel_clk == 297000000) {
281 cts = 222750;
282 break;
283 }
284 case 48000:
285 case 96000:
286 case 192000:
287 switch (pixel_clk) {
288 case 25200000:
289 case 27000000:
290 case 54000000:
291 case 74250000:
292 case 148500000:
293 cts = pixel_clk / 1000;
294 break;
295 case 297000000:
296 cts = 247500;
297 break;
298 /*
299 * All other TMDS clocks are not supported by
300 * DWC_hdmi_tx. The TMDS clocks divided or
301 * multiplied by 1,001 coefficients are not
302 * supported.
303 */
304 default:
305 break;
306 }
307 break;
308 case 44100:
309 case 88200:
310 case 176400:
311 switch (pixel_clk) {
312 case 25200000:
313 cts = 28000;
314 break;
315 case 27000000:
316 cts = 30000;
317 break;
318 case 54000000:
319 cts = 60000;
320 break;
321 case 74250000:
322 cts = 82500;
323 break;
324 case 148500000:
325 cts = 165000;
326 break;
327 case 297000000:
328 cts = 247500;
329 break;
330 default:
331 break;
332 }
333 break;
334 default:
335 break;
336 }
337 if (ratio == 100)
338 return cts;
339 return (cts * ratio) / 100;
340}
341
342static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi, 275static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi,
343 unsigned long pixel_clk, unsigned int sample_rate, unsigned int ratio) 276 unsigned long pixel_clk, unsigned int sample_rate)
344{ 277{
278 unsigned long ftdms = pixel_clk;
345 unsigned int n, cts; 279 unsigned int n, cts;
280 u64 tmp;
346 281
347 n = hdmi_compute_n(sample_rate, pixel_clk, ratio); 282 n = hdmi_compute_n(sample_rate, pixel_clk);
348 cts = hdmi_compute_cts(sample_rate, pixel_clk, ratio); 283
349 if (!cts) { 284 /*
350 dev_err(hdmi->dev, 285 * Compute the CTS value from the N value. Note that CTS and N
351 "%s: pixel clock/sample rate not supported: %luMHz / %ukHz\n", 286 * can be up to 20 bits in total, so we need 64-bit math. Also
352 __func__, pixel_clk, sample_rate); 287 * note that our TDMS clock is not fully accurate; it is accurate
353 } 288 * to kHz. This can introduce an unnecessary remainder in the
289 * calculation below, so we don't try to warn about that.
290 */
291 tmp = (u64)ftdms * n;
292 do_div(tmp, 128 * sample_rate);
293 cts = tmp;
354 294
355 dev_dbg(hdmi->dev, "%s: samplerate=%ukHz ratio=%d pixelclk=%luMHz N=%d cts=%d\n", 295 dev_dbg(hdmi->dev, "%s: fs=%uHz ftdms=%lu.%03luMHz N=%d cts=%d\n",
356 __func__, sample_rate, ratio, pixel_clk, n, cts); 296 __func__, sample_rate, ftdms / 1000000, (ftdms / 1000) % 1000,
297 n, cts);
357 298
358 spin_lock_irq(&hdmi->audio_lock); 299 spin_lock_irq(&hdmi->audio_lock);
359 hdmi->audio_n = n; 300 hdmi->audio_n = n;
@@ -365,8 +306,7 @@ static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi,
365static void hdmi_init_clk_regenerator(struct dw_hdmi *hdmi) 306static void hdmi_init_clk_regenerator(struct dw_hdmi *hdmi)
366{ 307{
367 mutex_lock(&hdmi->audio_mutex); 308 mutex_lock(&hdmi->audio_mutex);
368 hdmi_set_clk_regenerator(hdmi, 74250000, hdmi->sample_rate, 309 hdmi_set_clk_regenerator(hdmi, 74250000, hdmi->sample_rate);
369 hdmi->ratio);
370 mutex_unlock(&hdmi->audio_mutex); 310 mutex_unlock(&hdmi->audio_mutex);
371} 311}
372 312
@@ -374,7 +314,7 @@ static void hdmi_clk_regenerator_update_pixel_clock(struct dw_hdmi *hdmi)
374{ 314{
375 mutex_lock(&hdmi->audio_mutex); 315 mutex_lock(&hdmi->audio_mutex);
376 hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock, 316 hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock,
377 hdmi->sample_rate, hdmi->ratio); 317 hdmi->sample_rate);
378 mutex_unlock(&hdmi->audio_mutex); 318 mutex_unlock(&hdmi->audio_mutex);
379} 319}
380 320
@@ -383,7 +323,7 @@ void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate)
383 mutex_lock(&hdmi->audio_mutex); 323 mutex_lock(&hdmi->audio_mutex);
384 hdmi->sample_rate = rate; 324 hdmi->sample_rate = rate;
385 hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock, 325 hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock,
386 hdmi->sample_rate, hdmi->ratio); 326 hdmi->sample_rate);
387 mutex_unlock(&hdmi->audio_mutex); 327 mutex_unlock(&hdmi->audio_mutex);
388} 328}
389EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_rate); 329EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_rate);
@@ -1063,6 +1003,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
1063 u8 inv_val; 1003 u8 inv_val;
1064 struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode; 1004 struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode;
1065 int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len; 1005 int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len;
1006 unsigned int vdisplay;
1066 1007
1067 vmode->mpixelclock = mode->clock * 1000; 1008 vmode->mpixelclock = mode->clock * 1000;
1068 1009
@@ -1102,13 +1043,29 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
1102 1043
1103 hdmi_writeb(hdmi, inv_val, HDMI_FC_INVIDCONF); 1044 hdmi_writeb(hdmi, inv_val, HDMI_FC_INVIDCONF);
1104 1045
1046 vdisplay = mode->vdisplay;
1047 vblank = mode->vtotal - mode->vdisplay;
1048 v_de_vs = mode->vsync_start - mode->vdisplay;
1049 vsync_len = mode->vsync_end - mode->vsync_start;
1050
1051 /*
1052 * When we're setting an interlaced mode, we need
1053 * to adjust the vertical timing to suit.
1054 */
1055 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
1056 vdisplay /= 2;
1057 vblank /= 2;
1058 v_de_vs /= 2;
1059 vsync_len /= 2;
1060 }
1061
1105 /* Set up horizontal active pixel width */ 1062 /* Set up horizontal active pixel width */
1106 hdmi_writeb(hdmi, mode->hdisplay >> 8, HDMI_FC_INHACTV1); 1063 hdmi_writeb(hdmi, mode->hdisplay >> 8, HDMI_FC_INHACTV1);
1107 hdmi_writeb(hdmi, mode->hdisplay, HDMI_FC_INHACTV0); 1064 hdmi_writeb(hdmi, mode->hdisplay, HDMI_FC_INHACTV0);
1108 1065
1109 /* Set up vertical active lines */ 1066 /* Set up vertical active lines */
1110 hdmi_writeb(hdmi, mode->vdisplay >> 8, HDMI_FC_INVACTV1); 1067 hdmi_writeb(hdmi, vdisplay >> 8, HDMI_FC_INVACTV1);
1111 hdmi_writeb(hdmi, mode->vdisplay, HDMI_FC_INVACTV0); 1068 hdmi_writeb(hdmi, vdisplay, HDMI_FC_INVACTV0);
1112 1069
1113 /* Set up horizontal blanking pixel region width */ 1070 /* Set up horizontal blanking pixel region width */
1114 hblank = mode->htotal - mode->hdisplay; 1071 hblank = mode->htotal - mode->hdisplay;
@@ -1116,7 +1073,6 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
1116 hdmi_writeb(hdmi, hblank, HDMI_FC_INHBLANK0); 1073 hdmi_writeb(hdmi, hblank, HDMI_FC_INHBLANK0);
1117 1074
1118 /* Set up vertical blanking pixel region width */ 1075 /* Set up vertical blanking pixel region width */
1119 vblank = mode->vtotal - mode->vdisplay;
1120 hdmi_writeb(hdmi, vblank, HDMI_FC_INVBLANK); 1076 hdmi_writeb(hdmi, vblank, HDMI_FC_INVBLANK);
1121 1077
1122 /* Set up HSYNC active edge delay width (in pixel clks) */ 1078 /* Set up HSYNC active edge delay width (in pixel clks) */
@@ -1125,7 +1081,6 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
1125 hdmi_writeb(hdmi, h_de_hs, HDMI_FC_HSYNCINDELAY0); 1081 hdmi_writeb(hdmi, h_de_hs, HDMI_FC_HSYNCINDELAY0);
1126 1082
1127 /* Set up VSYNC active edge delay (in lines) */ 1083 /* Set up VSYNC active edge delay (in lines) */
1128 v_de_vs = mode->vsync_start - mode->vdisplay;
1129 hdmi_writeb(hdmi, v_de_vs, HDMI_FC_VSYNCINDELAY); 1084 hdmi_writeb(hdmi, v_de_vs, HDMI_FC_VSYNCINDELAY);
1130 1085
1131 /* Set up HSYNC active pulse width (in pixel clks) */ 1086 /* Set up HSYNC active pulse width (in pixel clks) */
@@ -1134,7 +1089,6 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
1134 hdmi_writeb(hdmi, hsync_len, HDMI_FC_HSYNCINWIDTH0); 1089 hdmi_writeb(hdmi, hsync_len, HDMI_FC_HSYNCINWIDTH0);
1135 1090
1136 /* Set up VSYNC active edge delay (in lines) */ 1091 /* Set up VSYNC active edge delay (in lines) */
1137 vsync_len = mode->vsync_end - mode->vsync_start;
1138 hdmi_writeb(hdmi, vsync_len, HDMI_FC_VSYNCINWIDTH); 1092 hdmi_writeb(hdmi, vsync_len, HDMI_FC_VSYNCINWIDTH);
1139} 1093}
1140 1094
@@ -1302,10 +1256,11 @@ static int dw_hdmi_fb_registered(struct dw_hdmi *hdmi)
1302 HDMI_PHY_I2CM_CTLINT_ADDR); 1256 HDMI_PHY_I2CM_CTLINT_ADDR);
1303 1257
1304 /* enable cable hot plug irq */ 1258 /* enable cable hot plug irq */
1305 hdmi_writeb(hdmi, (u8)~HDMI_PHY_HPD, HDMI_PHY_MASK0); 1259 hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0);
1306 1260
1307 /* Clear Hotplug interrupts */ 1261 /* Clear Hotplug interrupts */
1308 hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD, HDMI_IH_PHY_STAT0); 1262 hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE,
1263 HDMI_IH_PHY_STAT0);
1309 1264
1310 return 0; 1265 return 0;
1311} 1266}
@@ -1364,12 +1319,61 @@ static void initialize_hdmi_ih_mutes(struct dw_hdmi *hdmi)
1364 1319
1365static void dw_hdmi_poweron(struct dw_hdmi *hdmi) 1320static void dw_hdmi_poweron(struct dw_hdmi *hdmi)
1366{ 1321{
1322 hdmi->bridge_is_on = true;
1367 dw_hdmi_setup(hdmi, &hdmi->previous_mode); 1323 dw_hdmi_setup(hdmi, &hdmi->previous_mode);
1368} 1324}
1369 1325
1370static void dw_hdmi_poweroff(struct dw_hdmi *hdmi) 1326static void dw_hdmi_poweroff(struct dw_hdmi *hdmi)
1371{ 1327{
1372 dw_hdmi_phy_disable(hdmi); 1328 dw_hdmi_phy_disable(hdmi);
1329 hdmi->bridge_is_on = false;
1330}
1331
1332static void dw_hdmi_update_power(struct dw_hdmi *hdmi)
1333{
1334 int force = hdmi->force;
1335
1336 if (hdmi->disabled) {
1337 force = DRM_FORCE_OFF;
1338 } else if (force == DRM_FORCE_UNSPECIFIED) {
1339 if (hdmi->rxsense)
1340 force = DRM_FORCE_ON;
1341 else
1342 force = DRM_FORCE_OFF;
1343 }
1344
1345 if (force == DRM_FORCE_OFF) {
1346 if (hdmi->bridge_is_on)
1347 dw_hdmi_poweroff(hdmi);
1348 } else {
1349 if (!hdmi->bridge_is_on)
1350 dw_hdmi_poweron(hdmi);
1351 }
1352}
1353
1354/*
1355 * Adjust the detection of RXSENSE according to whether we have a forced
1356 * connection mode enabled, or whether we have been disabled. There is
1357 * no point processing RXSENSE interrupts if we have a forced connection
1358 * state, or DRM has us disabled.
1359 *
1360 * We also disable rxsense interrupts when we think we're disconnected
1361 * to avoid floating TDMS signals giving false rxsense interrupts.
1362 *
1363 * Note: we still need to listen for HPD interrupts even when DRM has us
1364 * disabled so that we can detect a connect event.
1365 */
1366static void dw_hdmi_update_phy_mask(struct dw_hdmi *hdmi)
1367{
1368 u8 old_mask = hdmi->phy_mask;
1369
1370 if (hdmi->force || hdmi->disabled || !hdmi->rxsense)
1371 hdmi->phy_mask |= HDMI_PHY_RX_SENSE;
1372 else
1373 hdmi->phy_mask &= ~HDMI_PHY_RX_SENSE;
1374
1375 if (old_mask != hdmi->phy_mask)
1376 hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0);
1373} 1377}
1374 1378
1375static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge, 1379static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge,
@@ -1399,7 +1403,8 @@ static void dw_hdmi_bridge_disable(struct drm_bridge *bridge)
1399 1403
1400 mutex_lock(&hdmi->mutex); 1404 mutex_lock(&hdmi->mutex);
1401 hdmi->disabled = true; 1405 hdmi->disabled = true;
1402 dw_hdmi_poweroff(hdmi); 1406 dw_hdmi_update_power(hdmi);
1407 dw_hdmi_update_phy_mask(hdmi);
1403 mutex_unlock(&hdmi->mutex); 1408 mutex_unlock(&hdmi->mutex);
1404} 1409}
1405 1410
@@ -1408,8 +1413,9 @@ static void dw_hdmi_bridge_enable(struct drm_bridge *bridge)
1408 struct dw_hdmi *hdmi = bridge->driver_private; 1413 struct dw_hdmi *hdmi = bridge->driver_private;
1409 1414
1410 mutex_lock(&hdmi->mutex); 1415 mutex_lock(&hdmi->mutex);
1411 dw_hdmi_poweron(hdmi);
1412 hdmi->disabled = false; 1416 hdmi->disabled = false;
1417 dw_hdmi_update_power(hdmi);
1418 dw_hdmi_update_phy_mask(hdmi);
1413 mutex_unlock(&hdmi->mutex); 1419 mutex_unlock(&hdmi->mutex);
1414} 1420}
1415 1421
@@ -1424,6 +1430,12 @@ dw_hdmi_connector_detect(struct drm_connector *connector, bool force)
1424 struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi, 1430 struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
1425 connector); 1431 connector);
1426 1432
1433 mutex_lock(&hdmi->mutex);
1434 hdmi->force = DRM_FORCE_UNSPECIFIED;
1435 dw_hdmi_update_power(hdmi);
1436 dw_hdmi_update_phy_mask(hdmi);
1437 mutex_unlock(&hdmi->mutex);
1438
1427 return hdmi_readb(hdmi, HDMI_PHY_STAT0) & HDMI_PHY_HPD ? 1439 return hdmi_readb(hdmi, HDMI_PHY_STAT0) & HDMI_PHY_HPD ?
1428 connector_status_connected : connector_status_disconnected; 1440 connector_status_connected : connector_status_disconnected;
1429} 1441}
@@ -1447,6 +1459,8 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
1447 hdmi->sink_has_audio = drm_detect_monitor_audio(edid); 1459 hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
1448 drm_mode_connector_update_edid_property(connector, edid); 1460 drm_mode_connector_update_edid_property(connector, edid);
1449 ret = drm_add_edid_modes(connector, edid); 1461 ret = drm_add_edid_modes(connector, edid);
1462 /* Store the ELD */
1463 drm_edid_to_eld(connector, edid);
1450 kfree(edid); 1464 kfree(edid);
1451 } else { 1465 } else {
1452 dev_dbg(hdmi->dev, "failed to get edid\n"); 1466 dev_dbg(hdmi->dev, "failed to get edid\n");
@@ -1488,11 +1502,24 @@ static void dw_hdmi_connector_destroy(struct drm_connector *connector)
1488 drm_connector_cleanup(connector); 1502 drm_connector_cleanup(connector);
1489} 1503}
1490 1504
1505static void dw_hdmi_connector_force(struct drm_connector *connector)
1506{
1507 struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
1508 connector);
1509
1510 mutex_lock(&hdmi->mutex);
1511 hdmi->force = connector->force;
1512 dw_hdmi_update_power(hdmi);
1513 dw_hdmi_update_phy_mask(hdmi);
1514 mutex_unlock(&hdmi->mutex);
1515}
1516
1491static struct drm_connector_funcs dw_hdmi_connector_funcs = { 1517static struct drm_connector_funcs dw_hdmi_connector_funcs = {
1492 .dpms = drm_helper_connector_dpms, 1518 .dpms = drm_helper_connector_dpms,
1493 .fill_modes = drm_helper_probe_single_connector_modes, 1519 .fill_modes = drm_helper_probe_single_connector_modes,
1494 .detect = dw_hdmi_connector_detect, 1520 .detect = dw_hdmi_connector_detect,
1495 .destroy = dw_hdmi_connector_destroy, 1521 .destroy = dw_hdmi_connector_destroy,
1522 .force = dw_hdmi_connector_force,
1496}; 1523};
1497 1524
1498static struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = { 1525static struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
@@ -1525,33 +1552,69 @@ static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id)
1525static irqreturn_t dw_hdmi_irq(int irq, void *dev_id) 1552static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
1526{ 1553{
1527 struct dw_hdmi *hdmi = dev_id; 1554 struct dw_hdmi *hdmi = dev_id;
1528 u8 intr_stat; 1555 u8 intr_stat, phy_int_pol, phy_pol_mask, phy_stat;
1529 u8 phy_int_pol;
1530 1556
1531 intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0); 1557 intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0);
1532
1533 phy_int_pol = hdmi_readb(hdmi, HDMI_PHY_POL0); 1558 phy_int_pol = hdmi_readb(hdmi, HDMI_PHY_POL0);
1559 phy_stat = hdmi_readb(hdmi, HDMI_PHY_STAT0);
1560
1561 phy_pol_mask = 0;
1562 if (intr_stat & HDMI_IH_PHY_STAT0_HPD)
1563 phy_pol_mask |= HDMI_PHY_HPD;
1564 if (intr_stat & HDMI_IH_PHY_STAT0_RX_SENSE0)
1565 phy_pol_mask |= HDMI_PHY_RX_SENSE0;
1566 if (intr_stat & HDMI_IH_PHY_STAT0_RX_SENSE1)
1567 phy_pol_mask |= HDMI_PHY_RX_SENSE1;
1568 if (intr_stat & HDMI_IH_PHY_STAT0_RX_SENSE2)
1569 phy_pol_mask |= HDMI_PHY_RX_SENSE2;
1570 if (intr_stat & HDMI_IH_PHY_STAT0_RX_SENSE3)
1571 phy_pol_mask |= HDMI_PHY_RX_SENSE3;
1572
1573 if (phy_pol_mask)
1574 hdmi_modb(hdmi, ~phy_int_pol, phy_pol_mask, HDMI_PHY_POL0);
1534 1575
1535 if (intr_stat & HDMI_IH_PHY_STAT0_HPD) { 1576 /*
1536 hdmi_modb(hdmi, ~phy_int_pol, HDMI_PHY_HPD, HDMI_PHY_POL0); 1577 * RX sense tells us whether the TDMS transmitters are detecting
1578 * load - in other words, there's something listening on the
1579 * other end of the link. Use this to decide whether we should
1580 * power on the phy as HPD may be toggled by the sink to merely
1581 * ask the source to re-read the EDID.
1582 */
1583 if (intr_stat &
1584 (HDMI_IH_PHY_STAT0_RX_SENSE | HDMI_IH_PHY_STAT0_HPD)) {
1537 mutex_lock(&hdmi->mutex); 1585 mutex_lock(&hdmi->mutex);
1538 if (phy_int_pol & HDMI_PHY_HPD) { 1586 if (!hdmi->disabled && !hdmi->force) {
1539 dev_dbg(hdmi->dev, "EVENT=plugin\n"); 1587 /*
1540 1588 * If the RX sense status indicates we're disconnected,
1541 if (!hdmi->disabled) 1589 * clear the software rxsense status.
1542 dw_hdmi_poweron(hdmi); 1590 */
1543 } else { 1591 if (!(phy_stat & HDMI_PHY_RX_SENSE))
1544 dev_dbg(hdmi->dev, "EVENT=plugout\n"); 1592 hdmi->rxsense = false;
1545 1593
1546 if (!hdmi->disabled) 1594 /*
1547 dw_hdmi_poweroff(hdmi); 1595 * Only set the software rxsense status when both
1596 * rxsense and hpd indicates we're connected.
1597 * This avoids what seems to be bad behaviour in
1598 * at least iMX6S versions of the phy.
1599 */
1600 if (phy_stat & HDMI_PHY_HPD)
1601 hdmi->rxsense = true;
1602
1603 dw_hdmi_update_power(hdmi);
1604 dw_hdmi_update_phy_mask(hdmi);
1548 } 1605 }
1549 mutex_unlock(&hdmi->mutex); 1606 mutex_unlock(&hdmi->mutex);
1607 }
1608
1609 if (intr_stat & HDMI_IH_PHY_STAT0_HPD) {
1610 dev_dbg(hdmi->dev, "EVENT=%s\n",
1611 phy_int_pol & HDMI_PHY_HPD ? "plugin" : "plugout");
1550 drm_helper_hpd_irq_event(hdmi->bridge->dev); 1612 drm_helper_hpd_irq_event(hdmi->bridge->dev);
1551 } 1613 }
1552 1614
1553 hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0); 1615 hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0);
1554 hdmi_writeb(hdmi, ~HDMI_IH_PHY_STAT0_HPD, HDMI_IH_MUTE_PHY_STAT0); 1616 hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE),
1617 HDMI_IH_MUTE_PHY_STAT0);
1555 1618
1556 return IRQ_HANDLED; 1619 return IRQ_HANDLED;
1557} 1620}
@@ -1599,7 +1662,9 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
1599{ 1662{
1600 struct drm_device *drm = data; 1663 struct drm_device *drm = data;
1601 struct device_node *np = dev->of_node; 1664 struct device_node *np = dev->of_node;
1665 struct platform_device_info pdevinfo;
1602 struct device_node *ddc_node; 1666 struct device_node *ddc_node;
1667 struct dw_hdmi_audio_data audio;
1603 struct dw_hdmi *hdmi; 1668 struct dw_hdmi *hdmi;
1604 int ret; 1669 int ret;
1605 u32 val = 1; 1670 u32 val = 1;
@@ -1608,13 +1673,16 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
1608 if (!hdmi) 1673 if (!hdmi)
1609 return -ENOMEM; 1674 return -ENOMEM;
1610 1675
1676 hdmi->connector.interlace_allowed = 1;
1677
1611 hdmi->plat_data = plat_data; 1678 hdmi->plat_data = plat_data;
1612 hdmi->dev = dev; 1679 hdmi->dev = dev;
1613 hdmi->dev_type = plat_data->dev_type; 1680 hdmi->dev_type = plat_data->dev_type;
1614 hdmi->sample_rate = 48000; 1681 hdmi->sample_rate = 48000;
1615 hdmi->ratio = 100;
1616 hdmi->encoder = encoder; 1682 hdmi->encoder = encoder;
1617 hdmi->disabled = true; 1683 hdmi->disabled = true;
1684 hdmi->rxsense = true;
1685 hdmi->phy_mask = (u8)~(HDMI_PHY_HPD | HDMI_PHY_RX_SENSE);
1618 1686
1619 mutex_init(&hdmi->mutex); 1687 mutex_init(&hdmi->mutex);
1620 mutex_init(&hdmi->audio_mutex); 1688 mutex_init(&hdmi->audio_mutex);
@@ -1705,10 +1773,11 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
1705 * Configure registers related to HDMI interrupt 1773 * Configure registers related to HDMI interrupt
1706 * generation before registering IRQ. 1774 * generation before registering IRQ.
1707 */ 1775 */
1708 hdmi_writeb(hdmi, HDMI_PHY_HPD, HDMI_PHY_POL0); 1776 hdmi_writeb(hdmi, HDMI_PHY_HPD | HDMI_PHY_RX_SENSE, HDMI_PHY_POL0);
1709 1777
1710 /* Clear Hotplug interrupts */ 1778 /* Clear Hotplug interrupts */
1711 hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD, HDMI_IH_PHY_STAT0); 1779 hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE,
1780 HDMI_IH_PHY_STAT0);
1712 1781
1713 ret = dw_hdmi_fb_registered(hdmi); 1782 ret = dw_hdmi_fb_registered(hdmi);
1714 if (ret) 1783 if (ret)
@@ -1719,7 +1788,26 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
1719 goto err_iahb; 1788 goto err_iahb;
1720 1789
1721 /* Unmute interrupts */ 1790 /* Unmute interrupts */
1722 hdmi_writeb(hdmi, ~HDMI_IH_PHY_STAT0_HPD, HDMI_IH_MUTE_PHY_STAT0); 1791 hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE),
1792 HDMI_IH_MUTE_PHY_STAT0);
1793
1794 memset(&pdevinfo, 0, sizeof(pdevinfo));
1795 pdevinfo.parent = dev;
1796 pdevinfo.id = PLATFORM_DEVID_AUTO;
1797
1798 if (hdmi_readb(hdmi, HDMI_CONFIG1_ID) & HDMI_CONFIG1_AHB) {
1799 audio.phys = iores->start;
1800 audio.base = hdmi->regs;
1801 audio.irq = irq;
1802 audio.hdmi = hdmi;
1803 audio.eld = hdmi->connector.eld;
1804
1805 pdevinfo.name = "dw-hdmi-ahb-audio";
1806 pdevinfo.data = &audio;
1807 pdevinfo.size_data = sizeof(audio);
1808 pdevinfo.dma_mask = DMA_BIT_MASK(32);
1809 hdmi->audio = platform_device_register_full(&pdevinfo);
1810 }
1723 1811
1724 dev_set_drvdata(dev, hdmi); 1812 dev_set_drvdata(dev, hdmi);
1725 1813
@@ -1738,6 +1826,9 @@ void dw_hdmi_unbind(struct device *dev, struct device *master, void *data)
1738{ 1826{
1739 struct dw_hdmi *hdmi = dev_get_drvdata(dev); 1827 struct dw_hdmi *hdmi = dev_get_drvdata(dev);
1740 1828
1829 if (hdmi->audio && !IS_ERR(hdmi->audio))
1830 platform_device_unregister(hdmi->audio);
1831
1741 /* Disable all interrupts */ 1832 /* Disable all interrupts */
1742 hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0); 1833 hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
1743 1834
diff --git a/drivers/gpu/drm/bridge/dw_hdmi.h b/drivers/gpu/drm/bridge/dw_hdmi.h
index ee7f7ed2ab12..fc9a560429d6 100644
--- a/drivers/gpu/drm/bridge/dw_hdmi.h
+++ b/drivers/gpu/drm/bridge/dw_hdmi.h
@@ -545,6 +545,9 @@
545#define HDMI_I2CM_FS_SCL_LCNT_0_ADDR 0x7E12 545#define HDMI_I2CM_FS_SCL_LCNT_0_ADDR 0x7E12
546 546
547enum { 547enum {
548/* CONFIG1_ID field values */
549 HDMI_CONFIG1_AHB = 0x01,
550
548/* IH_FC_INT2 field values */ 551/* IH_FC_INT2 field values */
549 HDMI_IH_FC_INT2_OVERFLOW_MASK = 0x03, 552 HDMI_IH_FC_INT2_OVERFLOW_MASK = 0x03,
550 HDMI_IH_FC_INT2_LOW_PRIORITY_OVERFLOW = 0x02, 553 HDMI_IH_FC_INT2_LOW_PRIORITY_OVERFLOW = 0x02,
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 1b1bf2384815..0ffa3a6a206a 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -400,7 +400,6 @@ static struct i2c_driver ptn3460_driver = {
400 .remove = ptn3460_remove, 400 .remove = ptn3460_remove,
401 .driver = { 401 .driver = {
402 .name = "nxp,ptn3460", 402 .name = "nxp,ptn3460",
403 .owner = THIS_MODULE,
404 .of_match_table = ptn3460_match, 403 .of_match_table = ptn3460_match,
405 }, 404 },
406}; 405};
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 1a6607beb29f..be881e9fef8f 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -668,7 +668,6 @@ static struct i2c_driver ps8622_driver = {
668 .remove = ps8622_remove, 668 .remove = ps8622_remove,
669 .driver = { 669 .driver = {
670 .name = "ps8622", 670 .name = "ps8622",
671 .owner = THIS_MODULE,
672 .of_match_table = ps8622_devices, 671 .of_match_table = ps8622_devices,
673 }, 672 },
674}; 673};
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 4b2b4aa5033b..a10ea6aec629 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -36,8 +36,6 @@
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include "drm_legacy.h" 37#include "drm_legacy.h"
38 38
39#if __OS_HAS_AGP
40
41#include <asm/agp.h> 39#include <asm/agp.h>
42 40
43/** 41/**
@@ -502,5 +500,3 @@ drm_agp_bind_pages(struct drm_device *dev,
502 return mem; 500 return mem;
503} 501}
504EXPORT_SYMBOL(drm_agp_bind_pages); 502EXPORT_SYMBOL(drm_agp_bind_pages);
505
506#endif /* __OS_HAS_AGP */
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index f7d5166f89b2..7bb3845d9974 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -438,7 +438,8 @@ EXPORT_SYMBOL(drm_atomic_crtc_set_property);
438 * consistent behavior you must call this function rather than the 438 * consistent behavior you must call this function rather than the
439 * driver hook directly. 439 * driver hook directly.
440 */ 440 */
441int drm_atomic_crtc_get_property(struct drm_crtc *crtc, 441static int
442drm_atomic_crtc_get_property(struct drm_crtc *crtc,
442 const struct drm_crtc_state *state, 443 const struct drm_crtc_state *state,
443 struct drm_property *property, uint64_t *val) 444 struct drm_property *property, uint64_t *val)
444{ 445{
@@ -663,6 +664,25 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
663 return 0; 664 return 0;
664} 665}
665 666
667static bool
668plane_switching_crtc(struct drm_atomic_state *state,
669 struct drm_plane *plane,
670 struct drm_plane_state *plane_state)
671{
672 if (!plane->state->crtc || !plane_state->crtc)
673 return false;
674
675 if (plane->state->crtc == plane_state->crtc)
676 return false;
677
678 /* This could be refined, but currently there's no helper or driver code
679 * to implement direct switching of active planes nor userspace to take
680 * advantage of more direct plane switching without the intermediate
681 * full OFF state.
682 */
683 return true;
684}
685
666/** 686/**
667 * drm_atomic_plane_check - check plane state 687 * drm_atomic_plane_check - check plane state
668 * @plane: plane to check 688 * @plane: plane to check
@@ -734,6 +754,12 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
734 return -ENOSPC; 754 return -ENOSPC;
735 } 755 }
736 756
757 if (plane_switching_crtc(state->state, plane, state)) {
758 DRM_DEBUG_ATOMIC("[PLANE:%d] switching CRTC directly\n",
759 plane->base.id);
760 return -EINVAL;
761 }
762
737 return 0; 763 return 0;
738} 764}
739 765
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index aecb5d69bc2d..0c6f62168776 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -42,14 +42,14 @@
42 * add their own additional internal state. 42 * add their own additional internal state.
43 * 43 *
44 * This library also provides default implementations for the check callback in 44 * This library also provides default implementations for the check callback in
45 * drm_atomic_helper_check and for the commit callback with 45 * drm_atomic_helper_check() and for the commit callback with
46 * drm_atomic_helper_commit. But the individual stages and callbacks are expose 46 * drm_atomic_helper_commit(). But the individual stages and callbacks are
47 * to allow drivers to mix and match and e.g. use the plane helpers only 47 * exposed to allow drivers to mix and match and e.g. use the plane helpers only
48 * together with a driver private modeset implementation. 48 * together with a driver private modeset implementation.
49 * 49 *
50 * This library also provides implementations for all the legacy driver 50 * This library also provides implementations for all the legacy driver
51 * interfaces on top of the atomic interface. See drm_atomic_helper_set_config, 51 * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
52 * drm_atomic_helper_disable_plane, drm_atomic_helper_disable_plane and the 52 * drm_atomic_helper_disable_plane(), drm_atomic_helper_disable_plane() and the
53 * various functions to implement set_property callbacks. New drivers must not 53 * various functions to implement set_property callbacks. New drivers must not
54 * implement these functions themselves but must use the provided helpers. 54 * implement these functions themselves but must use the provided helpers.
55 */ 55 */
@@ -993,6 +993,22 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
993 * object. This can still fail when e.g. the framebuffer reservation fails. For 993 * object. This can still fail when e.g. the framebuffer reservation fails. For
994 * now this doesn't implement asynchronous commits. 994 * now this doesn't implement asynchronous commits.
995 * 995 *
996 * Note that right now this function does not support async commits, and hence
997 * driver writers must implement their own version for now. Also note that the
998 * default ordering of how the various stages are called is to match the legacy
999 * modeset helper library closest. One peculiarity of that is that it doesn't
1000 * mesh well with runtime PM at all.
1001 *
1002 * For drivers supporting runtime PM the recommended sequence is
1003 *
1004 * drm_atomic_helper_commit_modeset_disables(dev, state);
1005 *
1006 * drm_atomic_helper_commit_modeset_enables(dev, state);
1007 *
1008 * drm_atomic_helper_commit_planes(dev, state, true);
1009 *
1010 * See the kerneldoc entries for these three functions for more details.
1011 *
996 * RETURNS 1012 * RETURNS
997 * Zero for success or -errno. 1013 * Zero for success or -errno.
998 */ 1014 */
@@ -1037,7 +1053,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
1037 1053
1038 drm_atomic_helper_commit_modeset_disables(dev, state); 1054 drm_atomic_helper_commit_modeset_disables(dev, state);
1039 1055
1040 drm_atomic_helper_commit_planes(dev, state); 1056 drm_atomic_helper_commit_planes(dev, state, false);
1041 1057
1042 drm_atomic_helper_commit_modeset_enables(dev, state); 1058 drm_atomic_helper_commit_modeset_enables(dev, state);
1043 1059
@@ -1077,7 +1093,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
1077 * work item, which allows nice concurrent updates on disjoint sets of crtcs. 1093 * work item, which allows nice concurrent updates on disjoint sets of crtcs.
1078 * 1094 *
1079 * 3. The software state is updated synchronously with 1095 * 3. The software state is updated synchronously with
1080 * drm_atomic_helper_swap_state. Doing this under the protection of all modeset 1096 * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
1081 * locks means concurrent callers never see inconsistent state. And doing this 1097 * locks means concurrent callers never see inconsistent state. And doing this
1082 * while it's guaranteed that no relevant async worker runs means that async 1098 * while it's guaranteed that no relevant async worker runs means that async
1083 * workers do not need grab any locks. Actually they must not grab locks, for 1099 * workers do not need grab any locks. Actually they must not grab locks, for
@@ -1111,17 +1127,14 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
1111 const struct drm_plane_helper_funcs *funcs; 1127 const struct drm_plane_helper_funcs *funcs;
1112 struct drm_plane *plane = state->planes[i]; 1128 struct drm_plane *plane = state->planes[i];
1113 struct drm_plane_state *plane_state = state->plane_states[i]; 1129 struct drm_plane_state *plane_state = state->plane_states[i];
1114 struct drm_framebuffer *fb;
1115 1130
1116 if (!plane) 1131 if (!plane)
1117 continue; 1132 continue;
1118 1133
1119 funcs = plane->helper_private; 1134 funcs = plane->helper_private;
1120 1135
1121 fb = plane_state->fb; 1136 if (funcs->prepare_fb) {
1122 1137 ret = funcs->prepare_fb(plane, plane_state);
1123 if (fb && funcs->prepare_fb) {
1124 ret = funcs->prepare_fb(plane, fb, plane_state);
1125 if (ret) 1138 if (ret)
1126 goto fail; 1139 goto fail;
1127 } 1140 }
@@ -1134,17 +1147,14 @@ fail:
1134 const struct drm_plane_helper_funcs *funcs; 1147 const struct drm_plane_helper_funcs *funcs;
1135 struct drm_plane *plane = state->planes[i]; 1148 struct drm_plane *plane = state->planes[i];
1136 struct drm_plane_state *plane_state = state->plane_states[i]; 1149 struct drm_plane_state *plane_state = state->plane_states[i];
1137 struct drm_framebuffer *fb;
1138 1150
1139 if (!plane) 1151 if (!plane)
1140 continue; 1152 continue;
1141 1153
1142 funcs = plane->helper_private; 1154 funcs = plane->helper_private;
1143 1155
1144 fb = state->plane_states[i]->fb; 1156 if (funcs->cleanup_fb)
1145 1157 funcs->cleanup_fb(plane, plane_state);
1146 if (fb && funcs->cleanup_fb)
1147 funcs->cleanup_fb(plane, fb, plane_state);
1148 1158
1149 } 1159 }
1150 1160
@@ -1152,10 +1162,16 @@ fail:
1152} 1162}
1153EXPORT_SYMBOL(drm_atomic_helper_prepare_planes); 1163EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
1154 1164
1165bool plane_crtc_active(struct drm_plane_state *state)
1166{
1167 return state->crtc && state->crtc->state->active;
1168}
1169
1155/** 1170/**
1156 * drm_atomic_helper_commit_planes - commit plane state 1171 * drm_atomic_helper_commit_planes - commit plane state
1157 * @dev: DRM device 1172 * @dev: DRM device
1158 * @old_state: atomic state object with old state structures 1173 * @old_state: atomic state object with old state structures
1174 * @active_only: Only commit on active CRTC if set
1159 * 1175 *
1160 * This function commits the new plane state using the plane and atomic helper 1176 * This function commits the new plane state using the plane and atomic helper
1161 * functions for planes and crtcs. It assumes that the atomic state has already 1177 * functions for planes and crtcs. It assumes that the atomic state has already
@@ -1168,9 +1184,26 @@ EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
1168 * Note that this function does all plane updates across all CRTCs in one step. 1184 * Note that this function does all plane updates across all CRTCs in one step.
1169 * If the hardware can't support this approach look at 1185 * If the hardware can't support this approach look at
1170 * drm_atomic_helper_commit_planes_on_crtc() instead. 1186 * drm_atomic_helper_commit_planes_on_crtc() instead.
1187 *
1188 * Plane parameters can be updated by applications while the associated CRTC is
1189 * disabled. The DRM/KMS core will store the parameters in the plane state,
1190 * which will be available to the driver when the CRTC is turned on. As a result
1191 * most drivers don't need to be immediately notified of plane updates for a
1192 * disabled CRTC.
1193 *
1194 * Unless otherwise needed, drivers are advised to set the @active_only
1195 * parameters to true in order not to receive plane update notifications related
1196 * to a disabled CRTC. This avoids the need to manually ignore plane updates in
1197 * driver code when the driver and/or hardware can't or just don't need to deal
1198 * with updates on disabled CRTCs, for example when supporting runtime PM.
1199 *
1200 * The drm_atomic_helper_commit() default implementation only sets @active_only
1201 * to false to most closely match the behaviour of the legacy helpers. This should
1202 * not be copied blindly by drivers.
1171 */ 1203 */
1172void drm_atomic_helper_commit_planes(struct drm_device *dev, 1204void drm_atomic_helper_commit_planes(struct drm_device *dev,
1173 struct drm_atomic_state *old_state) 1205 struct drm_atomic_state *old_state,
1206 bool active_only)
1174{ 1207{
1175 struct drm_crtc *crtc; 1208 struct drm_crtc *crtc;
1176 struct drm_crtc_state *old_crtc_state; 1209 struct drm_crtc_state *old_crtc_state;
@@ -1186,25 +1219,43 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
1186 if (!funcs || !funcs->atomic_begin) 1219 if (!funcs || !funcs->atomic_begin)
1187 continue; 1220 continue;
1188 1221
1222 if (active_only && !crtc->state->active)
1223 continue;
1224
1189 funcs->atomic_begin(crtc, old_crtc_state); 1225 funcs->atomic_begin(crtc, old_crtc_state);
1190 } 1226 }
1191 1227
1192 for_each_plane_in_state(old_state, plane, old_plane_state, i) { 1228 for_each_plane_in_state(old_state, plane, old_plane_state, i) {
1193 const struct drm_plane_helper_funcs *funcs; 1229 const struct drm_plane_helper_funcs *funcs;
1230 bool disabling;
1194 1231
1195 funcs = plane->helper_private; 1232 funcs = plane->helper_private;
1196 1233
1197 if (!funcs) 1234 if (!funcs)
1198 continue; 1235 continue;
1199 1236
1237 disabling = drm_atomic_plane_disabling(plane, old_plane_state);
1238
1239 if (active_only) {
1240 /*
1241 * Skip planes related to inactive CRTCs. If the plane
1242 * is enabled use the state of the current CRTC. If the
1243 * plane is being disabled use the state of the old
1244 * CRTC to avoid skipping planes being disabled on an
1245 * active CRTC.
1246 */
1247 if (!disabling && !plane_crtc_active(plane->state))
1248 continue;
1249 if (disabling && !plane_crtc_active(old_plane_state))
1250 continue;
1251 }
1252
1200 /* 1253 /*
1201 * Special-case disabling the plane if drivers support it. 1254 * Special-case disabling the plane if drivers support it.
1202 */ 1255 */
1203 if (drm_atomic_plane_disabling(plane, old_plane_state) && 1256 if (disabling && funcs->atomic_disable)
1204 funcs->atomic_disable)
1205 funcs->atomic_disable(plane, old_plane_state); 1257 funcs->atomic_disable(plane, old_plane_state);
1206 else if (plane->state->crtc || 1258 else if (plane->state->crtc || disabling)
1207 drm_atomic_plane_disabling(plane, old_plane_state))
1208 funcs->atomic_update(plane, old_plane_state); 1259 funcs->atomic_update(plane, old_plane_state);
1209 } 1260 }
1210 1261
@@ -1216,6 +1267,9 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
1216 if (!funcs || !funcs->atomic_flush) 1267 if (!funcs || !funcs->atomic_flush)
1217 continue; 1268 continue;
1218 1269
1270 if (active_only && !crtc->state->active)
1271 continue;
1272
1219 funcs->atomic_flush(crtc, old_crtc_state); 1273 funcs->atomic_flush(crtc, old_crtc_state);
1220 } 1274 }
1221} 1275}
@@ -1300,14 +1354,11 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
1300 1354
1301 for_each_plane_in_state(old_state, plane, plane_state, i) { 1355 for_each_plane_in_state(old_state, plane, plane_state, i) {
1302 const struct drm_plane_helper_funcs *funcs; 1356 const struct drm_plane_helper_funcs *funcs;
1303 struct drm_framebuffer *old_fb;
1304 1357
1305 funcs = plane->helper_private; 1358 funcs = plane->helper_private;
1306 1359
1307 old_fb = plane_state->fb; 1360 if (funcs->cleanup_fb)
1308 1361 funcs->cleanup_fb(plane, plane_state);
1309 if (old_fb && funcs->cleanup_fb)
1310 funcs->cleanup_fb(plane, old_fb, plane_state);
1311 } 1362 }
1312} 1363}
1313EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes); 1364EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
@@ -1334,7 +1385,7 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
1334 * 1385 *
1335 * 4. Actually commit the hardware state. 1386 * 4. Actually commit the hardware state.
1336 * 1387 *
1337 * 5. Call drm_atomic_helper_cleanup_planes with @state, which since step 3 1388 * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
1338 * contains the old state. Also do any other cleanup required with that state. 1389 * contains the old state. Also do any other cleanup required with that state.
1339 */ 1390 */
1340void drm_atomic_helper_swap_state(struct drm_device *dev, 1391void drm_atomic_helper_swap_state(struct drm_device *dev,
@@ -1502,21 +1553,9 @@ retry:
1502 goto fail; 1553 goto fail;
1503 } 1554 }
1504 1555
1505 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); 1556 ret = __drm_atomic_helper_disable_plane(plane, plane_state);
1506 if (ret != 0) 1557 if (ret != 0)
1507 goto fail; 1558 goto fail;
1508 drm_atomic_set_fb_for_plane(plane_state, NULL);
1509 plane_state->crtc_x = 0;
1510 plane_state->crtc_y = 0;
1511 plane_state->crtc_h = 0;
1512 plane_state->crtc_w = 0;
1513 plane_state->src_x = 0;
1514 plane_state->src_y = 0;
1515 plane_state->src_h = 0;
1516 plane_state->src_w = 0;
1517
1518 if (plane == plane->crtc->cursor)
1519 state->legacy_cursor_update = true;
1520 1559
1521 ret = drm_atomic_commit(state); 1560 ret = drm_atomic_commit(state);
1522 if (ret != 0) 1561 if (ret != 0)
@@ -1546,6 +1585,32 @@ backoff:
1546} 1585}
1547EXPORT_SYMBOL(drm_atomic_helper_disable_plane); 1586EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
1548 1587
1588/* just used from fb-helper and atomic-helper: */
1589int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
1590 struct drm_plane_state *plane_state)
1591{
1592 int ret;
1593
1594 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
1595 if (ret != 0)
1596 return ret;
1597
1598 drm_atomic_set_fb_for_plane(plane_state, NULL);
1599 plane_state->crtc_x = 0;
1600 plane_state->crtc_y = 0;
1601 plane_state->crtc_h = 0;
1602 plane_state->crtc_w = 0;
1603 plane_state->src_x = 0;
1604 plane_state->src_y = 0;
1605 plane_state->src_h = 0;
1606 plane_state->src_w = 0;
1607
1608 if (plane->crtc && (plane == plane->crtc->cursor))
1609 plane_state->state->legacy_cursor_update = true;
1610
1611 return 0;
1612}
1613
1549static int update_output_state(struct drm_atomic_state *state, 1614static int update_output_state(struct drm_atomic_state *state,
1550 struct drm_mode_set *set) 1615 struct drm_mode_set *set)
1551{ 1616{
@@ -1629,8 +1694,6 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set)
1629{ 1694{
1630 struct drm_atomic_state *state; 1695 struct drm_atomic_state *state;
1631 struct drm_crtc *crtc = set->crtc; 1696 struct drm_crtc *crtc = set->crtc;
1632 struct drm_crtc_state *crtc_state;
1633 struct drm_plane_state *primary_state;
1634 int ret = 0; 1697 int ret = 0;
1635 1698
1636 state = drm_atomic_state_alloc(crtc->dev); 1699 state = drm_atomic_state_alloc(crtc->dev);
@@ -1639,17 +1702,54 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set)
1639 1702
1640 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc); 1703 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
1641retry: 1704retry:
1642 crtc_state = drm_atomic_get_crtc_state(state, crtc); 1705 ret = __drm_atomic_helper_set_config(set, state);
1643 if (IS_ERR(crtc_state)) { 1706 if (ret != 0)
1644 ret = PTR_ERR(crtc_state);
1645 goto fail; 1707 goto fail;
1646 }
1647 1708
1648 primary_state = drm_atomic_get_plane_state(state, crtc->primary); 1709 ret = drm_atomic_commit(state);
1649 if (IS_ERR(primary_state)) { 1710 if (ret != 0)
1650 ret = PTR_ERR(primary_state);
1651 goto fail; 1711 goto fail;
1652 } 1712
1713 /* Driver takes ownership of state on successful commit. */
1714 return 0;
1715fail:
1716 if (ret == -EDEADLK)
1717 goto backoff;
1718
1719 drm_atomic_state_free(state);
1720
1721 return ret;
1722backoff:
1723 drm_atomic_state_clear(state);
1724 drm_atomic_legacy_backoff(state);
1725
1726 /*
1727 * Someone might have exchanged the framebuffer while we dropped locks
1728 * in the backoff code. We need to fix up the fb refcount tracking the
1729 * core does for us.
1730 */
1731 crtc->primary->old_fb = crtc->primary->fb;
1732
1733 goto retry;
1734}
1735EXPORT_SYMBOL(drm_atomic_helper_set_config);
1736
1737/* just used from fb-helper and atomic-helper: */
1738int __drm_atomic_helper_set_config(struct drm_mode_set *set,
1739 struct drm_atomic_state *state)
1740{
1741 struct drm_crtc_state *crtc_state;
1742 struct drm_plane_state *primary_state;
1743 struct drm_crtc *crtc = set->crtc;
1744 int ret;
1745
1746 crtc_state = drm_atomic_get_crtc_state(state, crtc);
1747 if (IS_ERR(crtc_state))
1748 return PTR_ERR(crtc_state);
1749
1750 primary_state = drm_atomic_get_plane_state(state, crtc->primary);
1751 if (IS_ERR(primary_state))
1752 return PTR_ERR(primary_state);
1653 1753
1654 if (!set->mode) { 1754 if (!set->mode) {
1655 WARN_ON(set->fb); 1755 WARN_ON(set->fb);
@@ -1657,13 +1757,13 @@ retry:
1657 1757
1658 ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL); 1758 ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
1659 if (ret != 0) 1759 if (ret != 0)
1660 goto fail; 1760 return ret;
1661 1761
1662 crtc_state->active = false; 1762 crtc_state->active = false;
1663 1763
1664 ret = drm_atomic_set_crtc_for_plane(primary_state, NULL); 1764 ret = drm_atomic_set_crtc_for_plane(primary_state, NULL);
1665 if (ret != 0) 1765 if (ret != 0)
1666 goto fail; 1766 return ret;
1667 1767
1668 drm_atomic_set_fb_for_plane(primary_state, NULL); 1768 drm_atomic_set_fb_for_plane(primary_state, NULL);
1669 1769
@@ -1675,13 +1775,14 @@ retry:
1675 1775
1676 ret = drm_atomic_set_mode_for_crtc(crtc_state, set->mode); 1776 ret = drm_atomic_set_mode_for_crtc(crtc_state, set->mode);
1677 if (ret != 0) 1777 if (ret != 0)
1678 goto fail; 1778 return ret;
1679 1779
1680 crtc_state->active = true; 1780 crtc_state->active = true;
1681 1781
1682 ret = drm_atomic_set_crtc_for_plane(primary_state, crtc); 1782 ret = drm_atomic_set_crtc_for_plane(primary_state, crtc);
1683 if (ret != 0) 1783 if (ret != 0)
1684 goto fail; 1784 return ret;
1785
1685 drm_atomic_set_fb_for_plane(primary_state, set->fb); 1786 drm_atomic_set_fb_for_plane(primary_state, set->fb);
1686 primary_state->crtc_x = 0; 1787 primary_state->crtc_x = 0;
1687 primary_state->crtc_y = 0; 1788 primary_state->crtc_y = 0;
@@ -1689,41 +1790,21 @@ retry:
1689 primary_state->crtc_w = set->mode->hdisplay; 1790 primary_state->crtc_w = set->mode->hdisplay;
1690 primary_state->src_x = set->x << 16; 1791 primary_state->src_x = set->x << 16;
1691 primary_state->src_y = set->y << 16; 1792 primary_state->src_y = set->y << 16;
1692 primary_state->src_h = set->mode->vdisplay << 16; 1793 if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
1693 primary_state->src_w = set->mode->hdisplay << 16; 1794 primary_state->src_h = set->mode->hdisplay << 16;
1795 primary_state->src_w = set->mode->vdisplay << 16;
1796 } else {
1797 primary_state->src_h = set->mode->vdisplay << 16;
1798 primary_state->src_w = set->mode->hdisplay << 16;
1799 }
1694 1800
1695commit: 1801commit:
1696 ret = update_output_state(state, set); 1802 ret = update_output_state(state, set);
1697 if (ret) 1803 if (ret)
1698 goto fail; 1804 return ret;
1699
1700 ret = drm_atomic_commit(state);
1701 if (ret != 0)
1702 goto fail;
1703 1805
1704 /* Driver takes ownership of state on successful commit. */
1705 return 0; 1806 return 0;
1706fail:
1707 if (ret == -EDEADLK)
1708 goto backoff;
1709
1710 drm_atomic_state_free(state);
1711
1712 return ret;
1713backoff:
1714 drm_atomic_state_clear(state);
1715 drm_atomic_legacy_backoff(state);
1716
1717 /*
1718 * Someone might have exchanged the framebuffer while we dropped locks
1719 * in the backoff code. We need to fix up the fb refcount tracking the
1720 * core does for us.
1721 */
1722 crtc->primary->old_fb = crtc->primary->fb;
1723
1724 goto retry;
1725} 1807}
1726EXPORT_SYMBOL(drm_atomic_helper_set_config);
1727 1808
1728/** 1809/**
1729 * drm_atomic_helper_crtc_set_property - helper for crtc properties 1810 * drm_atomic_helper_crtc_set_property - helper for crtc properties
@@ -2333,6 +2414,84 @@ drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
2333EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state); 2414EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
2334 2415
2335/** 2416/**
2417 * drm_atomic_helper_duplicate_state - duplicate an atomic state object
2418 * @dev: DRM device
2419 * @ctx: lock acquisition context
2420 *
2421 * Makes a copy of the current atomic state by looping over all objects and
2422 * duplicating their respective states.
2423 *
2424 * Note that this treats atomic state as persistent between save and restore.
2425 * Drivers must make sure that this is possible and won't result in confusion
2426 * or erroneous behaviour.
2427 *
2428 * Note that if callers haven't already acquired all modeset locks this might
2429 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
2430 *
2431 * Returns:
2432 * A pointer to the copy of the atomic state object on success or an
2433 * ERR_PTR()-encoded error code on failure.
2434 */
2435struct drm_atomic_state *
2436drm_atomic_helper_duplicate_state(struct drm_device *dev,
2437 struct drm_modeset_acquire_ctx *ctx)
2438{
2439 struct drm_atomic_state *state;
2440 struct drm_connector *conn;
2441 struct drm_plane *plane;
2442 struct drm_crtc *crtc;
2443 int err = 0;
2444
2445 state = drm_atomic_state_alloc(dev);
2446 if (!state)
2447 return ERR_PTR(-ENOMEM);
2448
2449 state->acquire_ctx = ctx;
2450
2451 drm_for_each_crtc(crtc, dev) {
2452 struct drm_crtc_state *crtc_state;
2453
2454 crtc_state = drm_atomic_get_crtc_state(state, crtc);
2455 if (IS_ERR(crtc_state)) {
2456 err = PTR_ERR(crtc_state);
2457 goto free;
2458 }
2459 }
2460
2461 drm_for_each_plane(plane, dev) {
2462 struct drm_plane_state *plane_state;
2463
2464 plane_state = drm_atomic_get_plane_state(state, plane);
2465 if (IS_ERR(plane_state)) {
2466 err = PTR_ERR(plane_state);
2467 goto free;
2468 }
2469 }
2470
2471 drm_for_each_connector(conn, dev) {
2472 struct drm_connector_state *conn_state;
2473
2474 conn_state = drm_atomic_get_connector_state(state, conn);
2475 if (IS_ERR(conn_state)) {
2476 err = PTR_ERR(conn_state);
2477 goto free;
2478 }
2479 }
2480
2481 /* clear the acquire context so that it isn't accidentally reused */
2482 state->acquire_ctx = NULL;
2483
2484free:
2485 if (err < 0) {
2486 drm_atomic_state_free(state);
2487 state = ERR_PTR(err);
2488 }
2489
2490 return state;
2491}
2492EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
2493
2494/**
2336 * __drm_atomic_helper_connector_destroy_state - release connector state 2495 * __drm_atomic_helper_connector_destroy_state - release connector state
2337 * @connector: connector object 2496 * @connector: connector object
2338 * @state: connector state object to release 2497 * @state: connector state object to release
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 569064a00693..f1a204d253cc 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -582,7 +582,7 @@ static void drm_cleanup_buf_error(struct drm_device * dev,
582 } 582 }
583} 583}
584 584
585#if __OS_HAS_AGP 585#if IS_ENABLED(CONFIG_AGP)
586/** 586/**
587 * Add AGP buffers for DMA transfers. 587 * Add AGP buffers for DMA transfers.
588 * 588 *
@@ -756,7 +756,7 @@ int drm_legacy_addbufs_agp(struct drm_device *dev,
756 return 0; 756 return 0;
757} 757}
758EXPORT_SYMBOL(drm_legacy_addbufs_agp); 758EXPORT_SYMBOL(drm_legacy_addbufs_agp);
759#endif /* __OS_HAS_AGP */ 759#endif /* CONFIG_AGP */
760 760
761int drm_legacy_addbufs_pci(struct drm_device *dev, 761int drm_legacy_addbufs_pci(struct drm_device *dev,
762 struct drm_buf_desc *request) 762 struct drm_buf_desc *request)
@@ -1145,7 +1145,7 @@ int drm_legacy_addbufs(struct drm_device *dev, void *data,
1145 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1145 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1146 return -EINVAL; 1146 return -EINVAL;
1147 1147
1148#if __OS_HAS_AGP 1148#if IS_ENABLED(CONFIG_AGP)
1149 if (request->flags & _DRM_AGP_BUFFER) 1149 if (request->flags & _DRM_AGP_BUFFER)
1150 ret = drm_legacy_addbufs_agp(dev, request); 1150 ret = drm_legacy_addbufs_agp(dev, request);
1151 else 1151 else
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 8328e7059205..24c5434abd1c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -306,8 +306,7 @@ static int drm_mode_object_get_reg(struct drm_device *dev,
306 * reference counted modeset objects like framebuffers. 306 * reference counted modeset objects like framebuffers.
307 * 307 *
308 * Returns: 308 * Returns:
309 * New unique (relative to other objects in @dev) integer identifier for the 309 * Zero on success, error code on failure.
310 * object.
311 */ 310 */
312int drm_mode_object_get(struct drm_device *dev, 311int drm_mode_object_get(struct drm_device *dev,
313 struct drm_mode_object *obj, uint32_t obj_type) 312 struct drm_mode_object *obj, uint32_t obj_type)
@@ -423,7 +422,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
423out: 422out:
424 mutex_unlock(&dev->mode_config.fb_lock); 423 mutex_unlock(&dev->mode_config.fb_lock);
425 424
426 return 0; 425 return ret;
427} 426}
428EXPORT_SYMBOL(drm_framebuffer_init); 427EXPORT_SYMBOL(drm_framebuffer_init);
429 428
@@ -538,7 +537,12 @@ EXPORT_SYMBOL(drm_framebuffer_reference);
538 */ 537 */
539void drm_framebuffer_unregister_private(struct drm_framebuffer *fb) 538void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
540{ 539{
541 struct drm_device *dev = fb->dev; 540 struct drm_device *dev;
541
542 if (!fb)
543 return;
544
545 dev = fb->dev;
542 546
543 mutex_lock(&dev->mode_config.fb_lock); 547 mutex_lock(&dev->mode_config.fb_lock);
544 /* Mark fb as reaped and drop idr ref. */ 548 /* Mark fb as reaped and drop idr ref. */
@@ -589,12 +593,17 @@ EXPORT_SYMBOL(drm_framebuffer_cleanup);
589 */ 593 */
590void drm_framebuffer_remove(struct drm_framebuffer *fb) 594void drm_framebuffer_remove(struct drm_framebuffer *fb)
591{ 595{
592 struct drm_device *dev = fb->dev; 596 struct drm_device *dev;
593 struct drm_crtc *crtc; 597 struct drm_crtc *crtc;
594 struct drm_plane *plane; 598 struct drm_plane *plane;
595 struct drm_mode_set set; 599 struct drm_mode_set set;
596 int ret; 600 int ret;
597 601
602 if (!fb)
603 return;
604
605 dev = fb->dev;
606
598 WARN_ON(!list_empty(&fb->filp_head)); 607 WARN_ON(!list_empty(&fb->filp_head));
599 608
600 /* 609 /*
@@ -667,7 +676,6 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
667 676
668 crtc->dev = dev; 677 crtc->dev = dev;
669 crtc->funcs = funcs; 678 crtc->funcs = funcs;
670 crtc->invert_dimensions = false;
671 679
672 drm_modeset_lock_init(&crtc->mutex); 680 drm_modeset_lock_init(&crtc->mutex);
673 ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC); 681 ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
@@ -1509,7 +1517,7 @@ EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
1509 */ 1517 */
1510int drm_mode_create_tv_properties(struct drm_device *dev, 1518int drm_mode_create_tv_properties(struct drm_device *dev,
1511 unsigned int num_modes, 1519 unsigned int num_modes,
1512 char *modes[]) 1520 const char * const modes[])
1513{ 1521{
1514 struct drm_property *tv_selector; 1522 struct drm_property *tv_selector;
1515 struct drm_property *tv_subconnector; 1523 struct drm_property *tv_subconnector;
@@ -1525,6 +1533,9 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
1525 "select subconnector", 1533 "select subconnector",
1526 drm_tv_select_enum_list, 1534 drm_tv_select_enum_list,
1527 ARRAY_SIZE(drm_tv_select_enum_list)); 1535 ARRAY_SIZE(drm_tv_select_enum_list));
1536 if (!tv_selector)
1537 goto nomem;
1538
1528 dev->mode_config.tv_select_subconnector_property = tv_selector; 1539 dev->mode_config.tv_select_subconnector_property = tv_selector;
1529 1540
1530 tv_subconnector = 1541 tv_subconnector =
@@ -1532,6 +1543,8 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
1532 "subconnector", 1543 "subconnector",
1533 drm_tv_subconnector_enum_list, 1544 drm_tv_subconnector_enum_list,
1534 ARRAY_SIZE(drm_tv_subconnector_enum_list)); 1545 ARRAY_SIZE(drm_tv_subconnector_enum_list));
1546 if (!tv_subconnector)
1547 goto nomem;
1535 dev->mode_config.tv_subconnector_property = tv_subconnector; 1548 dev->mode_config.tv_subconnector_property = tv_subconnector;
1536 1549
1537 /* 1550 /*
@@ -1539,42 +1552,67 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
1539 */ 1552 */
1540 dev->mode_config.tv_left_margin_property = 1553 dev->mode_config.tv_left_margin_property =
1541 drm_property_create_range(dev, 0, "left margin", 0, 100); 1554 drm_property_create_range(dev, 0, "left margin", 0, 100);
1555 if (!dev->mode_config.tv_left_margin_property)
1556 goto nomem;
1542 1557
1543 dev->mode_config.tv_right_margin_property = 1558 dev->mode_config.tv_right_margin_property =
1544 drm_property_create_range(dev, 0, "right margin", 0, 100); 1559 drm_property_create_range(dev, 0, "right margin", 0, 100);
1560 if (!dev->mode_config.tv_right_margin_property)
1561 goto nomem;
1545 1562
1546 dev->mode_config.tv_top_margin_property = 1563 dev->mode_config.tv_top_margin_property =
1547 drm_property_create_range(dev, 0, "top margin", 0, 100); 1564 drm_property_create_range(dev, 0, "top margin", 0, 100);
1565 if (!dev->mode_config.tv_top_margin_property)
1566 goto nomem;
1548 1567
1549 dev->mode_config.tv_bottom_margin_property = 1568 dev->mode_config.tv_bottom_margin_property =
1550 drm_property_create_range(dev, 0, "bottom margin", 0, 100); 1569 drm_property_create_range(dev, 0, "bottom margin", 0, 100);
1570 if (!dev->mode_config.tv_bottom_margin_property)
1571 goto nomem;
1551 1572
1552 dev->mode_config.tv_mode_property = 1573 dev->mode_config.tv_mode_property =
1553 drm_property_create(dev, DRM_MODE_PROP_ENUM, 1574 drm_property_create(dev, DRM_MODE_PROP_ENUM,
1554 "mode", num_modes); 1575 "mode", num_modes);
1576 if (!dev->mode_config.tv_mode_property)
1577 goto nomem;
1578
1555 for (i = 0; i < num_modes; i++) 1579 for (i = 0; i < num_modes; i++)
1556 drm_property_add_enum(dev->mode_config.tv_mode_property, i, 1580 drm_property_add_enum(dev->mode_config.tv_mode_property, i,
1557 i, modes[i]); 1581 i, modes[i]);
1558 1582
1559 dev->mode_config.tv_brightness_property = 1583 dev->mode_config.tv_brightness_property =
1560 drm_property_create_range(dev, 0, "brightness", 0, 100); 1584 drm_property_create_range(dev, 0, "brightness", 0, 100);
1585 if (!dev->mode_config.tv_brightness_property)
1586 goto nomem;
1561 1587
1562 dev->mode_config.tv_contrast_property = 1588 dev->mode_config.tv_contrast_property =
1563 drm_property_create_range(dev, 0, "contrast", 0, 100); 1589 drm_property_create_range(dev, 0, "contrast", 0, 100);
1590 if (!dev->mode_config.tv_contrast_property)
1591 goto nomem;
1564 1592
1565 dev->mode_config.tv_flicker_reduction_property = 1593 dev->mode_config.tv_flicker_reduction_property =
1566 drm_property_create_range(dev, 0, "flicker reduction", 0, 100); 1594 drm_property_create_range(dev, 0, "flicker reduction", 0, 100);
1595 if (!dev->mode_config.tv_flicker_reduction_property)
1596 goto nomem;
1567 1597
1568 dev->mode_config.tv_overscan_property = 1598 dev->mode_config.tv_overscan_property =
1569 drm_property_create_range(dev, 0, "overscan", 0, 100); 1599 drm_property_create_range(dev, 0, "overscan", 0, 100);
1600 if (!dev->mode_config.tv_overscan_property)
1601 goto nomem;
1570 1602
1571 dev->mode_config.tv_saturation_property = 1603 dev->mode_config.tv_saturation_property =
1572 drm_property_create_range(dev, 0, "saturation", 0, 100); 1604 drm_property_create_range(dev, 0, "saturation", 0, 100);
1605 if (!dev->mode_config.tv_saturation_property)
1606 goto nomem;
1573 1607
1574 dev->mode_config.tv_hue_property = 1608 dev->mode_config.tv_hue_property =
1575 drm_property_create_range(dev, 0, "hue", 0, 100); 1609 drm_property_create_range(dev, 0, "hue", 0, 100);
1610 if (!dev->mode_config.tv_hue_property)
1611 goto nomem;
1576 1612
1577 return 0; 1613 return 0;
1614nomem:
1615 return -ENOMEM;
1578} 1616}
1579EXPORT_SYMBOL(drm_mode_create_tv_properties); 1617EXPORT_SYMBOL(drm_mode_create_tv_properties);
1580 1618
@@ -2276,6 +2314,32 @@ int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format)
2276 return -EINVAL; 2314 return -EINVAL;
2277} 2315}
2278 2316
2317static int check_src_coords(uint32_t src_x, uint32_t src_y,
2318 uint32_t src_w, uint32_t src_h,
2319 const struct drm_framebuffer *fb)
2320{
2321 unsigned int fb_width, fb_height;
2322
2323 fb_width = fb->width << 16;
2324 fb_height = fb->height << 16;
2325
2326 /* Make sure source coordinates are inside the fb. */
2327 if (src_w > fb_width ||
2328 src_x > fb_width - src_w ||
2329 src_h > fb_height ||
2330 src_y > fb_height - src_h) {
2331 DRM_DEBUG_KMS("Invalid source coordinates "
2332 "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
2333 src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
2334 src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
2335 src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
2336 src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
2337 return -ENOSPC;
2338 }
2339
2340 return 0;
2341}
2342
2279/* 2343/*
2280 * setplane_internal - setplane handler for internal callers 2344 * setplane_internal - setplane handler for internal callers
2281 * 2345 *
@@ -2295,7 +2359,6 @@ static int __setplane_internal(struct drm_plane *plane,
2295 uint32_t src_w, uint32_t src_h) 2359 uint32_t src_w, uint32_t src_h)
2296{ 2360{
2297 int ret = 0; 2361 int ret = 0;
2298 unsigned int fb_width, fb_height;
2299 2362
2300 /* No fb means shut it down */ 2363 /* No fb means shut it down */
2301 if (!fb) { 2364 if (!fb) {
@@ -2332,27 +2395,13 @@ static int __setplane_internal(struct drm_plane *plane,
2332 crtc_y > INT_MAX - (int32_t) crtc_h) { 2395 crtc_y > INT_MAX - (int32_t) crtc_h) {
2333 DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n", 2396 DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
2334 crtc_w, crtc_h, crtc_x, crtc_y); 2397 crtc_w, crtc_h, crtc_x, crtc_y);
2335 return -ERANGE; 2398 ret = -ERANGE;
2399 goto out;
2336 } 2400 }
2337 2401
2338 2402 ret = check_src_coords(src_x, src_y, src_w, src_h, fb);
2339 fb_width = fb->width << 16; 2403 if (ret)
2340 fb_height = fb->height << 16;
2341
2342 /* Make sure source coordinates are inside the fb. */
2343 if (src_w > fb_width ||
2344 src_x > fb_width - src_w ||
2345 src_h > fb_height ||
2346 src_y > fb_height - src_h) {
2347 DRM_DEBUG_KMS("Invalid source coordinates "
2348 "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
2349 src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
2350 src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
2351 src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
2352 src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
2353 ret = -ENOSPC;
2354 goto out; 2404 goto out;
2355 }
2356 2405
2357 plane->old_fb = plane->fb; 2406 plane->old_fb = plane->fb;
2358 ret = plane->funcs->update_plane(plane, crtc, fb, 2407 ret = plane->funcs->update_plane(plane, crtc, fb,
@@ -2543,20 +2592,13 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc,
2543 2592
2544 drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay); 2593 drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
2545 2594
2546 if (crtc->invert_dimensions) 2595 if (crtc->state &&
2596 crtc->primary->state->rotation & (BIT(DRM_ROTATE_90) |
2597 BIT(DRM_ROTATE_270)))
2547 swap(hdisplay, vdisplay); 2598 swap(hdisplay, vdisplay);
2548 2599
2549 if (hdisplay > fb->width || 2600 return check_src_coords(x << 16, y << 16,
2550 vdisplay > fb->height || 2601 hdisplay << 16, vdisplay << 16, fb);
2551 x > fb->width - hdisplay ||
2552 y > fb->height - vdisplay) {
2553 DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
2554 fb->width, fb->height, hdisplay, vdisplay, x, y,
2555 crtc->invert_dimensions ? " (inverted)" : "");
2556 return -ENOSPC;
2557 }
2558
2559 return 0;
2560} 2602}
2561EXPORT_SYMBOL(drm_crtc_check_viewport); 2603EXPORT_SYMBOL(drm_crtc_check_viewport);
2562 2604
@@ -3310,14 +3352,11 @@ int drm_mode_rmfb(struct drm_device *dev,
3310 if (!found) 3352 if (!found)
3311 goto fail_lookup; 3353 goto fail_lookup;
3312 3354
3313 /* Mark fb as reaped, we still have a ref from fpriv->fbs. */
3314 __drm_framebuffer_unregister(dev, fb);
3315
3316 list_del_init(&fb->filp_head); 3355 list_del_init(&fb->filp_head);
3317 mutex_unlock(&dev->mode_config.fb_lock); 3356 mutex_unlock(&dev->mode_config.fb_lock);
3318 mutex_unlock(&file_priv->fbs_lock); 3357 mutex_unlock(&file_priv->fbs_lock);
3319 3358
3320 drm_framebuffer_remove(fb); 3359 drm_framebuffer_unreference(fb);
3321 3360
3322 return 0; 3361 return 0;
3323 3362
@@ -3484,7 +3523,6 @@ out_err1:
3484 */ 3523 */
3485void drm_fb_release(struct drm_file *priv) 3524void drm_fb_release(struct drm_file *priv)
3486{ 3525{
3487 struct drm_device *dev = priv->minor->dev;
3488 struct drm_framebuffer *fb, *tfb; 3526 struct drm_framebuffer *fb, *tfb;
3489 3527
3490 /* 3528 /*
@@ -3498,16 +3536,10 @@ void drm_fb_release(struct drm_file *priv)
3498 * at it any more. 3536 * at it any more.
3499 */ 3537 */
3500 list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) { 3538 list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
3501
3502 mutex_lock(&dev->mode_config.fb_lock);
3503 /* Mark fb as reaped, we still have a ref from fpriv->fbs. */
3504 __drm_framebuffer_unregister(dev, fb);
3505 mutex_unlock(&dev->mode_config.fb_lock);
3506
3507 list_del_init(&fb->filp_head); 3539 list_del_init(&fb->filp_head);
3508 3540
3509 /* This will also drop the fpriv->fbs reference. */ 3541 /* This drops the fpriv->fbs reference. */
3510 drm_framebuffer_remove(fb); 3542 drm_framebuffer_unreference(fb);
3511 } 3543 }
3512} 3544}
3513 3545
@@ -5181,7 +5213,14 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
5181 goto out; 5213 goto out;
5182 } 5214 }
5183 5215
5184 ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb); 5216 if (crtc->state) {
5217 const struct drm_plane_state *state = crtc->primary->state;
5218
5219 ret = check_src_coords(state->src_x, state->src_y,
5220 state->src_w, state->src_h, fb);
5221 } else {
5222 ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
5223 }
5185 if (ret) 5224 if (ret)
5186 goto out; 5225 goto out;
5187 5226
@@ -5629,7 +5668,8 @@ unsigned int drm_rotation_simplify(unsigned int rotation,
5629{ 5668{
5630 if (rotation & ~supported_rotations) { 5669 if (rotation & ~supported_rotations) {
5631 rotation ^= BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y); 5670 rotation ^= BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y);
5632 rotation = (rotation & ~0xf) | BIT((ffs(rotation & 0xf) + 1) % 4); 5671 rotation = (rotation & DRM_REFLECT_MASK) |
5672 BIT((ffs(rotation & DRM_ROTATE_MASK) + 1) % 4);
5633 } 5673 }
5634 5674
5635 return rotation; 5675 return rotation;
@@ -5732,7 +5772,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
5732 */ 5772 */
5733 WARN_ON(!list_empty(&dev->mode_config.fb_list)); 5773 WARN_ON(!list_empty(&dev->mode_config.fb_list));
5734 list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) { 5774 list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
5735 drm_framebuffer_remove(fb); 5775 drm_framebuffer_free(&fb->refcount);
5736 } 5776 }
5737 5777
5738 list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list, 5778 list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 291734e87fca..9535c5b60387 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -424,6 +424,19 @@ static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter)
424 I2C_FUNC_10BIT_ADDR; 424 I2C_FUNC_10BIT_ADDR;
425} 425}
426 426
427static void drm_dp_i2c_msg_write_status_update(struct drm_dp_aux_msg *msg)
428{
429 /*
430 * In case of i2c defer or short i2c ack reply to a write,
431 * we need to switch to WRITE_STATUS_UPDATE to drain the
432 * rest of the message
433 */
434 if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_I2C_WRITE) {
435 msg->request &= DP_AUX_I2C_MOT;
436 msg->request |= DP_AUX_I2C_WRITE_STATUS_UPDATE;
437 }
438}
439
427#define AUX_PRECHARGE_LEN 10 /* 10 to 16 */ 440#define AUX_PRECHARGE_LEN 10 /* 10 to 16 */
428#define AUX_SYNC_LEN (16 + 4) /* preamble + AUX_SYNC_END */ 441#define AUX_SYNC_LEN (16 + 4) /* preamble + AUX_SYNC_END */
429#define AUX_STOP_LEN 4 442#define AUX_STOP_LEN 4
@@ -579,6 +592,8 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
579 * Both native ACK and I2C ACK replies received. We 592 * Both native ACK and I2C ACK replies received. We
580 * can assume the transfer was successful. 593 * can assume the transfer was successful.
581 */ 594 */
595 if (ret != msg->size)
596 drm_dp_i2c_msg_write_status_update(msg);
582 return ret; 597 return ret;
583 598
584 case DP_AUX_I2C_REPLY_NACK: 599 case DP_AUX_I2C_REPLY_NACK:
@@ -596,6 +611,8 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
596 if (defer_i2c < 7) 611 if (defer_i2c < 7)
597 defer_i2c++; 612 defer_i2c++;
598 usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100); 613 usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
614 drm_dp_i2c_msg_write_status_update(msg);
615
599 continue; 616 continue;
600 617
601 default: 618 default:
@@ -608,6 +625,14 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
608 return -EREMOTEIO; 625 return -EREMOTEIO;
609} 626}
610 627
628static void drm_dp_i2c_msg_set_request(struct drm_dp_aux_msg *msg,
629 const struct i2c_msg *i2c_msg)
630{
631 msg->request = (i2c_msg->flags & I2C_M_RD) ?
632 DP_AUX_I2C_READ : DP_AUX_I2C_WRITE;
633 msg->request |= DP_AUX_I2C_MOT;
634}
635
611/* 636/*
612 * Keep retrying drm_dp_i2c_do_msg until all data has been transferred. 637 * Keep retrying drm_dp_i2c_do_msg until all data has been transferred.
613 * 638 *
@@ -661,10 +686,7 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
661 686
662 for (i = 0; i < num; i++) { 687 for (i = 0; i < num; i++) {
663 msg.address = msgs[i].addr; 688 msg.address = msgs[i].addr;
664 msg.request = (msgs[i].flags & I2C_M_RD) ? 689 drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
665 DP_AUX_I2C_READ :
666 DP_AUX_I2C_WRITE;
667 msg.request |= DP_AUX_I2C_MOT;
668 /* Send a bare address packet to start the transaction. 690 /* Send a bare address packet to start the transaction.
669 * Zero sized messages specify an address only (bare 691 * Zero sized messages specify an address only (bare
670 * address) transaction. 692 * address) transaction.
@@ -672,6 +694,13 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
672 msg.buffer = NULL; 694 msg.buffer = NULL;
673 msg.size = 0; 695 msg.size = 0;
674 err = drm_dp_i2c_do_msg(aux, &msg); 696 err = drm_dp_i2c_do_msg(aux, &msg);
697
698 /*
699 * Reset msg.request in case it got
700 * changed into a WRITE_STATUS_UPDATE.
701 */
702 drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
703
675 if (err < 0) 704 if (err < 0)
676 break; 705 break;
677 /* We want each transaction to be as large as possible, but 706 /* We want each transaction to be as large as possible, but
@@ -684,6 +713,13 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
684 msg.size = min(transfer_size, msgs[i].len - j); 713 msg.size = min(transfer_size, msgs[i].len - j);
685 714
686 err = drm_dp_i2c_drain_msg(aux, &msg); 715 err = drm_dp_i2c_drain_msg(aux, &msg);
716
717 /*
718 * Reset msg.request in case it got
719 * changed into a WRITE_STATUS_UPDATE.
720 */
721 drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
722
687 if (err < 0) 723 if (err < 0)
688 break; 724 break;
689 transfer_size = err; 725 transfer_size = err;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 53d09a19f7e1..9362609df38a 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -37,11 +37,9 @@
37#include "drm_legacy.h" 37#include "drm_legacy.h"
38#include "drm_internal.h" 38#include "drm_internal.h"
39 39
40unsigned int drm_debug = 0; /* 1 to enable debug output */ 40unsigned int drm_debug = 0; /* bitmask of DRM_UT_x */
41EXPORT_SYMBOL(drm_debug); 41EXPORT_SYMBOL(drm_debug);
42 42
43bool drm_atomic = 0;
44
45MODULE_AUTHOR(CORE_AUTHOR); 43MODULE_AUTHOR(CORE_AUTHOR);
46MODULE_DESCRIPTION(CORE_DESC); 44MODULE_DESCRIPTION(CORE_DESC);
47MODULE_LICENSE("GPL and additional rights"); 45MODULE_LICENSE("GPL and additional rights");
@@ -55,7 +53,6 @@ module_param_named(debug, drm_debug, int, 0600);
55static DEFINE_SPINLOCK(drm_minor_lock); 53static DEFINE_SPINLOCK(drm_minor_lock);
56static struct idr drm_minors_idr; 54static struct idr drm_minors_idr;
57 55
58struct class *drm_class;
59static struct dentry *drm_debugfs_root; 56static struct dentry *drm_debugfs_root;
60 57
61void drm_err(const char *format, ...) 58void drm_err(const char *format, ...)
@@ -398,15 +395,51 @@ void drm_minor_release(struct drm_minor *minor)
398} 395}
399 396
400/** 397/**
398 * DOC: driver instance overview
399 *
400 * A device instance for a drm driver is represented by struct &drm_device. This
401 * is allocated with drm_dev_alloc(), usually from bus-specific ->probe()
402 * callbacks implemented by the driver. The driver then needs to initialize all
403 * the various subsystems for the drm device like memory management, vblank
404 * handling, modesetting support and initial output configuration plus obviously
405 * initialize all the corresponding hardware bits. An important part of this is
406 * also calling drm_dev_set_unique() to set the userspace-visible unique name of
407 * this device instance. Finally when everything is up and running and ready for
408 * userspace the device instance can be published using drm_dev_register().
409 *
410 * There is also deprecated support for initializing device instances using
411 * bus-specific helpers and the ->load() callback. But due to
412 * backwards-compatibility needs the device instance has to be published too
413 * early, which requires unpretty global locking to make safe and is therefore
414 * only supported for existing drivers not yet converted to the new scheme.
415 *
416 * When cleaning up a device instance everything needs to be done in reverse:
417 * First unpublish the device instance with drm_dev_unregister(). Then clean up
418 * any other resources allocated at device initialization and drop the driver's
419 * reference to &drm_device using drm_dev_unref().
420 *
421 * Note that the lifetime rules for &drm_device instances still have a lot of
422 * historical baggage. Hence use the reference counting provided by
423 * drm_dev_ref() and drm_dev_unref() only carefully.
424 *
425 * Also note that embedding of &drm_device is currently not (yet) supported (but
426 * it would be easy to add). Drivers can store driver-private data in the
427 * dev_priv field of &drm_device.
428 */
429
430/**
401 * drm_put_dev - Unregister and release a DRM device 431 * drm_put_dev - Unregister and release a DRM device
402 * @dev: DRM device 432 * @dev: DRM device
403 * 433 *
404 * Called at module unload time or when a PCI device is unplugged. 434 * Called at module unload time or when a PCI device is unplugged.
405 * 435 *
406 * Use of this function is discouraged. It will eventually go away completely.
407 * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
408 *
409 * Cleans up all DRM device, calling drm_lastclose(). 436 * Cleans up all DRM device, calling drm_lastclose().
437 *
438 * Note: Use of this function is deprecated. It will eventually go away
439 * completely. Please use drm_dev_unregister() and drm_dev_unref() explicitly
440 * instead to make sure that the device isn't userspace accessible any more
441 * while teardown is in progress, ensuring that userspace can't access an
442 * inconsistent state.
410 */ 443 */
411void drm_put_dev(struct drm_device *dev) 444void drm_put_dev(struct drm_device *dev)
412{ 445{
@@ -519,7 +552,9 @@ static void drm_fs_inode_free(struct inode *inode)
519 * 552 *
520 * Allocate and initialize a new DRM device. No device registration is done. 553 * Allocate and initialize a new DRM device. No device registration is done.
521 * Call drm_dev_register() to advertice the device to user space and register it 554 * Call drm_dev_register() to advertice the device to user space and register it
522 * with other core subsystems. 555 * with other core subsystems. This should be done last in the device
556 * initialization sequence to make sure userspace can't access an inconsistent
557 * state.
523 * 558 *
524 * The initial ref-count of the object is 1. Use drm_dev_ref() and 559 * The initial ref-count of the object is 1. Use drm_dev_ref() and
525 * drm_dev_unref() to take and drop further ref-counts. 560 * drm_dev_unref() to take and drop further ref-counts.
@@ -566,6 +601,8 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
566 ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL); 601 ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
567 if (ret) 602 if (ret)
568 goto err_minors; 603 goto err_minors;
604
605 WARN_ON(driver->suspend || driver->resume);
569 } 606 }
570 607
571 if (drm_core_check_feature(dev, DRIVER_RENDER)) { 608 if (drm_core_check_feature(dev, DRIVER_RENDER)) {
@@ -672,6 +709,12 @@ EXPORT_SYMBOL(drm_dev_unref);
672 * 709 *
673 * Never call this twice on any device! 710 * Never call this twice on any device!
674 * 711 *
712 * NOTE: To ensure backward compatibility with existing drivers method this
713 * function calls the ->load() method after registering the device nodes,
714 * creating race conditions. Usage of the ->load() methods is therefore
715 * deprecated, drivers must perform all initialization before calling
716 * drm_dev_register().
717 *
675 * RETURNS: 718 * RETURNS:
676 * 0 on success, negative error code on failure. 719 * 0 on success, negative error code on failure.
677 */ 720 */
@@ -719,6 +762,9 @@ EXPORT_SYMBOL(drm_dev_register);
719 * Unregister the DRM device from the system. This does the reverse of 762 * Unregister the DRM device from the system. This does the reverse of
720 * drm_dev_register() but does not deallocate the device. The caller must call 763 * drm_dev_register() but does not deallocate the device. The caller must call
721 * drm_dev_unref() to drop their final reference. 764 * drm_dev_unref() to drop their final reference.
765 *
766 * This should be called first in the device teardown code to make sure
767 * userspace can't access the device instance any more.
722 */ 768 */
723void drm_dev_unregister(struct drm_device *dev) 769void drm_dev_unregister(struct drm_device *dev)
724{ 770{
@@ -839,10 +885,9 @@ static int __init drm_core_init(void)
839 if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops)) 885 if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
840 goto err_p1; 886 goto err_p1;
841 887
842 drm_class = drm_sysfs_create(THIS_MODULE, "drm"); 888 ret = drm_sysfs_init();
843 if (IS_ERR(drm_class)) { 889 if (ret < 0) {
844 printk(KERN_ERR "DRM: Error creating drm class.\n"); 890 printk(KERN_ERR "DRM: Error creating drm class.\n");
845 ret = PTR_ERR(drm_class);
846 goto err_p2; 891 goto err_p2;
847 } 892 }
848 893
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 05bb7311ac5d..d5d2c03fd136 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2044,7 +2044,7 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
2044static bool valid_inferred_mode(const struct drm_connector *connector, 2044static bool valid_inferred_mode(const struct drm_connector *connector,
2045 const struct drm_display_mode *mode) 2045 const struct drm_display_mode *mode)
2046{ 2046{
2047 struct drm_display_mode *m; 2047 const struct drm_display_mode *m;
2048 bool ok = false; 2048 bool ok = false;
2049 2049
2050 list_for_each_entry(m, &connector->probed_modes, head) { 2050 list_for_each_entry(m, &connector->probed_modes, head) {
@@ -2418,6 +2418,8 @@ add_cvt_modes(struct drm_connector *connector, struct edid *edid)
2418 return closure.modes; 2418 return closure.modes;
2419} 2419}
2420 2420
2421static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode);
2422
2421static void 2423static void
2422do_detailed_mode(struct detailed_timing *timing, void *c) 2424do_detailed_mode(struct detailed_timing *timing, void *c)
2423{ 2425{
@@ -2434,6 +2436,13 @@ do_detailed_mode(struct detailed_timing *timing, void *c)
2434 if (closure->preferred) 2436 if (closure->preferred)
2435 newmode->type |= DRM_MODE_TYPE_PREFERRED; 2437 newmode->type |= DRM_MODE_TYPE_PREFERRED;
2436 2438
2439 /*
2440 * Detailed modes are limited to 10kHz pixel clock resolution,
2441 * so fix up anything that looks like CEA/HDMI mode, but the clock
2442 * is just slightly off.
2443 */
2444 fixup_detailed_cea_mode_clock(newmode);
2445
2437 drm_mode_probed_add(closure->connector, newmode); 2446 drm_mode_probed_add(closure->connector, newmode);
2438 closure->modes++; 2447 closure->modes++;
2439 closure->preferred = 0; 2448 closure->preferred = 0;
@@ -2529,9 +2538,9 @@ cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
2529 * and the 60Hz variant otherwise. 2538 * and the 60Hz variant otherwise.
2530 */ 2539 */
2531 if (cea_mode->vdisplay == 240 || cea_mode->vdisplay == 480) 2540 if (cea_mode->vdisplay == 240 || cea_mode->vdisplay == 480)
2532 clock = clock * 1001 / 1000; 2541 clock = DIV_ROUND_CLOSEST(clock * 1001, 1000);
2533 else 2542 else
2534 clock = DIV_ROUND_UP(clock * 1000, 1001); 2543 clock = DIV_ROUND_CLOSEST(clock * 1000, 1001);
2535 2544
2536 return clock; 2545 return clock;
2537} 2546}
@@ -3103,6 +3112,45 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
3103 return modes; 3112 return modes;
3104} 3113}
3105 3114
3115static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
3116{
3117 const struct drm_display_mode *cea_mode;
3118 int clock1, clock2, clock;
3119 u8 mode_idx;
3120 const char *type;
3121
3122 mode_idx = drm_match_cea_mode(mode) - 1;
3123 if (mode_idx < ARRAY_SIZE(edid_cea_modes)) {
3124 type = "CEA";
3125 cea_mode = &edid_cea_modes[mode_idx];
3126 clock1 = cea_mode->clock;
3127 clock2 = cea_mode_alternate_clock(cea_mode);
3128 } else {
3129 mode_idx = drm_match_hdmi_mode(mode) - 1;
3130 if (mode_idx < ARRAY_SIZE(edid_4k_modes)) {
3131 type = "HDMI";
3132 cea_mode = &edid_4k_modes[mode_idx];
3133 clock1 = cea_mode->clock;
3134 clock2 = hdmi_mode_alternate_clock(cea_mode);
3135 } else {
3136 return;
3137 }
3138 }
3139
3140 /* pick whichever is closest */
3141 if (abs(mode->clock - clock1) < abs(mode->clock - clock2))
3142 clock = clock1;
3143 else
3144 clock = clock2;
3145
3146 if (mode->clock == clock)
3147 return;
3148
3149 DRM_DEBUG("detailed mode matches %s VIC %d, adjusting clock %d -> %d\n",
3150 type, mode_idx + 1, mode->clock, clock);
3151 mode->clock = clock;
3152}
3153
3106static void 3154static void
3107parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db) 3155parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
3108{ 3156{
@@ -3361,7 +3409,7 @@ EXPORT_SYMBOL(drm_edid_to_speaker_allocation);
3361 * the sink doesn't support audio or video. 3409 * the sink doesn't support audio or video.
3362 */ 3410 */
3363int drm_av_sync_delay(struct drm_connector *connector, 3411int drm_av_sync_delay(struct drm_connector *connector,
3364 struct drm_display_mode *mode) 3412 const struct drm_display_mode *mode)
3365{ 3413{
3366 int i = !!(mode->flags & DRM_MODE_FLAG_INTERLACE); 3414 int i = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
3367 int a, v; 3415 int a, v;
@@ -3396,7 +3444,6 @@ EXPORT_SYMBOL(drm_av_sync_delay);
3396/** 3444/**
3397 * drm_select_eld - select one ELD from multiple HDMI/DP sinks 3445 * drm_select_eld - select one ELD from multiple HDMI/DP sinks
3398 * @encoder: the encoder just changed display mode 3446 * @encoder: the encoder just changed display mode
3399 * @mode: the adjusted display mode
3400 * 3447 *
3401 * It's possible for one encoder to be associated with multiple HDMI/DP sinks. 3448 * It's possible for one encoder to be associated with multiple HDMI/DP sinks.
3402 * The policy is now hard coded to simply use the first HDMI/DP sink's ELD. 3449 * The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
@@ -3404,8 +3451,7 @@ EXPORT_SYMBOL(drm_av_sync_delay);
3404 * Return: The connector associated with the first HDMI/DP sink that has ELD 3451 * Return: The connector associated with the first HDMI/DP sink that has ELD
3405 * attached to it. 3452 * attached to it.
3406 */ 3453 */
3407struct drm_connector *drm_select_eld(struct drm_encoder *encoder, 3454struct drm_connector *drm_select_eld(struct drm_encoder *encoder)
3408 struct drm_display_mode *mode)
3409{ 3455{
3410 struct drm_connector *connector; 3456 struct drm_connector *connector;
3411 struct drm_device *dev = encoder->dev; 3457 struct drm_device *dev = encoder->dev;
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index c5605fe4907e..698b8c3b09d9 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -32,7 +32,7 @@ MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
32 "from built-in data or /lib/firmware instead. "); 32 "from built-in data or /lib/firmware instead. ");
33 33
34#define GENERIC_EDIDS 6 34#define GENERIC_EDIDS 6
35static const char *generic_edid_name[GENERIC_EDIDS] = { 35static const char * const generic_edid_name[GENERIC_EDIDS] = {
36 "edid/800x600.bin", 36 "edid/800x600.bin",
37 "edid/1024x768.bin", 37 "edid/1024x768.bin",
38 "edid/1280x1024.bin", 38 "edid/1280x1024.bin",
@@ -264,20 +264,43 @@ out:
264int drm_load_edid_firmware(struct drm_connector *connector) 264int drm_load_edid_firmware(struct drm_connector *connector)
265{ 265{
266 const char *connector_name = connector->name; 266 const char *connector_name = connector->name;
267 char *edidname = edid_firmware, *last, *colon; 267 char *edidname, *last, *colon, *fwstr, *edidstr, *fallback = NULL;
268 int ret; 268 int ret;
269 struct edid *edid; 269 struct edid *edid;
270 270
271 if (*edidname == '\0') 271 if (edid_firmware[0] == '\0')
272 return 0; 272 return 0;
273 273
274 colon = strchr(edidname, ':'); 274 /*
275 if (colon != NULL) { 275 * If there are multiple edid files specified and separated
276 if (strncmp(connector_name, edidname, colon - edidname)) 276 * by commas, search through the list looking for one that
277 return 0; 277 * matches the connector.
278 edidname = colon + 1; 278 *
279 if (*edidname == '\0') 279 * If there's one or more that don't specify a connector, keep
280 * the last one found as a fallback.
281 */
282 fwstr = kstrdup(edid_firmware, GFP_KERNEL);
283 edidstr = fwstr;
284
285 while ((edidname = strsep(&edidstr, ","))) {
286 colon = strchr(edidname, ':');
287 if (colon != NULL) {
288 if (strncmp(connector_name, edidname, colon - edidname))
289 continue;
290 edidname = colon + 1;
291 break;
292 }
293
294 if (*edidname != '\0') /* corner case: multiple ',' */
295 fallback = edidname;
296 }
297
298 if (!edidname) {
299 if (!fallback) {
300 kfree(fwstr);
280 return 0; 301 return 0;
302 }
303 edidname = fallback;
281 } 304 }
282 305
283 last = edidname + strlen(edidname) - 1; 306 last = edidname + strlen(edidname) - 1;
@@ -285,6 +308,8 @@ int drm_load_edid_firmware(struct drm_connector *connector)
285 *last = '\0'; 308 *last = '\0';
286 309
287 edid = edid_load(connector, edidname, connector_name); 310 edid = edid_load(connector, edidname, connector_name);
311 kfree(fwstr);
312
288 if (IS_ERR_OR_NULL(edid)) 313 if (IS_ERR_OR_NULL(edid))
289 return 0; 314 return 0;
290 315
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index ca08c472311b..e673c13c7391 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -38,6 +38,13 @@
38#include <drm/drm_crtc.h> 38#include <drm/drm_crtc.h>
39#include <drm/drm_fb_helper.h> 39#include <drm/drm_fb_helper.h>
40#include <drm/drm_crtc_helper.h> 40#include <drm/drm_crtc_helper.h>
41#include <drm/drm_atomic.h>
42#include <drm/drm_atomic_helper.h>
43
44static bool drm_fbdev_emulation = true;
45module_param_named(fbdev_emulation, drm_fbdev_emulation, bool, 0600);
46MODULE_PARM_DESC(fbdev_emulation,
47 "Enable legacy fbdev emulation [default=true]");
41 48
42static LIST_HEAD(kernel_fb_helper_list); 49static LIST_HEAD(kernel_fb_helper_list);
43 50
@@ -99,6 +106,9 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
99 struct drm_connector *connector; 106 struct drm_connector *connector;
100 int i; 107 int i;
101 108
109 if (!drm_fbdev_emulation)
110 return 0;
111
102 mutex_lock(&dev->mode_config.mutex); 112 mutex_lock(&dev->mode_config.mutex);
103 drm_for_each_connector(connector, dev) { 113 drm_for_each_connector(connector, dev) {
104 struct drm_fb_helper_connector *fb_helper_connector; 114 struct drm_fb_helper_connector *fb_helper_connector;
@@ -129,6 +139,9 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
129 struct drm_fb_helper_connector **temp; 139 struct drm_fb_helper_connector **temp;
130 struct drm_fb_helper_connector *fb_helper_connector; 140 struct drm_fb_helper_connector *fb_helper_connector;
131 141
142 if (!drm_fbdev_emulation)
143 return 0;
144
132 WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex)); 145 WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
133 if (fb_helper->connector_count + 1 > fb_helper->connector_info_alloc_count) { 146 if (fb_helper->connector_count + 1 > fb_helper->connector_info_alloc_count) {
134 temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector *) * (fb_helper->connector_count + 1), GFP_KERNEL); 147 temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector *) * (fb_helper->connector_count + 1), GFP_KERNEL);
@@ -184,6 +197,9 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
184 struct drm_fb_helper_connector *fb_helper_connector; 197 struct drm_fb_helper_connector *fb_helper_connector;
185 int i, j; 198 int i, j;
186 199
200 if (!drm_fbdev_emulation)
201 return 0;
202
187 WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex)); 203 WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
188 204
189 for (i = 0; i < fb_helper->connector_count; i++) { 205 for (i = 0; i < fb_helper->connector_count; i++) {
@@ -320,15 +336,92 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
320} 336}
321EXPORT_SYMBOL(drm_fb_helper_debug_leave); 337EXPORT_SYMBOL(drm_fb_helper_debug_leave);
322 338
323static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper) 339static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper)
340{
341 struct drm_device *dev = fb_helper->dev;
342 struct drm_plane *plane;
343 struct drm_atomic_state *state;
344 int i, ret;
345
346 state = drm_atomic_state_alloc(dev);
347 if (!state)
348 return -ENOMEM;
349
350 state->acquire_ctx = dev->mode_config.acquire_ctx;
351retry:
352 drm_for_each_plane(plane, dev) {
353 struct drm_plane_state *plane_state;
354
355 plane->old_fb = plane->fb;
356
357 plane_state = drm_atomic_get_plane_state(state, plane);
358 if (IS_ERR(plane_state)) {
359 ret = PTR_ERR(plane_state);
360 goto fail;
361 }
362
363 plane_state->rotation = BIT(DRM_ROTATE_0);
364
365 /* disable non-primary: */
366 if (plane->type == DRM_PLANE_TYPE_PRIMARY)
367 continue;
368
369 ret = __drm_atomic_helper_disable_plane(plane, plane_state);
370 if (ret != 0)
371 goto fail;
372 }
373
374 for(i = 0; i < fb_helper->crtc_count; i++) {
375 struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
376
377 ret = __drm_atomic_helper_set_config(mode_set, state);
378 if (ret != 0)
379 goto fail;
380 }
381
382 ret = drm_atomic_commit(state);
383
384fail:
385 drm_for_each_plane(plane, dev) {
386 if (ret == 0) {
387 struct drm_framebuffer *new_fb = plane->state->fb;
388 if (new_fb)
389 drm_framebuffer_reference(new_fb);
390 plane->fb = new_fb;
391 plane->crtc = plane->state->crtc;
392
393 if (plane->old_fb)
394 drm_framebuffer_unreference(plane->old_fb);
395 }
396 plane->old_fb = NULL;
397 }
398
399 if (ret == -EDEADLK)
400 goto backoff;
401
402 if (ret != 0)
403 drm_atomic_state_free(state);
404
405 return ret;
406
407backoff:
408 drm_atomic_state_clear(state);
409 drm_atomic_legacy_backoff(state);
410
411 goto retry;
412}
413
414static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
324{ 415{
325 struct drm_device *dev = fb_helper->dev; 416 struct drm_device *dev = fb_helper->dev;
326 struct drm_plane *plane; 417 struct drm_plane *plane;
327 bool error = false;
328 int i; 418 int i;
329 419
330 drm_warn_on_modeset_not_all_locked(dev); 420 drm_warn_on_modeset_not_all_locked(dev);
331 421
422 if (fb_helper->atomic)
423 return restore_fbdev_mode_atomic(fb_helper);
424
332 drm_for_each_plane(plane, dev) { 425 drm_for_each_plane(plane, dev) {
333 if (plane->type != DRM_PLANE_TYPE_PRIMARY) 426 if (plane->type != DRM_PLANE_TYPE_PRIMARY)
334 drm_plane_force_disable(plane); 427 drm_plane_force_disable(plane);
@@ -348,18 +441,19 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
348 if (crtc->funcs->cursor_set2) { 441 if (crtc->funcs->cursor_set2) {
349 ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0); 442 ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
350 if (ret) 443 if (ret)
351 error = true; 444 return ret;
352 } else if (crtc->funcs->cursor_set) { 445 } else if (crtc->funcs->cursor_set) {
353 ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0); 446 ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
354 if (ret) 447 if (ret)
355 error = true; 448 return ret;
356 } 449 }
357 450
358 ret = drm_mode_set_config_internal(mode_set); 451 ret = drm_mode_set_config_internal(mode_set);
359 if (ret) 452 if (ret)
360 error = true; 453 return ret;
361 } 454 }
362 return error; 455
456 return 0;
363} 457}
364 458
365/** 459/**
@@ -369,12 +463,18 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
369 * This should be called from driver's drm ->lastclose callback 463 * This should be called from driver's drm ->lastclose callback
370 * when implementing an fbcon on top of kms using this helper. This ensures that 464 * when implementing an fbcon on top of kms using this helper. This ensures that
371 * the user isn't greeted with a black screen when e.g. X dies. 465 * the user isn't greeted with a black screen when e.g. X dies.
466 *
467 * RETURNS:
468 * Zero if everything went ok, negative error code otherwise.
372 */ 469 */
373bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) 470int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
374{ 471{
375 struct drm_device *dev = fb_helper->dev; 472 struct drm_device *dev = fb_helper->dev;
376 bool ret; 473 bool do_delayed;
377 bool do_delayed = false; 474 int ret;
475
476 if (!drm_fbdev_emulation)
477 return -ENODEV;
378 478
379 drm_modeset_lock_all(dev); 479 drm_modeset_lock_all(dev);
380 ret = restore_fbdev_mode(fb_helper); 480 ret = restore_fbdev_mode(fb_helper);
@@ -592,6 +692,9 @@ int drm_fb_helper_init(struct drm_device *dev,
592 struct drm_crtc *crtc; 692 struct drm_crtc *crtc;
593 int i; 693 int i;
594 694
695 if (!drm_fbdev_emulation)
696 return 0;
697
595 if (!max_conn_count) 698 if (!max_conn_count)
596 return -EINVAL; 699 return -EINVAL;
597 700
@@ -625,6 +728,8 @@ int drm_fb_helper_init(struct drm_device *dev,
625 i++; 728 i++;
626 } 729 }
627 730
731 fb_helper->atomic = !!drm_core_check_feature(dev, DRIVER_ATOMIC);
732
628 return 0; 733 return 0;
629out_free: 734out_free:
630 drm_fb_helper_crtc_free(fb_helper); 735 drm_fb_helper_crtc_free(fb_helper);
@@ -714,6 +819,9 @@ EXPORT_SYMBOL(drm_fb_helper_release_fbi);
714 819
715void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) 820void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
716{ 821{
822 if (!drm_fbdev_emulation)
823 return;
824
717 if (!list_empty(&fb_helper->kernel_fb_list)) { 825 if (!list_empty(&fb_helper->kernel_fb_list)) {
718 list_del(&fb_helper->kernel_fb_list); 826 list_del(&fb_helper->kernel_fb_list);
719 if (list_empty(&kernel_fb_helper_list)) { 827 if (list_empty(&kernel_fb_helper_list)) {
@@ -1122,6 +1230,80 @@ int drm_fb_helper_set_par(struct fb_info *info)
1122} 1230}
1123EXPORT_SYMBOL(drm_fb_helper_set_par); 1231EXPORT_SYMBOL(drm_fb_helper_set_par);
1124 1232
1233static int pan_display_atomic(struct fb_var_screeninfo *var,
1234 struct fb_info *info)
1235{
1236 struct drm_fb_helper *fb_helper = info->par;
1237 struct drm_device *dev = fb_helper->dev;
1238 struct drm_atomic_state *state;
1239 int i, ret;
1240
1241 state = drm_atomic_state_alloc(dev);
1242 if (!state)
1243 return -ENOMEM;
1244
1245 state->acquire_ctx = dev->mode_config.acquire_ctx;
1246retry:
1247 for(i = 0; i < fb_helper->crtc_count; i++) {
1248 struct drm_mode_set *mode_set;
1249
1250 mode_set = &fb_helper->crtc_info[i].mode_set;
1251
1252 mode_set->crtc->primary->old_fb = mode_set->crtc->primary->fb;
1253
1254 mode_set->x = var->xoffset;
1255 mode_set->y = var->yoffset;
1256
1257 ret = __drm_atomic_helper_set_config(mode_set, state);
1258 if (ret != 0)
1259 goto fail;
1260 }
1261
1262 ret = drm_atomic_commit(state);
1263 if (ret != 0)
1264 goto fail;
1265
1266 info->var.xoffset = var->xoffset;
1267 info->var.yoffset = var->yoffset;
1268
1269
1270fail:
1271 for(i = 0; i < fb_helper->crtc_count; i++) {
1272 struct drm_mode_set *mode_set;
1273 struct drm_plane *plane;
1274
1275 mode_set = &fb_helper->crtc_info[i].mode_set;
1276 plane = mode_set->crtc->primary;
1277
1278 if (ret == 0) {
1279 struct drm_framebuffer *new_fb = plane->state->fb;
1280
1281 if (new_fb)
1282 drm_framebuffer_reference(new_fb);
1283 plane->fb = new_fb;
1284 plane->crtc = plane->state->crtc;
1285
1286 if (plane->old_fb)
1287 drm_framebuffer_unreference(plane->old_fb);
1288 }
1289 plane->old_fb = NULL;
1290 }
1291
1292 if (ret == -EDEADLK)
1293 goto backoff;
1294
1295 if (ret != 0)
1296 drm_atomic_state_free(state);
1297
1298 return ret;
1299
1300backoff:
1301 drm_atomic_state_clear(state);
1302 drm_atomic_legacy_backoff(state);
1303
1304 goto retry;
1305}
1306
1125/** 1307/**
1126 * drm_fb_helper_pan_display - implementation for ->fb_pan_display 1308 * drm_fb_helper_pan_display - implementation for ->fb_pan_display
1127 * @var: updated screen information 1309 * @var: updated screen information
@@ -1145,6 +1327,11 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
1145 return -EBUSY; 1327 return -EBUSY;
1146 } 1328 }
1147 1329
1330 if (fb_helper->atomic) {
1331 ret = pan_display_atomic(var, info);
1332 goto unlock;
1333 }
1334
1148 for (i = 0; i < fb_helper->crtc_count; i++) { 1335 for (i = 0; i < fb_helper->crtc_count; i++) {
1149 modeset = &fb_helper->crtc_info[i].mode_set; 1336 modeset = &fb_helper->crtc_info[i].mode_set;
1150 1337
@@ -1159,6 +1346,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
1159 } 1346 }
1160 } 1347 }
1161 } 1348 }
1349unlock:
1162 drm_modeset_unlock_all(dev); 1350 drm_modeset_unlock_all(dev);
1163 return ret; 1351 return ret;
1164} 1352}
@@ -1934,6 +2122,9 @@ int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
1934 struct drm_device *dev = fb_helper->dev; 2122 struct drm_device *dev = fb_helper->dev;
1935 int count = 0; 2123 int count = 0;
1936 2124
2125 if (!drm_fbdev_emulation)
2126 return 0;
2127
1937 mutex_lock(&dev->mode_config.mutex); 2128 mutex_lock(&dev->mode_config.mutex);
1938 count = drm_fb_helper_probe_connector_modes(fb_helper, 2129 count = drm_fb_helper_probe_connector_modes(fb_helper,
1939 dev->mode_config.max_width, 2130 dev->mode_config.max_width,
@@ -1977,6 +2168,9 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1977 struct drm_device *dev = fb_helper->dev; 2168 struct drm_device *dev = fb_helper->dev;
1978 u32 max_width, max_height; 2169 u32 max_width, max_height;
1979 2170
2171 if (!drm_fbdev_emulation)
2172 return 0;
2173
1980 mutex_lock(&fb_helper->dev->mode_config.mutex); 2174 mutex_lock(&fb_helper->dev->mode_config.mutex);
1981 if (!fb_helper->fb || !drm_fb_helper_is_bound(fb_helper)) { 2175 if (!fb_helper->fb || !drm_fb_helper_is_bound(fb_helper)) {
1982 fb_helper->delayed_hotplug = true; 2176 fb_helper->delayed_hotplug = true;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 1d47d2e9487c..c7de454e8e88 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -763,7 +763,8 @@ EXPORT_SYMBOL(drm_gem_object_release);
763void 763void
764drm_gem_object_free(struct kref *kref) 764drm_gem_object_free(struct kref *kref)
765{ 765{
766 struct drm_gem_object *obj = (struct drm_gem_object *) kref; 766 struct drm_gem_object *obj =
767 container_of(kref, struct drm_gem_object, refcount);
767 struct drm_device *dev = obj->dev; 768 struct drm_device *dev = obj->dev;
768 769
769 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 770 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -810,8 +811,6 @@ EXPORT_SYMBOL(drm_gem_vm_close);
810 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So 811 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
811 * callers must verify access restrictions before calling this helper. 812 * callers must verify access restrictions before calling this helper.
812 * 813 *
813 * NOTE: This function has to be protected with dev->struct_mutex
814 *
815 * Return 0 or success or -EINVAL if the object size is smaller than the VMA 814 * Return 0 or success or -EINVAL if the object size is smaller than the VMA
816 * size, or if no gem_vm_ops are provided. 815 * size, or if no gem_vm_ops are provided.
817 */ 816 */
@@ -820,8 +819,6 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
820{ 819{
821 struct drm_device *dev = obj->dev; 820 struct drm_device *dev = obj->dev;
822 821
823 lockdep_assert_held(&dev->struct_mutex);
824
825 /* Check for valid size. */ 822 /* Check for valid size. */
826 if (obj_size < vma->vm_end - vma->vm_start) 823 if (obj_size < vma->vm_end - vma->vm_start)
827 return -EINVAL; 824 return -EINVAL;
@@ -865,30 +862,46 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
865{ 862{
866 struct drm_file *priv = filp->private_data; 863 struct drm_file *priv = filp->private_data;
867 struct drm_device *dev = priv->minor->dev; 864 struct drm_device *dev = priv->minor->dev;
868 struct drm_gem_object *obj; 865 struct drm_gem_object *obj = NULL;
869 struct drm_vma_offset_node *node; 866 struct drm_vma_offset_node *node;
870 int ret; 867 int ret;
871 868
872 if (drm_device_is_unplugged(dev)) 869 if (drm_device_is_unplugged(dev))
873 return -ENODEV; 870 return -ENODEV;
874 871
875 mutex_lock(&dev->struct_mutex); 872 drm_vma_offset_lock_lookup(dev->vma_offset_manager);
873 node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
874 vma->vm_pgoff,
875 vma_pages(vma));
876 if (likely(node)) {
877 obj = container_of(node, struct drm_gem_object, vma_node);
878 /*
879 * When the object is being freed, after it hits 0-refcnt it
880 * proceeds to tear down the object. In the process it will
881 * attempt to remove the VMA offset and so acquire this
882 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
883 * that matches our range, we know it is in the process of being
884 * destroyed and will be freed as soon as we release the lock -
885 * so we have to check for the 0-refcnted object and treat it as
886 * invalid.
887 */
888 if (!kref_get_unless_zero(&obj->refcount))
889 obj = NULL;
890 }
891 drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
876 892
877 node = drm_vma_offset_exact_lookup(dev->vma_offset_manager, 893 if (!obj)
878 vma->vm_pgoff,
879 vma_pages(vma));
880 if (!node) {
881 mutex_unlock(&dev->struct_mutex);
882 return -EINVAL; 894 return -EINVAL;
883 } else if (!drm_vma_node_is_allowed(node, filp)) { 895
884 mutex_unlock(&dev->struct_mutex); 896 if (!drm_vma_node_is_allowed(node, filp)) {
897 drm_gem_object_unreference_unlocked(obj);
885 return -EACCES; 898 return -EACCES;
886 } 899 }
887 900
888 obj = container_of(node, struct drm_gem_object, vma_node); 901 ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
889 ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma); 902 vma);
890 903
891 mutex_unlock(&dev->struct_mutex); 904 drm_gem_object_unreference_unlocked(obj);
892 905
893 return ret; 906 return ret;
894} 907}
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 86cc793cdf79..e109b49cd25d 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -481,12 +481,9 @@ int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
481 struct vm_area_struct *vma) 481 struct vm_area_struct *vma)
482{ 482{
483 struct drm_gem_cma_object *cma_obj; 483 struct drm_gem_cma_object *cma_obj;
484 struct drm_device *dev = obj->dev;
485 int ret; 484 int ret;
486 485
487 mutex_lock(&dev->struct_mutex);
488 ret = drm_gem_mmap_obj(obj, obj->size, vma); 486 ret = drm_gem_mmap_obj(obj, obj->size, vma);
489 mutex_unlock(&dev->struct_mutex);
490 if (ret < 0) 487 if (ret < 0)
491 return ret; 488 return ret;
492 489
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 059af01bd07a..43cbda3306ac 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -73,7 +73,7 @@ int drm_authmagic(struct drm_device *dev, void *data,
73/* drm_sysfs.c */ 73/* drm_sysfs.c */
74extern struct class *drm_class; 74extern struct class *drm_class;
75 75
76struct class *drm_sysfs_create(struct module *owner, char *name); 76int drm_sysfs_init(void);
77void drm_sysfs_destroy(void); 77void drm_sysfs_destroy(void);
78struct device *drm_sysfs_minor_alloc(struct drm_minor *minor); 78struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
79int drm_sysfs_connector_add(struct drm_connector *connector); 79int drm_sysfs_connector_add(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index ddfa6014c2c2..57676f8d7ecf 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -720,7 +720,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd,
720 return 0; 720 return 0;
721} 721}
722 722
723#if __OS_HAS_AGP 723#if IS_ENABLED(CONFIG_AGP)
724typedef struct drm_agp_mode32 { 724typedef struct drm_agp_mode32 {
725 u32 mode; /**< AGP mode */ 725 u32 mode; /**< AGP mode */
726} drm_agp_mode32_t; 726} drm_agp_mode32_t;
@@ -882,7 +882,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
882 882
883 return drm_ioctl(file, DRM_IOCTL_AGP_UNBIND, (unsigned long)request); 883 return drm_ioctl(file, DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
884} 884}
885#endif /* __OS_HAS_AGP */ 885#endif /* CONFIG_AGP */
886 886
887typedef struct drm_scatter_gather32 { 887typedef struct drm_scatter_gather32 {
888 u32 size; /**< In bytes -- will round to page boundary */ 888 u32 size; /**< In bytes -- will round to page boundary */
@@ -1090,7 +1090,7 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
1090 [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX32)] = compat_drm_getsareactx, 1090 [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX32)] = compat_drm_getsareactx,
1091 [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX32)] = compat_drm_resctx, 1091 [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX32)] = compat_drm_resctx,
1092 [DRM_IOCTL_NR(DRM_IOCTL_DMA32)] = compat_drm_dma, 1092 [DRM_IOCTL_NR(DRM_IOCTL_DMA32)] = compat_drm_dma,
1093#if __OS_HAS_AGP 1093#if IS_ENABLED(CONFIG_AGP)
1094 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE32)] = compat_drm_agp_enable, 1094 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE32)] = compat_drm_agp_enable,
1095 [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO32)] = compat_drm_agp_info, 1095 [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO32)] = compat_drm_agp_info,
1096 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC32)] = compat_drm_agp_alloc, 1096 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC32)] = compat_drm_agp_alloc,
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index d93e7378c077..8ce2a0c59116 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -40,7 +40,7 @@
40static int drm_version(struct drm_device *dev, void *data, 40static int drm_version(struct drm_device *dev, void *data,
41 struct drm_file *file_priv); 41 struct drm_file *file_priv);
42 42
43/** 43/*
44 * Get the bus id. 44 * Get the bus id.
45 * 45 *
46 * \param inode device inode. 46 * \param inode device inode.
@@ -75,7 +75,7 @@ drm_unset_busid(struct drm_device *dev,
75 master->unique_len = 0; 75 master->unique_len = 0;
76} 76}
77 77
78/** 78/*
79 * Set the bus id. 79 * Set the bus id.
80 * 80 *
81 * \param inode device inode. 81 * \param inode device inode.
@@ -149,7 +149,7 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
149 return 0; 149 return 0;
150} 150}
151 151
152/** 152/*
153 * Get a mapping information. 153 * Get a mapping information.
154 * 154 *
155 * \param inode device inode. 155 * \param inode device inode.
@@ -201,7 +201,7 @@ static int drm_getmap(struct drm_device *dev, void *data,
201 return 0; 201 return 0;
202} 202}
203 203
204/** 204/*
205 * Get client information. 205 * Get client information.
206 * 206 *
207 * \param inode device inode. 207 * \param inode device inode.
@@ -244,7 +244,7 @@ static int drm_getclient(struct drm_device *dev, void *data,
244 } 244 }
245} 245}
246 246
247/** 247/*
248 * Get statistics information. 248 * Get statistics information.
249 * 249 *
250 * \param inode device inode. 250 * \param inode device inode.
@@ -265,7 +265,7 @@ static int drm_getstats(struct drm_device *dev, void *data,
265 return 0; 265 return 0;
266} 266}
267 267
268/** 268/*
269 * Get device/driver capabilities 269 * Get device/driver capabilities
270 */ 270 */
271static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv) 271static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
@@ -318,7 +318,7 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
318 return 0; 318 return 0;
319} 319}
320 320
321/** 321/*
322 * Set device/driver capabilities 322 * Set device/driver capabilities
323 */ 323 */
324static int 324static int
@@ -352,7 +352,7 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
352 return 0; 352 return 0;
353} 353}
354 354
355/** 355/*
356 * Setversion ioctl. 356 * Setversion ioctl.
357 * 357 *
358 * \param inode device inode. 358 * \param inode device inode.
@@ -406,7 +406,18 @@ done:
406 return retcode; 406 return retcode;
407} 407}
408 408
409/** No-op ioctl. */ 409/**
410 * drm_noop - DRM no-op ioctl implemntation
411 * @dev: DRM device for the ioctl
412 * @data: data pointer for the ioctl
413 * @file_priv: DRM file for the ioctl call
414 *
415 * This no-op implementation for drm ioctls is useful for deprecated
416 * functionality where we can't return a failure code because existing userspace
417 * checks the result of the ioctl, but doesn't care about the action.
418 *
419 * Always returns successfully with 0.
420 */
410int drm_noop(struct drm_device *dev, void *data, 421int drm_noop(struct drm_device *dev, void *data,
411 struct drm_file *file_priv) 422 struct drm_file *file_priv)
412{ 423{
@@ -416,6 +427,28 @@ int drm_noop(struct drm_device *dev, void *data,
416EXPORT_SYMBOL(drm_noop); 427EXPORT_SYMBOL(drm_noop);
417 428
418/** 429/**
430 * drm_invalid_op - DRM invalid ioctl implemntation
431 * @dev: DRM device for the ioctl
432 * @data: data pointer for the ioctl
433 * @file_priv: DRM file for the ioctl call
434 *
435 * This no-op implementation for drm ioctls is useful for deprecated
436 * functionality where we really don't want to allow userspace to call the ioctl
437 * any more. This is the case for old ums interfaces for drivers that
438 * transitioned to kms gradually and so kept the old legacy tables around. This
439 * only applies to radeon and i915 kms drivers, other drivers shouldn't need to
440 * use this function.
441 *
442 * Always fails with a return value of -EINVAL.
443 */
444int drm_invalid_op(struct drm_device *dev, void *data,
445 struct drm_file *file_priv)
446{
447 return -EINVAL;
448}
449EXPORT_SYMBOL(drm_invalid_op);
450
451/*
419 * Copy and IOCTL return string to user space 452 * Copy and IOCTL return string to user space
420 */ 453 */
421static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value) 454static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
@@ -438,7 +471,7 @@ static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
438 return 0; 471 return 0;
439} 472}
440 473
441/** 474/*
442 * Get version information 475 * Get version information
443 * 476 *
444 * \param inode device inode. 477 * \param inode device inode.
@@ -470,7 +503,7 @@ static int drm_version(struct drm_device *dev, void *data,
470 return err; 503 return err;
471} 504}
472 505
473/** 506/*
474 * drm_ioctl_permit - Check ioctl permissions against caller 507 * drm_ioctl_permit - Check ioctl permissions against caller
475 * 508 *
476 * @flags: ioctl permission flags. 509 * @flags: ioctl permission flags.
@@ -518,7 +551,7 @@ EXPORT_SYMBOL(drm_ioctl_permit);
518 .name = #ioctl \ 551 .name = #ioctl \
519 } 552 }
520 553
521/** Ioctl table */ 554/* Ioctl table */
522static const struct drm_ioctl_desc drm_ioctls[] = { 555static const struct drm_ioctl_desc drm_ioctls[] = {
523 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 556 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
524 DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW), 557 DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW),
@@ -571,7 +604,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
571 604
572 DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 605 DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
573 606
574#if __OS_HAS_AGP 607#if IS_ENABLED(CONFIG_AGP)
575 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 608 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
576 DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 609 DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
577 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 610 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -635,16 +668,16 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
635#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) 668#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
636 669
637/** 670/**
638 * Called whenever a process performs an ioctl on /dev/drm. 671 * drm_ioctl - ioctl callback implementation for DRM drivers
639 * 672 * @filp: file this ioctl is called on
640 * \param inode device inode. 673 * @cmd: ioctl cmd number
641 * \param file_priv DRM file private. 674 * @arg: user argument
642 * \param cmd command.
643 * \param arg user argument.
644 * \return zero on success or negative number on failure.
645 * 675 *
646 * Looks up the ioctl function in the ::ioctls table, checking for root 676 * Looks up the ioctl function in the ::ioctls table, checking for root
647 * previleges if so required, and dispatches to the respective function. 677 * previleges if so required, and dispatches to the respective function.
678 *
679 * Returns:
680 * Zero on success, negative error code on failure.
648 */ 681 */
649long drm_ioctl(struct file *filp, 682long drm_ioctl(struct file *filp,
650 unsigned int cmd, unsigned long arg) 683 unsigned int cmd, unsigned long arg)
@@ -658,13 +691,16 @@ long drm_ioctl(struct file *filp,
658 char stack_kdata[128]; 691 char stack_kdata[128];
659 char *kdata = NULL; 692 char *kdata = NULL;
660 unsigned int usize, asize, drv_size; 693 unsigned int usize, asize, drv_size;
694 bool is_driver_ioctl;
661 695
662 dev = file_priv->minor->dev; 696 dev = file_priv->minor->dev;
663 697
664 if (drm_device_is_unplugged(dev)) 698 if (drm_device_is_unplugged(dev))
665 return -ENODEV; 699 return -ENODEV;
666 700
667 if (nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END) { 701 is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END;
702
703 if (is_driver_ioctl) {
668 /* driver ioctl */ 704 /* driver ioctl */
669 if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls) 705 if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls)
670 goto err_i1; 706 goto err_i1;
@@ -723,7 +759,10 @@ long drm_ioctl(struct file *filp,
723 memset(kdata, 0, usize); 759 memset(kdata, 0, usize);
724 } 760 }
725 761
726 if (ioctl->flags & DRM_UNLOCKED) 762 /* Enforce sane locking for kms driver ioctls. Core ioctls are
763 * too messy still. */
764 if ((drm_core_check_feature(dev, DRIVER_MODESET) && is_driver_ioctl) ||
765 (ioctl->flags & DRM_UNLOCKED))
727 retcode = func(dev, kdata, file_priv); 766 retcode = func(dev, kdata, file_priv);
728 else { 767 else {
729 mutex_lock(&drm_global_mutex); 768 mutex_lock(&drm_global_mutex);
@@ -754,9 +793,15 @@ EXPORT_SYMBOL(drm_ioctl);
754 793
755/** 794/**
756 * drm_ioctl_flags - Check for core ioctl and return ioctl permission flags 795 * drm_ioctl_flags - Check for core ioctl and return ioctl permission flags
796 * @nr: ioctl number
797 * @flags: where to return the ioctl permission flags
798 *
799 * This ioctl is only used by the vmwgfx driver to augment the access checks
800 * done by the drm core and insofar a pretty decent layering violation. This
801 * shouldn't be used by any drivers.
757 * 802 *
758 * @nr: Ioctl number. 803 * Returns:
759 * @flags: Where to return the ioctl permission flags 804 * True if the @nr corresponds to a DRM core ioctl numer, false otherwise.
760 */ 805 */
761bool drm_ioctl_flags(unsigned int nr, unsigned int *flags) 806bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
762{ 807{
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 22d8b78d537e..eba6337f5860 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -74,22 +74,22 @@ module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
74module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); 74module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
75module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); 75module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
76 76
77static void store_vblank(struct drm_device *dev, int crtc, 77static void store_vblank(struct drm_device *dev, unsigned int pipe,
78 u32 vblank_count_inc, 78 u32 vblank_count_inc,
79 struct timeval *t_vblank) 79 struct timeval *t_vblank, u32 last)
80{ 80{
81 struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 81 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
82 u32 tslot; 82 u32 tslot;
83 83
84 assert_spin_locked(&dev->vblank_time_lock); 84 assert_spin_locked(&dev->vblank_time_lock);
85 85
86 if (t_vblank) { 86 vblank->last = last;
87 /* All writers hold the spinlock, but readers are serialized by 87
88 * the latching of vblank->count below. 88 /* All writers hold the spinlock, but readers are serialized by
89 */ 89 * the latching of vblank->count below.
90 tslot = vblank->count + vblank_count_inc; 90 */
91 vblanktimestamp(dev, crtc, tslot) = *t_vblank; 91 tslot = vblank->count + vblank_count_inc;
92 } 92 vblanktimestamp(dev, pipe, tslot) = *t_vblank;
93 93
94 /* 94 /*
95 * vblank timestamp updates are protected on the write side with 95 * vblank timestamp updates are protected on the write side with
@@ -105,12 +105,60 @@ static void store_vblank(struct drm_device *dev, int crtc,
105} 105}
106 106
107/** 107/**
108 * drm_reset_vblank_timestamp - reset the last timestamp to the last vblank
109 * @dev: DRM device
110 * @pipe: index of CRTC for which to reset the timestamp
111 *
112 * Reset the stored timestamp for the current vblank count to correspond
113 * to the last vblank occurred.
114 *
115 * Only to be called from drm_vblank_on().
116 *
117 * Note: caller must hold dev->vbl_lock since this reads & writes
118 * device vblank fields.
119 */
120static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe)
121{
122 u32 cur_vblank;
123 bool rc;
124 struct timeval t_vblank;
125 int count = DRM_TIMESTAMP_MAXRETRIES;
126
127 spin_lock(&dev->vblank_time_lock);
128
129 /*
130 * sample the current counter to avoid random jumps
131 * when drm_vblank_enable() applies the diff
132 */
133 do {
134 cur_vblank = dev->driver->get_vblank_counter(dev, pipe);
135 rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, 0);
136 } while (cur_vblank != dev->driver->get_vblank_counter(dev, pipe) && --count > 0);
137
138 /*
139 * Only reinitialize corresponding vblank timestamp if high-precision query
140 * available and didn't fail. Otherwise reinitialize delayed at next vblank
141 * interrupt and assign 0 for now, to mark the vblanktimestamp as invalid.
142 */
143 if (!rc)
144 t_vblank = (struct timeval) {0, 0};
145
146 /*
147 * +1 to make sure user will never see the same
148 * vblank counter value before and after a modeset
149 */
150 store_vblank(dev, pipe, 1, &t_vblank, cur_vblank);
151
152 spin_unlock(&dev->vblank_time_lock);
153}
154
155/**
108 * drm_update_vblank_count - update the master vblank counter 156 * drm_update_vblank_count - update the master vblank counter
109 * @dev: DRM device 157 * @dev: DRM device
110 * @pipe: counter to update 158 * @pipe: counter to update
111 * 159 *
112 * Call back into the driver to update the appropriate vblank counter 160 * Call back into the driver to update the appropriate vblank counter
113 * (specified by @crtc). Deal with wraparound, if it occurred, and 161 * (specified by @pipe). Deal with wraparound, if it occurred, and
114 * update the last read value so we can deal with wraparound on the next 162 * update the last read value so we can deal with wraparound on the next
115 * call if necessary. 163 * call if necessary.
116 * 164 *
@@ -120,12 +168,15 @@ static void store_vblank(struct drm_device *dev, int crtc,
120 * Note: caller must hold dev->vbl_lock since this reads & writes 168 * Note: caller must hold dev->vbl_lock since this reads & writes
121 * device vblank fields. 169 * device vblank fields.
122 */ 170 */
123static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe) 171static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
172 unsigned long flags)
124{ 173{
125 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 174 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
126 u32 cur_vblank, diff; 175 u32 cur_vblank, diff;
127 bool rc; 176 bool rc;
128 struct timeval t_vblank; 177 struct timeval t_vblank;
178 int count = DRM_TIMESTAMP_MAXRETRIES;
179 int framedur_ns = vblank->framedur_ns;
129 180
130 /* 181 /*
131 * Interrupts were disabled prior to this call, so deal with counter 182 * Interrupts were disabled prior to this call, so deal with counter
@@ -141,33 +192,54 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe)
141 */ 192 */
142 do { 193 do {
143 cur_vblank = dev->driver->get_vblank_counter(dev, pipe); 194 cur_vblank = dev->driver->get_vblank_counter(dev, pipe);
144 rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, 0); 195 rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, flags);
145 } while (cur_vblank != dev->driver->get_vblank_counter(dev, pipe)); 196 } while (cur_vblank != dev->driver->get_vblank_counter(dev, pipe) && --count > 0);
197
198 if (dev->max_vblank_count != 0) {
199 /* trust the hw counter when it's around */
200 diff = (cur_vblank - vblank->last) & dev->max_vblank_count;
201 } else if (rc && framedur_ns) {
202 const struct timeval *t_old;
203 u64 diff_ns;
146 204
147 /* Deal with counter wrap */ 205 t_old = &vblanktimestamp(dev, pipe, vblank->count);
148 diff = cur_vblank - vblank->last; 206 diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old);
149 if (cur_vblank < vblank->last) { 207
150 diff += dev->max_vblank_count + 1; 208 /*
209 * Figure out how many vblanks we've missed based
210 * on the difference in the timestamps and the
211 * frame/field duration.
212 */
213 diff = DIV_ROUND_CLOSEST_ULL(diff_ns, framedur_ns);
151 214
152 DRM_DEBUG("last_vblank[%u]=0x%x, cur_vblank=0x%x => diff=0x%x\n", 215 if (diff == 0 && flags & DRM_CALLED_FROM_VBLIRQ)
153 pipe, vblank->last, cur_vblank, diff); 216 DRM_DEBUG_VBL("crtc %u: Redundant vblirq ignored."
217 " diff_ns = %lld, framedur_ns = %d)\n",
218 pipe, (long long) diff_ns, framedur_ns);
219 } else {
220 /* some kind of default for drivers w/o accurate vbl timestamping */
221 diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0;
154 } 222 }
155 223
156 DRM_DEBUG("updating vblank count on crtc %u, missed %d\n", 224 DRM_DEBUG_VBL("updating vblank count on crtc %u:"
157 pipe, diff); 225 " current=%u, diff=%u, hw=%u hw_last=%u\n",
226 pipe, vblank->count, diff, cur_vblank, vblank->last);
158 227
159 if (diff == 0) 228 if (diff == 0) {
229 WARN_ON_ONCE(cur_vblank != vblank->last);
160 return; 230 return;
231 }
161 232
162 /* 233 /*
163 * Only reinitialize corresponding vblank timestamp if high-precision query 234 * Only reinitialize corresponding vblank timestamp if high-precision query
164 * available and didn't fail. Otherwise reinitialize delayed at next vblank 235 * available and didn't fail, or we were called from the vblank interrupt.
165 * interrupt and assign 0 for now, to mark the vblanktimestamp as invalid. 236 * Otherwise reinitialize delayed at next vblank interrupt and assign 0
237 * for now, to mark the vblanktimestamp as invalid.
166 */ 238 */
167 if (!rc) 239 if (!rc && (flags & DRM_CALLED_FROM_VBLIRQ) == 0)
168 t_vblank = (struct timeval) {0, 0}; 240 t_vblank = (struct timeval) {0, 0};
169 241
170 store_vblank(dev, pipe, diff, &t_vblank); 242 store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
171} 243}
172 244
173/* 245/*
@@ -180,11 +252,6 @@ static void vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
180{ 252{
181 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 253 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
182 unsigned long irqflags; 254 unsigned long irqflags;
183 u32 vblcount;
184 s64 diff_ns;
185 bool vblrc;
186 struct timeval tvblank;
187 int count = DRM_TIMESTAMP_MAXRETRIES;
188 255
189 /* Prevent vblank irq processing while disabling vblank irqs, 256 /* Prevent vblank irq processing while disabling vblank irqs,
190 * so no updates of timestamps or count can happen after we've 257 * so no updates of timestamps or count can happen after we've
@@ -193,26 +260,6 @@ static void vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
193 spin_lock_irqsave(&dev->vblank_time_lock, irqflags); 260 spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
194 261
195 /* 262 /*
196 * If the vblank interrupt was already disabled update the count
197 * and timestamp to maintain the appearance that the counter
198 * has been ticking all along until this time. This makes the
199 * count account for the entire time between drm_vblank_on() and
200 * drm_vblank_off().
201 *
202 * But only do this if precise vblank timestamps are available.
203 * Otherwise we might read a totally bogus timestamp since drivers
204 * lacking precise timestamp support rely upon sampling the system clock
205 * at vblank interrupt time. Which obviously won't work out well if the
206 * vblank interrupt is disabled.
207 */
208 if (!vblank->enabled &&
209 drm_get_last_vbltimestamp(dev, pipe, &tvblank, 0)) {
210 drm_update_vblank_count(dev, pipe);
211 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
212 return;
213 }
214
215 /*
216 * Only disable vblank interrupts if they're enabled. This avoids 263 * Only disable vblank interrupts if they're enabled. This avoids
217 * calling the ->disable_vblank() operation in atomic context with the 264 * calling the ->disable_vblank() operation in atomic context with the
218 * hardware potentially runtime suspended. 265 * hardware potentially runtime suspended.
@@ -222,47 +269,13 @@ static void vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
222 vblank->enabled = false; 269 vblank->enabled = false;
223 } 270 }
224 271
225 /* No further vblank irq's will be processed after 272 /*
226 * this point. Get current hardware vblank count and 273 * Always update the count and timestamp to maintain the
227 * vblank timestamp, repeat until they are consistent. 274 * appearance that the counter has been ticking all along until
228 * 275 * this time. This makes the count account for the entire time
229 * FIXME: There is still a race condition here and in 276 * between drm_vblank_on() and drm_vblank_off().
230 * drm_update_vblank_count() which can cause off-by-one
231 * reinitialization of software vblank counter. If gpu
232 * vblank counter doesn't increment exactly at the leading
233 * edge of a vblank interval, then we can lose 1 count if
234 * we happen to execute between start of vblank and the
235 * delayed gpu counter increment.
236 */
237 do {
238 vblank->last = dev->driver->get_vblank_counter(dev, pipe);
239 vblrc = drm_get_last_vbltimestamp(dev, pipe, &tvblank, 0);
240 } while (vblank->last != dev->driver->get_vblank_counter(dev, pipe) && (--count) && vblrc);
241
242 if (!count)
243 vblrc = 0;
244
245 /* Compute time difference to stored timestamp of last vblank
246 * as updated by last invocation of drm_handle_vblank() in vblank irq.
247 */
248 vblcount = vblank->count;
249 diff_ns = timeval_to_ns(&tvblank) -
250 timeval_to_ns(&vblanktimestamp(dev, pipe, vblcount));
251
252 /* If there is at least 1 msec difference between the last stored
253 * timestamp and tvblank, then we are currently executing our
254 * disable inside a new vblank interval, the tvblank timestamp
255 * corresponds to this new vblank interval and the irq handler
256 * for this vblank didn't run yet and won't run due to our disable.
257 * Therefore we need to do the job of drm_handle_vblank() and
258 * increment the vblank counter by one to account for this vblank.
259 *
260 * Skip this step if there isn't any high precision timestamp
261 * available. In that case we can't account for this and just
262 * hope for the best.
263 */ 277 */
264 if (vblrc && (abs(diff_ns) > 1000000)) 278 drm_update_vblank_count(dev, pipe, 0);
265 store_vblank(dev, pipe, 1, &tvblank);
266 279
267 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 280 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
268} 281}
@@ -603,7 +616,8 @@ int drm_control(struct drm_device *dev, void *data,
603void drm_calc_timestamping_constants(struct drm_crtc *crtc, 616void drm_calc_timestamping_constants(struct drm_crtc *crtc,
604 const struct drm_display_mode *mode) 617 const struct drm_display_mode *mode)
605{ 618{
606 int linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0; 619 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
620 int linedur_ns = 0, framedur_ns = 0;
607 int dotclock = mode->crtc_clock; 621 int dotclock = mode->crtc_clock;
608 622
609 /* Valid dotclock? */ 623 /* Valid dotclock? */
@@ -612,10 +626,9 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
612 626
613 /* 627 /*
614 * Convert scanline length in pixels and video 628 * Convert scanline length in pixels and video
615 * dot clock to line duration, frame duration 629 * dot clock to line duration and frame duration
616 * and pixel duration in nanoseconds: 630 * in nanoseconds:
617 */ 631 */
618 pixeldur_ns = 1000000 / dotclock;
619 linedur_ns = div_u64((u64) mode->crtc_htotal * 1000000, dotclock); 632 linedur_ns = div_u64((u64) mode->crtc_htotal * 1000000, dotclock);
620 framedur_ns = div_u64((u64) frame_size * 1000000, dotclock); 633 framedur_ns = div_u64((u64) frame_size * 1000000, dotclock);
621 634
@@ -628,16 +641,14 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
628 DRM_ERROR("crtc %u: Can't calculate constants, dotclock = 0!\n", 641 DRM_ERROR("crtc %u: Can't calculate constants, dotclock = 0!\n",
629 crtc->base.id); 642 crtc->base.id);
630 643
631 crtc->pixeldur_ns = pixeldur_ns; 644 vblank->linedur_ns = linedur_ns;
632 crtc->linedur_ns = linedur_ns; 645 vblank->framedur_ns = framedur_ns;
633 crtc->framedur_ns = framedur_ns;
634 646
635 DRM_DEBUG("crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n", 647 DRM_DEBUG("crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
636 crtc->base.id, mode->crtc_htotal, 648 crtc->base.id, mode->crtc_htotal,
637 mode->crtc_vtotal, mode->crtc_vdisplay); 649 mode->crtc_vtotal, mode->crtc_vdisplay);
638 DRM_DEBUG("crtc %u: clock %d kHz framedur %d linedur %d, pixeldur %d\n", 650 DRM_DEBUG("crtc %u: clock %d kHz framedur %d linedur %d\n",
639 crtc->base.id, dotclock, framedur_ns, 651 crtc->base.id, dotclock, framedur_ns, linedur_ns);
640 linedur_ns, pixeldur_ns);
641} 652}
642EXPORT_SYMBOL(drm_calc_timestamping_constants); 653EXPORT_SYMBOL(drm_calc_timestamping_constants);
643 654
@@ -651,7 +662,6 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
651 * @flags: Flags to pass to driver: 662 * @flags: Flags to pass to driver:
652 * 0 = Default, 663 * 0 = Default,
653 * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl IRQ handler 664 * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl IRQ handler
654 * @refcrtc: CRTC which defines scanout timing
655 * @mode: mode which defines the scanout timings 665 * @mode: mode which defines the scanout timings
656 * 666 *
657 * Implements calculation of exact vblank timestamps from given drm_display_mode 667 * Implements calculation of exact vblank timestamps from given drm_display_mode
@@ -692,15 +702,14 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
692 int *max_error, 702 int *max_error,
693 struct timeval *vblank_time, 703 struct timeval *vblank_time,
694 unsigned flags, 704 unsigned flags,
695 const struct drm_crtc *refcrtc,
696 const struct drm_display_mode *mode) 705 const struct drm_display_mode *mode)
697{ 706{
698 struct timeval tv_etime; 707 struct timeval tv_etime;
699 ktime_t stime, etime; 708 ktime_t stime, etime;
700 int vbl_status; 709 unsigned int vbl_status;
710 int ret = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
701 int vpos, hpos, i; 711 int vpos, hpos, i;
702 int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns; 712 int delta_ns, duration_ns;
703 bool invbl;
704 713
705 if (pipe >= dev->num_crtcs) { 714 if (pipe >= dev->num_crtcs) {
706 DRM_ERROR("Invalid crtc %u\n", pipe); 715 DRM_ERROR("Invalid crtc %u\n", pipe);
@@ -713,15 +722,10 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
713 return -EIO; 722 return -EIO;
714 } 723 }
715 724
716 /* Durations of frames, lines, pixels in nanoseconds. */
717 framedur_ns = refcrtc->framedur_ns;
718 linedur_ns = refcrtc->linedur_ns;
719 pixeldur_ns = refcrtc->pixeldur_ns;
720
721 /* If mode timing undefined, just return as no-op: 725 /* If mode timing undefined, just return as no-op:
722 * Happens during initial modesetting of a crtc. 726 * Happens during initial modesetting of a crtc.
723 */ 727 */
724 if (framedur_ns == 0) { 728 if (mode->crtc_clock == 0) {
725 DRM_DEBUG("crtc %u: Noop due to uninitialized mode.\n", pipe); 729 DRM_DEBUG("crtc %u: Noop due to uninitialized mode.\n", pipe);
726 return -EAGAIN; 730 return -EAGAIN;
727 } 731 }
@@ -738,12 +742,14 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
738 * Get vertical and horizontal scanout position vpos, hpos, 742 * Get vertical and horizontal scanout position vpos, hpos,
739 * and bounding timestamps stime, etime, pre/post query. 743 * and bounding timestamps stime, etime, pre/post query.
740 */ 744 */
741 vbl_status = dev->driver->get_scanout_position(dev, pipe, flags, &vpos, 745 vbl_status = dev->driver->get_scanout_position(dev, pipe, flags,
742 &hpos, &stime, &etime); 746 &vpos, &hpos,
747 &stime, &etime,
748 mode);
743 749
744 /* Return as no-op if scanout query unsupported or failed. */ 750 /* Return as no-op if scanout query unsupported or failed. */
745 if (!(vbl_status & DRM_SCANOUTPOS_VALID)) { 751 if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
746 DRM_DEBUG("crtc %u : scanoutpos query failed [%d].\n", 752 DRM_DEBUG("crtc %u : scanoutpos query failed [0x%x].\n",
747 pipe, vbl_status); 753 pipe, vbl_status);
748 return -EIO; 754 return -EIO;
749 } 755 }
@@ -770,13 +776,15 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
770 * within vblank area, counting down the number of lines until 776 * within vblank area, counting down the number of lines until
771 * start of scanout. 777 * start of scanout.
772 */ 778 */
773 invbl = vbl_status & DRM_SCANOUTPOS_IN_VBLANK; 779 if (vbl_status & DRM_SCANOUTPOS_IN_VBLANK)
780 ret |= DRM_VBLANKTIME_IN_VBLANK;
774 781
775 /* Convert scanout position into elapsed time at raw_time query 782 /* Convert scanout position into elapsed time at raw_time query
776 * since start of scanout at first display scanline. delta_ns 783 * since start of scanout at first display scanline. delta_ns
777 * can be negative if start of scanout hasn't happened yet. 784 * can be negative if start of scanout hasn't happened yet.
778 */ 785 */
779 delta_ns = vpos * linedur_ns + hpos * pixeldur_ns; 786 delta_ns = div_s64(1000000LL * (vpos * mode->crtc_htotal + hpos),
787 mode->crtc_clock);
780 788
781 if (!drm_timestamp_monotonic) 789 if (!drm_timestamp_monotonic)
782 etime = ktime_mono_to_real(etime); 790 etime = ktime_mono_to_real(etime);
@@ -792,17 +800,13 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
792 etime = ktime_sub_ns(etime, delta_ns); 800 etime = ktime_sub_ns(etime, delta_ns);
793 *vblank_time = ktime_to_timeval(etime); 801 *vblank_time = ktime_to_timeval(etime);
794 802
795 DRM_DEBUG("crtc %u : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", 803 DRM_DEBUG_VBL("crtc %u : v 0x%x p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
796 pipe, (int)vbl_status, hpos, vpos, 804 pipe, vbl_status, hpos, vpos,
797 (long)tv_etime.tv_sec, (long)tv_etime.tv_usec, 805 (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
798 (long)vblank_time->tv_sec, (long)vblank_time->tv_usec, 806 (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
799 duration_ns/1000, i); 807 duration_ns/1000, i);
800 808
801 vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD; 809 return ret;
802 if (invbl)
803 vbl_status |= DRM_VBLANKTIME_IN_VBLANK;
804
805 return vbl_status;
806} 810}
807EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos); 811EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
808 812
@@ -873,7 +877,7 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
873 * Returns: 877 * Returns:
874 * The software vblank counter. 878 * The software vblank counter.
875 */ 879 */
876u32 drm_vblank_count(struct drm_device *dev, int pipe) 880u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
877{ 881{
878 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 882 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
879 883
@@ -914,11 +918,14 @@ EXPORT_SYMBOL(drm_crtc_vblank_count);
914 * vblank events since the system was booted, including lost events due to 918 * vblank events since the system was booted, including lost events due to
915 * modesetting activity. Returns corresponding system timestamp of the time 919 * modesetting activity. Returns corresponding system timestamp of the time
916 * of the vblank interval that corresponds to the current vblank counter value. 920 * of the vblank interval that corresponds to the current vblank counter value.
921 *
922 * This is the legacy version of drm_crtc_vblank_count_and_time().
917 */ 923 */
918u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, 924u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
919 struct timeval *vblanktime) 925 struct timeval *vblanktime)
920{ 926{
921 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 927 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
928 int count = DRM_TIMESTAMP_MAXRETRIES;
922 u32 cur_vblank; 929 u32 cur_vblank;
923 930
924 if (WARN_ON(pipe >= dev->num_crtcs)) 931 if (WARN_ON(pipe >= dev->num_crtcs))
@@ -934,12 +941,33 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
934 smp_rmb(); 941 smp_rmb();
935 *vblanktime = vblanktimestamp(dev, pipe, cur_vblank); 942 *vblanktime = vblanktimestamp(dev, pipe, cur_vblank);
936 smp_rmb(); 943 smp_rmb();
937 } while (cur_vblank != vblank->count); 944 } while (cur_vblank != vblank->count && --count > 0);
938 945
939 return cur_vblank; 946 return cur_vblank;
940} 947}
941EXPORT_SYMBOL(drm_vblank_count_and_time); 948EXPORT_SYMBOL(drm_vblank_count_and_time);
942 949
950/**
951 * drm_crtc_vblank_count_and_time - retrieve "cooked" vblank counter value
952 * and the system timestamp corresponding to that vblank counter value
953 * @crtc: which counter to retrieve
954 * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
955 *
956 * Fetches the "cooked" vblank count value that represents the number of
957 * vblank events since the system was booted, including lost events due to
958 * modesetting activity. Returns corresponding system timestamp of the time
959 * of the vblank interval that corresponds to the current vblank counter value.
960 *
961 * This is the native KMS version of drm_vblank_count_and_time().
962 */
963u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
964 struct timeval *vblanktime)
965{
966 return drm_vblank_count_and_time(crtc->dev, drm_crtc_index(crtc),
967 vblanktime);
968}
969EXPORT_SYMBOL(drm_crtc_vblank_count_and_time);
970
943static void send_vblank_event(struct drm_device *dev, 971static void send_vblank_event(struct drm_device *dev,
944 struct drm_pending_vblank_event *e, 972 struct drm_pending_vblank_event *e,
945 unsigned long seq, struct timeval *now) 973 unsigned long seq, struct timeval *now)
@@ -1033,7 +1061,7 @@ static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
1033 atomic_dec(&vblank->refcount); 1061 atomic_dec(&vblank->refcount);
1034 else { 1062 else {
1035 vblank->enabled = true; 1063 vblank->enabled = true;
1036 drm_update_vblank_count(dev, pipe); 1064 drm_update_vblank_count(dev, pipe, 0);
1037 } 1065 }
1038 } 1066 }
1039 1067
@@ -1154,8 +1182,8 @@ EXPORT_SYMBOL(drm_crtc_vblank_put);
1154 * @dev: DRM device 1182 * @dev: DRM device
1155 * @pipe: CRTC index 1183 * @pipe: CRTC index
1156 * 1184 *
1157 * This waits for one vblank to pass on @crtc, using the irq driver interfaces. 1185 * This waits for one vblank to pass on @pipe, using the irq driver interfaces.
1158 * It is a failure to call this when the vblank irq for @crtc is disabled, e.g. 1186 * It is a failure to call this when the vblank irq for @pipe is disabled, e.g.
1159 * due to lack of driver support or because the crtc is off. 1187 * due to lack of driver support or because the crtc is off.
1160 */ 1188 */
1161void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe) 1189void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
@@ -1244,8 +1272,8 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
1244 list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { 1272 list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
1245 if (e->pipe != pipe) 1273 if (e->pipe != pipe)
1246 continue; 1274 continue;
1247 DRM_DEBUG("Sending premature vblank event on disable: \ 1275 DRM_DEBUG("Sending premature vblank event on disable: "
1248 wanted %d, current %d\n", 1276 "wanted %d, current %d\n",
1249 e->event.sequence, seq); 1277 e->event.sequence, seq);
1250 list_del(&e->base.link); 1278 list_del(&e->base.link);
1251 drm_vblank_put(dev, pipe); 1279 drm_vblank_put(dev, pipe);
@@ -1276,7 +1304,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_off);
1276 1304
1277/** 1305/**
1278 * drm_crtc_vblank_reset - reset vblank state to off on a CRTC 1306 * drm_crtc_vblank_reset - reset vblank state to off on a CRTC
1279 * @drm_crtc: CRTC in question 1307 * @crtc: CRTC in question
1280 * 1308 *
1281 * Drivers can use this function to reset the vblank state to off at load time. 1309 * Drivers can use this function to reset the vblank state to off at load time.
1282 * Drivers should use this together with the drm_crtc_vblank_off() and 1310 * Drivers should use this together with the drm_crtc_vblank_off() and
@@ -1284,12 +1312,12 @@ EXPORT_SYMBOL(drm_crtc_vblank_off);
1284 * drm_crtc_vblank_off() is that this function doesn't save the vblank counter 1312 * drm_crtc_vblank_off() is that this function doesn't save the vblank counter
1285 * and hence doesn't need to call any driver hooks. 1313 * and hence doesn't need to call any driver hooks.
1286 */ 1314 */
1287void drm_crtc_vblank_reset(struct drm_crtc *drm_crtc) 1315void drm_crtc_vblank_reset(struct drm_crtc *crtc)
1288{ 1316{
1289 struct drm_device *dev = drm_crtc->dev; 1317 struct drm_device *dev = crtc->dev;
1290 unsigned long irqflags; 1318 unsigned long irqflags;
1291 int crtc = drm_crtc_index(drm_crtc); 1319 unsigned int pipe = drm_crtc_index(crtc);
1292 struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 1320 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
1293 1321
1294 spin_lock_irqsave(&dev->vbl_lock, irqflags); 1322 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1295 /* 1323 /*
@@ -1333,16 +1361,8 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
1333 vblank->inmodeset = 0; 1361 vblank->inmodeset = 0;
1334 } 1362 }
1335 1363
1336 /* 1364 drm_reset_vblank_timestamp(dev, pipe);
1337 * sample the current counter to avoid random jumps 1365
1338 * when drm_vblank_enable() applies the diff
1339 *
1340 * -1 to make sure user will never see the same
1341 * vblank counter value before and after a modeset
1342 */
1343 vblank->last =
1344 (dev->driver->get_vblank_counter(dev, pipe) - 1) &
1345 dev->max_vblank_count;
1346 /* 1366 /*
1347 * re-enable interrupts if there are users left, or the 1367 * re-enable interrupts if there are users left, or the
1348 * user wishes vblank interrupts to be enabled all the time. 1368 * user wishes vblank interrupts to be enabled all the time.
@@ -1725,9 +1745,6 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
1725bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe) 1745bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
1726{ 1746{
1727 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 1747 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
1728 u32 vblcount;
1729 s64 diff_ns;
1730 struct timeval tvblank;
1731 unsigned long irqflags; 1748 unsigned long irqflags;
1732 1749
1733 if (WARN_ON_ONCE(!dev->num_crtcs)) 1750 if (WARN_ON_ONCE(!dev->num_crtcs))
@@ -1751,32 +1768,7 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
1751 return false; 1768 return false;
1752 } 1769 }
1753 1770
1754 /* Fetch corresponding timestamp for this vblank interval from 1771 drm_update_vblank_count(dev, pipe, DRM_CALLED_FROM_VBLIRQ);
1755 * driver and store it in proper slot of timestamp ringbuffer.
1756 */
1757
1758 /* Get current timestamp and count. */
1759 vblcount = vblank->count;
1760 drm_get_last_vbltimestamp(dev, pipe, &tvblank, DRM_CALLED_FROM_VBLIRQ);
1761
1762 /* Compute time difference to timestamp of last vblank */
1763 diff_ns = timeval_to_ns(&tvblank) -
1764 timeval_to_ns(&vblanktimestamp(dev, pipe, vblcount));
1765
1766 /* Update vblank timestamp and count if at least
1767 * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds
1768 * difference between last stored timestamp and current
1769 * timestamp. A smaller difference means basically
1770 * identical timestamps. Happens if this vblank has
1771 * been already processed and this is a redundant call,
1772 * e.g., due to spurious vblank interrupts. We need to
1773 * ignore those for accounting.
1774 */
1775 if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS)
1776 store_vblank(dev, pipe, 1, &tvblank);
1777 else
1778 DRM_DEBUG("crtc %u: Redundant vblirq ignored. diff_ns = %d\n",
1779 pipe, (int) diff_ns);
1780 1772
1781 spin_unlock(&dev->vblank_time_lock); 1773 spin_unlock(&dev->vblank_time_lock);
1782 1774
@@ -1806,3 +1798,20 @@ bool drm_crtc_handle_vblank(struct drm_crtc *crtc)
1806 return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc)); 1798 return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc));
1807} 1799}
1808EXPORT_SYMBOL(drm_crtc_handle_vblank); 1800EXPORT_SYMBOL(drm_crtc_handle_vblank);
1801
1802/**
1803 * drm_vblank_no_hw_counter - "No hw counter" implementation of .get_vblank_counter()
1804 * @dev: DRM device
1805 * @pipe: CRTC for which to read the counter
1806 *
1807 * Drivers can plug this into the .get_vblank_counter() function if
1808 * there is no useable hardware frame counter available.
1809 *
1810 * Returns:
1811 * 0
1812 */
1813u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe)
1814{
1815 return 0;
1816}
1817EXPORT_SYMBOL(drm_vblank_no_hw_counter);
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index a521ef6ff807..87a8cb73366f 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -38,7 +38,7 @@
38#include <drm/drmP.h> 38#include <drm/drmP.h>
39#include "drm_legacy.h" 39#include "drm_legacy.h"
40 40
41#if __OS_HAS_AGP 41#if IS_ENABLED(CONFIG_AGP)
42 42
43#ifdef HAVE_PAGE_AGP 43#ifdef HAVE_PAGE_AGP
44# include <asm/agp.h> 44# include <asm/agp.h>
@@ -111,14 +111,14 @@ int drm_unbind_agp(struct agp_memory * handle)
111 return agp_unbind_memory(handle); 111 return agp_unbind_memory(handle);
112} 112}
113 113
114#else /* __OS_HAS_AGP */ 114#else /* CONFIG_AGP */
115static inline void *agp_remap(unsigned long offset, unsigned long size, 115static inline void *agp_remap(unsigned long offset, unsigned long size,
116 struct drm_device * dev) 116 struct drm_device * dev)
117{ 117{
118 return NULL; 118 return NULL;
119} 119}
120 120
121#endif /* agp */ 121#endif /* CONFIG_AGP */
122 122
123void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev) 123void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev)
124{ 124{
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 3427b115e2bb..04de6fd88f8c 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -267,12 +267,12 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
267 if (adj_end > end) 267 if (adj_end > end)
268 adj_end = end; 268 adj_end = end;
269 269
270 if (flags & DRM_MM_CREATE_TOP)
271 adj_start = adj_end - size;
272
273 if (mm->color_adjust) 270 if (mm->color_adjust)
274 mm->color_adjust(hole_node, color, &adj_start, &adj_end); 271 mm->color_adjust(hole_node, color, &adj_start, &adj_end);
275 272
273 if (flags & DRM_MM_CREATE_TOP)
274 adj_start = adj_end - size;
275
276 if (alignment) { 276 if (alignment) {
277 u64 tmp = adj_start; 277 u64 tmp = adj_start;
278 unsigned rem; 278 unsigned rem;
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index fba321ca4344..6675b1428410 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -307,6 +307,8 @@ static inline int modeset_lock(struct drm_modeset_lock *lock,
307 WARN_ON(ctx->contended); 307 WARN_ON(ctx->contended);
308 308
309 if (ctx->trylock_only) { 309 if (ctx->trylock_only) {
310 lockdep_assert_held(&ctx->ww_ctx);
311
310 if (!ww_mutex_trylock(&lock->mutex)) 312 if (!ww_mutex_trylock(&lock->mutex))
311 return -EBUSY; 313 return -EBUSY;
312 else 314 else
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index be3884073ea4..493c05c9ce4f 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -1,3 +1,4 @@
1#include <linux/component.h>
1#include <linux/export.h> 2#include <linux/export.h>
2#include <linux/list.h> 3#include <linux/list.h>
3#include <linux/of_graph.h> 4#include <linux/of_graph.h>
@@ -61,3 +62,90 @@ uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
61 return possible_crtcs; 62 return possible_crtcs;
62} 63}
63EXPORT_SYMBOL(drm_of_find_possible_crtcs); 64EXPORT_SYMBOL(drm_of_find_possible_crtcs);
65
66/**
67 * drm_of_component_probe - Generic probe function for a component based master
68 * @dev: master device containing the OF node
69 * @compare_of: compare function used for matching components
70 * @master_ops: component master ops to be used
71 *
72 * Parse the platform device OF node and bind all the components associated
73 * with the master. Interface ports are added before the encoders in order to
74 * satisfy their .bind requirements
75 * See Documentation/devicetree/bindings/graph.txt for the bindings.
76 *
77 * Returns zero if successful, or one of the standard error codes if it fails.
78 */
79int drm_of_component_probe(struct device *dev,
80 int (*compare_of)(struct device *, void *),
81 const struct component_master_ops *m_ops)
82{
83 struct device_node *ep, *port, *remote;
84 struct component_match *match = NULL;
85 int i;
86
87 if (!dev->of_node)
88 return -EINVAL;
89
90 /*
91 * Bind the crtc's ports first, so that drm_of_find_possible_crtcs()
92 * called from encoder's .bind callbacks works as expected
93 */
94 for (i = 0; ; i++) {
95 port = of_parse_phandle(dev->of_node, "ports", i);
96 if (!port)
97 break;
98
99 if (!of_device_is_available(port->parent)) {
100 of_node_put(port);
101 continue;
102 }
103
104 component_match_add(dev, &match, compare_of, port);
105 of_node_put(port);
106 }
107
108 if (i == 0) {
109 dev_err(dev, "missing 'ports' property\n");
110 return -ENODEV;
111 }
112
113 if (!match) {
114 dev_err(dev, "no available port\n");
115 return -ENODEV;
116 }
117
118 /*
119 * For bound crtcs, bind the encoders attached to their remote endpoint
120 */
121 for (i = 0; ; i++) {
122 port = of_parse_phandle(dev->of_node, "ports", i);
123 if (!port)
124 break;
125
126 if (!of_device_is_available(port->parent)) {
127 of_node_put(port);
128 continue;
129 }
130
131 for_each_child_of_node(port, ep) {
132 remote = of_graph_get_remote_port_parent(ep);
133 if (!remote || !of_device_is_available(remote)) {
134 of_node_put(remote);
135 continue;
136 } else if (!of_device_is_available(remote->parent)) {
137 dev_warn(dev, "parent device of %s is not available\n",
138 remote->full_name);
139 of_node_put(remote);
140 continue;
141 }
142
143 component_match_add(dev, &match, compare_of, remote);
144 of_node_put(remote);
145 }
146 of_node_put(port);
147 }
148
149 return component_master_add_with_match(dev, m_ops, match);
150}
151EXPORT_SYMBOL(drm_of_component_probe);
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 1b1bd42b0368..fcd2a86acd2c 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -266,6 +266,9 @@ void drm_pci_agp_destroy(struct drm_device *dev)
266 * then register the character device and inter module information. 266 * then register the character device and inter module information.
267 * Try and register, if we fail to register, backout previous work. 267 * Try and register, if we fail to register, backout previous work.
268 * 268 *
269 * NOTE: This function is deprecated, please use drm_dev_alloc() and
270 * drm_dev_register() instead and remove your ->load() callback.
271 *
269 * Return: 0 on success or a negative error code on failure. 272 * Return: 0 on success or a negative error code on failure.
270 */ 273 */
271int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent, 274int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
@@ -326,6 +329,10 @@ EXPORT_SYMBOL(drm_get_pci_dev);
326 * Initializes a drm_device structures, registering the stubs and initializing 329 * Initializes a drm_device structures, registering the stubs and initializing
327 * the AGP device. 330 * the AGP device.
328 * 331 *
332 * NOTE: This function is deprecated. Modern modesetting drm drivers should use
333 * pci_register_driver() directly, this function only provides shadow-binding
334 * support for old legacy drivers on top of that core pci function.
335 *
329 * Return: 0 on success or a negative error code on failure. 336 * Return: 0 on success or a negative error code on failure.
330 */ 337 */
331int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver) 338int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
@@ -435,6 +442,10 @@ EXPORT_SYMBOL(drm_pci_init);
435 * 442 *
436 * Unregisters one or more devices matched by a PCI driver from the DRM 443 * Unregisters one or more devices matched by a PCI driver from the DRM
437 * subsystem. 444 * subsystem.
445 *
446 * NOTE: This function is deprecated. Modern modesetting drm drivers should use
447 * pci_unregister_driver() directly, this function only provides shadow-binding
448 * support for old legacy drivers on top of that core pci function.
438 */ 449 */
439void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver) 450void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
440{ 451{
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 5e5a07af02c8..d384ebcf0aaf 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -426,7 +426,7 @@ int drm_plane_helper_commit(struct drm_plane *plane,
426 426
427 if (plane_funcs->prepare_fb && plane_state->fb && 427 if (plane_funcs->prepare_fb && plane_state->fb &&
428 plane_state->fb != old_fb) { 428 plane_state->fb != old_fb) {
429 ret = plane_funcs->prepare_fb(plane, plane_state->fb, 429 ret = plane_funcs->prepare_fb(plane,
430 plane_state); 430 plane_state);
431 if (ret) 431 if (ret)
432 goto out; 432 goto out;
@@ -479,8 +479,8 @@ int drm_plane_helper_commit(struct drm_plane *plane,
479 ret = 0; 479 ret = 0;
480 } 480 }
481 481
482 if (plane_funcs->cleanup_fb && old_fb) 482 if (plane_funcs->cleanup_fb)
483 plane_funcs->cleanup_fb(plane, old_fb, plane_state); 483 plane_funcs->cleanup_fb(plane, plane_state);
484out: 484out:
485 if (plane_state) { 485 if (plane_state) {
486 if (plane->funcs->atomic_destroy_state) 486 if (plane->funcs->atomic_destroy_state)
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 5314c9d5fef4..644169e1a029 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -95,6 +95,9 @@ EXPORT_SYMBOL(drm_platform_set_busid);
95 * subsystem, initializing a drm_device structure and calling the driver's 95 * subsystem, initializing a drm_device structure and calling the driver's
96 * .load() function. 96 * .load() function.
97 * 97 *
98 * NOTE: This function is deprecated, please use drm_dev_alloc() and
99 * drm_dev_register() instead and remove your ->load() callback.
100 *
98 * Return: 0 on success or a negative error code on failure. 101 * Return: 0 on success or a negative error code on failure.
99 */ 102 */
100int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device) 103int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device)
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index 631f5afd451c..531ac4cc9756 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -330,7 +330,7 @@ void drm_rect_rotate(struct drm_rect *r,
330 } 330 }
331 } 331 }
332 332
333 switch (rotation & 0xf) { 333 switch (rotation & DRM_ROTATE_MASK) {
334 case BIT(DRM_ROTATE_0): 334 case BIT(DRM_ROTATE_0):
335 break; 335 break;
336 case BIT(DRM_ROTATE_90): 336 case BIT(DRM_ROTATE_90):
@@ -390,7 +390,7 @@ void drm_rect_rotate_inv(struct drm_rect *r,
390{ 390{
391 struct drm_rect tmp; 391 struct drm_rect tmp;
392 392
393 switch (rotation & 0xf) { 393 switch (rotation & DRM_ROTATE_MASK) {
394 case BIT(DRM_ROTATE_0): 394 case BIT(DRM_ROTATE_0):
395 break; 395 break;
396 case BIT(DRM_ROTATE_90): 396 case BIT(DRM_ROTATE_90):
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 684bd4a13843..615b7e667320 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -30,6 +30,8 @@ static struct device_type drm_sysfs_device_minor = {
30 .name = "drm_minor" 30 .name = "drm_minor"
31}; 31};
32 32
33struct class *drm_class;
34
33/** 35/**
34 * __drm_class_suspend - internal DRM class suspend routine 36 * __drm_class_suspend - internal DRM class suspend routine
35 * @dev: Linux device to suspend 37 * @dev: Linux device to suspend
@@ -112,41 +114,34 @@ static CLASS_ATTR_STRING(version, S_IRUGO,
112 CORE_DATE); 114 CORE_DATE);
113 115
114/** 116/**
115 * drm_sysfs_create - create a struct drm_sysfs_class structure 117 * drm_sysfs_init - initialize sysfs helpers
116 * @owner: pointer to the module that is to "own" this struct drm_sysfs_class 118 *
117 * @name: pointer to a string for the name of this class. 119 * This is used to create the DRM class, which is the implicit parent of any
120 * other top-level DRM sysfs objects.
118 * 121 *
119 * This is used to create DRM class pointer that can then be used 122 * You must call drm_sysfs_destroy() to release the allocated resources.
120 * in calls to drm_sysfs_device_add().
121 * 123 *
122 * Note, the pointer created here is to be destroyed when finished by making a 124 * Return: 0 on success, negative error code on failure.
123 * call to drm_sysfs_destroy().
124 */ 125 */
125struct class *drm_sysfs_create(struct module *owner, char *name) 126int drm_sysfs_init(void)
126{ 127{
127 struct class *class;
128 int err; 128 int err;
129 129
130 class = class_create(owner, name); 130 drm_class = class_create(THIS_MODULE, "drm");
131 if (IS_ERR(class)) { 131 if (IS_ERR(drm_class))
132 err = PTR_ERR(class); 132 return PTR_ERR(drm_class);
133 goto err_out;
134 }
135
136 class->pm = &drm_class_dev_pm_ops;
137 133
138 err = class_create_file(class, &class_attr_version.attr); 134 drm_class->pm = &drm_class_dev_pm_ops;
139 if (err)
140 goto err_out_class;
141 135
142 class->devnode = drm_devnode; 136 err = class_create_file(drm_class, &class_attr_version.attr);
143 137 if (err) {
144 return class; 138 class_destroy(drm_class);
139 drm_class = NULL;
140 return err;
141 }
145 142
146err_out_class: 143 drm_class->devnode = drm_devnode;
147 class_destroy(class); 144 return 0;
148err_out:
149 return ERR_PTR(err);
150} 145}
151 146
152/** 147/**
@@ -156,7 +151,7 @@ err_out:
156 */ 151 */
157void drm_sysfs_destroy(void) 152void drm_sysfs_destroy(void)
158{ 153{
159 if ((drm_class == NULL) || (IS_ERR(drm_class))) 154 if (IS_ERR_OR_NULL(drm_class))
160 return; 155 return;
161 class_remove_file(drm_class, &class_attr_version.attr); 156 class_remove_file(drm_class, &class_attr_version.attr);
162 class_destroy(drm_class); 157 class_destroy(drm_class);
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index aab49ee4ed40..f90bd5fe35ba 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -95,7 +95,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
95 * Find the right map and if it's AGP memory find the real physical page to 95 * Find the right map and if it's AGP memory find the real physical page to
96 * map, get the page, increment the use count and return it. 96 * map, get the page, increment the use count and return it.
97 */ 97 */
98#if __OS_HAS_AGP 98#if IS_ENABLED(CONFIG_AGP)
99static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 99static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
100{ 100{
101 struct drm_file *priv = vma->vm_file->private_data; 101 struct drm_file *priv = vma->vm_file->private_data;
@@ -168,12 +168,12 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
168vm_fault_error: 168vm_fault_error:
169 return VM_FAULT_SIGBUS; /* Disallow mremap */ 169 return VM_FAULT_SIGBUS; /* Disallow mremap */
170} 170}
171#else /* __OS_HAS_AGP */ 171#else
172static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 172static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
173{ 173{
174 return VM_FAULT_SIGBUS; 174 return VM_FAULT_SIGBUS;
175} 175}
176#endif /* __OS_HAS_AGP */ 176#endif
177 177
178/** 178/**
179 * \c nopage method for shared virtual memory. 179 * \c nopage method for shared virtual memory.
@@ -556,7 +556,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
556 * --BenH. 556 * --BenH.
557 */ 557 */
558 if (!vma->vm_pgoff 558 if (!vma->vm_pgoff
559#if __OS_HAS_AGP 559#if IS_ENABLED(CONFIG_AGP)
560 && (!dev->agp 560 && (!dev->agp
561 || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE) 561 || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
562#endif 562#endif
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 68c1f32fb086..2f2ecde8285b 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -112,7 +112,7 @@ void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
112EXPORT_SYMBOL(drm_vma_offset_manager_destroy); 112EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
113 113
114/** 114/**
115 * drm_vma_offset_lookup() - Find node in offset space 115 * drm_vma_offset_lookup_locked() - Find node in offset space
116 * @mgr: Manager object 116 * @mgr: Manager object
117 * @start: Start address for object (page-based) 117 * @start: Start address for object (page-based)
118 * @pages: Size of object (page-based) 118 * @pages: Size of object (page-based)
@@ -122,37 +122,21 @@ EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
122 * region and the given node will be returned, as long as the node spans the 122 * region and the given node will be returned, as long as the node spans the
123 * whole requested area (given the size in number of pages as @pages). 123 * whole requested area (given the size in number of pages as @pages).
124 * 124 *
125 * RETURNS: 125 * Note that before lookup the vma offset manager lookup lock must be acquired
126 * Returns NULL if no suitable node can be found. Otherwise, the best match 126 * with drm_vma_offset_lock_lookup(). See there for an example. This can then be
127 * is returned. It's the caller's responsibility to make sure the node doesn't 127 * used to implement weakly referenced lookups using kref_get_unless_zero().
128 * get destroyed before the caller can access it.
129 */
130struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
131 unsigned long start,
132 unsigned long pages)
133{
134 struct drm_vma_offset_node *node;
135
136 read_lock(&mgr->vm_lock);
137 node = drm_vma_offset_lookup_locked(mgr, start, pages);
138 read_unlock(&mgr->vm_lock);
139
140 return node;
141}
142EXPORT_SYMBOL(drm_vma_offset_lookup);
143
144/**
145 * drm_vma_offset_lookup_locked() - Find node in offset space
146 * @mgr: Manager object
147 * @start: Start address for object (page-based)
148 * @pages: Size of object (page-based)
149 * 128 *
150 * Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup 129 * Example:
151 * manually. See drm_vma_offset_lock_lookup() for an example. 130 * drm_vma_offset_lock_lookup(mgr);
131 * node = drm_vma_offset_lookup_locked(mgr);
132 * if (node)
133 * kref_get_unless_zero(container_of(node, sth, entr));
134 * drm_vma_offset_unlock_lookup(mgr);
152 * 135 *
153 * RETURNS: 136 * RETURNS:
154 * Returns NULL if no suitable node can be found. Otherwise, the best match 137 * Returns NULL if no suitable node can be found. Otherwise, the best match
155 * is returned. 138 * is returned. It's the caller's responsibility to make sure the node doesn't
139 * get destroyed before the caller can access it.
156 */ 140 */
157struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr, 141struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
158 unsigned long start, 142 unsigned long start,
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index bd1a4156f647..96e86cf4455b 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -11,43 +11,59 @@ config DRM_EXYNOS
11 Choose this option if you have a Samsung SoC EXYNOS chipset. 11 Choose this option if you have a Samsung SoC EXYNOS chipset.
12 If M is selected the module will be called exynosdrm. 12 If M is selected the module will be called exynosdrm.
13 13
14if DRM_EXYNOS
15
14config DRM_EXYNOS_IOMMU 16config DRM_EXYNOS_IOMMU
15 bool 17 bool
16 depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU 18 depends on EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
17 default y 19 default y
18 20
21comment "CRTCs"
22
19config DRM_EXYNOS_FIMD 23config DRM_EXYNOS_FIMD
20 bool "Exynos DRM FIMD" 24 bool "FIMD"
21 depends on DRM_EXYNOS && !FB_S3C 25 depends on !FB_S3C
22 select FB_MODE_HELPERS 26 select FB_MODE_HELPERS
23 select MFD_SYSCON 27 select MFD_SYSCON
24 help 28 help
25 Choose this option if you want to use Exynos FIMD for DRM. 29 Choose this option if you want to use Exynos FIMD for DRM.
26 30
27config DRM_EXYNOS5433_DECON 31config DRM_EXYNOS5433_DECON
28 bool "Exynos5433 DRM DECON" 32 bool "DECON on Exynos5433"
29 depends on DRM_EXYNOS
30 help 33 help
31 Choose this option if you want to use Exynos5433 DECON for DRM. 34 Choose this option if you want to use Exynos5433 DECON for DRM.
32 35
33config DRM_EXYNOS7_DECON 36config DRM_EXYNOS7_DECON
34 bool "Exynos7 DRM DECON" 37 bool "DECON on Exynos7"
35 depends on DRM_EXYNOS && !FB_S3C 38 depends on !FB_S3C
36 select FB_MODE_HELPERS 39 select FB_MODE_HELPERS
37 help 40 help
38 Choose this option if you want to use Exynos DECON for DRM. 41 Choose this option if you want to use Exynos DECON for DRM.
39 42
43config DRM_EXYNOS_MIXER
44 bool "Mixer"
45 depends on !VIDEO_SAMSUNG_S5P_TV
46 help
47 Choose this option if you want to use Exynos Mixer for DRM.
48
49config DRM_EXYNOS_VIDI
50 bool "Virtual Display"
51 help
52 Choose this option if you want to use Exynos VIDI for DRM.
53
54comment "Encoders and Bridges"
55
40config DRM_EXYNOS_DPI 56config DRM_EXYNOS_DPI
41 bool "EXYNOS DRM parallel output support" 57 bool "Parallel output"
42 depends on DRM_EXYNOS && (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) 58 depends on DRM_EXYNOS_FIMD
43 select DRM_PANEL 59 select DRM_PANEL
44 default n 60 default n
45 help 61 help
46 This enables support for Exynos parallel output. 62 This enables support for Exynos parallel output.
47 63
48config DRM_EXYNOS_DSI 64config DRM_EXYNOS_DSI
49 bool "EXYNOS DRM MIPI-DSI driver support" 65 bool "MIPI-DSI host"
50 depends on DRM_EXYNOS && (DRM_EXYNOS_FIMD || DRM_EXYNOS5433_DECON || DRM_EXYNOS7_DECON) 66 depends on DRM_EXYNOS_FIMD || DRM_EXYNOS5433_DECON || DRM_EXYNOS7_DECON
51 select DRM_MIPI_DSI 67 select DRM_MIPI_DSI
52 select DRM_PANEL 68 select DRM_PANEL
53 default n 69 default n
@@ -55,58 +71,55 @@ config DRM_EXYNOS_DSI
55 This enables support for Exynos MIPI-DSI device. 71 This enables support for Exynos MIPI-DSI device.
56 72
57config DRM_EXYNOS_DP 73config DRM_EXYNOS_DP
58 bool "EXYNOS DRM DP driver support" 74 bool "Display Port"
59 depends on DRM_EXYNOS && (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) 75 depends on DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON
60 default DRM_EXYNOS 76 default DRM_EXYNOS
61 select DRM_PANEL 77 select DRM_PANEL
62 help 78 help
63 This enables support for DP device. 79 This enables support for DP device.
64 80
65config DRM_EXYNOS_HDMI 81config DRM_EXYNOS_HDMI
66 bool "Exynos DRM HDMI" 82 bool "HDMI"
67 depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_TV 83 depends on !VIDEO_SAMSUNG_S5P_TV && (DRM_EXYNOS_MIXER || DRM_EXYNOS5433_DECON)
68 help 84 help
69 Choose this option if you want to use Exynos HDMI for DRM. 85 Choose this option if you want to use Exynos HDMI for DRM.
70 86
71config DRM_EXYNOS_VIDI 87config DRM_EXYNOS_MIC
72 bool "Exynos DRM Virtual Display" 88 bool "Mobile Image Compressor"
73 depends on DRM_EXYNOS 89 depends on DRM_EXYNOS5433_DECON
74 help 90 help
75 Choose this option if you want to use Exynos VIDI for DRM. 91 Choose this option if you want to use Exynos MIC for DRM.
92
93comment "Sub-drivers"
76 94
77config DRM_EXYNOS_G2D 95config DRM_EXYNOS_G2D
78 bool "Exynos DRM G2D" 96 bool "G2D"
79 depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D 97 depends on !VIDEO_SAMSUNG_S5P_G2D
80 select FRAME_VECTOR 98 select FRAME_VECTOR
81 help 99 help
82 Choose this option if you want to use Exynos G2D for DRM. 100 Choose this option if you want to use Exynos G2D for DRM.
83 101
84config DRM_EXYNOS_IPP 102config DRM_EXYNOS_IPP
85 bool "Exynos DRM IPP" 103 bool "Image Post Processor"
86 depends on DRM_EXYNOS
87 help 104 help
88 Choose this option if you want to use IPP feature for DRM. 105 Choose this option if you want to use IPP feature for DRM.
89 106
90config DRM_EXYNOS_FIMC 107config DRM_EXYNOS_FIMC
91 bool "Exynos DRM FIMC" 108 bool "FIMC"
92 depends on DRM_EXYNOS_IPP && MFD_SYSCON 109 depends on DRM_EXYNOS_IPP && MFD_SYSCON
93 help 110 help
94 Choose this option if you want to use Exynos FIMC for DRM. 111 Choose this option if you want to use Exynos FIMC for DRM.
95 112
96config DRM_EXYNOS_ROTATOR 113config DRM_EXYNOS_ROTATOR
97 bool "Exynos DRM Rotator" 114 bool "Rotator"
98 depends on DRM_EXYNOS_IPP 115 depends on DRM_EXYNOS_IPP
99 help 116 help
100 Choose this option if you want to use Exynos Rotator for DRM. 117 Choose this option if you want to use Exynos Rotator for DRM.
101 118
102config DRM_EXYNOS_GSC 119config DRM_EXYNOS_GSC
103 bool "Exynos DRM GSC" 120 bool "GScaler"
104 depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM 121 depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM
105 help 122 help
106 Choose this option if you want to use Exynos GSC for DRM. 123 Choose this option if you want to use Exynos GSC for DRM.
107 124
108config DRM_EXYNOS_MIC 125endif
109 bool "Exynos DRM MIC"
110 depends on (DRM_EXYNOS && DRM_EXYNOS5433_DECON)
111 help
112 Choose this option if you want to use Exynos MIC for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 02aecfed6354..6496532aaa91 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -14,7 +14,8 @@ exynosdrm-$(CONFIG_DRM_EXYNOS7_DECON) += exynos7_drm_decon.o
14exynosdrm-$(CONFIG_DRM_EXYNOS_DPI) += exynos_drm_dpi.o 14exynosdrm-$(CONFIG_DRM_EXYNOS_DPI) += exynos_drm_dpi.o
15exynosdrm-$(CONFIG_DRM_EXYNOS_DSI) += exynos_drm_dsi.o 15exynosdrm-$(CONFIG_DRM_EXYNOS_DSI) += exynos_drm_dsi.o
16exynosdrm-$(CONFIG_DRM_EXYNOS_DP) += exynos_dp_core.o exynos_dp_reg.o 16exynosdrm-$(CONFIG_DRM_EXYNOS_DP) += exynos_dp_core.o exynos_dp_reg.o
17exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o 17exynosdrm-$(CONFIG_DRM_EXYNOS_MIXER) += exynos_mixer.o
18exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o
18exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o 19exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
19exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o 20exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
20exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o 21exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index b3c730770b0f..fbe1b3174f75 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -13,6 +13,7 @@
13#include <linux/platform_device.h> 13#include <linux/platform_device.h>
14#include <linux/clk.h> 14#include <linux/clk.h>
15#include <linux/component.h> 15#include <linux/component.h>
16#include <linux/of_device.h>
16#include <linux/of_gpio.h> 17#include <linux/of_gpio.h>
17#include <linux/pm_runtime.h> 18#include <linux/pm_runtime.h>
18 19
@@ -24,28 +25,11 @@
24#include "exynos_drm_iommu.h" 25#include "exynos_drm_iommu.h"
25 26
26#define WINDOWS_NR 3 27#define WINDOWS_NR 3
28#define CURSOR_WIN 2
27#define MIN_FB_WIDTH_FOR_16WORD_BURST 128 29#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
28 30
29struct decon_context {
30 struct device *dev;
31 struct drm_device *drm_dev;
32 struct exynos_drm_crtc *crtc;
33 struct exynos_drm_plane planes[WINDOWS_NR];
34 void __iomem *addr;
35 struct clk *clks[6];
36 unsigned int default_win;
37 unsigned long irq_flags;
38 int pipe;
39 bool suspended;
40
41#define BIT_CLKS_ENABLED 0
42#define BIT_IRQS_ENABLED 1
43 unsigned long enabled;
44 bool i80_if;
45 atomic_t win_updated;
46};
47
48static const char * const decon_clks_name[] = { 31static const char * const decon_clks_name[] = {
32 "pclk",
49 "aclk_decon", 33 "aclk_decon",
50 "aclk_smmu_decon0x", 34 "aclk_smmu_decon0x",
51 "aclk_xiu_decon0x", 35 "aclk_xiu_decon0x",
@@ -54,6 +38,32 @@ static const char * const decon_clks_name[] = {
54 "sclk_decon_eclk", 38 "sclk_decon_eclk",
55}; 39};
56 40
41enum decon_iftype {
42 IFTYPE_RGB,
43 IFTYPE_I80,
44 IFTYPE_HDMI
45};
46
47enum decon_flag_bits {
48 BIT_CLKS_ENABLED,
49 BIT_IRQS_ENABLED,
50 BIT_WIN_UPDATED,
51 BIT_SUSPENDED
52};
53
54struct decon_context {
55 struct device *dev;
56 struct drm_device *drm_dev;
57 struct exynos_drm_crtc *crtc;
58 struct exynos_drm_plane planes[WINDOWS_NR];
59 void __iomem *addr;
60 struct clk *clks[ARRAY_SIZE(decon_clks_name)];
61 int pipe;
62 unsigned long flags;
63 enum decon_iftype out_type;
64 int first_win;
65};
66
57static const uint32_t decon_formats[] = { 67static const uint32_t decon_formats[] = {
58 DRM_FORMAT_XRGB1555, 68 DRM_FORMAT_XRGB1555,
59 DRM_FORMAT_RGB565, 69 DRM_FORMAT_RGB565,
@@ -61,17 +71,24 @@ static const uint32_t decon_formats[] = {
61 DRM_FORMAT_ARGB8888, 71 DRM_FORMAT_ARGB8888,
62}; 72};
63 73
74static inline void decon_set_bits(struct decon_context *ctx, u32 reg, u32 mask,
75 u32 val)
76{
77 val = (val & mask) | (readl(ctx->addr + reg) & ~mask);
78 writel(val, ctx->addr + reg);
79}
80
64static int decon_enable_vblank(struct exynos_drm_crtc *crtc) 81static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
65{ 82{
66 struct decon_context *ctx = crtc->ctx; 83 struct decon_context *ctx = crtc->ctx;
67 u32 val; 84 u32 val;
68 85
69 if (ctx->suspended) 86 if (test_bit(BIT_SUSPENDED, &ctx->flags))
70 return -EPERM; 87 return -EPERM;
71 88
72 if (test_and_set_bit(0, &ctx->irq_flags)) { 89 if (test_and_set_bit(BIT_IRQS_ENABLED, &ctx->flags)) {
73 val = VIDINTCON0_INTEN; 90 val = VIDINTCON0_INTEN;
74 if (ctx->i80_if) 91 if (ctx->out_type == IFTYPE_I80)
75 val |= VIDINTCON0_FRAMEDONE; 92 val |= VIDINTCON0_FRAMEDONE;
76 else 93 else
77 val |= VIDINTCON0_INTFRMEN; 94 val |= VIDINTCON0_INTFRMEN;
@@ -86,79 +103,85 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
86{ 103{
87 struct decon_context *ctx = crtc->ctx; 104 struct decon_context *ctx = crtc->ctx;
88 105
89 if (ctx->suspended) 106 if (test_bit(BIT_SUSPENDED, &ctx->flags))
90 return; 107 return;
91 108
92 if (test_and_clear_bit(0, &ctx->irq_flags)) 109 if (test_and_clear_bit(BIT_IRQS_ENABLED, &ctx->flags))
93 writel(0, ctx->addr + DECON_VIDINTCON0); 110 writel(0, ctx->addr + DECON_VIDINTCON0);
94} 111}
95 112
96static void decon_setup_trigger(struct decon_context *ctx) 113static void decon_setup_trigger(struct decon_context *ctx)
97{ 114{
98 u32 val = TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F | 115 u32 val = (ctx->out_type != IFTYPE_HDMI)
99 TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN; 116 ? TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
117 TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN
118 : TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
119 TRIGCON_HWTRIGMASK_I80_RGB | TRIGCON_HWTRIGEN_I80_RGB;
100 writel(val, ctx->addr + DECON_TRIGCON); 120 writel(val, ctx->addr + DECON_TRIGCON);
101} 121}
102 122
103static void decon_commit(struct exynos_drm_crtc *crtc) 123static void decon_commit(struct exynos_drm_crtc *crtc)
104{ 124{
105 struct decon_context *ctx = crtc->ctx; 125 struct decon_context *ctx = crtc->ctx;
106 struct drm_display_mode *mode = &crtc->base.mode; 126 struct drm_display_mode *m = &crtc->base.mode;
107 u32 val; 127 u32 val;
108 128
109 if (ctx->suspended) 129 if (test_bit(BIT_SUSPENDED, &ctx->flags))
110 return; 130 return;
111 131
132 if (ctx->out_type == IFTYPE_HDMI) {
133 m->crtc_hsync_start = m->crtc_hdisplay + 10;
134 m->crtc_hsync_end = m->crtc_htotal - 92;
135 m->crtc_vsync_start = m->crtc_vdisplay + 1;
136 m->crtc_vsync_end = m->crtc_vsync_start + 1;
137 }
138
139 decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID, 0);
140
112 /* enable clock gate */ 141 /* enable clock gate */
113 val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F; 142 val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F;
114 writel(val, ctx->addr + DECON_CMU); 143 writel(val, ctx->addr + DECON_CMU);
115 144
116 /* lcd on and use command if */ 145 /* lcd on and use command if */
117 val = VIDOUT_LCD_ON; 146 val = VIDOUT_LCD_ON;
118 if (ctx->i80_if) 147 if (ctx->out_type == IFTYPE_I80)
119 val |= VIDOUT_COMMAND_IF; 148 val |= VIDOUT_COMMAND_IF;
120 else 149 else
121 val |= VIDOUT_RGB_IF; 150 val |= VIDOUT_RGB_IF;
122 writel(val, ctx->addr + DECON_VIDOUTCON0); 151 writel(val, ctx->addr + DECON_VIDOUTCON0);
123 152
124 val = VIDTCON2_LINEVAL(mode->vdisplay - 1) | 153 val = VIDTCON2_LINEVAL(m->vdisplay - 1) |
125 VIDTCON2_HOZVAL(mode->hdisplay - 1); 154 VIDTCON2_HOZVAL(m->hdisplay - 1);
126 writel(val, ctx->addr + DECON_VIDTCON2); 155 writel(val, ctx->addr + DECON_VIDTCON2);
127 156
128 if (!ctx->i80_if) { 157 if (ctx->out_type != IFTYPE_I80) {
129 val = VIDTCON00_VBPD_F( 158 val = VIDTCON00_VBPD_F(
130 mode->crtc_vtotal - mode->crtc_vsync_end) | 159 m->crtc_vtotal - m->crtc_vsync_end - 1) |
131 VIDTCON00_VFPD_F( 160 VIDTCON00_VFPD_F(
132 mode->crtc_vsync_start - mode->crtc_vdisplay); 161 m->crtc_vsync_start - m->crtc_vdisplay - 1);
133 writel(val, ctx->addr + DECON_VIDTCON00); 162 writel(val, ctx->addr + DECON_VIDTCON00);
134 163
135 val = VIDTCON01_VSPW_F( 164 val = VIDTCON01_VSPW_F(
136 mode->crtc_vsync_end - mode->crtc_vsync_start); 165 m->crtc_vsync_end - m->crtc_vsync_start - 1);
137 writel(val, ctx->addr + DECON_VIDTCON01); 166 writel(val, ctx->addr + DECON_VIDTCON01);
138 167
139 val = VIDTCON10_HBPD_F( 168 val = VIDTCON10_HBPD_F(
140 mode->crtc_htotal - mode->crtc_hsync_end) | 169 m->crtc_htotal - m->crtc_hsync_end - 1) |
141 VIDTCON10_HFPD_F( 170 VIDTCON10_HFPD_F(
142 mode->crtc_hsync_start - mode->crtc_hdisplay); 171 m->crtc_hsync_start - m->crtc_hdisplay - 1);
143 writel(val, ctx->addr + DECON_VIDTCON10); 172 writel(val, ctx->addr + DECON_VIDTCON10);
144 173
145 val = VIDTCON11_HSPW_F( 174 val = VIDTCON11_HSPW_F(
146 mode->crtc_hsync_end - mode->crtc_hsync_start); 175 m->crtc_hsync_end - m->crtc_hsync_start - 1);
147 writel(val, ctx->addr + DECON_VIDTCON11); 176 writel(val, ctx->addr + DECON_VIDTCON11);
148 } 177 }
149 178
150 decon_setup_trigger(ctx); 179 decon_setup_trigger(ctx);
151 180
152 /* enable output and display signal */ 181 /* enable output and display signal */
153 val = VIDCON0_ENVID | VIDCON0_ENVID_F; 182 decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID | VIDCON0_ENVID_F, ~0);
154 writel(val, ctx->addr + DECON_VIDCON0);
155} 183}
156 184
157#define COORDINATE_X(x) (((x) & 0xfff) << 12)
158#define COORDINATE_Y(x) ((x) & 0xfff)
159#define OFFSIZE(x) (((x) & 0x3fff) << 14)
160#define PAGEWIDTH(x) ((x) & 0x3fff)
161
162static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, 185static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
163 struct drm_framebuffer *fb) 186 struct drm_framebuffer *fb)
164{ 187{
@@ -214,16 +237,8 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
214static void decon_shadow_protect_win(struct decon_context *ctx, int win, 237static void decon_shadow_protect_win(struct decon_context *ctx, int win,
215 bool protect) 238 bool protect)
216{ 239{
217 u32 val; 240 decon_set_bits(ctx, DECON_SHADOWCON, SHADOWCON_Wx_PROTECT(win),
218 241 protect ? ~0 : 0);
219 val = readl(ctx->addr + DECON_SHADOWCON);
220
221 if (protect)
222 val |= SHADOWCON_Wx_PROTECT(win);
223 else
224 val &= ~SHADOWCON_Wx_PROTECT(win);
225
226 writel(val, ctx->addr + DECON_SHADOWCON);
227} 242}
228 243
229static void decon_atomic_begin(struct exynos_drm_crtc *crtc, 244static void decon_atomic_begin(struct exynos_drm_crtc *crtc,
@@ -231,12 +246,16 @@ static void decon_atomic_begin(struct exynos_drm_crtc *crtc,
231{ 246{
232 struct decon_context *ctx = crtc->ctx; 247 struct decon_context *ctx = crtc->ctx;
233 248
234 if (ctx->suspended) 249 if (test_bit(BIT_SUSPENDED, &ctx->flags))
235 return; 250 return;
236 251
237 decon_shadow_protect_win(ctx, plane->zpos, true); 252 decon_shadow_protect_win(ctx, plane->zpos, true);
238} 253}
239 254
255#define BIT_VAL(x, e, s) (((x) & ((1 << ((e) - (s) + 1)) - 1)) << (s))
256#define COORDINATE_X(x) BIT_VAL((x), 23, 12)
257#define COORDINATE_Y(x) BIT_VAL((x), 11, 0)
258
240static void decon_update_plane(struct exynos_drm_crtc *crtc, 259static void decon_update_plane(struct exynos_drm_crtc *crtc,
241 struct exynos_drm_plane *plane) 260 struct exynos_drm_plane *plane)
242{ 261{
@@ -247,7 +266,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
247 unsigned int pitch = state->fb->pitches[0]; 266 unsigned int pitch = state->fb->pitches[0];
248 u32 val; 267 u32 val;
249 268
250 if (ctx->suspended) 269 if (test_bit(BIT_SUSPENDED, &ctx->flags))
251 return; 270 return;
252 271
253 val = COORDINATE_X(plane->crtc_x) | COORDINATE_Y(plane->crtc_y); 272 val = COORDINATE_X(plane->crtc_x) | COORDINATE_Y(plane->crtc_y);
@@ -270,21 +289,21 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
270 val = plane->dma_addr[0] + pitch * plane->crtc_h; 289 val = plane->dma_addr[0] + pitch * plane->crtc_h;
271 writel(val, ctx->addr + DECON_VIDW0xADD1B0(win)); 290 writel(val, ctx->addr + DECON_VIDW0xADD1B0(win));
272 291
273 val = OFFSIZE(pitch - plane->crtc_w * bpp) 292 if (ctx->out_type != IFTYPE_HDMI)
274 | PAGEWIDTH(plane->crtc_w * bpp); 293 val = BIT_VAL(pitch - plane->crtc_w * bpp, 27, 14)
294 | BIT_VAL(plane->crtc_w * bpp, 13, 0);
295 else
296 val = BIT_VAL(pitch - plane->crtc_w * bpp, 29, 15)
297 | BIT_VAL(plane->crtc_w * bpp, 14, 0);
275 writel(val, ctx->addr + DECON_VIDW0xADD2(win)); 298 writel(val, ctx->addr + DECON_VIDW0xADD2(win));
276 299
277 decon_win_set_pixfmt(ctx, win, state->fb); 300 decon_win_set_pixfmt(ctx, win, state->fb);
278 301
279 /* window enable */ 302 /* window enable */
280 val = readl(ctx->addr + DECON_WINCONx(win)); 303 decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
281 val |= WINCONx_ENWIN_F;
282 writel(val, ctx->addr + DECON_WINCONx(win));
283 304
284 /* standalone update */ 305 /* standalone update */
285 val = readl(ctx->addr + DECON_UPDATE); 306 decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
286 val |= STANDALONE_UPDATE_F;
287 writel(val, ctx->addr + DECON_UPDATE);
288} 307}
289 308
290static void decon_disable_plane(struct exynos_drm_crtc *crtc, 309static void decon_disable_plane(struct exynos_drm_crtc *crtc,
@@ -292,24 +311,19 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
292{ 311{
293 struct decon_context *ctx = crtc->ctx; 312 struct decon_context *ctx = crtc->ctx;
294 unsigned int win = plane->zpos; 313 unsigned int win = plane->zpos;
295 u32 val;
296 314
297 if (ctx->suspended) 315 if (test_bit(BIT_SUSPENDED, &ctx->flags))
298 return; 316 return;
299 317
300 decon_shadow_protect_win(ctx, win, true); 318 decon_shadow_protect_win(ctx, win, true);
301 319
302 /* window disable */ 320 /* window disable */
303 val = readl(ctx->addr + DECON_WINCONx(win)); 321 decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
304 val &= ~WINCONx_ENWIN_F;
305 writel(val, ctx->addr + DECON_WINCONx(win));
306 322
307 decon_shadow_protect_win(ctx, win, false); 323 decon_shadow_protect_win(ctx, win, false);
308 324
309 /* standalone update */ 325 /* standalone update */
310 val = readl(ctx->addr + DECON_UPDATE); 326 decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
311 val |= STANDALONE_UPDATE_F;
312 writel(val, ctx->addr + DECON_UPDATE);
313} 327}
314 328
315static void decon_atomic_flush(struct exynos_drm_crtc *crtc, 329static void decon_atomic_flush(struct exynos_drm_crtc *crtc,
@@ -317,13 +331,13 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc,
317{ 331{
318 struct decon_context *ctx = crtc->ctx; 332 struct decon_context *ctx = crtc->ctx;
319 333
320 if (ctx->suspended) 334 if (test_bit(BIT_SUSPENDED, &ctx->flags))
321 return; 335 return;
322 336
323 decon_shadow_protect_win(ctx, plane->zpos, false); 337 decon_shadow_protect_win(ctx, plane->zpos, false);
324 338
325 if (ctx->i80_if) 339 if (ctx->out_type == IFTYPE_I80)
326 atomic_set(&ctx->win_updated, 1); 340 set_bit(BIT_WIN_UPDATED, &ctx->flags);
327} 341}
328 342
329static void decon_swreset(struct decon_context *ctx) 343static void decon_swreset(struct decon_context *ctx)
@@ -347,6 +361,17 @@ static void decon_swreset(struct decon_context *ctx)
347 } 361 }
348 362
349 WARN(tries == 0, "failed to software reset DECON\n"); 363 WARN(tries == 0, "failed to software reset DECON\n");
364
365 if (ctx->out_type != IFTYPE_HDMI)
366 return;
367
368 writel(VIDCON0_CLKVALUP | VIDCON0_VLCKFREE, ctx->addr + DECON_VIDCON0);
369 decon_set_bits(ctx, DECON_CMU,
370 CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F, ~0);
371 writel(VIDCON1_VCLK_RUN_VDEN_DISABLE, ctx->addr + DECON_VIDCON1);
372 writel(CRCCTRL_CRCEN | CRCCTRL_CRCSTART_F | CRCCTRL_CRCCLKEN,
373 ctx->addr + DECON_CRCCTRL);
374 decon_setup_trigger(ctx);
350} 375}
351 376
352static void decon_enable(struct exynos_drm_crtc *crtc) 377static void decon_enable(struct exynos_drm_crtc *crtc)
@@ -355,11 +380,9 @@ static void decon_enable(struct exynos_drm_crtc *crtc)
355 int ret; 380 int ret;
356 int i; 381 int i;
357 382
358 if (!ctx->suspended) 383 if (!test_and_clear_bit(BIT_SUSPENDED, &ctx->flags))
359 return; 384 return;
360 385
361 ctx->suspended = false;
362
363 pm_runtime_get_sync(ctx->dev); 386 pm_runtime_get_sync(ctx->dev);
364 387
365 for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) { 388 for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
@@ -368,10 +391,10 @@ static void decon_enable(struct exynos_drm_crtc *crtc)
368 goto err; 391 goto err;
369 } 392 }
370 393
371 set_bit(BIT_CLKS_ENABLED, &ctx->enabled); 394 set_bit(BIT_CLKS_ENABLED, &ctx->flags);
372 395
373 /* if vblank was enabled status, enable it again. */ 396 /* if vblank was enabled status, enable it again. */
374 if (test_and_clear_bit(0, &ctx->irq_flags)) 397 if (test_and_clear_bit(BIT_IRQS_ENABLED, &ctx->flags))
375 decon_enable_vblank(ctx->crtc); 398 decon_enable_vblank(ctx->crtc);
376 399
377 decon_commit(ctx->crtc); 400 decon_commit(ctx->crtc);
@@ -381,7 +404,7 @@ err:
381 while (--i >= 0) 404 while (--i >= 0)
382 clk_disable_unprepare(ctx->clks[i]); 405 clk_disable_unprepare(ctx->clks[i]);
383 406
384 ctx->suspended = true; 407 set_bit(BIT_SUSPENDED, &ctx->flags);
385} 408}
386 409
387static void decon_disable(struct exynos_drm_crtc *crtc) 410static void decon_disable(struct exynos_drm_crtc *crtc)
@@ -389,7 +412,7 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
389 struct decon_context *ctx = crtc->ctx; 412 struct decon_context *ctx = crtc->ctx;
390 int i; 413 int i;
391 414
392 if (ctx->suspended) 415 if (test_bit(BIT_SUSPENDED, &ctx->flags))
393 return; 416 return;
394 417
395 /* 418 /*
@@ -397,7 +420,7 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
397 * suspend that connector. Otherwise we might try to scan from 420 * suspend that connector. Otherwise we might try to scan from
398 * a destroyed buffer later. 421 * a destroyed buffer later.
399 */ 422 */
400 for (i = 0; i < WINDOWS_NR; i++) 423 for (i = ctx->first_win; i < WINDOWS_NR; i++)
401 decon_disable_plane(crtc, &ctx->planes[i]); 424 decon_disable_plane(crtc, &ctx->planes[i]);
402 425
403 decon_swreset(ctx); 426 decon_swreset(ctx);
@@ -405,27 +428,22 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
405 for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) 428 for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++)
406 clk_disable_unprepare(ctx->clks[i]); 429 clk_disable_unprepare(ctx->clks[i]);
407 430
408 clear_bit(BIT_CLKS_ENABLED, &ctx->enabled); 431 clear_bit(BIT_CLKS_ENABLED, &ctx->flags);
409 432
410 pm_runtime_put_sync(ctx->dev); 433 pm_runtime_put_sync(ctx->dev);
411 434
412 ctx->suspended = true; 435 set_bit(BIT_SUSPENDED, &ctx->flags);
413} 436}
414 437
415void decon_te_irq_handler(struct exynos_drm_crtc *crtc) 438void decon_te_irq_handler(struct exynos_drm_crtc *crtc)
416{ 439{
417 struct decon_context *ctx = crtc->ctx; 440 struct decon_context *ctx = crtc->ctx;
418 u32 val;
419 441
420 if (!test_bit(BIT_CLKS_ENABLED, &ctx->enabled)) 442 if (!test_bit(BIT_CLKS_ENABLED, &ctx->flags))
421 return; 443 return;
422 444
423 if (atomic_add_unless(&ctx->win_updated, -1, 0)) { 445 if (test_and_clear_bit(BIT_WIN_UPDATED, &ctx->flags))
424 /* trigger */ 446 decon_set_bits(ctx, DECON_TRIGCON, TRIGCON_SWTRIGCMD, ~0);
425 val = readl(ctx->addr + DECON_TRIGCON);
426 val |= TRIGCON_SWTRIGCMD;
427 writel(val, ctx->addr + DECON_TRIGCON);
428 }
429 447
430 drm_crtc_handle_vblank(&ctx->crtc->base); 448 drm_crtc_handle_vblank(&ctx->crtc->base);
431} 449}
@@ -434,7 +452,6 @@ static void decon_clear_channels(struct exynos_drm_crtc *crtc)
434{ 452{
435 struct decon_context *ctx = crtc->ctx; 453 struct decon_context *ctx = crtc->ctx;
436 int win, i, ret; 454 int win, i, ret;
437 u32 val;
438 455
439 DRM_DEBUG_KMS("%s\n", __FILE__); 456 DRM_DEBUG_KMS("%s\n", __FILE__);
440 457
@@ -445,25 +462,10 @@ static void decon_clear_channels(struct exynos_drm_crtc *crtc)
445 } 462 }
446 463
447 for (win = 0; win < WINDOWS_NR; win++) { 464 for (win = 0; win < WINDOWS_NR; win++) {
448 /* shadow update disable */ 465 decon_shadow_protect_win(ctx, win, true);
449 val = readl(ctx->addr + DECON_SHADOWCON); 466 decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
450 val |= SHADOWCON_Wx_PROTECT(win); 467 decon_shadow_protect_win(ctx, win, false);
451 writel(val, ctx->addr + DECON_SHADOWCON); 468 decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
452
453 /* window disable */
454 val = readl(ctx->addr + DECON_WINCONx(win));
455 val &= ~WINCONx_ENWIN_F;
456 writel(val, ctx->addr + DECON_WINCONx(win));
457
458 /* shadow update enable */
459 val = readl(ctx->addr + DECON_SHADOWCON);
460 val &= ~SHADOWCON_Wx_PROTECT(win);
461 writel(val, ctx->addr + DECON_SHADOWCON);
462
463 /* standalone update */
464 val = readl(ctx->addr + DECON_UPDATE);
465 val |= STANDALONE_UPDATE_F;
466 writel(val, ctx->addr + DECON_UPDATE);
467 } 469 }
468 /* TODO: wait for possible vsync */ 470 /* TODO: wait for possible vsync */
469 msleep(50); 471 msleep(50);
@@ -479,7 +481,6 @@ static struct exynos_drm_crtc_ops decon_crtc_ops = {
479 .commit = decon_commit, 481 .commit = decon_commit,
480 .enable_vblank = decon_enable_vblank, 482 .enable_vblank = decon_enable_vblank,
481 .disable_vblank = decon_disable_vblank, 483 .disable_vblank = decon_disable_vblank,
482 .commit = decon_commit,
483 .atomic_begin = decon_atomic_begin, 484 .atomic_begin = decon_atomic_begin,
484 .update_plane = decon_update_plane, 485 .update_plane = decon_update_plane,
485 .disable_plane = decon_disable_plane, 486 .disable_plane = decon_disable_plane,
@@ -493,26 +494,30 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
493 struct drm_device *drm_dev = data; 494 struct drm_device *drm_dev = data;
494 struct exynos_drm_private *priv = drm_dev->dev_private; 495 struct exynos_drm_private *priv = drm_dev->dev_private;
495 struct exynos_drm_plane *exynos_plane; 496 struct exynos_drm_plane *exynos_plane;
497 enum exynos_drm_output_type out_type;
496 enum drm_plane_type type; 498 enum drm_plane_type type;
497 unsigned int zpos; 499 unsigned int win;
498 int ret; 500 int ret;
499 501
500 ctx->drm_dev = drm_dev; 502 ctx->drm_dev = drm_dev;
501 ctx->pipe = priv->pipe++; 503 ctx->pipe = priv->pipe++;
502 504
503 for (zpos = 0; zpos < WINDOWS_NR; zpos++) { 505 for (win = ctx->first_win; win < WINDOWS_NR; win++) {
504 type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY : 506 int tmp = (win == ctx->first_win) ? 0 : win;
505 DRM_PLANE_TYPE_OVERLAY; 507
506 ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], 508 type = exynos_plane_get_type(tmp, CURSOR_WIN);
509 ret = exynos_plane_init(drm_dev, &ctx->planes[win],
507 1 << ctx->pipe, type, decon_formats, 510 1 << ctx->pipe, type, decon_formats,
508 ARRAY_SIZE(decon_formats), zpos); 511 ARRAY_SIZE(decon_formats), win);
509 if (ret) 512 if (ret)
510 return ret; 513 return ret;
511 } 514 }
512 515
513 exynos_plane = &ctx->planes[ctx->default_win]; 516 exynos_plane = &ctx->planes[ctx->first_win];
517 out_type = (ctx->out_type == IFTYPE_HDMI) ? EXYNOS_DISPLAY_TYPE_HDMI
518 : EXYNOS_DISPLAY_TYPE_LCD;
514 ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base, 519 ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
515 ctx->pipe, EXYNOS_DISPLAY_TYPE_LCD, 520 ctx->pipe, out_type,
516 &decon_crtc_ops, ctx); 521 &decon_crtc_ops, ctx);
517 if (IS_ERR(ctx->crtc)) { 522 if (IS_ERR(ctx->crtc)) {
518 ret = PTR_ERR(ctx->crtc); 523 ret = PTR_ERR(ctx->crtc);
@@ -546,38 +551,20 @@ static const struct component_ops decon_component_ops = {
546 .unbind = decon_unbind, 551 .unbind = decon_unbind,
547}; 552};
548 553
549static irqreturn_t decon_vsync_irq_handler(int irq, void *dev_id) 554static irqreturn_t decon_irq_handler(int irq, void *dev_id)
550{
551 struct decon_context *ctx = dev_id;
552 u32 val;
553
554 if (!test_bit(BIT_CLKS_ENABLED, &ctx->enabled))
555 goto out;
556
557 val = readl(ctx->addr + DECON_VIDINTCON1);
558 if (val & VIDINTCON1_INTFRMPEND) {
559 drm_crtc_handle_vblank(&ctx->crtc->base);
560
561 /* clear */
562 writel(VIDINTCON1_INTFRMPEND, ctx->addr + DECON_VIDINTCON1);
563 }
564
565out:
566 return IRQ_HANDLED;
567}
568
569static irqreturn_t decon_lcd_sys_irq_handler(int irq, void *dev_id)
570{ 555{
571 struct decon_context *ctx = dev_id; 556 struct decon_context *ctx = dev_id;
572 u32 val; 557 u32 val;
573 int win; 558 int win;
574 559
575 if (!test_bit(BIT_CLKS_ENABLED, &ctx->enabled)) 560 if (!test_bit(BIT_CLKS_ENABLED, &ctx->flags))
576 goto out; 561 goto out;
577 562
578 val = readl(ctx->addr + DECON_VIDINTCON1); 563 val = readl(ctx->addr + DECON_VIDINTCON1);
579 if (val & VIDINTCON1_INTFRMDONEPEND) { 564 val &= VIDINTCON1_INTFRMDONEPEND | VIDINTCON1_INTFRMPEND;
580 for (win = 0 ; win < WINDOWS_NR ; win++) { 565
566 if (val) {
567 for (win = ctx->first_win; win < WINDOWS_NR ; win++) {
581 struct exynos_drm_plane *plane = &ctx->planes[win]; 568 struct exynos_drm_plane *plane = &ctx->planes[win];
582 569
583 if (!plane->pending_fb) 570 if (!plane->pending_fb)
@@ -587,16 +574,29 @@ static irqreturn_t decon_lcd_sys_irq_handler(int irq, void *dev_id)
587 } 574 }
588 575
589 /* clear */ 576 /* clear */
590 writel(VIDINTCON1_INTFRMDONEPEND, 577 writel(val, ctx->addr + DECON_VIDINTCON1);
591 ctx->addr + DECON_VIDINTCON1);
592 } 578 }
593 579
594out: 580out:
595 return IRQ_HANDLED; 581 return IRQ_HANDLED;
596} 582}
597 583
584static const struct of_device_id exynos5433_decon_driver_dt_match[] = {
585 {
586 .compatible = "samsung,exynos5433-decon",
587 .data = (void *)IFTYPE_RGB
588 },
589 {
590 .compatible = "samsung,exynos5433-decon-tv",
591 .data = (void *)IFTYPE_HDMI
592 },
593 {},
594};
595MODULE_DEVICE_TABLE(of, exynos5433_decon_driver_dt_match);
596
598static int exynos5433_decon_probe(struct platform_device *pdev) 597static int exynos5433_decon_probe(struct platform_device *pdev)
599{ 598{
599 const struct of_device_id *of_id;
600 struct device *dev = &pdev->dev; 600 struct device *dev = &pdev->dev;
601 struct decon_context *ctx; 601 struct decon_context *ctx;
602 struct resource *res; 602 struct resource *res;
@@ -607,11 +607,16 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
607 if (!ctx) 607 if (!ctx)
608 return -ENOMEM; 608 return -ENOMEM;
609 609
610 ctx->default_win = 0; 610 __set_bit(BIT_SUSPENDED, &ctx->flags);
611 ctx->suspended = true;
612 ctx->dev = dev; 611 ctx->dev = dev;
613 if (of_get_child_by_name(dev->of_node, "i80-if-timings")) 612
614 ctx->i80_if = true; 613 of_id = of_match_device(exynos5433_decon_driver_dt_match, &pdev->dev);
614 ctx->out_type = (enum decon_iftype)of_id->data;
615
616 if (ctx->out_type == IFTYPE_HDMI)
617 ctx->first_win = 1;
618 else if (of_get_child_by_name(dev->of_node, "i80-if-timings"))
619 ctx->out_type = IFTYPE_I80;
615 620
616 for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) { 621 for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
617 struct clk *clk; 622 struct clk *clk;
@@ -636,15 +641,14 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
636 } 641 }
637 642
638 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, 643 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
639 ctx->i80_if ? "lcd_sys" : "vsync"); 644 (ctx->out_type == IFTYPE_I80) ? "lcd_sys" : "vsync");
640 if (!res) { 645 if (!res) {
641 dev_err(dev, "cannot find IRQ resource\n"); 646 dev_err(dev, "cannot find IRQ resource\n");
642 return -ENXIO; 647 return -ENXIO;
643 } 648 }
644 649
645 ret = devm_request_irq(dev, res->start, ctx->i80_if ? 650 ret = devm_request_irq(dev, res->start, decon_irq_handler, 0,
646 decon_lcd_sys_irq_handler : decon_vsync_irq_handler, 0, 651 "drm_decon", ctx);
647 "drm_decon", ctx);
648 if (ret < 0) { 652 if (ret < 0) {
649 dev_err(dev, "lcd_sys irq request failed\n"); 653 dev_err(dev, "lcd_sys irq request failed\n");
650 return ret; 654 return ret;
@@ -675,12 +679,6 @@ static int exynos5433_decon_remove(struct platform_device *pdev)
675 return 0; 679 return 0;
676} 680}
677 681
678static const struct of_device_id exynos5433_decon_driver_dt_match[] = {
679 { .compatible = "samsung,exynos5433-decon" },
680 {},
681};
682MODULE_DEVICE_TABLE(of, exynos5433_decon_driver_dt_match);
683
684struct platform_driver exynos5433_decon_driver = { 682struct platform_driver exynos5433_decon_driver = {
685 .probe = exynos5433_decon_probe, 683 .probe = exynos5433_decon_probe,
686 .remove = exynos5433_decon_remove, 684 .remove = exynos5433_decon_remove,
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index e6cbaca821a4..ead2b16e237d 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -40,6 +40,7 @@
40#define MIN_FB_WIDTH_FOR_16WORD_BURST 128 40#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
41 41
42#define WINDOWS_NR 2 42#define WINDOWS_NR 2
43#define CURSOR_WIN 1
43 44
44struct decon_context { 45struct decon_context {
45 struct device *dev; 46 struct device *dev;
@@ -51,7 +52,6 @@ struct decon_context {
51 struct clk *eclk; 52 struct clk *eclk;
52 struct clk *vclk; 53 struct clk *vclk;
53 void __iomem *regs; 54 void __iomem *regs;
54 unsigned int default_win;
55 unsigned long irq_flags; 55 unsigned long irq_flags;
56 bool i80_if; 56 bool i80_if;
57 bool suspended; 57 bool suspended;
@@ -690,8 +690,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
690 } 690 }
691 691
692 for (zpos = 0; zpos < WINDOWS_NR; zpos++) { 692 for (zpos = 0; zpos < WINDOWS_NR; zpos++) {
693 type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY : 693 type = exynos_plane_get_type(zpos, CURSOR_WIN);
694 DRM_PLANE_TYPE_OVERLAY;
695 ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], 694 ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
696 1 << ctx->pipe, type, decon_formats, 695 1 << ctx->pipe, type, decon_formats,
697 ARRAY_SIZE(decon_formats), zpos); 696 ARRAY_SIZE(decon_formats), zpos);
@@ -699,7 +698,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
699 return ret; 698 return ret;
700 } 699 }
701 700
702 exynos_plane = &ctx->planes[ctx->default_win]; 701 exynos_plane = &ctx->planes[DEFAULT_WIN];
703 ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base, 702 ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
704 ctx->pipe, EXYNOS_DISPLAY_TYPE_LCD, 703 ctx->pipe, EXYNOS_DISPLAY_TYPE_LCD,
705 &decon_crtc_ops, ctx); 704 &decon_crtc_ops, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index ed28823d3b35..b3ba27fd9a6b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -50,6 +50,17 @@ exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
50 exynos_crtc->ops->commit(exynos_crtc); 50 exynos_crtc->ops->commit(exynos_crtc);
51} 51}
52 52
53static int exynos_crtc_atomic_check(struct drm_crtc *crtc,
54 struct drm_crtc_state *state)
55{
56 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
57
58 if (exynos_crtc->ops->atomic_check)
59 return exynos_crtc->ops->atomic_check(exynos_crtc, state);
60
61 return 0;
62}
63
53static void exynos_crtc_atomic_begin(struct drm_crtc *crtc, 64static void exynos_crtc_atomic_begin(struct drm_crtc *crtc,
54 struct drm_crtc_state *old_crtc_state) 65 struct drm_crtc_state *old_crtc_state)
55{ 66{
@@ -86,6 +97,7 @@ static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
86 .enable = exynos_drm_crtc_enable, 97 .enable = exynos_drm_crtc_enable,
87 .disable = exynos_drm_crtc_disable, 98 .disable = exynos_drm_crtc_disable,
88 .mode_set_nofb = exynos_drm_crtc_mode_set_nofb, 99 .mode_set_nofb = exynos_drm_crtc_mode_set_nofb,
100 .atomic_check = exynos_crtc_atomic_check,
89 .atomic_begin = exynos_crtc_atomic_begin, 101 .atomic_begin = exynos_crtc_atomic_begin,
90 .atomic_flush = exynos_crtc_atomic_flush, 102 .atomic_flush = exynos_crtc_atomic_flush,
91}; 103};
@@ -152,7 +164,7 @@ err_crtc:
152 return ERR_PTR(ret); 164 return ERR_PTR(ret);
153} 165}
154 166
155int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe) 167int exynos_drm_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe)
156{ 168{
157 struct exynos_drm_private *private = dev->dev_private; 169 struct exynos_drm_private *private = dev->dev_private;
158 struct exynos_drm_crtc *exynos_crtc = 170 struct exynos_drm_crtc *exynos_crtc =
@@ -164,7 +176,7 @@ int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
164 return 0; 176 return 0;
165} 177}
166 178
167void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe) 179void exynos_drm_crtc_disable_vblank(struct drm_device *dev, unsigned int pipe)
168{ 180{
169 struct exynos_drm_private *private = dev->dev_private; 181 struct exynos_drm_private *private = dev->dev_private;
170 struct exynos_drm_crtc *exynos_crtc = 182 struct exynos_drm_crtc *exynos_crtc =
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index f87d4abda6f7..f9f365bd0257 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -23,8 +23,8 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
23 enum exynos_drm_output_type type, 23 enum exynos_drm_output_type type,
24 const struct exynos_drm_crtc_ops *ops, 24 const struct exynos_drm_crtc_ops *ops,
25 void *context); 25 void *context);
26int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe); 26int exynos_drm_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe);
27void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe); 27void exynos_drm_crtc_disable_vblank(struct drm_device *dev, unsigned int pipe);
28void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc); 28void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc);
29void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc, 29void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc,
30 struct exynos_drm_plane *exynos_plane); 30 struct exynos_drm_plane *exynos_plane);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index ae9e6b2d3758..2c6019d6a205 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -105,7 +105,7 @@ static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
105 atomic_inc(&exynos_crtc->pending_update); 105 atomic_inc(&exynos_crtc->pending_update);
106 } 106 }
107 107
108 drm_atomic_helper_commit_planes(dev, state); 108 drm_atomic_helper_commit_planes(dev, state, false);
109 109
110 exynos_atomic_wait_for_commit(state); 110 exynos_atomic_wait_for_commit(state);
111 111
@@ -405,25 +405,25 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
405 405
406static const struct drm_ioctl_desc exynos_ioctls[] = { 406static const struct drm_ioctl_desc exynos_ioctls[] = {
407 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl, 407 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
408 DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW), 408 DRM_AUTH | DRM_RENDER_ALLOW),
409 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, exynos_drm_gem_get_ioctl, 409 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, exynos_drm_gem_get_ioctl,
410 DRM_UNLOCKED | DRM_RENDER_ALLOW), 410 DRM_RENDER_ALLOW),
411 DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl, 411 DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl,
412 DRM_UNLOCKED | DRM_AUTH), 412 DRM_AUTH),
413 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER, exynos_g2d_get_ver_ioctl, 413 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER, exynos_g2d_get_ver_ioctl,
414 DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW), 414 DRM_AUTH | DRM_RENDER_ALLOW),
415 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST, exynos_g2d_set_cmdlist_ioctl, 415 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST, exynos_g2d_set_cmdlist_ioctl,
416 DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW), 416 DRM_AUTH | DRM_RENDER_ALLOW),
417 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl, 417 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl,
418 DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW), 418 DRM_AUTH | DRM_RENDER_ALLOW),
419 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY, exynos_drm_ipp_get_property, 419 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY, exynos_drm_ipp_get_property,
420 DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW), 420 DRM_AUTH | DRM_RENDER_ALLOW),
421 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY, exynos_drm_ipp_set_property, 421 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY, exynos_drm_ipp_set_property,
422 DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW), 422 DRM_AUTH | DRM_RENDER_ALLOW),
423 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF, exynos_drm_ipp_queue_buf, 423 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF, exynos_drm_ipp_queue_buf,
424 DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW), 424 DRM_AUTH | DRM_RENDER_ALLOW),
425 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL, exynos_drm_ipp_cmd_ctrl, 425 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL, exynos_drm_ipp_cmd_ctrl,
426 DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW), 426 DRM_AUTH | DRM_RENDER_ALLOW),
427}; 427};
428 428
429static const struct file_operations exynos_drm_driver_fops = { 429static const struct file_operations exynos_drm_driver_fops = {
@@ -449,7 +449,7 @@ static struct drm_driver exynos_drm_driver = {
449 .lastclose = exynos_drm_lastclose, 449 .lastclose = exynos_drm_lastclose,
450 .postclose = exynos_drm_postclose, 450 .postclose = exynos_drm_postclose,
451 .set_busid = drm_platform_set_busid, 451 .set_busid = drm_platform_set_busid,
452 .get_vblank_counter = drm_vblank_count, 452 .get_vblank_counter = drm_vblank_no_hw_counter,
453 .enable_vblank = exynos_drm_crtc_enable_vblank, 453 .enable_vblank = exynos_drm_crtc_enable_vblank,
454 .disable_vblank = exynos_drm_crtc_disable_vblank, 454 .disable_vblank = exynos_drm_crtc_disable_vblank,
455 .gem_free_object = exynos_drm_gem_free_object, 455 .gem_free_object = exynos_drm_gem_free_object,
@@ -529,8 +529,10 @@ static struct platform_driver *const exynos_drm_kms_drivers[] = {
529#ifdef CONFIG_DRM_EXYNOS_DSI 529#ifdef CONFIG_DRM_EXYNOS_DSI
530 &dsi_driver, 530 &dsi_driver,
531#endif 531#endif
532#ifdef CONFIG_DRM_EXYNOS_HDMI 532#ifdef CONFIG_DRM_EXYNOS_MIXER
533 &mixer_driver, 533 &mixer_driver,
534#endif
535#ifdef CONFIG_DRM_EXYNOS_HDMI
534 &hdmi_driver, 536 &hdmi_driver,
535#endif 537#endif
536#ifdef CONFIG_DRM_EXYNOS_VIDI 538#ifdef CONFIG_DRM_EXYNOS_VIDI
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 6c717ba672db..f1eda7fa4e3c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -22,6 +22,8 @@
22#define MAX_PLANE 5 22#define MAX_PLANE 5
23#define MAX_FB_BUFFER 4 23#define MAX_FB_BUFFER 4
24 24
25#define DEFAULT_WIN 0
26
25#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc, base) 27#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc, base)
26#define to_exynos_plane(x) container_of(x, struct exynos_drm_plane, base) 28#define to_exynos_plane(x) container_of(x, struct exynos_drm_plane, base)
27 29
@@ -87,6 +89,7 @@ struct exynos_drm_plane {
87 * @disable_vblank: specific driver callback for disabling vblank interrupt. 89 * @disable_vblank: specific driver callback for disabling vblank interrupt.
88 * @wait_for_vblank: wait for vblank interrupt to make sure that 90 * @wait_for_vblank: wait for vblank interrupt to make sure that
89 * hardware overlay is updated. 91 * hardware overlay is updated.
92 * @atomic_check: validate state
90 * @atomic_begin: prepare a window to receive a update 93 * @atomic_begin: prepare a window to receive a update
91 * @atomic_flush: mark the end of a window update 94 * @atomic_flush: mark the end of a window update
92 * @update_plane: apply hardware specific overlay data to registers. 95 * @update_plane: apply hardware specific overlay data to registers.
@@ -106,6 +109,8 @@ struct exynos_drm_crtc_ops {
106 int (*enable_vblank)(struct exynos_drm_crtc *crtc); 109 int (*enable_vblank)(struct exynos_drm_crtc *crtc);
107 void (*disable_vblank)(struct exynos_drm_crtc *crtc); 110 void (*disable_vblank)(struct exynos_drm_crtc *crtc);
108 void (*wait_for_vblank)(struct exynos_drm_crtc *crtc); 111 void (*wait_for_vblank)(struct exynos_drm_crtc *crtc);
112 int (*atomic_check)(struct exynos_drm_crtc *crtc,
113 struct drm_crtc_state *state);
109 void (*atomic_begin)(struct exynos_drm_crtc *crtc, 114 void (*atomic_begin)(struct exynos_drm_crtc *crtc,
110 struct exynos_drm_plane *plane); 115 struct exynos_drm_plane *plane);
111 void (*update_plane)(struct exynos_drm_crtc *crtc, 116 void (*update_plane)(struct exynos_drm_crtc *crtc,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 084280859589..fcea28bdbc42 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -32,15 +32,15 @@
32 * exynos specific framebuffer structure. 32 * exynos specific framebuffer structure.
33 * 33 *
34 * @fb: drm framebuffer obejct. 34 * @fb: drm framebuffer obejct.
35 * @exynos_gem_obj: array of exynos specific gem object containing a gem object. 35 * @exynos_gem: array of exynos specific gem object containing a gem object.
36 */ 36 */
37struct exynos_drm_fb { 37struct exynos_drm_fb {
38 struct drm_framebuffer fb; 38 struct drm_framebuffer fb;
39 struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER]; 39 struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
40}; 40};
41 41
42static int check_fb_gem_memory_type(struct drm_device *drm_dev, 42static int check_fb_gem_memory_type(struct drm_device *drm_dev,
43 struct exynos_drm_gem_obj *exynos_gem_obj) 43 struct exynos_drm_gem *exynos_gem)
44{ 44{
45 unsigned int flags; 45 unsigned int flags;
46 46
@@ -51,7 +51,7 @@ static int check_fb_gem_memory_type(struct drm_device *drm_dev,
51 if (is_drm_iommu_supported(drm_dev)) 51 if (is_drm_iommu_supported(drm_dev))
52 return 0; 52 return 0;
53 53
54 flags = exynos_gem_obj->flags; 54 flags = exynos_gem->flags;
55 55
56 /* 56 /*
57 * without iommu support, not support physically non-continuous memory 57 * without iommu support, not support physically non-continuous memory
@@ -75,13 +75,13 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
75 75
76 drm_framebuffer_cleanup(fb); 76 drm_framebuffer_cleanup(fb);
77 77
78 for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) { 78 for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem); i++) {
79 struct drm_gem_object *obj; 79 struct drm_gem_object *obj;
80 80
81 if (exynos_fb->exynos_gem_obj[i] == NULL) 81 if (exynos_fb->exynos_gem[i] == NULL)
82 continue; 82 continue;
83 83
84 obj = &exynos_fb->exynos_gem_obj[i]->base; 84 obj = &exynos_fb->exynos_gem[i]->base;
85 drm_gem_object_unreference_unlocked(obj); 85 drm_gem_object_unreference_unlocked(obj);
86 } 86 }
87 87
@@ -96,7 +96,7 @@ static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
96 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 96 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
97 97
98 return drm_gem_handle_create(file_priv, 98 return drm_gem_handle_create(file_priv,
99 &exynos_fb->exynos_gem_obj[0]->base, handle); 99 &exynos_fb->exynos_gem[0]->base, handle);
100} 100}
101 101
102static int exynos_drm_fb_dirty(struct drm_framebuffer *fb, 102static int exynos_drm_fb_dirty(struct drm_framebuffer *fb,
@@ -118,7 +118,7 @@ static struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
118struct drm_framebuffer * 118struct drm_framebuffer *
119exynos_drm_framebuffer_init(struct drm_device *dev, 119exynos_drm_framebuffer_init(struct drm_device *dev,
120 struct drm_mode_fb_cmd2 *mode_cmd, 120 struct drm_mode_fb_cmd2 *mode_cmd,
121 struct exynos_drm_gem_obj **gem_obj, 121 struct exynos_drm_gem **exynos_gem,
122 int count) 122 int count)
123{ 123{
124 struct exynos_drm_fb *exynos_fb; 124 struct exynos_drm_fb *exynos_fb;
@@ -130,11 +130,11 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
130 return ERR_PTR(-ENOMEM); 130 return ERR_PTR(-ENOMEM);
131 131
132 for (i = 0; i < count; i++) { 132 for (i = 0; i < count; i++) {
133 ret = check_fb_gem_memory_type(dev, gem_obj[i]); 133 ret = check_fb_gem_memory_type(dev, exynos_gem[i]);
134 if (ret < 0) 134 if (ret < 0)
135 goto err; 135 goto err;
136 136
137 exynos_fb->exynos_gem_obj[i] = gem_obj[i]; 137 exynos_fb->exynos_gem[i] = exynos_gem[i];
138 } 138 }
139 139
140 drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd); 140 drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
@@ -156,7 +156,7 @@ static struct drm_framebuffer *
156exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, 156exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
157 struct drm_mode_fb_cmd2 *mode_cmd) 157 struct drm_mode_fb_cmd2 *mode_cmd)
158{ 158{
159 struct exynos_drm_gem_obj *gem_objs[MAX_FB_BUFFER]; 159 struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
160 struct drm_gem_object *obj; 160 struct drm_gem_object *obj;
161 struct drm_framebuffer *fb; 161 struct drm_framebuffer *fb;
162 int i; 162 int i;
@@ -171,10 +171,10 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
171 goto err; 171 goto err;
172 } 172 }
173 173
174 gem_objs[i] = to_exynos_gem_obj(obj); 174 exynos_gem[i] = to_exynos_gem(obj);
175 } 175 }
176 176
177 fb = exynos_drm_framebuffer_init(dev, mode_cmd, gem_objs, i); 177 fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
178 if (IS_ERR(fb)) { 178 if (IS_ERR(fb)) {
179 ret = PTR_ERR(fb); 179 ret = PTR_ERR(fb);
180 goto err; 180 goto err;
@@ -184,27 +184,26 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
184 184
185err: 185err:
186 while (i--) 186 while (i--)
187 drm_gem_object_unreference_unlocked(&gem_objs[i]->base); 187 drm_gem_object_unreference_unlocked(&exynos_gem[i]->base);
188 188
189 return ERR_PTR(ret); 189 return ERR_PTR(ret);
190} 190}
191 191
192struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb, 192struct exynos_drm_gem *exynos_drm_fb_gem(struct drm_framebuffer *fb, int index)
193 int index)
194{ 193{
195 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 194 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
196 struct exynos_drm_gem_obj *obj; 195 struct exynos_drm_gem *exynos_gem;
197 196
198 if (index >= MAX_FB_BUFFER) 197 if (index >= MAX_FB_BUFFER)
199 return NULL; 198 return NULL;
200 199
201 obj = exynos_fb->exynos_gem_obj[index]; 200 exynos_gem = exynos_fb->exynos_gem[index];
202 if (!obj) 201 if (!exynos_gem)
203 return NULL; 202 return NULL;
204 203
205 DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)obj->dma_addr); 204 DRM_DEBUG_KMS("dma_addr: 0x%lx\n", (unsigned long)exynos_gem->dma_addr);
206 205
207 return obj; 206 return exynos_gem;
208} 207}
209 208
210static void exynos_drm_output_poll_changed(struct drm_device *dev) 209static void exynos_drm_output_poll_changed(struct drm_device *dev)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index 85e4445b920e..726a2d44371f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -19,12 +19,11 @@
19struct drm_framebuffer * 19struct drm_framebuffer *
20exynos_drm_framebuffer_init(struct drm_device *dev, 20exynos_drm_framebuffer_init(struct drm_device *dev,
21 struct drm_mode_fb_cmd2 *mode_cmd, 21 struct drm_mode_fb_cmd2 *mode_cmd,
22 struct exynos_drm_gem_obj **gem_obj, 22 struct exynos_drm_gem **exynos_gem,
23 int count); 23 int count);
24 24
25/* get gem object of a drm framebuffer */ 25/* get gem object of a drm framebuffer */
26struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb, 26struct exynos_drm_gem *exynos_drm_fb_gem(struct drm_framebuffer *fb, int index);
27 int index);
28 27
29void exynos_drm_mode_config_init(struct drm_device *dev); 28void exynos_drm_mode_config_init(struct drm_device *dev);
30 29
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index a221f753ad9c..f6118baa8e3e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -30,8 +30,8 @@
30 drm_fb_helper) 30 drm_fb_helper)
31 31
32struct exynos_drm_fbdev { 32struct exynos_drm_fbdev {
33 struct drm_fb_helper drm_fb_helper; 33 struct drm_fb_helper drm_fb_helper;
34 struct exynos_drm_gem_obj *obj; 34 struct exynos_drm_gem *exynos_gem;
35}; 35};
36 36
37static int exynos_drm_fb_mmap(struct fb_info *info, 37static int exynos_drm_fb_mmap(struct fb_info *info,
@@ -39,7 +39,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
39{ 39{
40 struct drm_fb_helper *helper = info->par; 40 struct drm_fb_helper *helper = info->par;
41 struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper); 41 struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
42 struct exynos_drm_gem_obj *obj = exynos_fbd->obj; 42 struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem;
43 unsigned long vm_size; 43 unsigned long vm_size;
44 int ret; 44 int ret;
45 45
@@ -47,11 +47,12 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
47 47
48 vm_size = vma->vm_end - vma->vm_start; 48 vm_size = vma->vm_end - vma->vm_start;
49 49
50 if (vm_size > obj->size) 50 if (vm_size > exynos_gem->size)
51 return -EINVAL; 51 return -EINVAL;
52 52
53 ret = dma_mmap_attrs(helper->dev->dev, vma, obj->pages, obj->dma_addr, 53 ret = dma_mmap_attrs(helper->dev->dev, vma, exynos_gem->pages,
54 obj->size, &obj->dma_attrs); 54 exynos_gem->dma_addr, exynos_gem->size,
55 &exynos_gem->dma_attrs);
55 if (ret < 0) { 56 if (ret < 0) {
56 DRM_ERROR("failed to mmap.\n"); 57 DRM_ERROR("failed to mmap.\n");
57 return ret; 58 return ret;
@@ -75,7 +76,7 @@ static struct fb_ops exynos_drm_fb_ops = {
75 76
76static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, 77static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
77 struct drm_fb_helper_surface_size *sizes, 78 struct drm_fb_helper_surface_size *sizes,
78 struct exynos_drm_gem_obj *obj) 79 struct exynos_drm_gem *exynos_gem)
79{ 80{
80 struct fb_info *fbi; 81 struct fb_info *fbi;
81 struct drm_framebuffer *fb = helper->fb; 82 struct drm_framebuffer *fb = helper->fb;
@@ -96,11 +97,11 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
96 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); 97 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
97 drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); 98 drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
98 99
99 nr_pages = obj->size >> PAGE_SHIFT; 100 nr_pages = exynos_gem->size >> PAGE_SHIFT;
100 101
101 obj->kvaddr = (void __iomem *) vmap(obj->pages, nr_pages, VM_MAP, 102 exynos_gem->kvaddr = (void __iomem *) vmap(exynos_gem->pages, nr_pages,
102 pgprot_writecombine(PAGE_KERNEL)); 103 VM_MAP, pgprot_writecombine(PAGE_KERNEL));
103 if (!obj->kvaddr) { 104 if (!exynos_gem->kvaddr) {
104 DRM_ERROR("failed to map pages to kernel space.\n"); 105 DRM_ERROR("failed to map pages to kernel space.\n");
105 drm_fb_helper_release_fbi(helper); 106 drm_fb_helper_release_fbi(helper);
106 return -EIO; 107 return -EIO;
@@ -109,7 +110,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
109 offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3); 110 offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
110 offset += fbi->var.yoffset * fb->pitches[0]; 111 offset += fbi->var.yoffset * fb->pitches[0];
111 112
112 fbi->screen_base = obj->kvaddr + offset; 113 fbi->screen_base = exynos_gem->kvaddr + offset;
113 fbi->screen_size = size; 114 fbi->screen_size = size;
114 fbi->fix.smem_len = size; 115 fbi->fix.smem_len = size;
115 116
@@ -120,7 +121,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
120 struct drm_fb_helper_surface_size *sizes) 121 struct drm_fb_helper_surface_size *sizes)
121{ 122{
122 struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper); 123 struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
123 struct exynos_drm_gem_obj *obj; 124 struct exynos_drm_gem *exynos_gem;
124 struct drm_device *dev = helper->dev; 125 struct drm_device *dev = helper->dev;
125 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 126 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
126 struct platform_device *pdev = dev->platformdev; 127 struct platform_device *pdev = dev->platformdev;
@@ -141,32 +142,34 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
141 142
142 size = mode_cmd.pitches[0] * mode_cmd.height; 143 size = mode_cmd.pitches[0] * mode_cmd.height;
143 144
144 obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size); 145 exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
145 /* 146 /*
146 * If physically contiguous memory allocation fails and if IOMMU is 147 * If physically contiguous memory allocation fails and if IOMMU is
147 * supported then try to get buffer from non physically contiguous 148 * supported then try to get buffer from non physically contiguous
148 * memory area. 149 * memory area.
149 */ 150 */
150 if (IS_ERR(obj) && is_drm_iommu_supported(dev)) { 151 if (IS_ERR(exynos_gem) && is_drm_iommu_supported(dev)) {
151 dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n"); 152 dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
152 obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG, size); 153 exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
154 size);
153 } 155 }
154 156
155 if (IS_ERR(obj)) { 157 if (IS_ERR(exynos_gem)) {
156 ret = PTR_ERR(obj); 158 ret = PTR_ERR(exynos_gem);
157 goto out; 159 goto out;
158 } 160 }
159 161
160 exynos_fbdev->obj = obj; 162 exynos_fbdev->exynos_gem = exynos_gem;
161 163
162 helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd, &obj, 1); 164 helper->fb =
165 exynos_drm_framebuffer_init(dev, &mode_cmd, &exynos_gem, 1);
163 if (IS_ERR(helper->fb)) { 166 if (IS_ERR(helper->fb)) {
164 DRM_ERROR("failed to create drm framebuffer.\n"); 167 DRM_ERROR("failed to create drm framebuffer.\n");
165 ret = PTR_ERR(helper->fb); 168 ret = PTR_ERR(helper->fb);
166 goto err_destroy_gem; 169 goto err_destroy_gem;
167 } 170 }
168 171
169 ret = exynos_drm_fbdev_update(helper, sizes, obj); 172 ret = exynos_drm_fbdev_update(helper, sizes, exynos_gem);
170 if (ret < 0) 173 if (ret < 0)
171 goto err_destroy_framebuffer; 174 goto err_destroy_framebuffer;
172 175
@@ -176,7 +179,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
176err_destroy_framebuffer: 179err_destroy_framebuffer:
177 drm_framebuffer_cleanup(helper->fb); 180 drm_framebuffer_cleanup(helper->fb);
178err_destroy_gem: 181err_destroy_gem:
179 exynos_drm_gem_destroy(obj); 182 exynos_drm_gem_destroy(exynos_gem);
180 183
181/* 184/*
182 * if failed, all resources allocated above would be released by 185 * if failed, all resources allocated above would be released by
@@ -269,11 +272,11 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
269 struct drm_fb_helper *fb_helper) 272 struct drm_fb_helper *fb_helper)
270{ 273{
271 struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper); 274 struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
272 struct exynos_drm_gem_obj *obj = exynos_fbd->obj; 275 struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem;
273 struct drm_framebuffer *fb; 276 struct drm_framebuffer *fb;
274 277
275 if (obj->kvaddr) 278 if (exynos_gem->kvaddr)
276 vunmap(obj->kvaddr); 279 vunmap(exynos_gem->kvaddr);
277 280
278 /* release drm framebuffer and real buffer */ 281 /* release drm framebuffer and real buffer */
279 if (fb_helper->fb && fb_helper->fb->funcs) { 282 if (fb_helper->fb && fb_helper->fb->funcs) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index dd3a5e6d58c8..c747824f3c98 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -466,7 +466,7 @@ static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
466 EXYNOS_MSCTRL_C_INT_IN_2PLANE); 466 EXYNOS_MSCTRL_C_INT_IN_2PLANE);
467 break; 467 break;
468 default: 468 default:
469 dev_err(ippdrv->dev, "inavlid source yuv order 0x%x.\n", fmt); 469 dev_err(ippdrv->dev, "invalid source yuv order 0x%x.\n", fmt);
470 return -EINVAL; 470 return -EINVAL;
471 } 471 }
472 472
@@ -513,7 +513,7 @@ static int fimc_src_set_fmt(struct device *dev, u32 fmt)
513 cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420; 513 cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
514 break; 514 break;
515 default: 515 default:
516 dev_err(ippdrv->dev, "inavlid source format 0x%x.\n", fmt); 516 dev_err(ippdrv->dev, "invalid source format 0x%x.\n", fmt);
517 return -EINVAL; 517 return -EINVAL;
518 } 518 }
519 519
@@ -578,7 +578,7 @@ static int fimc_src_set_transf(struct device *dev,
578 cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR; 578 cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
579 break; 579 break;
580 default: 580 default:
581 dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree); 581 dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
582 return -EINVAL; 582 return -EINVAL;
583 } 583 }
584 584
@@ -701,7 +701,7 @@ static int fimc_src_set_addr(struct device *dev,
701 property->prop_id, buf_id, buf_type); 701 property->prop_id, buf_id, buf_type);
702 702
703 if (buf_id > FIMC_MAX_SRC) { 703 if (buf_id > FIMC_MAX_SRC) {
704 dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id); 704 dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
705 return -ENOMEM; 705 return -ENOMEM;
706 } 706 }
707 707
@@ -812,7 +812,7 @@ static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
812 cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE; 812 cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
813 break; 813 break;
814 default: 814 default:
815 dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt); 815 dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
816 return -EINVAL; 816 return -EINVAL;
817 } 817 }
818 818
@@ -865,7 +865,7 @@ static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
865 cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420; 865 cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420;
866 break; 866 break;
867 default: 867 default:
868 dev_err(ippdrv->dev, "inavlid target format 0x%x.\n", 868 dev_err(ippdrv->dev, "invalid target format 0x%x.\n",
869 fmt); 869 fmt);
870 return -EINVAL; 870 return -EINVAL;
871 } 871 }
@@ -929,7 +929,7 @@ static int fimc_dst_set_transf(struct device *dev,
929 cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR; 929 cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
930 break; 930 break;
931 default: 931 default:
932 dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree); 932 dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
933 return -EINVAL; 933 return -EINVAL;
934 } 934 }
935 935
@@ -1160,7 +1160,7 @@ static int fimc_dst_set_addr(struct device *dev,
1160 property->prop_id, buf_id, buf_type); 1160 property->prop_id, buf_id, buf_type);
1161 1161
1162 if (buf_id > FIMC_MAX_DST) { 1162 if (buf_id > FIMC_MAX_DST) {
1163 dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id); 1163 dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
1164 return -ENOMEM; 1164 return -ENOMEM;
1165 } 1165 }
1166 1166
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 3d1aba67758b..bd75c1531cac 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -87,6 +87,7 @@
87 87
88/* FIMD has totally five hardware windows. */ 88/* FIMD has totally five hardware windows. */
89#define WINDOWS_NR 5 89#define WINDOWS_NR 5
90#define CURSOR_WIN 4
90 91
91struct fimd_driver_data { 92struct fimd_driver_data {
92 unsigned int timing_base; 93 unsigned int timing_base;
@@ -153,7 +154,6 @@ struct fimd_context {
153 struct clk *lcd_clk; 154 struct clk *lcd_clk;
154 void __iomem *regs; 155 void __iomem *regs;
155 struct regmap *sysreg; 156 struct regmap *sysreg;
156 unsigned int default_win;
157 unsigned long irq_flags; 157 unsigned long irq_flags;
158 u32 vidcon0; 158 u32 vidcon0;
159 u32 vidcon1; 159 u32 vidcon1;
@@ -949,8 +949,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
949 ctx->pipe = priv->pipe++; 949 ctx->pipe = priv->pipe++;
950 950
951 for (zpos = 0; zpos < WINDOWS_NR; zpos++) { 951 for (zpos = 0; zpos < WINDOWS_NR; zpos++) {
952 type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY : 952 type = exynos_plane_get_type(zpos, CURSOR_WIN);
953 DRM_PLANE_TYPE_OVERLAY;
954 ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], 953 ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
955 1 << ctx->pipe, type, fimd_formats, 954 1 << ctx->pipe, type, fimd_formats,
956 ARRAY_SIZE(fimd_formats), zpos); 955 ARRAY_SIZE(fimd_formats), zpos);
@@ -958,7 +957,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
958 return ret; 957 return ret;
959 } 958 }
960 959
961 exynos_plane = &ctx->planes[ctx->default_win]; 960 exynos_plane = &ctx->planes[DEFAULT_WIN];
962 ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base, 961 ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
963 ctx->pipe, EXYNOS_DISPLAY_TYPE_LCD, 962 ctx->pipe, EXYNOS_DISPLAY_TYPE_LCD,
964 &fimd_crtc_ops, ctx); 963 &fimd_crtc_ops, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 407afedb6003..252eb301470c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -20,97 +20,108 @@
20#include "exynos_drm_gem.h" 20#include "exynos_drm_gem.h"
21#include "exynos_drm_iommu.h" 21#include "exynos_drm_iommu.h"
22 22
23static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj) 23static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
24{ 24{
25 struct drm_device *dev = obj->base.dev; 25 struct drm_device *dev = exynos_gem->base.dev;
26 enum dma_attr attr; 26 enum dma_attr attr;
27 unsigned int nr_pages; 27 unsigned int nr_pages;
28 struct sg_table sgt;
29 int ret = -ENOMEM;
28 30
29 if (obj->dma_addr) { 31 if (exynos_gem->dma_addr) {
30 DRM_DEBUG_KMS("already allocated.\n"); 32 DRM_DEBUG_KMS("already allocated.\n");
31 return 0; 33 return 0;
32 } 34 }
33 35
34 init_dma_attrs(&obj->dma_attrs); 36 init_dma_attrs(&exynos_gem->dma_attrs);
35 37
36 /* 38 /*
37 * if EXYNOS_BO_CONTIG, fully physically contiguous memory 39 * if EXYNOS_BO_CONTIG, fully physically contiguous memory
38 * region will be allocated else physically contiguous 40 * region will be allocated else physically contiguous
39 * as possible. 41 * as possible.
40 */ 42 */
41 if (!(obj->flags & EXYNOS_BO_NONCONTIG)) 43 if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
42 dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &obj->dma_attrs); 44 dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &exynos_gem->dma_attrs);
43 45
44 /* 46 /*
45 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping 47 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
46 * else cachable mapping. 48 * else cachable mapping.
47 */ 49 */
48 if (obj->flags & EXYNOS_BO_WC || !(obj->flags & EXYNOS_BO_CACHABLE)) 50 if (exynos_gem->flags & EXYNOS_BO_WC ||
51 !(exynos_gem->flags & EXYNOS_BO_CACHABLE))
49 attr = DMA_ATTR_WRITE_COMBINE; 52 attr = DMA_ATTR_WRITE_COMBINE;
50 else 53 else
51 attr = DMA_ATTR_NON_CONSISTENT; 54 attr = DMA_ATTR_NON_CONSISTENT;
52 55
53 dma_set_attr(attr, &obj->dma_attrs); 56 dma_set_attr(attr, &exynos_gem->dma_attrs);
54 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &obj->dma_attrs); 57 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &exynos_gem->dma_attrs);
55 58
56 nr_pages = obj->size >> PAGE_SHIFT; 59 nr_pages = exynos_gem->size >> PAGE_SHIFT;
57 60
58 if (!is_drm_iommu_supported(dev)) { 61 exynos_gem->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
59 obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); 62 if (!exynos_gem->pages) {
60 if (!obj->pages) { 63 DRM_ERROR("failed to allocate pages.\n");
61 DRM_ERROR("failed to allocate pages.\n"); 64 return -ENOMEM;
62 return -ENOMEM;
63 }
64 } 65 }
65 66
66 obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr, 67 exynos_gem->cookie = dma_alloc_attrs(dev->dev, exynos_gem->size,
67 GFP_KERNEL, &obj->dma_attrs); 68 &exynos_gem->dma_addr, GFP_KERNEL,
68 if (!obj->cookie) { 69 &exynos_gem->dma_attrs);
70 if (!exynos_gem->cookie) {
69 DRM_ERROR("failed to allocate buffer.\n"); 71 DRM_ERROR("failed to allocate buffer.\n");
70 if (obj->pages) 72 goto err_free;
71 drm_free_large(obj->pages);
72 return -ENOMEM;
73 } 73 }
74 74
75 if (obj->pages) { 75 ret = dma_get_sgtable_attrs(dev->dev, &sgt, exynos_gem->cookie,
76 dma_addr_t start_addr; 76 exynos_gem->dma_addr, exynos_gem->size,
77 unsigned int i = 0; 77 &exynos_gem->dma_attrs);
78 78 if (ret < 0) {
79 start_addr = obj->dma_addr; 79 DRM_ERROR("failed to get sgtable.\n");
80 while (i < nr_pages) { 80 goto err_dma_free;
81 obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev,
82 start_addr));
83 start_addr += PAGE_SIZE;
84 i++;
85 }
86 } else {
87 obj->pages = obj->cookie;
88 } 81 }
89 82
83 if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
84 nr_pages)) {
85 DRM_ERROR("invalid sgtable.\n");
86 ret = -EINVAL;
87 goto err_sgt_free;
88 }
89
90 sg_free_table(&sgt);
91
90 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", 92 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
91 (unsigned long)obj->dma_addr, 93 (unsigned long)exynos_gem->dma_addr, exynos_gem->size);
92 obj->size);
93 94
94 return 0; 95 return 0;
96
97err_sgt_free:
98 sg_free_table(&sgt);
99err_dma_free:
100 dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie,
101 exynos_gem->dma_addr, &exynos_gem->dma_attrs);
102err_free:
103 drm_free_large(exynos_gem->pages);
104
105 return ret;
95} 106}
96 107
97static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj) 108static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
98{ 109{
99 struct drm_device *dev = obj->base.dev; 110 struct drm_device *dev = exynos_gem->base.dev;
100 111
101 if (!obj->dma_addr) { 112 if (!exynos_gem->dma_addr) {
102 DRM_DEBUG_KMS("dma_addr is invalid.\n"); 113 DRM_DEBUG_KMS("dma_addr is invalid.\n");
103 return; 114 return;
104 } 115 }
105 116
106 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", 117 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
107 (unsigned long)obj->dma_addr, obj->size); 118 (unsigned long)exynos_gem->dma_addr, exynos_gem->size);
108 119
109 dma_free_attrs(dev->dev, obj->size, obj->cookie, 120 dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie,
110 (dma_addr_t)obj->dma_addr, &obj->dma_attrs); 121 (dma_addr_t)exynos_gem->dma_addr,
122 &exynos_gem->dma_attrs);
111 123
112 if (!is_drm_iommu_supported(dev)) 124 drm_free_large(exynos_gem->pages);
113 drm_free_large(obj->pages);
114} 125}
115 126
116static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, 127static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -135,9 +146,9 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
135 return 0; 146 return 0;
136} 147}
137 148
138void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) 149void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
139{ 150{
140 struct drm_gem_object *obj = &exynos_gem_obj->base; 151 struct drm_gem_object *obj = &exynos_gem->base;
141 152
142 DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count); 153 DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);
143 154
@@ -148,21 +159,21 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
148 * once dmabuf's refcount becomes 0. 159 * once dmabuf's refcount becomes 0.
149 */ 160 */
150 if (obj->import_attach) 161 if (obj->import_attach)
151 drm_prime_gem_destroy(obj, exynos_gem_obj->sgt); 162 drm_prime_gem_destroy(obj, exynos_gem->sgt);
152 else 163 else
153 exynos_drm_free_buf(exynos_gem_obj); 164 exynos_drm_free_buf(exynos_gem);
154 165
155 /* release file pointer to gem object. */ 166 /* release file pointer to gem object. */
156 drm_gem_object_release(obj); 167 drm_gem_object_release(obj);
157 168
158 kfree(exynos_gem_obj); 169 kfree(exynos_gem);
159} 170}
160 171
161unsigned long exynos_drm_gem_get_size(struct drm_device *dev, 172unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
162 unsigned int gem_handle, 173 unsigned int gem_handle,
163 struct drm_file *file_priv) 174 struct drm_file *file_priv)
164{ 175{
165 struct exynos_drm_gem_obj *exynos_gem_obj; 176 struct exynos_drm_gem *exynos_gem;
166 struct drm_gem_object *obj; 177 struct drm_gem_object *obj;
167 178
168 obj = drm_gem_object_lookup(dev, file_priv, gem_handle); 179 obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
@@ -171,51 +182,51 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
171 return 0; 182 return 0;
172 } 183 }
173 184
174 exynos_gem_obj = to_exynos_gem_obj(obj); 185 exynos_gem = to_exynos_gem(obj);
175 186
176 drm_gem_object_unreference_unlocked(obj); 187 drm_gem_object_unreference_unlocked(obj);
177 188
178 return exynos_gem_obj->size; 189 return exynos_gem->size;
179} 190}
180 191
181static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, 192static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
182 unsigned long size) 193 unsigned long size)
183{ 194{
184 struct exynos_drm_gem_obj *exynos_gem_obj; 195 struct exynos_drm_gem *exynos_gem;
185 struct drm_gem_object *obj; 196 struct drm_gem_object *obj;
186 int ret; 197 int ret;
187 198
188 exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL); 199 exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
189 if (!exynos_gem_obj) 200 if (!exynos_gem)
190 return ERR_PTR(-ENOMEM); 201 return ERR_PTR(-ENOMEM);
191 202
192 exynos_gem_obj->size = size; 203 exynos_gem->size = size;
193 obj = &exynos_gem_obj->base; 204 obj = &exynos_gem->base;
194 205
195 ret = drm_gem_object_init(dev, obj, size); 206 ret = drm_gem_object_init(dev, obj, size);
196 if (ret < 0) { 207 if (ret < 0) {
197 DRM_ERROR("failed to initialize gem object\n"); 208 DRM_ERROR("failed to initialize gem object\n");
198 kfree(exynos_gem_obj); 209 kfree(exynos_gem);
199 return ERR_PTR(ret); 210 return ERR_PTR(ret);
200 } 211 }
201 212
202 ret = drm_gem_create_mmap_offset(obj); 213 ret = drm_gem_create_mmap_offset(obj);
203 if (ret < 0) { 214 if (ret < 0) {
204 drm_gem_object_release(obj); 215 drm_gem_object_release(obj);
205 kfree(exynos_gem_obj); 216 kfree(exynos_gem);
206 return ERR_PTR(ret); 217 return ERR_PTR(ret);
207 } 218 }
208 219
209 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); 220 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
210 221
211 return exynos_gem_obj; 222 return exynos_gem;
212} 223}
213 224
214struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, 225struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
215 unsigned int flags, 226 unsigned int flags,
216 unsigned long size) 227 unsigned long size)
217{ 228{
218 struct exynos_drm_gem_obj *exynos_gem_obj; 229 struct exynos_drm_gem *exynos_gem;
219 int ret; 230 int ret;
220 231
221 if (flags & ~(EXYNOS_BO_MASK)) { 232 if (flags & ~(EXYNOS_BO_MASK)) {
@@ -230,38 +241,38 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
230 241
231 size = roundup(size, PAGE_SIZE); 242 size = roundup(size, PAGE_SIZE);
232 243
233 exynos_gem_obj = exynos_drm_gem_init(dev, size); 244 exynos_gem = exynos_drm_gem_init(dev, size);
234 if (IS_ERR(exynos_gem_obj)) 245 if (IS_ERR(exynos_gem))
235 return exynos_gem_obj; 246 return exynos_gem;
236 247
237 /* set memory type and cache attribute from user side. */ 248 /* set memory type and cache attribute from user side. */
238 exynos_gem_obj->flags = flags; 249 exynos_gem->flags = flags;
239 250
240 ret = exynos_drm_alloc_buf(exynos_gem_obj); 251 ret = exynos_drm_alloc_buf(exynos_gem);
241 if (ret < 0) { 252 if (ret < 0) {
242 drm_gem_object_release(&exynos_gem_obj->base); 253 drm_gem_object_release(&exynos_gem->base);
243 kfree(exynos_gem_obj); 254 kfree(exynos_gem);
244 return ERR_PTR(ret); 255 return ERR_PTR(ret);
245 } 256 }
246 257
247 return exynos_gem_obj; 258 return exynos_gem;
248} 259}
249 260
250int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, 261int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
251 struct drm_file *file_priv) 262 struct drm_file *file_priv)
252{ 263{
253 struct drm_exynos_gem_create *args = data; 264 struct drm_exynos_gem_create *args = data;
254 struct exynos_drm_gem_obj *exynos_gem_obj; 265 struct exynos_drm_gem *exynos_gem;
255 int ret; 266 int ret;
256 267
257 exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size); 268 exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
258 if (IS_ERR(exynos_gem_obj)) 269 if (IS_ERR(exynos_gem))
259 return PTR_ERR(exynos_gem_obj); 270 return PTR_ERR(exynos_gem);
260 271
261 ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv, 272 ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
262 &args->handle); 273 &args->handle);
263 if (ret) { 274 if (ret) {
264 exynos_drm_gem_destroy(exynos_gem_obj); 275 exynos_drm_gem_destroy(exynos_gem);
265 return ret; 276 return ret;
266 } 277 }
267 278
@@ -272,7 +283,7 @@ dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
272 unsigned int gem_handle, 283 unsigned int gem_handle,
273 struct drm_file *filp) 284 struct drm_file *filp)
274{ 285{
275 struct exynos_drm_gem_obj *exynos_gem_obj; 286 struct exynos_drm_gem *exynos_gem;
276 struct drm_gem_object *obj; 287 struct drm_gem_object *obj;
277 288
278 obj = drm_gem_object_lookup(dev, filp, gem_handle); 289 obj = drm_gem_object_lookup(dev, filp, gem_handle);
@@ -281,9 +292,9 @@ dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
281 return ERR_PTR(-EINVAL); 292 return ERR_PTR(-EINVAL);
282 } 293 }
283 294
284 exynos_gem_obj = to_exynos_gem_obj(obj); 295 exynos_gem = to_exynos_gem(obj);
285 296
286 return &exynos_gem_obj->dma_addr; 297 return &exynos_gem->dma_addr;
287} 298}
288 299
289void exynos_drm_gem_put_dma_addr(struct drm_device *dev, 300void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
@@ -307,10 +318,10 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
307 drm_gem_object_unreference_unlocked(obj); 318 drm_gem_object_unreference_unlocked(obj);
308} 319}
309 320
310static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, 321static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
311 struct vm_area_struct *vma) 322 struct vm_area_struct *vma)
312{ 323{
313 struct drm_device *drm_dev = exynos_gem_obj->base.dev; 324 struct drm_device *drm_dev = exynos_gem->base.dev;
314 unsigned long vm_size; 325 unsigned long vm_size;
315 int ret; 326 int ret;
316 327
@@ -320,12 +331,12 @@ static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
320 vm_size = vma->vm_end - vma->vm_start; 331 vm_size = vma->vm_end - vma->vm_start;
321 332
322 /* check if user-requested size is valid. */ 333 /* check if user-requested size is valid. */
323 if (vm_size > exynos_gem_obj->size) 334 if (vm_size > exynos_gem->size)
324 return -EINVAL; 335 return -EINVAL;
325 336
326 ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem_obj->pages, 337 ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->pages,
327 exynos_gem_obj->dma_addr, exynos_gem_obj->size, 338 exynos_gem->dma_addr, exynos_gem->size,
328 &exynos_gem_obj->dma_attrs); 339 &exynos_gem->dma_attrs);
329 if (ret < 0) { 340 if (ret < 0) {
330 DRM_ERROR("failed to mmap.\n"); 341 DRM_ERROR("failed to mmap.\n");
331 return ret; 342 return ret;
@@ -337,7 +348,7 @@ static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
337int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, 348int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
338 struct drm_file *file_priv) 349 struct drm_file *file_priv)
339{ 350{
340 struct exynos_drm_gem_obj *exynos_gem_obj; 351 struct exynos_drm_gem *exynos_gem;
341 struct drm_exynos_gem_info *args = data; 352 struct drm_exynos_gem_info *args = data;
342 struct drm_gem_object *obj; 353 struct drm_gem_object *obj;
343 354
@@ -350,10 +361,10 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
350 return -EINVAL; 361 return -EINVAL;
351 } 362 }
352 363
353 exynos_gem_obj = to_exynos_gem_obj(obj); 364 exynos_gem = to_exynos_gem(obj);
354 365
355 args->flags = exynos_gem_obj->flags; 366 args->flags = exynos_gem->flags;
356 args->size = exynos_gem_obj->size; 367 args->size = exynos_gem->size;
357 368
358 drm_gem_object_unreference(obj); 369 drm_gem_object_unreference(obj);
359 mutex_unlock(&dev->struct_mutex); 370 mutex_unlock(&dev->struct_mutex);
@@ -389,14 +400,14 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
389 400
390void exynos_drm_gem_free_object(struct drm_gem_object *obj) 401void exynos_drm_gem_free_object(struct drm_gem_object *obj)
391{ 402{
392 exynos_drm_gem_destroy(to_exynos_gem_obj(obj)); 403 exynos_drm_gem_destroy(to_exynos_gem(obj));
393} 404}
394 405
395int exynos_drm_gem_dumb_create(struct drm_file *file_priv, 406int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
396 struct drm_device *dev, 407 struct drm_device *dev,
397 struct drm_mode_create_dumb *args) 408 struct drm_mode_create_dumb *args)
398{ 409{
399 struct exynos_drm_gem_obj *exynos_gem_obj; 410 struct exynos_drm_gem *exynos_gem;
400 unsigned int flags; 411 unsigned int flags;
401 int ret; 412 int ret;
402 413
@@ -414,16 +425,16 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
414 else 425 else
415 flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC; 426 flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;
416 427
417 exynos_gem_obj = exynos_drm_gem_create(dev, flags, args->size); 428 exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
418 if (IS_ERR(exynos_gem_obj)) { 429 if (IS_ERR(exynos_gem)) {
419 dev_warn(dev->dev, "FB allocation failed.\n"); 430 dev_warn(dev->dev, "FB allocation failed.\n");
420 return PTR_ERR(exynos_gem_obj); 431 return PTR_ERR(exynos_gem);
421 } 432 }
422 433
423 ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv, 434 ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
424 &args->handle); 435 &args->handle);
425 if (ret) { 436 if (ret) {
426 exynos_drm_gem_destroy(exynos_gem_obj); 437 exynos_drm_gem_destroy(exynos_gem);
427 return ret; 438 return ret;
428 } 439 }
429 440
@@ -464,7 +475,7 @@ unlock:
464int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 475int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
465{ 476{
466 struct drm_gem_object *obj = vma->vm_private_data; 477 struct drm_gem_object *obj = vma->vm_private_data;
467 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); 478 struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
468 unsigned long pfn; 479 unsigned long pfn;
469 pgoff_t page_offset; 480 pgoff_t page_offset;
470 int ret; 481 int ret;
@@ -472,13 +483,13 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
472 page_offset = ((unsigned long)vmf->virtual_address - 483 page_offset = ((unsigned long)vmf->virtual_address -
473 vma->vm_start) >> PAGE_SHIFT; 484 vma->vm_start) >> PAGE_SHIFT;
474 485
475 if (page_offset >= (exynos_gem_obj->size >> PAGE_SHIFT)) { 486 if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
476 DRM_ERROR("invalid page offset\n"); 487 DRM_ERROR("invalid page offset\n");
477 ret = -EINVAL; 488 ret = -EINVAL;
478 goto out; 489 goto out;
479 } 490 }
480 491
481 pfn = page_to_pfn(exynos_gem_obj->pages[page_offset]); 492 pfn = page_to_pfn(exynos_gem->pages[page_offset]);
482 ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); 493 ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
483 494
484out: 495out:
@@ -496,7 +507,7 @@ out:
496 507
497int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) 508int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
498{ 509{
499 struct exynos_drm_gem_obj *exynos_gem_obj; 510 struct exynos_drm_gem *exynos_gem;
500 struct drm_gem_object *obj; 511 struct drm_gem_object *obj;
501 int ret; 512 int ret;
502 513
@@ -508,21 +519,21 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
508 } 519 }
509 520
510 obj = vma->vm_private_data; 521 obj = vma->vm_private_data;
511 exynos_gem_obj = to_exynos_gem_obj(obj); 522 exynos_gem = to_exynos_gem(obj);
512 523
513 DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem_obj->flags); 524 DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem->flags);
514 525
515 /* non-cachable as default. */ 526 /* non-cachable as default. */
516 if (exynos_gem_obj->flags & EXYNOS_BO_CACHABLE) 527 if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
517 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 528 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
518 else if (exynos_gem_obj->flags & EXYNOS_BO_WC) 529 else if (exynos_gem->flags & EXYNOS_BO_WC)
519 vma->vm_page_prot = 530 vma->vm_page_prot =
520 pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); 531 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
521 else 532 else
522 vma->vm_page_prot = 533 vma->vm_page_prot =
523 pgprot_noncached(vm_get_page_prot(vma->vm_flags)); 534 pgprot_noncached(vm_get_page_prot(vma->vm_flags));
524 535
525 ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma); 536 ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
526 if (ret) 537 if (ret)
527 goto err_close_vm; 538 goto err_close_vm;
528 539
@@ -537,12 +548,12 @@ err_close_vm:
537/* low-level interface prime helpers */ 548/* low-level interface prime helpers */
538struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj) 549struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
539{ 550{
540 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); 551 struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
541 int npages; 552 int npages;
542 553
543 npages = exynos_gem_obj->size >> PAGE_SHIFT; 554 npages = exynos_gem->size >> PAGE_SHIFT;
544 555
545 return drm_prime_pages_to_sg(exynos_gem_obj->pages, npages); 556 return drm_prime_pages_to_sg(exynos_gem->pages, npages);
546} 557}
547 558
548struct drm_gem_object * 559struct drm_gem_object *
@@ -550,35 +561,35 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
550 struct dma_buf_attachment *attach, 561 struct dma_buf_attachment *attach,
551 struct sg_table *sgt) 562 struct sg_table *sgt)
552{ 563{
553 struct exynos_drm_gem_obj *exynos_gem_obj; 564 struct exynos_drm_gem *exynos_gem;
554 int npages; 565 int npages;
555 int ret; 566 int ret;
556 567
557 exynos_gem_obj = exynos_drm_gem_init(dev, attach->dmabuf->size); 568 exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
558 if (IS_ERR(exynos_gem_obj)) { 569 if (IS_ERR(exynos_gem)) {
559 ret = PTR_ERR(exynos_gem_obj); 570 ret = PTR_ERR(exynos_gem);
560 return ERR_PTR(ret); 571 return ERR_PTR(ret);
561 } 572 }
562 573
563 exynos_gem_obj->dma_addr = sg_dma_address(sgt->sgl); 574 exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
564 575
565 npages = exynos_gem_obj->size >> PAGE_SHIFT; 576 npages = exynos_gem->size >> PAGE_SHIFT;
566 exynos_gem_obj->pages = drm_malloc_ab(npages, sizeof(struct page *)); 577 exynos_gem->pages = drm_malloc_ab(npages, sizeof(struct page *));
567 if (!exynos_gem_obj->pages) { 578 if (!exynos_gem->pages) {
568 ret = -ENOMEM; 579 ret = -ENOMEM;
569 goto err; 580 goto err;
570 } 581 }
571 582
572 ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem_obj->pages, NULL, 583 ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
573 npages); 584 npages);
574 if (ret < 0) 585 if (ret < 0)
575 goto err_free_large; 586 goto err_free_large;
576 587
577 exynos_gem_obj->sgt = sgt; 588 exynos_gem->sgt = sgt;
578 589
579 if (sgt->nents == 1) { 590 if (sgt->nents == 1) {
580 /* always physically continuous memory if sgt->nents is 1. */ 591 /* always physically continuous memory if sgt->nents is 1. */
581 exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; 592 exynos_gem->flags |= EXYNOS_BO_CONTIG;
582 } else { 593 } else {
583 /* 594 /*
584 * this case could be CONTIG or NONCONTIG type but for now 595 * this case could be CONTIG or NONCONTIG type but for now
@@ -586,16 +597,16 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
586 * TODO. we have to find a way that exporter can notify 597 * TODO. we have to find a way that exporter can notify
587 * the type of its own buffer to importer. 598 * the type of its own buffer to importer.
588 */ 599 */
589 exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG; 600 exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
590 } 601 }
591 602
592 return &exynos_gem_obj->base; 603 return &exynos_gem->base;
593 604
594err_free_large: 605err_free_large:
595 drm_free_large(exynos_gem_obj->pages); 606 drm_free_large(exynos_gem->pages);
596err: 607err:
597 drm_gem_object_release(&exynos_gem_obj->base); 608 drm_gem_object_release(&exynos_gem->base);
598 kfree(exynos_gem_obj); 609 kfree(exynos_gem);
599 return ERR_PTR(ret); 610 return ERR_PTR(ret);
600} 611}
601 612
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index b62d1007c0e0..37ab8b282db6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -14,8 +14,7 @@
14 14
15#include <drm/drm_gem.h> 15#include <drm/drm_gem.h>
16 16
17#define to_exynos_gem_obj(x) container_of(x,\ 17#define to_exynos_gem(x) container_of(x, struct exynos_drm_gem, base)
18 struct exynos_drm_gem_obj, base)
19 18
20#define IS_NONCONTIG_BUFFER(f) (f & EXYNOS_BO_NONCONTIG) 19#define IS_NONCONTIG_BUFFER(f) (f & EXYNOS_BO_NONCONTIG)
21 20
@@ -44,7 +43,7 @@
44 * P.S. this object would be transferred to user as kms_bo.handle so 43 * P.S. this object would be transferred to user as kms_bo.handle so
45 * user can access the buffer through kms_bo.handle. 44 * user can access the buffer through kms_bo.handle.
46 */ 45 */
47struct exynos_drm_gem_obj { 46struct exynos_drm_gem {
48 struct drm_gem_object base; 47 struct drm_gem_object base;
49 unsigned int flags; 48 unsigned int flags;
50 unsigned long size; 49 unsigned long size;
@@ -59,12 +58,12 @@ struct exynos_drm_gem_obj {
59struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); 58struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
60 59
61/* destroy a buffer with gem object */ 60/* destroy a buffer with gem object */
62void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj); 61void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem);
63 62
64/* create a new buffer with gem object */ 63/* create a new buffer with gem object */
65struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, 64struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
66 unsigned int flags, 65 unsigned int flags,
67 unsigned long size); 66 unsigned long size);
68 67
69/* 68/*
70 * request gem object creation and buffer allocation as the size 69 * request gem object creation and buffer allocation as the size
@@ -106,7 +105,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
106 struct drm_file *file_priv); 105 struct drm_file *file_priv);
107 106
108/* free gem object. */ 107/* free gem object. */
109void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj); 108void exynos_drm_gem_free_object(struct drm_gem_object *obj);
110 109
111/* create memory region for drm framebuffer. */ 110/* create memory region for drm framebuffer. */
112int exynos_drm_gem_dumb_create(struct drm_file *file_priv, 111int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 808a0a013780..11b87d2a7913 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -543,7 +543,7 @@ static int gsc_src_set_fmt(struct device *dev, u32 fmt)
543 GSC_IN_YUV420_2P); 543 GSC_IN_YUV420_2P);
544 break; 544 break;
545 default: 545 default:
546 dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt); 546 dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
547 return -EINVAL; 547 return -EINVAL;
548 } 548 }
549 549
@@ -595,7 +595,7 @@ static int gsc_src_set_transf(struct device *dev,
595 cfg &= ~GSC_IN_ROT_YFLIP; 595 cfg &= ~GSC_IN_ROT_YFLIP;
596 break; 596 break;
597 default: 597 default:
598 dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree); 598 dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
599 return -EINVAL; 599 return -EINVAL;
600 } 600 }
601 601
@@ -721,7 +721,7 @@ static int gsc_src_set_addr(struct device *dev,
721 property->prop_id, buf_id, buf_type); 721 property->prop_id, buf_id, buf_type);
722 722
723 if (buf_id > GSC_MAX_SRC) { 723 if (buf_id > GSC_MAX_SRC) {
724 dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id); 724 dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
725 return -EINVAL; 725 return -EINVAL;
726 } 726 }
727 727
@@ -814,7 +814,7 @@ static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
814 GSC_OUT_YUV420_2P); 814 GSC_OUT_YUV420_2P);
815 break; 815 break;
816 default: 816 default:
817 dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt); 817 dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
818 return -EINVAL; 818 return -EINVAL;
819 } 819 }
820 820
@@ -866,7 +866,7 @@ static int gsc_dst_set_transf(struct device *dev,
866 cfg &= ~GSC_IN_ROT_YFLIP; 866 cfg &= ~GSC_IN_ROT_YFLIP;
867 break; 867 break;
868 default: 868 default:
869 dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree); 869 dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
870 return -EINVAL; 870 return -EINVAL;
871 } 871 }
872 872
@@ -1176,7 +1176,7 @@ static int gsc_dst_set_addr(struct device *dev,
1176 property->prop_id, buf_id, buf_type); 1176 property->prop_id, buf_id, buf_type);
1177 1177
1178 if (buf_id > GSC_MAX_DST) { 1178 if (buf_id > GSC_MAX_DST) {
1179 dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id); 1179 dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
1180 return -EINVAL; 1180 return -EINVAL;
1181 } 1181 }
1182 1182
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
index 055e8ec2ef21..d73b9ad35b7a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -139,6 +139,5 @@ void drm_iommu_detach_device(struct drm_device *drm_dev,
139 if (!mapping || !mapping->domain) 139 if (!mapping || !mapping->domain)
140 return; 140 return;
141 141
142 iommu_detach_device(mapping->domain, subdrv_dev); 142 arm_iommu_detach_device(subdrv_dev);
143 drm_release_iommu_mapping(drm_dev);
144} 143}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 714822441467..179311760bb7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -128,15 +128,14 @@ static int exynos_plane_atomic_check(struct drm_plane *plane,
128 128
129 nr = drm_format_num_planes(state->fb->pixel_format); 129 nr = drm_format_num_planes(state->fb->pixel_format);
130 for (i = 0; i < nr; i++) { 130 for (i = 0; i < nr; i++) {
131 struct exynos_drm_gem_obj *obj = 131 struct exynos_drm_gem *exynos_gem =
132 exynos_drm_fb_gem_obj(state->fb, i); 132 exynos_drm_fb_gem(state->fb, i);
133 133 if (!exynos_gem) {
134 if (!obj) {
135 DRM_DEBUG_KMS("gem object is null\n"); 134 DRM_DEBUG_KMS("gem object is null\n");
136 return -EFAULT; 135 return -EFAULT;
137 } 136 }
138 137
139 exynos_plane->dma_addr[i] = obj->dma_addr + 138 exynos_plane->dma_addr[i] = exynos_gem->dma_addr +
140 state->fb->offsets[i]; 139 state->fb->offsets[i];
141 140
142 DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n", 141 DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
@@ -208,6 +207,17 @@ static void exynos_plane_attach_zpos_property(struct drm_plane *plane,
208 drm_object_attach_property(&plane->base, prop, zpos); 207 drm_object_attach_property(&plane->base, prop, zpos);
209} 208}
210 209
210enum drm_plane_type exynos_plane_get_type(unsigned int zpos,
211 unsigned int cursor_win)
212{
213 if (zpos == DEFAULT_WIN)
214 return DRM_PLANE_TYPE_PRIMARY;
215 else if (zpos == cursor_win)
216 return DRM_PLANE_TYPE_CURSOR;
217 else
218 return DRM_PLANE_TYPE_OVERLAY;
219}
220
211int exynos_plane_init(struct drm_device *dev, 221int exynos_plane_init(struct drm_device *dev,
212 struct exynos_drm_plane *exynos_plane, 222 struct exynos_drm_plane *exynos_plane,
213 unsigned long possible_crtcs, enum drm_plane_type type, 223 unsigned long possible_crtcs, enum drm_plane_type type,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
index 476c9340b591..abb641e64c23 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -9,6 +9,8 @@
9 * 9 *
10 */ 10 */
11 11
12enum drm_plane_type exynos_plane_get_type(unsigned int zpos,
13 unsigned int cursor_win);
12int exynos_plane_init(struct drm_device *dev, 14int exynos_plane_init(struct drm_device *dev,
13 struct exynos_drm_plane *exynos_plane, 15 struct exynos_drm_plane *exynos_plane,
14 unsigned long possible_crtcs, enum drm_plane_type type, 16 unsigned long possible_crtcs, enum drm_plane_type type,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 75718e1bc3dd..669362c53f49 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -29,6 +29,7 @@
29 29
30/* vidi has totally three virtual windows. */ 30/* vidi has totally three virtual windows. */
31#define WINDOWS_NR 3 31#define WINDOWS_NR 3
32#define CURSOR_WIN 2
32 33
33#define ctx_from_connector(c) container_of(c, struct vidi_context, \ 34#define ctx_from_connector(c) container_of(c, struct vidi_context, \
34 connector) 35 connector)
@@ -42,7 +43,6 @@ struct vidi_context {
42 struct exynos_drm_plane planes[WINDOWS_NR]; 43 struct exynos_drm_plane planes[WINDOWS_NR];
43 struct edid *raw_edid; 44 struct edid *raw_edid;
44 unsigned int clkdiv; 45 unsigned int clkdiv;
45 unsigned int default_win;
46 unsigned long irq_flags; 46 unsigned long irq_flags;
47 unsigned int connected; 47 unsigned int connected;
48 bool vblank_on; 48 bool vblank_on;
@@ -446,8 +446,7 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
446 vidi_ctx_initialize(ctx, drm_dev); 446 vidi_ctx_initialize(ctx, drm_dev);
447 447
448 for (zpos = 0; zpos < WINDOWS_NR; zpos++) { 448 for (zpos = 0; zpos < WINDOWS_NR; zpos++) {
449 type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY : 449 type = exynos_plane_get_type(zpos, CURSOR_WIN);
450 DRM_PLANE_TYPE_OVERLAY;
451 ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], 450 ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
452 1 << ctx->pipe, type, formats, 451 1 << ctx->pipe, type, formats,
453 ARRAY_SIZE(formats), zpos); 452 ARRAY_SIZE(formats), zpos);
@@ -455,7 +454,7 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
455 return ret; 454 return ret;
456 } 455 }
457 456
458 exynos_plane = &ctx->planes[ctx->default_win]; 457 exynos_plane = &ctx->planes[DEFAULT_WIN];
459 ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base, 458 ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
460 ctx->pipe, EXYNOS_DISPLAY_TYPE_VIDI, 459 ctx->pipe, EXYNOS_DISPLAY_TYPE_VIDI,
461 &vidi_crtc_ops, ctx); 460 &vidi_crtc_ops, ctx);
@@ -507,7 +506,6 @@ static int vidi_probe(struct platform_device *pdev)
507 if (!ctx) 506 if (!ctx)
508 return -ENOMEM; 507 return -ENOMEM;
509 508
510 ctx->default_win = 0;
511 ctx->pdev = pdev; 509 ctx->pdev = pdev;
512 510
513 INIT_WORK(&ctx->work, vidi_fake_vblank_handler); 511 INIT_WORK(&ctx->work, vidi_fake_vblank_handler);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 932f7fa240f8..57b675563e94 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -30,11 +30,11 @@
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/pm_runtime.h> 31#include <linux/pm_runtime.h>
32#include <linux/clk.h> 32#include <linux/clk.h>
33#include <linux/gpio/consumer.h>
33#include <linux/regulator/consumer.h> 34#include <linux/regulator/consumer.h>
34#include <linux/io.h> 35#include <linux/io.h>
35#include <linux/of_address.h> 36#include <linux/of_address.h>
36#include <linux/of_device.h> 37#include <linux/of_device.h>
37#include <linux/of_gpio.h>
38#include <linux/hdmi.h> 38#include <linux/hdmi.h>
39#include <linux/component.h> 39#include <linux/component.h>
40#include <linux/mfd/syscon.h> 40#include <linux/mfd/syscon.h>
@@ -44,11 +44,6 @@
44 44
45#include "exynos_drm_drv.h" 45#include "exynos_drm_drv.h"
46#include "exynos_drm_crtc.h" 46#include "exynos_drm_crtc.h"
47#include "exynos_mixer.h"
48
49#include <linux/gpio.h>
50
51#define ctx_from_connector(c) container_of(c, struct hdmi_context, connector)
52 47
53#define HOTPLUG_DEBOUNCE_MS 1100 48#define HOTPLUG_DEBOUNCE_MS 1100
54 49
@@ -66,6 +61,33 @@
66enum hdmi_type { 61enum hdmi_type {
67 HDMI_TYPE13, 62 HDMI_TYPE13,
68 HDMI_TYPE14, 63 HDMI_TYPE14,
64 HDMI_TYPE_COUNT
65};
66
67#define HDMI_MAPPED_BASE 0xffff0000
68
69enum hdmi_mapped_regs {
70 HDMI_PHY_STATUS = HDMI_MAPPED_BASE,
71 HDMI_PHY_RSTOUT,
72 HDMI_ACR_CON,
73 HDMI_ACR_MCTS0,
74 HDMI_ACR_CTS0,
75 HDMI_ACR_N0
76};
77
78static const u32 hdmi_reg_map[][HDMI_TYPE_COUNT] = {
79 { HDMI_V13_PHY_STATUS, HDMI_PHY_STATUS_0 },
80 { HDMI_V13_PHY_RSTOUT, HDMI_V14_PHY_RSTOUT },
81 { HDMI_V13_ACR_CON, HDMI_V14_ACR_CON },
82 { HDMI_V13_ACR_MCTS0, HDMI_V14_ACR_MCTS0 },
83 { HDMI_V13_ACR_CTS0, HDMI_V14_ACR_CTS0 },
84 { HDMI_V13_ACR_N0, HDMI_V14_ACR_N0 },
85};
86
87static const char * const supply[] = {
88 "vdd",
89 "vdd_osc",
90 "vdd_pll",
69}; 91};
70 92
71struct hdmi_driver_data { 93struct hdmi_driver_data {
@@ -75,44 +97,32 @@ struct hdmi_driver_data {
75 unsigned int is_apb_phy:1; 97 unsigned int is_apb_phy:1;
76}; 98};
77 99
78struct hdmi_resources {
79 struct clk *hdmi;
80 struct clk *sclk_hdmi;
81 struct clk *sclk_pixel;
82 struct clk *sclk_hdmiphy;
83 struct clk *mout_hdmi;
84 struct regulator_bulk_data *regul_bulk;
85 struct regulator *reg_hdmi_en;
86 int regul_count;
87};
88
89struct hdmi_context { 100struct hdmi_context {
90 struct drm_encoder encoder; 101 struct drm_encoder encoder;
91 struct device *dev; 102 struct device *dev;
92 struct drm_device *drm_dev; 103 struct drm_device *drm_dev;
93 struct drm_connector connector; 104 struct drm_connector connector;
94 bool hpd;
95 bool powered; 105 bool powered;
96 bool dvi_mode; 106 bool dvi_mode;
97
98 void __iomem *regs;
99 int irq;
100 struct delayed_work hotplug_work; 107 struct delayed_work hotplug_work;
101
102 struct i2c_adapter *ddc_adpt;
103 struct i2c_client *hdmiphy_port;
104
105 /* current hdmiphy conf regs */
106 struct drm_display_mode current_mode; 108 struct drm_display_mode current_mode;
107 u8 cea_video_id; 109 u8 cea_video_id;
108
109 struct hdmi_resources res;
110 const struct hdmi_driver_data *drv_data; 110 const struct hdmi_driver_data *drv_data;
111 111
112 int hpd_gpio; 112 void __iomem *regs;
113 void __iomem *regs_hdmiphy; 113 void __iomem *regs_hdmiphy;
114 114 struct i2c_client *hdmiphy_port;
115 struct i2c_adapter *ddc_adpt;
116 struct gpio_desc *hpd_gpio;
117 int irq;
115 struct regmap *pmureg; 118 struct regmap *pmureg;
119 struct clk *hdmi;
120 struct clk *sclk_hdmi;
121 struct clk *sclk_pixel;
122 struct clk *sclk_hdmiphy;
123 struct clk *mout_hdmi;
124 struct regulator_bulk_data regul_bulk[ARRAY_SIZE(supply)];
125 struct regulator *reg_hdmi_en;
116}; 126};
117 127
118static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e) 128static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e)
@@ -120,6 +130,11 @@ static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e)
120 return container_of(e, struct hdmi_context, encoder); 130 return container_of(e, struct hdmi_context, encoder);
121} 131}
122 132
133static inline struct hdmi_context *connector_to_hdmi(struct drm_connector *c)
134{
135 return container_of(c, struct hdmi_context, connector);
136}
137
123struct hdmiphy_config { 138struct hdmiphy_config {
124 int pixel_clock; 139 int pixel_clock;
125 u8 conf[32]; 140 u8 conf[32];
@@ -133,7 +148,7 @@ static const struct hdmiphy_config hdmiphy_v13_configs[] = {
133 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40, 148 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
134 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87, 149 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
135 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0, 150 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
136 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, 151 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x80,
137 }, 152 },
138 }, 153 },
139 { 154 {
@@ -142,7 +157,7 @@ static const struct hdmiphy_config hdmiphy_v13_configs[] = {
142 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64, 157 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
143 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87, 158 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
144 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0, 159 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
145 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, 160 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x80,
146 }, 161 },
147 }, 162 },
148 { 163 {
@@ -151,7 +166,7 @@ static const struct hdmiphy_config hdmiphy_v13_configs[] = {
151 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B, 166 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
152 0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9, 167 0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
153 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0, 168 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
154 0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00, 169 0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x80,
155 }, 170 },
156 }, 171 },
157 { 172 {
@@ -160,7 +175,7 @@ static const struct hdmiphy_config hdmiphy_v13_configs[] = {
160 0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40, 175 0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
161 0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba, 176 0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
162 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0, 177 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0,
163 0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00, 178 0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x80,
164 }, 179 },
165 }, 180 },
166 { 181 {
@@ -169,7 +184,7 @@ static const struct hdmiphy_config hdmiphy_v13_configs[] = {
169 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40, 184 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
170 0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba, 185 0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
171 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0, 186 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
172 0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00, 187 0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x80,
173 }, 188 },
174 }, 189 },
175}; 190};
@@ -199,7 +214,7 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
199 0x01, 0xd1, 0x2d, 0x72, 0x40, 0x64, 0x12, 0x08, 214 0x01, 0xd1, 0x2d, 0x72, 0x40, 0x64, 0x12, 0x08,
200 0x43, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80, 215 0x43, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
201 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 216 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
202 0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, 217 0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
203 }, 218 },
204 }, 219 },
205 { 220 {
@@ -262,7 +277,7 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
262 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08, 277 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08,
263 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80, 278 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
264 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 279 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
265 0x54, 0xa5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, 280 0x54, 0xa5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
266 }, 281 },
267 }, 282 },
268 { 283 {
@@ -325,7 +340,7 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
325 0x01, 0xd1, 0x1f, 0x00, 0x40, 0x40, 0xf8, 0x08, 340 0x01, 0xd1, 0x1f, 0x00, 0x40, 0x40, 0xf8, 0x08,
326 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80, 341 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
327 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 342 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
328 0x54, 0x4b, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00, 343 0x54, 0x4b, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
329 }, 344 },
330 }, 345 },
331}; 346};
@@ -507,29 +522,31 @@ static struct hdmi_driver_data exynos4210_hdmi_driver_data = {
507 .is_apb_phy = 0, 522 .is_apb_phy = 0,
508}; 523};
509 524
510static struct hdmi_driver_data exynos5_hdmi_driver_data = { 525static inline u32 hdmi_map_reg(struct hdmi_context *hdata, u32 reg_id)
511 .type = HDMI_TYPE14, 526{
512 .phy_confs = hdmiphy_v13_configs, 527 if ((reg_id & 0xffff0000) == HDMI_MAPPED_BASE)
513 .phy_conf_count = ARRAY_SIZE(hdmiphy_v13_configs), 528 return hdmi_reg_map[reg_id & 0xffff][hdata->drv_data->type];
514 .is_apb_phy = 0, 529 return reg_id;
515}; 530}
516 531
517static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id) 532static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
518{ 533{
519 return readl(hdata->regs + reg_id); 534 return readl(hdata->regs + hdmi_map_reg(hdata, reg_id));
520} 535}
521 536
522static inline void hdmi_reg_writeb(struct hdmi_context *hdata, 537static inline void hdmi_reg_writeb(struct hdmi_context *hdata,
523 u32 reg_id, u8 value) 538 u32 reg_id, u8 value)
524{ 539{
525 writeb(value, hdata->regs + reg_id); 540 writel(value, hdata->regs + hdmi_map_reg(hdata, reg_id));
526} 541}
527 542
528static inline void hdmi_reg_writev(struct hdmi_context *hdata, u32 reg_id, 543static inline void hdmi_reg_writev(struct hdmi_context *hdata, u32 reg_id,
529 int bytes, u32 val) 544 int bytes, u32 val)
530{ 545{
546 reg_id = hdmi_map_reg(hdata, reg_id);
547
531 while (--bytes >= 0) { 548 while (--bytes >= 0) {
532 writeb(val & 0xff, hdata->regs + reg_id); 549 writel(val & 0xff, hdata->regs + reg_id);
533 val >>= 8; 550 val >>= 8;
534 reg_id += 4; 551 reg_id += 4;
535 } 552 }
@@ -538,31 +555,14 @@ static inline void hdmi_reg_writev(struct hdmi_context *hdata, u32 reg_id,
538static inline void hdmi_reg_writemask(struct hdmi_context *hdata, 555static inline void hdmi_reg_writemask(struct hdmi_context *hdata,
539 u32 reg_id, u32 value, u32 mask) 556 u32 reg_id, u32 value, u32 mask)
540{ 557{
541 u32 old = readl(hdata->regs + reg_id); 558 u32 old;
559
560 reg_id = hdmi_map_reg(hdata, reg_id);
561 old = readl(hdata->regs + reg_id);
542 value = (value & mask) | (old & ~mask); 562 value = (value & mask) | (old & ~mask);
543 writel(value, hdata->regs + reg_id); 563 writel(value, hdata->regs + reg_id);
544} 564}
545 565
546static int hdmiphy_reg_writeb(struct hdmi_context *hdata,
547 u32 reg_offset, u8 value)
548{
549 if (hdata->hdmiphy_port) {
550 u8 buffer[2];
551 int ret;
552
553 buffer[0] = reg_offset;
554 buffer[1] = value;
555
556 ret = i2c_master_send(hdata->hdmiphy_port, buffer, 2);
557 if (ret == 2)
558 return 0;
559 return ret;
560 } else {
561 writeb(value, hdata->regs_hdmiphy + (reg_offset<<2));
562 return 0;
563 }
564}
565
566static int hdmiphy_reg_write_buf(struct hdmi_context *hdata, 566static int hdmiphy_reg_write_buf(struct hdmi_context *hdata,
567 u32 reg_offset, const u8 *buf, u32 len) 567 u32 reg_offset, const u8 *buf, u32 len)
568{ 568{
@@ -579,7 +579,7 @@ static int hdmiphy_reg_write_buf(struct hdmi_context *hdata,
579 } else { 579 } else {
580 int i; 580 int i;
581 for (i = 0; i < len; i++) 581 for (i = 0; i < len; i++)
582 writeb(buf[i], hdata->regs_hdmiphy + 582 writel(buf[i], hdata->regs_hdmiphy +
583 ((reg_offset + i)<<2)); 583 ((reg_offset + i)<<2));
584 return 0; 584 return 0;
585 } 585 }
@@ -689,7 +689,7 @@ static void hdmi_v14_regs_dump(struct hdmi_context *hdata, char *prefix)
689 DUMPREG(HDMI_PHY_STATUS_0); 689 DUMPREG(HDMI_PHY_STATUS_0);
690 DUMPREG(HDMI_PHY_STATUS_PLL); 690 DUMPREG(HDMI_PHY_STATUS_PLL);
691 DUMPREG(HDMI_PHY_CON_0); 691 DUMPREG(HDMI_PHY_CON_0);
692 DUMPREG(HDMI_PHY_RSTOUT); 692 DUMPREG(HDMI_V14_PHY_RSTOUT);
693 DUMPREG(HDMI_PHY_VPLL); 693 DUMPREG(HDMI_PHY_VPLL);
694 DUMPREG(HDMI_PHY_CMU); 694 DUMPREG(HDMI_PHY_CMU);
695 DUMPREG(HDMI_CORE_RSTOUT); 695 DUMPREG(HDMI_CORE_RSTOUT);
@@ -942,9 +942,9 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
942static enum drm_connector_status hdmi_detect(struct drm_connector *connector, 942static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
943 bool force) 943 bool force)
944{ 944{
945 struct hdmi_context *hdata = ctx_from_connector(connector); 945 struct hdmi_context *hdata = connector_to_hdmi(connector);
946 946
947 if (gpio_get_value(hdata->hpd_gpio)) 947 if (gpiod_get_value(hdata->hpd_gpio))
948 return connector_status_connected; 948 return connector_status_connected;
949 949
950 return connector_status_disconnected; 950 return connector_status_disconnected;
@@ -968,7 +968,7 @@ static struct drm_connector_funcs hdmi_connector_funcs = {
968 968
969static int hdmi_get_modes(struct drm_connector *connector) 969static int hdmi_get_modes(struct drm_connector *connector)
970{ 970{
971 struct hdmi_context *hdata = ctx_from_connector(connector); 971 struct hdmi_context *hdata = connector_to_hdmi(connector);
972 struct edid *edid; 972 struct edid *edid;
973 int ret; 973 int ret;
974 974
@@ -1008,7 +1008,7 @@ static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
1008static int hdmi_mode_valid(struct drm_connector *connector, 1008static int hdmi_mode_valid(struct drm_connector *connector,
1009 struct drm_display_mode *mode) 1009 struct drm_display_mode *mode)
1010{ 1010{
1011 struct hdmi_context *hdata = ctx_from_connector(connector); 1011 struct hdmi_context *hdata = connector_to_hdmi(connector);
1012 int ret; 1012 int ret;
1013 1013
1014 DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n", 1014 DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
@@ -1016,10 +1016,6 @@ static int hdmi_mode_valid(struct drm_connector *connector,
1016 (mode->flags & DRM_MODE_FLAG_INTERLACE) ? true : 1016 (mode->flags & DRM_MODE_FLAG_INTERLACE) ? true :
1017 false, mode->clock * 1000); 1017 false, mode->clock * 1000);
1018 1018
1019 ret = mixer_check_mode(mode);
1020 if (ret)
1021 return MODE_BAD;
1022
1023 ret = hdmi_find_phy_conf(hdata, mode->clock * 1000); 1019 ret = hdmi_find_phy_conf(hdata, mode->clock * 1000);
1024 if (ret < 0) 1020 if (ret < 0)
1025 return MODE_BAD; 1021 return MODE_BAD;
@@ -1029,7 +1025,7 @@ static int hdmi_mode_valid(struct drm_connector *connector,
1029 1025
1030static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector) 1026static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector)
1031{ 1027{
1032 struct hdmi_context *hdata = ctx_from_connector(connector); 1028 struct hdmi_context *hdata = connector_to_hdmi(connector);
1033 1029
1034 return &hdata->encoder; 1030 return &hdata->encoder;
1035} 1031}
@@ -1110,70 +1106,17 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder,
1110 return true; 1106 return true;
1111} 1107}
1112 1108
1113static void hdmi_set_acr(u32 freq, u8 *acr) 1109static void hdmi_reg_acr(struct hdmi_context *hdata, u32 freq)
1114{ 1110{
1115 u32 n, cts; 1111 u32 n, cts;
1116 1112
1117 switch (freq) { 1113 cts = (freq % 9) ? 27000 : 30000;
1118 case 32000: 1114 n = 128 * freq / (27000000 / cts);
1119 n = 4096;
1120 cts = 27000;
1121 break;
1122 case 44100:
1123 n = 6272;
1124 cts = 30000;
1125 break;
1126 case 88200:
1127 n = 12544;
1128 cts = 30000;
1129 break;
1130 case 176400:
1131 n = 25088;
1132 cts = 30000;
1133 break;
1134 case 48000:
1135 n = 6144;
1136 cts = 27000;
1137 break;
1138 case 96000:
1139 n = 12288;
1140 cts = 27000;
1141 break;
1142 case 192000:
1143 n = 24576;
1144 cts = 27000;
1145 break;
1146 default:
1147 n = 0;
1148 cts = 0;
1149 break;
1150 }
1151
1152 acr[1] = cts >> 16;
1153 acr[2] = cts >> 8 & 0xff;
1154 acr[3] = cts & 0xff;
1155 1115
1156 acr[4] = n >> 16; 1116 hdmi_reg_writev(hdata, HDMI_ACR_N0, 3, n);
1157 acr[5] = n >> 8 & 0xff; 1117 hdmi_reg_writev(hdata, HDMI_ACR_MCTS0, 3, cts);
1158 acr[6] = n & 0xff; 1118 hdmi_reg_writev(hdata, HDMI_ACR_CTS0, 3, cts);
1159} 1119 hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4);
1160
1161static void hdmi_reg_acr(struct hdmi_context *hdata, u8 *acr)
1162{
1163 hdmi_reg_writeb(hdata, HDMI_ACR_N0, acr[6]);
1164 hdmi_reg_writeb(hdata, HDMI_ACR_N1, acr[5]);
1165 hdmi_reg_writeb(hdata, HDMI_ACR_N2, acr[4]);
1166 hdmi_reg_writeb(hdata, HDMI_ACR_MCTS0, acr[3]);
1167 hdmi_reg_writeb(hdata, HDMI_ACR_MCTS1, acr[2]);
1168 hdmi_reg_writeb(hdata, HDMI_ACR_MCTS2, acr[1]);
1169 hdmi_reg_writeb(hdata, HDMI_ACR_CTS0, acr[3]);
1170 hdmi_reg_writeb(hdata, HDMI_ACR_CTS1, acr[2]);
1171 hdmi_reg_writeb(hdata, HDMI_ACR_CTS2, acr[1]);
1172
1173 if (hdata->drv_data->type == HDMI_TYPE13)
1174 hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 4);
1175 else
1176 hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4);
1177} 1120}
1178 1121
1179static void hdmi_audio_init(struct hdmi_context *hdata) 1122static void hdmi_audio_init(struct hdmi_context *hdata)
@@ -1181,7 +1124,6 @@ static void hdmi_audio_init(struct hdmi_context *hdata)
1181 u32 sample_rate, bits_per_sample; 1124 u32 sample_rate, bits_per_sample;
1182 u32 data_num, bit_ch, sample_frq; 1125 u32 data_num, bit_ch, sample_frq;
1183 u32 val; 1126 u32 val;
1184 u8 acr[7];
1185 1127
1186 sample_rate = 44100; 1128 sample_rate = 44100;
1187 bits_per_sample = 16; 1129 bits_per_sample = 16;
@@ -1201,8 +1143,7 @@ static void hdmi_audio_init(struct hdmi_context *hdata)
1201 break; 1143 break;
1202 } 1144 }
1203 1145
1204 hdmi_set_acr(sample_rate, acr); 1146 hdmi_reg_acr(hdata, sample_rate);
1205 hdmi_reg_acr(hdata, acr);
1206 1147
1207 hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CON, HDMI_I2S_IN_DISABLE 1148 hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CON, HDMI_I2S_IN_DISABLE
1208 | HDMI_I2S_AUD_I2S | HDMI_I2S_CUV_I2S_ENABLE 1149 | HDMI_I2S_AUD_I2S | HDMI_I2S_CUV_I2S_ENABLE
@@ -1335,11 +1276,27 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
1335 } 1276 }
1336} 1277}
1337 1278
1279static void hdmiphy_wait_for_pll(struct hdmi_context *hdata)
1280{
1281 int tries;
1282
1283 for (tries = 0; tries < 10; ++tries) {
1284 u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS);
1285
1286 if (val & HDMI_PHY_STATUS_READY) {
1287 DRM_DEBUG_KMS("PLL stabilized after %d tries\n", tries);
1288 return;
1289 }
1290 usleep_range(10, 20);
1291 }
1292
1293 DRM_ERROR("PLL could not reach steady state\n");
1294}
1295
1338static void hdmi_v13_mode_apply(struct hdmi_context *hdata) 1296static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
1339{ 1297{
1340 struct drm_display_mode *m = &hdata->current_mode; 1298 struct drm_display_mode *m = &hdata->current_mode;
1341 unsigned int val; 1299 unsigned int val;
1342 int tries;
1343 1300
1344 hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay); 1301 hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay);
1345 hdmi_reg_writev(hdata, HDMI_V13_H_V_LINE_0, 3, 1302 hdmi_reg_writev(hdata, HDMI_V13_H_V_LINE_0, 3,
@@ -1425,32 +1382,11 @@ static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
1425 hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2, 0x233); 1382 hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2, 0x233);
1426 hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1); 1383 hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1);
1427 hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2, 0x233); 1384 hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2, 0x233);
1428
1429 /* waiting for HDMIPHY's PLL to get to steady state */
1430 for (tries = 100; tries; --tries) {
1431 u32 val = hdmi_reg_read(hdata, HDMI_V13_PHY_STATUS);
1432 if (val & HDMI_PHY_STATUS_READY)
1433 break;
1434 usleep_range(1000, 2000);
1435 }
1436 /* steady state not achieved */
1437 if (tries == 0) {
1438 DRM_ERROR("hdmiphy's pll could not reach steady state.\n");
1439 hdmi_regs_dump(hdata, "timing apply");
1440 }
1441
1442 clk_disable_unprepare(hdata->res.sclk_hdmi);
1443 clk_set_parent(hdata->res.mout_hdmi, hdata->res.sclk_hdmiphy);
1444 clk_prepare_enable(hdata->res.sclk_hdmi);
1445
1446 /* enable HDMI and timing generator */
1447 hdmi_start(hdata, true);
1448} 1385}
1449 1386
1450static void hdmi_v14_mode_apply(struct hdmi_context *hdata) 1387static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
1451{ 1388{
1452 struct drm_display_mode *m = &hdata->current_mode; 1389 struct drm_display_mode *m = &hdata->current_mode;
1453 int tries;
1454 1390
1455 hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay); 1391 hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay);
1456 hdmi_reg_writev(hdata, HDMI_V_LINE_0, 2, m->vtotal); 1392 hdmi_reg_writev(hdata, HDMI_V_LINE_0, 2, m->vtotal);
@@ -1562,26 +1498,6 @@ static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
1562 hdmi_reg_writev(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, 2, 0x1); 1498 hdmi_reg_writev(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, 2, 0x1);
1563 hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1); 1499 hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1);
1564 hdmi_reg_writev(hdata, HDMI_TG_3D, 1, 0x0); 1500 hdmi_reg_writev(hdata, HDMI_TG_3D, 1, 0x0);
1565
1566 /* waiting for HDMIPHY's PLL to get to steady state */
1567 for (tries = 100; tries; --tries) {
1568 u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS_0);
1569 if (val & HDMI_PHY_STATUS_READY)
1570 break;
1571 usleep_range(1000, 2000);
1572 }
1573 /* steady state not achieved */
1574 if (tries == 0) {
1575 DRM_ERROR("hdmiphy's pll could not reach steady state.\n");
1576 hdmi_regs_dump(hdata, "timing apply");
1577 }
1578
1579 clk_disable_unprepare(hdata->res.sclk_hdmi);
1580 clk_set_parent(hdata->res.mout_hdmi, hdata->res.sclk_hdmiphy);
1581 clk_prepare_enable(hdata->res.sclk_hdmi);
1582
1583 /* enable HDMI and timing generator */
1584 hdmi_start(hdata, true);
1585} 1501}
1586 1502
1587static void hdmi_mode_apply(struct hdmi_context *hdata) 1503static void hdmi_mode_apply(struct hdmi_context *hdata)
@@ -1590,74 +1506,26 @@ static void hdmi_mode_apply(struct hdmi_context *hdata)
1590 hdmi_v13_mode_apply(hdata); 1506 hdmi_v13_mode_apply(hdata);
1591 else 1507 else
1592 hdmi_v14_mode_apply(hdata); 1508 hdmi_v14_mode_apply(hdata);
1593}
1594 1509
1595static void hdmiphy_conf_reset(struct hdmi_context *hdata) 1510 hdmiphy_wait_for_pll(hdata);
1596{
1597 u32 reg;
1598 1511
1599 clk_disable_unprepare(hdata->res.sclk_hdmi); 1512 clk_set_parent(hdata->mout_hdmi, hdata->sclk_hdmiphy);
1600 clk_set_parent(hdata->res.mout_hdmi, hdata->res.sclk_pixel);
1601 clk_prepare_enable(hdata->res.sclk_hdmi);
1602 1513
1603 /* operation mode */ 1514 /* enable HDMI and timing generator */
1604 hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE, 1515 hdmi_start(hdata, true);
1605 HDMI_PHY_ENABLE_MODE_SET); 1516}
1606 1517
1607 if (hdata->drv_data->type == HDMI_TYPE13) 1518static void hdmiphy_conf_reset(struct hdmi_context *hdata)
1608 reg = HDMI_V13_PHY_RSTOUT; 1519{
1609 else 1520 clk_set_parent(hdata->mout_hdmi, hdata->sclk_pixel);
1610 reg = HDMI_PHY_RSTOUT;
1611 1521
1612 /* reset hdmiphy */ 1522 /* reset hdmiphy */
1613 hdmi_reg_writemask(hdata, reg, ~0, HDMI_PHY_SW_RSTOUT); 1523 hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, ~0, HDMI_PHY_SW_RSTOUT);
1614 usleep_range(10000, 12000); 1524 usleep_range(10000, 12000);
1615 hdmi_reg_writemask(hdata, reg, 0, HDMI_PHY_SW_RSTOUT); 1525 hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, 0, HDMI_PHY_SW_RSTOUT);
1616 usleep_range(10000, 12000); 1526 usleep_range(10000, 12000);
1617} 1527}
1618 1528
1619static void hdmiphy_poweron(struct hdmi_context *hdata)
1620{
1621 if (hdata->drv_data->type != HDMI_TYPE14)
1622 return;
1623
1624 DRM_DEBUG_KMS("\n");
1625
1626 /* For PHY Mode Setting */
1627 hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
1628 HDMI_PHY_ENABLE_MODE_SET);
1629 /* Phy Power On */
1630 hdmiphy_reg_writeb(hdata, HDMIPHY_POWER,
1631 HDMI_PHY_POWER_ON);
1632 /* For PHY Mode Setting */
1633 hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
1634 HDMI_PHY_DISABLE_MODE_SET);
1635 /* PHY SW Reset */
1636 hdmiphy_conf_reset(hdata);
1637}
1638
1639static void hdmiphy_poweroff(struct hdmi_context *hdata)
1640{
1641 if (hdata->drv_data->type != HDMI_TYPE14)
1642 return;
1643
1644 DRM_DEBUG_KMS("\n");
1645
1646 /* PHY SW Reset */
1647 hdmiphy_conf_reset(hdata);
1648 /* For PHY Mode Setting */
1649 hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
1650 HDMI_PHY_ENABLE_MODE_SET);
1651
1652 /* PHY Power Off */
1653 hdmiphy_reg_writeb(hdata, HDMIPHY_POWER,
1654 HDMI_PHY_POWER_OFF);
1655
1656 /* For PHY Mode Setting */
1657 hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
1658 HDMI_PHY_DISABLE_MODE_SET);
1659}
1660
1661static void hdmiphy_conf_apply(struct hdmi_context *hdata) 1529static void hdmiphy_conf_apply(struct hdmi_context *hdata)
1662{ 1530{
1663 int ret; 1531 int ret;
@@ -1678,14 +1546,6 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
1678 } 1546 }
1679 1547
1680 usleep_range(10000, 12000); 1548 usleep_range(10000, 12000);
1681
1682 ret = hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
1683 HDMI_PHY_DISABLE_MODE_SET);
1684 if (ret) {
1685 DRM_ERROR("failed to enable hdmiphy\n");
1686 return;
1687 }
1688
1689} 1549}
1690 1550
1691static void hdmi_conf_apply(struct hdmi_context *hdata) 1551static void hdmi_conf_apply(struct hdmi_context *hdata)
@@ -1724,7 +1584,6 @@ static void hdmi_mode_set(struct drm_encoder *encoder,
1724static void hdmi_enable(struct drm_encoder *encoder) 1584static void hdmi_enable(struct drm_encoder *encoder)
1725{ 1585{
1726 struct hdmi_context *hdata = encoder_to_hdmi(encoder); 1586 struct hdmi_context *hdata = encoder_to_hdmi(encoder);
1727 struct hdmi_resources *res = &hdata->res;
1728 1587
1729 if (hdata->powered) 1588 if (hdata->powered)
1730 return; 1589 return;
@@ -1733,24 +1592,22 @@ static void hdmi_enable(struct drm_encoder *encoder)
1733 1592
1734 pm_runtime_get_sync(hdata->dev); 1593 pm_runtime_get_sync(hdata->dev);
1735 1594
1736 if (regulator_bulk_enable(res->regul_count, res->regul_bulk)) 1595 if (regulator_bulk_enable(ARRAY_SIZE(supply), hdata->regul_bulk))
1737 DRM_DEBUG_KMS("failed to enable regulator bulk\n"); 1596 DRM_DEBUG_KMS("failed to enable regulator bulk\n");
1738 1597
1739 /* set pmu hdmiphy control bit to enable hdmiphy */ 1598 /* set pmu hdmiphy control bit to enable hdmiphy */
1740 regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL, 1599 regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
1741 PMU_HDMI_PHY_ENABLE_BIT, 1); 1600 PMU_HDMI_PHY_ENABLE_BIT, 1);
1742 1601
1743 clk_prepare_enable(res->hdmi); 1602 clk_prepare_enable(hdata->hdmi);
1744 clk_prepare_enable(res->sclk_hdmi); 1603 clk_prepare_enable(hdata->sclk_hdmi);
1745 1604
1746 hdmiphy_poweron(hdata);
1747 hdmi_conf_apply(hdata); 1605 hdmi_conf_apply(hdata);
1748} 1606}
1749 1607
1750static void hdmi_disable(struct drm_encoder *encoder) 1608static void hdmi_disable(struct drm_encoder *encoder)
1751{ 1609{
1752 struct hdmi_context *hdata = encoder_to_hdmi(encoder); 1610 struct hdmi_context *hdata = encoder_to_hdmi(encoder);
1753 struct hdmi_resources *res = &hdata->res;
1754 struct drm_crtc *crtc = encoder->crtc; 1611 struct drm_crtc *crtc = encoder->crtc;
1755 const struct drm_crtc_helper_funcs *funcs = NULL; 1612 const struct drm_crtc_helper_funcs *funcs = NULL;
1756 1613
@@ -1774,18 +1631,16 @@ static void hdmi_disable(struct drm_encoder *encoder)
1774 /* HDMI System Disable */ 1631 /* HDMI System Disable */
1775 hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_EN); 1632 hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_EN);
1776 1633
1777 hdmiphy_poweroff(hdata);
1778
1779 cancel_delayed_work(&hdata->hotplug_work); 1634 cancel_delayed_work(&hdata->hotplug_work);
1780 1635
1781 clk_disable_unprepare(res->sclk_hdmi); 1636 clk_disable_unprepare(hdata->sclk_hdmi);
1782 clk_disable_unprepare(res->hdmi); 1637 clk_disable_unprepare(hdata->hdmi);
1783 1638
1784 /* reset pmu hdmiphy control bit to disable hdmiphy */ 1639 /* reset pmu hdmiphy control bit to disable hdmiphy */
1785 regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL, 1640 regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
1786 PMU_HDMI_PHY_ENABLE_BIT, 0); 1641 PMU_HDMI_PHY_ENABLE_BIT, 0);
1787 1642
1788 regulator_bulk_disable(res->regul_count, res->regul_bulk); 1643 regulator_bulk_disable(ARRAY_SIZE(supply), hdata->regul_bulk);
1789 1644
1790 pm_runtime_put_sync(hdata->dev); 1645 pm_runtime_put_sync(hdata->dev);
1791 1646
@@ -1826,80 +1681,76 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
1826static int hdmi_resources_init(struct hdmi_context *hdata) 1681static int hdmi_resources_init(struct hdmi_context *hdata)
1827{ 1682{
1828 struct device *dev = hdata->dev; 1683 struct device *dev = hdata->dev;
1829 struct hdmi_resources *res = &hdata->res;
1830 static char *supply[] = {
1831 "vdd",
1832 "vdd_osc",
1833 "vdd_pll",
1834 };
1835 int i, ret; 1684 int i, ret;
1836 1685
1837 DRM_DEBUG_KMS("HDMI resource init\n"); 1686 DRM_DEBUG_KMS("HDMI resource init\n");
1838 1687
1688 hdata->hpd_gpio = devm_gpiod_get(dev, "hpd", GPIOD_IN);
1689 if (IS_ERR(hdata->hpd_gpio)) {
1690 DRM_ERROR("cannot get hpd gpio property\n");
1691 return PTR_ERR(hdata->hpd_gpio);
1692 }
1693
1694 hdata->irq = gpiod_to_irq(hdata->hpd_gpio);
1695 if (hdata->irq < 0) {
1696 DRM_ERROR("failed to get GPIO irq\n");
1697 return hdata->irq;
1698 }
1839 /* get clocks, power */ 1699 /* get clocks, power */
1840 res->hdmi = devm_clk_get(dev, "hdmi"); 1700 hdata->hdmi = devm_clk_get(dev, "hdmi");
1841 if (IS_ERR(res->hdmi)) { 1701 if (IS_ERR(hdata->hdmi)) {
1842 DRM_ERROR("failed to get clock 'hdmi'\n"); 1702 DRM_ERROR("failed to get clock 'hdmi'\n");
1843 ret = PTR_ERR(res->hdmi); 1703 ret = PTR_ERR(hdata->hdmi);
1844 goto fail; 1704 goto fail;
1845 } 1705 }
1846 res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi"); 1706 hdata->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
1847 if (IS_ERR(res->sclk_hdmi)) { 1707 if (IS_ERR(hdata->sclk_hdmi)) {
1848 DRM_ERROR("failed to get clock 'sclk_hdmi'\n"); 1708 DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
1849 ret = PTR_ERR(res->sclk_hdmi); 1709 ret = PTR_ERR(hdata->sclk_hdmi);
1850 goto fail; 1710 goto fail;
1851 } 1711 }
1852 res->sclk_pixel = devm_clk_get(dev, "sclk_pixel"); 1712 hdata->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
1853 if (IS_ERR(res->sclk_pixel)) { 1713 if (IS_ERR(hdata->sclk_pixel)) {
1854 DRM_ERROR("failed to get clock 'sclk_pixel'\n"); 1714 DRM_ERROR("failed to get clock 'sclk_pixel'\n");
1855 ret = PTR_ERR(res->sclk_pixel); 1715 ret = PTR_ERR(hdata->sclk_pixel);
1856 goto fail; 1716 goto fail;
1857 } 1717 }
1858 res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy"); 1718 hdata->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
1859 if (IS_ERR(res->sclk_hdmiphy)) { 1719 if (IS_ERR(hdata->sclk_hdmiphy)) {
1860 DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n"); 1720 DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
1861 ret = PTR_ERR(res->sclk_hdmiphy); 1721 ret = PTR_ERR(hdata->sclk_hdmiphy);
1862 goto fail; 1722 goto fail;
1863 } 1723 }
1864 res->mout_hdmi = devm_clk_get(dev, "mout_hdmi"); 1724 hdata->mout_hdmi = devm_clk_get(dev, "mout_hdmi");
1865 if (IS_ERR(res->mout_hdmi)) { 1725 if (IS_ERR(hdata->mout_hdmi)) {
1866 DRM_ERROR("failed to get clock 'mout_hdmi'\n"); 1726 DRM_ERROR("failed to get clock 'mout_hdmi'\n");
1867 ret = PTR_ERR(res->mout_hdmi); 1727 ret = PTR_ERR(hdata->mout_hdmi);
1868 goto fail; 1728 goto fail;
1869 } 1729 }
1870 1730
1871 clk_set_parent(res->mout_hdmi, res->sclk_pixel); 1731 clk_set_parent(hdata->mout_hdmi, hdata->sclk_pixel);
1872 1732
1873 res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
1874 sizeof(res->regul_bulk[0]), GFP_KERNEL);
1875 if (!res->regul_bulk) {
1876 ret = -ENOMEM;
1877 goto fail;
1878 }
1879 for (i = 0; i < ARRAY_SIZE(supply); ++i) { 1733 for (i = 0; i < ARRAY_SIZE(supply); ++i) {
1880 res->regul_bulk[i].supply = supply[i]; 1734 hdata->regul_bulk[i].supply = supply[i];
1881 res->regul_bulk[i].consumer = NULL; 1735 hdata->regul_bulk[i].consumer = NULL;
1882 } 1736 }
1883 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk); 1737 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), hdata->regul_bulk);
1884 if (ret) { 1738 if (ret) {
1885 DRM_ERROR("failed to get regulators\n"); 1739 DRM_ERROR("failed to get regulators\n");
1886 return ret; 1740 return ret;
1887 } 1741 }
1888 res->regul_count = ARRAY_SIZE(supply);
1889 1742
1890 res->reg_hdmi_en = devm_regulator_get(dev, "hdmi-en"); 1743 hdata->reg_hdmi_en = devm_regulator_get_optional(dev, "hdmi-en");
1891 if (IS_ERR(res->reg_hdmi_en) && PTR_ERR(res->reg_hdmi_en) != -ENOENT) { 1744
1892 DRM_ERROR("failed to get hdmi-en regulator\n"); 1745 if (PTR_ERR(hdata->reg_hdmi_en) == -ENODEV)
1893 return PTR_ERR(res->reg_hdmi_en); 1746 return 0;
1894 } 1747
1895 if (!IS_ERR(res->reg_hdmi_en)) { 1748 if (IS_ERR(hdata->reg_hdmi_en))
1896 ret = regulator_enable(res->reg_hdmi_en); 1749 return PTR_ERR(hdata->reg_hdmi_en);
1897 if (ret) { 1750
1898 DRM_ERROR("failed to enable hdmi-en regulator\n"); 1751 ret = regulator_enable(hdata->reg_hdmi_en);
1899 return ret; 1752 if (ret)
1900 } 1753 DRM_ERROR("failed to enable hdmi-en regulator\n");
1901 } else
1902 res->reg_hdmi_en = NULL;
1903 1754
1904 return ret; 1755 return ret;
1905fail: 1756fail:
@@ -1909,9 +1760,6 @@ fail:
1909 1760
1910static struct of_device_id hdmi_match_types[] = { 1761static struct of_device_id hdmi_match_types[] = {
1911 { 1762 {
1912 .compatible = "samsung,exynos5-hdmi",
1913 .data = &exynos5_hdmi_driver_data,
1914 }, {
1915 .compatible = "samsung,exynos4210-hdmi", 1763 .compatible = "samsung,exynos4210-hdmi",
1916 .data = &exynos4210_hdmi_driver_data, 1764 .data = &exynos4210_hdmi_driver_data,
1917 }, { 1765 }, {
@@ -2009,11 +1857,6 @@ static int hdmi_probe(struct platform_device *pdev)
2009 platform_set_drvdata(pdev, hdata); 1857 platform_set_drvdata(pdev, hdata);
2010 1858
2011 hdata->dev = dev; 1859 hdata->dev = dev;
2012 hdata->hpd_gpio = of_get_named_gpio(dev->of_node, "hpd-gpio", 0);
2013 if (hdata->hpd_gpio < 0) {
2014 DRM_ERROR("cannot get hpd gpio property\n");
2015 return hdata->hpd_gpio;
2016 }
2017 1860
2018 ret = hdmi_resources_init(hdata); 1861 ret = hdmi_resources_init(hdata);
2019 if (ret) { 1862 if (ret) {
@@ -2028,12 +1871,6 @@ static int hdmi_probe(struct platform_device *pdev)
2028 return ret; 1871 return ret;
2029 } 1872 }
2030 1873
2031 ret = devm_gpio_request(dev, hdata->hpd_gpio, "HPD");
2032 if (ret) {
2033 DRM_ERROR("failed to request HPD gpio\n");
2034 return ret;
2035 }
2036
2037 ddc_node = hdmi_legacy_ddc_dt_binding(dev); 1874 ddc_node = hdmi_legacy_ddc_dt_binding(dev);
2038 if (ddc_node) 1875 if (ddc_node)
2039 goto out_get_ddc_adpt; 1876 goto out_get_ddc_adpt;
@@ -2081,13 +1918,6 @@ out_get_phy_port:
2081 } 1918 }
2082 } 1919 }
2083 1920
2084 hdata->irq = gpio_to_irq(hdata->hpd_gpio);
2085 if (hdata->irq < 0) {
2086 DRM_ERROR("failed to get GPIO irq\n");
2087 ret = hdata->irq;
2088 goto err_hdmiphy;
2089 }
2090
2091 INIT_DELAYED_WORK(&hdata->hotplug_work, hdmi_hotplug_work_func); 1921 INIT_DELAYED_WORK(&hdata->hotplug_work, hdmi_hotplug_work_func);
2092 1922
2093 ret = devm_request_threaded_irq(dev, hdata->irq, NULL, 1923 ret = devm_request_threaded_irq(dev, hdata->irq, NULL,
@@ -2133,15 +1963,17 @@ static int hdmi_remove(struct platform_device *pdev)
2133 1963
2134 cancel_delayed_work_sync(&hdata->hotplug_work); 1964 cancel_delayed_work_sync(&hdata->hotplug_work);
2135 1965
2136 if (hdata->res.reg_hdmi_en) 1966 component_del(&pdev->dev, &hdmi_component_ops);
2137 regulator_disable(hdata->res.reg_hdmi_en); 1967
1968 pm_runtime_disable(&pdev->dev);
1969
1970 if (!IS_ERR(hdata->reg_hdmi_en))
1971 regulator_disable(hdata->reg_hdmi_en);
2138 1972
2139 if (hdata->hdmiphy_port) 1973 if (hdata->hdmiphy_port)
2140 put_device(&hdata->hdmiphy_port->dev); 1974 put_device(&hdata->hdmiphy_port->dev);
2141 put_device(&hdata->ddc_adpt->dev);
2142 1975
2143 pm_runtime_disable(&pdev->dev); 1976 put_device(&hdata->ddc_adpt->dev);
2144 component_del(&pdev->dev, &hdmi_component_ops);
2145 1977
2146 return 0; 1978 return 0;
2147} 1979}
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 7f81cce966d4..d09f8f9a8939 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -39,11 +39,10 @@
39#include "exynos_drm_crtc.h" 39#include "exynos_drm_crtc.h"
40#include "exynos_drm_plane.h" 40#include "exynos_drm_plane.h"
41#include "exynos_drm_iommu.h" 41#include "exynos_drm_iommu.h"
42#include "exynos_mixer.h"
43 42
44#define MIXER_WIN_NR 3 43#define MIXER_WIN_NR 3
45#define MIXER_DEFAULT_WIN 0
46#define VP_DEFAULT_WIN 2 44#define VP_DEFAULT_WIN 2
45#define CURSOR_WIN 1
47 46
48/* The pixelformats that are natively supported by the mixer. */ 47/* The pixelformats that are natively supported by the mixer. */
49#define MXR_FORMAT_RGB565 4 48#define MXR_FORMAT_RGB565 4
@@ -600,7 +599,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
600 599
601 /* setup display size */ 600 /* setup display size */
602 if (ctx->mxr_ver == MXR_VER_128_0_0_184 && 601 if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
603 win == MIXER_DEFAULT_WIN) { 602 win == DEFAULT_WIN) {
604 val = MXR_MXR_RES_HEIGHT(mode->vdisplay); 603 val = MXR_MXR_RES_HEIGHT(mode->vdisplay);
605 val |= MXR_MXR_RES_WIDTH(mode->hdisplay); 604 val |= MXR_MXR_RES_WIDTH(mode->hdisplay);
606 mixer_reg_write(res, MXR_RESOLUTION, val); 605 mixer_reg_write(res, MXR_RESOLUTION, val);
@@ -652,7 +651,7 @@ static void vp_win_reset(struct mixer_context *ctx)
652 /* waiting until VP_SRESET_PROCESSING is 0 */ 651 /* waiting until VP_SRESET_PROCESSING is 0 */
653 if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING) 652 if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)
654 break; 653 break;
655 usleep_range(10000, 12000); 654 mdelay(10);
656 } 655 }
657 WARN(tries == 0, "failed to reset Video Processor\n"); 656 WARN(tries == 0, "failed to reset Video Processor\n");
658} 657}
@@ -1096,8 +1095,10 @@ static void mixer_disable(struct exynos_drm_crtc *crtc)
1096} 1095}
1097 1096
1098/* Only valid for Mixer version 16.0.33.0 */ 1097/* Only valid for Mixer version 16.0.33.0 */
1099int mixer_check_mode(struct drm_display_mode *mode) 1098static int mixer_atomic_check(struct exynos_drm_crtc *crtc,
1099 struct drm_crtc_state *state)
1100{ 1100{
1101 struct drm_display_mode *mode = &state->adjusted_mode;
1101 u32 w, h; 1102 u32 w, h;
1102 1103
1103 w = mode->hdisplay; 1104 w = mode->hdisplay;
@@ -1123,6 +1124,7 @@ static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
1123 .wait_for_vblank = mixer_wait_for_vblank, 1124 .wait_for_vblank = mixer_wait_for_vblank,
1124 .update_plane = mixer_update_plane, 1125 .update_plane = mixer_update_plane,
1125 .disable_plane = mixer_disable_plane, 1126 .disable_plane = mixer_disable_plane,
1127 .atomic_check = mixer_atomic_check,
1126}; 1128};
1127 1129
1128static struct mixer_drv_data exynos5420_mxr_drv_data = { 1130static struct mixer_drv_data exynos5420_mxr_drv_data = {
@@ -1197,8 +1199,6 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
1197 const uint32_t *formats; 1199 const uint32_t *formats;
1198 unsigned int fcount; 1200 unsigned int fcount;
1199 1201
1200 type = (zpos == MIXER_DEFAULT_WIN) ? DRM_PLANE_TYPE_PRIMARY :
1201 DRM_PLANE_TYPE_OVERLAY;
1202 if (zpos < VP_DEFAULT_WIN) { 1202 if (zpos < VP_DEFAULT_WIN) {
1203 formats = mixer_formats; 1203 formats = mixer_formats;
1204 fcount = ARRAY_SIZE(mixer_formats); 1204 fcount = ARRAY_SIZE(mixer_formats);
@@ -1207,6 +1207,7 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
1207 fcount = ARRAY_SIZE(vp_formats); 1207 fcount = ARRAY_SIZE(vp_formats);
1208 } 1208 }
1209 1209
1210 type = exynos_plane_get_type(zpos, CURSOR_WIN);
1210 ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], 1211 ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
1211 1 << ctx->pipe, type, formats, fcount, 1212 1 << ctx->pipe, type, formats, fcount,
1212 zpos); 1213 zpos);
@@ -1214,7 +1215,7 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
1214 return ret; 1215 return ret;
1215 } 1216 }
1216 1217
1217 exynos_plane = &ctx->planes[MIXER_DEFAULT_WIN]; 1218 exynos_plane = &ctx->planes[DEFAULT_WIN];
1218 ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base, 1219 ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
1219 ctx->pipe, EXYNOS_DISPLAY_TYPE_HDMI, 1220 ctx->pipe, EXYNOS_DISPLAY_TYPE_HDMI,
1220 &mixer_crtc_ops, ctx); 1221 &mixer_crtc_ops, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.h b/drivers/gpu/drm/exynos/exynos_mixer.h
deleted file mode 100644
index 3811e417f0e9..000000000000
--- a/drivers/gpu/drm/exynos/exynos_mixer.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * Copyright (C) 2013 Google, Inc.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _EXYNOS_MIXER_H_
15#define _EXYNOS_MIXER_H_
16
17/* This function returns 0 if the given timing is valid for the mixer */
18int mixer_check_mode(struct drm_display_mode *mode);
19
20#endif
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 3f35ac6d8a47..8c891e59be21 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -72,7 +72,6 @@
72#define HDMI_V13_V_SYNC_GEN_3_0 HDMI_CORE_BASE(0x0150) 72#define HDMI_V13_V_SYNC_GEN_3_0 HDMI_CORE_BASE(0x0150)
73#define HDMI_V13_V_SYNC_GEN_3_1 HDMI_CORE_BASE(0x0154) 73#define HDMI_V13_V_SYNC_GEN_3_1 HDMI_CORE_BASE(0x0154)
74#define HDMI_V13_V_SYNC_GEN_3_2 HDMI_CORE_BASE(0x0158) 74#define HDMI_V13_V_SYNC_GEN_3_2 HDMI_CORE_BASE(0x0158)
75#define HDMI_V13_ACR_CON HDMI_CORE_BASE(0x0180)
76#define HDMI_V13_AVI_CON HDMI_CORE_BASE(0x0300) 75#define HDMI_V13_AVI_CON HDMI_CORE_BASE(0x0300)
77#define HDMI_V13_AVI_BYTE(n) HDMI_CORE_BASE(0x0320 + 4 * (n)) 76#define HDMI_V13_AVI_BYTE(n) HDMI_CORE_BASE(0x0320 + 4 * (n))
78#define HDMI_V13_DC_CONTROL HDMI_CORE_BASE(0x05C0) 77#define HDMI_V13_DC_CONTROL HDMI_CORE_BASE(0x05C0)
@@ -171,7 +170,7 @@
171#define HDMI_HPD_ST HDMI_CTRL_BASE(0x0044) 170#define HDMI_HPD_ST HDMI_CTRL_BASE(0x0044)
172#define HDMI_HPD_TH_X HDMI_CTRL_BASE(0x0050) 171#define HDMI_HPD_TH_X HDMI_CTRL_BASE(0x0050)
173#define HDMI_AUDIO_CLKSEL HDMI_CTRL_BASE(0x0070) 172#define HDMI_AUDIO_CLKSEL HDMI_CTRL_BASE(0x0070)
174#define HDMI_PHY_RSTOUT HDMI_CTRL_BASE(0x0074) 173#define HDMI_V14_PHY_RSTOUT HDMI_CTRL_BASE(0x0074)
175#define HDMI_PHY_VPLL HDMI_CTRL_BASE(0x0078) 174#define HDMI_PHY_VPLL HDMI_CTRL_BASE(0x0078)
176#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x007C) 175#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x007C)
177#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0080) 176#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0080)
@@ -277,16 +276,26 @@
277#define HDMI_ASP_CHCFG2 HDMI_CORE_BASE(0x0318) 276#define HDMI_ASP_CHCFG2 HDMI_CORE_BASE(0x0318)
278#define HDMI_ASP_CHCFG3 HDMI_CORE_BASE(0x031C) 277#define HDMI_ASP_CHCFG3 HDMI_CORE_BASE(0x031C)
279 278
280#define HDMI_ACR_CON HDMI_CORE_BASE(0x0400) 279#define HDMI_V13_ACR_CON HDMI_CORE_BASE(0x0180)
281#define HDMI_ACR_MCTS0 HDMI_CORE_BASE(0x0410) 280#define HDMI_V13_ACR_MCTS0 HDMI_CORE_BASE(0x0184)
282#define HDMI_ACR_MCTS1 HDMI_CORE_BASE(0x0414) 281#define HDMI_V13_ACR_MCTS1 HDMI_CORE_BASE(0x0188)
283#define HDMI_ACR_MCTS2 HDMI_CORE_BASE(0x0418) 282#define HDMI_V13_ACR_MCTS2 HDMI_CORE_BASE(0x018C)
284#define HDMI_ACR_CTS0 HDMI_CORE_BASE(0x0420) 283#define HDMI_V13_ACR_CTS0 HDMI_CORE_BASE(0x0190)
285#define HDMI_ACR_CTS1 HDMI_CORE_BASE(0x0424) 284#define HDMI_V13_ACR_CTS1 HDMI_CORE_BASE(0x0194)
286#define HDMI_ACR_CTS2 HDMI_CORE_BASE(0x0428) 285#define HDMI_V13_ACR_CTS2 HDMI_CORE_BASE(0x0198)
287#define HDMI_ACR_N0 HDMI_CORE_BASE(0x0430) 286#define HDMI_V13_ACR_N0 HDMI_CORE_BASE(0x01A0)
288#define HDMI_ACR_N1 HDMI_CORE_BASE(0x0434) 287#define HDMI_V13_ACR_N1 HDMI_CORE_BASE(0x01A4)
289#define HDMI_ACR_N2 HDMI_CORE_BASE(0x0438) 288#define HDMI_V13_ACR_N2 HDMI_CORE_BASE(0x01A8)
289#define HDMI_V14_ACR_CON HDMI_CORE_BASE(0x0400)
290#define HDMI_V14_ACR_MCTS0 HDMI_CORE_BASE(0x0410)
291#define HDMI_V14_ACR_MCTS1 HDMI_CORE_BASE(0x0414)
292#define HDMI_V14_ACR_MCTS2 HDMI_CORE_BASE(0x0418)
293#define HDMI_V14_ACR_CTS0 HDMI_CORE_BASE(0x0420)
294#define HDMI_V14_ACR_CTS1 HDMI_CORE_BASE(0x0424)
295#define HDMI_V14_ACR_CTS2 HDMI_CORE_BASE(0x0428)
296#define HDMI_V14_ACR_N0 HDMI_CORE_BASE(0x0430)
297#define HDMI_V14_ACR_N1 HDMI_CORE_BASE(0x0434)
298#define HDMI_V14_ACR_N2 HDMI_CORE_BASE(0x0438)
290 299
291/* Packet related registers */ 300/* Packet related registers */
292#define HDMI_ACP_CON HDMI_CORE_BASE(0x0500) 301#define HDMI_ACP_CON HDMI_CORE_BASE(0x0500)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 9a8e2da47158..1930234ba5f1 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -140,7 +140,7 @@ static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
140 return IRQ_HANDLED; 140 return IRQ_HANDLED;
141} 141}
142 142
143static int fsl_dcu_drm_enable_vblank(struct drm_device *dev, int crtc) 143static int fsl_dcu_drm_enable_vblank(struct drm_device *dev, unsigned int pipe)
144{ 144{
145 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; 145 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
146 unsigned int value; 146 unsigned int value;
@@ -156,7 +156,8 @@ static int fsl_dcu_drm_enable_vblank(struct drm_device *dev, int crtc)
156 return 0; 156 return 0;
157} 157}
158 158
159static void fsl_dcu_drm_disable_vblank(struct drm_device *dev, int crtc) 159static void fsl_dcu_drm_disable_vblank(struct drm_device *dev,
160 unsigned int pipe)
160{ 161{
161 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; 162 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
162 unsigned int value; 163 unsigned int value;
@@ -192,7 +193,7 @@ static struct drm_driver fsl_dcu_drm_driver = {
192 .unload = fsl_dcu_unload, 193 .unload = fsl_dcu_unload,
193 .preclose = fsl_dcu_drm_preclose, 194 .preclose = fsl_dcu_drm_preclose,
194 .irq_handler = fsl_dcu_drm_irq, 195 .irq_handler = fsl_dcu_drm_irq,
195 .get_vblank_counter = drm_vblank_count, 196 .get_vblank_counter = drm_vblank_no_hw_counter,
196 .enable_vblank = fsl_dcu_drm_enable_vblank, 197 .enable_vblank = fsl_dcu_drm_enable_vblank,
197 .disable_vblank = fsl_dcu_drm_disable_vblank, 198 .disable_vblank = fsl_dcu_drm_disable_vblank,
198 .gem_free_object = drm_gem_cma_free_object, 199 .gem_free_object = drm_gem_cma_free_object,
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index d1e300dcd544..51daaea40b4d 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -191,14 +191,12 @@ set_failed:
191 191
192static void 192static void
193fsl_dcu_drm_plane_cleanup_fb(struct drm_plane *plane, 193fsl_dcu_drm_plane_cleanup_fb(struct drm_plane *plane,
194 struct drm_framebuffer *fb,
195 const struct drm_plane_state *new_state) 194 const struct drm_plane_state *new_state)
196{ 195{
197} 196}
198 197
199static int 198static int
200fsl_dcu_drm_plane_prepare_fb(struct drm_plane *plane, 199fsl_dcu_drm_plane_prepare_fb(struct drm_plane *plane,
201 struct drm_framebuffer *fb,
202 const struct drm_plane_state *new_state) 200 const struct drm_plane_state *new_state)
203{ 201{
204 return 0; 202 return 0;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 0fafb8e2483a..17cea400ae32 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -247,7 +247,6 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
247 247
248#define wait_for(COND, MS) _wait_for(COND, MS, 1) 248#define wait_for(COND, MS) _wait_for(COND, MS, 1)
249 249
250#define DP_LINK_STATUS_SIZE 6
251#define DP_LINK_CHECK_TIMEOUT (10 * 1000) 250#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
252 251
253#define DP_LINK_CONFIGURATION_SIZE 9 252#define DP_LINK_CONFIGURATION_SIZE 9
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index e38057b91865..e21726ecac32 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -687,15 +687,15 @@ extern void psb_irq_turn_off_dpst(struct drm_device *dev);
687extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands); 687extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
688extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence); 688extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
689extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence); 689extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
690extern int psb_enable_vblank(struct drm_device *dev, int crtc); 690extern int psb_enable_vblank(struct drm_device *dev, unsigned int pipe);
691extern void psb_disable_vblank(struct drm_device *dev, int crtc); 691extern void psb_disable_vblank(struct drm_device *dev, unsigned int pipe);
692void 692void
693psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask); 693psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
694 694
695void 695void
696psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask); 696psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
697 697
698extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc); 698extern u32 psb_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
699 699
700/* framebuffer.c */ 700/* framebuffer.c */
701extern int psbfb_probed(struct drm_device *dev); 701extern int psbfb_probed(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 624eb36511c5..78eb10902809 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -510,7 +510,7 @@ int psb_irq_disable_dpst(struct drm_device *dev)
510/* 510/*
511 * It is used to enable VBLANK interrupt 511 * It is used to enable VBLANK interrupt
512 */ 512 */
513int psb_enable_vblank(struct drm_device *dev, int pipe) 513int psb_enable_vblank(struct drm_device *dev, unsigned int pipe)
514{ 514{
515 struct drm_psb_private *dev_priv = dev->dev_private; 515 struct drm_psb_private *dev_priv = dev->dev_private;
516 unsigned long irqflags; 516 unsigned long irqflags;
@@ -549,7 +549,7 @@ int psb_enable_vblank(struct drm_device *dev, int pipe)
549/* 549/*
550 * It is used to disable VBLANK interrupt 550 * It is used to disable VBLANK interrupt
551 */ 551 */
552void psb_disable_vblank(struct drm_device *dev, int pipe) 552void psb_disable_vblank(struct drm_device *dev, unsigned int pipe)
553{ 553{
554 struct drm_psb_private *dev_priv = dev->dev_private; 554 struct drm_psb_private *dev_priv = dev->dev_private;
555 unsigned long irqflags; 555 unsigned long irqflags;
@@ -622,7 +622,7 @@ void mdfld_disable_te(struct drm_device *dev, int pipe)
622/* Called from drm generic code, passed a 'crtc', which 622/* Called from drm generic code, passed a 'crtc', which
623 * we use as a pipe index 623 * we use as a pipe index
624 */ 624 */
625u32 psb_get_vblank_counter(struct drm_device *dev, int pipe) 625u32 psb_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
626{ 626{
627 uint32_t high_frame = PIPEAFRAMEHIGH; 627 uint32_t high_frame = PIPEAFRAMEHIGH;
628 uint32_t low_frame = PIPEAFRAMEPIXEL; 628 uint32_t low_frame = PIPEAFRAMEPIXEL;
@@ -654,7 +654,7 @@ u32 psb_get_vblank_counter(struct drm_device *dev, int pipe)
654 reg_val = REG_READ(pipeconf_reg); 654 reg_val = REG_READ(pipeconf_reg);
655 655
656 if (!(reg_val & PIPEACONF_ENABLE)) { 656 if (!(reg_val & PIPEACONF_ENABLE)) {
657 dev_err(dev->dev, "trying to get vblank count for disabled pipe %d\n", 657 dev_err(dev->dev, "trying to get vblank count for disabled pipe %u\n",
658 pipe); 658 pipe);
659 goto psb_get_vblank_counter_exit; 659 goto psb_get_vblank_counter_exit;
660 } 660 }
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
index d0b45ffa1126..e6a81a8c9f35 100644
--- a/drivers/gpu/drm/gma500/psb_irq.h
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -38,9 +38,9 @@ int psb_irq_enable_dpst(struct drm_device *dev);
38int psb_irq_disable_dpst(struct drm_device *dev); 38int psb_irq_disable_dpst(struct drm_device *dev);
39void psb_irq_turn_on_dpst(struct drm_device *dev); 39void psb_irq_turn_on_dpst(struct drm_device *dev);
40void psb_irq_turn_off_dpst(struct drm_device *dev); 40void psb_irq_turn_off_dpst(struct drm_device *dev);
41int psb_enable_vblank(struct drm_device *dev, int pipe); 41int psb_enable_vblank(struct drm_device *dev, unsigned int pipe);
42void psb_disable_vblank(struct drm_device *dev, int pipe); 42void psb_disable_vblank(struct drm_device *dev, unsigned int pipe);
43u32 psb_get_vblank_counter(struct drm_device *dev, int pipe); 43u32 psb_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
44 44
45int mdfld_enable_te(struct drm_device *dev, int pipe); 45int mdfld_enable_te(struct drm_device *dev, int pipe);
46void mdfld_disable_te(struct drm_device *dev, int pipe); 46void mdfld_disable_te(struct drm_device *dev, int pipe);
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 51fa32392029..d9a72c96e56c 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -119,8 +119,8 @@ static void ch7006_encoder_mode_set(struct drm_encoder *encoder,
119 struct ch7006_encoder_params *params = &priv->params; 119 struct ch7006_encoder_params *params = &priv->params;
120 struct ch7006_state *state = &priv->state; 120 struct ch7006_state *state = &priv->state;
121 uint8_t *regs = state->regs; 121 uint8_t *regs = state->regs;
122 struct ch7006_mode *mode = priv->mode; 122 const struct ch7006_mode *mode = priv->mode;
123 struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm]; 123 const struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
124 int start_active; 124 int start_active;
125 125
126 ch7006_dbg(client, "\n"); 126 ch7006_dbg(client, "\n");
@@ -226,7 +226,7 @@ static int ch7006_encoder_get_modes(struct drm_encoder *encoder,
226 struct drm_connector *connector) 226 struct drm_connector *connector)
227{ 227{
228 struct ch7006_priv *priv = to_ch7006_priv(encoder); 228 struct ch7006_priv *priv = to_ch7006_priv(encoder);
229 struct ch7006_mode *mode; 229 const struct ch7006_mode *mode;
230 int n = 0; 230 int n = 0;
231 231
232 for (mode = ch7006_modes; mode->mode.clock; mode++) { 232 for (mode = ch7006_modes; mode->mode.clock; mode++) {
diff --git a/drivers/gpu/drm/i2c/ch7006_mode.c b/drivers/gpu/drm/i2c/ch7006_mode.c
index 9b83574141a6..bb5f67f10edb 100644
--- a/drivers/gpu/drm/i2c/ch7006_mode.c
+++ b/drivers/gpu/drm/i2c/ch7006_mode.c
@@ -26,7 +26,7 @@
26 26
27#include "ch7006_priv.h" 27#include "ch7006_priv.h"
28 28
29char *ch7006_tv_norm_names[] = { 29const char * const ch7006_tv_norm_names[] = {
30 [TV_NORM_PAL] = "PAL", 30 [TV_NORM_PAL] = "PAL",
31 [TV_NORM_PAL_M] = "PAL-M", 31 [TV_NORM_PAL_M] = "PAL-M",
32 [TV_NORM_PAL_N] = "PAL-N", 32 [TV_NORM_PAL_N] = "PAL-N",
@@ -46,7 +46,7 @@ char *ch7006_tv_norm_names[] = {
46 .vtotal = 625, \ 46 .vtotal = 625, \
47 .hvirtual = 810 47 .hvirtual = 810
48 48
49struct ch7006_tv_norm_info ch7006_tv_norms[] = { 49const struct ch7006_tv_norm_info ch7006_tv_norms[] = {
50 [TV_NORM_NTSC_M] = { 50 [TV_NORM_NTSC_M] = {
51 NTSC_LIKE_TIMINGS, 51 NTSC_LIKE_TIMINGS,
52 .black_level = 0.339 * fixed1, 52 .black_level = 0.339 * fixed1,
@@ -142,7 +142,7 @@ struct ch7006_tv_norm_info ch7006_tv_norms[] = {
142 142
143#define PAL_LIKE (1 << TV_NORM_PAL | 1 << TV_NORM_PAL_N | 1 << TV_NORM_PAL_NC) 143#define PAL_LIKE (1 << TV_NORM_PAL | 1 << TV_NORM_PAL_N | 1 << TV_NORM_PAL_NC)
144 144
145struct ch7006_mode ch7006_modes[] = { 145const struct ch7006_mode ch7006_modes[] = {
146 MODE(21000, 512, 384, 840, 500, N, N, 181.797557582, 5_4, 0x6, PAL_LIKE), 146 MODE(21000, 512, 384, 840, 500, N, N, 181.797557582, 5_4, 0x6, PAL_LIKE),
147 MODE(26250, 512, 384, 840, 625, N, N, 145.438046066, 1_1, 0x1, PAL_LIKE), 147 MODE(26250, 512, 384, 840, 625, N, N, 145.438046066, 1_1, 0x1, PAL_LIKE),
148 MODE(20140, 512, 384, 800, 420, N, N, 213.257083791, 5_4, 0x4, NTSC_LIKE), 148 MODE(20140, 512, 384, 800, 420, N, N, 213.257083791, 5_4, 0x4, NTSC_LIKE),
@@ -171,11 +171,11 @@ struct ch7006_mode ch7006_modes[] = {
171 {} 171 {}
172}; 172};
173 173
174struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder, 174const struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
175 const struct drm_display_mode *drm_mode) 175 const struct drm_display_mode *drm_mode)
176{ 176{
177 struct ch7006_priv *priv = to_ch7006_priv(encoder); 177 struct ch7006_priv *priv = to_ch7006_priv(encoder);
178 struct ch7006_mode *mode; 178 const struct ch7006_mode *mode;
179 179
180 for (mode = ch7006_modes; mode->mode.clock; mode++) { 180 for (mode = ch7006_modes; mode->mode.clock; mode++) {
181 181
@@ -202,7 +202,7 @@ void ch7006_setup_levels(struct drm_encoder *encoder)
202 struct i2c_client *client = drm_i2c_encoder_get_client(encoder); 202 struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
203 struct ch7006_priv *priv = to_ch7006_priv(encoder); 203 struct ch7006_priv *priv = to_ch7006_priv(encoder);
204 uint8_t *regs = priv->state.regs; 204 uint8_t *regs = priv->state.regs;
205 struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm]; 205 const struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
206 int gain; 206 int gain;
207 int black_level; 207 int black_level;
208 208
@@ -233,8 +233,8 @@ void ch7006_setup_subcarrier(struct drm_encoder *encoder)
233 struct i2c_client *client = drm_i2c_encoder_get_client(encoder); 233 struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
234 struct ch7006_priv *priv = to_ch7006_priv(encoder); 234 struct ch7006_priv *priv = to_ch7006_priv(encoder);
235 struct ch7006_state *state = &priv->state; 235 struct ch7006_state *state = &priv->state;
236 struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm]; 236 const struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
237 struct ch7006_mode *mode = priv->mode; 237 const struct ch7006_mode *mode = priv->mode;
238 uint32_t subc_inc; 238 uint32_t subc_inc;
239 239
240 subc_inc = round_fixed((mode->subc_coeff >> 8) 240 subc_inc = round_fixed((mode->subc_coeff >> 8)
@@ -257,7 +257,7 @@ void ch7006_setup_pll(struct drm_encoder *encoder)
257 struct i2c_client *client = drm_i2c_encoder_get_client(encoder); 257 struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
258 struct ch7006_priv *priv = to_ch7006_priv(encoder); 258 struct ch7006_priv *priv = to_ch7006_priv(encoder);
259 uint8_t *regs = priv->state.regs; 259 uint8_t *regs = priv->state.regs;
260 struct ch7006_mode *mode = priv->mode; 260 const struct ch7006_mode *mode = priv->mode;
261 int n, best_n = 0; 261 int n, best_n = 0;
262 int m, best_m = 0; 262 int m, best_m = 0;
263 int freq, best_freq = 0; 263 int freq, best_freq = 0;
@@ -328,9 +328,9 @@ void ch7006_setup_properties(struct drm_encoder *encoder)
328 struct i2c_client *client = drm_i2c_encoder_get_client(encoder); 328 struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
329 struct ch7006_priv *priv = to_ch7006_priv(encoder); 329 struct ch7006_priv *priv = to_ch7006_priv(encoder);
330 struct ch7006_state *state = &priv->state; 330 struct ch7006_state *state = &priv->state;
331 struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm]; 331 const struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
332 struct ch7006_mode *ch_mode = priv->mode; 332 const struct ch7006_mode *ch_mode = priv->mode;
333 struct drm_display_mode *mode = &ch_mode->mode; 333 const struct drm_display_mode *mode = &ch_mode->mode;
334 uint8_t *regs = state->regs; 334 uint8_t *regs = state->regs;
335 int flicker, contrast, hpos, vpos; 335 int flicker, contrast, hpos, vpos;
336 uint64_t scale, aspect; 336 uint64_t scale, aspect;
diff --git a/drivers/gpu/drm/i2c/ch7006_priv.h b/drivers/gpu/drm/i2c/ch7006_priv.h
index ce577841f931..dc6414af5d79 100644
--- a/drivers/gpu/drm/i2c/ch7006_priv.h
+++ b/drivers/gpu/drm/i2c/ch7006_priv.h
@@ -78,7 +78,7 @@ struct ch7006_state {
78 78
79struct ch7006_priv { 79struct ch7006_priv {
80 struct ch7006_encoder_params params; 80 struct ch7006_encoder_params params;
81 struct ch7006_mode *mode; 81 const struct ch7006_mode *mode;
82 82
83 struct ch7006_state state; 83 struct ch7006_state state;
84 struct ch7006_state saved_state; 84 struct ch7006_state saved_state;
@@ -106,12 +106,12 @@ extern int ch7006_debug;
106extern char *ch7006_tv_norm; 106extern char *ch7006_tv_norm;
107extern int ch7006_scale; 107extern int ch7006_scale;
108 108
109extern char *ch7006_tv_norm_names[]; 109extern const char * const ch7006_tv_norm_names[];
110extern struct ch7006_tv_norm_info ch7006_tv_norms[]; 110extern const struct ch7006_tv_norm_info ch7006_tv_norms[];
111extern struct ch7006_mode ch7006_modes[]; 111extern const struct ch7006_mode ch7006_modes[];
112 112
113struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder, 113const struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
114 const struct drm_display_mode *drm_mode); 114 const struct drm_display_mode *drm_mode);
115 115
116void ch7006_setup_levels(struct drm_encoder *encoder); 116void ch7006_setup_levels(struct drm_encoder *encoder);
117void ch7006_setup_subcarrier(struct drm_encoder *encoder); 117void ch7006_setup_subcarrier(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 424228be79ae..896b6aaf8c4d 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -23,7 +23,6 @@
23 23
24#include <drm/drmP.h> 24#include <drm/drmP.h>
25#include <drm/drm_crtc_helper.h> 25#include <drm/drm_crtc_helper.h>
26#include <drm/drm_encoder_slave.h>
27#include <drm/drm_edid.h> 26#include <drm/drm_edid.h>
28#include <drm/drm_of.h> 27#include <drm/drm_of.h>
29#include <drm/i2c/tda998x.h> 28#include <drm/i2c/tda998x.h>
@@ -34,9 +33,8 @@ struct tda998x_priv {
34 struct i2c_client *cec; 33 struct i2c_client *cec;
35 struct i2c_client *hdmi; 34 struct i2c_client *hdmi;
36 struct mutex mutex; 35 struct mutex mutex;
37 struct delayed_work dwork; 36 u16 rev;
38 uint16_t rev; 37 u8 current_page;
39 uint8_t current_page;
40 int dpms; 38 int dpms;
41 bool is_hdmi_sink; 39 bool is_hdmi_sink;
42 u8 vip_cntrl_0; 40 u8 vip_cntrl_0;
@@ -46,10 +44,21 @@ struct tda998x_priv {
46 44
47 wait_queue_head_t wq_edid; 45 wait_queue_head_t wq_edid;
48 volatile int wq_edid_wait; 46 volatile int wq_edid_wait;
49 struct drm_encoder *encoder; 47
48 struct work_struct detect_work;
49 struct timer_list edid_delay_timer;
50 wait_queue_head_t edid_delay_waitq;
51 bool edid_delay_active;
52
53 struct drm_encoder encoder;
54 struct drm_connector connector;
50}; 55};
51 56
52#define to_tda998x_priv(x) ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv) 57#define conn_to_tda998x_priv(x) \
58 container_of(x, struct tda998x_priv, connector)
59
60#define enc_to_tda998x_priv(x) \
61 container_of(x, struct tda998x_priv, encoder)
53 62
54/* The TDA9988 series of devices use a paged register scheme.. to simplify 63/* The TDA9988 series of devices use a paged register scheme.. to simplify
55 * things we encode the page # in upper bits of the register #. To read/ 64 * things we encode the page # in upper bits of the register #. To read/
@@ -326,6 +335,8 @@ struct tda998x_priv {
326# define CEC_FRO_IM_CLK_CTRL_FRO_DIV (1 << 0) 335# define CEC_FRO_IM_CLK_CTRL_FRO_DIV (1 << 0)
327#define REG_CEC_RXSHPDINTENA 0xfc /* read/write */ 336#define REG_CEC_RXSHPDINTENA 0xfc /* read/write */
328#define REG_CEC_RXSHPDINT 0xfd /* read */ 337#define REG_CEC_RXSHPDINT 0xfd /* read */
338# define CEC_RXSHPDINT_RXSENS BIT(0)
339# define CEC_RXSHPDINT_HPD BIT(1)
329#define REG_CEC_RXSHPDLEV 0xfe /* read */ 340#define REG_CEC_RXSHPDLEV 0xfe /* read */
330# define CEC_RXSHPDLEV_RXSENS (1 << 0) 341# define CEC_RXSHPDLEV_RXSENS (1 << 0)
331# define CEC_RXSHPDLEV_HPD (1 << 1) 342# define CEC_RXSHPDLEV_HPD (1 << 1)
@@ -345,10 +356,10 @@ struct tda998x_priv {
345#define TDA19988 0x0301 356#define TDA19988 0x0301
346 357
347static void 358static void
348cec_write(struct tda998x_priv *priv, uint16_t addr, uint8_t val) 359cec_write(struct tda998x_priv *priv, u16 addr, u8 val)
349{ 360{
350 struct i2c_client *client = priv->cec; 361 struct i2c_client *client = priv->cec;
351 uint8_t buf[] = {addr, val}; 362 u8 buf[] = {addr, val};
352 int ret; 363 int ret;
353 364
354 ret = i2c_master_send(client, buf, sizeof(buf)); 365 ret = i2c_master_send(client, buf, sizeof(buf));
@@ -356,11 +367,11 @@ cec_write(struct tda998x_priv *priv, uint16_t addr, uint8_t val)
356 dev_err(&client->dev, "Error %d writing to cec:0x%x\n", ret, addr); 367 dev_err(&client->dev, "Error %d writing to cec:0x%x\n", ret, addr);
357} 368}
358 369
359static uint8_t 370static u8
360cec_read(struct tda998x_priv *priv, uint8_t addr) 371cec_read(struct tda998x_priv *priv, u8 addr)
361{ 372{
362 struct i2c_client *client = priv->cec; 373 struct i2c_client *client = priv->cec;
363 uint8_t val; 374 u8 val;
364 int ret; 375 int ret;
365 376
366 ret = i2c_master_send(client, &addr, sizeof(addr)); 377 ret = i2c_master_send(client, &addr, sizeof(addr));
@@ -379,11 +390,11 @@ fail:
379} 390}
380 391
381static int 392static int
382set_page(struct tda998x_priv *priv, uint16_t reg) 393set_page(struct tda998x_priv *priv, u16 reg)
383{ 394{
384 if (REG2PAGE(reg) != priv->current_page) { 395 if (REG2PAGE(reg) != priv->current_page) {
385 struct i2c_client *client = priv->hdmi; 396 struct i2c_client *client = priv->hdmi;
386 uint8_t buf[] = { 397 u8 buf[] = {
387 REG_CURPAGE, REG2PAGE(reg) 398 REG_CURPAGE, REG2PAGE(reg)
388 }; 399 };
389 int ret = i2c_master_send(client, buf, sizeof(buf)); 400 int ret = i2c_master_send(client, buf, sizeof(buf));
@@ -399,10 +410,10 @@ set_page(struct tda998x_priv *priv, uint16_t reg)
399} 410}
400 411
401static int 412static int
402reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt) 413reg_read_range(struct tda998x_priv *priv, u16 reg, char *buf, int cnt)
403{ 414{
404 struct i2c_client *client = priv->hdmi; 415 struct i2c_client *client = priv->hdmi;
405 uint8_t addr = REG2ADDR(reg); 416 u8 addr = REG2ADDR(reg);
406 int ret; 417 int ret;
407 418
408 mutex_lock(&priv->mutex); 419 mutex_lock(&priv->mutex);
@@ -428,10 +439,10 @@ out:
428} 439}
429 440
430static void 441static void
431reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt) 442reg_write_range(struct tda998x_priv *priv, u16 reg, u8 *p, int cnt)
432{ 443{
433 struct i2c_client *client = priv->hdmi; 444 struct i2c_client *client = priv->hdmi;
434 uint8_t buf[cnt+1]; 445 u8 buf[cnt+1];
435 int ret; 446 int ret;
436 447
437 buf[0] = REG2ADDR(reg); 448 buf[0] = REG2ADDR(reg);
@@ -450,9 +461,9 @@ out:
450} 461}
451 462
452static int 463static int
453reg_read(struct tda998x_priv *priv, uint16_t reg) 464reg_read(struct tda998x_priv *priv, u16 reg)
454{ 465{
455 uint8_t val = 0; 466 u8 val = 0;
456 int ret; 467 int ret;
457 468
458 ret = reg_read_range(priv, reg, &val, sizeof(val)); 469 ret = reg_read_range(priv, reg, &val, sizeof(val));
@@ -462,10 +473,10 @@ reg_read(struct tda998x_priv *priv, uint16_t reg)
462} 473}
463 474
464static void 475static void
465reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val) 476reg_write(struct tda998x_priv *priv, u16 reg, u8 val)
466{ 477{
467 struct i2c_client *client = priv->hdmi; 478 struct i2c_client *client = priv->hdmi;
468 uint8_t buf[] = {REG2ADDR(reg), val}; 479 u8 buf[] = {REG2ADDR(reg), val};
469 int ret; 480 int ret;
470 481
471 mutex_lock(&priv->mutex); 482 mutex_lock(&priv->mutex);
@@ -481,10 +492,10 @@ out:
481} 492}
482 493
483static void 494static void
484reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val) 495reg_write16(struct tda998x_priv *priv, u16 reg, u16 val)
485{ 496{
486 struct i2c_client *client = priv->hdmi; 497 struct i2c_client *client = priv->hdmi;
487 uint8_t buf[] = {REG2ADDR(reg), val >> 8, val}; 498 u8 buf[] = {REG2ADDR(reg), val >> 8, val};
488 int ret; 499 int ret;
489 500
490 mutex_lock(&priv->mutex); 501 mutex_lock(&priv->mutex);
@@ -500,7 +511,7 @@ out:
500} 511}
501 512
502static void 513static void
503reg_set(struct tda998x_priv *priv, uint16_t reg, uint8_t val) 514reg_set(struct tda998x_priv *priv, u16 reg, u8 val)
504{ 515{
505 int old_val; 516 int old_val;
506 517
@@ -510,7 +521,7 @@ reg_set(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
510} 521}
511 522
512static void 523static void
513reg_clear(struct tda998x_priv *priv, uint16_t reg, uint8_t val) 524reg_clear(struct tda998x_priv *priv, u16 reg, u8 val)
514{ 525{
515 int old_val; 526 int old_val;
516 527
@@ -551,15 +562,50 @@ tda998x_reset(struct tda998x_priv *priv)
551 reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24); 562 reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24);
552} 563}
553 564
554/* handle HDMI connect/disconnect */ 565/*
555static void tda998x_hpd(struct work_struct *work) 566 * The TDA998x has a problem when trying to read the EDID close to a
567 * HPD assertion: it needs a delay of 100ms to avoid timing out while
568 * trying to read EDID data.
569 *
570 * However, tda998x_encoder_get_modes() may be called at any moment
571 * after tda998x_connector_detect() indicates that we are connected, so
572 * we need to delay probing modes in tda998x_encoder_get_modes() after
573 * we have seen a HPD inactive->active transition. This code implements
574 * that delay.
575 */
576static void tda998x_edid_delay_done(unsigned long data)
577{
578 struct tda998x_priv *priv = (struct tda998x_priv *)data;
579
580 priv->edid_delay_active = false;
581 wake_up(&priv->edid_delay_waitq);
582 schedule_work(&priv->detect_work);
583}
584
585static void tda998x_edid_delay_start(struct tda998x_priv *priv)
586{
587 priv->edid_delay_active = true;
588 mod_timer(&priv->edid_delay_timer, jiffies + HZ/10);
589}
590
591static int tda998x_edid_delay_wait(struct tda998x_priv *priv)
592{
593 return wait_event_killable(priv->edid_delay_waitq, !priv->edid_delay_active);
594}
595
596/*
597 * We need to run the KMS hotplug event helper outside of our threaded
598 * interrupt routine as this can call back into our get_modes method,
599 * which will want to make use of interrupts.
600 */
601static void tda998x_detect_work(struct work_struct *work)
556{ 602{
557 struct delayed_work *dwork = to_delayed_work(work);
558 struct tda998x_priv *priv = 603 struct tda998x_priv *priv =
559 container_of(dwork, struct tda998x_priv, dwork); 604 container_of(work, struct tda998x_priv, detect_work);
605 struct drm_device *dev = priv->encoder.dev;
560 606
561 if (priv->encoder && priv->encoder->dev) 607 if (dev)
562 drm_kms_helper_hotplug_event(priv->encoder->dev); 608 drm_kms_helper_hotplug_event(dev);
563} 609}
564 610
565/* 611/*
@@ -569,9 +615,8 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
569{ 615{
570 struct tda998x_priv *priv = data; 616 struct tda998x_priv *priv = data;
571 u8 sta, cec, lvl, flag0, flag1, flag2; 617 u8 sta, cec, lvl, flag0, flag1, flag2;
618 bool handled = false;
572 619
573 if (!priv)
574 return IRQ_HANDLED;
575 sta = cec_read(priv, REG_CEC_INTSTATUS); 620 sta = cec_read(priv, REG_CEC_INTSTATUS);
576 cec = cec_read(priv, REG_CEC_RXSHPDINT); 621 cec = cec_read(priv, REG_CEC_RXSHPDINT);
577 lvl = cec_read(priv, REG_CEC_RXSHPDLEV); 622 lvl = cec_read(priv, REG_CEC_RXSHPDLEV);
@@ -581,75 +626,76 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
581 DRM_DEBUG_DRIVER( 626 DRM_DEBUG_DRIVER(
582 "tda irq sta %02x cec %02x lvl %02x f0 %02x f1 %02x f2 %02x\n", 627 "tda irq sta %02x cec %02x lvl %02x f0 %02x f1 %02x f2 %02x\n",
583 sta, cec, lvl, flag0, flag1, flag2); 628 sta, cec, lvl, flag0, flag1, flag2);
629
630 if (cec & CEC_RXSHPDINT_HPD) {
631 if (lvl & CEC_RXSHPDLEV_HPD)
632 tda998x_edid_delay_start(priv);
633 else
634 schedule_work(&priv->detect_work);
635
636 handled = true;
637 }
638
584 if ((flag2 & INT_FLAGS_2_EDID_BLK_RD) && priv->wq_edid_wait) { 639 if ((flag2 & INT_FLAGS_2_EDID_BLK_RD) && priv->wq_edid_wait) {
585 priv->wq_edid_wait = 0; 640 priv->wq_edid_wait = 0;
586 wake_up(&priv->wq_edid); 641 wake_up(&priv->wq_edid);
587 } else if (cec != 0) { /* HPD change */ 642 handled = true;
588 schedule_delayed_work(&priv->dwork, HZ/10);
589 } 643 }
590 return IRQ_HANDLED;
591}
592 644
593static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes) 645 return IRQ_RETVAL(handled);
594{
595 int sum = 0;
596
597 while (bytes--)
598 sum -= *buf++;
599 return sum;
600} 646}
601 647
602#define HB(x) (x)
603#define PB(x) (HB(2) + 1 + (x))
604
605static void 648static void
606tda998x_write_if(struct tda998x_priv *priv, uint8_t bit, uint16_t addr, 649tda998x_write_if(struct tda998x_priv *priv, u8 bit, u16 addr,
607 uint8_t *buf, size_t size) 650 union hdmi_infoframe *frame)
608{ 651{
652 u8 buf[32];
653 ssize_t len;
654
655 len = hdmi_infoframe_pack(frame, buf, sizeof(buf));
656 if (len < 0) {
657 dev_err(&priv->hdmi->dev,
658 "hdmi_infoframe_pack() type=0x%02x failed: %zd\n",
659 frame->any.type, len);
660 return;
661 }
662
609 reg_clear(priv, REG_DIP_IF_FLAGS, bit); 663 reg_clear(priv, REG_DIP_IF_FLAGS, bit);
610 reg_write_range(priv, addr, buf, size); 664 reg_write_range(priv, addr, buf, len);
611 reg_set(priv, REG_DIP_IF_FLAGS, bit); 665 reg_set(priv, REG_DIP_IF_FLAGS, bit);
612} 666}
613 667
614static void 668static void
615tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p) 669tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p)
616{ 670{
617 u8 buf[PB(HDMI_AUDIO_INFOFRAME_SIZE) + 1]; 671 union hdmi_infoframe frame;
672
673 hdmi_audio_infoframe_init(&frame.audio);
618 674
619 memset(buf, 0, sizeof(buf)); 675 frame.audio.channels = p->audio_frame[1] & 0x07;
620 buf[HB(0)] = HDMI_INFOFRAME_TYPE_AUDIO; 676 frame.audio.channel_allocation = p->audio_frame[4];
621 buf[HB(1)] = 0x01; 677 frame.audio.level_shift_value = (p->audio_frame[5] & 0x78) >> 3;
622 buf[HB(2)] = HDMI_AUDIO_INFOFRAME_SIZE; 678 frame.audio.downmix_inhibit = (p->audio_frame[5] & 0x80) >> 7;
623 buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */
624 buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */
625 buf[PB(4)] = p->audio_frame[4];
626 buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */
627 679
628 buf[PB(0)] = tda998x_cksum(buf, sizeof(buf)); 680 /*
681 * L-PCM and IEC61937 compressed audio shall always set sample
682 * frequency to "refer to stream". For others, see the HDMI
683 * specification.
684 */
685 frame.audio.sample_frequency = (p->audio_frame[2] & 0x1c) >> 2;
629 686
630 tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf, 687 tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, &frame);
631 sizeof(buf));
632} 688}
633 689
634static void 690static void
635tda998x_write_avi(struct tda998x_priv *priv, struct drm_display_mode *mode) 691tda998x_write_avi(struct tda998x_priv *priv, struct drm_display_mode *mode)
636{ 692{
637 struct hdmi_avi_infoframe frame; 693 union hdmi_infoframe frame;
638 u8 buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
639 ssize_t len;
640 694
641 drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); 695 drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode);
696 frame.avi.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
642 697
643 frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL; 698 tda998x_write_if(priv, DIP_IF_FLAGS_IF2, REG_IF2_HB0, &frame);
644
645 len = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf));
646 if (len < 0) {
647 dev_err(&priv->hdmi->dev,
648 "hdmi_avi_infoframe_pack() failed: %zd\n", len);
649 return;
650 }
651
652 tda998x_write_if(priv, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf, len);
653} 699}
654 700
655static void tda998x_audio_mute(struct tda998x_priv *priv, bool on) 701static void tda998x_audio_mute(struct tda998x_priv *priv, bool on)
@@ -667,8 +713,8 @@ static void
667tda998x_configure_audio(struct tda998x_priv *priv, 713tda998x_configure_audio(struct tda998x_priv *priv,
668 struct drm_display_mode *mode, struct tda998x_encoder_params *p) 714 struct drm_display_mode *mode, struct tda998x_encoder_params *p)
669{ 715{
670 uint8_t buf[6], clksel_aip, clksel_fs, cts_n, adiv; 716 u8 buf[6], clksel_aip, clksel_fs, cts_n, adiv;
671 uint32_t n; 717 u32 n;
672 718
673 /* Enable audio ports */ 719 /* Enable audio ports */
674 reg_write(priv, REG_ENA_AP, p->audio_cfg); 720 reg_write(priv, REG_ENA_AP, p->audio_cfg);
@@ -776,8 +822,10 @@ static void tda998x_encoder_set_config(struct tda998x_priv *priv,
776 priv->params = *p; 822 priv->params = *p;
777} 823}
778 824
779static void tda998x_encoder_dpms(struct tda998x_priv *priv, int mode) 825static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
780{ 826{
827 struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
828
781 /* we only care about on or off: */ 829 /* we only care about on or off: */
782 if (mode != DRM_MODE_DPMS_ON) 830 if (mode != DRM_MODE_DPMS_ON)
783 mode = DRM_MODE_DPMS_OFF; 831 mode = DRM_MODE_DPMS_OFF;
@@ -827,8 +875,8 @@ tda998x_encoder_mode_fixup(struct drm_encoder *encoder,
827 return true; 875 return true;
828} 876}
829 877
830static int tda998x_encoder_mode_valid(struct tda998x_priv *priv, 878static int tda998x_connector_mode_valid(struct drm_connector *connector,
831 struct drm_display_mode *mode) 879 struct drm_display_mode *mode)
832{ 880{
833 if (mode->clock > 150000) 881 if (mode->clock > 150000)
834 return MODE_CLOCK_HIGH; 882 return MODE_CLOCK_HIGH;
@@ -840,18 +888,19 @@ static int tda998x_encoder_mode_valid(struct tda998x_priv *priv,
840} 888}
841 889
842static void 890static void
843tda998x_encoder_mode_set(struct tda998x_priv *priv, 891tda998x_encoder_mode_set(struct drm_encoder *encoder,
844 struct drm_display_mode *mode, 892 struct drm_display_mode *mode,
845 struct drm_display_mode *adjusted_mode) 893 struct drm_display_mode *adjusted_mode)
846{ 894{
847 uint16_t ref_pix, ref_line, n_pix, n_line; 895 struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
848 uint16_t hs_pix_s, hs_pix_e; 896 u16 ref_pix, ref_line, n_pix, n_line;
849 uint16_t vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e; 897 u16 hs_pix_s, hs_pix_e;
850 uint16_t vs2_pix_s, vs2_pix_e, vs2_line_s, vs2_line_e; 898 u16 vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e;
851 uint16_t vwin1_line_s, vwin1_line_e; 899 u16 vs2_pix_s, vs2_pix_e, vs2_line_s, vs2_line_e;
852 uint16_t vwin2_line_s, vwin2_line_e; 900 u16 vwin1_line_s, vwin1_line_e;
853 uint16_t de_pix_s, de_pix_e; 901 u16 vwin2_line_s, vwin2_line_e;
854 uint8_t reg, div, rep; 902 u16 de_pix_s, de_pix_e;
903 u8 reg, div, rep;
855 904
856 /* 905 /*
857 * Internally TDA998x is using ITU-R BT.656 style sync but 906 * Internally TDA998x is using ITU-R BT.656 style sync but
@@ -1031,9 +1080,10 @@ tda998x_encoder_mode_set(struct tda998x_priv *priv,
1031} 1080}
1032 1081
1033static enum drm_connector_status 1082static enum drm_connector_status
1034tda998x_encoder_detect(struct tda998x_priv *priv) 1083tda998x_connector_detect(struct drm_connector *connector, bool force)
1035{ 1084{
1036 uint8_t val = cec_read(priv, REG_CEC_RXSHPDLEV); 1085 struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
1086 u8 val = cec_read(priv, REG_CEC_RXSHPDLEV);
1037 1087
1038 return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected : 1088 return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
1039 connector_status_disconnected; 1089 connector_status_disconnected;
@@ -1042,7 +1092,7 @@ tda998x_encoder_detect(struct tda998x_priv *priv)
1042static int read_edid_block(void *data, u8 *buf, unsigned int blk, size_t length) 1092static int read_edid_block(void *data, u8 *buf, unsigned int blk, size_t length)
1043{ 1093{
1044 struct tda998x_priv *priv = data; 1094 struct tda998x_priv *priv = data;
1045 uint8_t offset, segptr; 1095 u8 offset, segptr;
1046 int ret, i; 1096 int ret, i;
1047 1097
1048 offset = (blk & 1) ? 128 : 0; 1098 offset = (blk & 1) ? 128 : 0;
@@ -1095,13 +1145,20 @@ static int read_edid_block(void *data, u8 *buf, unsigned int blk, size_t length)
1095 return 0; 1145 return 0;
1096} 1146}
1097 1147
1098static int 1148static int tda998x_connector_get_modes(struct drm_connector *connector)
1099tda998x_encoder_get_modes(struct tda998x_priv *priv,
1100 struct drm_connector *connector)
1101{ 1149{
1150 struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
1102 struct edid *edid; 1151 struct edid *edid;
1103 int n; 1152 int n;
1104 1153
1154 /*
1155 * If we get killed while waiting for the HPD timeout, return
1156 * no modes found: we are not in a restartable path, so we
1157 * can't handle signals gracefully.
1158 */
1159 if (tda998x_edid_delay_wait(priv))
1160 return 0;
1161
1105 if (priv->rev == TDA19988) 1162 if (priv->rev == TDA19988)
1106 reg_clear(priv, REG_TX4, TX4_PD_RAM); 1163 reg_clear(priv, REG_TX4, TX4_PD_RAM);
1107 1164
@@ -1133,101 +1190,21 @@ static void tda998x_encoder_set_polling(struct tda998x_priv *priv,
1133 DRM_CONNECTOR_POLL_DISCONNECT; 1190 DRM_CONNECTOR_POLL_DISCONNECT;
1134} 1191}
1135 1192
1136static int
1137tda998x_encoder_set_property(struct drm_encoder *encoder,
1138 struct drm_connector *connector,
1139 struct drm_property *property,
1140 uint64_t val)
1141{
1142 DBG("");
1143 return 0;
1144}
1145
1146static void tda998x_destroy(struct tda998x_priv *priv) 1193static void tda998x_destroy(struct tda998x_priv *priv)
1147{ 1194{
1148 /* disable all IRQs and free the IRQ handler */ 1195 /* disable all IRQs and free the IRQ handler */
1149 cec_write(priv, REG_CEC_RXSHPDINTENA, 0); 1196 cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
1150 reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD); 1197 reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
1151 if (priv->hdmi->irq) {
1152 free_irq(priv->hdmi->irq, priv);
1153 cancel_delayed_work_sync(&priv->dwork);
1154 }
1155
1156 i2c_unregister_device(priv->cec);
1157}
1158
1159/* Slave encoder support */
1160
1161static void
1162tda998x_encoder_slave_set_config(struct drm_encoder *encoder, void *params)
1163{
1164 tda998x_encoder_set_config(to_tda998x_priv(encoder), params);
1165}
1166 1198
1167static void tda998x_encoder_slave_destroy(struct drm_encoder *encoder) 1199 if (priv->hdmi->irq)
1168{ 1200 free_irq(priv->hdmi->irq, priv);
1169 struct tda998x_priv *priv = to_tda998x_priv(encoder);
1170
1171 tda998x_destroy(priv);
1172 drm_i2c_encoder_destroy(encoder);
1173 kfree(priv);
1174}
1175
1176static void tda998x_encoder_slave_dpms(struct drm_encoder *encoder, int mode)
1177{
1178 tda998x_encoder_dpms(to_tda998x_priv(encoder), mode);
1179}
1180
1181static int tda998x_encoder_slave_mode_valid(struct drm_encoder *encoder,
1182 struct drm_display_mode *mode)
1183{
1184 return tda998x_encoder_mode_valid(to_tda998x_priv(encoder), mode);
1185}
1186 1201
1187static void 1202 del_timer_sync(&priv->edid_delay_timer);
1188tda998x_encoder_slave_mode_set(struct drm_encoder *encoder, 1203 cancel_work_sync(&priv->detect_work);
1189 struct drm_display_mode *mode,
1190 struct drm_display_mode *adjusted_mode)
1191{
1192 tda998x_encoder_mode_set(to_tda998x_priv(encoder), mode, adjusted_mode);
1193}
1194 1204
1195static enum drm_connector_status 1205 i2c_unregister_device(priv->cec);
1196tda998x_encoder_slave_detect(struct drm_encoder *encoder,
1197 struct drm_connector *connector)
1198{
1199 return tda998x_encoder_detect(to_tda998x_priv(encoder));
1200}
1201
1202static int tda998x_encoder_slave_get_modes(struct drm_encoder *encoder,
1203 struct drm_connector *connector)
1204{
1205 return tda998x_encoder_get_modes(to_tda998x_priv(encoder), connector);
1206}
1207
1208static int
1209tda998x_encoder_slave_create_resources(struct drm_encoder *encoder,
1210 struct drm_connector *connector)
1211{
1212 tda998x_encoder_set_polling(to_tda998x_priv(encoder), connector);
1213 return 0;
1214} 1206}
1215 1207
1216static struct drm_encoder_slave_funcs tda998x_encoder_slave_funcs = {
1217 .set_config = tda998x_encoder_slave_set_config,
1218 .destroy = tda998x_encoder_slave_destroy,
1219 .dpms = tda998x_encoder_slave_dpms,
1220 .save = tda998x_encoder_save,
1221 .restore = tda998x_encoder_restore,
1222 .mode_fixup = tda998x_encoder_mode_fixup,
1223 .mode_valid = tda998x_encoder_slave_mode_valid,
1224 .mode_set = tda998x_encoder_slave_mode_set,
1225 .detect = tda998x_encoder_slave_detect,
1226 .get_modes = tda998x_encoder_slave_get_modes,
1227 .create_resources = tda998x_encoder_slave_create_resources,
1228 .set_property = tda998x_encoder_set_property,
1229};
1230
1231/* I2C driver functions */ 1208/* I2C driver functions */
1232 1209
1233static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv) 1210static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
@@ -1252,6 +1229,10 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
1252 priv->dpms = DRM_MODE_DPMS_OFF; 1229 priv->dpms = DRM_MODE_DPMS_OFF;
1253 1230
1254 mutex_init(&priv->mutex); /* protect the page access */ 1231 mutex_init(&priv->mutex); /* protect the page access */
1232 init_waitqueue_head(&priv->edid_delay_waitq);
1233 setup_timer(&priv->edid_delay_timer, tda998x_edid_delay_done,
1234 (unsigned long)priv);
1235 INIT_WORK(&priv->detect_work, tda998x_detect_work);
1255 1236
1256 /* wake up the device: */ 1237 /* wake up the device: */
1257 cec_write(priv, REG_CEC_ENAMODS, 1238 cec_write(priv, REG_CEC_ENAMODS,
@@ -1310,7 +1291,6 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
1310 1291
1311 /* init read EDID waitqueue and HDP work */ 1292 /* init read EDID waitqueue and HDP work */
1312 init_waitqueue_head(&priv->wq_edid); 1293 init_waitqueue_head(&priv->wq_edid);
1313 INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd);
1314 1294
1315 /* clear pending interrupts */ 1295 /* clear pending interrupts */
1316 reg_read(priv, REG_INT_FLAGS_0); 1296 reg_read(priv, REG_INT_FLAGS_0);
@@ -1359,84 +1339,31 @@ fail:
1359 return -ENXIO; 1339 return -ENXIO;
1360} 1340}
1361 1341
1362static int tda998x_encoder_init(struct i2c_client *client,
1363 struct drm_device *dev,
1364 struct drm_encoder_slave *encoder_slave)
1365{
1366 struct tda998x_priv *priv;
1367 int ret;
1368
1369 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1370 if (!priv)
1371 return -ENOMEM;
1372
1373 priv->encoder = &encoder_slave->base;
1374
1375 ret = tda998x_create(client, priv);
1376 if (ret) {
1377 kfree(priv);
1378 return ret;
1379 }
1380
1381 encoder_slave->slave_priv = priv;
1382 encoder_slave->slave_funcs = &tda998x_encoder_slave_funcs;
1383
1384 return 0;
1385}
1386
1387struct tda998x_priv2 {
1388 struct tda998x_priv base;
1389 struct drm_encoder encoder;
1390 struct drm_connector connector;
1391};
1392
1393#define conn_to_tda998x_priv2(x) \
1394 container_of(x, struct tda998x_priv2, connector);
1395
1396#define enc_to_tda998x_priv2(x) \
1397 container_of(x, struct tda998x_priv2, encoder);
1398
1399static void tda998x_encoder2_dpms(struct drm_encoder *encoder, int mode)
1400{
1401 struct tda998x_priv2 *priv = enc_to_tda998x_priv2(encoder);
1402
1403 tda998x_encoder_dpms(&priv->base, mode);
1404}
1405
1406static void tda998x_encoder_prepare(struct drm_encoder *encoder) 1342static void tda998x_encoder_prepare(struct drm_encoder *encoder)
1407{ 1343{
1408 tda998x_encoder2_dpms(encoder, DRM_MODE_DPMS_OFF); 1344 tda998x_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1409} 1345}
1410 1346
1411static void tda998x_encoder_commit(struct drm_encoder *encoder) 1347static void tda998x_encoder_commit(struct drm_encoder *encoder)
1412{ 1348{
1413 tda998x_encoder2_dpms(encoder, DRM_MODE_DPMS_ON); 1349 tda998x_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
1414}
1415
1416static void tda998x_encoder2_mode_set(struct drm_encoder *encoder,
1417 struct drm_display_mode *mode,
1418 struct drm_display_mode *adjusted_mode)
1419{
1420 struct tda998x_priv2 *priv = enc_to_tda998x_priv2(encoder);
1421
1422 tda998x_encoder_mode_set(&priv->base, mode, adjusted_mode);
1423} 1350}
1424 1351
1425static const struct drm_encoder_helper_funcs tda998x_encoder_helper_funcs = { 1352static const struct drm_encoder_helper_funcs tda998x_encoder_helper_funcs = {
1426 .dpms = tda998x_encoder2_dpms, 1353 .dpms = tda998x_encoder_dpms,
1427 .save = tda998x_encoder_save, 1354 .save = tda998x_encoder_save,
1428 .restore = tda998x_encoder_restore, 1355 .restore = tda998x_encoder_restore,
1429 .mode_fixup = tda998x_encoder_mode_fixup, 1356 .mode_fixup = tda998x_encoder_mode_fixup,
1430 .prepare = tda998x_encoder_prepare, 1357 .prepare = tda998x_encoder_prepare,
1431 .commit = tda998x_encoder_commit, 1358 .commit = tda998x_encoder_commit,
1432 .mode_set = tda998x_encoder2_mode_set, 1359 .mode_set = tda998x_encoder_mode_set,
1433}; 1360};
1434 1361
1435static void tda998x_encoder_destroy(struct drm_encoder *encoder) 1362static void tda998x_encoder_destroy(struct drm_encoder *encoder)
1436{ 1363{
1437 struct tda998x_priv2 *priv = enc_to_tda998x_priv2(encoder); 1364 struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
1438 1365
1439 tda998x_destroy(&priv->base); 1366 tda998x_destroy(priv);
1440 drm_encoder_cleanup(encoder); 1367 drm_encoder_cleanup(encoder);
1441} 1368}
1442 1369
@@ -1444,25 +1371,10 @@ static const struct drm_encoder_funcs tda998x_encoder_funcs = {
1444 .destroy = tda998x_encoder_destroy, 1371 .destroy = tda998x_encoder_destroy,
1445}; 1372};
1446 1373
1447static int tda998x_connector_get_modes(struct drm_connector *connector)
1448{
1449 struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector);
1450
1451 return tda998x_encoder_get_modes(&priv->base, connector);
1452}
1453
1454static int tda998x_connector_mode_valid(struct drm_connector *connector,
1455 struct drm_display_mode *mode)
1456{
1457 struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector);
1458
1459 return tda998x_encoder_mode_valid(&priv->base, mode);
1460}
1461
1462static struct drm_encoder * 1374static struct drm_encoder *
1463tda998x_connector_best_encoder(struct drm_connector *connector) 1375tda998x_connector_best_encoder(struct drm_connector *connector)
1464{ 1376{
1465 struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector); 1377 struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
1466 1378
1467 return &priv->encoder; 1379 return &priv->encoder;
1468} 1380}
@@ -1474,14 +1386,6 @@ const struct drm_connector_helper_funcs tda998x_connector_helper_funcs = {
1474 .best_encoder = tda998x_connector_best_encoder, 1386 .best_encoder = tda998x_connector_best_encoder,
1475}; 1387};
1476 1388
1477static enum drm_connector_status
1478tda998x_connector_detect(struct drm_connector *connector, bool force)
1479{
1480 struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector);
1481
1482 return tda998x_encoder_detect(&priv->base);
1483}
1484
1485static void tda998x_connector_destroy(struct drm_connector *connector) 1389static void tda998x_connector_destroy(struct drm_connector *connector)
1486{ 1390{
1487 drm_connector_unregister(connector); 1391 drm_connector_unregister(connector);
@@ -1500,8 +1404,8 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
1500 struct tda998x_encoder_params *params = dev->platform_data; 1404 struct tda998x_encoder_params *params = dev->platform_data;
1501 struct i2c_client *client = to_i2c_client(dev); 1405 struct i2c_client *client = to_i2c_client(dev);
1502 struct drm_device *drm = data; 1406 struct drm_device *drm = data;
1503 struct tda998x_priv2 *priv; 1407 struct tda998x_priv *priv;
1504 uint32_t crtcs = 0; 1408 u32 crtcs = 0;
1505 int ret; 1409 int ret;
1506 1410
1507 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 1411 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -1519,18 +1423,17 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
1519 crtcs = 1 << 0; 1423 crtcs = 1 << 0;
1520 } 1424 }
1521 1425
1522 priv->base.encoder = &priv->encoder;
1523 priv->connector.interlace_allowed = 1; 1426 priv->connector.interlace_allowed = 1;
1524 priv->encoder.possible_crtcs = crtcs; 1427 priv->encoder.possible_crtcs = crtcs;
1525 1428
1526 ret = tda998x_create(client, &priv->base); 1429 ret = tda998x_create(client, priv);
1527 if (ret) 1430 if (ret)
1528 return ret; 1431 return ret;
1529 1432
1530 if (!dev->of_node && params) 1433 if (!dev->of_node && params)
1531 tda998x_encoder_set_config(&priv->base, params); 1434 tda998x_encoder_set_config(priv, params);
1532 1435
1533 tda998x_encoder_set_polling(&priv->base, &priv->connector); 1436 tda998x_encoder_set_polling(priv, &priv->connector);
1534 1437
1535 drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs); 1438 drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs);
1536 ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs, 1439 ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
@@ -1560,18 +1463,18 @@ err_sysfs:
1560err_connector: 1463err_connector:
1561 drm_encoder_cleanup(&priv->encoder); 1464 drm_encoder_cleanup(&priv->encoder);
1562err_encoder: 1465err_encoder:
1563 tda998x_destroy(&priv->base); 1466 tda998x_destroy(priv);
1564 return ret; 1467 return ret;
1565} 1468}
1566 1469
1567static void tda998x_unbind(struct device *dev, struct device *master, 1470static void tda998x_unbind(struct device *dev, struct device *master,
1568 void *data) 1471 void *data)
1569{ 1472{
1570 struct tda998x_priv2 *priv = dev_get_drvdata(dev); 1473 struct tda998x_priv *priv = dev_get_drvdata(dev);
1571 1474
1572 drm_connector_cleanup(&priv->connector); 1475 drm_connector_cleanup(&priv->connector);
1573 drm_encoder_cleanup(&priv->encoder); 1476 drm_encoder_cleanup(&priv->encoder);
1574 tda998x_destroy(&priv->base); 1477 tda998x_destroy(priv);
1575} 1478}
1576 1479
1577static const struct component_ops tda998x_ops = { 1480static const struct component_ops tda998x_ops = {
@@ -1605,38 +1508,18 @@ static struct i2c_device_id tda998x_ids[] = {
1605}; 1508};
1606MODULE_DEVICE_TABLE(i2c, tda998x_ids); 1509MODULE_DEVICE_TABLE(i2c, tda998x_ids);
1607 1510
1608static struct drm_i2c_encoder_driver tda998x_driver = { 1511static struct i2c_driver tda998x_driver = {
1609 .i2c_driver = { 1512 .probe = tda998x_probe,
1610 .probe = tda998x_probe, 1513 .remove = tda998x_remove,
1611 .remove = tda998x_remove, 1514 .driver = {
1612 .driver = { 1515 .name = "tda998x",
1613 .name = "tda998x", 1516 .of_match_table = of_match_ptr(tda998x_dt_ids),
1614 .of_match_table = of_match_ptr(tda998x_dt_ids),
1615 },
1616 .id_table = tda998x_ids,
1617 }, 1517 },
1618 .encoder_init = tda998x_encoder_init, 1518 .id_table = tda998x_ids,
1619}; 1519};
1620 1520
1621/* Module initialization */ 1521module_i2c_driver(tda998x_driver);
1622
1623static int __init
1624tda998x_init(void)
1625{
1626 DBG("");
1627 return drm_i2c_encoder_register(THIS_MODULE, &tda998x_driver);
1628}
1629
1630static void __exit
1631tda998x_exit(void)
1632{
1633 DBG("");
1634 drm_i2c_encoder_unregister(&tda998x_driver);
1635}
1636 1522
1637MODULE_AUTHOR("Rob Clark <robdclark@gmail.com"); 1523MODULE_AUTHOR("Rob Clark <robdclark@gmail.com");
1638MODULE_DESCRIPTION("NXP Semiconductors TDA998X HDMI Encoder"); 1524MODULE_DESCRIPTION("NXP Semiconductors TDA998X HDMI Encoder");
1639MODULE_LICENSE("GPL"); 1525MODULE_LICENSE("GPL");
1640
1641module_init(tda998x_init);
1642module_exit(tda998x_exit);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 998b4643109f..44d290ae1999 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -40,6 +40,10 @@ i915-y += i915_cmd_parser.o \
40 intel_ringbuffer.o \ 40 intel_ringbuffer.o \
41 intel_uncore.o 41 intel_uncore.o
42 42
43# general-purpose microcontroller (GuC) support
44i915-y += intel_guc_loader.o \
45 i915_guc_submission.o
46
43# autogenerated null render state 47# autogenerated null render state
44i915-y += intel_renderstate_gen6.o \ 48i915-y += intel_renderstate_gen6.o \
45 intel_renderstate_gen7.o \ 49 intel_renderstate_gen7.o \
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 312163379db9..0e2c1b9648a7 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -94,8 +94,8 @@ struct intel_dvo_dev_ops {
94 * after this function is called. 94 * after this function is called.
95 */ 95 */
96 void (*mode_set)(struct intel_dvo_device *dvo, 96 void (*mode_set)(struct intel_dvo_device *dvo,
97 struct drm_display_mode *mode, 97 const struct drm_display_mode *mode,
98 struct drm_display_mode *adjusted_mode); 98 const struct drm_display_mode *adjusted_mode);
99 99
100 /* 100 /*
101 * Probe for a connected output, and return detect_status. 101 * Probe for a connected output, and return detect_status.
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 86b27d1d90c2..cbb22027a3ce 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -255,8 +255,8 @@ static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo,
255} 255}
256 256
257static void ch7017_mode_set(struct intel_dvo_device *dvo, 257static void ch7017_mode_set(struct intel_dvo_device *dvo,
258 struct drm_display_mode *mode, 258 const struct drm_display_mode *mode,
259 struct drm_display_mode *adjusted_mode) 259 const struct drm_display_mode *adjusted_mode)
260{ 260{
261 uint8_t lvds_pll_feedback_div, lvds_pll_vco_control; 261 uint8_t lvds_pll_feedback_div, lvds_pll_vco_control;
262 uint8_t outputs_enable, lvds_control_2, lvds_power_down; 262 uint8_t outputs_enable, lvds_control_2, lvds_power_down;
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 80449f475960..4b4acc1a06fe 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -275,8 +275,8 @@ static enum drm_mode_status ch7xxx_mode_valid(struct intel_dvo_device *dvo,
275} 275}
276 276
277static void ch7xxx_mode_set(struct intel_dvo_device *dvo, 277static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
278 struct drm_display_mode *mode, 278 const struct drm_display_mode *mode,
279 struct drm_display_mode *adjusted_mode) 279 const struct drm_display_mode *adjusted_mode)
280{ 280{
281 uint8_t tvco, tpcp, tpd, tlpf, idf; 281 uint8_t tvco, tpcp, tpd, tlpf, idf;
282 282
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 732ce8785945..ff9f1b077d83 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -394,8 +394,8 @@ static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
394} 394}
395 395
396static void ivch_mode_set(struct intel_dvo_device *dvo, 396static void ivch_mode_set(struct intel_dvo_device *dvo,
397 struct drm_display_mode *mode, 397 const struct drm_display_mode *mode,
398 struct drm_display_mode *adjusted_mode) 398 const struct drm_display_mode *adjusted_mode)
399{ 399{
400 struct ivch_priv *priv = dvo->dev_priv; 400 struct ivch_priv *priv = dvo->dev_priv;
401 uint16_t vr40 = 0; 401 uint16_t vr40 = 0;
@@ -414,16 +414,16 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
414 vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE | 414 vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE |
415 VR40_HORIZONTAL_INTERP_ENABLE); 415 VR40_HORIZONTAL_INTERP_ENABLE);
416 416
417 if (mode->hdisplay != adjusted_mode->hdisplay || 417 if (mode->hdisplay != adjusted_mode->crtc_hdisplay ||
418 mode->vdisplay != adjusted_mode->vdisplay) { 418 mode->vdisplay != adjusted_mode->crtc_vdisplay) {
419 uint16_t x_ratio, y_ratio; 419 uint16_t x_ratio, y_ratio;
420 420
421 vr01 |= VR01_PANEL_FIT_ENABLE; 421 vr01 |= VR01_PANEL_FIT_ENABLE;
422 vr40 |= VR40_CLOCK_GATING_ENABLE; 422 vr40 |= VR40_CLOCK_GATING_ENABLE;
423 x_ratio = (((mode->hdisplay - 1) << 16) / 423 x_ratio = (((mode->hdisplay - 1) << 16) /
424 (adjusted_mode->hdisplay - 1)) >> 2; 424 (adjusted_mode->crtc_hdisplay - 1)) >> 2;
425 y_ratio = (((mode->vdisplay - 1) << 16) / 425 y_ratio = (((mode->vdisplay - 1) << 16) /
426 (adjusted_mode->vdisplay - 1)) >> 2; 426 (adjusted_mode->crtc_vdisplay - 1)) >> 2;
427 ivch_write(dvo, VR42, x_ratio); 427 ivch_write(dvo, VR42, x_ratio);
428 ivch_write(dvo, VR41, y_ratio); 428 ivch_write(dvo, VR41, y_ratio);
429 } else { 429 } else {
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
index 97ae8aa157e9..063859fff0f0 100644
--- a/drivers/gpu/drm/i915/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -546,8 +546,8 @@ static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
546} 546}
547 547
548static void ns2501_mode_set(struct intel_dvo_device *dvo, 548static void ns2501_mode_set(struct intel_dvo_device *dvo,
549 struct drm_display_mode *mode, 549 const struct drm_display_mode *mode,
550 struct drm_display_mode *adjusted_mode) 550 const struct drm_display_mode *adjusted_mode)
551{ 551{
552 const struct ns2501_configuration *conf; 552 const struct ns2501_configuration *conf;
553 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); 553 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index fa0114967076..26f13eb634f9 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -190,8 +190,8 @@ static enum drm_mode_status sil164_mode_valid(struct intel_dvo_device *dvo,
190} 190}
191 191
192static void sil164_mode_set(struct intel_dvo_device *dvo, 192static void sil164_mode_set(struct intel_dvo_device *dvo,
193 struct drm_display_mode *mode, 193 const struct drm_display_mode *mode,
194 struct drm_display_mode *adjusted_mode) 194 const struct drm_display_mode *adjusted_mode)
195{ 195{
196 /* As long as the basics are set up, since we don't have clock 196 /* As long as the basics are set up, since we don't have clock
197 * dependencies in the mode setup, we can just leave the 197 * dependencies in the mode setup, we can just leave the
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 7853719a0e81..6f1a0a6d4e22 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -222,8 +222,8 @@ static enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *dvo,
222} 222}
223 223
224static void tfp410_mode_set(struct intel_dvo_device *dvo, 224static void tfp410_mode_set(struct intel_dvo_device *dvo,
225 struct drm_display_mode *mode, 225 const struct drm_display_mode *mode,
226 struct drm_display_mode *adjusted_mode) 226 const struct drm_display_mode *adjusted_mode)
227{ 227{
228 /* As long as the basics are set up, since we don't have clock dependencies 228 /* As long as the basics are set up, since we don't have clock dependencies
229 * in the mode setup, we can just leave the registers alone and everything 229 * in the mode setup, we can just leave the registers alone and everything
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 237ff6884a22..db58c8d664c2 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -94,7 +94,7 @@
94#define CMD(op, opm, f, lm, fl, ...) \ 94#define CMD(op, opm, f, lm, fl, ...) \
95 { \ 95 { \
96 .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \ 96 .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \
97 .cmd = { (op), (opm) }, \ 97 .cmd = { (op), (opm) }, \
98 .length = { (lm) }, \ 98 .length = { (lm) }, \
99 __VA_ARGS__ \ 99 __VA_ARGS__ \
100 } 100 }
@@ -124,14 +124,14 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
124 CMD( MI_STORE_DWORD_INDEX, SMI, !F, 0xFF, R ), 124 CMD( MI_STORE_DWORD_INDEX, SMI, !F, 0xFF, R ),
125 CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W, 125 CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W,
126 .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ), 126 .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ),
127 CMD( MI_STORE_REGISTER_MEM(1), SMI, !F, 0xFF, W | B, 127 CMD( MI_STORE_REGISTER_MEM, SMI, F, 3, W | B,
128 .reg = { .offset = 1, .mask = 0x007FFFFC }, 128 .reg = { .offset = 1, .mask = 0x007FFFFC },
129 .bits = {{ 129 .bits = {{
130 .offset = 0, 130 .offset = 0,
131 .mask = MI_GLOBAL_GTT, 131 .mask = MI_GLOBAL_GTT,
132 .expected = 0, 132 .expected = 0,
133 }}, ), 133 }}, ),
134 CMD( MI_LOAD_REGISTER_MEM(1), SMI, !F, 0xFF, W | B, 134 CMD( MI_LOAD_REGISTER_MEM, SMI, F, 3, W | B,
135 .reg = { .offset = 1, .mask = 0x007FFFFC }, 135 .reg = { .offset = 1, .mask = 0x007FFFFC },
136 .bits = {{ 136 .bits = {{
137 .offset = 0, 137 .offset = 0,
@@ -448,6 +448,9 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
448 REG32(GEN7_3DPRIM_INSTANCE_COUNT), 448 REG32(GEN7_3DPRIM_INSTANCE_COUNT),
449 REG32(GEN7_3DPRIM_START_INSTANCE), 449 REG32(GEN7_3DPRIM_START_INSTANCE),
450 REG32(GEN7_3DPRIM_BASE_VERTEX), 450 REG32(GEN7_3DPRIM_BASE_VERTEX),
451 REG32(GEN7_GPGPU_DISPATCHDIMX),
452 REG32(GEN7_GPGPU_DISPATCHDIMY),
453 REG32(GEN7_GPGPU_DISPATCHDIMZ),
451 REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)), 454 REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)),
452 REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)), 455 REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)),
453 REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)), 456 REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)),
@@ -1021,7 +1024,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
1021 * only MI_LOAD_REGISTER_IMM commands. 1024 * only MI_LOAD_REGISTER_IMM commands.
1022 */ 1025 */
1023 if (reg_addr == OACONTROL) { 1026 if (reg_addr == OACONTROL) {
1024 if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) { 1027 if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
1025 DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n"); 1028 DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
1026 return false; 1029 return false;
1027 } 1030 }
@@ -1035,7 +1038,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
1035 * allowed mask/value pair given in the whitelist entry. 1038 * allowed mask/value pair given in the whitelist entry.
1036 */ 1039 */
1037 if (reg->mask) { 1040 if (reg->mask) {
1038 if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) { 1041 if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
1039 DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n", 1042 DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
1040 reg_addr); 1043 reg_addr);
1041 return false; 1044 return false;
@@ -1213,6 +1216,8 @@ int i915_cmd_parser_get_version(void)
1213 * 2. Allow access to the MI_PREDICATE_SRC0 and 1216 * 2. Allow access to the MI_PREDICATE_SRC0 and
1214 * MI_PREDICATE_SRC1 registers. 1217 * MI_PREDICATE_SRC1 registers.
1215 * 3. Allow access to the GPGPU_THREADS_DISPATCHED register. 1218 * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
1219 * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
1220 * 5. GPGPU dispatch compute indirect registers.
1216 */ 1221 */
1217 return 3; 1222 return 5;
1218} 1223}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e3ec9049081f..a3b22bdacd44 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -46,11 +46,6 @@ enum {
46 PINNED_LIST, 46 PINNED_LIST,
47}; 47};
48 48
49static const char *yesno(int v)
50{
51 return v ? "yes" : "no";
52}
53
54/* As the drm_debugfs_init() routines are called before dev->dev_private is 49/* As the drm_debugfs_init() routines are called before dev->dev_private is
55 * allocated we need to hook into the minor for release. */ 50 * allocated we need to hook into the minor for release. */
56static int 51static int
@@ -258,7 +253,11 @@ static int obj_rank_by_stolen(void *priv,
258 struct drm_i915_gem_object *b = 253 struct drm_i915_gem_object *b =
259 container_of(B, struct drm_i915_gem_object, obj_exec_link); 254 container_of(B, struct drm_i915_gem_object, obj_exec_link);
260 255
261 return a->stolen->start - b->stolen->start; 256 if (a->stolen->start < b->stolen->start)
257 return -1;
258 if (a->stolen->start > b->stolen->start)
259 return 1;
260 return 0;
262} 261}
263 262
264static int i915_gem_stolen_list_info(struct seq_file *m, void *data) 263static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
@@ -957,7 +956,6 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
957 if (ret) 956 if (ret)
958 return ret; 957 return ret;
959 958
960 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
961 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 959 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
962 for (i = 0; i < dev_priv->num_fence_regs; i++) { 960 for (i = 0; i < dev_priv->num_fence_regs; i++) {
963 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 961 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
@@ -1314,6 +1312,10 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
1314 seq_puts(m, "no P-state info available\n"); 1312 seq_puts(m, "no P-state info available\n");
1315 } 1313 }
1316 1314
1315 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk_freq);
1316 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1317 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1318
1317out: 1319out:
1318 intel_runtime_pm_put(dev_priv); 1320 intel_runtime_pm_put(dev_priv);
1319 return ret; 1321 return ret;
@@ -1387,17 +1389,16 @@ static int ironlake_drpc_info(struct seq_file *m)
1387 intel_runtime_pm_put(dev_priv); 1389 intel_runtime_pm_put(dev_priv);
1388 mutex_unlock(&dev->struct_mutex); 1390 mutex_unlock(&dev->struct_mutex);
1389 1391
1390 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? 1392 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1391 "yes" : "no");
1392 seq_printf(m, "Boost freq: %d\n", 1393 seq_printf(m, "Boost freq: %d\n",
1393 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> 1394 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1394 MEMMODE_BOOST_FREQ_SHIFT); 1395 MEMMODE_BOOST_FREQ_SHIFT);
1395 seq_printf(m, "HW control enabled: %s\n", 1396 seq_printf(m, "HW control enabled: %s\n",
1396 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no"); 1397 yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1397 seq_printf(m, "SW control enabled: %s\n", 1398 seq_printf(m, "SW control enabled: %s\n",
1398 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no"); 1399 yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1399 seq_printf(m, "Gated voltage change: %s\n", 1400 seq_printf(m, "Gated voltage change: %s\n",
1400 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no"); 1401 yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1401 seq_printf(m, "Starting frequency: P%d\n", 1402 seq_printf(m, "Starting frequency: P%d\n",
1402 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 1403 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1403 seq_printf(m, "Max P-state: P%d\n", 1404 seq_printf(m, "Max P-state: P%d\n",
@@ -1406,7 +1407,7 @@ static int ironlake_drpc_info(struct seq_file *m)
1406 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 1407 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1407 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 1408 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1408 seq_printf(m, "Render standby enabled: %s\n", 1409 seq_printf(m, "Render standby enabled: %s\n",
1409 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); 1410 yesno(!(rstdbyctl & RCX_SW_EXIT)));
1410 seq_puts(m, "Current RS state: "); 1411 seq_puts(m, "Current RS state: ");
1411 switch (rstdbyctl & RSX_STATUS_MASK) { 1412 switch (rstdbyctl & RSX_STATUS_MASK) {
1412 case RSX_STATUS_ON: 1413 case RSX_STATUS_ON:
@@ -1849,7 +1850,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
1849 goto out; 1850 goto out;
1850 1851
1851 if (opregion->header) { 1852 if (opregion->header) {
1852 memcpy_fromio(data, opregion->header, OPREGION_SIZE); 1853 memcpy(data, opregion->header, OPREGION_SIZE);
1853 seq_write(m, data, OPREGION_SIZE); 1854 seq_write(m, data, OPREGION_SIZE);
1854 } 1855 }
1855 1856
@@ -1995,7 +1996,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
1995 return; 1996 return;
1996 } 1997 }
1997 1998
1998 page = i915_gem_object_get_page(ctx_obj, 1); 1999 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
1999 if (!WARN_ON(page == NULL)) { 2000 if (!WARN_ON(page == NULL)) {
2000 reg_state = kmap_atomic(page); 2001 reg_state = kmap_atomic(page);
2001 2002
@@ -2075,8 +2076,8 @@ static int i915_execlists(struct seq_file *m, void *data)
2075 2076
2076 seq_printf(m, "%s\n", ring->name); 2077 seq_printf(m, "%s\n", ring->name);
2077 2078
2078 status = I915_READ(RING_EXECLIST_STATUS(ring)); 2079 status = I915_READ(RING_EXECLIST_STATUS_LO(ring));
2079 ctx_id = I915_READ(RING_EXECLIST_STATUS(ring) + 4); 2080 ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(ring));
2080 seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n", 2081 seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
2081 status, ctx_id); 2082 status, ctx_id);
2082 2083
@@ -2091,8 +2092,8 @@ static int i915_execlists(struct seq_file *m, void *data)
2091 read_pointer, write_pointer); 2092 read_pointer, write_pointer);
2092 2093
2093 for (i = 0; i < 6; i++) { 2094 for (i = 0; i < 6; i++) {
2094 status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i); 2095 status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
2095 ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i + 4); 2096 ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));
2096 2097
2097 seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n", 2098 seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
2098 i, status, ctx_id); 2099 i, status, ctx_id);
@@ -2237,10 +2238,9 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2237 for_each_ring(ring, dev_priv, unused) { 2238 for_each_ring(ring, dev_priv, unused) {
2238 seq_printf(m, "%s\n", ring->name); 2239 seq_printf(m, "%s\n", ring->name);
2239 for (i = 0; i < 4; i++) { 2240 for (i = 0; i < 4; i++) {
2240 u32 offset = 0x270 + i * 8; 2241 u64 pdp = I915_READ(GEN8_RING_PDP_UDW(ring, i));
2241 u64 pdp = I915_READ(ring->mmio_base + offset + 4);
2242 pdp <<= 32; 2242 pdp <<= 32;
2243 pdp |= I915_READ(ring->mmio_base + offset); 2243 pdp |= I915_READ(GEN8_RING_PDP_LDW(ring, i));
2244 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp); 2244 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2245 } 2245 }
2246 } 2246 }
@@ -2250,7 +2250,6 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2250{ 2250{
2251 struct drm_i915_private *dev_priv = dev->dev_private; 2251 struct drm_i915_private *dev_priv = dev->dev_private;
2252 struct intel_engine_cs *ring; 2252 struct intel_engine_cs *ring;
2253 struct drm_file *file;
2254 int i; 2253 int i;
2255 2254
2256 if (INTEL_INFO(dev)->gen == 6) 2255 if (INTEL_INFO(dev)->gen == 6)
@@ -2273,13 +2272,6 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2273 ppgtt->debug_dump(ppgtt, m); 2272 ppgtt->debug_dump(ppgtt, m);
2274 } 2273 }
2275 2274
2276 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2277 struct drm_i915_file_private *file_priv = file->driver_priv;
2278
2279 seq_printf(m, "proc: %s\n",
2280 get_pid_task(file->pid, PIDTYPE_PID)->comm);
2281 idr_for_each(&file_priv->context_idr, per_file_ctx, m);
2282 }
2283 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 2275 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
2284} 2276}
2285 2277
@@ -2288,6 +2280,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
2288 struct drm_info_node *node = m->private; 2280 struct drm_info_node *node = m->private;
2289 struct drm_device *dev = node->minor->dev; 2281 struct drm_device *dev = node->minor->dev;
2290 struct drm_i915_private *dev_priv = dev->dev_private; 2282 struct drm_i915_private *dev_priv = dev->dev_private;
2283 struct drm_file *file;
2291 2284
2292 int ret = mutex_lock_interruptible(&dev->struct_mutex); 2285 int ret = mutex_lock_interruptible(&dev->struct_mutex);
2293 if (ret) 2286 if (ret)
@@ -2299,10 +2292,26 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
2299 else if (INTEL_INFO(dev)->gen >= 6) 2292 else if (INTEL_INFO(dev)->gen >= 6)
2300 gen6_ppgtt_info(m, dev); 2293 gen6_ppgtt_info(m, dev);
2301 2294
2295 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2296 struct drm_i915_file_private *file_priv = file->driver_priv;
2297 struct task_struct *task;
2298
2299 task = get_pid_task(file->pid, PIDTYPE_PID);
2300 if (!task) {
2301 ret = -ESRCH;
2302 goto out_put;
2303 }
2304 seq_printf(m, "\nproc: %s\n", task->comm);
2305 put_task_struct(task);
2306 idr_for_each(&file_priv->context_idr, per_file_ctx,
2307 (void *)(unsigned long)m);
2308 }
2309
2310out_put:
2302 intel_runtime_pm_put(dev_priv); 2311 intel_runtime_pm_put(dev_priv);
2303 mutex_unlock(&dev->struct_mutex); 2312 mutex_unlock(&dev->struct_mutex);
2304 2313
2305 return 0; 2314 return ret;
2306} 2315}
2307 2316
2308static int count_irq_waiters(struct drm_i915_private *i915) 2317static int count_irq_waiters(struct drm_i915_private *i915)
@@ -2372,6 +2381,147 @@ static int i915_llc(struct seq_file *m, void *data)
2372 return 0; 2381 return 0;
2373} 2382}
2374 2383
2384static int i915_guc_load_status_info(struct seq_file *m, void *data)
2385{
2386 struct drm_info_node *node = m->private;
2387 struct drm_i915_private *dev_priv = node->minor->dev->dev_private;
2388 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
2389 u32 tmp, i;
2390
2391 if (!HAS_GUC_UCODE(dev_priv->dev))
2392 return 0;
2393
2394 seq_printf(m, "GuC firmware status:\n");
2395 seq_printf(m, "\tpath: %s\n",
2396 guc_fw->guc_fw_path);
2397 seq_printf(m, "\tfetch: %s\n",
2398 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
2399 seq_printf(m, "\tload: %s\n",
2400 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
2401 seq_printf(m, "\tversion wanted: %d.%d\n",
2402 guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
2403 seq_printf(m, "\tversion found: %d.%d\n",
2404 guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
2405
2406 tmp = I915_READ(GUC_STATUS);
2407
2408 seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2409 seq_printf(m, "\tBootrom status = 0x%x\n",
2410 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2411 seq_printf(m, "\tuKernel status = 0x%x\n",
2412 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2413 seq_printf(m, "\tMIA Core status = 0x%x\n",
2414 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2415 seq_puts(m, "\nScratch registers:\n");
2416 for (i = 0; i < 16; i++)
2417 seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
2418
2419 return 0;
2420}
2421
2422static void i915_guc_client_info(struct seq_file *m,
2423 struct drm_i915_private *dev_priv,
2424 struct i915_guc_client *client)
2425{
2426 struct intel_engine_cs *ring;
2427 uint64_t tot = 0;
2428 uint32_t i;
2429
2430 seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
2431 client->priority, client->ctx_index, client->proc_desc_offset);
2432 seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
2433 client->doorbell_id, client->doorbell_offset, client->cookie);
2434 seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
2435 client->wq_size, client->wq_offset, client->wq_tail);
2436
2437 seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
2438 seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
2439 seq_printf(m, "\tLast submission result: %d\n", client->retcode);
2440
2441 for_each_ring(ring, dev_priv, i) {
2442 seq_printf(m, "\tSubmissions: %llu %s\n",
2443 client->submissions[i],
2444 ring->name);
2445 tot += client->submissions[i];
2446 }
2447 seq_printf(m, "\tTotal: %llu\n", tot);
2448}
2449
2450static int i915_guc_info(struct seq_file *m, void *data)
2451{
2452 struct drm_info_node *node = m->private;
2453 struct drm_device *dev = node->minor->dev;
2454 struct drm_i915_private *dev_priv = dev->dev_private;
2455 struct intel_guc guc;
2456 struct i915_guc_client client = {};
2457 struct intel_engine_cs *ring;
2458 enum intel_ring_id i;
2459 u64 total = 0;
2460
2461 if (!HAS_GUC_SCHED(dev_priv->dev))
2462 return 0;
2463
2464 /* Take a local copy of the GuC data, so we can dump it at leisure */
2465 spin_lock(&dev_priv->guc.host2guc_lock);
2466 guc = dev_priv->guc;
2467 if (guc.execbuf_client) {
2468 spin_lock(&guc.execbuf_client->wq_lock);
2469 client = *guc.execbuf_client;
2470 spin_unlock(&guc.execbuf_client->wq_lock);
2471 }
2472 spin_unlock(&dev_priv->guc.host2guc_lock);
2473
2474 seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
2475 seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
2476 seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
2477 seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status);
2478 seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
2479
2480 seq_printf(m, "\nGuC submissions:\n");
2481 for_each_ring(ring, dev_priv, i) {
2482 seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x %9d\n",
2483 ring->name, guc.submissions[i],
2484 guc.last_seqno[i], guc.last_seqno[i]);
2485 total += guc.submissions[i];
2486 }
2487 seq_printf(m, "\t%s: %llu\n", "Total", total);
2488
2489 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
2490 i915_guc_client_info(m, dev_priv, &client);
2491
2492 /* Add more as required ... */
2493
2494 return 0;
2495}
2496
2497static int i915_guc_log_dump(struct seq_file *m, void *data)
2498{
2499 struct drm_info_node *node = m->private;
2500 struct drm_device *dev = node->minor->dev;
2501 struct drm_i915_private *dev_priv = dev->dev_private;
2502 struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
2503 u32 *log;
2504 int i = 0, pg;
2505
2506 if (!log_obj)
2507 return 0;
2508
2509 for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) {
2510 log = kmap_atomic(i915_gem_object_get_page(log_obj, pg));
2511
2512 for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
2513 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2514 *(log + i), *(log + i + 1),
2515 *(log + i + 2), *(log + i + 3));
2516
2517 kunmap_atomic(log);
2518 }
2519
2520 seq_putc(m, '\n');
2521
2522 return 0;
2523}
2524
2375static int i915_edp_psr_status(struct seq_file *m, void *data) 2525static int i915_edp_psr_status(struct seq_file *m, void *data)
2376{ 2526{
2377 struct drm_info_node *node = m->private; 2527 struct drm_info_node *node = m->private;
@@ -2680,11 +2830,13 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2680 struct drm_device *dev = node->minor->dev; 2830 struct drm_device *dev = node->minor->dev;
2681 struct drm_crtc *crtc = &intel_crtc->base; 2831 struct drm_crtc *crtc = &intel_crtc->base;
2682 struct intel_encoder *intel_encoder; 2832 struct intel_encoder *intel_encoder;
2833 struct drm_plane_state *plane_state = crtc->primary->state;
2834 struct drm_framebuffer *fb = plane_state->fb;
2683 2835
2684 if (crtc->primary->fb) 2836 if (fb)
2685 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", 2837 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2686 crtc->primary->fb->base.id, crtc->x, crtc->y, 2838 fb->base.id, plane_state->src_x >> 16,
2687 crtc->primary->fb->width, crtc->primary->fb->height); 2839 plane_state->src_y >> 16, fb->width, fb->height);
2688 else 2840 else
2689 seq_puts(m, "\tprimary plane disabled\n"); 2841 seq_puts(m, "\tprimary plane disabled\n");
2690 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 2842 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
@@ -2706,8 +2858,7 @@ static void intel_dp_info(struct seq_file *m,
2706 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 2858 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2707 2859
2708 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); 2860 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2709 seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" : 2861 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2710 "no");
2711 if (intel_encoder->type == INTEL_OUTPUT_EDP) 2862 if (intel_encoder->type == INTEL_OUTPUT_EDP)
2712 intel_panel_info(m, &intel_connector->panel); 2863 intel_panel_info(m, &intel_connector->panel);
2713} 2864}
@@ -2718,8 +2869,7 @@ static void intel_hdmi_info(struct seq_file *m,
2718 struct intel_encoder *intel_encoder = intel_connector->encoder; 2869 struct intel_encoder *intel_encoder = intel_connector->encoder;
2719 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); 2870 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2720 2871
2721 seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" : 2872 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2722 "no");
2723} 2873}
2724 2874
2725static void intel_lvds_info(struct seq_file *m, 2875static void intel_lvds_info(struct seq_file *m,
@@ -2769,7 +2919,7 @@ static bool cursor_active(struct drm_device *dev, int pipe)
2769 u32 state; 2919 u32 state;
2770 2920
2771 if (IS_845G(dev) || IS_I865G(dev)) 2921 if (IS_845G(dev) || IS_I865G(dev))
2772 state = I915_READ(_CURACNTR) & CURSOR_ENABLE; 2922 state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
2773 else 2923 else
2774 state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; 2924 state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
2775 2925
@@ -3007,7 +3157,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
3007 skl_ddb_entry_size(entry)); 3157 skl_ddb_entry_size(entry));
3008 } 3158 }
3009 3159
3010 entry = &ddb->cursor[pipe]; 3160 entry = &ddb->plane[pipe][PLANE_CURSOR];
3011 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start, 3161 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
3012 entry->end, skl_ddb_entry_size(entry)); 3162 entry->end, skl_ddb_entry_size(entry));
3013 } 3163 }
@@ -4807,7 +4957,7 @@ static void cherryview_sseu_device_status(struct drm_device *dev,
4807 struct sseu_dev_status *stat) 4957 struct sseu_dev_status *stat)
4808{ 4958{
4809 struct drm_i915_private *dev_priv = dev->dev_private; 4959 struct drm_i915_private *dev_priv = dev->dev_private;
4810 const int ss_max = 2; 4960 int ss_max = 2;
4811 int ss; 4961 int ss;
4812 u32 sig1[ss_max], sig2[ss_max]; 4962 u32 sig1[ss_max], sig2[ss_max];
4813 4963
@@ -4900,13 +5050,38 @@ static void gen9_sseu_device_status(struct drm_device *dev,
4900 } 5050 }
4901} 5051}
4902 5052
5053static void broadwell_sseu_device_status(struct drm_device *dev,
5054 struct sseu_dev_status *stat)
5055{
5056 struct drm_i915_private *dev_priv = dev->dev_private;
5057 int s;
5058 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
5059
5060 stat->slice_total = hweight32(slice_info & GEN8_LSLICESTAT_MASK);
5061
5062 if (stat->slice_total) {
5063 stat->subslice_per_slice = INTEL_INFO(dev)->subslice_per_slice;
5064 stat->subslice_total = stat->slice_total *
5065 stat->subslice_per_slice;
5066 stat->eu_per_subslice = INTEL_INFO(dev)->eu_per_subslice;
5067 stat->eu_total = stat->eu_per_subslice * stat->subslice_total;
5068
5069 /* subtract fused off EU(s) from enabled slice(s) */
5070 for (s = 0; s < stat->slice_total; s++) {
5071 u8 subslice_7eu = INTEL_INFO(dev)->subslice_7eu[s];
5072
5073 stat->eu_total -= hweight8(subslice_7eu);
5074 }
5075 }
5076}
5077
4903static int i915_sseu_status(struct seq_file *m, void *unused) 5078static int i915_sseu_status(struct seq_file *m, void *unused)
4904{ 5079{
4905 struct drm_info_node *node = (struct drm_info_node *) m->private; 5080 struct drm_info_node *node = (struct drm_info_node *) m->private;
4906 struct drm_device *dev = node->minor->dev; 5081 struct drm_device *dev = node->minor->dev;
4907 struct sseu_dev_status stat; 5082 struct sseu_dev_status stat;
4908 5083
4909 if ((INTEL_INFO(dev)->gen < 8) || IS_BROADWELL(dev)) 5084 if (INTEL_INFO(dev)->gen < 8)
4910 return -ENODEV; 5085 return -ENODEV;
4911 5086
4912 seq_puts(m, "SSEU Device Info\n"); 5087 seq_puts(m, "SSEU Device Info\n");
@@ -4931,6 +5106,8 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
4931 memset(&stat, 0, sizeof(stat)); 5106 memset(&stat, 0, sizeof(stat));
4932 if (IS_CHERRYVIEW(dev)) { 5107 if (IS_CHERRYVIEW(dev)) {
4933 cherryview_sseu_device_status(dev, &stat); 5108 cherryview_sseu_device_status(dev, &stat);
5109 } else if (IS_BROADWELL(dev)) {
5110 broadwell_sseu_device_status(dev, &stat);
4934 } else if (INTEL_INFO(dev)->gen >= 9) { 5111 } else if (INTEL_INFO(dev)->gen >= 9) {
4935 gen9_sseu_device_status(dev, &stat); 5112 gen9_sseu_device_status(dev, &stat);
4936 } 5113 }
@@ -5033,6 +5210,9 @@ static const struct drm_info_list i915_debugfs_list[] = {
5033 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, 5210 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
5034 {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS}, 5211 {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
5035 {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0}, 5212 {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
5213 {"i915_guc_info", i915_guc_info, 0},
5214 {"i915_guc_load_status", i915_guc_load_status_info, 0},
5215 {"i915_guc_log_dump", i915_guc_log_dump, 0},
5036 {"i915_frequency_info", i915_frequency_info, 0}, 5216 {"i915_frequency_info", i915_frequency_info, 0},
5037 {"i915_hangcheck_info", i915_hangcheck_info, 0}, 5217 {"i915_hangcheck_info", i915_hangcheck_info, 0},
5038 {"i915_drpc_info", i915_drpc_info, 0}, 5218 {"i915_drpc_info", i915_drpc_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 990f656e6ab0..b4741d121a74 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -75,7 +75,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
75 value = 1; 75 value = 1;
76 break; 76 break;
77 case I915_PARAM_NUM_FENCES_AVAIL: 77 case I915_PARAM_NUM_FENCES_AVAIL:
78 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; 78 value = dev_priv->num_fence_regs;
79 break; 79 break;
80 case I915_PARAM_HAS_OVERLAY: 80 case I915_PARAM_HAS_OVERLAY:
81 value = dev_priv->overlay ? 1 : 0; 81 value = dev_priv->overlay ? 1 : 0;
@@ -183,35 +183,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
183 return 0; 183 return 0;
184} 184}
185 185
186static int i915_setparam(struct drm_device *dev, void *data,
187 struct drm_file *file_priv)
188{
189 struct drm_i915_private *dev_priv = dev->dev_private;
190 drm_i915_setparam_t *param = data;
191
192 switch (param->param) {
193 case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
194 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
195 case I915_SETPARAM_ALLOW_BATCHBUFFER:
196 /* Reject all old ums/dri params. */
197 return -ENODEV;
198
199 case I915_SETPARAM_NUM_USED_FENCES:
200 if (param->value > dev_priv->num_fence_regs ||
201 param->value < 0)
202 return -EINVAL;
203 /* Userspace can use first N regs */
204 dev_priv->fence_reg_start = param->value;
205 break;
206 default:
207 DRM_DEBUG_DRIVER("unknown parameter %d\n",
208 param->param);
209 return -EINVAL;
210 }
211
212 return 0;
213}
214
215static int i915_get_bridge_dev(struct drm_device *dev) 186static int i915_get_bridge_dev(struct drm_device *dev)
216{ 187{
217 struct drm_i915_private *dev_priv = dev->dev_private; 188 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -364,12 +335,12 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
364 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 335 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
365 /* i915 resume handler doesn't set to D0 */ 336 /* i915 resume handler doesn't set to D0 */
366 pci_set_power_state(dev->pdev, PCI_D0); 337 pci_set_power_state(dev->pdev, PCI_D0);
367 i915_resume_legacy(dev); 338 i915_resume_switcheroo(dev);
368 dev->switch_power_state = DRM_SWITCH_POWER_ON; 339 dev->switch_power_state = DRM_SWITCH_POWER_ON;
369 } else { 340 } else {
370 pr_err("switched off\n"); 341 pr_err("switched off\n");
371 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 342 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
372 i915_suspend_legacy(dev, pmm); 343 i915_suspend_switcheroo(dev, pmm);
373 dev->switch_power_state = DRM_SWITCH_POWER_OFF; 344 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
374 } 345 }
375} 346}
@@ -435,6 +406,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
435 * working irqs for e.g. gmbus and dp aux transfers. */ 406 * working irqs for e.g. gmbus and dp aux transfers. */
436 intel_modeset_init(dev); 407 intel_modeset_init(dev);
437 408
409 intel_guc_ucode_init(dev);
410
438 ret = i915_gem_init(dev); 411 ret = i915_gem_init(dev);
439 if (ret) 412 if (ret)
440 goto cleanup_irq; 413 goto cleanup_irq;
@@ -476,6 +449,7 @@ cleanup_gem:
476 i915_gem_context_fini(dev); 449 i915_gem_context_fini(dev);
477 mutex_unlock(&dev->struct_mutex); 450 mutex_unlock(&dev->struct_mutex);
478cleanup_irq: 451cleanup_irq:
452 intel_guc_ucode_fini(dev);
479 drm_irq_uninstall(dev); 453 drm_irq_uninstall(dev);
480cleanup_gem_stolen: 454cleanup_gem_stolen:
481 i915_gem_cleanup_stolen(dev); 455 i915_gem_cleanup_stolen(dev);
@@ -623,17 +597,6 @@ static void gen9_sseu_info_init(struct drm_device *dev)
623 u32 fuse2, s_enable, ss_disable, eu_disable; 597 u32 fuse2, s_enable, ss_disable, eu_disable;
624 u8 eu_mask = 0xff; 598 u8 eu_mask = 0xff;
625 599
626 /*
627 * BXT has a single slice. BXT also has at most 6 EU per subslice,
628 * and therefore only the lowest 6 bits of the 8-bit EU disable
629 * fields are valid.
630 */
631 if (IS_BROXTON(dev)) {
632 s_max = 1;
633 eu_max = 6;
634 eu_mask = 0x3f;
635 }
636
637 info = (struct intel_device_info *)&dev_priv->info; 600 info = (struct intel_device_info *)&dev_priv->info;
638 fuse2 = I915_READ(GEN8_FUSE2); 601 fuse2 = I915_READ(GEN8_FUSE2);
639 s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> 602 s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
@@ -705,6 +668,82 @@ static void gen9_sseu_info_init(struct drm_device *dev)
705 info->has_eu_pg = (info->eu_per_subslice > 2); 668 info->has_eu_pg = (info->eu_per_subslice > 2);
706} 669}
707 670
671static void broadwell_sseu_info_init(struct drm_device *dev)
672{
673 struct drm_i915_private *dev_priv = dev->dev_private;
674 struct intel_device_info *info;
675 const int s_max = 3, ss_max = 3, eu_max = 8;
676 int s, ss;
677 u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
678
679 fuse2 = I915_READ(GEN8_FUSE2);
680 s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
681 ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
682
683 eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
684 eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
685 ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
686 (32 - GEN8_EU_DIS0_S1_SHIFT));
687 eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
688 ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
689 (32 - GEN8_EU_DIS1_S2_SHIFT));
690
691
692 info = (struct intel_device_info *)&dev_priv->info;
693 info->slice_total = hweight32(s_enable);
694
695 /*
696 * The subslice disable field is global, i.e. it applies
697 * to each of the enabled slices.
698 */
699 info->subslice_per_slice = ss_max - hweight32(ss_disable);
700 info->subslice_total = info->slice_total * info->subslice_per_slice;
701
702 /*
703 * Iterate through enabled slices and subslices to
704 * count the total enabled EU.
705 */
706 for (s = 0; s < s_max; s++) {
707 if (!(s_enable & (0x1 << s)))
708 /* skip disabled slice */
709 continue;
710
711 for (ss = 0; ss < ss_max; ss++) {
712 u32 n_disabled;
713
714 if (ss_disable & (0x1 << ss))
715 /* skip disabled subslice */
716 continue;
717
718 n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
719
720 /*
721 * Record which subslices have 7 EUs.
722 */
723 if (eu_max - n_disabled == 7)
724 info->subslice_7eu[s] |= 1 << ss;
725
726 info->eu_total += eu_max - n_disabled;
727 }
728 }
729
730 /*
731 * BDW is expected to always have a uniform distribution of EU across
732 * subslices with the exception that any one EU in any one subslice may
733 * be fused off for die recovery.
734 */
735 info->eu_per_subslice = info->subslice_total ?
736 DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
737
738 /*
739 * BDW supports slice power gating on devices with more than
740 * one slice.
741 */
742 info->has_slice_pg = (info->slice_total > 1);
743 info->has_subslice_pg = 0;
744 info->has_eu_pg = 0;
745}
746
708/* 747/*
709 * Determine various intel_device_info fields at runtime. 748 * Determine various intel_device_info fields at runtime.
710 * 749 *
@@ -775,6 +814,8 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
775 /* Initialize slice/subslice/EU info */ 814 /* Initialize slice/subslice/EU info */
776 if (IS_CHERRYVIEW(dev)) 815 if (IS_CHERRYVIEW(dev))
777 cherryview_sseu_info_init(dev); 816 cherryview_sseu_info_init(dev);
817 else if (IS_BROADWELL(dev))
818 broadwell_sseu_info_init(dev);
778 else if (INTEL_INFO(dev)->gen >= 9) 819 else if (INTEL_INFO(dev)->gen >= 9)
779 gen9_sseu_info_init(dev); 820 gen9_sseu_info_init(dev);
780 821
@@ -791,6 +832,24 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
791 info->has_eu_pg ? "y" : "n"); 832 info->has_eu_pg ? "y" : "n");
792} 833}
793 834
835static void intel_init_dpio(struct drm_i915_private *dev_priv)
836{
837 if (!IS_VALLEYVIEW(dev_priv))
838 return;
839
840 /*
841 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
842 * CHV x1 PHY (DP/HDMI D)
843 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
844 */
845 if (IS_CHERRYVIEW(dev_priv)) {
846 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
847 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
848 } else {
849 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
850 }
851}
852
794/** 853/**
795 * i915_driver_load - setup chip and create an initial config 854 * i915_driver_load - setup chip and create an initial config
796 * @dev: DRM device 855 * @dev: DRM device
@@ -972,8 +1031,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
972 intel_setup_gmbus(dev); 1031 intel_setup_gmbus(dev);
973 intel_opregion_setup(dev); 1032 intel_opregion_setup(dev);
974 1033
975 intel_setup_bios(dev);
976
977 i915_gem_load(dev); 1034 i915_gem_load(dev);
978 1035
979 /* On the 945G/GM, the chipset reports the MSI capability on the 1036 /* On the 945G/GM, the chipset reports the MSI capability on the
@@ -992,6 +1049,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
992 1049
993 intel_device_info_runtime_init(dev); 1050 intel_device_info_runtime_init(dev);
994 1051
1052 intel_init_dpio(dev_priv);
1053
995 if (INTEL_INFO(dev)->num_pipes) { 1054 if (INTEL_INFO(dev)->num_pipes) {
996 ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes); 1055 ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
997 if (ret) 1056 if (ret)
@@ -1060,12 +1119,9 @@ out_freecsr:
1060put_bridge: 1119put_bridge:
1061 pci_dev_put(dev_priv->bridge_dev); 1120 pci_dev_put(dev_priv->bridge_dev);
1062free_priv: 1121free_priv:
1063 if (dev_priv->requests) 1122 kmem_cache_destroy(dev_priv->requests);
1064 kmem_cache_destroy(dev_priv->requests); 1123 kmem_cache_destroy(dev_priv->vmas);
1065 if (dev_priv->vmas) 1124 kmem_cache_destroy(dev_priv->objects);
1066 kmem_cache_destroy(dev_priv->vmas);
1067 if (dev_priv->objects)
1068 kmem_cache_destroy(dev_priv->objects);
1069 kfree(dev_priv); 1125 kfree(dev_priv);
1070 return ret; 1126 return ret;
1071} 1127}
@@ -1112,6 +1168,10 @@ int i915_driver_unload(struct drm_device *dev)
1112 dev_priv->vbt.child_dev = NULL; 1168 dev_priv->vbt.child_dev = NULL;
1113 dev_priv->vbt.child_dev_num = 0; 1169 dev_priv->vbt.child_dev_num = 0;
1114 } 1170 }
1171 kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
1172 dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
1173 kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
1174 dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
1115 1175
1116 vga_switcheroo_unregister_client(dev->pdev); 1176 vga_switcheroo_unregister_client(dev->pdev);
1117 vga_client_register(dev->pdev, NULL, NULL, NULL); 1177 vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -1128,6 +1188,7 @@ int i915_driver_unload(struct drm_device *dev)
1128 /* Flush any outstanding unpin_work. */ 1188 /* Flush any outstanding unpin_work. */
1129 flush_workqueue(dev_priv->wq); 1189 flush_workqueue(dev_priv->wq);
1130 1190
1191 intel_guc_ucode_fini(dev);
1131 mutex_lock(&dev->struct_mutex); 1192 mutex_lock(&dev->struct_mutex);
1132 i915_gem_cleanup_ringbuffer(dev); 1193 i915_gem_cleanup_ringbuffer(dev);
1133 i915_gem_context_fini(dev); 1194 i915_gem_context_fini(dev);
@@ -1151,13 +1212,9 @@ int i915_driver_unload(struct drm_device *dev)
1151 if (dev_priv->regs != NULL) 1212 if (dev_priv->regs != NULL)
1152 pci_iounmap(dev->pdev, dev_priv->regs); 1213 pci_iounmap(dev->pdev, dev_priv->regs);
1153 1214
1154 if (dev_priv->requests) 1215 kmem_cache_destroy(dev_priv->requests);
1155 kmem_cache_destroy(dev_priv->requests); 1216 kmem_cache_destroy(dev_priv->vmas);
1156 if (dev_priv->vmas) 1217 kmem_cache_destroy(dev_priv->objects);
1157 kmem_cache_destroy(dev_priv->vmas);
1158 if (dev_priv->objects)
1159 kmem_cache_destroy(dev_priv->objects);
1160
1161 pci_dev_put(dev_priv->bridge_dev); 1218 pci_dev_put(dev_priv->bridge_dev);
1162 kfree(dev_priv); 1219 kfree(dev_priv);
1163 1220
@@ -1227,7 +1284,7 @@ const struct drm_ioctl_desc i915_ioctls[] = {
1227 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH), 1284 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
1228 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH), 1285 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
1229 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), 1286 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
1230 DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1287 DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1231 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), 1288 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
1232 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), 1289 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
1233 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1290 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -1237,41 +1294,41 @@ const struct drm_ioctl_desc i915_ioctls[] = {
1237 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH), 1294 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
1238 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH), 1295 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
1239 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1296 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1240 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 1297 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1241 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), 1298 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
1242 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 1299 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
1243 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), 1300 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
1244 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), 1301 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
1245 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 1302 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1246 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1303 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
1247 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1304 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
1248 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 1305 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1249 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 1306 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1250 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 1307 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1251 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1308 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
1252 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1309 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
1253 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1310 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
1254 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1311 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
1255 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1312 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
1256 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1313 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
1257 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1314 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
1258 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1315 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
1259 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1316 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
1260 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1317 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
1261 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), 1318 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
1262 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1319 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
1263 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 1320 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
1264 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 1321 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
1265 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 1322 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
1266 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 1323 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
1267 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 1324 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1268 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1325 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
1269 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1326 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
1270 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1327 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
1271 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1328 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW),
1272 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1329 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
1273 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1330 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
1274 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1331 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
1275}; 1332};
1276 1333
1277int i915_max_ioctl = ARRAY_SIZE(i915_ioctls); 1334int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ab64d68388f2..760e0ce4aa26 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -362,6 +362,7 @@ static const struct intel_device_info intel_skylake_info = {
362 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, 362 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
363 .has_llc = 1, 363 .has_llc = 1,
364 .has_ddi = 1, 364 .has_ddi = 1,
365 .has_fpga_dbg = 1,
365 .has_fbc = 1, 366 .has_fbc = 1,
366 GEN_DEFAULT_PIPEOFFSETS, 367 GEN_DEFAULT_PIPEOFFSETS,
367 IVB_CURSOR_OFFSETS, 368 IVB_CURSOR_OFFSETS,
@@ -374,6 +375,7 @@ static const struct intel_device_info intel_skylake_gt3_info = {
374 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, 375 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
375 .has_llc = 1, 376 .has_llc = 1,
376 .has_ddi = 1, 377 .has_ddi = 1,
378 .has_fpga_dbg = 1,
377 .has_fbc = 1, 379 .has_fbc = 1,
378 GEN_DEFAULT_PIPEOFFSETS, 380 GEN_DEFAULT_PIPEOFFSETS,
379 IVB_CURSOR_OFFSETS, 381 IVB_CURSOR_OFFSETS,
@@ -386,6 +388,7 @@ static const struct intel_device_info intel_broxton_info = {
386 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, 388 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
387 .num_pipes = 3, 389 .num_pipes = 3,
388 .has_ddi = 1, 390 .has_ddi = 1,
391 .has_fpga_dbg = 1,
389 .has_fbc = 1, 392 .has_fbc = 1,
390 GEN_DEFAULT_PIPEOFFSETS, 393 GEN_DEFAULT_PIPEOFFSETS,
391 IVB_CURSOR_OFFSETS, 394 IVB_CURSOR_OFFSETS,
@@ -440,6 +443,34 @@ static const struct pci_device_id pciidlist[] = { /* aka */
440 443
441MODULE_DEVICE_TABLE(pci, pciidlist); 444MODULE_DEVICE_TABLE(pci, pciidlist);
442 445
446static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
447{
448 enum intel_pch ret = PCH_NOP;
449
450 /*
451 * In a virtualized passthrough environment we can be in a
452 * setup where the ISA bridge is not able to be passed through.
453 * In this case, a south bridge can be emulated and we have to
454 * make an educated guess as to which PCH is really there.
455 */
456
457 if (IS_GEN5(dev)) {
458 ret = PCH_IBX;
459 DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
460 } else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
461 ret = PCH_CPT;
462 DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
463 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
464 ret = PCH_LPT;
465 DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
466 } else if (IS_SKYLAKE(dev)) {
467 ret = PCH_SPT;
468 DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
469 }
470
471 return ret;
472}
473
443void intel_detect_pch(struct drm_device *dev) 474void intel_detect_pch(struct drm_device *dev)
444{ 475{
445 struct drm_i915_private *dev_priv = dev->dev_private; 476 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -500,6 +531,8 @@ void intel_detect_pch(struct drm_device *dev)
500 dev_priv->pch_type = PCH_SPT; 531 dev_priv->pch_type = PCH_SPT;
501 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); 532 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
502 WARN_ON(!IS_SKYLAKE(dev)); 533 WARN_ON(!IS_SKYLAKE(dev));
534 } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) {
535 dev_priv->pch_type = intel_virt_detect_pch(dev);
503 } else 536 } else
504 continue; 537 continue;
505 538
@@ -605,6 +638,8 @@ static int i915_drm_suspend(struct drm_device *dev)
605 return error; 638 return error;
606 } 639 }
607 640
641 intel_guc_suspend(dev);
642
608 intel_suspend_gt_powersave(dev); 643 intel_suspend_gt_powersave(dev);
609 644
610 /* 645 /*
@@ -679,7 +714,7 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
679 return 0; 714 return 0;
680} 715}
681 716
682int i915_suspend_legacy(struct drm_device *dev, pm_message_t state) 717int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
683{ 718{
684 int error; 719 int error;
685 720
@@ -734,6 +769,8 @@ static int i915_drm_resume(struct drm_device *dev)
734 } 769 }
735 mutex_unlock(&dev->struct_mutex); 770 mutex_unlock(&dev->struct_mutex);
736 771
772 intel_guc_resume(dev);
773
737 intel_modeset_init_hw(dev); 774 intel_modeset_init_hw(dev);
738 775
739 spin_lock_irq(&dev_priv->irq_lock); 776 spin_lock_irq(&dev_priv->irq_lock);
@@ -812,7 +849,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
812 return ret; 849 return ret;
813} 850}
814 851
815int i915_resume_legacy(struct drm_device *dev) 852int i915_resume_switcheroo(struct drm_device *dev)
816{ 853{
817 int ret; 854 int ret;
818 855
@@ -1018,12 +1055,6 @@ static int skl_suspend_complete(struct drm_i915_private *dev_priv)
1018{ 1055{
1019 /* Enabling DC6 is not a hard requirement to enter runtime D3 */ 1056 /* Enabling DC6 is not a hard requirement to enter runtime D3 */
1020 1057
1021 /*
1022 * This is to ensure that CSR isn't identified as loaded before
1023 * CSR-loading program is called during runtime-resume.
1024 */
1025 intel_csr_load_status_set(dev_priv, FW_UNINITIALIZED);
1026
1027 skl_uninit_cdclk(dev_priv); 1058 skl_uninit_cdclk(dev_priv);
1028 1059
1029 return 0; 1060 return 0;
@@ -1117,7 +1148,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1117 s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1); 1148 s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
1118 1149
1119 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) 1150 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
1120 s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4); 1151 s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
1121 1152
1122 s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); 1153 s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
1123 s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT); 1154 s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
@@ -1161,7 +1192,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1161 s->pm_ier = I915_READ(GEN6_PMIER); 1192 s->pm_ier = I915_READ(GEN6_PMIER);
1162 1193
1163 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) 1194 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
1164 s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4); 1195 s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
1165 1196
1166 /* GT SA CZ domain, 0x100000-0x138124 */ 1197 /* GT SA CZ domain, 0x100000-0x138124 */
1167 s->tilectl = I915_READ(TILECTL); 1198 s->tilectl = I915_READ(TILECTL);
@@ -1199,7 +1230,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1199 I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1); 1230 I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
1200 1231
1201 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) 1232 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
1202 I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]); 1233 I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
1203 1234
1204 I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count); 1235 I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
1205 I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count); 1236 I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
@@ -1243,7 +1274,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1243 I915_WRITE(GEN6_PMIER, s->pm_ier); 1274 I915_WRITE(GEN6_PMIER, s->pm_ier);
1244 1275
1245 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) 1276 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
1246 I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]); 1277 I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
1247 1278
1248 /* GT SA CZ domain, 0x100000-0x138124 */ 1279 /* GT SA CZ domain, 0x100000-0x138124 */
1249 I915_WRITE(TILECTL, s->tilectl); 1280 I915_WRITE(TILECTL, s->tilectl);
@@ -1473,6 +1504,8 @@ static int intel_runtime_suspend(struct device *device)
1473 i915_gem_release_all_mmaps(dev_priv); 1504 i915_gem_release_all_mmaps(dev_priv);
1474 mutex_unlock(&dev->struct_mutex); 1505 mutex_unlock(&dev->struct_mutex);
1475 1506
1507 intel_guc_suspend(dev);
1508
1476 intel_suspend_gt_powersave(dev); 1509 intel_suspend_gt_powersave(dev);
1477 intel_runtime_pm_disable_interrupts(dev_priv); 1510 intel_runtime_pm_disable_interrupts(dev_priv);
1478 1511
@@ -1532,6 +1565,8 @@ static int intel_runtime_resume(struct device *device)
1532 intel_opregion_notify_adapter(dev, PCI_D0); 1565 intel_opregion_notify_adapter(dev, PCI_D0);
1533 dev_priv->pm.suspended = false; 1566 dev_priv->pm.suspended = false;
1534 1567
1568 intel_guc_resume(dev);
1569
1535 if (IS_GEN6(dev_priv)) 1570 if (IS_GEN6(dev_priv))
1536 intel_init_pch_refclk(dev); 1571 intel_init_pch_refclk(dev);
1537 1572
@@ -1552,6 +1587,15 @@ static int intel_runtime_resume(struct device *device)
1552 gen6_update_ring_freq(dev); 1587 gen6_update_ring_freq(dev);
1553 1588
1554 intel_runtime_pm_enable_interrupts(dev_priv); 1589 intel_runtime_pm_enable_interrupts(dev_priv);
1590
1591 /*
1592 * On VLV/CHV display interrupts are part of the display
1593 * power well, so hpd is reinitialized from there. For
1594 * everyone else do it here.
1595 */
1596 if (!IS_VALLEYVIEW(dev_priv))
1597 intel_hpd_init(dev_priv);
1598
1555 intel_enable_gt_powersave(dev); 1599 intel_enable_gt_powersave(dev);
1556 1600
1557 if (ret) 1601 if (ret)
@@ -1649,7 +1693,7 @@ static struct drm_driver driver = {
1649 */ 1693 */
1650 .driver_features = 1694 .driver_features =
1651 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME | 1695 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
1652 DRIVER_RENDER, 1696 DRIVER_RENDER | DRIVER_MODESET,
1653 .load = i915_driver_load, 1697 .load = i915_driver_load,
1654 .unload = i915_driver_unload, 1698 .unload = i915_driver_unload,
1655 .open = i915_driver_open, 1699 .open = i915_driver_open,
@@ -1658,10 +1702,6 @@ static struct drm_driver driver = {
1658 .postclose = i915_driver_postclose, 1702 .postclose = i915_driver_postclose,
1659 .set_busid = drm_pci_set_busid, 1703 .set_busid = drm_pci_set_busid,
1660 1704
1661 /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
1662 .suspend = i915_suspend_legacy,
1663 .resume = i915_resume_legacy,
1664
1665#if defined(CONFIG_DEBUG_FS) 1705#if defined(CONFIG_DEBUG_FS)
1666 .debugfs_init = i915_debugfs_init, 1706 .debugfs_init = i915_debugfs_init,
1667 .debugfs_cleanup = i915_debugfs_cleanup, 1707 .debugfs_cleanup = i915_debugfs_cleanup,
@@ -1704,7 +1744,6 @@ static int __init i915_init(void)
1704 * either the i915.modeset parameter or by the 1744 * either the i915.modeset parameter or by the
1705 * vga_text_mode_force boot option. 1745 * vga_text_mode_force boot option.
1706 */ 1746 */
1707 driver.driver_features |= DRIVER_MODESET;
1708 1747
1709 if (i915.modeset == 0) 1748 if (i915.modeset == 0)
1710 driver.driver_features &= ~DRIVER_MODESET; 1749 driver.driver_features &= ~DRIVER_MODESET;
@@ -1715,18 +1754,12 @@ static int __init i915_init(void)
1715#endif 1754#endif
1716 1755
1717 if (!(driver.driver_features & DRIVER_MODESET)) { 1756 if (!(driver.driver_features & DRIVER_MODESET)) {
1718 driver.get_vblank_timestamp = NULL;
1719 /* Silently fail loading to not upset userspace. */ 1757 /* Silently fail loading to not upset userspace. */
1720 DRM_DEBUG_DRIVER("KMS and UMS disabled.\n"); 1758 DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
1721 return 0; 1759 return 0;
1722 } 1760 }
1723 1761
1724 /* 1762 if (i915.nuclear_pageflip)
1725 * FIXME: Note that we're lying to the DRM core here so that we can get access
1726 * to the atomic ioctl and the atomic properties. Only plane operations on
1727 * a single CRTC will actually work.
1728 */
1729 if (driver.driver_features & DRIVER_MODESET)
1730 driver.driver_features |= DRIVER_ATOMIC; 1763 driver.driver_features |= DRIVER_ATOMIC;
1731 1764
1732 return drm_pci_init(&driver, &i915_pci_driver); 1765 return drm_pci_init(&driver, &i915_pci_driver);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 22dd7043c9ef..8afda459a26e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -50,13 +50,14 @@
50#include <linux/intel-iommu.h> 50#include <linux/intel-iommu.h>
51#include <linux/kref.h> 51#include <linux/kref.h>
52#include <linux/pm_qos.h> 52#include <linux/pm_qos.h>
53#include "intel_guc.h"
53 54
54/* General customization: 55/* General customization:
55 */ 56 */
56 57
57#define DRIVER_NAME "i915" 58#define DRIVER_NAME "i915"
58#define DRIVER_DESC "Intel Graphics" 59#define DRIVER_DESC "Intel Graphics"
59#define DRIVER_DATE "20150731" 60#define DRIVER_DATE "20151010"
60 61
61#undef WARN_ON 62#undef WARN_ON
62/* Many gcc seem to not see through this and fall over :( */ 63/* Many gcc seem to not see through this and fall over :( */
@@ -67,11 +68,11 @@
67 BUILD_BUG_ON(__i915_warn_cond); \ 68 BUILD_BUG_ON(__i915_warn_cond); \
68 WARN(__i915_warn_cond, "WARN_ON(" #x ")"); }) 69 WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
69#else 70#else
70#define WARN_ON(x) WARN((x), "WARN_ON(" #x ")") 71#define WARN_ON(x) WARN((x), "WARN_ON(%s)", #x )
71#endif 72#endif
72 73
73#undef WARN_ON_ONCE 74#undef WARN_ON_ONCE
74#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(" #x ")") 75#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(%s)", #x )
75 76
76#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \ 77#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
77 (long) (x), __func__); 78 (long) (x), __func__);
@@ -105,6 +106,11 @@
105 unlikely(__ret_warn_on); \ 106 unlikely(__ret_warn_on); \
106}) 107})
107 108
109static inline const char *yesno(bool v)
110{
111 return v ? "yes" : "no";
112}
113
108enum pipe { 114enum pipe {
109 INVALID_PIPE = -1, 115 INVALID_PIPE = -1,
110 PIPE_A = 0, 116 PIPE_A = 0,
@@ -125,17 +131,17 @@ enum transcoder {
125#define transcoder_name(t) ((t) + 'A') 131#define transcoder_name(t) ((t) + 'A')
126 132
127/* 133/*
128 * This is the maximum (across all platforms) number of planes (primary + 134 * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
129 * sprites) that can be active at the same time on one pipe. 135 * number of planes per CRTC. Not all platforms really have this many planes,
130 * 136 * which means some arrays of size I915_MAX_PLANES may have unused entries
131 * This value doesn't count the cursor plane. 137 * between the topmost sprite plane and the cursor plane.
132 */ 138 */
133#define I915_MAX_PLANES 4
134
135enum plane { 139enum plane {
136 PLANE_A = 0, 140 PLANE_A = 0,
137 PLANE_B, 141 PLANE_B,
138 PLANE_C, 142 PLANE_C,
143 PLANE_CURSOR,
144 I915_MAX_PLANES,
139}; 145};
140#define plane_name(p) ((p) + 'A') 146#define plane_name(p) ((p) + 'A')
141 147
@@ -444,14 +450,14 @@ struct opregion_swsci;
444struct opregion_asle; 450struct opregion_asle;
445 451
446struct intel_opregion { 452struct intel_opregion {
447 struct opregion_header __iomem *header; 453 struct opregion_header *header;
448 struct opregion_acpi __iomem *acpi; 454 struct opregion_acpi *acpi;
449 struct opregion_swsci __iomem *swsci; 455 struct opregion_swsci *swsci;
450 u32 swsci_gbda_sub_functions; 456 u32 swsci_gbda_sub_functions;
451 u32 swsci_sbcb_sub_functions; 457 u32 swsci_sbcb_sub_functions;
452 struct opregion_asle __iomem *asle; 458 struct opregion_asle *asle;
453 void __iomem *vbt; 459 void *vbt;
454 u32 __iomem *lid_state; 460 u32 *lid_state;
455 struct work_struct asle_work; 461 struct work_struct asle_work;
456}; 462};
457#define OPREGION_SIZE (8*1024) 463#define OPREGION_SIZE (8*1024)
@@ -549,7 +555,7 @@ struct drm_i915_error_state {
549 555
550 struct drm_i915_error_object { 556 struct drm_i915_error_object {
551 int page_count; 557 int page_count;
552 u32 gtt_offset; 558 u64 gtt_offset;
553 u32 *pages[0]; 559 u32 *pages[0];
554 } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page; 560 } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
555 561
@@ -575,7 +581,7 @@ struct drm_i915_error_state {
575 u32 size; 581 u32 size;
576 u32 name; 582 u32 name;
577 u32 rseqno[I915_NUM_RINGS], wseqno; 583 u32 rseqno[I915_NUM_RINGS], wseqno;
578 u32 gtt_offset; 584 u64 gtt_offset;
579 u32 read_domains; 585 u32 read_domains;
580 u32 write_domain; 586 u32 write_domain;
581 s32 fence_reg:I915_MAX_NUM_FENCE_BITS; 587 s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
@@ -640,7 +646,7 @@ struct drm_i915_display_funcs {
640 void (*crtc_disable)(struct drm_crtc *crtc); 646 void (*crtc_disable)(struct drm_crtc *crtc);
641 void (*audio_codec_enable)(struct drm_connector *connector, 647 void (*audio_codec_enable)(struct drm_connector *connector,
642 struct intel_encoder *encoder, 648 struct intel_encoder *encoder,
643 struct drm_display_mode *mode); 649 const struct drm_display_mode *adjusted_mode);
644 void (*audio_codec_disable)(struct intel_encoder *encoder); 650 void (*audio_codec_disable)(struct intel_encoder *encoder);
645 void (*fdi_link_train)(struct drm_crtc *crtc); 651 void (*fdi_link_train)(struct drm_crtc *crtc);
646 void (*init_clock_gating)(struct drm_device *dev); 652 void (*init_clock_gating)(struct drm_device *dev);
@@ -658,13 +664,6 @@ struct drm_i915_display_funcs {
658 /* render clock increase/decrease */ 664 /* render clock increase/decrease */
659 /* display clock increase/decrease */ 665 /* display clock increase/decrease */
660 /* pll clock increase/decrease */ 666 /* pll clock increase/decrease */
661
662 int (*setup_backlight)(struct intel_connector *connector, enum pipe pipe);
663 uint32_t (*get_backlight)(struct intel_connector *connector);
664 void (*set_backlight)(struct intel_connector *connector,
665 uint32_t level);
666 void (*disable_backlight)(struct intel_connector *connector);
667 void (*enable_backlight)(struct intel_connector *connector);
668}; 667};
669 668
670enum forcewake_domain_id { 669enum forcewake_domain_id {
@@ -882,7 +881,6 @@ struct intel_context {
882 } legacy_hw_ctx; 881 } legacy_hw_ctx;
883 882
884 /* Execlists */ 883 /* Execlists */
885 bool rcs_initialized;
886 struct { 884 struct {
887 struct drm_i915_gem_object *state; 885 struct drm_i915_gem_object *state;
888 struct intel_ringbuffer *ringbuf; 886 struct intel_ringbuffer *ringbuf;
@@ -941,6 +939,9 @@ struct i915_fbc {
941 FBC_CHIP_DEFAULT, /* disabled by default on this chip */ 939 FBC_CHIP_DEFAULT, /* disabled by default on this chip */
942 FBC_ROTATION, /* rotation is not supported */ 940 FBC_ROTATION, /* rotation is not supported */
943 FBC_IN_DBG_MASTER, /* kernel debugger is active */ 941 FBC_IN_DBG_MASTER, /* kernel debugger is active */
942 FBC_BAD_STRIDE, /* stride is not supported */
943 FBC_PIXEL_RATE, /* pixel rate is too big */
944 FBC_PIXEL_FORMAT /* pixel format is invalid */
944 } no_fbc_reason; 945 } no_fbc_reason;
945 946
946 bool (*fbc_enabled)(struct drm_i915_private *dev_priv); 947 bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
@@ -1034,7 +1035,7 @@ struct i915_suspend_saved_registers {
1034 u32 saveMI_ARB_STATE; 1035 u32 saveMI_ARB_STATE;
1035 u32 saveSWF0[16]; 1036 u32 saveSWF0[16];
1036 u32 saveSWF1[16]; 1037 u32 saveSWF1[16];
1037 u32 saveSWF2[3]; 1038 u32 saveSWF3[3];
1038 uint64_t saveFENCE[I915_MAX_NUM_FENCES]; 1039 uint64_t saveFENCE[I915_MAX_NUM_FENCES];
1039 u32 savePCH_PORT_HOTPLUG; 1040 u32 savePCH_PORT_HOTPLUG;
1040 u16 saveGCDGMBUS; 1041 u16 saveGCDGMBUS;
@@ -1136,7 +1137,6 @@ struct intel_gen6_power_mgmt {
1136 u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ 1137 u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
1137 u8 rp1_freq; /* "less than" RP0 power/freqency */ 1138 u8 rp1_freq; /* "less than" RP0 power/freqency */
1138 u8 rp0_freq; /* Non-overclocked max frequency. */ 1139 u8 rp0_freq; /* Non-overclocked max frequency. */
1139 u32 cz_freq;
1140 1140
1141 u8 up_threshold; /* Current %busy required to uplock */ 1141 u8 up_threshold; /* Current %busy required to uplock */
1142 u8 down_threshold; /* Current %busy required to downclock */ 1142 u8 down_threshold; /* Current %busy required to downclock */
@@ -1578,8 +1578,7 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
1578struct skl_ddb_allocation { 1578struct skl_ddb_allocation {
1579 struct skl_ddb_entry pipe[I915_MAX_PIPES]; 1579 struct skl_ddb_entry pipe[I915_MAX_PIPES];
1580 struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */ 1580 struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
1581 struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* y-plane */ 1581 struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
1582 struct skl_ddb_entry cursor[I915_MAX_PIPES];
1583}; 1582};
1584 1583
1585struct skl_wm_values { 1584struct skl_wm_values {
@@ -1587,18 +1586,13 @@ struct skl_wm_values {
1587 struct skl_ddb_allocation ddb; 1586 struct skl_ddb_allocation ddb;
1588 uint32_t wm_linetime[I915_MAX_PIPES]; 1587 uint32_t wm_linetime[I915_MAX_PIPES];
1589 uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8]; 1588 uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
1590 uint32_t cursor[I915_MAX_PIPES][8];
1591 uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES]; 1589 uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
1592 uint32_t cursor_trans[I915_MAX_PIPES];
1593}; 1590};
1594 1591
1595struct skl_wm_level { 1592struct skl_wm_level {
1596 bool plane_en[I915_MAX_PLANES]; 1593 bool plane_en[I915_MAX_PLANES];
1597 bool cursor_en;
1598 uint16_t plane_res_b[I915_MAX_PLANES]; 1594 uint16_t plane_res_b[I915_MAX_PLANES];
1599 uint8_t plane_res_l[I915_MAX_PLANES]; 1595 uint8_t plane_res_l[I915_MAX_PLANES];
1600 uint16_t cursor_res_b;
1601 uint8_t cursor_res_l;
1602}; 1596};
1603 1597
1604/* 1598/*
@@ -1693,7 +1687,7 @@ struct i915_execbuffer_params {
1693 struct drm_file *file; 1687 struct drm_file *file;
1694 uint32_t dispatch_flags; 1688 uint32_t dispatch_flags;
1695 uint32_t args_batch_start_offset; 1689 uint32_t args_batch_start_offset;
1696 uint32_t batch_obj_vm_offset; 1690 uint64_t batch_obj_vm_offset;
1697 struct intel_engine_cs *ring; 1691 struct intel_engine_cs *ring;
1698 struct drm_i915_gem_object *batch_obj; 1692 struct drm_i915_gem_object *batch_obj;
1699 struct intel_context *ctx; 1693 struct intel_context *ctx;
@@ -1716,6 +1710,8 @@ struct drm_i915_private {
1716 1710
1717 struct i915_virtual_gpu vgpu; 1711 struct i915_virtual_gpu vgpu;
1718 1712
1713 struct intel_guc guc;
1714
1719 struct intel_csr csr; 1715 struct intel_csr csr;
1720 1716
1721 /* Display CSR-related protection */ 1717 /* Display CSR-related protection */
@@ -1790,13 +1786,14 @@ struct drm_i915_private {
1790 struct mutex pps_mutex; 1786 struct mutex pps_mutex;
1791 1787
1792 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ 1788 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
1793 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
1794 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 1789 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
1795 1790
1796 unsigned int fsb_freq, mem_freq, is_ddr3; 1791 unsigned int fsb_freq, mem_freq, is_ddr3;
1797 unsigned int skl_boot_cdclk; 1792 unsigned int skl_boot_cdclk;
1798 unsigned int cdclk_freq, max_cdclk_freq; 1793 unsigned int cdclk_freq, max_cdclk_freq;
1794 unsigned int max_dotclk_freq;
1799 unsigned int hpll_freq; 1795 unsigned int hpll_freq;
1796 unsigned int czclk_freq;
1800 1797
1801 /** 1798 /**
1802 * wq - Driver workqueue for GEM. 1799 * wq - Driver workqueue for GEM.
@@ -1952,6 +1949,9 @@ struct drm_i915_private {
1952 1949
1953 bool edp_low_vswing; 1950 bool edp_low_vswing;
1954 1951
1952 /* perform PHY state sanity checks? */
1953 bool chv_phy_assert[2];
1954
1955 /* 1955 /*
1956 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch 1956 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
1957 * will be rejected. Instead look for a better place. 1957 * will be rejected. Instead look for a better place.
@@ -1968,6 +1968,11 @@ static inline struct drm_i915_private *dev_to_i915(struct device *dev)
1968 return to_i915(dev_get_drvdata(dev)); 1968 return to_i915(dev_get_drvdata(dev));
1969} 1969}
1970 1970
1971static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
1972{
1973 return container_of(guc, struct drm_i915_private, guc);
1974}
1975
1971/* Iterate over initialised rings */ 1976/* Iterate over initialised rings */
1972#define for_each_ring(ring__, dev_priv__, i__) \ 1977#define for_each_ring(ring__, dev_priv__, i__) \
1973 for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \ 1978 for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
@@ -2004,25 +2009,26 @@ struct drm_i915_gem_object_ops {
2004 2009
2005/* 2010/*
2006 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is 2011 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
2007 * considered to be the frontbuffer for the given plane interface-vise. This 2012 * considered to be the frontbuffer for the given plane interface-wise. This
2008 * doesn't mean that the hw necessarily already scans it out, but that any 2013 * doesn't mean that the hw necessarily already scans it out, but that any
2009 * rendering (by the cpu or gpu) will land in the frontbuffer eventually. 2014 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
2010 * 2015 *
2011 * We have one bit per pipe and per scanout plane type. 2016 * We have one bit per pipe and per scanout plane type.
2012 */ 2017 */
2013#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4 2018#define INTEL_MAX_SPRITE_BITS_PER_PIPE 5
2019#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
2014#define INTEL_FRONTBUFFER_BITS \ 2020#define INTEL_FRONTBUFFER_BITS \
2015 (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES) 2021 (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
2016#define INTEL_FRONTBUFFER_PRIMARY(pipe) \ 2022#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
2017 (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2023 (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
2018#define INTEL_FRONTBUFFER_CURSOR(pipe) \ 2024#define INTEL_FRONTBUFFER_CURSOR(pipe) \
2019 (1 << (1 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2025 (1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
2020#define INTEL_FRONTBUFFER_SPRITE(pipe) \ 2026#define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \
2021 (1 << (2 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2027 (1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
2022#define INTEL_FRONTBUFFER_OVERLAY(pipe) \ 2028#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
2023 (1 << (3 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2029 (1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
2024#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ 2030#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
2025 (0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2031 (0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
2026 2032
2027struct drm_i915_gem_object { 2033struct drm_i915_gem_object {
2028 struct drm_gem_object base; 2034 struct drm_gem_object base;
@@ -2480,6 +2486,11 @@ struct drm_i915_cmd_table {
2480#define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \ 2486#define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \
2481 INTEL_DEVID(dev) == 0x1915 || \ 2487 INTEL_DEVID(dev) == 0x1915 || \
2482 INTEL_DEVID(dev) == 0x191E) 2488 INTEL_DEVID(dev) == 0x191E)
2489#define IS_SKL_GT3(dev) (IS_SKYLAKE(dev) && \
2490 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
2491#define IS_SKL_GT4(dev) (IS_SKYLAKE(dev) && \
2492 (INTEL_DEVID(dev) & 0x00F0) == 0x0030)
2493
2483#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) 2494#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
2484 2495
2485#define SKL_REVID_A0 (0x0) 2496#define SKL_REVID_A0 (0x0)
@@ -2491,7 +2502,7 @@ struct drm_i915_cmd_table {
2491 2502
2492#define BXT_REVID_A0 (0x0) 2503#define BXT_REVID_A0 (0x0)
2493#define BXT_REVID_B0 (0x3) 2504#define BXT_REVID_B0 (0x3)
2494#define BXT_REVID_C0 (0x6) 2505#define BXT_REVID_C0 (0x9)
2495 2506
2496/* 2507/*
2497 * The genX designation typically refers to the render engine, so render 2508 * The genX designation typically refers to the render engine, so render
@@ -2525,7 +2536,8 @@ struct drm_i915_cmd_table {
2525#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) 2536#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
2526#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8) 2537#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8)
2527#define USES_PPGTT(dev) (i915.enable_ppgtt) 2538#define USES_PPGTT(dev) (i915.enable_ppgtt)
2528#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt == 2) 2539#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt >= 2)
2540#define USES_FULL_48BIT_PPGTT(dev) (i915.enable_ppgtt == 3)
2529 2541
2530#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) 2542#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
2531#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) 2543#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
@@ -2569,7 +2581,10 @@ struct drm_i915_cmd_table {
2569#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) 2581#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
2570#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) 2582#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
2571 2583
2572#define HAS_CSR(dev) (IS_SKYLAKE(dev)) 2584#define HAS_CSR(dev) (IS_GEN9(dev))
2585
2586#define HAS_GUC_UCODE(dev) (IS_GEN9(dev))
2587#define HAS_GUC_SCHED(dev) (IS_GEN9(dev))
2573 2588
2574#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \ 2589#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
2575 INTEL_INFO(dev)->gen >= 8) 2590 INTEL_INFO(dev)->gen >= 8)
@@ -2585,10 +2600,12 @@ struct drm_i915_cmd_table {
2585#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 2600#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
2586#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 2601#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
2587#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 2602#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
2603#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
2588 2604
2589#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type) 2605#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
2590#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT) 2606#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
2591#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) 2607#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
2608#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
2592#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 2609#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
2593#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) 2610#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
2594#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) 2611#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
@@ -2608,8 +2625,8 @@ struct drm_i915_cmd_table {
2608extern const struct drm_ioctl_desc i915_ioctls[]; 2625extern const struct drm_ioctl_desc i915_ioctls[];
2609extern int i915_max_ioctl; 2626extern int i915_max_ioctl;
2610 2627
2611extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state); 2628extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
2612extern int i915_resume_legacy(struct drm_device *dev); 2629extern int i915_resume_switcheroo(struct drm_device *dev);
2613 2630
2614/* i915_params.c */ 2631/* i915_params.c */
2615struct i915_params { 2632struct i915_params {
@@ -2631,7 +2648,6 @@ struct i915_params {
2631 int enable_cmd_parser; 2648 int enable_cmd_parser;
2632 /* leave bools at the end to not create holes */ 2649 /* leave bools at the end to not create holes */
2633 bool enable_hangcheck; 2650 bool enable_hangcheck;
2634 bool fastboot;
2635 bool prefault_disable; 2651 bool prefault_disable;
2636 bool load_detect_test; 2652 bool load_detect_test;
2637 bool reset; 2653 bool reset;
@@ -2642,6 +2658,7 @@ struct i915_params {
2642 int use_mmio_flip; 2658 int use_mmio_flip;
2643 int mmio_debug; 2659 int mmio_debug;
2644 bool verbose_state_checks; 2660 bool verbose_state_checks;
2661 bool nuclear_pageflip;
2645 int edp_vswing; 2662 int edp_vswing;
2646}; 2663};
2647extern struct i915_params i915 __read_mostly; 2664extern struct i915_params i915 __read_mostly;
@@ -2721,6 +2738,9 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
2721 2738
2722void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); 2739void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
2723void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); 2740void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
2741void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
2742 uint32_t mask,
2743 uint32_t bits);
2724void 2744void
2725ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask); 2745ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
2726void 2746void
@@ -2788,8 +2808,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
2788 size_t size); 2808 size_t size);
2789struct drm_i915_gem_object *i915_gem_object_create_from_data( 2809struct drm_i915_gem_object *i915_gem_object_create_from_data(
2790 struct drm_device *dev, const void *data, size_t size); 2810 struct drm_device *dev, const void *data, size_t size);
2791void i915_init_vm(struct drm_i915_private *dev_priv,
2792 struct i915_address_space *vm);
2793void i915_gem_free_object(struct drm_gem_object *obj); 2811void i915_gem_free_object(struct drm_gem_object *obj);
2794void i915_gem_vma_destroy(struct i915_vma *vma); 2812void i915_gem_vma_destroy(struct i915_vma *vma);
2795 2813
@@ -2800,6 +2818,8 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
2800#define PIN_OFFSET_BIAS (1<<3) 2818#define PIN_OFFSET_BIAS (1<<3)
2801#define PIN_USER (1<<4) 2819#define PIN_USER (1<<4)
2802#define PIN_UPDATE (1<<5) 2820#define PIN_UPDATE (1<<5)
2821#define PIN_ZONE_4G (1<<6)
2822#define PIN_HIGH (1<<7)
2803#define PIN_OFFSET_MASK (~4095) 2823#define PIN_OFFSET_MASK (~4095)
2804int __must_check 2824int __must_check
2805i915_gem_object_pin(struct drm_i915_gem_object *obj, 2825i915_gem_object_pin(struct drm_i915_gem_object *obj,
@@ -2815,6 +2835,11 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
2815int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, 2835int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
2816 u32 flags); 2836 u32 flags);
2817int __must_check i915_vma_unbind(struct i915_vma *vma); 2837int __must_check i915_vma_unbind(struct i915_vma *vma);
2838/*
2839 * BEWARE: Do not use the function below unless you can _absolutely_
2840 * _guarantee_ VMA in question is _not in use_ anywhere.
2841 */
2842int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
2818int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); 2843int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
2819void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); 2844void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
2820void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 2845void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
@@ -2991,13 +3016,11 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
2991struct dma_buf *i915_gem_prime_export(struct drm_device *dev, 3016struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
2992 struct drm_gem_object *gem_obj, int flags); 3017 struct drm_gem_object *gem_obj, int flags);
2993 3018
2994unsigned long 3019u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
2995i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, 3020 const struct i915_ggtt_view *view);
2996 const struct i915_ggtt_view *view); 3021u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
2997unsigned long 3022 struct i915_address_space *vm);
2998i915_gem_obj_offset(struct drm_i915_gem_object *o, 3023static inline u64
2999 struct i915_address_space *vm);
3000static inline unsigned long
3001i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o) 3024i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
3002{ 3025{
3003 return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal); 3026 return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
@@ -3145,7 +3168,6 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
3145 unsigned long end, 3168 unsigned long end,
3146 unsigned flags); 3169 unsigned flags);
3147int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); 3170int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
3148int i915_gem_evict_everything(struct drm_device *dev);
3149 3171
3150/* belongs in i915_gem_gtt.h */ 3172/* belongs in i915_gem_gtt.h */
3151static inline void i915_gem_chipset_flush(struct drm_device *dev) 3173static inline void i915_gem_chipset_flush(struct drm_device *dev)
@@ -3158,6 +3180,10 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
3158int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, 3180int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
3159 struct drm_mm_node *node, u64 size, 3181 struct drm_mm_node *node, u64 size,
3160 unsigned alignment); 3182 unsigned alignment);
3183int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
3184 struct drm_mm_node *node, u64 size,
3185 unsigned alignment, u64 start,
3186 u64 end);
3161void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, 3187void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
3162 struct drm_mm_node *node); 3188 struct drm_mm_node *node);
3163int i915_gem_init_stolen(struct drm_device *dev); 3189int i915_gem_init_stolen(struct drm_device *dev);
@@ -3172,11 +3198,12 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
3172 3198
3173/* i915_gem_shrinker.c */ 3199/* i915_gem_shrinker.c */
3174unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, 3200unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
3175 long target, 3201 unsigned long target,
3176 unsigned flags); 3202 unsigned flags);
3177#define I915_SHRINK_PURGEABLE 0x1 3203#define I915_SHRINK_PURGEABLE 0x1
3178#define I915_SHRINK_UNBOUND 0x2 3204#define I915_SHRINK_UNBOUND 0x2
3179#define I915_SHRINK_BOUND 0x4 3205#define I915_SHRINK_BOUND 0x4
3206#define I915_SHRINK_ACTIVE 0x8
3180unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); 3207unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
3181void i915_gem_shrinker_init(struct drm_i915_private *dev_priv); 3208void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
3182 3209
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 399aab265db3..5cf4a1998273 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1005,12 +1005,14 @@ out:
1005 if (!needs_clflush_after && 1005 if (!needs_clflush_after &&
1006 obj->base.write_domain != I915_GEM_DOMAIN_CPU) { 1006 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1007 if (i915_gem_clflush_object(obj, obj->pin_display)) 1007 if (i915_gem_clflush_object(obj, obj->pin_display))
1008 i915_gem_chipset_flush(dev); 1008 needs_clflush_after = true;
1009 } 1009 }
1010 } 1010 }
1011 1011
1012 if (needs_clflush_after) 1012 if (needs_clflush_after)
1013 i915_gem_chipset_flush(dev); 1013 i915_gem_chipset_flush(dev);
1014 else
1015 obj->cache_dirty = true;
1014 1016
1015 intel_fb_obj_flush(obj, false, ORIGIN_CPU); 1017 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1016 return ret; 1018 return ret;
@@ -1711,8 +1713,8 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1711 1713
1712/** 1714/**
1713 * i915_gem_fault - fault a page into the GTT 1715 * i915_gem_fault - fault a page into the GTT
1714 * vma: VMA in question 1716 * @vma: VMA in question
1715 * vmf: fault info 1717 * @vmf: fault info
1716 * 1718 *
1717 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped 1719 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
1718 * from userspace. The fault handler takes care of binding the object to 1720 * from userspace. The fault handler takes care of binding the object to
@@ -3205,7 +3207,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
3205 old_write_domain); 3207 old_write_domain);
3206} 3208}
3207 3209
3208int i915_vma_unbind(struct i915_vma *vma) 3210static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
3209{ 3211{
3210 struct drm_i915_gem_object *obj = vma->obj; 3212 struct drm_i915_gem_object *obj = vma->obj;
3211 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 3213 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
@@ -3224,13 +3226,11 @@ int i915_vma_unbind(struct i915_vma *vma)
3224 3226
3225 BUG_ON(obj->pages == NULL); 3227 BUG_ON(obj->pages == NULL);
3226 3228
3227 ret = i915_gem_object_wait_rendering(obj, false); 3229 if (wait) {
3228 if (ret) 3230 ret = i915_gem_object_wait_rendering(obj, false);
3229 return ret; 3231 if (ret)
3230 /* Continue on if we fail due to EIO, the GPU is hung so we 3232 return ret;
3231 * should be safe and we need to cleanup or else we might 3233 }
3232 * cause memory corruption through use-after-free.
3233 */
3234 3234
3235 if (i915_is_ggtt(vma->vm) && 3235 if (i915_is_ggtt(vma->vm) &&
3236 vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { 3236 vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
@@ -3275,6 +3275,16 @@ int i915_vma_unbind(struct i915_vma *vma)
3275 return 0; 3275 return 0;
3276} 3276}
3277 3277
3278int i915_vma_unbind(struct i915_vma *vma)
3279{
3280 return __i915_vma_unbind(vma, true);
3281}
3282
3283int __i915_vma_unbind_no_wait(struct i915_vma *vma)
3284{
3285 return __i915_vma_unbind(vma, false);
3286}
3287
3278int i915_gpu_idle(struct drm_device *dev) 3288int i915_gpu_idle(struct drm_device *dev)
3279{ 3289{
3280 struct drm_i915_private *dev_priv = dev->dev_private; 3290 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3354,11 +3364,10 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3354{ 3364{
3355 struct drm_device *dev = obj->base.dev; 3365 struct drm_device *dev = obj->base.dev;
3356 struct drm_i915_private *dev_priv = dev->dev_private; 3366 struct drm_i915_private *dev_priv = dev->dev_private;
3357 u32 size, fence_size, fence_alignment, unfenced_alignment; 3367 u32 fence_alignment, unfenced_alignment;
3358 u64 start = 3368 u32 search_flag, alloc_flag;
3359 flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0; 3369 u64 start, end;
3360 u64 end = 3370 u64 size, fence_size;
3361 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
3362 struct i915_vma *vma; 3371 struct i915_vma *vma;
3363 int ret; 3372 int ret;
3364 3373
@@ -3398,6 +3407,13 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3398 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size; 3407 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3399 } 3408 }
3400 3409
3410 start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3411 end = vm->total;
3412 if (flags & PIN_MAPPABLE)
3413 end = min_t(u64, end, dev_priv->gtt.mappable_end);
3414 if (flags & PIN_ZONE_4G)
3415 end = min_t(u64, end, (1ULL << 32));
3416
3401 if (alignment == 0) 3417 if (alignment == 0)
3402 alignment = flags & PIN_MAPPABLE ? fence_alignment : 3418 alignment = flags & PIN_MAPPABLE ? fence_alignment :
3403 unfenced_alignment; 3419 unfenced_alignment;
@@ -3413,7 +3429,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3413 * attempt to find space. 3429 * attempt to find space.
3414 */ 3430 */
3415 if (size > end) { 3431 if (size > end) {
3416 DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%llu\n", 3432 DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%llu > %s aperture=%llu\n",
3417 ggtt_view ? ggtt_view->type : 0, 3433 ggtt_view ? ggtt_view->type : 0,
3418 size, 3434 size,
3419 flags & PIN_MAPPABLE ? "mappable" : "total", 3435 flags & PIN_MAPPABLE ? "mappable" : "total",
@@ -3433,13 +3449,21 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3433 if (IS_ERR(vma)) 3449 if (IS_ERR(vma))
3434 goto err_unpin; 3450 goto err_unpin;
3435 3451
3452 if (flags & PIN_HIGH) {
3453 search_flag = DRM_MM_SEARCH_BELOW;
3454 alloc_flag = DRM_MM_CREATE_TOP;
3455 } else {
3456 search_flag = DRM_MM_SEARCH_DEFAULT;
3457 alloc_flag = DRM_MM_CREATE_DEFAULT;
3458 }
3459
3436search_free: 3460search_free:
3437 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, 3461 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3438 size, alignment, 3462 size, alignment,
3439 obj->cache_level, 3463 obj->cache_level,
3440 start, end, 3464 start, end,
3441 DRM_MM_SEARCH_DEFAULT, 3465 search_flag,
3442 DRM_MM_CREATE_DEFAULT); 3466 alloc_flag);
3443 if (ret) { 3467 if (ret) {
3444 ret = i915_gem_evict_something(dev, vm, size, alignment, 3468 ret = i915_gem_evict_something(dev, vm, size, alignment,
3445 obj->cache_level, 3469 obj->cache_level,
@@ -3632,59 +3656,117 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3632 return 0; 3656 return 0;
3633} 3657}
3634 3658
3659/**
3660 * Changes the cache-level of an object across all VMA.
3661 *
3662 * After this function returns, the object will be in the new cache-level
3663 * across all GTT and the contents of the backing storage will be coherent,
3664 * with respect to the new cache-level. In order to keep the backing storage
3665 * coherent for all users, we only allow a single cache level to be set
3666 * globally on the object and prevent it from being changed whilst the
3667 * hardware is reading from the object. That is if the object is currently
3668 * on the scanout it will be set to uncached (or equivalent display
3669 * cache coherency) and all non-MOCS GPU access will also be uncached so
3670 * that all direct access to the scanout remains coherent.
3671 */
3635int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 3672int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3636 enum i915_cache_level cache_level) 3673 enum i915_cache_level cache_level)
3637{ 3674{
3638 struct drm_device *dev = obj->base.dev; 3675 struct drm_device *dev = obj->base.dev;
3639 struct i915_vma *vma, *next; 3676 struct i915_vma *vma, *next;
3640 int ret; 3677 bool bound = false;
3678 int ret = 0;
3641 3679
3642 if (obj->cache_level == cache_level) 3680 if (obj->cache_level == cache_level)
3643 return 0; 3681 goto out;
3644
3645 if (i915_gem_obj_is_pinned(obj)) {
3646 DRM_DEBUG("can not change the cache level of pinned objects\n");
3647 return -EBUSY;
3648 }
3649 3682
3683 /* Inspect the list of currently bound VMA and unbind any that would
3684 * be invalid given the new cache-level. This is principally to
3685 * catch the issue of the CS prefetch crossing page boundaries and
3686 * reading an invalid PTE on older architectures.
3687 */
3650 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { 3688 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
3689 if (!drm_mm_node_allocated(&vma->node))
3690 continue;
3691
3692 if (vma->pin_count) {
3693 DRM_DEBUG("can not change the cache level of pinned objects\n");
3694 return -EBUSY;
3695 }
3696
3651 if (!i915_gem_valid_gtt_space(vma, cache_level)) { 3697 if (!i915_gem_valid_gtt_space(vma, cache_level)) {
3652 ret = i915_vma_unbind(vma); 3698 ret = i915_vma_unbind(vma);
3653 if (ret) 3699 if (ret)
3654 return ret; 3700 return ret;
3655 } 3701 } else
3702 bound = true;
3656 } 3703 }
3657 3704
3658 if (i915_gem_obj_bound_any(obj)) { 3705 /* We can reuse the existing drm_mm nodes but need to change the
3706 * cache-level on the PTE. We could simply unbind them all and
3707 * rebind with the correct cache-level on next use. However since
3708 * we already have a valid slot, dma mapping, pages etc, we may as
3709 * rewrite the PTE in the belief that doing so tramples upon less
3710 * state and so involves less work.
3711 */
3712 if (bound) {
3713 /* Before we change the PTE, the GPU must not be accessing it.
3714 * If we wait upon the object, we know that all the bound
3715 * VMA are no longer active.
3716 */
3659 ret = i915_gem_object_wait_rendering(obj, false); 3717 ret = i915_gem_object_wait_rendering(obj, false);
3660 if (ret) 3718 if (ret)
3661 return ret; 3719 return ret;
3662 3720
3663 i915_gem_object_finish_gtt(obj); 3721 if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) {
3664 3722 /* Access to snoopable pages through the GTT is
3665 /* Before SandyBridge, you could not use tiling or fence 3723 * incoherent and on some machines causes a hard
3666 * registers with snooped memory, so relinquish any fences 3724 * lockup. Relinquish the CPU mmaping to force
3667 * currently pointing to our region in the aperture. 3725 * userspace to refault in the pages and we can
3668 */ 3726 * then double check if the GTT mapping is still
3669 if (INTEL_INFO(dev)->gen < 6) { 3727 * valid for that pointer access.
3728 */
3729 i915_gem_release_mmap(obj);
3730
3731 /* As we no longer need a fence for GTT access,
3732 * we can relinquish it now (and so prevent having
3733 * to steal a fence from someone else on the next
3734 * fence request). Note GPU activity would have
3735 * dropped the fence as all snoopable access is
3736 * supposed to be linear.
3737 */
3670 ret = i915_gem_object_put_fence(obj); 3738 ret = i915_gem_object_put_fence(obj);
3671 if (ret) 3739 if (ret)
3672 return ret; 3740 return ret;
3741 } else {
3742 /* We either have incoherent backing store and
3743 * so no GTT access or the architecture is fully
3744 * coherent. In such cases, existing GTT mmaps
3745 * ignore the cache bit in the PTE and we can
3746 * rewrite it without confusing the GPU or having
3747 * to force userspace to fault back in its mmaps.
3748 */
3673 } 3749 }
3674 3750
3675 list_for_each_entry(vma, &obj->vma_list, vma_link) 3751 list_for_each_entry(vma, &obj->vma_list, vma_link) {
3676 if (drm_mm_node_allocated(&vma->node)) { 3752 if (!drm_mm_node_allocated(&vma->node))
3677 ret = i915_vma_bind(vma, cache_level, 3753 continue;
3678 PIN_UPDATE); 3754
3679 if (ret) 3755 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3680 return ret; 3756 if (ret)
3681 } 3757 return ret;
3758 }
3682 } 3759 }
3683 3760
3684 list_for_each_entry(vma, &obj->vma_list, vma_link) 3761 list_for_each_entry(vma, &obj->vma_list, vma_link)
3685 vma->node.color = cache_level; 3762 vma->node.color = cache_level;
3686 obj->cache_level = cache_level; 3763 obj->cache_level = cache_level;
3687 3764
3765out:
3766 /* Flush the dirty CPU caches to the backing storage so that the
3767 * object is now coherent at its new cache level (with respect
3768 * to the access domain).
3769 */
3688 if (obj->cache_dirty && 3770 if (obj->cache_dirty &&
3689 obj->base.write_domain != I915_GEM_DOMAIN_CPU && 3771 obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
3690 cpu_write_needs_clflush(obj)) { 3772 cpu_write_needs_clflush(obj)) {
@@ -3737,6 +3819,15 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3737 level = I915_CACHE_NONE; 3819 level = I915_CACHE_NONE;
3738 break; 3820 break;
3739 case I915_CACHING_CACHED: 3821 case I915_CACHING_CACHED:
3822 /*
3823 * Due to a HW issue on BXT A stepping, GPU stores via a
3824 * snooped mapping may leave stale data in a corresponding CPU
3825 * cacheline, whereas normally such cachelines would get
3826 * invalidated.
3827 */
3828 if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)
3829 return -ENODEV;
3830
3740 level = I915_CACHE_LLC; 3831 level = I915_CACHE_LLC;
3741 break; 3832 break;
3742 case I915_CACHING_DISPLAY: 3833 case I915_CACHING_DISPLAY:
@@ -4010,15 +4101,13 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
4010 return -EBUSY; 4101 return -EBUSY;
4011 4102
4012 if (i915_vma_misplaced(vma, alignment, flags)) { 4103 if (i915_vma_misplaced(vma, alignment, flags)) {
4013 unsigned long offset;
4014 offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view) :
4015 i915_gem_obj_offset(obj, vm);
4016 WARN(vma->pin_count, 4104 WARN(vma->pin_count,
4017 "bo is already pinned in %s with incorrect alignment:" 4105 "bo is already pinned in %s with incorrect alignment:"
4018 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," 4106 " offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d,"
4019 " obj->map_and_fenceable=%d\n", 4107 " obj->map_and_fenceable=%d\n",
4020 ggtt_view ? "ggtt" : "ppgtt", 4108 ggtt_view ? "ggtt" : "ppgtt",
4021 offset, 4109 upper_32_bits(vma->node.start),
4110 lower_32_bits(vma->node.start),
4022 alignment, 4111 alignment,
4023 !!(flags & PIN_MAPPABLE), 4112 !!(flags & PIN_MAPPABLE),
4024 obj->map_and_fenceable); 4113 obj->map_and_fenceable);
@@ -4525,22 +4614,6 @@ void i915_gem_init_swizzling(struct drm_device *dev)
4525 BUG(); 4614 BUG();
4526} 4615}
4527 4616
4528static bool
4529intel_enable_blt(struct drm_device *dev)
4530{
4531 if (!HAS_BLT(dev))
4532 return false;
4533
4534 /* The blitter was dysfunctional on early prototypes */
4535 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4536 DRM_INFO("BLT not supported on this pre-production hardware;"
4537 " graphics performance will be degraded.\n");
4538 return false;
4539 }
4540
4541 return true;
4542}
4543
4544static void init_unused_ring(struct drm_device *dev, u32 base) 4617static void init_unused_ring(struct drm_device *dev, u32 base)
4545{ 4618{
4546 struct drm_i915_private *dev_priv = dev->dev_private; 4619 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4583,7 +4656,7 @@ int i915_gem_init_rings(struct drm_device *dev)
4583 goto cleanup_render_ring; 4656 goto cleanup_render_ring;
4584 } 4657 }
4585 4658
4586 if (intel_enable_blt(dev)) { 4659 if (HAS_BLT(dev)) {
4587 ret = intel_init_blt_ring_buffer(dev); 4660 ret = intel_init_blt_ring_buffer(dev);
4588 if (ret) 4661 if (ret)
4589 goto cleanup_bsd_ring; 4662 goto cleanup_bsd_ring;
@@ -4601,14 +4674,8 @@ int i915_gem_init_rings(struct drm_device *dev)
4601 goto cleanup_vebox_ring; 4674 goto cleanup_vebox_ring;
4602 } 4675 }
4603 4676
4604 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4605 if (ret)
4606 goto cleanup_bsd2_ring;
4607
4608 return 0; 4677 return 0;
4609 4678
4610cleanup_bsd2_ring:
4611 intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
4612cleanup_vebox_ring: 4679cleanup_vebox_ring:
4613 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]); 4680 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4614cleanup_blt_ring: 4681cleanup_blt_ring:
@@ -4678,6 +4745,33 @@ i915_gem_init_hw(struct drm_device *dev)
4678 goto out; 4745 goto out;
4679 } 4746 }
4680 4747
4748 /* We can't enable contexts until all firmware is loaded */
4749 if (HAS_GUC_UCODE(dev)) {
4750 ret = intel_guc_ucode_load(dev);
4751 if (ret) {
4752 /*
4753 * If we got an error and GuC submission is enabled, map
4754 * the error to -EIO so the GPU will be declared wedged.
4755 * OTOH, if we didn't intend to use the GuC anyway, just
4756 * discard the error and carry on.
4757 */
4758 DRM_ERROR("Failed to initialize GuC, error %d%s\n", ret,
4759 i915.enable_guc_submission ? "" :
4760 " (ignored)");
4761 ret = i915.enable_guc_submission ? -EIO : 0;
4762 if (ret)
4763 goto out;
4764 }
4765 }
4766
4767 /*
4768 * Increment the next seqno by 0x100 so we have a visible break
4769 * on re-initialisation
4770 */
4771 ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
4772 if (ret)
4773 goto out;
4774
4681 /* Now it is safe to go back round and do everything else: */ 4775 /* Now it is safe to go back round and do everything else: */
4682 for_each_ring(ring, dev_priv, i) { 4776 for_each_ring(ring, dev_priv, i) {
4683 struct drm_i915_gem_request *req; 4777 struct drm_i915_gem_request *req;
@@ -4815,18 +4909,6 @@ init_ring_lists(struct intel_engine_cs *ring)
4815 INIT_LIST_HEAD(&ring->request_list); 4909 INIT_LIST_HEAD(&ring->request_list);
4816} 4910}
4817 4911
4818void i915_init_vm(struct drm_i915_private *dev_priv,
4819 struct i915_address_space *vm)
4820{
4821 if (!i915_is_ggtt(vm))
4822 drm_mm_init(&vm->mm, vm->start, vm->total);
4823 vm->dev = dev_priv->dev;
4824 INIT_LIST_HEAD(&vm->active_list);
4825 INIT_LIST_HEAD(&vm->inactive_list);
4826 INIT_LIST_HEAD(&vm->global_link);
4827 list_add_tail(&vm->global_link, &dev_priv->vm_list);
4828}
4829
4830void 4912void
4831i915_gem_load(struct drm_device *dev) 4913i915_gem_load(struct drm_device *dev)
4832{ 4914{
@@ -4850,8 +4932,6 @@ i915_gem_load(struct drm_device *dev)
4850 NULL); 4932 NULL);
4851 4933
4852 INIT_LIST_HEAD(&dev_priv->vm_list); 4934 INIT_LIST_HEAD(&dev_priv->vm_list);
4853 i915_init_vm(dev_priv, &dev_priv->gtt.base);
4854
4855 INIT_LIST_HEAD(&dev_priv->context_list); 4935 INIT_LIST_HEAD(&dev_priv->context_list);
4856 INIT_LIST_HEAD(&dev_priv->mm.unbound_list); 4936 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4857 INIT_LIST_HEAD(&dev_priv->mm.bound_list); 4937 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
@@ -4879,6 +4959,14 @@ i915_gem_load(struct drm_device *dev)
4879 dev_priv->num_fence_regs = 4959 dev_priv->num_fence_regs =
4880 I915_READ(vgtif_reg(avail_rs.fence_num)); 4960 I915_READ(vgtif_reg(avail_rs.fence_num));
4881 4961
4962 /*
4963 * Set initial sequence number for requests.
4964 * Using this number allows the wraparound to happen early,
4965 * catching any obvious problems.
4966 */
4967 dev_priv->next_seqno = ((u32)~0 - 0x1100);
4968 dev_priv->last_seqno = ((u32)~0 - 0x1101);
4969
4882 /* Initialize fence registers to zero */ 4970 /* Initialize fence registers to zero */
4883 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 4971 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4884 i915_gem_restore_fences(dev); 4972 i915_gem_restore_fences(dev);
@@ -4948,9 +5036,9 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4948 5036
4949/** 5037/**
4950 * i915_gem_track_fb - update frontbuffer tracking 5038 * i915_gem_track_fb - update frontbuffer tracking
4951 * old: current GEM buffer for the frontbuffer slots 5039 * @old: current GEM buffer for the frontbuffer slots
4952 * new: new GEM buffer for the frontbuffer slots 5040 * @new: new GEM buffer for the frontbuffer slots
4953 * frontbuffer_bits: bitmask of frontbuffer slots 5041 * @frontbuffer_bits: bitmask of frontbuffer slots
4954 * 5042 *
4955 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them 5043 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
4956 * from @old and setting them in @new. Both @old and @new can be NULL. 5044 * from @old and setting them in @new. Both @old and @new can be NULL.
@@ -4973,9 +5061,8 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
4973} 5061}
4974 5062
4975/* All the new VM stuff */ 5063/* All the new VM stuff */
4976unsigned long 5064u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
4977i915_gem_obj_offset(struct drm_i915_gem_object *o, 5065 struct i915_address_space *vm)
4978 struct i915_address_space *vm)
4979{ 5066{
4980 struct drm_i915_private *dev_priv = o->base.dev->dev_private; 5067 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4981 struct i915_vma *vma; 5068 struct i915_vma *vma;
@@ -4995,9 +5082,8 @@ i915_gem_obj_offset(struct drm_i915_gem_object *o,
4995 return -1; 5082 return -1;
4996} 5083}
4997 5084
4998unsigned long 5085u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
4999i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, 5086 const struct i915_ggtt_view *view)
5000 const struct i915_ggtt_view *view)
5001{ 5087{
5002 struct i915_address_space *ggtt = i915_obj_to_ggtt(o); 5088 struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
5003 struct i915_vma *vma; 5089 struct i915_vma *vma;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8e893b354bcc..8c688a5f1589 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -133,6 +133,23 @@ static int get_context_size(struct drm_device *dev)
133 return ret; 133 return ret;
134} 134}
135 135
136static void i915_gem_context_clean(struct intel_context *ctx)
137{
138 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
139 struct i915_vma *vma, *next;
140
141 if (!ppgtt)
142 return;
143
144 WARN_ON(!list_empty(&ppgtt->base.active_list));
145
146 list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
147 mm_list) {
148 if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
149 break;
150 }
151}
152
136void i915_gem_context_free(struct kref *ctx_ref) 153void i915_gem_context_free(struct kref *ctx_ref)
137{ 154{
138 struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref); 155 struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
@@ -142,6 +159,13 @@ void i915_gem_context_free(struct kref *ctx_ref)
142 if (i915.enable_execlists) 159 if (i915.enable_execlists)
143 intel_lr_context_free(ctx); 160 intel_lr_context_free(ctx);
144 161
162 /*
163 * This context is going away and we need to remove all VMAs still
164 * around. This is to handle imported shared objects for which
165 * destructor did not run when their handles were closed.
166 */
167 i915_gem_context_clean(ctx);
168
145 i915_ppgtt_put(ctx->ppgtt); 169 i915_ppgtt_put(ctx->ppgtt);
146 170
147 if (ctx->legacy_hw_ctx.rcs_state) 171 if (ctx->legacy_hw_ctx.rcs_state)
@@ -332,6 +356,13 @@ int i915_gem_context_init(struct drm_device *dev)
332 if (WARN_ON(dev_priv->ring[RCS].default_context)) 356 if (WARN_ON(dev_priv->ring[RCS].default_context))
333 return 0; 357 return 0;
334 358
359 if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
360 if (!i915.enable_execlists) {
361 DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
362 return -EINVAL;
363 }
364 }
365
335 if (i915.enable_execlists) { 366 if (i915.enable_execlists) {
336 /* NB: intentionally left blank. We will allocate our own 367 /* NB: intentionally left blank. We will allocate our own
337 * backing objects as we need them, thank you very much */ 368 * backing objects as we need them, thank you very much */
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index d09e35ed9c9a..d71a133ceff5 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -237,48 +237,3 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
237 237
238 return 0; 238 return 0;
239} 239}
240
241/**
242 * i915_gem_evict_everything - Try to evict all objects
243 * @dev: Device to evict objects for
244 *
245 * This functions tries to evict all gem objects from all address spaces. Used
246 * by the shrinker as a last-ditch effort and for suspend, before releasing the
247 * backing storage of all unbound objects.
248 */
249int
250i915_gem_evict_everything(struct drm_device *dev)
251{
252 struct drm_i915_private *dev_priv = dev->dev_private;
253 struct i915_address_space *vm, *v;
254 bool lists_empty = true;
255 int ret;
256
257 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
258 lists_empty = (list_empty(&vm->inactive_list) &&
259 list_empty(&vm->active_list));
260 if (!lists_empty)
261 lists_empty = false;
262 }
263
264 if (lists_empty)
265 return -ENOSPC;
266
267 trace_i915_gem_evict_everything(dev);
268
269 /* The gpu_idle will flush everything in the write domain to the
270 * active list. Then we must move everything off the active list
271 * with retire requests.
272 */
273 ret = i915_gpu_idle(dev);
274 if (ret)
275 return ret;
276
277 i915_gem_retire_requests(dev);
278
279 /* Having flushed everything, unbind() should never raise an error */
280 list_for_each_entry_safe(vm, v, &dev_priv->vm_list, global_link)
281 WARN_ON(i915_gem_evict_vm(vm, false));
282
283 return 0;
284}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a953d4975b8c..6ed7d63a0688 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -590,10 +590,17 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
590 flags |= PIN_GLOBAL; 590 flags |= PIN_GLOBAL;
591 591
592 if (!drm_mm_node_allocated(&vma->node)) { 592 if (!drm_mm_node_allocated(&vma->node)) {
593 /* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
594 * limit address to the first 4GBs for unflagged objects.
595 */
596 if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
597 flags |= PIN_ZONE_4G;
593 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP) 598 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
594 flags |= PIN_GLOBAL | PIN_MAPPABLE; 599 flags |= PIN_GLOBAL | PIN_MAPPABLE;
595 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS) 600 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
596 flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; 601 flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
602 if ((flags & PIN_MAPPABLE) == 0)
603 flags |= PIN_HIGH;
597 } 604 }
598 605
599 ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags); 606 ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
@@ -671,6 +678,10 @@ eb_vma_misplaced(struct i915_vma *vma)
671 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable) 678 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
672 return !only_mappable_for_reloc(entry->flags); 679 return !only_mappable_for_reloc(entry->flags);
673 680
681 if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
682 (vma->node.start + vma->node.size - 1) >> 32)
683 return true;
684
674 return false; 685 return false;
675} 686}
676 687
@@ -934,7 +945,21 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
934 if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS) 945 if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
935 return false; 946 return false;
936 947
937 return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0; 948 /* Kernel clipping was a DRI1 misfeature */
949 if (exec->num_cliprects || exec->cliprects_ptr)
950 return false;
951
952 if (exec->DR4 == 0xffffffff) {
953 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
954 exec->DR4 = 0;
955 }
956 if (exec->DR1 || exec->DR4)
957 return false;
958
959 if ((exec->batch_start_offset | exec->batch_len) & 0x7)
960 return false;
961
962 return true;
938} 963}
939 964
940static int 965static int
@@ -1009,7 +1034,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
1009 } 1034 }
1010 1035
1011 if (i915.enable_execlists && !ctx->engine[ring->id].state) { 1036 if (i915.enable_execlists && !ctx->engine[ring->id].state) {
1012 int ret = intel_lr_context_deferred_create(ctx, ring); 1037 int ret = intel_lr_context_deferred_alloc(ctx, ring);
1013 if (ret) { 1038 if (ret) {
1014 DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret); 1039 DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
1015 return ERR_PTR(ret); 1040 return ERR_PTR(ret);
@@ -1098,47 +1123,6 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
1098 return 0; 1123 return 0;
1099} 1124}
1100 1125
1101static int
1102i915_emit_box(struct drm_i915_gem_request *req,
1103 struct drm_clip_rect *box,
1104 int DR1, int DR4)
1105{
1106 struct intel_engine_cs *ring = req->ring;
1107 int ret;
1108
1109 if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
1110 box->y2 <= 0 || box->x2 <= 0) {
1111 DRM_ERROR("Bad box %d,%d..%d,%d\n",
1112 box->x1, box->y1, box->x2, box->y2);
1113 return -EINVAL;
1114 }
1115
1116 if (INTEL_INFO(ring->dev)->gen >= 4) {
1117 ret = intel_ring_begin(req, 4);
1118 if (ret)
1119 return ret;
1120
1121 intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965);
1122 intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
1123 intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
1124 intel_ring_emit(ring, DR4);
1125 } else {
1126 ret = intel_ring_begin(req, 6);
1127 if (ret)
1128 return ret;
1129
1130 intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO);
1131 intel_ring_emit(ring, DR1);
1132 intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
1133 intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
1134 intel_ring_emit(ring, DR4);
1135 intel_ring_emit(ring, 0);
1136 }
1137 intel_ring_advance(ring);
1138
1139 return 0;
1140}
1141
1142static struct drm_i915_gem_object* 1126static struct drm_i915_gem_object*
1143i915_gem_execbuffer_parse(struct intel_engine_cs *ring, 1127i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
1144 struct drm_i915_gem_exec_object2 *shadow_exec_entry, 1128 struct drm_i915_gem_exec_object2 *shadow_exec_entry,
@@ -1197,65 +1181,21 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
1197 struct drm_i915_gem_execbuffer2 *args, 1181 struct drm_i915_gem_execbuffer2 *args,
1198 struct list_head *vmas) 1182 struct list_head *vmas)
1199{ 1183{
1200 struct drm_clip_rect *cliprects = NULL;
1201 struct drm_device *dev = params->dev; 1184 struct drm_device *dev = params->dev;
1202 struct intel_engine_cs *ring = params->ring; 1185 struct intel_engine_cs *ring = params->ring;
1203 struct drm_i915_private *dev_priv = dev->dev_private; 1186 struct drm_i915_private *dev_priv = dev->dev_private;
1204 u64 exec_start, exec_len; 1187 u64 exec_start, exec_len;
1205 int instp_mode; 1188 int instp_mode;
1206 u32 instp_mask; 1189 u32 instp_mask;
1207 int i, ret = 0; 1190 int ret;
1208
1209 if (args->num_cliprects != 0) {
1210 if (ring != &dev_priv->ring[RCS]) {
1211 DRM_DEBUG("clip rectangles are only valid with the render ring\n");
1212 return -EINVAL;
1213 }
1214
1215 if (INTEL_INFO(dev)->gen >= 5) {
1216 DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
1217 return -EINVAL;
1218 }
1219
1220 if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
1221 DRM_DEBUG("execbuf with %u cliprects\n",
1222 args->num_cliprects);
1223 return -EINVAL;
1224 }
1225
1226 cliprects = kcalloc(args->num_cliprects,
1227 sizeof(*cliprects),
1228 GFP_KERNEL);
1229 if (cliprects == NULL) {
1230 ret = -ENOMEM;
1231 goto error;
1232 }
1233
1234 if (copy_from_user(cliprects,
1235 to_user_ptr(args->cliprects_ptr),
1236 sizeof(*cliprects)*args->num_cliprects)) {
1237 ret = -EFAULT;
1238 goto error;
1239 }
1240 } else {
1241 if (args->DR4 == 0xffffffff) {
1242 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
1243 args->DR4 = 0;
1244 }
1245
1246 if (args->DR1 || args->DR4 || args->cliprects_ptr) {
1247 DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
1248 return -EINVAL;
1249 }
1250 }
1251 1191
1252 ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas); 1192 ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
1253 if (ret) 1193 if (ret)
1254 goto error; 1194 return ret;
1255 1195
1256 ret = i915_switch_context(params->request); 1196 ret = i915_switch_context(params->request);
1257 if (ret) 1197 if (ret)
1258 goto error; 1198 return ret;
1259 1199
1260 WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id), 1200 WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
1261 "%s didn't clear reload\n", ring->name); 1201 "%s didn't clear reload\n", ring->name);
@@ -1268,22 +1208,19 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
1268 case I915_EXEC_CONSTANTS_REL_SURFACE: 1208 case I915_EXEC_CONSTANTS_REL_SURFACE:
1269 if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) { 1209 if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
1270 DRM_DEBUG("non-0 rel constants mode on non-RCS\n"); 1210 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
1271 ret = -EINVAL; 1211 return -EINVAL;
1272 goto error;
1273 } 1212 }
1274 1213
1275 if (instp_mode != dev_priv->relative_constants_mode) { 1214 if (instp_mode != dev_priv->relative_constants_mode) {
1276 if (INTEL_INFO(dev)->gen < 4) { 1215 if (INTEL_INFO(dev)->gen < 4) {
1277 DRM_DEBUG("no rel constants on pre-gen4\n"); 1216 DRM_DEBUG("no rel constants on pre-gen4\n");
1278 ret = -EINVAL; 1217 return -EINVAL;
1279 goto error;
1280 } 1218 }
1281 1219
1282 if (INTEL_INFO(dev)->gen > 5 && 1220 if (INTEL_INFO(dev)->gen > 5 &&
1283 instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) { 1221 instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
1284 DRM_DEBUG("rel surface constants mode invalid on gen5+\n"); 1222 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
1285 ret = -EINVAL; 1223 return -EINVAL;
1286 goto error;
1287 } 1224 }
1288 1225
1289 /* The HW changed the meaning on this bit on gen6 */ 1226 /* The HW changed the meaning on this bit on gen6 */
@@ -1293,15 +1230,14 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
1293 break; 1230 break;
1294 default: 1231 default:
1295 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode); 1232 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
1296 ret = -EINVAL; 1233 return -EINVAL;
1297 goto error;
1298 } 1234 }
1299 1235
1300 if (ring == &dev_priv->ring[RCS] && 1236 if (ring == &dev_priv->ring[RCS] &&
1301 instp_mode != dev_priv->relative_constants_mode) { 1237 instp_mode != dev_priv->relative_constants_mode) {
1302 ret = intel_ring_begin(params->request, 4); 1238 ret = intel_ring_begin(params->request, 4);
1303 if (ret) 1239 if (ret)
1304 goto error; 1240 return ret;
1305 1241
1306 intel_ring_emit(ring, MI_NOOP); 1242 intel_ring_emit(ring, MI_NOOP);
1307 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 1243 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
@@ -1315,42 +1251,25 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
1315 if (args->flags & I915_EXEC_GEN7_SOL_RESET) { 1251 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1316 ret = i915_reset_gen7_sol_offsets(dev, params->request); 1252 ret = i915_reset_gen7_sol_offsets(dev, params->request);
1317 if (ret) 1253 if (ret)
1318 goto error; 1254 return ret;
1319 } 1255 }
1320 1256
1321 exec_len = args->batch_len; 1257 exec_len = args->batch_len;
1322 exec_start = params->batch_obj_vm_offset + 1258 exec_start = params->batch_obj_vm_offset +
1323 params->args_batch_start_offset; 1259 params->args_batch_start_offset;
1324 1260
1325 if (cliprects) { 1261 ret = ring->dispatch_execbuffer(params->request,
1326 for (i = 0; i < args->num_cliprects; i++) { 1262 exec_start, exec_len,
1327 ret = i915_emit_box(params->request, &cliprects[i], 1263 params->dispatch_flags);
1328 args->DR1, args->DR4); 1264 if (ret)
1329 if (ret) 1265 return ret;
1330 goto error;
1331
1332 ret = ring->dispatch_execbuffer(params->request,
1333 exec_start, exec_len,
1334 params->dispatch_flags);
1335 if (ret)
1336 goto error;
1337 }
1338 } else {
1339 ret = ring->dispatch_execbuffer(params->request,
1340 exec_start, exec_len,
1341 params->dispatch_flags);
1342 if (ret)
1343 return ret;
1344 }
1345 1266
1346 trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags); 1267 trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
1347 1268
1348 i915_gem_execbuffer_move_to_active(vmas, params->request); 1269 i915_gem_execbuffer_move_to_active(vmas, params->request);
1349 i915_gem_execbuffer_retire_commands(params); 1270 i915_gem_execbuffer_retire_commands(params);
1350 1271
1351error: 1272 return 0;
1352 kfree(cliprects);
1353 return ret;
1354} 1273}
1355 1274
1356/** 1275/**
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index af1f8c461060..40a10b25956c 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -59,19 +59,19 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
59 struct drm_i915_gem_object *obj) 59 struct drm_i915_gem_object *obj)
60{ 60{
61 struct drm_i915_private *dev_priv = dev->dev_private; 61 struct drm_i915_private *dev_priv = dev->dev_private;
62 int fence_reg; 62 int fence_reg_lo, fence_reg_hi;
63 int fence_pitch_shift; 63 int fence_pitch_shift;
64 64
65 if (INTEL_INFO(dev)->gen >= 6) { 65 if (INTEL_INFO(dev)->gen >= 6) {
66 fence_reg = FENCE_REG_SANDYBRIDGE_0; 66 fence_reg_lo = FENCE_REG_GEN6_LO(reg);
67 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT; 67 fence_reg_hi = FENCE_REG_GEN6_HI(reg);
68 fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
68 } else { 69 } else {
69 fence_reg = FENCE_REG_965_0; 70 fence_reg_lo = FENCE_REG_965_LO(reg);
71 fence_reg_hi = FENCE_REG_965_HI(reg);
70 fence_pitch_shift = I965_FENCE_PITCH_SHIFT; 72 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
71 } 73 }
72 74
73 fence_reg += reg * 8;
74
75 /* To w/a incoherency with non-atomic 64-bit register updates, 75 /* To w/a incoherency with non-atomic 64-bit register updates,
76 * we split the 64-bit update into two 32-bit writes. In order 76 * we split the 64-bit update into two 32-bit writes. In order
77 * for a partial fence not to be evaluated between writes, we 77 * for a partial fence not to be evaluated between writes, we
@@ -81,8 +81,8 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
81 * For extra levels of paranoia, we make sure each step lands 81 * For extra levels of paranoia, we make sure each step lands
82 * before applying the next step. 82 * before applying the next step.
83 */ 83 */
84 I915_WRITE(fence_reg, 0); 84 I915_WRITE(fence_reg_lo, 0);
85 POSTING_READ(fence_reg); 85 POSTING_READ(fence_reg_lo);
86 86
87 if (obj) { 87 if (obj) {
88 u32 size = i915_gem_obj_ggtt_size(obj); 88 u32 size = i915_gem_obj_ggtt_size(obj);
@@ -103,14 +103,14 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
103 val |= 1 << I965_FENCE_TILING_Y_SHIFT; 103 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
104 val |= I965_FENCE_REG_VALID; 104 val |= I965_FENCE_REG_VALID;
105 105
106 I915_WRITE(fence_reg + 4, val >> 32); 106 I915_WRITE(fence_reg_hi, val >> 32);
107 POSTING_READ(fence_reg + 4); 107 POSTING_READ(fence_reg_hi);
108 108
109 I915_WRITE(fence_reg + 0, val); 109 I915_WRITE(fence_reg_lo, val);
110 POSTING_READ(fence_reg); 110 POSTING_READ(fence_reg_lo);
111 } else { 111 } else {
112 I915_WRITE(fence_reg + 4, 0); 112 I915_WRITE(fence_reg_hi, 0);
113 POSTING_READ(fence_reg + 4); 113 POSTING_READ(fence_reg_hi);
114 } 114 }
115} 115}
116 116
@@ -128,7 +128,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
128 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) || 128 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
129 (size & -size) != size || 129 (size & -size) != size ||
130 (i915_gem_obj_ggtt_offset(obj) & (size - 1)), 130 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
131 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", 131 "object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
132 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size); 132 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
133 133
134 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) 134 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
@@ -149,13 +149,8 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
149 } else 149 } else
150 val = 0; 150 val = 0;
151 151
152 if (reg < 8) 152 I915_WRITE(FENCE_REG(reg), val);
153 reg = FENCE_REG_830_0 + reg * 4; 153 POSTING_READ(FENCE_REG(reg));
154 else
155 reg = FENCE_REG_945_8 + (reg - 8) * 4;
156
157 I915_WRITE(reg, val);
158 POSTING_READ(reg);
159} 154}
160 155
161static void i830_write_fence_reg(struct drm_device *dev, int reg, 156static void i830_write_fence_reg(struct drm_device *dev, int reg,
@@ -171,7 +166,7 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
171 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) || 166 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
172 (size & -size) != size || 167 (size & -size) != size ||
173 (i915_gem_obj_ggtt_offset(obj) & (size - 1)), 168 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
174 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n", 169 "object 0x%08llx not 512K or pot-size 0x%08x aligned\n",
175 i915_gem_obj_ggtt_offset(obj), size); 170 i915_gem_obj_ggtt_offset(obj), size);
176 171
177 pitch_val = obj->stride / 128; 172 pitch_val = obj->stride / 128;
@@ -186,8 +181,8 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
186 } else 181 } else
187 val = 0; 182 val = 0;
188 183
189 I915_WRITE(FENCE_REG_830_0 + reg * 4, val); 184 I915_WRITE(FENCE_REG(reg), val);
190 POSTING_READ(FENCE_REG_830_0 + reg * 4); 185 POSTING_READ(FENCE_REG(reg));
191} 186}
192 187
193inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj) 188inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
@@ -322,7 +317,7 @@ i915_find_fence_reg(struct drm_device *dev)
322 317
323 /* First try to find a free reg */ 318 /* First try to find a free reg */
324 avail = NULL; 319 avail = NULL;
325 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { 320 for (i = 0; i < dev_priv->num_fence_regs; i++) {
326 reg = &dev_priv->fence_regs[i]; 321 reg = &dev_priv->fence_regs[i];
327 if (!reg->obj) 322 if (!reg->obj)
328 return reg; 323 return reg;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 96054a560f4f..43f35d12b677 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -204,6 +204,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
204 return pde; 204 return pde;
205} 205}
206 206
207#define gen8_pdpe_encode gen8_pde_encode
208#define gen8_pml4e_encode gen8_pde_encode
209
207static gen6_pte_t snb_pte_encode(dma_addr_t addr, 210static gen6_pte_t snb_pte_encode(dma_addr_t addr,
208 enum i915_cache_level level, 211 enum i915_cache_level level,
209 bool valid, u32 unused) 212 bool valid, u32 unused)
@@ -522,6 +525,127 @@ static void gen8_initialize_pd(struct i915_address_space *vm,
522 fill_px(vm->dev, pd, scratch_pde); 525 fill_px(vm->dev, pd, scratch_pde);
523} 526}
524 527
528static int __pdp_init(struct drm_device *dev,
529 struct i915_page_directory_pointer *pdp)
530{
531 size_t pdpes = I915_PDPES_PER_PDP(dev);
532
533 pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
534 sizeof(unsigned long),
535 GFP_KERNEL);
536 if (!pdp->used_pdpes)
537 return -ENOMEM;
538
539 pdp->page_directory = kcalloc(pdpes, sizeof(*pdp->page_directory),
540 GFP_KERNEL);
541 if (!pdp->page_directory) {
542 kfree(pdp->used_pdpes);
543 /* the PDP might be the statically allocated top level. Keep it
544 * as clean as possible */
545 pdp->used_pdpes = NULL;
546 return -ENOMEM;
547 }
548
549 return 0;
550}
551
552static void __pdp_fini(struct i915_page_directory_pointer *pdp)
553{
554 kfree(pdp->used_pdpes);
555 kfree(pdp->page_directory);
556 pdp->page_directory = NULL;
557}
558
559static struct
560i915_page_directory_pointer *alloc_pdp(struct drm_device *dev)
561{
562 struct i915_page_directory_pointer *pdp;
563 int ret = -ENOMEM;
564
565 WARN_ON(!USES_FULL_48BIT_PPGTT(dev));
566
567 pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
568 if (!pdp)
569 return ERR_PTR(-ENOMEM);
570
571 ret = __pdp_init(dev, pdp);
572 if (ret)
573 goto fail_bitmap;
574
575 ret = setup_px(dev, pdp);
576 if (ret)
577 goto fail_page_m;
578
579 return pdp;
580
581fail_page_m:
582 __pdp_fini(pdp);
583fail_bitmap:
584 kfree(pdp);
585
586 return ERR_PTR(ret);
587}
588
589static void free_pdp(struct drm_device *dev,
590 struct i915_page_directory_pointer *pdp)
591{
592 __pdp_fini(pdp);
593 if (USES_FULL_48BIT_PPGTT(dev)) {
594 cleanup_px(dev, pdp);
595 kfree(pdp);
596 }
597}
598
599static void gen8_initialize_pdp(struct i915_address_space *vm,
600 struct i915_page_directory_pointer *pdp)
601{
602 gen8_ppgtt_pdpe_t scratch_pdpe;
603
604 scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
605
606 fill_px(vm->dev, pdp, scratch_pdpe);
607}
608
609static void gen8_initialize_pml4(struct i915_address_space *vm,
610 struct i915_pml4 *pml4)
611{
612 gen8_ppgtt_pml4e_t scratch_pml4e;
613
614 scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
615 I915_CACHE_LLC);
616
617 fill_px(vm->dev, pml4, scratch_pml4e);
618}
619
620static void
621gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
622 struct i915_page_directory_pointer *pdp,
623 struct i915_page_directory *pd,
624 int index)
625{
626 gen8_ppgtt_pdpe_t *page_directorypo;
627
628 if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
629 return;
630
631 page_directorypo = kmap_px(pdp);
632 page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
633 kunmap_px(ppgtt, page_directorypo);
634}
635
636static void
637gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
638 struct i915_pml4 *pml4,
639 struct i915_page_directory_pointer *pdp,
640 int index)
641{
642 gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);
643
644 WARN_ON(!USES_FULL_48BIT_PPGTT(ppgtt->base.dev));
645 pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
646 kunmap_px(ppgtt, pagemap);
647}
648
525/* Broadwell Page Directory Pointer Descriptors */ 649/* Broadwell Page Directory Pointer Descriptors */
526static int gen8_write_pdp(struct drm_i915_gem_request *req, 650static int gen8_write_pdp(struct drm_i915_gem_request *req,
527 unsigned entry, 651 unsigned entry,
@@ -547,8 +671,8 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
547 return 0; 671 return 0;
548} 672}
549 673
550static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt, 674static int gen8_legacy_mm_switch(struct i915_hw_ppgtt *ppgtt,
551 struct drm_i915_gem_request *req) 675 struct drm_i915_gem_request *req)
552{ 676{
553 int i, ret; 677 int i, ret;
554 678
@@ -563,31 +687,38 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
563 return 0; 687 return 0;
564} 688}
565 689
566static void gen8_ppgtt_clear_range(struct i915_address_space *vm, 690static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
567 uint64_t start, 691 struct drm_i915_gem_request *req)
568 uint64_t length, 692{
569 bool use_scratch) 693 return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
694}
695
696static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
697 struct i915_page_directory_pointer *pdp,
698 uint64_t start,
699 uint64_t length,
700 gen8_pte_t scratch_pte)
570{ 701{
571 struct i915_hw_ppgtt *ppgtt = 702 struct i915_hw_ppgtt *ppgtt =
572 container_of(vm, struct i915_hw_ppgtt, base); 703 container_of(vm, struct i915_hw_ppgtt, base);
573 gen8_pte_t *pt_vaddr, scratch_pte; 704 gen8_pte_t *pt_vaddr;
574 unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK; 705 unsigned pdpe = gen8_pdpe_index(start);
575 unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK; 706 unsigned pde = gen8_pde_index(start);
576 unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK; 707 unsigned pte = gen8_pte_index(start);
577 unsigned num_entries = length >> PAGE_SHIFT; 708 unsigned num_entries = length >> PAGE_SHIFT;
578 unsigned last_pte, i; 709 unsigned last_pte, i;
579 710
580 scratch_pte = gen8_pte_encode(px_dma(ppgtt->base.scratch_page), 711 if (WARN_ON(!pdp))
581 I915_CACHE_LLC, use_scratch); 712 return;
582 713
583 while (num_entries) { 714 while (num_entries) {
584 struct i915_page_directory *pd; 715 struct i915_page_directory *pd;
585 struct i915_page_table *pt; 716 struct i915_page_table *pt;
586 717
587 if (WARN_ON(!ppgtt->pdp.page_directory[pdpe])) 718 if (WARN_ON(!pdp->page_directory[pdpe]))
588 break; 719 break;
589 720
590 pd = ppgtt->pdp.page_directory[pdpe]; 721 pd = pdp->page_directory[pdpe];
591 722
592 if (WARN_ON(!pd->page_table[pde])) 723 if (WARN_ON(!pd->page_table[pde]))
593 break; 724 break;
@@ -612,45 +743,69 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
612 743
613 pte = 0; 744 pte = 0;
614 if (++pde == I915_PDES) { 745 if (++pde == I915_PDES) {
615 pdpe++; 746 if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
747 break;
616 pde = 0; 748 pde = 0;
617 } 749 }
618 } 750 }
619} 751}
620 752
621static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, 753static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
622 struct sg_table *pages, 754 uint64_t start,
623 uint64_t start, 755 uint64_t length,
624 enum i915_cache_level cache_level, u32 unused) 756 bool use_scratch)
757{
758 struct i915_hw_ppgtt *ppgtt =
759 container_of(vm, struct i915_hw_ppgtt, base);
760 gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
761 I915_CACHE_LLC, use_scratch);
762
763 if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
764 gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
765 scratch_pte);
766 } else {
767 uint64_t templ4, pml4e;
768 struct i915_page_directory_pointer *pdp;
769
770 gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
771 gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
772 scratch_pte);
773 }
774 }
775}
776
777static void
778gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
779 struct i915_page_directory_pointer *pdp,
780 struct sg_page_iter *sg_iter,
781 uint64_t start,
782 enum i915_cache_level cache_level)
625{ 783{
626 struct i915_hw_ppgtt *ppgtt = 784 struct i915_hw_ppgtt *ppgtt =
627 container_of(vm, struct i915_hw_ppgtt, base); 785 container_of(vm, struct i915_hw_ppgtt, base);
628 gen8_pte_t *pt_vaddr; 786 gen8_pte_t *pt_vaddr;
629 unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK; 787 unsigned pdpe = gen8_pdpe_index(start);
630 unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK; 788 unsigned pde = gen8_pde_index(start);
631 unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK; 789 unsigned pte = gen8_pte_index(start);
632 struct sg_page_iter sg_iter;
633 790
634 pt_vaddr = NULL; 791 pt_vaddr = NULL;
635 792
636 for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { 793 while (__sg_page_iter_next(sg_iter)) {
637 if (WARN_ON(pdpe >= GEN8_LEGACY_PDPES))
638 break;
639
640 if (pt_vaddr == NULL) { 794 if (pt_vaddr == NULL) {
641 struct i915_page_directory *pd = ppgtt->pdp.page_directory[pdpe]; 795 struct i915_page_directory *pd = pdp->page_directory[pdpe];
642 struct i915_page_table *pt = pd->page_table[pde]; 796 struct i915_page_table *pt = pd->page_table[pde];
643 pt_vaddr = kmap_px(pt); 797 pt_vaddr = kmap_px(pt);
644 } 798 }
645 799
646 pt_vaddr[pte] = 800 pt_vaddr[pte] =
647 gen8_pte_encode(sg_page_iter_dma_address(&sg_iter), 801 gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
648 cache_level, true); 802 cache_level, true);
649 if (++pte == GEN8_PTES) { 803 if (++pte == GEN8_PTES) {
650 kunmap_px(ppgtt, pt_vaddr); 804 kunmap_px(ppgtt, pt_vaddr);
651 pt_vaddr = NULL; 805 pt_vaddr = NULL;
652 if (++pde == I915_PDES) { 806 if (++pde == I915_PDES) {
653 pdpe++; 807 if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
808 break;
654 pde = 0; 809 pde = 0;
655 } 810 }
656 pte = 0; 811 pte = 0;
@@ -661,6 +816,33 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
661 kunmap_px(ppgtt, pt_vaddr); 816 kunmap_px(ppgtt, pt_vaddr);
662} 817}
663 818
819static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
820 struct sg_table *pages,
821 uint64_t start,
822 enum i915_cache_level cache_level,
823 u32 unused)
824{
825 struct i915_hw_ppgtt *ppgtt =
826 container_of(vm, struct i915_hw_ppgtt, base);
827 struct sg_page_iter sg_iter;
828
829 __sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
830
831 if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
832 gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
833 cache_level);
834 } else {
835 struct i915_page_directory_pointer *pdp;
836 uint64_t templ4, pml4e;
837 uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;
838
839 gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
840 gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
841 start, cache_level);
842 }
843 }
844}
845
664static void gen8_free_page_tables(struct drm_device *dev, 846static void gen8_free_page_tables(struct drm_device *dev,
665 struct i915_page_directory *pd) 847 struct i915_page_directory *pd)
666{ 848{
@@ -699,8 +881,55 @@ static int gen8_init_scratch(struct i915_address_space *vm)
699 return PTR_ERR(vm->scratch_pd); 881 return PTR_ERR(vm->scratch_pd);
700 } 882 }
701 883
884 if (USES_FULL_48BIT_PPGTT(dev)) {
885 vm->scratch_pdp = alloc_pdp(dev);
886 if (IS_ERR(vm->scratch_pdp)) {
887 free_pd(dev, vm->scratch_pd);
888 free_pt(dev, vm->scratch_pt);
889 free_scratch_page(dev, vm->scratch_page);
890 return PTR_ERR(vm->scratch_pdp);
891 }
892 }
893
702 gen8_initialize_pt(vm, vm->scratch_pt); 894 gen8_initialize_pt(vm, vm->scratch_pt);
703 gen8_initialize_pd(vm, vm->scratch_pd); 895 gen8_initialize_pd(vm, vm->scratch_pd);
896 if (USES_FULL_48BIT_PPGTT(dev))
897 gen8_initialize_pdp(vm, vm->scratch_pdp);
898
899 return 0;
900}
901
902static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
903{
904 enum vgt_g2v_type msg;
905 struct drm_device *dev = ppgtt->base.dev;
906 struct drm_i915_private *dev_priv = dev->dev_private;
907 unsigned int offset = vgtif_reg(pdp0_lo);
908 int i;
909
910 if (USES_FULL_48BIT_PPGTT(dev)) {
911 u64 daddr = px_dma(&ppgtt->pml4);
912
913 I915_WRITE(offset, lower_32_bits(daddr));
914 I915_WRITE(offset + 4, upper_32_bits(daddr));
915
916 msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
917 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
918 } else {
919 for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
920 u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
921
922 I915_WRITE(offset, lower_32_bits(daddr));
923 I915_WRITE(offset + 4, upper_32_bits(daddr));
924
925 offset += 8;
926 }
927
928 msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
929 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
930 }
931
932 I915_WRITE(vgtif_reg(g2v_notify), msg);
704 933
705 return 0; 934 return 0;
706} 935}
@@ -709,35 +938,65 @@ static void gen8_free_scratch(struct i915_address_space *vm)
709{ 938{
710 struct drm_device *dev = vm->dev; 939 struct drm_device *dev = vm->dev;
711 940
941 if (USES_FULL_48BIT_PPGTT(dev))
942 free_pdp(dev, vm->scratch_pdp);
712 free_pd(dev, vm->scratch_pd); 943 free_pd(dev, vm->scratch_pd);
713 free_pt(dev, vm->scratch_pt); 944 free_pt(dev, vm->scratch_pt);
714 free_scratch_page(dev, vm->scratch_page); 945 free_scratch_page(dev, vm->scratch_page);
715} 946}
716 947
717static void gen8_ppgtt_cleanup(struct i915_address_space *vm) 948static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
949 struct i915_page_directory_pointer *pdp)
718{ 950{
719 struct i915_hw_ppgtt *ppgtt =
720 container_of(vm, struct i915_hw_ppgtt, base);
721 int i; 951 int i;
722 952
723 for_each_set_bit(i, ppgtt->pdp.used_pdpes, GEN8_LEGACY_PDPES) { 953 for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) {
724 if (WARN_ON(!ppgtt->pdp.page_directory[i])) 954 if (WARN_ON(!pdp->page_directory[i]))
725 continue; 955 continue;
726 956
727 gen8_free_page_tables(ppgtt->base.dev, 957 gen8_free_page_tables(dev, pdp->page_directory[i]);
728 ppgtt->pdp.page_directory[i]); 958 free_pd(dev, pdp->page_directory[i]);
729 free_pd(ppgtt->base.dev, ppgtt->pdp.page_directory[i]);
730 } 959 }
731 960
961 free_pdp(dev, pdp);
962}
963
964static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
965{
966 int i;
967
968 for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
969 if (WARN_ON(!ppgtt->pml4.pdps[i]))
970 continue;
971
972 gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, ppgtt->pml4.pdps[i]);
973 }
974
975 cleanup_px(ppgtt->base.dev, &ppgtt->pml4);
976}
977
978static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
979{
980 struct i915_hw_ppgtt *ppgtt =
981 container_of(vm, struct i915_hw_ppgtt, base);
982
983 if (intel_vgpu_active(vm->dev))
984 gen8_ppgtt_notify_vgt(ppgtt, false);
985
986 if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
987 gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, &ppgtt->pdp);
988 else
989 gen8_ppgtt_cleanup_4lvl(ppgtt);
990
732 gen8_free_scratch(vm); 991 gen8_free_scratch(vm);
733} 992}
734 993
735/** 994/**
736 * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range. 995 * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
737 * @ppgtt: Master ppgtt structure. 996 * @vm: Master vm structure.
738 * @pd: Page directory for this address range. 997 * @pd: Page directory for this address range.
739 * @start: Starting virtual address to begin allocations. 998 * @start: Starting virtual address to begin allocations.
740 * @length Size of the allocations. 999 * @length: Size of the allocations.
741 * @new_pts: Bitmap set by function with new allocations. Likely used by the 1000 * @new_pts: Bitmap set by function with new allocations. Likely used by the
742 * caller to free on error. 1001 * caller to free on error.
743 * 1002 *
@@ -750,22 +1009,22 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
750 * 1009 *
751 * Return: 0 if success; negative error code otherwise. 1010 * Return: 0 if success; negative error code otherwise.
752 */ 1011 */
753static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt, 1012static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
754 struct i915_page_directory *pd, 1013 struct i915_page_directory *pd,
755 uint64_t start, 1014 uint64_t start,
756 uint64_t length, 1015 uint64_t length,
757 unsigned long *new_pts) 1016 unsigned long *new_pts)
758{ 1017{
759 struct drm_device *dev = ppgtt->base.dev; 1018 struct drm_device *dev = vm->dev;
760 struct i915_page_table *pt; 1019 struct i915_page_table *pt;
761 uint64_t temp; 1020 uint64_t temp;
762 uint32_t pde; 1021 uint32_t pde;
763 1022
764 gen8_for_each_pde(pt, pd, start, length, temp, pde) { 1023 gen8_for_each_pde(pt, pd, start, length, temp, pde) {
765 /* Don't reallocate page tables */ 1024 /* Don't reallocate page tables */
766 if (pt) { 1025 if (test_bit(pde, pd->used_pdes)) {
767 /* Scratch is never allocated this way */ 1026 /* Scratch is never allocated this way */
768 WARN_ON(pt == ppgtt->base.scratch_pt); 1027 WARN_ON(pt == vm->scratch_pt);
769 continue; 1028 continue;
770 } 1029 }
771 1030
@@ -773,9 +1032,10 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
773 if (IS_ERR(pt)) 1032 if (IS_ERR(pt))
774 goto unwind_out; 1033 goto unwind_out;
775 1034
776 gen8_initialize_pt(&ppgtt->base, pt); 1035 gen8_initialize_pt(vm, pt);
777 pd->page_table[pde] = pt; 1036 pd->page_table[pde] = pt;
778 __set_bit(pde, new_pts); 1037 __set_bit(pde, new_pts);
1038 trace_i915_page_table_entry_alloc(vm, pde, start, GEN8_PDE_SHIFT);
779 } 1039 }
780 1040
781 return 0; 1041 return 0;
@@ -789,11 +1049,11 @@ unwind_out:
789 1049
790/** 1050/**
791 * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range. 1051 * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
792 * @ppgtt: Master ppgtt structure. 1052 * @vm: Master vm structure.
793 * @pdp: Page directory pointer for this address range. 1053 * @pdp: Page directory pointer for this address range.
794 * @start: Starting virtual address to begin allocations. 1054 * @start: Starting virtual address to begin allocations.
795 * @length Size of the allocations. 1055 * @length: Size of the allocations.
796 * @new_pds Bitmap set by function with new allocations. Likely used by the 1056 * @new_pds: Bitmap set by function with new allocations. Likely used by the
797 * caller to free on error. 1057 * caller to free on error.
798 * 1058 *
799 * Allocate the required number of page directories starting at the pde index of 1059 * Allocate the required number of page directories starting at the pde index of
@@ -810,48 +1070,102 @@ unwind_out:
810 * 1070 *
811 * Return: 0 if success; negative error code otherwise. 1071 * Return: 0 if success; negative error code otherwise.
812 */ 1072 */
813static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt, 1073static int
814 struct i915_page_directory_pointer *pdp, 1074gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
815 uint64_t start, 1075 struct i915_page_directory_pointer *pdp,
816 uint64_t length, 1076 uint64_t start,
817 unsigned long *new_pds) 1077 uint64_t length,
1078 unsigned long *new_pds)
818{ 1079{
819 struct drm_device *dev = ppgtt->base.dev; 1080 struct drm_device *dev = vm->dev;
820 struct i915_page_directory *pd; 1081 struct i915_page_directory *pd;
821 uint64_t temp; 1082 uint64_t temp;
822 uint32_t pdpe; 1083 uint32_t pdpe;
1084 uint32_t pdpes = I915_PDPES_PER_PDP(dev);
823 1085
824 WARN_ON(!bitmap_empty(new_pds, GEN8_LEGACY_PDPES)); 1086 WARN_ON(!bitmap_empty(new_pds, pdpes));
825 1087
826 gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) { 1088 gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
827 if (pd) 1089 if (test_bit(pdpe, pdp->used_pdpes))
828 continue; 1090 continue;
829 1091
830 pd = alloc_pd(dev); 1092 pd = alloc_pd(dev);
831 if (IS_ERR(pd)) 1093 if (IS_ERR(pd))
832 goto unwind_out; 1094 goto unwind_out;
833 1095
834 gen8_initialize_pd(&ppgtt->base, pd); 1096 gen8_initialize_pd(vm, pd);
835 pdp->page_directory[pdpe] = pd; 1097 pdp->page_directory[pdpe] = pd;
836 __set_bit(pdpe, new_pds); 1098 __set_bit(pdpe, new_pds);
1099 trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT);
837 } 1100 }
838 1101
839 return 0; 1102 return 0;
840 1103
841unwind_out: 1104unwind_out:
842 for_each_set_bit(pdpe, new_pds, GEN8_LEGACY_PDPES) 1105 for_each_set_bit(pdpe, new_pds, pdpes)
843 free_pd(dev, pdp->page_directory[pdpe]); 1106 free_pd(dev, pdp->page_directory[pdpe]);
844 1107
845 return -ENOMEM; 1108 return -ENOMEM;
846} 1109}
847 1110
848static void 1111/**
849free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long **new_pts) 1112 * gen8_ppgtt_alloc_page_dirpointers() - Allocate pdps for VA range.
1113 * @vm: Master vm structure.
1114 * @pml4: Page map level 4 for this address range.
1115 * @start: Starting virtual address to begin allocations.
1116 * @length: Size of the allocations.
1117 * @new_pdps: Bitmap set by function with new allocations. Likely used by the
1118 * caller to free on error.
1119 *
1120 * Allocate the required number of page directory pointers. Extremely similar to
1121 * gen8_ppgtt_alloc_page_directories() and gen8_ppgtt_alloc_pagetabs().
1122 * The main difference is here we are limited by the pml4 boundary (instead of
1123 * the page directory pointer).
1124 *
1125 * Return: 0 if success; negative error code otherwise.
1126 */
1127static int
1128gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
1129 struct i915_pml4 *pml4,
1130 uint64_t start,
1131 uint64_t length,
1132 unsigned long *new_pdps)
850{ 1133{
851 int i; 1134 struct drm_device *dev = vm->dev;
1135 struct i915_page_directory_pointer *pdp;
1136 uint64_t temp;
1137 uint32_t pml4e;
1138
1139 WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));
1140
1141 gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
1142 if (!test_bit(pml4e, pml4->used_pml4es)) {
1143 pdp = alloc_pdp(dev);
1144 if (IS_ERR(pdp))
1145 goto unwind_out;
1146
1147 gen8_initialize_pdp(vm, pdp);
1148 pml4->pdps[pml4e] = pdp;
1149 __set_bit(pml4e, new_pdps);
1150 trace_i915_page_directory_pointer_entry_alloc(vm,
1151 pml4e,
1152 start,
1153 GEN8_PML4E_SHIFT);
1154 }
1155 }
852 1156
853 for (i = 0; i < GEN8_LEGACY_PDPES; i++) 1157 return 0;
854 kfree(new_pts[i]); 1158
1159unwind_out:
1160 for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
1161 free_pdp(dev, pml4->pdps[pml4e]);
1162
1163 return -ENOMEM;
1164}
1165
1166static void
1167free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long *new_pts)
1168{
855 kfree(new_pts); 1169 kfree(new_pts);
856 kfree(new_pds); 1170 kfree(new_pds);
857} 1171}
@@ -861,28 +1175,20 @@ free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long **new_pts)
861 */ 1175 */
862static 1176static
863int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds, 1177int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
864 unsigned long ***new_pts) 1178 unsigned long **new_pts,
1179 uint32_t pdpes)
865{ 1180{
866 int i;
867 unsigned long *pds; 1181 unsigned long *pds;
868 unsigned long **pts; 1182 unsigned long *pts;
869 1183
870 pds = kcalloc(BITS_TO_LONGS(GEN8_LEGACY_PDPES), sizeof(unsigned long), GFP_KERNEL); 1184 pds = kcalloc(BITS_TO_LONGS(pdpes), sizeof(unsigned long), GFP_TEMPORARY);
871 if (!pds) 1185 if (!pds)
872 return -ENOMEM; 1186 return -ENOMEM;
873 1187
874 pts = kcalloc(GEN8_LEGACY_PDPES, sizeof(unsigned long *), GFP_KERNEL); 1188 pts = kcalloc(pdpes, BITS_TO_LONGS(I915_PDES) * sizeof(unsigned long),
875 if (!pts) { 1189 GFP_TEMPORARY);
876 kfree(pds); 1190 if (!pts)
877 return -ENOMEM; 1191 goto err_out;
878 }
879
880 for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
881 pts[i] = kcalloc(BITS_TO_LONGS(I915_PDES),
882 sizeof(unsigned long), GFP_KERNEL);
883 if (!pts[i])
884 goto err_out;
885 }
886 1192
887 *new_pds = pds; 1193 *new_pds = pds;
888 *new_pts = pts; 1194 *new_pts = pts;
@@ -904,18 +1210,21 @@ static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
904 ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask; 1210 ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
905} 1211}
906 1212
907static int gen8_alloc_va_range(struct i915_address_space *vm, 1213static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
908 uint64_t start, 1214 struct i915_page_directory_pointer *pdp,
909 uint64_t length) 1215 uint64_t start,
1216 uint64_t length)
910{ 1217{
911 struct i915_hw_ppgtt *ppgtt = 1218 struct i915_hw_ppgtt *ppgtt =
912 container_of(vm, struct i915_hw_ppgtt, base); 1219 container_of(vm, struct i915_hw_ppgtt, base);
913 unsigned long *new_page_dirs, **new_page_tables; 1220 unsigned long *new_page_dirs, *new_page_tables;
1221 struct drm_device *dev = vm->dev;
914 struct i915_page_directory *pd; 1222 struct i915_page_directory *pd;
915 const uint64_t orig_start = start; 1223 const uint64_t orig_start = start;
916 const uint64_t orig_length = length; 1224 const uint64_t orig_length = length;
917 uint64_t temp; 1225 uint64_t temp;
918 uint32_t pdpe; 1226 uint32_t pdpe;
1227 uint32_t pdpes = I915_PDPES_PER_PDP(dev);
919 int ret; 1228 int ret;
920 1229
921 /* Wrap is never okay since we can only represent 48b, and we don't 1230 /* Wrap is never okay since we can only represent 48b, and we don't
@@ -924,25 +1233,25 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
924 if (WARN_ON(start + length < start)) 1233 if (WARN_ON(start + length < start))
925 return -ENODEV; 1234 return -ENODEV;
926 1235
927 if (WARN_ON(start + length > ppgtt->base.total)) 1236 if (WARN_ON(start + length > vm->total))
928 return -ENODEV; 1237 return -ENODEV;
929 1238
930 ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables); 1239 ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
931 if (ret) 1240 if (ret)
932 return ret; 1241 return ret;
933 1242
934 /* Do the allocations first so we can easily bail out */ 1243 /* Do the allocations first so we can easily bail out */
935 ret = gen8_ppgtt_alloc_page_directories(ppgtt, &ppgtt->pdp, start, length, 1244 ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length,
936 new_page_dirs); 1245 new_page_dirs);
937 if (ret) { 1246 if (ret) {
938 free_gen8_temp_bitmaps(new_page_dirs, new_page_tables); 1247 free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
939 return ret; 1248 return ret;
940 } 1249 }
941 1250
942 /* For every page directory referenced, allocate page tables */ 1251 /* For every page directory referenced, allocate page tables */
943 gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) { 1252 gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
944 ret = gen8_ppgtt_alloc_pagetabs(ppgtt, pd, start, length, 1253 ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
945 new_page_tables[pdpe]); 1254 new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
946 if (ret) 1255 if (ret)
947 goto err_out; 1256 goto err_out;
948 } 1257 }
@@ -952,10 +1261,10 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
952 1261
953 /* Allocations have completed successfully, so set the bitmaps, and do 1262 /* Allocations have completed successfully, so set the bitmaps, and do
954 * the mappings. */ 1263 * the mappings. */
955 gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) { 1264 gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
956 gen8_pde_t *const page_directory = kmap_px(pd); 1265 gen8_pde_t *const page_directory = kmap_px(pd);
957 struct i915_page_table *pt; 1266 struct i915_page_table *pt;
958 uint64_t pd_len = gen8_clamp_pd(start, length); 1267 uint64_t pd_len = length;
959 uint64_t pd_start = start; 1268 uint64_t pd_start = start;
960 uint32_t pde; 1269 uint32_t pde;
961 1270
@@ -979,14 +1288,18 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
979 /* Map the PDE to the page table */ 1288 /* Map the PDE to the page table */
980 page_directory[pde] = gen8_pde_encode(px_dma(pt), 1289 page_directory[pde] = gen8_pde_encode(px_dma(pt),
981 I915_CACHE_LLC); 1290 I915_CACHE_LLC);
1291 trace_i915_page_table_entry_map(&ppgtt->base, pde, pt,
1292 gen8_pte_index(start),
1293 gen8_pte_count(start, length),
1294 GEN8_PTES);
982 1295
983 /* NB: We haven't yet mapped ptes to pages. At this 1296 /* NB: We haven't yet mapped ptes to pages. At this
984 * point we're still relying on insert_entries() */ 1297 * point we're still relying on insert_entries() */
985 } 1298 }
986 1299
987 kunmap_px(ppgtt, page_directory); 1300 kunmap_px(ppgtt, page_directory);
988 1301 __set_bit(pdpe, pdp->used_pdpes);
989 __set_bit(pdpe, ppgtt->pdp.used_pdpes); 1302 gen8_setup_page_directory(ppgtt, pdp, pd, pdpe);
990 } 1303 }
991 1304
992 free_gen8_temp_bitmaps(new_page_dirs, new_page_tables); 1305 free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
@@ -995,18 +1308,191 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
995 1308
996err_out: 1309err_out:
997 while (pdpe--) { 1310 while (pdpe--) {
998 for_each_set_bit(temp, new_page_tables[pdpe], I915_PDES) 1311 for_each_set_bit(temp, new_page_tables + pdpe *
999 free_pt(vm->dev, ppgtt->pdp.page_directory[pdpe]->page_table[temp]); 1312 BITS_TO_LONGS(I915_PDES), I915_PDES)
1313 free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
1000 } 1314 }
1001 1315
1002 for_each_set_bit(pdpe, new_page_dirs, GEN8_LEGACY_PDPES) 1316 for_each_set_bit(pdpe, new_page_dirs, pdpes)
1003 free_pd(vm->dev, ppgtt->pdp.page_directory[pdpe]); 1317 free_pd(dev, pdp->page_directory[pdpe]);
1004 1318
1005 free_gen8_temp_bitmaps(new_page_dirs, new_page_tables); 1319 free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1006 mark_tlbs_dirty(ppgtt); 1320 mark_tlbs_dirty(ppgtt);
1007 return ret; 1321 return ret;
1008} 1322}
1009 1323
1324static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
1325 struct i915_pml4 *pml4,
1326 uint64_t start,
1327 uint64_t length)
1328{
1329 DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
1330 struct i915_hw_ppgtt *ppgtt =
1331 container_of(vm, struct i915_hw_ppgtt, base);
1332 struct i915_page_directory_pointer *pdp;
1333 uint64_t temp, pml4e;
1334 int ret = 0;
1335
1336 /* Do the pml4 allocations first, so we don't need to track the newly
1337 * allocated tables below the pdp */
1338 bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4);
1339
1340 /* The pagedirectory and pagetable allocations are done in the shared 3
1341 * and 4 level code. Just allocate the pdps.
1342 */
1343 ret = gen8_ppgtt_alloc_page_dirpointers(vm, pml4, start, length,
1344 new_pdps);
1345 if (ret)
1346 return ret;
1347
1348 WARN(bitmap_weight(new_pdps, GEN8_PML4ES_PER_PML4) > 2,
1349 "The allocation has spanned more than 512GB. "
1350 "It is highly likely this is incorrect.");
1351
1352 gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
1353 WARN_ON(!pdp);
1354
1355 ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
1356 if (ret)
1357 goto err_out;
1358
1359 gen8_setup_page_directory_pointer(ppgtt, pml4, pdp, pml4e);
1360 }
1361
1362 bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
1363 GEN8_PML4ES_PER_PML4);
1364
1365 return 0;
1366
1367err_out:
1368 for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
1369 gen8_ppgtt_cleanup_3lvl(vm->dev, pml4->pdps[pml4e]);
1370
1371 return ret;
1372}
1373
1374static int gen8_alloc_va_range(struct i915_address_space *vm,
1375 uint64_t start, uint64_t length)
1376{
1377 struct i915_hw_ppgtt *ppgtt =
1378 container_of(vm, struct i915_hw_ppgtt, base);
1379
1380 if (USES_FULL_48BIT_PPGTT(vm->dev))
1381 return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
1382 else
1383 return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
1384}
1385
1386static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
1387 uint64_t start, uint64_t length,
1388 gen8_pte_t scratch_pte,
1389 struct seq_file *m)
1390{
1391 struct i915_page_directory *pd;
1392 uint64_t temp;
1393 uint32_t pdpe;
1394
1395 gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
1396 struct i915_page_table *pt;
1397 uint64_t pd_len = length;
1398 uint64_t pd_start = start;
1399 uint32_t pde;
1400
1401 if (!test_bit(pdpe, pdp->used_pdpes))
1402 continue;
1403
1404 seq_printf(m, "\tPDPE #%d\n", pdpe);
1405 gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) {
1406 uint32_t pte;
1407 gen8_pte_t *pt_vaddr;
1408
1409 if (!test_bit(pde, pd->used_pdes))
1410 continue;
1411
1412 pt_vaddr = kmap_px(pt);
1413 for (pte = 0; pte < GEN8_PTES; pte += 4) {
1414 uint64_t va =
1415 (pdpe << GEN8_PDPE_SHIFT) |
1416 (pde << GEN8_PDE_SHIFT) |
1417 (pte << GEN8_PTE_SHIFT);
1418 int i;
1419 bool found = false;
1420
1421 for (i = 0; i < 4; i++)
1422 if (pt_vaddr[pte + i] != scratch_pte)
1423 found = true;
1424 if (!found)
1425 continue;
1426
1427 seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
1428 for (i = 0; i < 4; i++) {
1429 if (pt_vaddr[pte + i] != scratch_pte)
1430 seq_printf(m, " %llx", pt_vaddr[pte + i]);
1431 else
1432 seq_puts(m, " SCRATCH ");
1433 }
1434 seq_puts(m, "\n");
1435 }
1436 /* don't use kunmap_px, it could trigger
1437 * an unnecessary flush.
1438 */
1439 kunmap_atomic(pt_vaddr);
1440 }
1441 }
1442}
1443
1444static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1445{
1446 struct i915_address_space *vm = &ppgtt->base;
1447 uint64_t start = ppgtt->base.start;
1448 uint64_t length = ppgtt->base.total;
1449 gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
1450 I915_CACHE_LLC, true);
1451
1452 if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
1453 gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
1454 } else {
1455 uint64_t templ4, pml4e;
1456 struct i915_pml4 *pml4 = &ppgtt->pml4;
1457 struct i915_page_directory_pointer *pdp;
1458
1459 gen8_for_each_pml4e(pdp, pml4, start, length, templ4, pml4e) {
1460 if (!test_bit(pml4e, pml4->used_pml4es))
1461 continue;
1462
1463 seq_printf(m, " PML4E #%llu\n", pml4e);
1464 gen8_dump_pdp(pdp, start, length, scratch_pte, m);
1465 }
1466 }
1467}
1468
1469static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
1470{
1471 unsigned long *new_page_dirs, *new_page_tables;
1472 uint32_t pdpes = I915_PDPES_PER_PDP(dev);
1473 int ret;
1474
1475 /* We allocate temp bitmap for page tables for no gain
1476 * but as this is for init only, lets keep the things simple
1477 */
1478 ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
1479 if (ret)
1480 return ret;
1481
1482 /* Allocate for all pdps regardless of how the ppgtt
1483 * was defined.
1484 */
1485 ret = gen8_ppgtt_alloc_page_directories(&ppgtt->base, &ppgtt->pdp,
1486 0, 1ULL << 32,
1487 new_page_dirs);
1488 if (!ret)
1489 *ppgtt->pdp.used_pdpes = *new_page_dirs;
1490
1491 free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1492
1493 return ret;
1494}
1495
1010/* 1496/*
1011 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers 1497 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
1012 * with a net effect resembling a 2-level page table in normal x86 terms. Each 1498 * with a net effect resembling a 2-level page table in normal x86 terms. Each
@@ -1023,24 +1509,49 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1023 return ret; 1509 return ret;
1024 1510
1025 ppgtt->base.start = 0; 1511 ppgtt->base.start = 0;
1026 ppgtt->base.total = 1ULL << 32;
1027 if (IS_ENABLED(CONFIG_X86_32))
1028 /* While we have a proliferation of size_t variables
1029 * we cannot represent the full ppgtt size on 32bit,
1030 * so limit it to the same size as the GGTT (currently
1031 * 2GiB).
1032 */
1033 ppgtt->base.total = to_i915(ppgtt->base.dev)->gtt.base.total;
1034 ppgtt->base.cleanup = gen8_ppgtt_cleanup; 1512 ppgtt->base.cleanup = gen8_ppgtt_cleanup;
1035 ppgtt->base.allocate_va_range = gen8_alloc_va_range; 1513 ppgtt->base.allocate_va_range = gen8_alloc_va_range;
1036 ppgtt->base.insert_entries = gen8_ppgtt_insert_entries; 1514 ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
1037 ppgtt->base.clear_range = gen8_ppgtt_clear_range; 1515 ppgtt->base.clear_range = gen8_ppgtt_clear_range;
1038 ppgtt->base.unbind_vma = ppgtt_unbind_vma; 1516 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
1039 ppgtt->base.bind_vma = ppgtt_bind_vma; 1517 ppgtt->base.bind_vma = ppgtt_bind_vma;
1518 ppgtt->debug_dump = gen8_dump_ppgtt;
1040 1519
1041 ppgtt->switch_mm = gen8_mm_switch; 1520 if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
1521 ret = setup_px(ppgtt->base.dev, &ppgtt->pml4);
1522 if (ret)
1523 goto free_scratch;
1524
1525 gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
1526
1527 ppgtt->base.total = 1ULL << 48;
1528 ppgtt->switch_mm = gen8_48b_mm_switch;
1529 } else {
1530 ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp);
1531 if (ret)
1532 goto free_scratch;
1533
1534 ppgtt->base.total = 1ULL << 32;
1535 ppgtt->switch_mm = gen8_legacy_mm_switch;
1536 trace_i915_page_directory_pointer_entry_alloc(&ppgtt->base,
1537 0, 0,
1538 GEN8_PML4E_SHIFT);
1539
1540 if (intel_vgpu_active(ppgtt->base.dev)) {
1541 ret = gen8_preallocate_top_level_pdps(ppgtt);
1542 if (ret)
1543 goto free_scratch;
1544 }
1545 }
1546
1547 if (intel_vgpu_active(ppgtt->base.dev))
1548 gen8_ppgtt_notify_vgt(ppgtt, true);
1042 1549
1043 return 0; 1550 return 0;
1551
1552free_scratch:
1553 gen8_free_scratch(&ppgtt->base);
1554 return ret;
1044} 1555}
1045 1556
1046static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) 1557static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
@@ -1228,8 +1739,9 @@ static void gen8_ppgtt_enable(struct drm_device *dev)
1228 int j; 1739 int j;
1229 1740
1230 for_each_ring(ring, dev_priv, j) { 1741 for_each_ring(ring, dev_priv, j) {
1742 u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
1231 I915_WRITE(RING_MODE_GEN7(ring), 1743 I915_WRITE(RING_MODE_GEN7(ring),
1232 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); 1744 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
1233 } 1745 }
1234} 1746}
1235 1747
@@ -1609,6 +2121,16 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
1609 return gen8_ppgtt_init(ppgtt); 2121 return gen8_ppgtt_init(ppgtt);
1610} 2122}
1611 2123
2124static void i915_address_space_init(struct i915_address_space *vm,
2125 struct drm_i915_private *dev_priv)
2126{
2127 drm_mm_init(&vm->mm, vm->start, vm->total);
2128 vm->dev = dev_priv->dev;
2129 INIT_LIST_HEAD(&vm->active_list);
2130 INIT_LIST_HEAD(&vm->inactive_list);
2131 list_add_tail(&vm->global_link, &dev_priv->vm_list);
2132}
2133
1612int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) 2134int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
1613{ 2135{
1614 struct drm_i915_private *dev_priv = dev->dev_private; 2136 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1617,9 +2139,7 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
1617 ret = __hw_ppgtt_init(dev, ppgtt); 2139 ret = __hw_ppgtt_init(dev, ppgtt);
1618 if (ret == 0) { 2140 if (ret == 0) {
1619 kref_init(&ppgtt->ref); 2141 kref_init(&ppgtt->ref);
1620 drm_mm_init(&ppgtt->base.mm, ppgtt->base.start, 2142 i915_address_space_init(&ppgtt->base, dev_priv);
1621 ppgtt->base.total);
1622 i915_init_vm(dev_priv, &ppgtt->base);
1623 } 2143 }
1624 2144
1625 return ret; 2145 return ret;
@@ -1982,6 +2502,36 @@ static int ggtt_bind_vma(struct i915_vma *vma,
1982 enum i915_cache_level cache_level, 2502 enum i915_cache_level cache_level,
1983 u32 flags) 2503 u32 flags)
1984{ 2504{
2505 struct drm_i915_gem_object *obj = vma->obj;
2506 u32 pte_flags = 0;
2507 int ret;
2508
2509 ret = i915_get_ggtt_vma_pages(vma);
2510 if (ret)
2511 return ret;
2512
2513 /* Currently applicable only to VLV */
2514 if (obj->gt_ro)
2515 pte_flags |= PTE_READ_ONLY;
2516
2517 vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
2518 vma->node.start,
2519 cache_level, pte_flags);
2520
2521 /*
2522 * Without aliasing PPGTT there's no difference between
2523 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2524 * upgrade to both bound if we bind either to avoid double-binding.
2525 */
2526 vma->bound |= GLOBAL_BIND | LOCAL_BIND;
2527
2528 return 0;
2529}
2530
2531static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2532 enum i915_cache_level cache_level,
2533 u32 flags)
2534{
1985 struct drm_device *dev = vma->vm->dev; 2535 struct drm_device *dev = vma->vm->dev;
1986 struct drm_i915_private *dev_priv = dev->dev_private; 2536 struct drm_i915_private *dev_priv = dev->dev_private;
1987 struct drm_i915_gem_object *obj = vma->obj; 2537 struct drm_i915_gem_object *obj = vma->obj;
@@ -1999,24 +2549,13 @@ static int ggtt_bind_vma(struct i915_vma *vma,
1999 pte_flags |= PTE_READ_ONLY; 2549 pte_flags |= PTE_READ_ONLY;
2000 2550
2001 2551
2002 if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) { 2552 if (flags & GLOBAL_BIND) {
2003 vma->vm->insert_entries(vma->vm, pages, 2553 vma->vm->insert_entries(vma->vm, pages,
2004 vma->node.start, 2554 vma->node.start,
2005 cache_level, pte_flags); 2555 cache_level, pte_flags);
2006
2007 /* Note the inconsistency here is due to absence of the
2008 * aliasing ppgtt on gen4 and earlier. Though we always
2009 * request PIN_USER for execbuffer (translated to LOCAL_BIND),
2010 * without the appgtt, we cannot honour that request and so
2011 * must substitute it with a global binding. Since we do this
2012 * behind the upper layers back, we need to explicitly set
2013 * the bound flag ourselves.
2014 */
2015 vma->bound |= GLOBAL_BIND;
2016
2017 } 2556 }
2018 2557
2019 if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) { 2558 if (flags & LOCAL_BIND) {
2020 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; 2559 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
2021 appgtt->base.insert_entries(&appgtt->base, pages, 2560 appgtt->base.insert_entries(&appgtt->base, pages,
2022 vma->node.start, 2561 vma->node.start,
@@ -2084,9 +2623,9 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
2084} 2623}
2085 2624
2086static int i915_gem_setup_global_gtt(struct drm_device *dev, 2625static int i915_gem_setup_global_gtt(struct drm_device *dev,
2087 unsigned long start, 2626 u64 start,
2088 unsigned long mappable_end, 2627 u64 mappable_end,
2089 unsigned long end) 2628 u64 end)
2090{ 2629{
2091 /* Let GEM Manage all of the aperture. 2630 /* Let GEM Manage all of the aperture.
2092 * 2631 *
@@ -2106,11 +2645,13 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
2106 2645
2107 BUG_ON(mappable_end > end); 2646 BUG_ON(mappable_end > end);
2108 2647
2109 /* Subtract the guard page ... */ 2648 ggtt_vm->start = start;
2110 drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
2111 2649
2112 dev_priv->gtt.base.start = start; 2650 /* Subtract the guard page before address space initialization to
2113 dev_priv->gtt.base.total = end - start; 2651 * shrink the range used by drm_mm */
2652 ggtt_vm->total = end - start - PAGE_SIZE;
2653 i915_address_space_init(ggtt_vm, dev_priv);
2654 ggtt_vm->total += PAGE_SIZE;
2114 2655
2115 if (intel_vgpu_active(dev)) { 2656 if (intel_vgpu_active(dev)) {
2116 ret = intel_vgt_balloon(dev); 2657 ret = intel_vgt_balloon(dev);
@@ -2119,13 +2660,13 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
2119 } 2660 }
2120 2661
2121 if (!HAS_LLC(dev)) 2662 if (!HAS_LLC(dev))
2122 dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust; 2663 ggtt_vm->mm.color_adjust = i915_gtt_color_adjust;
2123 2664
2124 /* Mark any preallocated objects as occupied */ 2665 /* Mark any preallocated objects as occupied */
2125 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 2666 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
2126 struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm); 2667 struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
2127 2668
2128 DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n", 2669 DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n",
2129 i915_gem_obj_ggtt_offset(obj), obj->base.size); 2670 i915_gem_obj_ggtt_offset(obj), obj->base.size);
2130 2671
2131 WARN_ON(i915_gem_obj_ggtt_bound(obj)); 2672 WARN_ON(i915_gem_obj_ggtt_bound(obj));
@@ -2135,6 +2676,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
2135 return ret; 2676 return ret;
2136 } 2677 }
2137 vma->bound |= GLOBAL_BIND; 2678 vma->bound |= GLOBAL_BIND;
2679 list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
2138 } 2680 }
2139 2681
2140 /* Clear any non-preallocated blocks */ 2682 /* Clear any non-preallocated blocks */
@@ -2177,6 +2719,8 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
2177 true); 2719 true);
2178 2720
2179 dev_priv->mm.aliasing_ppgtt = ppgtt; 2721 dev_priv->mm.aliasing_ppgtt = ppgtt;
2722 WARN_ON(dev_priv->gtt.base.bind_vma != ggtt_bind_vma);
2723 dev_priv->gtt.base.bind_vma = aliasing_gtt_bind_vma;
2180 } 2724 }
2181 2725
2182 return 0; 2726 return 0;
@@ -2367,8 +2911,8 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
2367 2911
2368 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b 2912 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
2369 * write would work. */ 2913 * write would work. */
2370 I915_WRITE(GEN8_PRIVATE_PAT, pat); 2914 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
2371 I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32); 2915 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
2372} 2916}
2373 2917
2374static void chv_setup_private_ppat(struct drm_i915_private *dev_priv) 2918static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
@@ -2402,8 +2946,8 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
2402 GEN8_PPAT(6, CHV_PPAT_SNOOP) | 2946 GEN8_PPAT(6, CHV_PPAT_SNOOP) |
2403 GEN8_PPAT(7, CHV_PPAT_SNOOP); 2947 GEN8_PPAT(7, CHV_PPAT_SNOOP);
2404 2948
2405 I915_WRITE(GEN8_PRIVATE_PAT, pat); 2949 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
2406 I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32); 2950 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
2407} 2951}
2408 2952
2409static int gen8_gmch_probe(struct drm_device *dev, 2953static int gen8_gmch_probe(struct drm_device *dev,
@@ -2722,15 +3266,18 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
2722 3266
2723} 3267}
2724 3268
2725static void 3269static struct scatterlist *
2726rotate_pages(dma_addr_t *in, unsigned int width, unsigned int height, 3270rotate_pages(dma_addr_t *in, unsigned int offset,
2727 struct sg_table *st) 3271 unsigned int width, unsigned int height,
3272 struct sg_table *st, struct scatterlist *sg)
2728{ 3273{
2729 unsigned int column, row; 3274 unsigned int column, row;
2730 unsigned int src_idx; 3275 unsigned int src_idx;
2731 struct scatterlist *sg = st->sgl;
2732 3276
2733 st->nents = 0; 3277 if (!sg) {
3278 st->nents = 0;
3279 sg = st->sgl;
3280 }
2734 3281
2735 for (column = 0; column < width; column++) { 3282 for (column = 0; column < width; column++) {
2736 src_idx = width * (height - 1) + column; 3283 src_idx = width * (height - 1) + column;
@@ -2741,12 +3288,14 @@ rotate_pages(dma_addr_t *in, unsigned int width, unsigned int height,
2741 * The only thing we need are DMA addresses. 3288 * The only thing we need are DMA addresses.
2742 */ 3289 */
2743 sg_set_page(sg, NULL, PAGE_SIZE, 0); 3290 sg_set_page(sg, NULL, PAGE_SIZE, 0);
2744 sg_dma_address(sg) = in[src_idx]; 3291 sg_dma_address(sg) = in[offset + src_idx];
2745 sg_dma_len(sg) = PAGE_SIZE; 3292 sg_dma_len(sg) = PAGE_SIZE;
2746 sg = sg_next(sg); 3293 sg = sg_next(sg);
2747 src_idx -= width; 3294 src_idx -= width;
2748 } 3295 }
2749 } 3296 }
3297
3298 return sg;
2750} 3299}
2751 3300
2752static struct sg_table * 3301static struct sg_table *
@@ -2755,10 +3304,13 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
2755{ 3304{
2756 struct intel_rotation_info *rot_info = &ggtt_view->rotation_info; 3305 struct intel_rotation_info *rot_info = &ggtt_view->rotation_info;
2757 unsigned int size_pages = rot_info->size >> PAGE_SHIFT; 3306 unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
3307 unsigned int size_pages_uv;
2758 struct sg_page_iter sg_iter; 3308 struct sg_page_iter sg_iter;
2759 unsigned long i; 3309 unsigned long i;
2760 dma_addr_t *page_addr_list; 3310 dma_addr_t *page_addr_list;
2761 struct sg_table *st; 3311 struct sg_table *st;
3312 unsigned int uv_start_page;
3313 struct scatterlist *sg;
2762 int ret = -ENOMEM; 3314 int ret = -ENOMEM;
2763 3315
2764 /* Allocate a temporary list of source pages for random access. */ 3316 /* Allocate a temporary list of source pages for random access. */
@@ -2767,12 +3319,18 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
2767 if (!page_addr_list) 3319 if (!page_addr_list)
2768 return ERR_PTR(ret); 3320 return ERR_PTR(ret);
2769 3321
3322 /* Account for UV plane with NV12. */
3323 if (rot_info->pixel_format == DRM_FORMAT_NV12)
3324 size_pages_uv = rot_info->size_uv >> PAGE_SHIFT;
3325 else
3326 size_pages_uv = 0;
3327
2770 /* Allocate target SG list. */ 3328 /* Allocate target SG list. */
2771 st = kmalloc(sizeof(*st), GFP_KERNEL); 3329 st = kmalloc(sizeof(*st), GFP_KERNEL);
2772 if (!st) 3330 if (!st)
2773 goto err_st_alloc; 3331 goto err_st_alloc;
2774 3332
2775 ret = sg_alloc_table(st, size_pages, GFP_KERNEL); 3333 ret = sg_alloc_table(st, size_pages + size_pages_uv, GFP_KERNEL);
2776 if (ret) 3334 if (ret)
2777 goto err_sg_alloc; 3335 goto err_sg_alloc;
2778 3336
@@ -2784,15 +3342,32 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
2784 } 3342 }
2785 3343
2786 /* Rotate the pages. */ 3344 /* Rotate the pages. */
2787 rotate_pages(page_addr_list, 3345 sg = rotate_pages(page_addr_list, 0,
2788 rot_info->width_pages, rot_info->height_pages, 3346 rot_info->width_pages, rot_info->height_pages,
2789 st); 3347 st, NULL);
3348
3349 /* Append the UV plane if NV12. */
3350 if (rot_info->pixel_format == DRM_FORMAT_NV12) {
3351 uv_start_page = size_pages;
3352
3353 /* Check for tile-row un-alignment. */
3354 if (offset_in_page(rot_info->uv_offset))
3355 uv_start_page--;
3356
3357 rot_info->uv_start_page = uv_start_page;
3358
3359 rotate_pages(page_addr_list, uv_start_page,
3360 rot_info->width_pages_uv,
3361 rot_info->height_pages_uv,
3362 st, sg);
3363 }
2790 3364
2791 DRM_DEBUG_KMS( 3365 DRM_DEBUG_KMS(
2792 "Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages).\n", 3366 "Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0)).\n",
2793 obj->base.size, rot_info->pitch, rot_info->height, 3367 obj->base.size, rot_info->pitch, rot_info->height,
2794 rot_info->pixel_format, rot_info->width_pages, 3368 rot_info->pixel_format, rot_info->width_pages,
2795 rot_info->height_pages, size_pages); 3369 rot_info->height_pages, size_pages + size_pages_uv,
3370 size_pages);
2796 3371
2797 drm_free_large(page_addr_list); 3372 drm_free_large(page_addr_list);
2798 3373
@@ -2804,10 +3379,11 @@ err_st_alloc:
2804 drm_free_large(page_addr_list); 3379 drm_free_large(page_addr_list);
2805 3380
2806 DRM_DEBUG_KMS( 3381 DRM_DEBUG_KMS(
2807 "Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages)\n", 3382 "Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0))\n",
2808 obj->base.size, ret, rot_info->pitch, rot_info->height, 3383 obj->base.size, ret, rot_info->pitch, rot_info->height,
2809 rot_info->pixel_format, rot_info->width_pages, 3384 rot_info->pixel_format, rot_info->width_pages,
2810 rot_info->height_pages, size_pages); 3385 rot_info->height_pages, size_pages + size_pages_uv,
3386 size_pages);
2811 return ERR_PTR(ret); 3387 return ERR_PTR(ret);
2812} 3388}
2813 3389
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index e1cfa292f9ad..a216397ead52 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -39,6 +39,8 @@ struct drm_i915_file_private;
39typedef uint32_t gen6_pte_t; 39typedef uint32_t gen6_pte_t;
40typedef uint64_t gen8_pte_t; 40typedef uint64_t gen8_pte_t;
41typedef uint64_t gen8_pde_t; 41typedef uint64_t gen8_pde_t;
42typedef uint64_t gen8_ppgtt_pdpe_t;
43typedef uint64_t gen8_ppgtt_pml4e_t;
42 44
43#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT) 45#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
44 46
@@ -88,9 +90,18 @@ typedef uint64_t gen8_pde_t;
88 * PDPE | PDE | PTE | offset 90 * PDPE | PDE | PTE | offset
89 * The difference as compared to normal x86 3 level page table is the PDPEs are 91 * The difference as compared to normal x86 3 level page table is the PDPEs are
90 * programmed via register. 92 * programmed via register.
93 *
94 * GEN8 48b legacy style address is defined as a 4 level page table:
95 * 47:39 | 38:30 | 29:21 | 20:12 | 11:0
96 * PML4E | PDPE | PDE | PTE | offset
91 */ 97 */
98#define GEN8_PML4ES_PER_PML4 512
99#define GEN8_PML4E_SHIFT 39
100#define GEN8_PML4E_MASK (GEN8_PML4ES_PER_PML4 - 1)
92#define GEN8_PDPE_SHIFT 30 101#define GEN8_PDPE_SHIFT 30
93#define GEN8_PDPE_MASK 0x3 102/* NB: GEN8_PDPE_MASK is untrue for 32b platforms, but it has no impact on 32b page
103 * tables */
104#define GEN8_PDPE_MASK 0x1ff
94#define GEN8_PDE_SHIFT 21 105#define GEN8_PDE_SHIFT 21
95#define GEN8_PDE_MASK 0x1ff 106#define GEN8_PDE_MASK 0x1ff
96#define GEN8_PTE_SHIFT 12 107#define GEN8_PTE_SHIFT 12
@@ -98,6 +109,9 @@ typedef uint64_t gen8_pde_t;
98#define GEN8_LEGACY_PDPES 4 109#define GEN8_LEGACY_PDPES 4
99#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t)) 110#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))
100 111
112#define I915_PDPES_PER_PDP(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
113 GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)
114
101#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD) 115#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
102#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */ 116#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
103#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */ 117#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
@@ -124,10 +138,14 @@ enum i915_ggtt_view_type {
124struct intel_rotation_info { 138struct intel_rotation_info {
125 unsigned int height; 139 unsigned int height;
126 unsigned int pitch; 140 unsigned int pitch;
141 unsigned int uv_offset;
127 uint32_t pixel_format; 142 uint32_t pixel_format;
128 uint64_t fb_modifier; 143 uint64_t fb_modifier;
129 unsigned int width_pages, height_pages; 144 unsigned int width_pages, height_pages;
130 uint64_t size; 145 uint64_t size;
146 unsigned int width_pages_uv, height_pages_uv;
147 uint64_t size_uv;
148 unsigned int uv_start_page;
131}; 149};
132 150
133struct i915_ggtt_view { 151struct i915_ggtt_view {
@@ -135,7 +153,7 @@ struct i915_ggtt_view {
135 153
136 union { 154 union {
137 struct { 155 struct {
138 unsigned long offset; 156 u64 offset;
139 unsigned int size; 157 unsigned int size;
140 } partial; 158 } partial;
141 } params; 159 } params;
@@ -241,9 +259,17 @@ struct i915_page_directory {
241}; 259};
242 260
243struct i915_page_directory_pointer { 261struct i915_page_directory_pointer {
244 /* struct page *page; */ 262 struct i915_page_dma base;
245 DECLARE_BITMAP(used_pdpes, GEN8_LEGACY_PDPES); 263
246 struct i915_page_directory *page_directory[GEN8_LEGACY_PDPES]; 264 unsigned long *used_pdpes;
265 struct i915_page_directory **page_directory;
266};
267
268struct i915_pml4 {
269 struct i915_page_dma base;
270
271 DECLARE_BITMAP(used_pml4es, GEN8_PML4ES_PER_PML4);
272 struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
247}; 273};
248 274
249struct i915_address_space { 275struct i915_address_space {
@@ -256,6 +282,7 @@ struct i915_address_space {
256 struct i915_page_scratch *scratch_page; 282 struct i915_page_scratch *scratch_page;
257 struct i915_page_table *scratch_pt; 283 struct i915_page_table *scratch_pt;
258 struct i915_page_directory *scratch_pd; 284 struct i915_page_directory *scratch_pd;
285 struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */
259 286
260 /** 287 /**
261 * List of objects currently involved in rendering. 288 * List of objects currently involved in rendering.
@@ -318,6 +345,7 @@ struct i915_gtt {
318 struct i915_address_space base; 345 struct i915_address_space base;
319 346
320 size_t stolen_size; /* Total size of stolen memory */ 347 size_t stolen_size; /* Total size of stolen memory */
348 size_t stolen_usable_size; /* Total size minus BIOS reserved */
321 u64 mappable_end; /* End offset that we can CPU map */ 349 u64 mappable_end; /* End offset that we can CPU map */
322 struct io_mapping *mappable; /* Mapping to our CPU mappable region */ 350 struct io_mapping *mappable; /* Mapping to our CPU mappable region */
323 phys_addr_t mappable_base; /* PA of our GMADR */ 351 phys_addr_t mappable_base; /* PA of our GMADR */
@@ -341,8 +369,9 @@ struct i915_hw_ppgtt {
341 struct drm_mm_node node; 369 struct drm_mm_node node;
342 unsigned long pd_dirty_rings; 370 unsigned long pd_dirty_rings;
343 union { 371 union {
344 struct i915_page_directory_pointer pdp; 372 struct i915_pml4 pml4; /* GEN8+ & 48b PPGTT */
345 struct i915_page_directory pd; 373 struct i915_page_directory_pointer pdp; /* GEN8+ */
374 struct i915_page_directory pd; /* GEN6-7 */
346 }; 375 };
347 376
348 struct drm_i915_file_private *file_priv; 377 struct drm_i915_file_private *file_priv;
@@ -365,7 +394,8 @@ struct i915_hw_ppgtt {
365 */ 394 */
366#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \ 395#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \
367 for (iter = gen6_pde_index(start); \ 396 for (iter = gen6_pde_index(start); \
368 pt = (pd)->page_table[iter], length > 0 && iter < I915_PDES; \ 397 length > 0 && iter < I915_PDES ? \
398 (pt = (pd)->page_table[iter]), 1 : 0; \
369 iter++, \ 399 iter++, \
370 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \ 400 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \
371 temp = min_t(unsigned, temp, length), \ 401 temp = min_t(unsigned, temp, length), \
@@ -430,30 +460,30 @@ static inline uint32_t gen6_pde_index(uint32_t addr)
430 */ 460 */
431#define gen8_for_each_pde(pt, pd, start, length, temp, iter) \ 461#define gen8_for_each_pde(pt, pd, start, length, temp, iter) \
432 for (iter = gen8_pde_index(start); \ 462 for (iter = gen8_pde_index(start); \
433 pt = (pd)->page_table[iter], length > 0 && iter < I915_PDES; \ 463 length > 0 && iter < I915_PDES ? \
464 (pt = (pd)->page_table[iter]), 1 : 0; \
434 iter++, \ 465 iter++, \
435 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT) - start, \ 466 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT) - start, \
436 temp = min(temp, length), \ 467 temp = min(temp, length), \
437 start += temp, length -= temp) 468 start += temp, length -= temp)
438 469
439#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \ 470#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \
440 for (iter = gen8_pdpe_index(start); \ 471 for (iter = gen8_pdpe_index(start); \
441 pd = (pdp)->page_directory[iter], length > 0 && iter < GEN8_LEGACY_PDPES; \ 472 length > 0 && (iter < I915_PDPES_PER_PDP(dev)) ? \
473 (pd = (pdp)->page_directory[iter]), 1 : 0; \
442 iter++, \ 474 iter++, \
443 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT) - start, \ 475 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT) - start, \
444 temp = min(temp, length), \ 476 temp = min(temp, length), \
445 start += temp, length -= temp) 477 start += temp, length -= temp)
446 478
447/* Clamp length to the next page_directory boundary */ 479#define gen8_for_each_pml4e(pdp, pml4, start, length, temp, iter) \
448static inline uint64_t gen8_clamp_pd(uint64_t start, uint64_t length) 480 for (iter = gen8_pml4e_index(start); \
449{ 481 length > 0 && iter < GEN8_PML4ES_PER_PML4 ? \
450 uint64_t next_pd = ALIGN(start + 1, 1 << GEN8_PDPE_SHIFT); 482 (pdp = (pml4)->pdps[iter]), 1 : 0; \
451 483 iter++, \
452 if (next_pd > (start + length)) 484 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT) - start, \
453 return length; 485 temp = min(temp, length), \
454 486 start += temp, length -= temp)
455 return next_pd - start;
456}
457 487
458static inline uint32_t gen8_pte_index(uint64_t address) 488static inline uint32_t gen8_pte_index(uint64_t address)
459{ 489{
@@ -472,8 +502,7 @@ static inline uint32_t gen8_pdpe_index(uint64_t address)
472 502
473static inline uint32_t gen8_pml4e_index(uint64_t address) 503static inline uint32_t gen8_pml4e_index(uint64_t address)
474{ 504{
475 WARN_ON(1); /* For 64B */ 505 return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
476 return 0;
477} 506}
478 507
479static inline size_t gen8_pte_count(uint64_t address, uint64_t length) 508static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 674341708033..f7df54a8ee2b 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -73,7 +73,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
73 */ 73 */
74unsigned long 74unsigned long
75i915_gem_shrink(struct drm_i915_private *dev_priv, 75i915_gem_shrink(struct drm_i915_private *dev_priv,
76 long target, unsigned flags) 76 unsigned long target, unsigned flags)
77{ 77{
78 const struct { 78 const struct {
79 struct list_head *list; 79 struct list_head *list;
@@ -85,6 +85,9 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
85 }, *phase; 85 }, *phase;
86 unsigned long count = 0; 86 unsigned long count = 0;
87 87
88 trace_i915_gem_shrink(dev_priv, target, flags);
89 i915_gem_retire_requests(dev_priv->dev);
90
88 /* 91 /*
89 * As we may completely rewrite the (un)bound list whilst unbinding 92 * As we may completely rewrite the (un)bound list whilst unbinding
90 * (due to retiring requests) we have to strictly process only 93 * (due to retiring requests) we have to strictly process only
@@ -123,6 +126,9 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
123 obj->madv != I915_MADV_DONTNEED) 126 obj->madv != I915_MADV_DONTNEED)
124 continue; 127 continue;
125 128
129 if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
130 continue;
131
126 drm_gem_object_reference(&obj->base); 132 drm_gem_object_reference(&obj->base);
127 133
128 /* For the unbound phase, this should be a no-op! */ 134 /* For the unbound phase, this should be a no-op! */
@@ -139,6 +145,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
139 list_splice(&still_in_list, phase->list); 145 list_splice(&still_in_list, phase->list);
140 } 146 }
141 147
148 i915_gem_retire_requests(dev_priv->dev);
149
142 return count; 150 return count;
143} 151}
144 152
@@ -158,9 +166,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
158 */ 166 */
159unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv) 167unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
160{ 168{
161 i915_gem_evict_everything(dev_priv->dev); 169 return i915_gem_shrink(dev_priv, -1UL,
162 return i915_gem_shrink(dev_priv, LONG_MAX, 170 I915_SHRINK_BOUND |
163 I915_SHRINK_BOUND | I915_SHRINK_UNBOUND); 171 I915_SHRINK_UNBOUND |
172 I915_SHRINK_ACTIVE);
164} 173}
165 174
166static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock) 175static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
@@ -213,7 +222,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
213 count += obj->base.size >> PAGE_SHIFT; 222 count += obj->base.size >> PAGE_SHIFT;
214 223
215 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 224 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
216 if (obj->pages_pin_count == num_vma_bound(obj)) 225 if (!obj->active && obj->pages_pin_count == num_vma_bound(obj))
217 count += obj->base.size >> PAGE_SHIFT; 226 count += obj->base.size >> PAGE_SHIFT;
218 } 227 }
219 228
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index f361c4a56995..cdacf3f5b77a 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -30,6 +30,9 @@
30#include <drm/i915_drm.h> 30#include <drm/i915_drm.h>
31#include "i915_drv.h" 31#include "i915_drv.h"
32 32
33#define KB(x) ((x) * 1024)
34#define MB(x) (KB(x) * 1024)
35
33/* 36/*
34 * The BIOS typically reserves some of the system's memory for the exclusive 37 * The BIOS typically reserves some of the system's memory for the exclusive
35 * use of the integrated graphics. This memory is no longer available for 38 * use of the integrated graphics. This memory is no longer available for
@@ -42,23 +45,38 @@
42 * for is a boon. 45 * for is a boon.
43 */ 46 */
44 47
45int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, 48int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
46 struct drm_mm_node *node, u64 size, 49 struct drm_mm_node *node, u64 size,
47 unsigned alignment) 50 unsigned alignment, u64 start, u64 end)
48{ 51{
49 int ret; 52 int ret;
50 53
51 if (!drm_mm_initialized(&dev_priv->mm.stolen)) 54 if (!drm_mm_initialized(&dev_priv->mm.stolen))
52 return -ENODEV; 55 return -ENODEV;
53 56
57 /* See the comment at the drm_mm_init() call for more about this check.
58 * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
59 if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096)
60 start = 4096;
61
54 mutex_lock(&dev_priv->mm.stolen_lock); 62 mutex_lock(&dev_priv->mm.stolen_lock);
55 ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, size, alignment, 63 ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
56 DRM_MM_SEARCH_DEFAULT); 64 alignment, start, end,
65 DRM_MM_SEARCH_DEFAULT);
57 mutex_unlock(&dev_priv->mm.stolen_lock); 66 mutex_unlock(&dev_priv->mm.stolen_lock);
58 67
59 return ret; 68 return ret;
60} 69}
61 70
71int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
72 struct drm_mm_node *node, u64 size,
73 unsigned alignment)
74{
75 return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
76 alignment, 0,
77 dev_priv->gtt.stolen_usable_size);
78}
79
62void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, 80void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
63 struct drm_mm_node *node) 81 struct drm_mm_node *node)
64{ 82{
@@ -76,24 +94,91 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
76 /* Almost universally we can find the Graphics Base of Stolen Memory 94 /* Almost universally we can find the Graphics Base of Stolen Memory
77 * at offset 0x5c in the igfx configuration space. On a few (desktop) 95 * at offset 0x5c in the igfx configuration space. On a few (desktop)
78 * machines this is also mirrored in the bridge device at different 96 * machines this is also mirrored in the bridge device at different
79 * locations, or in the MCHBAR. On gen2, the layout is again slightly 97 * locations, or in the MCHBAR.
80 * different with the Graphics Segment immediately following Top of 98 *
81 * Memory (or Top of Usable DRAM). Note it appears that TOUD is only 99 * On 865 we just check the TOUD register.
82 * reported by 865g, so we just use the top of memory as determined 100 *
83 * by the e820 probe. 101 * On 830/845/85x the stolen memory base isn't available in any
102 * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
84 * 103 *
85 * XXX However gen2 requires an unavailable symbol.
86 */ 104 */
87 base = 0; 105 base = 0;
88 if (INTEL_INFO(dev)->gen >= 3) { 106 if (INTEL_INFO(dev)->gen >= 3) {
89 /* Read Graphics Base of Stolen Memory directly */ 107 /* Read Graphics Base of Stolen Memory directly */
90 pci_read_config_dword(dev->pdev, 0x5c, &base); 108 pci_read_config_dword(dev->pdev, 0x5c, &base);
91 base &= ~((1<<20) - 1); 109 base &= ~((1<<20) - 1);
92 } else { /* GEN2 */ 110 } else if (IS_I865G(dev)) {
93#if 0 111 u16 toud = 0;
94 /* Stolen is immediately above Top of Memory */ 112
95 base = max_low_pfn_mapped << PAGE_SHIFT; 113 /*
96#endif 114 * FIXME is the graphics stolen memory region
115 * always at TOUD? Ie. is it always the last
116 * one to be allocated by the BIOS?
117 */
118 pci_bus_read_config_word(dev->pdev->bus, PCI_DEVFN(0, 0),
119 I865_TOUD, &toud);
120
121 base = toud << 16;
122 } else if (IS_I85X(dev)) {
123 u32 tseg_size = 0;
124 u32 tom;
125 u8 tmp;
126
127 pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
128 I85X_ESMRAMC, &tmp);
129
130 if (tmp & TSEG_ENABLE)
131 tseg_size = MB(1);
132
133 pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 1),
134 I85X_DRB3, &tmp);
135 tom = tmp * MB(32);
136
137 base = tom - tseg_size - dev_priv->gtt.stolen_size;
138 } else if (IS_845G(dev)) {
139 u32 tseg_size = 0;
140 u32 tom;
141 u8 tmp;
142
143 pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
144 I845_ESMRAMC, &tmp);
145
146 if (tmp & TSEG_ENABLE) {
147 switch (tmp & I845_TSEG_SIZE_MASK) {
148 case I845_TSEG_SIZE_512K:
149 tseg_size = KB(512);
150 break;
151 case I845_TSEG_SIZE_1M:
152 tseg_size = MB(1);
153 break;
154 }
155 }
156
157 pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
158 I830_DRB3, &tmp);
159 tom = tmp * MB(32);
160
161 base = tom - tseg_size - dev_priv->gtt.stolen_size;
162 } else if (IS_I830(dev)) {
163 u32 tseg_size = 0;
164 u32 tom;
165 u8 tmp;
166
167 pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
168 I830_ESMRAMC, &tmp);
169
170 if (tmp & TSEG_ENABLE) {
171 if (tmp & I830_TSEG_SIZE_1M)
172 tseg_size = MB(1);
173 else
174 tseg_size = KB(512);
175 }
176
177 pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
178 I830_DRB3, &tmp);
179 tom = tmp * MB(32);
180
181 base = tom - tseg_size - dev_priv->gtt.stolen_size;
97 } 182 }
98 183
99 if (base == 0) 184 if (base == 0)
@@ -186,6 +271,29 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
186 drm_mm_takedown(&dev_priv->mm.stolen); 271 drm_mm_takedown(&dev_priv->mm.stolen);
187} 272}
188 273
274static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
275 unsigned long *base, unsigned long *size)
276{
277 uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
278 CTG_STOLEN_RESERVED :
279 ELK_STOLEN_RESERVED);
280 unsigned long stolen_top = dev_priv->mm.stolen_base +
281 dev_priv->gtt.stolen_size;
282
283 *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
284
285 WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);
286
287 /* On these platforms, the register doesn't have a size field, so the
288 * size is the distance between the base and the top of the stolen
289 * memory. We also have the genuine case where base is zero and there's
290 * nothing reserved. */
291 if (*base == 0)
292 *size = 0;
293 else
294 *size = stolen_top - *base;
295}
296
189static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv, 297static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
190 unsigned long *base, unsigned long *size) 298 unsigned long *base, unsigned long *size)
191{ 299{
@@ -281,7 +389,7 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
281int i915_gem_init_stolen(struct drm_device *dev) 389int i915_gem_init_stolen(struct drm_device *dev)
282{ 390{
283 struct drm_i915_private *dev_priv = dev->dev_private; 391 struct drm_i915_private *dev_priv = dev->dev_private;
284 unsigned long reserved_total, reserved_base, reserved_size; 392 unsigned long reserved_total, reserved_base = 0, reserved_size;
285 unsigned long stolen_top; 393 unsigned long stolen_top;
286 394
287 mutex_init(&dev_priv->mm.stolen_lock); 395 mutex_init(&dev_priv->mm.stolen_lock);
@@ -305,7 +413,12 @@ int i915_gem_init_stolen(struct drm_device *dev)
305 switch (INTEL_INFO(dev_priv)->gen) { 413 switch (INTEL_INFO(dev_priv)->gen) {
306 case 2: 414 case 2:
307 case 3: 415 case 3:
416 break;
308 case 4: 417 case 4:
418 if (IS_G4X(dev))
419 g4x_get_stolen_reserved(dev_priv, &reserved_base,
420 &reserved_size);
421 break;
309 case 5: 422 case 5:
310 /* Assume the gen6 maximum for the older platforms. */ 423 /* Assume the gen6 maximum for the older platforms. */
311 reserved_size = 1024 * 1024; 424 reserved_size = 1024 * 1024;
@@ -352,9 +465,21 @@ int i915_gem_init_stolen(struct drm_device *dev)
352 dev_priv->gtt.stolen_size >> 10, 465 dev_priv->gtt.stolen_size >> 10,
353 (dev_priv->gtt.stolen_size - reserved_total) >> 10); 466 (dev_priv->gtt.stolen_size - reserved_total) >> 10);
354 467
355 /* Basic memrange allocator for stolen space */ 468 dev_priv->gtt.stolen_usable_size = dev_priv->gtt.stolen_size -
356 drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size - 469 reserved_total;
357 reserved_total); 470
471 /*
472 * Basic memrange allocator for stolen space.
473 *
474 * TODO: Notice that some platforms require us to not use the first page
475 * of the stolen memory but their BIOSes may still put the framebuffer
476 * on the first page. So we don't reserve this page for now because of
477 * that. Our current solution is to just prevent new nodes from being
478 * inserted on the first page - see the check we have at
479 * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
480 * problem later.
481 */
482 drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size);
358 483
359 return 0; 484 return 0;
360} 485}
@@ -544,7 +669,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
544 vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt); 669 vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
545 if (IS_ERR(vma)) { 670 if (IS_ERR(vma)) {
546 ret = PTR_ERR(vma); 671 ret = PTR_ERR(vma);
547 goto err_out; 672 goto err;
548 } 673 }
549 674
550 /* To simplify the initialisation sequence between KMS and GTT, 675 /* To simplify the initialisation sequence between KMS and GTT,
@@ -558,23 +683,19 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
558 ret = drm_mm_reserve_node(&ggtt->mm, &vma->node); 683 ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
559 if (ret) { 684 if (ret) {
560 DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); 685 DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
561 goto err_vma; 686 goto err;
562 } 687 }
563 }
564 688
565 vma->bound |= GLOBAL_BIND; 689 vma->bound |= GLOBAL_BIND;
690 list_add_tail(&vma->mm_list, &ggtt->inactive_list);
691 }
566 692
567 list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); 693 list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
568 list_add_tail(&vma->mm_list, &ggtt->inactive_list);
569 i915_gem_object_pin_pages(obj); 694 i915_gem_object_pin_pages(obj);
570 695
571 return obj; 696 return obj;
572 697
573err_vma: 698err:
574 i915_gem_vma_destroy(vma);
575err_out:
576 i915_gem_stolen_remove_node(dev_priv, stolen);
577 kfree(stolen);
578 drm_gem_object_unreference(&obj->base); 699 drm_gem_object_unreference(&obj->base);
579 return NULL; 700 return NULL;
580} 701}
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index a96b9006a51e..19fb0bddc1cd 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -50,7 +50,6 @@ struct i915_mmu_notifier {
50 struct mmu_notifier mn; 50 struct mmu_notifier mn;
51 struct rb_root objects; 51 struct rb_root objects;
52 struct list_head linear; 52 struct list_head linear;
53 unsigned long serial;
54 bool has_linear; 53 bool has_linear;
55}; 54};
56 55
@@ -59,13 +58,16 @@ struct i915_mmu_object {
59 struct interval_tree_node it; 58 struct interval_tree_node it;
60 struct list_head link; 59 struct list_head link;
61 struct drm_i915_gem_object *obj; 60 struct drm_i915_gem_object *obj;
61 struct work_struct work;
62 bool active;
62 bool is_linear; 63 bool is_linear;
63}; 64};
64 65
65static unsigned long cancel_userptr(struct drm_i915_gem_object *obj) 66static void __cancel_userptr__worker(struct work_struct *work)
66{ 67{
68 struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
69 struct drm_i915_gem_object *obj = mo->obj;
67 struct drm_device *dev = obj->base.dev; 70 struct drm_device *dev = obj->base.dev;
68 unsigned long end;
69 71
70 mutex_lock(&dev->struct_mutex); 72 mutex_lock(&dev->struct_mutex);
71 /* Cancel any active worker and force us to re-evaluate gup */ 73 /* Cancel any active worker and force us to re-evaluate gup */
@@ -88,45 +90,28 @@ static unsigned long cancel_userptr(struct drm_i915_gem_object *obj)
88 dev_priv->mm.interruptible = was_interruptible; 90 dev_priv->mm.interruptible = was_interruptible;
89 } 91 }
90 92
91 end = obj->userptr.ptr + obj->base.size;
92
93 drm_gem_object_unreference(&obj->base); 93 drm_gem_object_unreference(&obj->base);
94 mutex_unlock(&dev->struct_mutex); 94 mutex_unlock(&dev->struct_mutex);
95
96 return end;
97} 95}
98 96
99static void *invalidate_range__linear(struct i915_mmu_notifier *mn, 97static unsigned long cancel_userptr(struct i915_mmu_object *mo)
100 struct mm_struct *mm,
101 unsigned long start,
102 unsigned long end)
103{ 98{
104 struct i915_mmu_object *mo; 99 unsigned long end = mo->obj->userptr.ptr + mo->obj->base.size;
105 unsigned long serial; 100
106 101 /* The mmu_object is released late when destroying the
107restart: 102 * GEM object so it is entirely possible to gain a
108 serial = mn->serial; 103 * reference on an object in the process of being freed
109 list_for_each_entry(mo, &mn->linear, link) { 104 * since our serialisation is via the spinlock and not
110 struct drm_i915_gem_object *obj; 105 * the struct_mutex - and consequently use it after it
111 106 * is freed and then double free it.
112 if (mo->it.last < start || mo->it.start > end) 107 */
113 continue; 108 if (mo->active && kref_get_unless_zero(&mo->obj->base.refcount)) {
114 109 schedule_work(&mo->work);
115 obj = mo->obj; 110 /* only schedule one work packet to avoid the refleak */
116 111 mo->active = false;
117 if (!kref_get_unless_zero(&obj->base.refcount))
118 continue;
119
120 spin_unlock(&mn->lock);
121
122 cancel_userptr(obj);
123
124 spin_lock(&mn->lock);
125 if (serial != mn->serial)
126 goto restart;
127 } 112 }
128 113
129 return NULL; 114 return end;
130} 115}
131 116
132static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, 117static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
@@ -134,46 +119,32 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
134 unsigned long start, 119 unsigned long start,
135 unsigned long end) 120 unsigned long end)
136{ 121{
137 struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn); 122 struct i915_mmu_notifier *mn =
138 struct interval_tree_node *it = NULL; 123 container_of(_mn, struct i915_mmu_notifier, mn);
139 unsigned long next = start; 124 struct i915_mmu_object *mo;
140 unsigned long serial = 0; 125
141 126 /* interval ranges are inclusive, but invalidate range is exclusive */
142 end--; /* interval ranges are inclusive, but invalidate range is exclusive */ 127 end--;
143 while (next < end) { 128
144 struct drm_i915_gem_object *obj = NULL; 129 spin_lock(&mn->lock);
145 130 if (mn->has_linear) {
146 spin_lock(&mn->lock); 131 list_for_each_entry(mo, &mn->linear, link) {
147 if (mn->has_linear) 132 if (mo->it.last < start || mo->it.start > end)
148 it = invalidate_range__linear(mn, mm, start, end);
149 else if (serial == mn->serial)
150 it = interval_tree_iter_next(it, next, end);
151 else
152 it = interval_tree_iter_first(&mn->objects, start, end);
153 if (it != NULL) {
154 obj = container_of(it, struct i915_mmu_object, it)->obj;
155
156 /* The mmu_object is released late when destroying the
157 * GEM object so it is entirely possible to gain a
158 * reference on an object in the process of being freed
159 * since our serialisation is via the spinlock and not
160 * the struct_mutex - and consequently use it after it
161 * is freed and then double free it.
162 */
163 if (!kref_get_unless_zero(&obj->base.refcount)) {
164 spin_unlock(&mn->lock);
165 serial = 0;
166 continue; 133 continue;
167 }
168 134
169 serial = mn->serial; 135 cancel_userptr(mo);
170 } 136 }
171 spin_unlock(&mn->lock); 137 } else {
172 if (obj == NULL) 138 struct interval_tree_node *it;
173 return;
174 139
175 next = cancel_userptr(obj); 140 it = interval_tree_iter_first(&mn->objects, start, end);
141 while (it) {
142 mo = container_of(it, struct i915_mmu_object, it);
143 start = cancel_userptr(mo);
144 it = interval_tree_iter_next(it, start, end);
145 }
176 } 146 }
147 spin_unlock(&mn->lock);
177} 148}
178 149
179static const struct mmu_notifier_ops i915_gem_userptr_notifier = { 150static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
@@ -193,7 +164,6 @@ i915_mmu_notifier_create(struct mm_struct *mm)
193 spin_lock_init(&mn->lock); 164 spin_lock_init(&mn->lock);
194 mn->mn.ops = &i915_gem_userptr_notifier; 165 mn->mn.ops = &i915_gem_userptr_notifier;
195 mn->objects = RB_ROOT; 166 mn->objects = RB_ROOT;
196 mn->serial = 1;
197 INIT_LIST_HEAD(&mn->linear); 167 INIT_LIST_HEAD(&mn->linear);
198 mn->has_linear = false; 168 mn->has_linear = false;
199 169
@@ -207,12 +177,6 @@ i915_mmu_notifier_create(struct mm_struct *mm)
207 return mn; 177 return mn;
208} 178}
209 179
210static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn)
211{
212 if (++mn->serial == 0)
213 mn->serial = 1;
214}
215
216static int 180static int
217i915_mmu_notifier_add(struct drm_device *dev, 181i915_mmu_notifier_add(struct drm_device *dev,
218 struct i915_mmu_notifier *mn, 182 struct i915_mmu_notifier *mn,
@@ -259,10 +223,9 @@ i915_mmu_notifier_add(struct drm_device *dev,
259 } else 223 } else
260 interval_tree_insert(&mo->it, &mn->objects); 224 interval_tree_insert(&mo->it, &mn->objects);
261 225
262 if (ret == 0) { 226 if (ret == 0)
263 list_add(&mo->link, &mn->linear); 227 list_add(&mo->link, &mn->linear);
264 __i915_mmu_notifier_update_serial(mn); 228
265 }
266 spin_unlock(&mn->lock); 229 spin_unlock(&mn->lock);
267 mutex_unlock(&dev->struct_mutex); 230 mutex_unlock(&dev->struct_mutex);
268 231
@@ -290,7 +253,6 @@ i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
290 mn->has_linear = i915_mmu_notifier_has_linear(mn); 253 mn->has_linear = i915_mmu_notifier_has_linear(mn);
291 else 254 else
292 interval_tree_remove(&mo->it, &mn->objects); 255 interval_tree_remove(&mo->it, &mn->objects);
293 __i915_mmu_notifier_update_serial(mn);
294 spin_unlock(&mn->lock); 256 spin_unlock(&mn->lock);
295} 257}
296 258
@@ -357,6 +319,7 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
357 mo->it.start = obj->userptr.ptr; 319 mo->it.start = obj->userptr.ptr;
358 mo->it.last = mo->it.start + obj->base.size - 1; 320 mo->it.last = mo->it.start + obj->base.size - 1;
359 mo->obj = obj; 321 mo->obj = obj;
322 INIT_WORK(&mo->work, __cancel_userptr__worker);
360 323
361 ret = i915_mmu_notifier_add(obj->base.dev, mn, mo); 324 ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
362 if (ret) { 325 if (ret) {
@@ -565,31 +528,65 @@ __i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
565 return ret; 528 return ret;
566} 529}
567 530
531static int
532__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
533 bool value)
534{
535 int ret = 0;
536
537 /* During mm_invalidate_range we need to cancel any userptr that
538 * overlaps the range being invalidated. Doing so requires the
539 * struct_mutex, and that risks recursion. In order to cause
540 * recursion, the user must alias the userptr address space with
541 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
542 * to invalidate that mmaping, mm_invalidate_range is called with
543 * the userptr address *and* the struct_mutex held. To prevent that
544 * we set a flag under the i915_mmu_notifier spinlock to indicate
545 * whether this object is valid.
546 */
547#if defined(CONFIG_MMU_NOTIFIER)
548 if (obj->userptr.mmu_object == NULL)
549 return 0;
550
551 spin_lock(&obj->userptr.mmu_object->mn->lock);
552 /* In order to serialise get_pages with an outstanding
553 * cancel_userptr, we must drop the struct_mutex and try again.
554 */
555 if (!value || !work_pending(&obj->userptr.mmu_object->work))
556 obj->userptr.mmu_object->active = value;
557 else
558 ret = -EAGAIN;
559 spin_unlock(&obj->userptr.mmu_object->mn->lock);
560#endif
561
562 return ret;
563}
564
568static void 565static void
569__i915_gem_userptr_get_pages_worker(struct work_struct *_work) 566__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
570{ 567{
571 struct get_pages_work *work = container_of(_work, typeof(*work), work); 568 struct get_pages_work *work = container_of(_work, typeof(*work), work);
572 struct drm_i915_gem_object *obj = work->obj; 569 struct drm_i915_gem_object *obj = work->obj;
573 struct drm_device *dev = obj->base.dev; 570 struct drm_device *dev = obj->base.dev;
574 const int num_pages = obj->base.size >> PAGE_SHIFT; 571 const int npages = obj->base.size >> PAGE_SHIFT;
575 struct page **pvec; 572 struct page **pvec;
576 int pinned, ret; 573 int pinned, ret;
577 574
578 ret = -ENOMEM; 575 ret = -ENOMEM;
579 pinned = 0; 576 pinned = 0;
580 577
581 pvec = kmalloc(num_pages*sizeof(struct page *), 578 pvec = kmalloc(npages*sizeof(struct page *),
582 GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); 579 GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
583 if (pvec == NULL) 580 if (pvec == NULL)
584 pvec = drm_malloc_ab(num_pages, sizeof(struct page *)); 581 pvec = drm_malloc_ab(npages, sizeof(struct page *));
585 if (pvec != NULL) { 582 if (pvec != NULL) {
586 struct mm_struct *mm = obj->userptr.mm->mm; 583 struct mm_struct *mm = obj->userptr.mm->mm;
587 584
588 down_read(&mm->mmap_sem); 585 down_read(&mm->mmap_sem);
589 while (pinned < num_pages) { 586 while (pinned < npages) {
590 ret = get_user_pages(work->task, mm, 587 ret = get_user_pages(work->task, mm,
591 obj->userptr.ptr + pinned * PAGE_SIZE, 588 obj->userptr.ptr + pinned * PAGE_SIZE,
592 num_pages - pinned, 589 npages - pinned,
593 !obj->userptr.read_only, 0, 590 !obj->userptr.read_only, 0,
594 pvec + pinned, NULL); 591 pvec + pinned, NULL);
595 if (ret < 0) 592 if (ret < 0)
@@ -601,20 +598,22 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
601 } 598 }
602 599
603 mutex_lock(&dev->struct_mutex); 600 mutex_lock(&dev->struct_mutex);
604 if (obj->userptr.work != &work->work) { 601 if (obj->userptr.work == &work->work) {
605 ret = 0; 602 if (pinned == npages) {
606 } else if (pinned == num_pages) { 603 ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
607 ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages); 604 if (ret == 0) {
608 if (ret == 0) { 605 list_add_tail(&obj->global_list,
609 list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list); 606 &to_i915(dev)->mm.unbound_list);
610 obj->get_page.sg = obj->pages->sgl; 607 obj->get_page.sg = obj->pages->sgl;
611 obj->get_page.last = 0; 608 obj->get_page.last = 0;
612 609 pinned = 0;
613 pinned = 0; 610 }
614 } 611 }
612 obj->userptr.work = ERR_PTR(ret);
613 if (ret)
614 __i915_gem_userptr_set_active(obj, false);
615 } 615 }
616 616
617 obj->userptr.work = ERR_PTR(ret);
618 obj->userptr.workers--; 617 obj->userptr.workers--;
619 drm_gem_object_unreference(&obj->base); 618 drm_gem_object_unreference(&obj->base);
620 mutex_unlock(&dev->struct_mutex); 619 mutex_unlock(&dev->struct_mutex);
@@ -627,11 +626,60 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
627} 626}
628 627
629static int 628static int
629__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
630 bool *active)
631{
632 struct get_pages_work *work;
633
634 /* Spawn a worker so that we can acquire the
635 * user pages without holding our mutex. Access
636 * to the user pages requires mmap_sem, and we have
637 * a strict lock ordering of mmap_sem, struct_mutex -
638 * we already hold struct_mutex here and so cannot
639 * call gup without encountering a lock inversion.
640 *
641 * Userspace will keep on repeating the operation
642 * (thanks to EAGAIN) until either we hit the fast
643 * path or the worker completes. If the worker is
644 * cancelled or superseded, the task is still run
645 * but the results ignored. (This leads to
646 * complications that we may have a stray object
647 * refcount that we need to be wary of when
648 * checking for existing objects during creation.)
649 * If the worker encounters an error, it reports
650 * that error back to this function through
651 * obj->userptr.work = ERR_PTR.
652 */
653 if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
654 return -EAGAIN;
655
656 work = kmalloc(sizeof(*work), GFP_KERNEL);
657 if (work == NULL)
658 return -ENOMEM;
659
660 obj->userptr.work = &work->work;
661 obj->userptr.workers++;
662
663 work->obj = obj;
664 drm_gem_object_reference(&obj->base);
665
666 work->task = current;
667 get_task_struct(work->task);
668
669 INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
670 schedule_work(&work->work);
671
672 *active = true;
673 return -EAGAIN;
674}
675
676static int
630i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) 677i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
631{ 678{
632 const int num_pages = obj->base.size >> PAGE_SHIFT; 679 const int num_pages = obj->base.size >> PAGE_SHIFT;
633 struct page **pvec; 680 struct page **pvec;
634 int pinned, ret; 681 int pinned, ret;
682 bool active;
635 683
636 /* If userspace should engineer that these pages are replaced in 684 /* If userspace should engineer that these pages are replaced in
637 * the vma between us binding this page into the GTT and completion 685 * the vma between us binding this page into the GTT and completion
@@ -649,6 +697,20 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
649 * to the vma (discard or cloning) which should prevent the more 697 * to the vma (discard or cloning) which should prevent the more
650 * egregious cases from causing harm. 698 * egregious cases from causing harm.
651 */ 699 */
700 if (IS_ERR(obj->userptr.work)) {
701 /* active flag will have been dropped already by the worker */
702 ret = PTR_ERR(obj->userptr.work);
703 obj->userptr.work = NULL;
704 return ret;
705 }
706 if (obj->userptr.work)
707 /* active flag should still be held for the pending work */
708 return -EAGAIN;
709
710 /* Let the mmu-notifier know that we have begun and need cancellation */
711 ret = __i915_gem_userptr_set_active(obj, true);
712 if (ret)
713 return ret;
652 714
653 pvec = NULL; 715 pvec = NULL;
654 pinned = 0; 716 pinned = 0;
@@ -657,73 +719,27 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
657 GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); 719 GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
658 if (pvec == NULL) { 720 if (pvec == NULL) {
659 pvec = drm_malloc_ab(num_pages, sizeof(struct page *)); 721 pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
660 if (pvec == NULL) 722 if (pvec == NULL) {
723 __i915_gem_userptr_set_active(obj, false);
661 return -ENOMEM; 724 return -ENOMEM;
725 }
662 } 726 }
663 727
664 pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages, 728 pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
665 !obj->userptr.read_only, pvec); 729 !obj->userptr.read_only, pvec);
666 } 730 }
667 if (pinned < num_pages) { 731
668 if (pinned < 0) { 732 active = false;
669 ret = pinned; 733 if (pinned < 0)
670 pinned = 0; 734 ret = pinned, pinned = 0;
671 } else { 735 else if (pinned < num_pages)
672 /* Spawn a worker so that we can acquire the 736 ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
673 * user pages without holding our mutex. Access 737 else
674 * to the user pages requires mmap_sem, and we have
675 * a strict lock ordering of mmap_sem, struct_mutex -
676 * we already hold struct_mutex here and so cannot
677 * call gup without encountering a lock inversion.
678 *
679 * Userspace will keep on repeating the operation
680 * (thanks to EAGAIN) until either we hit the fast
681 * path or the worker completes. If the worker is
682 * cancelled or superseded, the task is still run
683 * but the results ignored. (This leads to
684 * complications that we may have a stray object
685 * refcount that we need to be wary of when
686 * checking for existing objects during creation.)
687 * If the worker encounters an error, it reports
688 * that error back to this function through
689 * obj->userptr.work = ERR_PTR.
690 */
691 ret = -EAGAIN;
692 if (obj->userptr.work == NULL &&
693 obj->userptr.workers < I915_GEM_USERPTR_MAX_WORKERS) {
694 struct get_pages_work *work;
695
696 work = kmalloc(sizeof(*work), GFP_KERNEL);
697 if (work != NULL) {
698 obj->userptr.work = &work->work;
699 obj->userptr.workers++;
700
701 work->obj = obj;
702 drm_gem_object_reference(&obj->base);
703
704 work->task = current;
705 get_task_struct(work->task);
706
707 INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
708 schedule_work(&work->work);
709 } else
710 ret = -ENOMEM;
711 } else {
712 if (IS_ERR(obj->userptr.work)) {
713 ret = PTR_ERR(obj->userptr.work);
714 obj->userptr.work = NULL;
715 }
716 }
717 }
718 } else {
719 ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages); 738 ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
720 if (ret == 0) { 739 if (ret) {
721 obj->userptr.work = NULL; 740 __i915_gem_userptr_set_active(obj, active);
722 pinned = 0; 741 release_pages(pvec, pinned, 0);
723 }
724 } 742 }
725
726 release_pages(pvec, pinned, 0);
727 drm_free_large(pvec); 743 drm_free_large(pvec);
728 return ret; 744 return ret;
729} 745}
@@ -734,6 +750,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
734 struct sg_page_iter sg_iter; 750 struct sg_page_iter sg_iter;
735 751
736 BUG_ON(obj->userptr.work != NULL); 752 BUG_ON(obj->userptr.work != NULL);
753 __i915_gem_userptr_set_active(obj, false);
737 754
738 if (obj->madv != I915_MADV_WILLNEED) 755 if (obj->madv != I915_MADV_WILLNEED)
739 obj->dirty = 0; 756 obj->dirty = 0;
@@ -816,7 +833,6 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
816int 833int
817i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file) 834i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
818{ 835{
819 struct drm_i915_private *dev_priv = dev->dev_private;
820 struct drm_i915_gem_userptr *args = data; 836 struct drm_i915_gem_userptr *args = data;
821 struct drm_i915_gem_object *obj; 837 struct drm_i915_gem_object *obj;
822 int ret; 838 int ret;
@@ -829,9 +845,6 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
829 if (offset_in_page(args->user_ptr | args->user_size)) 845 if (offset_in_page(args->user_ptr | args->user_size))
830 return -EINVAL; 846 return -EINVAL;
831 847
832 if (args->user_size > dev_priv->gtt.base.total)
833 return -E2BIG;
834
835 if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE, 848 if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
836 (char __user *)(unsigned long)args->user_ptr, args->user_size)) 849 (char __user *)(unsigned long)args->user_ptr, args->user_size))
837 return -EFAULT; 850 return -EFAULT;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 41d0739e6fdf..2f04e4f2ff35 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -30,11 +30,6 @@
30#include <generated/utsrelease.h> 30#include <generated/utsrelease.h>
31#include "i915_drv.h" 31#include "i915_drv.h"
32 32
33static const char *yesno(int v)
34{
35 return v ? "yes" : "no";
36}
37
38static const char *ring_str(int ring) 33static const char *ring_str(int ring)
39{ 34{
40 switch (ring) { 35 switch (ring) {
@@ -197,8 +192,9 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
197 err_printf(m, " %s [%d]:\n", name, count); 192 err_printf(m, " %s [%d]:\n", name, count);
198 193
199 while (count--) { 194 while (count--) {
200 err_printf(m, " %08x %8u %02x %02x [ ", 195 err_printf(m, " %08x_%08x %8u %02x %02x [ ",
201 err->gtt_offset, 196 upper_32_bits(err->gtt_offset),
197 lower_32_bits(err->gtt_offset),
202 err->size, 198 err->size,
203 err->read_domains, 199 err->read_domains,
204 err->write_domain); 200 err->write_domain);
@@ -427,15 +423,17 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
427 err_printf(m, " (submitted by %s [%d])", 423 err_printf(m, " (submitted by %s [%d])",
428 error->ring[i].comm, 424 error->ring[i].comm,
429 error->ring[i].pid); 425 error->ring[i].pid);
430 err_printf(m, " --- gtt_offset = 0x%08x\n", 426 err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
431 obj->gtt_offset); 427 upper_32_bits(obj->gtt_offset),
428 lower_32_bits(obj->gtt_offset));
432 print_error_obj(m, obj); 429 print_error_obj(m, obj);
433 } 430 }
434 431
435 obj = error->ring[i].wa_batchbuffer; 432 obj = error->ring[i].wa_batchbuffer;
436 if (obj) { 433 if (obj) {
437 err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n", 434 err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
438 dev_priv->ring[i].name, obj->gtt_offset); 435 dev_priv->ring[i].name,
436 lower_32_bits(obj->gtt_offset));
439 print_error_obj(m, obj); 437 print_error_obj(m, obj);
440 } 438 }
441 439
@@ -454,22 +452,28 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
454 if ((obj = error->ring[i].ringbuffer)) { 452 if ((obj = error->ring[i].ringbuffer)) {
455 err_printf(m, "%s --- ringbuffer = 0x%08x\n", 453 err_printf(m, "%s --- ringbuffer = 0x%08x\n",
456 dev_priv->ring[i].name, 454 dev_priv->ring[i].name,
457 obj->gtt_offset); 455 lower_32_bits(obj->gtt_offset));
458 print_error_obj(m, obj); 456 print_error_obj(m, obj);
459 } 457 }
460 458
461 if ((obj = error->ring[i].hws_page)) { 459 if ((obj = error->ring[i].hws_page)) {
462 err_printf(m, "%s --- HW Status = 0x%08x\n", 460 u64 hws_offset = obj->gtt_offset;
463 dev_priv->ring[i].name, 461 u32 *hws_page = &obj->pages[0][0];
464 obj->gtt_offset); 462
463 if (i915.enable_execlists) {
464 hws_offset += LRC_PPHWSP_PN * PAGE_SIZE;
465 hws_page = &obj->pages[LRC_PPHWSP_PN][0];
466 }
467 err_printf(m, "%s --- HW Status = 0x%08llx\n",
468 dev_priv->ring[i].name, hws_offset);
465 offset = 0; 469 offset = 0;
466 for (elt = 0; elt < PAGE_SIZE/16; elt += 4) { 470 for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
467 err_printf(m, "[%04x] %08x %08x %08x %08x\n", 471 err_printf(m, "[%04x] %08x %08x %08x %08x\n",
468 offset, 472 offset,
469 obj->pages[0][elt], 473 hws_page[elt],
470 obj->pages[0][elt+1], 474 hws_page[elt+1],
471 obj->pages[0][elt+2], 475 hws_page[elt+2],
472 obj->pages[0][elt+3]); 476 hws_page[elt+3]);
473 offset += 16; 477 offset += 16;
474 } 478 }
475 } 479 }
@@ -477,13 +481,14 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
477 if ((obj = error->ring[i].ctx)) { 481 if ((obj = error->ring[i].ctx)) {
478 err_printf(m, "%s --- HW Context = 0x%08x\n", 482 err_printf(m, "%s --- HW Context = 0x%08x\n",
479 dev_priv->ring[i].name, 483 dev_priv->ring[i].name,
480 obj->gtt_offset); 484 lower_32_bits(obj->gtt_offset));
481 print_error_obj(m, obj); 485 print_error_obj(m, obj);
482 } 486 }
483 } 487 }
484 488
485 if ((obj = error->semaphore_obj)) { 489 if ((obj = error->semaphore_obj)) {
486 err_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset); 490 err_printf(m, "Semaphore page = 0x%08x\n",
491 lower_32_bits(obj->gtt_offset));
487 for (elt = 0; elt < PAGE_SIZE/16; elt += 4) { 492 for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
488 err_printf(m, "[%04x] %08x %08x %08x %08x\n", 493 err_printf(m, "[%04x] %08x %08x %08x %08x\n",
489 elt * 4, 494 elt * 4,
@@ -591,7 +596,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
591 int num_pages; 596 int num_pages;
592 bool use_ggtt; 597 bool use_ggtt;
593 int i = 0; 598 int i = 0;
594 u32 reloc_offset; 599 u64 reloc_offset;
595 600
596 if (src == NULL || src->pages == NULL) 601 if (src == NULL || src->pages == NULL)
597 return NULL; 602 return NULL;
@@ -787,20 +792,15 @@ static void i915_gem_record_fences(struct drm_device *dev,
787 int i; 792 int i;
788 793
789 if (IS_GEN3(dev) || IS_GEN2(dev)) { 794 if (IS_GEN3(dev) || IS_GEN2(dev)) {
790 for (i = 0; i < 8; i++)
791 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
792 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
793 for (i = 0; i < 8; i++)
794 error->fence[i+8] = I915_READ(FENCE_REG_945_8 +
795 (i * 4));
796 } else if (IS_GEN5(dev) || IS_GEN4(dev))
797 for (i = 0; i < 16; i++)
798 error->fence[i] = I915_READ64(FENCE_REG_965_0 +
799 (i * 8));
800 else if (INTEL_INFO(dev)->gen >= 6)
801 for (i = 0; i < dev_priv->num_fence_regs; i++) 795 for (i = 0; i < dev_priv->num_fence_regs; i++)
802 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + 796 error->fence[i] = I915_READ(FENCE_REG(i));
803 (i * 8)); 797 } else if (IS_GEN5(dev) || IS_GEN4(dev)) {
798 for (i = 0; i < dev_priv->num_fence_regs; i++)
799 error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
800 } else if (INTEL_INFO(dev)->gen >= 6) {
801 for (i = 0; i < dev_priv->num_fence_regs; i++)
802 error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
803 }
804} 804}
805 805
806 806
@@ -886,7 +886,7 @@ static void i915_record_ring_state(struct drm_device *dev,
886 ering->faddr = I915_READ(DMA_FADD_I8XX); 886 ering->faddr = I915_READ(DMA_FADD_I8XX);
887 ering->ipeir = I915_READ(IPEIR); 887 ering->ipeir = I915_READ(IPEIR);
888 ering->ipehr = I915_READ(IPEHR); 888 ering->ipehr = I915_READ(IPEHR);
889 ering->instdone = I915_READ(INSTDONE); 889 ering->instdone = I915_READ(GEN2_INSTDONE);
890 } 890 }
891 891
892 ering->waiting = waitqueue_active(&ring->irq_queue); 892 ering->waiting = waitqueue_active(&ring->irq_queue);
@@ -1388,12 +1388,12 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
1388 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); 1388 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
1389 1389
1390 if (IS_GEN2(dev) || IS_GEN3(dev)) 1390 if (IS_GEN2(dev) || IS_GEN3(dev))
1391 instdone[0] = I915_READ(INSTDONE); 1391 instdone[0] = I915_READ(GEN2_INSTDONE);
1392 else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { 1392 else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
1393 instdone[0] = I915_READ(INSTDONE_I965); 1393 instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
1394 instdone[1] = I915_READ(INSTDONE1); 1394 instdone[1] = I915_READ(GEN4_INSTDONE1);
1395 } else if (INTEL_INFO(dev)->gen >= 7) { 1395 } else if (INTEL_INFO(dev)->gen >= 7) {
1396 instdone[0] = I915_READ(GEN7_INSTDONE_1); 1396 instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
1397 instdone[1] = I915_READ(GEN7_SC_INSTDONE); 1397 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1398 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); 1398 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
1399 instdone[3] = I915_READ(GEN7_ROW_INSTDONE); 1399 instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
index ccdc6c8ac20b..c4cb1c0c4d0d 100644
--- a/drivers/gpu/drm/i915/i915_guc_reg.h
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -37,14 +37,11 @@
37#define GS_UKERNEL_READY (0xF0 << GS_UKERNEL_SHIFT) 37#define GS_UKERNEL_READY (0xF0 << GS_UKERNEL_SHIFT)
38#define GS_MIA_SHIFT 16 38#define GS_MIA_SHIFT 16
39#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT) 39#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT)
40 40#define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT)
41#define GUC_WOPCM_SIZE 0xc050
42#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */
43#define GUC_WOPCM_OFFSET 0x80000 /* 512KB */
44 41
45#define SOFT_SCRATCH(n) (0xc180 + ((n) * 4)) 42#define SOFT_SCRATCH(n) (0xc180 + ((n) * 4))
46 43
47#define UOS_RSA_SCRATCH_0 0xc200 44#define UOS_RSA_SCRATCH(i) (0xc200 + (i) * 4)
48#define DMA_ADDR_0_LOW 0xc300 45#define DMA_ADDR_0_LOW 0xc300
49#define DMA_ADDR_0_HIGH 0xc304 46#define DMA_ADDR_0_HIGH 0xc304
50#define DMA_ADDR_1_LOW 0xc308 47#define DMA_ADDR_1_LOW 0xc308
@@ -56,10 +53,19 @@
56#define UOS_MOVE (1<<4) 53#define UOS_MOVE (1<<4)
57#define START_DMA (1<<0) 54#define START_DMA (1<<0)
58#define DMA_GUC_WOPCM_OFFSET 0xc340 55#define DMA_GUC_WOPCM_OFFSET 0xc340
56#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */
57#define GUC_MAX_IDLE_COUNT 0xC3E4
58
59#define GUC_WOPCM_SIZE 0xc050
60#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */
61
62/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
63#define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE)
59 64
60#define GEN8_GT_PM_CONFIG 0x138140 65#define GEN8_GT_PM_CONFIG 0x138140
66#define GEN9LP_GT_PM_CONFIG 0x138140
61#define GEN9_GT_PM_CONFIG 0x13816c 67#define GEN9_GT_PM_CONFIG 0x13816c
62#define GEN8_GT_DOORBELL_ENABLE (1<<0) 68#define GT_DOORBELL_ENABLE (1<<0)
63 69
64#define GEN8_GTCR 0x4274 70#define GEN8_GTCR 0x4274
65#define GEN8_GTCR_INVALIDATE (1<<0) 71#define GEN8_GTCR_INVALIDATE (1<<0)
@@ -80,7 +86,8 @@
80 GUC_ENABLE_READ_CACHE_LOGIC | \ 86 GUC_ENABLE_READ_CACHE_LOGIC | \
81 GUC_ENABLE_MIA_CACHING | \ 87 GUC_ENABLE_MIA_CACHING | \
82 GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA | \ 88 GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA | \
83 GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA) 89 GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | \
90 GUC_ENABLE_MIA_CLOCK_GATING)
84 91
85#define HOST2GUC_INTERRUPT 0xc4c8 92#define HOST2GUC_INTERRUPT 0xc4c8
86#define HOST2GUC_TRIGGER (1<<0) 93#define HOST2GUC_TRIGGER (1<<0)
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
new file mode 100644
index 000000000000..036b42bae827
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -0,0 +1,975 @@
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24#include <linux/firmware.h>
25#include <linux/circ_buf.h>
26#include "i915_drv.h"
27#include "intel_guc.h"
28
29/**
30 * DOC: GuC Client
31 *
32 * i915_guc_client:
33 * We use the term client to avoid confusion with contexts. A i915_guc_client is
34 * equivalent to GuC object guc_context_desc. This context descriptor is
35 * allocated from a pool of 1024 entries. Kernel driver will allocate doorbell
36 * and workqueue for it. Also the process descriptor (guc_process_desc), which
37 * is mapped to client space. So the client can write Work Item then ring the
38 * doorbell.
39 *
40 * To simplify the implementation, we allocate one gem object that contains all
41 * pages for doorbell, process descriptor and workqueue.
42 *
43 * The Scratch registers:
44 * There are 16 MMIO-based registers start from 0xC180. The kernel driver writes
45 * a value to the action register (SOFT_SCRATCH_0) along with any data. It then
46 * triggers an interrupt on the GuC via another register write (0xC4C8).
47 * Firmware writes a success/fail code back to the action register after
48 * processes the request. The kernel driver polls waiting for this update and
49 * then proceeds.
50 * See host2guc_action()
51 *
52 * Doorbells:
53 * Doorbells are interrupts to uKernel. A doorbell is a single cache line (QW)
54 * mapped into process space.
55 *
56 * Work Items:
57 * There are several types of work items that the host may place into a
58 * workqueue, each with its own requirements and limitations. Currently only
59 * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
60 * represents in-order queue. The kernel driver packs ring tail pointer and an
61 * ELSP context descriptor dword into Work Item.
62 * See guc_add_workqueue_item()
63 *
64 */
65
66/*
67 * Read GuC command/status register (SOFT_SCRATCH_0)
68 * Return true if it contains a response rather than a command
69 */
70static inline bool host2guc_action_response(struct drm_i915_private *dev_priv,
71 u32 *status)
72{
73 u32 val = I915_READ(SOFT_SCRATCH(0));
74 *status = val;
75 return GUC2HOST_IS_RESPONSE(val);
76}
77
78static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
79{
80 struct drm_i915_private *dev_priv = guc_to_i915(guc);
81 u32 status;
82 int i;
83 int ret;
84
85 if (WARN_ON(len < 1 || len > 15))
86 return -EINVAL;
87
88 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
89 spin_lock(&dev_priv->guc.host2guc_lock);
90
91 dev_priv->guc.action_count += 1;
92 dev_priv->guc.action_cmd = data[0];
93
94 for (i = 0; i < len; i++)
95 I915_WRITE(SOFT_SCRATCH(i), data[i]);
96
97 POSTING_READ(SOFT_SCRATCH(i - 1));
98
99 I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER);
100
101 /* No HOST2GUC command should take longer than 10ms */
102 ret = wait_for_atomic(host2guc_action_response(dev_priv, &status), 10);
103 if (status != GUC2HOST_STATUS_SUCCESS) {
104 /*
105 * Either the GuC explicitly returned an error (which
106 * we convert to -EIO here) or no response at all was
107 * received within the timeout limit (-ETIMEDOUT)
108 */
109 if (ret != -ETIMEDOUT)
110 ret = -EIO;
111
112 DRM_ERROR("GUC: host2guc action 0x%X failed. ret=%d "
113 "status=0x%08X response=0x%08X\n",
114 data[0], ret, status,
115 I915_READ(SOFT_SCRATCH(15)));
116
117 dev_priv->guc.action_fail += 1;
118 dev_priv->guc.action_err = ret;
119 }
120 dev_priv->guc.action_status = status;
121
122 spin_unlock(&dev_priv->guc.host2guc_lock);
123 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
124
125 return ret;
126}
127
128/*
129 * Tell the GuC to allocate or deallocate a specific doorbell
130 */
131
132static int host2guc_allocate_doorbell(struct intel_guc *guc,
133 struct i915_guc_client *client)
134{
135 u32 data[2];
136
137 data[0] = HOST2GUC_ACTION_ALLOCATE_DOORBELL;
138 data[1] = client->ctx_index;
139
140 return host2guc_action(guc, data, 2);
141}
142
143static int host2guc_release_doorbell(struct intel_guc *guc,
144 struct i915_guc_client *client)
145{
146 u32 data[2];
147
148 data[0] = HOST2GUC_ACTION_DEALLOCATE_DOORBELL;
149 data[1] = client->ctx_index;
150
151 return host2guc_action(guc, data, 2);
152}
153
154static int host2guc_sample_forcewake(struct intel_guc *guc,
155 struct i915_guc_client *client)
156{
157 struct drm_i915_private *dev_priv = guc_to_i915(guc);
158 struct drm_device *dev = dev_priv->dev;
159 u32 data[2];
160
161 data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
162 /* WaRsDisableCoarsePowerGating:skl,bxt */
163 if (!intel_enable_rc6(dev_priv->dev) ||
164 (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
165 (IS_SKL_GT3(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)) ||
166 (IS_SKL_GT4(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)))
167 data[1] = 0;
168 else
169 /* bit 0 and 1 are for Render and Media domain separately */
170 data[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;
171
172 return host2guc_action(guc, data, ARRAY_SIZE(data));
173}
174
175/*
176 * Initialise, update, or clear doorbell data shared with the GuC
177 *
178 * These functions modify shared data and so need access to the mapped
179 * client object which contains the page being used for the doorbell
180 */
181
182static void guc_init_doorbell(struct intel_guc *guc,
183 struct i915_guc_client *client)
184{
185 struct guc_doorbell_info *doorbell;
186 void *base;
187
188 base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
189 doorbell = base + client->doorbell_offset;
190
191 doorbell->db_status = 1;
192 doorbell->cookie = 0;
193
194 kunmap_atomic(base);
195}
196
197static int guc_ring_doorbell(struct i915_guc_client *gc)
198{
199 struct guc_process_desc *desc;
200 union guc_doorbell_qw db_cmp, db_exc, db_ret;
201 union guc_doorbell_qw *db;
202 void *base;
203 int attempt = 2, ret = -EAGAIN;
204
205 base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
206 desc = base + gc->proc_desc_offset;
207
208 /* Update the tail so it is visible to GuC */
209 desc->tail = gc->wq_tail;
210
211 /* current cookie */
212 db_cmp.db_status = GUC_DOORBELL_ENABLED;
213 db_cmp.cookie = gc->cookie;
214
215 /* cookie to be updated */
216 db_exc.db_status = GUC_DOORBELL_ENABLED;
217 db_exc.cookie = gc->cookie + 1;
218 if (db_exc.cookie == 0)
219 db_exc.cookie = 1;
220
221 /* pointer of current doorbell cacheline */
222 db = base + gc->doorbell_offset;
223
224 while (attempt--) {
225 /* lets ring the doorbell */
226 db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
227 db_cmp.value_qw, db_exc.value_qw);
228
229 /* if the exchange was successfully executed */
230 if (db_ret.value_qw == db_cmp.value_qw) {
231 /* db was successfully rung */
232 gc->cookie = db_exc.cookie;
233 ret = 0;
234 break;
235 }
236
237 /* XXX: doorbell was lost and need to acquire it again */
238 if (db_ret.db_status == GUC_DOORBELL_DISABLED)
239 break;
240
241 DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n",
242 db_cmp.cookie, db_ret.cookie);
243
244 /* update the cookie to newly read cookie from GuC */
245 db_cmp.cookie = db_ret.cookie;
246 db_exc.cookie = db_ret.cookie + 1;
247 if (db_exc.cookie == 0)
248 db_exc.cookie = 1;
249 }
250
251 kunmap_atomic(base);
252 return ret;
253}
254
255static void guc_disable_doorbell(struct intel_guc *guc,
256 struct i915_guc_client *client)
257{
258 struct drm_i915_private *dev_priv = guc_to_i915(guc);
259 struct guc_doorbell_info *doorbell;
260 void *base;
261 int drbreg = GEN8_DRBREGL(client->doorbell_id);
262 int value;
263
264 base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
265 doorbell = base + client->doorbell_offset;
266
267 doorbell->db_status = 0;
268
269 kunmap_atomic(base);
270
271 I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID);
272
273 value = I915_READ(drbreg);
274 WARN_ON((value & GEN8_DRB_VALID) != 0);
275
276 I915_WRITE(GEN8_DRBREGU(client->doorbell_id), 0);
277 I915_WRITE(drbreg, 0);
278
279 /* XXX: wait for any interrupts */
280 /* XXX: wait for workqueue to drain */
281}
282
283/*
284 * Select, assign and relase doorbell cachelines
285 *
286 * These functions track which doorbell cachelines are in use.
287 * The data they manipulate is protected by the host2guc lock.
288 */
289
290static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
291{
292 const uint32_t cacheline_size = cache_line_size();
293 uint32_t offset;
294
295 spin_lock(&guc->host2guc_lock);
296
297 /* Doorbell uses a single cache line within a page */
298 offset = offset_in_page(guc->db_cacheline);
299
300 /* Moving to next cache line to reduce contention */
301 guc->db_cacheline += cacheline_size;
302
303 spin_unlock(&guc->host2guc_lock);
304
305 DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n",
306 offset, guc->db_cacheline, cacheline_size);
307
308 return offset;
309}
310
311static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority)
312{
313 /*
314 * The bitmap is split into two halves; the first half is used for
315 * normal priority contexts, the second half for high-priority ones.
316 * Note that logically higher priorities are numerically less than
317 * normal ones, so the test below means "is it high-priority?"
318 */
319 const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
320 const uint16_t half = GUC_MAX_DOORBELLS / 2;
321 const uint16_t start = hi_pri ? half : 0;
322 const uint16_t end = start + half;
323 uint16_t id;
324
325 spin_lock(&guc->host2guc_lock);
326 id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
327 if (id == end)
328 id = GUC_INVALID_DOORBELL_ID;
329 else
330 bitmap_set(guc->doorbell_bitmap, id, 1);
331 spin_unlock(&guc->host2guc_lock);
332
333 DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
334 hi_pri ? "high" : "normal", id);
335
336 return id;
337}
338
339static void release_doorbell(struct intel_guc *guc, uint16_t id)
340{
341 spin_lock(&guc->host2guc_lock);
342 bitmap_clear(guc->doorbell_bitmap, id, 1);
343 spin_unlock(&guc->host2guc_lock);
344}
345
346/*
347 * Initialise the process descriptor shared with the GuC firmware.
348 */
349static void guc_init_proc_desc(struct intel_guc *guc,
350 struct i915_guc_client *client)
351{
352 struct guc_process_desc *desc;
353 void *base;
354
355 base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
356 desc = base + client->proc_desc_offset;
357
358 memset(desc, 0, sizeof(*desc));
359
360 /*
361 * XXX: pDoorbell and WQVBaseAddress are pointers in process address
362 * space for ring3 clients (set them as in mmap_ioctl) or kernel
363 * space for kernel clients (map on demand instead? May make debug
364 * easier to have it mapped).
365 */
366 desc->wq_base_addr = 0;
367 desc->db_base_addr = 0;
368
369 desc->context_id = client->ctx_index;
370 desc->wq_size_bytes = client->wq_size;
371 desc->wq_status = WQ_STATUS_ACTIVE;
372 desc->priority = client->priority;
373
374 kunmap_atomic(base);
375}
376
377/*
378 * Initialise/clear the context descriptor shared with the GuC firmware.
379 *
380 * This descriptor tells the GuC where (in GGTT space) to find the important
381 * data structures relating to this client (doorbell, process descriptor,
382 * write queue, etc).
383 */
384
385static void guc_init_ctx_desc(struct intel_guc *guc,
386 struct i915_guc_client *client)
387{
388 struct intel_context *ctx = client->owner;
389 struct guc_context_desc desc;
390 struct sg_table *sg;
391 int i;
392
393 memset(&desc, 0, sizeof(desc));
394
395 desc.attribute = GUC_CTX_DESC_ATTR_ACTIVE | GUC_CTX_DESC_ATTR_KERNEL;
396 desc.context_id = client->ctx_index;
397 desc.priority = client->priority;
398 desc.db_id = client->doorbell_id;
399
400 for (i = 0; i < I915_NUM_RINGS; i++) {
401 struct guc_execlist_context *lrc = &desc.lrc[i];
402 struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
403 struct intel_engine_cs *ring;
404 struct drm_i915_gem_object *obj;
405 uint64_t ctx_desc;
406
407 /* TODO: We have a design issue to be solved here. Only when we
408 * receive the first batch, we know which engine is used by the
409 * user. But here GuC expects the lrc and ring to be pinned. It
410 * is not an issue for default context, which is the only one
411 * for now who owns a GuC client. But for future owner of GuC
412 * client, need to make sure lrc is pinned prior to enter here.
413 */
414 obj = ctx->engine[i].state;
415 if (!obj)
416 break; /* XXX: continue? */
417
418 ring = ringbuf->ring;
419 ctx_desc = intel_lr_context_descriptor(ctx, ring);
420 lrc->context_desc = (u32)ctx_desc;
421
422 /* The state page is after PPHWSP */
423 lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
424 LRC_STATE_PN * PAGE_SIZE;
425 lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
426 (ring->id << GUC_ELC_ENGINE_OFFSET);
427
428 obj = ringbuf->obj;
429
430 lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
431 lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
432 lrc->ring_next_free_location = lrc->ring_begin;
433 lrc->ring_current_tail_pointer_value = 0;
434
435 desc.engines_used |= (1 << ring->id);
436 }
437
438 WARN_ON(desc.engines_used == 0);
439
440 /*
441 * The CPU address is only needed at certain points, so kmap_atomic on
442 * demand instead of storing it in the ctx descriptor.
443 * XXX: May make debug easier to have it mapped
444 */
445 desc.db_trigger_cpu = 0;
446 desc.db_trigger_uk = client->doorbell_offset +
447 i915_gem_obj_ggtt_offset(client->client_obj);
448 desc.db_trigger_phy = client->doorbell_offset +
449 sg_dma_address(client->client_obj->pages->sgl);
450
451 desc.process_desc = client->proc_desc_offset +
452 i915_gem_obj_ggtt_offset(client->client_obj);
453
454 desc.wq_addr = client->wq_offset +
455 i915_gem_obj_ggtt_offset(client->client_obj);
456
457 desc.wq_size = client->wq_size;
458
459 /*
460 * XXX: Take LRCs from an existing intel_context if this is not an
461 * IsKMDCreatedContext client
462 */
463 desc.desc_private = (uintptr_t)client;
464
465 /* Pool context is pinned already */
466 sg = guc->ctx_pool_obj->pages;
467 sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
468 sizeof(desc) * client->ctx_index);
469}
470
471static void guc_fini_ctx_desc(struct intel_guc *guc,
472 struct i915_guc_client *client)
473{
474 struct guc_context_desc desc;
475 struct sg_table *sg;
476
477 memset(&desc, 0, sizeof(desc));
478
479 sg = guc->ctx_pool_obj->pages;
480 sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
481 sizeof(desc) * client->ctx_index);
482}
483
484/* Get valid workqueue item and return it back to offset */
485static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
486{
487 struct guc_process_desc *desc;
488 void *base;
489 u32 size = sizeof(struct guc_wq_item);
490 int ret = 0, timeout_counter = 200;
491
492 base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
493 desc = base + gc->proc_desc_offset;
494
495 while (timeout_counter-- > 0) {
496 ret = wait_for_atomic(CIRC_SPACE(gc->wq_tail, desc->head,
497 gc->wq_size) >= size, 1);
498
499 if (!ret) {
500 *offset = gc->wq_tail;
501
502 /* advance the tail for next workqueue item */
503 gc->wq_tail += size;
504 gc->wq_tail &= gc->wq_size - 1;
505
506 /* this will break the loop */
507 timeout_counter = 0;
508 }
509 };
510
511 kunmap_atomic(base);
512
513 return ret;
514}
515
516static int guc_add_workqueue_item(struct i915_guc_client *gc,
517 struct drm_i915_gem_request *rq)
518{
519 enum intel_ring_id ring_id = rq->ring->id;
520 struct guc_wq_item *wqi;
521 void *base;
522 u32 tail, wq_len, wq_off = 0;
523 int ret;
524
525 ret = guc_get_workqueue_space(gc, &wq_off);
526 if (ret)
527 return ret;
528
529 /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
530 * should not have the case where structure wqi is across page, neither
531 * wrapped to the beginning. This simplifies the implementation below.
532 *
533 * XXX: if not the case, we need save data to a temp wqi and copy it to
534 * workqueue buffer dw by dw.
535 */
536 WARN_ON(sizeof(struct guc_wq_item) != 16);
537 WARN_ON(wq_off & 3);
538
539 /* wq starts from the page after doorbell / process_desc */
540 base = kmap_atomic(i915_gem_object_get_page(gc->client_obj,
541 (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT));
542 wq_off &= PAGE_SIZE - 1;
543 wqi = (struct guc_wq_item *)((char *)base + wq_off);
544
545 /* len does not include the header */
546 wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
547 wqi->header = WQ_TYPE_INORDER |
548 (wq_len << WQ_LEN_SHIFT) |
549 (ring_id << WQ_TARGET_SHIFT) |
550 WQ_NO_WCFLUSH_WAIT;
551
552 /* The GuC wants only the low-order word of the context descriptor */
553 wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, rq->ring);
554
555 /* The GuC firmware wants the tail index in QWords, not bytes */
556 tail = rq->ringbuf->tail >> 3;
557 wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
558 wqi->fence_id = 0; /*XXX: what fence to be here */
559
560 kunmap_atomic(base);
561
562 return 0;
563}
564
565#define CTX_RING_BUFFER_START 0x08
566
567/* Update the ringbuffer pointer in a saved context image */
568static void lr_context_update(struct drm_i915_gem_request *rq)
569{
570 enum intel_ring_id ring_id = rq->ring->id;
571 struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring_id].state;
572 struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
573 struct page *page;
574 uint32_t *reg_state;
575
576 BUG_ON(!ctx_obj);
577 WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
578 WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
579
580 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
581 reg_state = kmap_atomic(page);
582
583 reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
584
585 kunmap_atomic(reg_state);
586}
587
588/**
589 * i915_guc_submit() - Submit commands through GuC
590 * @client: the guc client where commands will go through
591 * @ctx: LRC where commands come from
592 * @ring: HW engine that will excute the commands
593 *
594 * Return: 0 if succeed
595 */
596int i915_guc_submit(struct i915_guc_client *client,
597 struct drm_i915_gem_request *rq)
598{
599 struct intel_guc *guc = client->guc;
600 enum intel_ring_id ring_id = rq->ring->id;
601 unsigned long flags;
602 int q_ret, b_ret;
603
604 /* Need this because of the deferred pin ctx and ring */
605 /* Shall we move this right after ring is pinned? */
606 lr_context_update(rq);
607
608 spin_lock_irqsave(&client->wq_lock, flags);
609
610 q_ret = guc_add_workqueue_item(client, rq);
611 if (q_ret == 0)
612 b_ret = guc_ring_doorbell(client);
613
614 client->submissions[ring_id] += 1;
615 if (q_ret) {
616 client->q_fail += 1;
617 client->retcode = q_ret;
618 } else if (b_ret) {
619 client->b_fail += 1;
620 client->retcode = q_ret = b_ret;
621 } else {
622 client->retcode = 0;
623 }
624 spin_unlock_irqrestore(&client->wq_lock, flags);
625
626 spin_lock(&guc->host2guc_lock);
627 guc->submissions[ring_id] += 1;
628 guc->last_seqno[ring_id] = rq->seqno;
629 spin_unlock(&guc->host2guc_lock);
630
631 return q_ret;
632}
633
634/*
635 * Everything below here is concerned with setup & teardown, and is
636 * therefore not part of the somewhat time-critical batch-submission
637 * path of i915_guc_submit() above.
638 */
639
640/**
641 * gem_allocate_guc_obj() - Allocate gem object for GuC usage
642 * @dev: drm device
643 * @size: size of object
644 *
645 * This is a wrapper to create a gem obj. In order to use it inside GuC, the
646 * object needs to be pinned lifetime. Also we must pin it to gtt space other
647 * than [0, GUC_WOPCM_TOP) because this range is reserved inside GuC.
648 *
649 * Return: A drm_i915_gem_object if successful, otherwise NULL.
650 */
651static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
652 u32 size)
653{
654 struct drm_i915_private *dev_priv = dev->dev_private;
655 struct drm_i915_gem_object *obj;
656
657 obj = i915_gem_alloc_object(dev, size);
658 if (!obj)
659 return NULL;
660
661 if (i915_gem_object_get_pages(obj)) {
662 drm_gem_object_unreference(&obj->base);
663 return NULL;
664 }
665
666 if (i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
667 PIN_OFFSET_BIAS | GUC_WOPCM_TOP)) {
668 drm_gem_object_unreference(&obj->base);
669 return NULL;
670 }
671
672 /* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
673 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
674
675 return obj;
676}
677
678/**
679 * gem_release_guc_obj() - Release gem object allocated for GuC usage
680 * @obj: gem obj to be released
681 */
682static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
683{
684 if (!obj)
685 return;
686
687 if (i915_gem_obj_is_pinned(obj))
688 i915_gem_object_ggtt_unpin(obj);
689
690 drm_gem_object_unreference(&obj->base);
691}
692
693static void guc_client_free(struct drm_device *dev,
694 struct i915_guc_client *client)
695{
696 struct drm_i915_private *dev_priv = dev->dev_private;
697 struct intel_guc *guc = &dev_priv->guc;
698
699 if (!client)
700 return;
701
702 if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
703 /*
704 * First disable the doorbell, then tell the GuC we've
705 * finished with it, finally deallocate it in our bitmap
706 */
707 guc_disable_doorbell(guc, client);
708 host2guc_release_doorbell(guc, client);
709 release_doorbell(guc, client->doorbell_id);
710 }
711
712 /*
713 * XXX: wait for any outstanding submissions before freeing memory.
714 * Be sure to drop any locks
715 */
716
717 gem_release_guc_obj(client->client_obj);
718
719 if (client->ctx_index != GUC_INVALID_CTX_ID) {
720 guc_fini_ctx_desc(guc, client);
721 ida_simple_remove(&guc->ctx_ids, client->ctx_index);
722 }
723
724 kfree(client);
725}
726
727/**
728 * guc_client_alloc() - Allocate an i915_guc_client
729 * @dev: drm device
730 * @priority: four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW
731 * The kernel client to replace ExecList submission is created with
732 * NORMAL priority. Priority of a client for scheduler can be HIGH,
733 * while a preemption context can use CRITICAL.
734 * @ctx the context to own the client (we use the default render context)
735 *
736 * Return: An i915_guc_client object if success.
737 */
static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
						uint32_t priority,
						struct intel_context *ctx)
{
	struct i915_guc_client *client;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc *guc = &dev_priv->guc;
	struct drm_i915_gem_object *obj;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return NULL;

	/* Doorbell stays invalid until assign_doorbell() succeeds below */
	client->doorbell_id = GUC_INVALID_DOORBELL_ID;
	client->priority = priority;
	client->owner = ctx;
	client->guc = guc;

	/*
	 * Reserve a unique GuC context id. On failure ida_simple_get()
	 * returns a negative errno, which the uint32_t cast turns into a
	 * huge value caught by the >= GUC_MAX_GPU_CONTEXTS check.
	 */
	client->ctx_index = (uint32_t)ida_simple_get(&guc->ctx_ids, 0,
			GUC_MAX_GPU_CONTEXTS, GFP_KERNEL);
	if (client->ctx_index >= GUC_MAX_GPU_CONTEXTS) {
		client->ctx_index = GUC_INVALID_CTX_ID;
		goto err;
	}

	/* The first page is doorbell/proc_desc. Two followed pages are wq. */
	obj = gem_allocate_guc_obj(dev, GUC_DB_SIZE + GUC_WQ_SIZE);
	if (!obj)
		goto err;

	/* Work queue lives after the doorbell/proc_desc page */
	client->client_obj = obj;
	client->wq_offset = GUC_DB_SIZE;
	client->wq_size = GUC_WQ_SIZE;
	spin_lock_init(&client->wq_lock);

	client->doorbell_offset = select_doorbell_cacheline(guc);

	/*
	 * Since the doorbell only requires a single cacheline, we can save
	 * space by putting the application process descriptor in the same
	 * page. Use the half of the page that doesn't include the doorbell.
	 */
	if (client->doorbell_offset >= (GUC_DB_SIZE / 2))
		client->proc_desc_offset = 0;
	else
		client->proc_desc_offset = (GUC_DB_SIZE / 2);

	client->doorbell_id = assign_doorbell(guc, client->priority);
	if (client->doorbell_id == GUC_INVALID_DOORBELL_ID)
		/* XXX: evict a doorbell instead */
		goto err;

	/* Descriptors must be set up before the doorbell is initialised */
	guc_init_proc_desc(guc, client);
	guc_init_ctx_desc(guc, client);
	guc_init_doorbell(guc, client);

	/* XXX: Any cache flushes needed? General domain mgmt calls? */

	if (host2guc_allocate_doorbell(guc, client))
		goto err;

	DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u db_id %u\n",
		priority, client, client->ctx_index, client->doorbell_id);

	return client;

err:
	DRM_ERROR("FAILED to create priority %u GuC client!\n", priority);

	/* Single teardown path, used even for a partially set-up client */
	guc_client_free(dev, client);
	return NULL;
}
810
811static void guc_create_log(struct intel_guc *guc)
812{
813 struct drm_i915_private *dev_priv = guc_to_i915(guc);
814 struct drm_i915_gem_object *obj;
815 unsigned long offset;
816 uint32_t size, flags;
817
818 if (i915.guc_log_level < GUC_LOG_VERBOSITY_MIN)
819 return;
820
821 if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
822 i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
823
824 /* The first page is to save log buffer state. Allocate one
825 * extra page for others in case for overlap */
826 size = (1 + GUC_LOG_DPC_PAGES + 1 +
827 GUC_LOG_ISR_PAGES + 1 +
828 GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
829
830 obj = guc->log_obj;
831 if (!obj) {
832 obj = gem_allocate_guc_obj(dev_priv->dev, size);
833 if (!obj) {
834 /* logging will be off */
835 i915.guc_log_level = -1;
836 return;
837 }
838
839 guc->log_obj = obj;
840 }
841
842 /* each allocated unit is a page */
843 flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
844 (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
845 (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
846 (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
847
848 offset = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; /* in pages */
849 guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
850}
851
852/*
853 * Set up the memory resources to be shared with the GuC. At this point,
854 * we require just one object that can be mapped through the GGTT.
855 */
856int i915_guc_submission_init(struct drm_device *dev)
857{
858 struct drm_i915_private *dev_priv = dev->dev_private;
859 const size_t ctxsize = sizeof(struct guc_context_desc);
860 const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
861 const size_t gemsize = round_up(poolsize, PAGE_SIZE);
862 struct intel_guc *guc = &dev_priv->guc;
863
864 if (!i915.enable_guc_submission)
865 return 0; /* not enabled */
866
867 if (guc->ctx_pool_obj)
868 return 0; /* already allocated */
869
870 guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv->dev, gemsize);
871 if (!guc->ctx_pool_obj)
872 return -ENOMEM;
873
874 spin_lock_init(&dev_priv->guc.host2guc_lock);
875
876 ida_init(&guc->ctx_ids);
877
878 guc_create_log(guc);
879
880 return 0;
881}
882
883int i915_guc_submission_enable(struct drm_device *dev)
884{
885 struct drm_i915_private *dev_priv = dev->dev_private;
886 struct intel_guc *guc = &dev_priv->guc;
887 struct intel_context *ctx = dev_priv->ring[RCS].default_context;
888 struct i915_guc_client *client;
889
890 /* client for execbuf submission */
891 client = guc_client_alloc(dev, GUC_CTX_PRIORITY_KMD_NORMAL, ctx);
892 if (!client) {
893 DRM_ERROR("Failed to create execbuf guc_client\n");
894 return -ENOMEM;
895 }
896
897 guc->execbuf_client = client;
898
899 host2guc_sample_forcewake(guc, client);
900
901 return 0;
902}
903
void i915_guc_submission_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc *guc = &dev_priv->guc;

	/* Tear down the execbuf client and forget the stale pointer */
	guc_client_free(dev, guc->execbuf_client);
	guc->execbuf_client = NULL;
}
912
913void i915_guc_submission_fini(struct drm_device *dev)
914{
915 struct drm_i915_private *dev_priv = dev->dev_private;
916 struct intel_guc *guc = &dev_priv->guc;
917
918 gem_release_guc_obj(dev_priv->guc.log_obj);
919 guc->log_obj = NULL;
920
921 if (guc->ctx_pool_obj)
922 ida_destroy(&guc->ctx_ids);
923 gem_release_guc_obj(guc->ctx_pool_obj);
924 guc->ctx_pool_obj = NULL;
925}
926
/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @dev: drm device
 *
 * Return: 0 on success (or if GuC submission is disabled), otherwise
 * the result of host2guc_action().
 */
int intel_guc_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc *guc = &dev_priv->guc;
	struct intel_context *ctx;
	u32 data[3];

	/* Nothing to tell the GuC if it isn't handling submission */
	if (!i915.enable_guc_submission)
		return 0;

	ctx = dev_priv->ring[RCS].default_context;

	data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
	/* any value greater than GUC_POWER_D0 */
	data[1] = GUC_POWER_D1;
	/* first page is shared data with GuC */
	data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);

	return host2guc_action(guc, data, ARRAY_SIZE(data));
}
951
952
953/**
954 * intel_guc_resume() - notify GuC resuming from suspend state
955 * @dev: drm device
956 */
957int intel_guc_resume(struct drm_device *dev)
958{
959 struct drm_i915_private *dev_priv = dev->dev_private;
960 struct intel_guc *guc = &dev_priv->guc;
961 struct intel_context *ctx;
962 u32 data[3];
963
964 if (!i915.enable_guc_submission)
965 return 0;
966
967 ctx = dev_priv->ring[RCS].default_context;
968
969 data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
970 data[1] = GUC_POWER_D0;
971 /* first page is shared data with GuC */
972 data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
973
974 return host2guc_action(guc, data, ARRAY_SIZE(data));
975}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 39d73dbc1c47..0d228f909dcb 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -45,6 +45,18 @@
45 * and related files, but that will be described in separate chapters. 45 * and related files, but that will be described in separate chapters.
46 */ 46 */
47 47
48static const u32 hpd_ilk[HPD_NUM_PINS] = {
49 [HPD_PORT_A] = DE_DP_A_HOTPLUG,
50};
51
52static const u32 hpd_ivb[HPD_NUM_PINS] = {
53 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
54};
55
56static const u32 hpd_bdw[HPD_NUM_PINS] = {
57 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
58};
59
48static const u32 hpd_ibx[HPD_NUM_PINS] = { 60static const u32 hpd_ibx[HPD_NUM_PINS] = {
49 [HPD_CRT] = SDE_CRT_HOTPLUG, 61 [HPD_CRT] = SDE_CRT_HOTPLUG,
50 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 62 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
@@ -62,6 +74,7 @@ static const u32 hpd_cpt[HPD_NUM_PINS] = {
62}; 74};
63 75
64static const u32 hpd_spt[HPD_NUM_PINS] = { 76static const u32 hpd_spt[HPD_NUM_PINS] = {
77 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
65 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 78 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
66 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 79 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
67 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 80 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
@@ -97,6 +110,7 @@ static const u32 hpd_status_i915[HPD_NUM_PINS] = {
97 110
98/* BXT hpd list */ 111/* BXT hpd list */
99static const u32 hpd_bxt[HPD_NUM_PINS] = { 112static const u32 hpd_bxt[HPD_NUM_PINS] = {
113 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
100 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, 114 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
101 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC 115 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
102}; 116};
@@ -125,27 +139,30 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
125/* 139/*
126 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 140 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
127 */ 141 */
128#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \ 142static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg)
129 u32 val = I915_READ(reg); \ 143{
130 if (val) { \ 144 u32 val = I915_READ(reg);
131 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \ 145
132 (reg), val); \ 146 if (val == 0)
133 I915_WRITE((reg), 0xffffffff); \ 147 return;
134 POSTING_READ(reg); \ 148
135 I915_WRITE((reg), 0xffffffff); \ 149 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
136 POSTING_READ(reg); \ 150 reg, val);
137 } \ 151 I915_WRITE(reg, 0xffffffff);
138} while (0) 152 POSTING_READ(reg);
153 I915_WRITE(reg, 0xffffffff);
154 POSTING_READ(reg);
155}
139 156
140#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \ 157#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
141 GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \ 158 gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
142 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \ 159 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
143 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \ 160 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
144 POSTING_READ(GEN8_##type##_IMR(which)); \ 161 POSTING_READ(GEN8_##type##_IMR(which)); \
145} while (0) 162} while (0)
146 163
147#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \ 164#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
148 GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \ 165 gen5_assert_iir_is_zero(dev_priv, type##IIR); \
149 I915_WRITE(type##IER, (ier_val)); \ 166 I915_WRITE(type##IER, (ier_val)); \
150 I915_WRITE(type##IMR, (imr_val)); \ 167 I915_WRITE(type##IMR, (imr_val)); \
151 POSTING_READ(type##IMR); \ 168 POSTING_READ(type##IMR); \
@@ -154,36 +171,85 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
154static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 171static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
155 172
156/* For display hotplug interrupt */ 173/* For display hotplug interrupt */
157void 174static inline void
158ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask) 175i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
176 uint32_t mask,
177 uint32_t bits)
159{ 178{
179 uint32_t val;
180
160 assert_spin_locked(&dev_priv->irq_lock); 181 assert_spin_locked(&dev_priv->irq_lock);
182 WARN_ON(bits & ~mask);
161 183
162 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 184 val = I915_READ(PORT_HOTPLUG_EN);
163 return; 185 val &= ~mask;
186 val |= bits;
187 I915_WRITE(PORT_HOTPLUG_EN, val);
188}
164 189
165 if ((dev_priv->irq_mask & mask) != 0) { 190/**
166 dev_priv->irq_mask &= ~mask; 191 * i915_hotplug_interrupt_update - update hotplug interrupt enable
167 I915_WRITE(DEIMR, dev_priv->irq_mask); 192 * @dev_priv: driver private
168 POSTING_READ(DEIMR); 193 * @mask: bits to update
169 } 194 * @bits: bits to enable
195 * NOTE: the HPD enable bits are modified both inside and outside
196 * of an interrupt context. To avoid that read-modify-write cycles
197 * interfer, these bits are protected by a spinlock. Since this
198 * function is usually not called from a context where the lock is
199 * held already, this function acquires the lock itself. A non-locking
200 * version is also available.
201 */
202void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
203 uint32_t mask,
204 uint32_t bits)
205{
206 spin_lock_irq(&dev_priv->irq_lock);
207 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
208 spin_unlock_irq(&dev_priv->irq_lock);
170} 209}
171 210
172void 211/**
173ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask) 212 * ilk_update_display_irq - update DEIMR
213 * @dev_priv: driver private
214 * @interrupt_mask: mask of interrupt bits to update
215 * @enabled_irq_mask: mask of interrupt bits to enable
216 */
217static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
218 uint32_t interrupt_mask,
219 uint32_t enabled_irq_mask)
174{ 220{
221 uint32_t new_val;
222
175 assert_spin_locked(&dev_priv->irq_lock); 223 assert_spin_locked(&dev_priv->irq_lock);
176 224
225 WARN_ON(enabled_irq_mask & ~interrupt_mask);
226
177 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 227 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
178 return; 228 return;
179 229
180 if ((dev_priv->irq_mask & mask) != mask) { 230 new_val = dev_priv->irq_mask;
181 dev_priv->irq_mask |= mask; 231 new_val &= ~interrupt_mask;
232 new_val |= (~enabled_irq_mask & interrupt_mask);
233
234 if (new_val != dev_priv->irq_mask) {
235 dev_priv->irq_mask = new_val;
182 I915_WRITE(DEIMR, dev_priv->irq_mask); 236 I915_WRITE(DEIMR, dev_priv->irq_mask);
183 POSTING_READ(DEIMR); 237 POSTING_READ(DEIMR);
184 } 238 }
185} 239}
186 240
241void
242ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
243{
244 ilk_update_display_irq(dev_priv, mask, mask);
245}
246
247void
248ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
249{
250 ilk_update_display_irq(dev_priv, mask, 0);
251}
252
187/** 253/**
188 * ilk_update_gt_irq - update GTIMR 254 * ilk_update_gt_irq - update GTIMR
189 * @dev_priv: driver private 255 * @dev_priv: driver private
@@ -351,6 +417,38 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
351} 417}
352 418
353/** 419/**
420 * bdw_update_port_irq - update DE port interrupt
421 * @dev_priv: driver private
422 * @interrupt_mask: mask of interrupt bits to update
423 * @enabled_irq_mask: mask of interrupt bits to enable
424 */
425static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
426 uint32_t interrupt_mask,
427 uint32_t enabled_irq_mask)
428{
429 uint32_t new_val;
430 uint32_t old_val;
431
432 assert_spin_locked(&dev_priv->irq_lock);
433
434 WARN_ON(enabled_irq_mask & ~interrupt_mask);
435
436 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
437 return;
438
439 old_val = I915_READ(GEN8_DE_PORT_IMR);
440
441 new_val = old_val;
442 new_val &= ~interrupt_mask;
443 new_val |= (~enabled_irq_mask & interrupt_mask);
444
445 if (new_val != old_val) {
446 I915_WRITE(GEN8_DE_PORT_IMR, new_val);
447 POSTING_READ(GEN8_DE_PORT_IMR);
448 }
449}
450
451/**
354 * ibx_display_interrupt_update - update SDEIMR 452 * ibx_display_interrupt_update - update SDEIMR
355 * @dev_priv: driver private 453 * @dev_priv: driver private
356 * @interrupt_mask: mask of interrupt bits to update 454 * @interrupt_mask: mask of interrupt bits to update
@@ -486,6 +584,7 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
486 584
487/** 585/**
488 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 586 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
587 * @dev: drm device
489 */ 588 */
490static void i915_enable_asle_pipestat(struct drm_device *dev) 589static void i915_enable_asle_pipestat(struct drm_device *dev)
491{ 590{
@@ -554,7 +653,7 @@ static void i915_enable_asle_pipestat(struct drm_device *dev)
554 * of horizontal active on the first line of vertical active 653 * of horizontal active on the first line of vertical active
555 */ 654 */
556 655
557static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe) 656static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
558{ 657{
559 /* Gen2 doesn't have a hardware frame counter */ 658 /* Gen2 doesn't have a hardware frame counter */
560 return 0; 659 return 0;
@@ -563,7 +662,7 @@ static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
563/* Called from drm generic code, passed a 'crtc', which 662/* Called from drm generic code, passed a 'crtc', which
564 * we use as a pipe index 663 * we use as a pipe index
565 */ 664 */
566static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) 665static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
567{ 666{
568 struct drm_i915_private *dev_priv = dev->dev_private; 667 struct drm_i915_private *dev_priv = dev->dev_private;
569 unsigned long high_frame; 668 unsigned long high_frame;
@@ -611,12 +710,11 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
611 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 710 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
612} 711}
613 712
614static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) 713static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
615{ 714{
616 struct drm_i915_private *dev_priv = dev->dev_private; 715 struct drm_i915_private *dev_priv = dev->dev_private;
617 int reg = PIPE_FRMCOUNT_GM45(pipe);
618 716
619 return I915_READ(reg); 717 return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
620} 718}
621 719
622/* raw reads, only for fast reads of display block, no need for forcewake etc. */ 720/* raw reads, only for fast reads of display block, no need for forcewake etc. */
@@ -651,7 +749,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
651 * problem. We may need to extend this to include other platforms, 749 * problem. We may need to extend this to include other platforms,
652 * but so far testing only shows the problem on HSW. 750 * but so far testing only shows the problem on HSW.
653 */ 751 */
654 if (IS_HASWELL(dev) && !position) { 752 if (HAS_DDI(dev) && !position) {
655 int i, temp; 753 int i, temp;
656 754
657 for (i = 0; i < 100; i++) { 755 for (i = 0; i < 100; i++) {
@@ -672,14 +770,14 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
672 return (position + crtc->scanline_offset) % vtotal; 770 return (position + crtc->scanline_offset) % vtotal;
673} 771}
674 772
675static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, 773static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
676 unsigned int flags, int *vpos, int *hpos, 774 unsigned int flags, int *vpos, int *hpos,
677 ktime_t *stime, ktime_t *etime) 775 ktime_t *stime, ktime_t *etime,
776 const struct drm_display_mode *mode)
678{ 777{
679 struct drm_i915_private *dev_priv = dev->dev_private; 778 struct drm_i915_private *dev_priv = dev->dev_private;
680 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 779 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
681 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 780 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
682 const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
683 int position; 781 int position;
684 int vbl_start, vbl_end, hsync_start, htotal, vtotal; 782 int vbl_start, vbl_end, hsync_start, htotal, vtotal;
685 bool in_vbl = true; 783 bool in_vbl = true;
@@ -809,34 +907,33 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc)
809 return position; 907 return position;
810} 908}
811 909
812static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, 910static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
813 int *max_error, 911 int *max_error,
814 struct timeval *vblank_time, 912 struct timeval *vblank_time,
815 unsigned flags) 913 unsigned flags)
816{ 914{
817 struct drm_crtc *crtc; 915 struct drm_crtc *crtc;
818 916
819 if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) { 917 if (pipe >= INTEL_INFO(dev)->num_pipes) {
820 DRM_ERROR("Invalid crtc %d\n", pipe); 918 DRM_ERROR("Invalid crtc %u\n", pipe);
821 return -EINVAL; 919 return -EINVAL;
822 } 920 }
823 921
824 /* Get drm_crtc to timestamp: */ 922 /* Get drm_crtc to timestamp: */
825 crtc = intel_get_crtc_for_pipe(dev, pipe); 923 crtc = intel_get_crtc_for_pipe(dev, pipe);
826 if (crtc == NULL) { 924 if (crtc == NULL) {
827 DRM_ERROR("Invalid crtc %d\n", pipe); 925 DRM_ERROR("Invalid crtc %u\n", pipe);
828 return -EINVAL; 926 return -EINVAL;
829 } 927 }
830 928
831 if (!crtc->hwmode.crtc_clock) { 929 if (!crtc->hwmode.crtc_clock) {
832 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); 930 DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
833 return -EBUSY; 931 return -EBUSY;
834 } 932 }
835 933
836 /* Helper routine in DRM core does all the work: */ 934 /* Helper routine in DRM core does all the work: */
837 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 935 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
838 vblank_time, flags, 936 vblank_time, flags,
839 crtc,
840 &crtc->hwmode); 937 &crtc->hwmode);
841} 938}
842 939
@@ -903,12 +1000,16 @@ static bool vlv_c0_above(struct drm_i915_private *dev_priv,
903 int threshold) 1000 int threshold)
904{ 1001{
905 u64 time, c0; 1002 u64 time, c0;
1003 unsigned int mul = 100;
906 1004
907 if (old->cz_clock == 0) 1005 if (old->cz_clock == 0)
908 return false; 1006 return false;
909 1007
1008 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
1009 mul <<= 8;
1010
910 time = now->cz_clock - old->cz_clock; 1011 time = now->cz_clock - old->cz_clock;
911 time *= threshold * dev_priv->mem_freq; 1012 time *= threshold * dev_priv->czclk_freq;
912 1013
913 /* Workload can be split between render + media, e.g. SwapBuffers 1014 /* Workload can be split between render + media, e.g. SwapBuffers
914 * being blitted in X after being rendered in mesa. To account for 1015 * being blitted in X after being rendered in mesa. To account for
@@ -916,7 +1017,7 @@ static bool vlv_c0_above(struct drm_i915_private *dev_priv,
916 */ 1017 */
917 c0 = now->render_c0 - old->render_c0; 1018 c0 = now->render_c0 - old->render_c0;
918 c0 += now->media_c0 - old->media_c0; 1019 c0 += now->media_c0 - old->media_c0;
919 c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000; 1020 c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
920 1021
921 return c0 >= time; 1022 return c0 >= time;
922} 1023}
@@ -1264,7 +1365,31 @@ static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1264{ 1365{
1265 switch (port) { 1366 switch (port) {
1266 case PORT_A: 1367 case PORT_A:
1267 return val & BXT_PORTA_HOTPLUG_LONG_DETECT; 1368 return val & PORTA_HOTPLUG_LONG_DETECT;
1369 case PORT_B:
1370 return val & PORTB_HOTPLUG_LONG_DETECT;
1371 case PORT_C:
1372 return val & PORTC_HOTPLUG_LONG_DETECT;
1373 default:
1374 return false;
1375 }
1376}
1377
1378static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
1379{
1380 switch (port) {
1381 case PORT_E:
1382 return val & PORTE_HOTPLUG_LONG_DETECT;
1383 default:
1384 return false;
1385 }
1386}
1387
1388static bool spt_port_hotplug_long_detect(enum port port, u32 val)
1389{
1390 switch (port) {
1391 case PORT_A:
1392 return val & PORTA_HOTPLUG_LONG_DETECT;
1268 case PORT_B: 1393 case PORT_B:
1269 return val & PORTB_HOTPLUG_LONG_DETECT; 1394 return val & PORTB_HOTPLUG_LONG_DETECT;
1270 case PORT_C: 1395 case PORT_C:
@@ -1276,6 +1401,16 @@ static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1276 } 1401 }
1277} 1402}
1278 1403
1404static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1405{
1406 switch (port) {
1407 case PORT_A:
1408 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1409 default:
1410 return false;
1411 }
1412}
1413
1279static bool pch_port_hotplug_long_detect(enum port port, u32 val) 1414static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1280{ 1415{
1281 switch (port) { 1416 switch (port) {
@@ -1285,8 +1420,6 @@ static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1285 return val & PORTC_HOTPLUG_LONG_DETECT; 1420 return val & PORTC_HOTPLUG_LONG_DETECT;
1286 case PORT_D: 1421 case PORT_D:
1287 return val & PORTD_HOTPLUG_LONG_DETECT; 1422 return val & PORTD_HOTPLUG_LONG_DETECT;
1288 case PORT_E:
1289 return val & PORTE_HOTPLUG_LONG_DETECT;
1290 default: 1423 default:
1291 return false; 1424 return false;
1292 } 1425 }
@@ -1306,7 +1439,13 @@ static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1306 } 1439 }
1307} 1440}
1308 1441
1309/* Get a bit mask of pins that have triggered, and which ones may be long. */ 1442/*
1443 * Get a bit mask of pins that have triggered, and which ones may be long.
1444 * This can be called multiple times with the same masks to accumulate
1445 * hotplug detection results from several registers.
1446 *
1447 * Note that the caller is expected to zero out the masks initially.
1448 */
1310static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask, 1449static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1311 u32 hotplug_trigger, u32 dig_hotplug_reg, 1450 u32 hotplug_trigger, u32 dig_hotplug_reg,
1312 const u32 hpd[HPD_NUM_PINS], 1451 const u32 hpd[HPD_NUM_PINS],
@@ -1315,9 +1454,6 @@ static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1315 enum port port; 1454 enum port port;
1316 int i; 1455 int i;
1317 1456
1318 *pin_mask = 0;
1319 *long_mask = 0;
1320
1321 for_each_hpd_pin(i) { 1457 for_each_hpd_pin(i) {
1322 if ((hpd[i] & hotplug_trigger) == 0) 1458 if ((hpd[i] & hotplug_trigger) == 0)
1323 continue; 1459 continue;
@@ -1558,7 +1694,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
1558{ 1694{
1559 struct drm_i915_private *dev_priv = dev->dev_private; 1695 struct drm_i915_private *dev_priv = dev->dev_private;
1560 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1696 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1561 u32 pin_mask, long_mask; 1697 u32 pin_mask = 0, long_mask = 0;
1562 1698
1563 if (!hotplug_status) 1699 if (!hotplug_status)
1564 return; 1700 return;
@@ -1573,20 +1709,25 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
1573 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { 1709 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
1574 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1710 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1575 1711
1576 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1712 if (hotplug_trigger) {
1577 hotplug_trigger, hpd_status_g4x, 1713 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1578 i9xx_port_hotplug_long_detect); 1714 hotplug_trigger, hpd_status_g4x,
1579 intel_hpd_irq_handler(dev, pin_mask, long_mask); 1715 i9xx_port_hotplug_long_detect);
1716
1717 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1718 }
1580 1719
1581 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1720 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1582 dp_aux_irq_handler(dev); 1721 dp_aux_irq_handler(dev);
1583 } else { 1722 } else {
1584 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1723 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1585 1724
1586 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1725 if (hotplug_trigger) {
1587 hotplug_trigger, hpd_status_i915, 1726 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1588 i9xx_port_hotplug_long_detect); 1727 hotplug_trigger, hpd_status_i915,
1589 intel_hpd_irq_handler(dev, pin_mask, long_mask); 1728 i9xx_port_hotplug_long_detect);
1729 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1730 }
1590 } 1731 }
1591} 1732}
1592 1733
@@ -1680,23 +1821,30 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1680 return ret; 1821 return ret;
1681} 1822}
1682 1823
1824static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1825 const u32 hpd[HPD_NUM_PINS])
1826{
1827 struct drm_i915_private *dev_priv = to_i915(dev);
1828 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1829
1830 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1831 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1832
1833 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1834 dig_hotplug_reg, hpd,
1835 pch_port_hotplug_long_detect);
1836
1837 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1838}
1839
1683static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1840static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1684{ 1841{
1685 struct drm_i915_private *dev_priv = dev->dev_private; 1842 struct drm_i915_private *dev_priv = dev->dev_private;
1686 int pipe; 1843 int pipe;
1687 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1844 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1688 1845
1689 if (hotplug_trigger) { 1846 if (hotplug_trigger)
1690 u32 dig_hotplug_reg, pin_mask, long_mask; 1847 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1691
1692 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1693 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1694
1695 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1696 dig_hotplug_reg, hpd_ibx,
1697 pch_port_hotplug_long_detect);
1698 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1699 }
1700 1848
1701 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1849 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1702 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1850 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -1787,38 +1935,10 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1787{ 1935{
1788 struct drm_i915_private *dev_priv = dev->dev_private; 1936 struct drm_i915_private *dev_priv = dev->dev_private;
1789 int pipe; 1937 int pipe;
1790 u32 hotplug_trigger; 1938 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1791 1939
1792 if (HAS_PCH_SPT(dev)) 1940 if (hotplug_trigger)
1793 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT; 1941 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1794 else
1795 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1796
1797 if (hotplug_trigger) {
1798 u32 dig_hotplug_reg, pin_mask, long_mask;
1799
1800 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1801 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1802
1803 if (HAS_PCH_SPT(dev)) {
1804 intel_get_hpd_pins(&pin_mask, &long_mask,
1805 hotplug_trigger,
1806 dig_hotplug_reg, hpd_spt,
1807 pch_port_hotplug_long_detect);
1808
1809 /* detect PORTE HP event */
1810 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
1811 if (pch_port_hotplug_long_detect(PORT_E,
1812 dig_hotplug_reg))
1813 long_mask |= 1 << HPD_PORT_E;
1814 } else
1815 intel_get_hpd_pins(&pin_mask, &long_mask,
1816 hotplug_trigger,
1817 dig_hotplug_reg, hpd_cpt,
1818 pch_port_hotplug_long_detect);
1819
1820 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1821 }
1822 1942
1823 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1943 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1824 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 1944 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -1849,10 +1969,67 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1849 cpt_serr_int_handler(dev); 1969 cpt_serr_int_handler(dev);
1850} 1970}
1851 1971
1972static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
1973{
1974 struct drm_i915_private *dev_priv = dev->dev_private;
1975 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
1976 ~SDE_PORTE_HOTPLUG_SPT;
1977 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
1978 u32 pin_mask = 0, long_mask = 0;
1979
1980 if (hotplug_trigger) {
1981 u32 dig_hotplug_reg;
1982
1983 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1984 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1985
1986 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1987 dig_hotplug_reg, hpd_spt,
1988 spt_port_hotplug_long_detect);
1989 }
1990
1991 if (hotplug2_trigger) {
1992 u32 dig_hotplug_reg;
1993
1994 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
1995 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
1996
1997 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
1998 dig_hotplug_reg, hpd_spt,
1999 spt_port_hotplug2_long_detect);
2000 }
2001
2002 if (pin_mask)
2003 intel_hpd_irq_handler(dev, pin_mask, long_mask);
2004
2005 if (pch_iir & SDE_GMBUS_CPT)
2006 gmbus_irq_handler(dev);
2007}
2008
2009static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2010 const u32 hpd[HPD_NUM_PINS])
2011{
2012 struct drm_i915_private *dev_priv = to_i915(dev);
2013 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2014
2015 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2016 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2017
2018 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2019 dig_hotplug_reg, hpd,
2020 ilk_port_hotplug_long_detect);
2021
2022 intel_hpd_irq_handler(dev, pin_mask, long_mask);
2023}
2024
1852static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 2025static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1853{ 2026{
1854 struct drm_i915_private *dev_priv = dev->dev_private; 2027 struct drm_i915_private *dev_priv = dev->dev_private;
1855 enum pipe pipe; 2028 enum pipe pipe;
2029 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2030
2031 if (hotplug_trigger)
2032 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
1856 2033
1857 if (de_iir & DE_AUX_CHANNEL_A) 2034 if (de_iir & DE_AUX_CHANNEL_A)
1858 dp_aux_irq_handler(dev); 2035 dp_aux_irq_handler(dev);
@@ -1902,6 +2079,10 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1902{ 2079{
1903 struct drm_i915_private *dev_priv = dev->dev_private; 2080 struct drm_i915_private *dev_priv = dev->dev_private;
1904 enum pipe pipe; 2081 enum pipe pipe;
2082 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2083
2084 if (hotplug_trigger)
2085 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
1905 2086
1906 if (de_iir & DE_ERR_INT_IVB) 2087 if (de_iir & DE_ERR_INT_IVB)
1907 ivb_err_int_handler(dev); 2088 ivb_err_int_handler(dev);
@@ -2014,27 +2195,19 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2014 return ret; 2195 return ret;
2015} 2196}
2016 2197
2017static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status) 2198static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2199 const u32 hpd[HPD_NUM_PINS])
2018{ 2200{
2019 struct drm_i915_private *dev_priv = dev->dev_private; 2201 struct drm_i915_private *dev_priv = to_i915(dev);
2020 u32 hp_control, hp_trigger; 2202 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2021 u32 pin_mask, long_mask;
2022 2203
2023 /* Get the status */ 2204 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2024 hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK; 2205 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2025 hp_control = I915_READ(BXT_HOTPLUG_CTL);
2026 2206
2027 /* Hotplug not enabled ? */ 2207 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2028 if (!(hp_control & BXT_HOTPLUG_CTL_MASK)) { 2208 dig_hotplug_reg, hpd,
2029 DRM_ERROR("Interrupt when HPD disabled\n"); 2209 bxt_port_hotplug_long_detect);
2030 return;
2031 }
2032 2210
2033 /* Clear sticky bits in hpd status */
2034 I915_WRITE(BXT_HOTPLUG_CTL, hp_control);
2035
2036 intel_get_hpd_pins(&pin_mask, &long_mask, hp_trigger, hp_control,
2037 hpd_bxt, bxt_port_hotplug_long_detect);
2038 intel_hpd_irq_handler(dev, pin_mask, long_mask); 2211 intel_hpd_irq_handler(dev, pin_mask, long_mask);
2039} 2212}
2040 2213
@@ -2051,7 +2224,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2051 if (!intel_irqs_enabled(dev_priv)) 2224 if (!intel_irqs_enabled(dev_priv))
2052 return IRQ_NONE; 2225 return IRQ_NONE;
2053 2226
2054 if (IS_GEN9(dev)) 2227 if (INTEL_INFO(dev_priv)->gen >= 9)
2055 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 2228 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2056 GEN9_AUX_CHANNEL_D; 2229 GEN9_AUX_CHANNEL_D;
2057 2230
@@ -2084,6 +2257,12 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2084 tmp = I915_READ(GEN8_DE_PORT_IIR); 2257 tmp = I915_READ(GEN8_DE_PORT_IIR);
2085 if (tmp) { 2258 if (tmp) {
2086 bool found = false; 2259 bool found = false;
2260 u32 hotplug_trigger = 0;
2261
2262 if (IS_BROXTON(dev_priv))
2263 hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
2264 else if (IS_BROADWELL(dev_priv))
2265 hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
2087 2266
2088 I915_WRITE(GEN8_DE_PORT_IIR, tmp); 2267 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2089 ret = IRQ_HANDLED; 2268 ret = IRQ_HANDLED;
@@ -2093,8 +2272,11 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2093 found = true; 2272 found = true;
2094 } 2273 }
2095 2274
2096 if (IS_BROXTON(dev) && tmp & BXT_DE_PORT_HOTPLUG_MASK) { 2275 if (hotplug_trigger) {
2097 bxt_hpd_handler(dev, tmp); 2276 if (IS_BROXTON(dev))
2277 bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
2278 else
2279 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
2098 found = true; 2280 found = true;
2099 } 2281 }
2100 2282
@@ -2125,7 +2307,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2125 intel_pipe_handle_vblank(dev, pipe)) 2307 intel_pipe_handle_vblank(dev, pipe))
2126 intel_check_page_flip(dev, pipe); 2308 intel_check_page_flip(dev, pipe);
2127 2309
2128 if (IS_GEN9(dev)) 2310 if (INTEL_INFO(dev_priv)->gen >= 9)
2129 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE; 2311 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2130 else 2312 else
2131 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE; 2313 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
@@ -2143,7 +2325,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2143 pipe); 2325 pipe);
2144 2326
2145 2327
2146 if (IS_GEN9(dev)) 2328 if (INTEL_INFO(dev_priv)->gen >= 9)
2147 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2329 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2148 else 2330 else
2149 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2331 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
@@ -2167,7 +2349,11 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2167 if (pch_iir) { 2349 if (pch_iir) {
2168 I915_WRITE(SDEIIR, pch_iir); 2350 I915_WRITE(SDEIIR, pch_iir);
2169 ret = IRQ_HANDLED; 2351 ret = IRQ_HANDLED;
2170 cpt_irq_handler(dev, pch_iir); 2352
2353 if (HAS_PCH_SPT(dev_priv))
2354 spt_irq_handler(dev, pch_iir);
2355 else
2356 cpt_irq_handler(dev, pch_iir);
2171 } else 2357 } else
2172 DRM_ERROR("The master control interrupt lied (SDE)!\n"); 2358 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2173 2359
@@ -2209,6 +2395,7 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2209 2395
2210/** 2396/**
2211 * i915_reset_and_wakeup - do process context error handling work 2397 * i915_reset_and_wakeup - do process context error handling work
2398 * @dev: drm device
2212 * 2399 *
2213 * Fire an error uevent so userspace can see that a hang or error 2400 * Fire an error uevent so userspace can see that a hang or error
2214 * was detected. 2401 * was detected.
@@ -2386,7 +2573,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
2386 * i915_handle_error - handle a gpu error 2573 * i915_handle_error - handle a gpu error
2387 * @dev: drm device 2574 * @dev: drm device
2388 * 2575 *
2389 * Do some basic checking of regsiter state at error time and 2576 * Do some basic checking of register state at error time and
2390 * dump it to the syslog. Also call i915_capture_error_state() to make 2577 * dump it to the syslog. Also call i915_capture_error_state() to make
2391 * sure we get a record and make it available in debugfs. Fire a uevent 2578 * sure we get a record and make it available in debugfs. Fire a uevent
2392 * so userspace knows something bad happened (should trigger collection 2579 * so userspace knows something bad happened (should trigger collection
@@ -2432,7 +2619,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
2432/* Called from drm generic code, passed 'crtc' which 2619/* Called from drm generic code, passed 'crtc' which
2433 * we use as a pipe index 2620 * we use as a pipe index
2434 */ 2621 */
2435static int i915_enable_vblank(struct drm_device *dev, int pipe) 2622static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2436{ 2623{
2437 struct drm_i915_private *dev_priv = dev->dev_private; 2624 struct drm_i915_private *dev_priv = dev->dev_private;
2438 unsigned long irqflags; 2625 unsigned long irqflags;
@@ -2449,7 +2636,7 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
2449 return 0; 2636 return 0;
2450} 2637}
2451 2638
2452static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 2639static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2453{ 2640{
2454 struct drm_i915_private *dev_priv = dev->dev_private; 2641 struct drm_i915_private *dev_priv = dev->dev_private;
2455 unsigned long irqflags; 2642 unsigned long irqflags;
@@ -2463,7 +2650,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2463 return 0; 2650 return 0;
2464} 2651}
2465 2652
2466static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2653static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
2467{ 2654{
2468 struct drm_i915_private *dev_priv = dev->dev_private; 2655 struct drm_i915_private *dev_priv = dev->dev_private;
2469 unsigned long irqflags; 2656 unsigned long irqflags;
@@ -2476,7 +2663,7 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2476 return 0; 2663 return 0;
2477} 2664}
2478 2665
2479static int gen8_enable_vblank(struct drm_device *dev, int pipe) 2666static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2480{ 2667{
2481 struct drm_i915_private *dev_priv = dev->dev_private; 2668 struct drm_i915_private *dev_priv = dev->dev_private;
2482 unsigned long irqflags; 2669 unsigned long irqflags;
@@ -2492,7 +2679,7 @@ static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2492/* Called from drm generic code, passed 'crtc' which 2679/* Called from drm generic code, passed 'crtc' which
2493 * we use as a pipe index 2680 * we use as a pipe index
2494 */ 2681 */
2495static void i915_disable_vblank(struct drm_device *dev, int pipe) 2682static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2496{ 2683{
2497 struct drm_i915_private *dev_priv = dev->dev_private; 2684 struct drm_i915_private *dev_priv = dev->dev_private;
2498 unsigned long irqflags; 2685 unsigned long irqflags;
@@ -2504,7 +2691,7 @@ static void i915_disable_vblank(struct drm_device *dev, int pipe)
2504 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2691 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2505} 2692}
2506 2693
2507static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2694static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2508{ 2695{
2509 struct drm_i915_private *dev_priv = dev->dev_private; 2696 struct drm_i915_private *dev_priv = dev->dev_private;
2510 unsigned long irqflags; 2697 unsigned long irqflags;
@@ -2516,7 +2703,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2516 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2703 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2517} 2704}
2518 2705
2519static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 2706static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
2520{ 2707{
2521 struct drm_i915_private *dev_priv = dev->dev_private; 2708 struct drm_i915_private *dev_priv = dev->dev_private;
2522 unsigned long irqflags; 2709 unsigned long irqflags;
@@ -2527,7 +2714,7 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2527 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2714 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2528} 2715}
2529 2716
2530static void gen8_disable_vblank(struct drm_device *dev, int pipe) 2717static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2531{ 2718{
2532 struct drm_i915_private *dev_priv = dev->dev_private; 2719 struct drm_i915_private *dev_priv = dev->dev_private;
2533 unsigned long irqflags; 2720 unsigned long irqflags;
@@ -2599,6 +2786,26 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2599 u64 offset = 0; 2786 u64 offset = 0;
2600 int i, backwards; 2787 int i, backwards;
2601 2788
2789 /*
2790 * This function does not support execlist mode - any attempt to
2791 * proceed further into this function will result in a kernel panic
2792 * when dereferencing ring->buffer, which is not set up in execlist
2793 * mode.
2794 *
2795 * The correct way of doing it would be to derive the currently
2796 * executing ring buffer from the current context, which is derived
2797 * from the currently running request. Unfortunately, to get the
2798 * current request we would have to grab the struct_mutex before doing
2799 * anything else, which would be ill-advised since some other thread
2800 * might have grabbed it already and managed to hang itself, causing
2801 * the hang checker to deadlock.
2802 *
2803 * Therefore, this function does not support execlist mode in its
2804 * current form. Just return NULL and move on.
2805 */
2806 if (ring->buffer == NULL)
2807 return NULL;
2808
2602 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2809 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2603 if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) 2810 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2604 return NULL; 2811 return NULL;
@@ -2933,7 +3140,7 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2933{ 3140{
2934 enum pipe pipe; 3141 enum pipe pipe;
2935 3142
2936 I915_WRITE(PORT_HOTPLUG_EN, 0); 3143 i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
2937 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3144 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2938 3145
2939 for_each_pipe(dev_priv, pipe) 3146 for_each_pipe(dev_priv, pipe)
@@ -3027,86 +3234,124 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
3027 vlv_display_irq_reset(dev_priv); 3234 vlv_display_irq_reset(dev_priv);
3028} 3235}
3029 3236
3237static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
3238 const u32 hpd[HPD_NUM_PINS])
3239{
3240 struct drm_i915_private *dev_priv = to_i915(dev);
3241 struct intel_encoder *encoder;
3242 u32 enabled_irqs = 0;
3243
3244 for_each_intel_encoder(dev, encoder)
3245 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3246 enabled_irqs |= hpd[encoder->hpd_pin];
3247
3248 return enabled_irqs;
3249}
3250
3030static void ibx_hpd_irq_setup(struct drm_device *dev) 3251static void ibx_hpd_irq_setup(struct drm_device *dev)
3031{ 3252{
3032 struct drm_i915_private *dev_priv = dev->dev_private; 3253 struct drm_i915_private *dev_priv = dev->dev_private;
3033 struct intel_encoder *intel_encoder; 3254 u32 hotplug_irqs, hotplug, enabled_irqs;
3034 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3035 3255
3036 if (HAS_PCH_IBX(dev)) { 3256 if (HAS_PCH_IBX(dev)) {
3037 hotplug_irqs = SDE_HOTPLUG_MASK; 3257 hotplug_irqs = SDE_HOTPLUG_MASK;
3038 for_each_intel_encoder(dev, intel_encoder) 3258 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
3039 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
3040 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3041 } else if (HAS_PCH_SPT(dev)) {
3042 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3043 for_each_intel_encoder(dev, intel_encoder)
3044 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
3045 enabled_irqs |= hpd_spt[intel_encoder->hpd_pin];
3046 } else { 3259 } else {
3047 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3260 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3048 for_each_intel_encoder(dev, intel_encoder) 3261 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
3049 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
3050 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3051 } 3262 }
3052 3263
3053 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3264 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3054 3265
3055 /* 3266 /*
3056 * Enable digital hotplug on the PCH, and configure the DP short pulse 3267 * Enable digital hotplug on the PCH, and configure the DP short pulse
3057 * duration to 2ms (which is the minimum in the Display Port spec) 3268 * duration to 2ms (which is the minimum in the Display Port spec).
3058 * 3269 * The pulse duration bits are reserved on LPT+.
3059 * This register is the same on all known PCH chips.
3060 */ 3270 */
3061 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3271 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3062 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 3272 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3063 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3273 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3064 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3274 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3065 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3275 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3276 /*
3277 * When CPU and PCH are on the same package, port A
3278 * HPD must be enabled in both north and south.
3279 */
3280 if (HAS_PCH_LPT_LP(dev))
3281 hotplug |= PORTA_HOTPLUG_ENABLE;
3066 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3282 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3283}
3067 3284
3068 /* enable SPT PORTE hot plug */ 3285static void spt_hpd_irq_setup(struct drm_device *dev)
3069 if (HAS_PCH_SPT(dev)) { 3286{
3070 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3287 struct drm_i915_private *dev_priv = dev->dev_private;
3071 hotplug |= PORTE_HOTPLUG_ENABLE; 3288 u32 hotplug_irqs, hotplug, enabled_irqs;
3072 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3289
3073 } 3290 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3291 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
3292
3293 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3294
3295 /* Enable digital hotplug on the PCH */
3296 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3297 hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3298 PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3299 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3300
3301 hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3302 hotplug |= PORTE_HOTPLUG_ENABLE;
3303 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3074} 3304}
3075 3305
3076static void bxt_hpd_irq_setup(struct drm_device *dev) 3306static void ilk_hpd_irq_setup(struct drm_device *dev)
3077{ 3307{
3078 struct drm_i915_private *dev_priv = dev->dev_private; 3308 struct drm_i915_private *dev_priv = dev->dev_private;
3079 struct intel_encoder *intel_encoder; 3309 u32 hotplug_irqs, hotplug, enabled_irqs;
3080 u32 hotplug_port = 0; 3310
3081 u32 hotplug_ctrl; 3311 if (INTEL_INFO(dev)->gen >= 8) {
3082 3312 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3083 /* Now, enable HPD */ 3313 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
3084 for_each_intel_encoder(dev, intel_encoder) { 3314
3085 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state 3315 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3086 == HPD_ENABLED) 3316 } else if (INTEL_INFO(dev)->gen >= 7) {
3087 hotplug_port |= hpd_bxt[intel_encoder->hpd_pin]; 3317 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3318 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
3319
3320 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3321 } else {
3322 hotplug_irqs = DE_DP_A_HOTPLUG;
3323 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
3324
3325 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3088 } 3326 }
3089 3327
3090 /* Mask all HPD control bits */ 3328 /*
3091 hotplug_ctrl = I915_READ(BXT_HOTPLUG_CTL) & ~BXT_HOTPLUG_CTL_MASK; 3329 * Enable digital hotplug on the CPU, and configure the DP short pulse
3330 * duration to 2ms (which is the minimum in the Display Port spec)
3331 * The pulse duration bits are reserved on HSW+.
3332 */
3333 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3334 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3335 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3336 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3337
3338 ibx_hpd_irq_setup(dev);
3339}
3340
3341static void bxt_hpd_irq_setup(struct drm_device *dev)
3342{
3343 struct drm_i915_private *dev_priv = dev->dev_private;
3344 u32 hotplug_irqs, hotplug, enabled_irqs;
3092 3345
3093 /* Enable requested port in hotplug control */ 3346 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
3094 /* TODO: implement (short) HPD support on port A */ 3347 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3095 WARN_ON_ONCE(hotplug_port & BXT_DE_PORT_HP_DDIA);
3096 if (hotplug_port & BXT_DE_PORT_HP_DDIB)
3097 hotplug_ctrl |= BXT_DDIB_HPD_ENABLE;
3098 if (hotplug_port & BXT_DE_PORT_HP_DDIC)
3099 hotplug_ctrl |= BXT_DDIC_HPD_ENABLE;
3100 I915_WRITE(BXT_HOTPLUG_CTL, hotplug_ctrl);
3101 3348
3102 /* Unmask DDI hotplug in IMR */ 3349 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3103 hotplug_ctrl = I915_READ(GEN8_DE_PORT_IMR) & ~hotplug_port;
3104 I915_WRITE(GEN8_DE_PORT_IMR, hotplug_ctrl);
3105 3350
3106 /* Enable DDI hotplug in IER */ 3351 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3107 hotplug_ctrl = I915_READ(GEN8_DE_PORT_IER) | hotplug_port; 3352 hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3108 I915_WRITE(GEN8_DE_PORT_IER, hotplug_ctrl); 3353 PORTA_HOTPLUG_ENABLE;
3109 POSTING_READ(GEN8_DE_PORT_IER); 3354 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3110} 3355}
3111 3356
3112static void ibx_irq_postinstall(struct drm_device *dev) 3357static void ibx_irq_postinstall(struct drm_device *dev)
@@ -3122,7 +3367,7 @@ static void ibx_irq_postinstall(struct drm_device *dev)
3122 else 3367 else
3123 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3368 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3124 3369
3125 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR); 3370 gen5_assert_iir_is_zero(dev_priv, SDEIIR);
3126 I915_WRITE(SDEIMR, ~mask); 3371 I915_WRITE(SDEIMR, ~mask);
3127} 3372}
3128 3373
@@ -3174,15 +3419,17 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
3174 DE_PLANEB_FLIP_DONE_IVB | 3419 DE_PLANEB_FLIP_DONE_IVB |
3175 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3420 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3176 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3421 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3177 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); 3422 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3423 DE_DP_A_HOTPLUG_IVB);
3178 } else { 3424 } else {
3179 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3425 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3180 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3426 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3181 DE_AUX_CHANNEL_A | 3427 DE_AUX_CHANNEL_A |
3182 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3428 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3183 DE_POISON); 3429 DE_POISON);
3184 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3430 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3185 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; 3431 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3432 DE_DP_A_HOTPLUG);
3186 } 3433 }
3187 3434
3188 dev_priv->irq_mask = ~display_mask; 3435 dev_priv->irq_mask = ~display_mask;
@@ -3309,7 +3556,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3309{ 3556{
3310 dev_priv->irq_mask = ~0; 3557 dev_priv->irq_mask = ~0;
3311 3558
3312 I915_WRITE(PORT_HOTPLUG_EN, 0); 3559 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3313 POSTING_READ(PORT_HOTPLUG_EN); 3560 POSTING_READ(PORT_HOTPLUG_EN);
3314 3561
3315 I915_WRITE(VLV_IIR, 0xffffffff); 3562 I915_WRITE(VLV_IIR, 0xffffffff);
@@ -3378,24 +3625,31 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3378{ 3625{
3379 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3626 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3380 uint32_t de_pipe_enables; 3627 uint32_t de_pipe_enables;
3381 int pipe; 3628 u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3382 u32 de_port_en = GEN8_AUX_CHANNEL_A; 3629 u32 de_port_enables;
3630 enum pipe pipe;
3383 3631
3384 if (IS_GEN9(dev_priv)) { 3632 if (INTEL_INFO(dev_priv)->gen >= 9) {
3385 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3633 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3386 GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3634 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3387 de_port_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3635 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3388 GEN9_AUX_CHANNEL_D; 3636 GEN9_AUX_CHANNEL_D;
3389
3390 if (IS_BROXTON(dev_priv)) 3637 if (IS_BROXTON(dev_priv))
3391 de_port_en |= BXT_DE_PORT_GMBUS; 3638 de_port_masked |= BXT_DE_PORT_GMBUS;
3392 } else 3639 } else {
3393 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3640 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3394 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3641 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3642 }
3395 3643
3396 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3644 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3397 GEN8_PIPE_FIFO_UNDERRUN; 3645 GEN8_PIPE_FIFO_UNDERRUN;
3398 3646
3647 de_port_enables = de_port_masked;
3648 if (IS_BROXTON(dev_priv))
3649 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3650 else if (IS_BROADWELL(dev_priv))
3651 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3652
3399 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3653 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3400 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3654 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3401 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3655 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
@@ -3407,7 +3661,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3407 dev_priv->de_irq_mask[pipe], 3661 dev_priv->de_irq_mask[pipe],
3408 de_pipe_enables); 3662 de_pipe_enables);
3409 3663
3410 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_en, de_port_en); 3664 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3411} 3665}
3412 3666
3413static int gen8_irq_postinstall(struct drm_device *dev) 3667static int gen8_irq_postinstall(struct drm_device *dev)
@@ -3676,7 +3930,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
3676 int pipe; 3930 int pipe;
3677 3931
3678 if (I915_HAS_HOTPLUG(dev)) { 3932 if (I915_HAS_HOTPLUG(dev)) {
3679 I915_WRITE(PORT_HOTPLUG_EN, 0); 3933 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3680 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3934 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3681 } 3935 }
3682 3936
@@ -3710,7 +3964,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
3710 I915_USER_INTERRUPT; 3964 I915_USER_INTERRUPT;
3711 3965
3712 if (I915_HAS_HOTPLUG(dev)) { 3966 if (I915_HAS_HOTPLUG(dev)) {
3713 I915_WRITE(PORT_HOTPLUG_EN, 0); 3967 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3714 POSTING_READ(PORT_HOTPLUG_EN); 3968 POSTING_READ(PORT_HOTPLUG_EN);
3715 3969
3716 /* Enable in IER... */ 3970 /* Enable in IER... */
@@ -3872,7 +4126,7 @@ static void i915_irq_uninstall(struct drm_device * dev)
3872 int pipe; 4126 int pipe;
3873 4127
3874 if (I915_HAS_HOTPLUG(dev)) { 4128 if (I915_HAS_HOTPLUG(dev)) {
3875 I915_WRITE(PORT_HOTPLUG_EN, 0); 4129 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3876 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4130 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3877 } 4131 }
3878 4132
@@ -3893,7 +4147,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
3893 struct drm_i915_private *dev_priv = dev->dev_private; 4147 struct drm_i915_private *dev_priv = dev->dev_private;
3894 int pipe; 4148 int pipe;
3895 4149
3896 I915_WRITE(PORT_HOTPLUG_EN, 0); 4150 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3897 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4151 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3898 4152
3899 I915_WRITE(HWSTAM, 0xeffe); 4153 I915_WRITE(HWSTAM, 0xeffe);
@@ -3954,7 +4208,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
3954 I915_WRITE(IER, enable_mask); 4208 I915_WRITE(IER, enable_mask);
3955 POSTING_READ(IER); 4209 POSTING_READ(IER);
3956 4210
3957 I915_WRITE(PORT_HOTPLUG_EN, 0); 4211 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3958 POSTING_READ(PORT_HOTPLUG_EN); 4212 POSTING_READ(PORT_HOTPLUG_EN);
3959 4213
3960 i915_enable_asle_pipestat(dev); 4214 i915_enable_asle_pipestat(dev);
@@ -3965,29 +4219,27 @@ static int i965_irq_postinstall(struct drm_device *dev)
3965static void i915_hpd_irq_setup(struct drm_device *dev) 4219static void i915_hpd_irq_setup(struct drm_device *dev)
3966{ 4220{
3967 struct drm_i915_private *dev_priv = dev->dev_private; 4221 struct drm_i915_private *dev_priv = dev->dev_private;
3968 struct intel_encoder *intel_encoder;
3969 u32 hotplug_en; 4222 u32 hotplug_en;
3970 4223
3971 assert_spin_locked(&dev_priv->irq_lock); 4224 assert_spin_locked(&dev_priv->irq_lock);
3972 4225
3973 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
3974 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
3975 /* Note HDMI and DP share hotplug bits */ 4226 /* Note HDMI and DP share hotplug bits */
3976 /* enable bits are the same for all generations */ 4227 /* enable bits are the same for all generations */
3977 for_each_intel_encoder(dev, intel_encoder) 4228 hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
3978 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
3979 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
3980 /* Programming the CRT detection parameters tends 4229 /* Programming the CRT detection parameters tends
3981 to generate a spurious hotplug event about three 4230 to generate a spurious hotplug event about three
3982 seconds later. So just do it once. 4231 seconds later. So just do it once.
3983 */ 4232 */
3984 if (IS_G4X(dev)) 4233 if (IS_G4X(dev))
3985 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4234 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
3986 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
3987 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4235 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3988 4236
3989 /* Ignore TV since it's buggy */ 4237 /* Ignore TV since it's buggy */
3990 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 4238 i915_hotplug_interrupt_update_locked(dev_priv,
4239 HOTPLUG_INT_EN_MASK |
4240 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4241 CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4242 hotplug_en);
3991} 4243}
3992 4244
3993static irqreturn_t i965_irq_handler(int irq, void *arg) 4245static irqreturn_t i965_irq_handler(int irq, void *arg)
@@ -4100,7 +4352,7 @@ static void i965_irq_uninstall(struct drm_device * dev)
4100 if (!dev_priv) 4352 if (!dev_priv)
4101 return; 4353 return;
4102 4354
4103 I915_WRITE(PORT_HOTPLUG_EN, 0); 4355 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4104 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4356 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4105 4357
4106 I915_WRITE(HWSTAM, 0xffffffff); 4358 I915_WRITE(HWSTAM, 0xffffffff);
@@ -4148,7 +4400,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4148 dev->driver->get_vblank_counter = i8xx_get_vblank_counter; 4400 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4149 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) { 4401 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4150 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 4402 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4151 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 4403 dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4152 } else { 4404 } else {
4153 dev->driver->get_vblank_counter = i915_get_vblank_counter; 4405 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4154 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 4406 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
@@ -4188,10 +4440,12 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4188 dev->driver->irq_uninstall = gen8_irq_uninstall; 4440 dev->driver->irq_uninstall = gen8_irq_uninstall;
4189 dev->driver->enable_vblank = gen8_enable_vblank; 4441 dev->driver->enable_vblank = gen8_enable_vblank;
4190 dev->driver->disable_vblank = gen8_disable_vblank; 4442 dev->driver->disable_vblank = gen8_disable_vblank;
4191 if (HAS_PCH_SPLIT(dev)) 4443 if (IS_BROXTON(dev))
4192 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4193 else
4194 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 4444 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4445 else if (HAS_PCH_SPT(dev))
4446 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4447 else
4448 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4195 } else if (HAS_PCH_SPLIT(dev)) { 4449 } else if (HAS_PCH_SPLIT(dev)) {
4196 dev->driver->irq_handler = ironlake_irq_handler; 4450 dev->driver->irq_handler = ironlake_irq_handler;
4197 dev->driver->irq_preinstall = ironlake_irq_reset; 4451 dev->driver->irq_preinstall = ironlake_irq_reset;
@@ -4199,7 +4453,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4199 dev->driver->irq_uninstall = ironlake_irq_uninstall; 4453 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4200 dev->driver->enable_vblank = ironlake_enable_vblank; 4454 dev->driver->enable_vblank = ironlake_enable_vblank;
4201 dev->driver->disable_vblank = ironlake_disable_vblank; 4455 dev->driver->disable_vblank = ironlake_disable_vblank;
4202 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 4456 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4203 } else { 4457 } else {
4204 if (INTEL_INFO(dev_priv)->gen == 2) { 4458 if (INTEL_INFO(dev_priv)->gen == 2) {
4205 dev->driver->irq_preinstall = i8xx_irq_preinstall; 4459 dev->driver->irq_preinstall = i8xx_irq_preinstall;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 5ae4b0aba564..96bb23865eac 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -38,9 +38,8 @@ struct i915_params i915 __read_mostly = {
38 .enable_ppgtt = -1, 38 .enable_ppgtt = -1,
39 .enable_psr = 0, 39 .enable_psr = 0,
40 .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT), 40 .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
41 .disable_power_well = 1, 41 .disable_power_well = -1,
42 .enable_ips = 1, 42 .enable_ips = 1,
43 .fastboot = 0,
44 .prefault_disable = 0, 43 .prefault_disable = 0,
45 .load_detect_test = 0, 44 .load_detect_test = 0,
46 .reset = true, 45 .reset = true,
@@ -51,6 +50,7 @@ struct i915_params i915 __read_mostly = {
51 .use_mmio_flip = 0, 50 .use_mmio_flip = 0,
52 .mmio_debug = 0, 51 .mmio_debug = 0,
53 .verbose_state_checks = 1, 52 .verbose_state_checks = 1,
53 .nuclear_pageflip = 0,
54 .edp_vswing = 0, 54 .edp_vswing = 0,
55 .enable_guc_submission = false, 55 .enable_guc_submission = false,
56 .guc_log_level = -1, 56 .guc_log_level = -1,
@@ -61,7 +61,7 @@ MODULE_PARM_DESC(modeset,
61 "Use kernel modesetting [KMS] (0=disable, " 61 "Use kernel modesetting [KMS] (0=disable, "
62 "1=on, -1=force vga console preference [default])"); 62 "1=on, -1=force vga console preference [default])");
63 63
64module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600); 64module_param_named_unsafe(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
65MODULE_PARM_DESC(panel_ignore_lid, 65MODULE_PARM_DESC(panel_ignore_lid,
66 "Override lid status (0=autodetect, 1=autodetect disabled [default], " 66 "Override lid status (0=autodetect, 1=autodetect disabled [default], "
67 "-1=force lid closed, -2=force lid open)"); 67 "-1=force lid closed, -2=force lid open)");
@@ -84,17 +84,17 @@ MODULE_PARM_DESC(enable_fbc,
84 "Enable frame buffer compression for power savings " 84 "Enable frame buffer compression for power savings "
85 "(default: -1 (use per-chip default))"); 85 "(default: -1 (use per-chip default))");
86 86
87module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600); 87module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
88MODULE_PARM_DESC(lvds_channel_mode, 88MODULE_PARM_DESC(lvds_channel_mode,
89 "Specify LVDS channel mode " 89 "Specify LVDS channel mode "
90 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)"); 90 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
91 91
92module_param_named(lvds_use_ssc, i915.panel_use_ssc, int, 0600); 92module_param_named_unsafe(lvds_use_ssc, i915.panel_use_ssc, int, 0600);
93MODULE_PARM_DESC(lvds_use_ssc, 93MODULE_PARM_DESC(lvds_use_ssc,
94 "Use Spread Spectrum Clock with panels [LVDS/eDP] " 94 "Use Spread Spectrum Clock with panels [LVDS/eDP] "
95 "(default: auto from VBT)"); 95 "(default: auto from VBT)");
96 96
97module_param_named(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600); 97module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600);
98MODULE_PARM_DESC(vbt_sdvo_panel_type, 98MODULE_PARM_DESC(vbt_sdvo_panel_type,
99 "Override/Ignore selection of SDVO panel mode in the VBT " 99 "Override/Ignore selection of SDVO panel mode in the VBT "
100 "(-2=ignore, -1=auto [default], index in VBT BIOS table)"); 100 "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
@@ -102,7 +102,7 @@ MODULE_PARM_DESC(vbt_sdvo_panel_type,
102module_param_named_unsafe(reset, i915.reset, bool, 0600); 102module_param_named_unsafe(reset, i915.reset, bool, 0600);
103MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)"); 103MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
104 104
105module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644); 105module_param_named_unsafe(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
106MODULE_PARM_DESC(enable_hangcheck, 106MODULE_PARM_DESC(enable_hangcheck,
107 "Periodically check GPU activity for detecting hangs. " 107 "Periodically check GPU activity for detecting hangs. "
108 "WARNING: Disabling this can cause system wide hangs. " 108 "WARNING: Disabling this can cause system wide hangs. "
@@ -113,29 +113,26 @@ MODULE_PARM_DESC(enable_ppgtt,
113 "Override PPGTT usage. " 113 "Override PPGTT usage. "
114 "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)"); 114 "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
115 115
116module_param_named(enable_execlists, i915.enable_execlists, int, 0400); 116module_param_named_unsafe(enable_execlists, i915.enable_execlists, int, 0400);
117MODULE_PARM_DESC(enable_execlists, 117MODULE_PARM_DESC(enable_execlists,
118 "Override execlists usage. " 118 "Override execlists usage. "
119 "(-1=auto [default], 0=disabled, 1=enabled)"); 119 "(-1=auto [default], 0=disabled, 1=enabled)");
120 120
121module_param_named(enable_psr, i915.enable_psr, int, 0600); 121module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600);
122MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)"); 122MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
123 123
124module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600); 124module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
125MODULE_PARM_DESC(preliminary_hw_support, 125MODULE_PARM_DESC(preliminary_hw_support,
126 "Enable preliminary hardware support."); 126 "Enable preliminary hardware support.");
127 127
128module_param_named(disable_power_well, i915.disable_power_well, int, 0600); 128module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0600);
129MODULE_PARM_DESC(disable_power_well, 129MODULE_PARM_DESC(disable_power_well,
130 "Disable the power well when possible (default: true)"); 130 "Disable display power wells when possible "
131 "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
131 132
132module_param_named(enable_ips, i915.enable_ips, int, 0600); 133module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600);
133MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)"); 134MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
134 135
135module_param_named(fastboot, i915.fastboot, bool, 0600);
136MODULE_PARM_DESC(fastboot,
137 "Try to skip unnecessary mode sets at boot time (default: false)");
138
139module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600); 136module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600);
140MODULE_PARM_DESC(prefault_disable, 137MODULE_PARM_DESC(prefault_disable,
141 "Disable page prefaulting for pread/pwrite/reloc (default:false). " 138 "Disable page prefaulting for pread/pwrite/reloc (default:false). "
@@ -146,7 +143,7 @@ MODULE_PARM_DESC(load_detect_test,
146 "Force-enable the VGA load detect code for testing (default:false). " 143 "Force-enable the VGA load detect code for testing (default:false). "
147 "For developers only."); 144 "For developers only.");
148 145
149module_param_named(invert_brightness, i915.invert_brightness, int, 0600); 146module_param_named_unsafe(invert_brightness, i915.invert_brightness, int, 0600);
150MODULE_PARM_DESC(invert_brightness, 147MODULE_PARM_DESC(invert_brightness,
151 "Invert backlight brightness " 148 "Invert backlight brightness "
152 "(-1 force normal, 0 machine defaults, 1 force inversion), please " 149 "(-1 force normal, 0 machine defaults, 1 force inversion), please "
@@ -157,14 +154,14 @@ MODULE_PARM_DESC(invert_brightness,
157module_param_named(disable_display, i915.disable_display, bool, 0600); 154module_param_named(disable_display, i915.disable_display, bool, 0600);
158MODULE_PARM_DESC(disable_display, "Disable display (default: false)"); 155MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
159 156
160module_param_named(disable_vtd_wa, i915.disable_vtd_wa, bool, 0600); 157module_param_named_unsafe(disable_vtd_wa, i915.disable_vtd_wa, bool, 0600);
161MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)"); 158MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)");
162 159
163module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600); 160module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
164MODULE_PARM_DESC(enable_cmd_parser, 161MODULE_PARM_DESC(enable_cmd_parser,
165 "Enable command parsing (1=enabled [default], 0=disabled)"); 162 "Enable command parsing (1=enabled [default], 0=disabled)");
166 163
167module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600); 164module_param_named_unsafe(use_mmio_flip, i915.use_mmio_flip, int, 0600);
168MODULE_PARM_DESC(use_mmio_flip, 165MODULE_PARM_DESC(use_mmio_flip,
169 "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)"); 166 "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
170 167
@@ -177,6 +174,10 @@ module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
177MODULE_PARM_DESC(verbose_state_checks, 174MODULE_PARM_DESC(verbose_state_checks,
178 "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions."); 175 "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
179 176
177module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600);
178MODULE_PARM_DESC(nuclear_pageflip,
179 "Force atomic modeset functionality; asynchronous mode is not yet supported. (default: false).");
180
180/* WA to get away with the default setting in VBT for early platforms.Will be removed */ 181/* WA to get away with the default setting in VBT for early platforms.Will be removed */
181module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400); 182module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400);
182MODULE_PARM_DESC(edp_vswing, 183MODULE_PARM_DESC(edp_vswing,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 83a0888756d6..bc7b8faba84d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -105,7 +105,7 @@
105#define GRDOM_RESET_STATUS (1<<1) 105#define GRDOM_RESET_STATUS (1<<1)
106#define GRDOM_RESET_ENABLE (1<<0) 106#define GRDOM_RESET_ENABLE (1<<0)
107 107
108#define ILK_GDSR 0x2ca4 /* MCHBAR offset */ 108#define ILK_GDSR (MCHBAR_MIRROR_BASE + 0x2ca4)
109#define ILK_GRDOM_FULL (0<<1) 109#define ILK_GRDOM_FULL (0<<1)
110#define ILK_GRDOM_RENDER (1<<1) 110#define ILK_GRDOM_RENDER (1<<1)
111#define ILK_GRDOM_MEDIA (3<<1) 111#define ILK_GRDOM_MEDIA (3<<1)
@@ -352,8 +352,8 @@
352 */ 352 */
353#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1) 353#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
354#define MI_LRI_FORCE_POSTED (1<<12) 354#define MI_LRI_FORCE_POSTED (1<<12)
355#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*(x)-1) 355#define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1)
356#define MI_STORE_REGISTER_MEM_GEN8(x) MI_INSTR(0x24, 3*(x)-1) 356#define MI_STORE_REGISTER_MEM_GEN8 MI_INSTR(0x24, 2)
357#define MI_SRM_LRM_GLOBAL_GTT (1<<22) 357#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
358#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ 358#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
359#define MI_FLUSH_DW_STORE_INDEX (1<<21) 359#define MI_FLUSH_DW_STORE_INDEX (1<<21)
@@ -364,8 +364,8 @@
364#define MI_INVALIDATE_BSD (1<<7) 364#define MI_INVALIDATE_BSD (1<<7)
365#define MI_FLUSH_DW_USE_GTT (1<<2) 365#define MI_FLUSH_DW_USE_GTT (1<<2)
366#define MI_FLUSH_DW_USE_PPGTT (0<<2) 366#define MI_FLUSH_DW_USE_PPGTT (0<<2)
367#define MI_LOAD_REGISTER_MEM(x) MI_INSTR(0x29, 2*(x)-1) 367#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 1)
368#define MI_LOAD_REGISTER_MEM_GEN8(x) MI_INSTR(0x29, 3*(x)-1) 368#define MI_LOAD_REGISTER_MEM_GEN8 MI_INSTR(0x29, 2)
369#define MI_BATCH_BUFFER MI_INSTR(0x30, 1) 369#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
370#define MI_BATCH_NON_SECURE (1) 370#define MI_BATCH_NON_SECURE (1)
371/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */ 371/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
@@ -429,7 +429,7 @@
429#define ASYNC_FLIP (1<<22) 429#define ASYNC_FLIP (1<<22)
430#define DISPLAY_PLANE_A (0<<20) 430#define DISPLAY_PLANE_A (0<<20)
431#define DISPLAY_PLANE_B (1<<20) 431#define DISPLAY_PLANE_B (1<<20)
432#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2)) 432#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2))
433#define PIPE_CONTROL_FLUSH_L3 (1<<27) 433#define PIPE_CONTROL_FLUSH_L3 (1<<27)
434#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */ 434#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
435#define PIPE_CONTROL_MMIO_WRITE (1<<23) 435#define PIPE_CONTROL_MMIO_WRITE (1<<23)
@@ -536,6 +536,10 @@
536#define GEN7_3DPRIM_START_INSTANCE 0x243C 536#define GEN7_3DPRIM_START_INSTANCE 0x243C
537#define GEN7_3DPRIM_BASE_VERTEX 0x2440 537#define GEN7_3DPRIM_BASE_VERTEX 0x2440
538 538
539#define GEN7_GPGPU_DISPATCHDIMX 0x2500
540#define GEN7_GPGPU_DISPATCHDIMY 0x2504
541#define GEN7_GPGPU_DISPATCHDIMZ 0x2508
542
539#define OACONTROL 0x2360 543#define OACONTROL 0x2360
540 544
541#define _GEN7_PIPEA_DE_LOAD_SL 0x70068 545#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
@@ -728,12 +732,13 @@ enum skl_disp_power_wells {
728#define DSI_PLL_N1_DIV_MASK (3 << 16) 732#define DSI_PLL_N1_DIV_MASK (3 << 16)
729#define DSI_PLL_M1_DIV_SHIFT 0 733#define DSI_PLL_M1_DIV_SHIFT 0
730#define DSI_PLL_M1_DIV_MASK (0x1ff << 0) 734#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
735#define CCK_CZ_CLOCK_CONTROL 0x62
731#define CCK_DISPLAY_CLOCK_CONTROL 0x6b 736#define CCK_DISPLAY_CLOCK_CONTROL 0x6b
732#define DISPLAY_TRUNK_FORCE_ON (1 << 17) 737#define CCK_TRUNK_FORCE_ON (1 << 17)
733#define DISPLAY_TRUNK_FORCE_OFF (1 << 16) 738#define CCK_TRUNK_FORCE_OFF (1 << 16)
734#define DISPLAY_FREQUENCY_STATUS (0x1f << 8) 739#define CCK_FREQUENCY_STATUS (0x1f << 8)
735#define DISPLAY_FREQUENCY_STATUS_SHIFT 8 740#define CCK_FREQUENCY_STATUS_SHIFT 8
736#define DISPLAY_FREQUENCY_VALUES (0x1f << 0) 741#define CCK_FREQUENCY_VALUES (0x1f << 0)
737 742
738/** 743/**
739 * DOC: DPIO 744 * DOC: DPIO
@@ -1099,6 +1104,12 @@ enum skl_disp_power_wells {
1099#define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE 1 /* 1: coarse & 0 : fine */ 1104#define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE 1 /* 1: coarse & 0 : fine */
1100#define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1) 1105#define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1)
1101 1106
1107#define _CHV_CMN_DW0_CH0 0x8100
1108#define DPIO_ALLDL_POWERDOWN_SHIFT_CH0 19
1109#define DPIO_ANYDL_POWERDOWN_SHIFT_CH0 18
1110#define DPIO_ALLDL_POWERDOWN (1 << 1)
1111#define DPIO_ANYDL_POWERDOWN (1 << 0)
1112
1102#define _CHV_CMN_DW5_CH0 0x8114 1113#define _CHV_CMN_DW5_CH0 0x8114
1103#define CHV_BUFRIGHTENA1_DISABLE (0 << 20) 1114#define CHV_BUFRIGHTENA1_DISABLE (0 << 20)
1104#define CHV_BUFRIGHTENA1_NORMAL (1 << 20) 1115#define CHV_BUFRIGHTENA1_NORMAL (1 << 20)
@@ -1135,10 +1146,23 @@ enum skl_disp_power_wells {
1135 1146
1136#define _CHV_CMN_DW19_CH0 0x814c 1147#define _CHV_CMN_DW19_CH0 0x814c
1137#define _CHV_CMN_DW6_CH1 0x8098 1148#define _CHV_CMN_DW6_CH1 0x8098
1149#define DPIO_ALLDL_POWERDOWN_SHIFT_CH1 30 /* CL2 DW6 only */
1150#define DPIO_ANYDL_POWERDOWN_SHIFT_CH1 29 /* CL2 DW6 only */
1151#define DPIO_DYNPWRDOWNEN_CH1 (1 << 28) /* CL2 DW6 only */
1138#define CHV_CMN_USEDCLKCHANNEL (1 << 13) 1152#define CHV_CMN_USEDCLKCHANNEL (1 << 13)
1153
1139#define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1) 1154#define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1)
1140 1155
1156#define CHV_CMN_DW28 0x8170
1157#define DPIO_CL1POWERDOWNEN (1 << 23)
1158#define DPIO_DYNPWRDOWNEN_CH0 (1 << 22)
1159#define DPIO_SUS_CLK_CONFIG_ON (0 << 0)
1160#define DPIO_SUS_CLK_CONFIG_CLKREQ (1 << 0)
1161#define DPIO_SUS_CLK_CONFIG_GATE (2 << 0)
1162#define DPIO_SUS_CLK_CONFIG_GATE_CLKREQ (3 << 0)
1163
1141#define CHV_CMN_DW30 0x8178 1164#define CHV_CMN_DW30 0x8178
1165#define DPIO_CL2_LDOFUSE_PWRENB (1 << 6)
1142#define DPIO_LRC_BYPASS (1 << 3) 1166#define DPIO_LRC_BYPASS (1 << 3)
1143 1167
1144#define _TXLANE(ch, lane, offset) ((ch ? 0x2400 : 0) + \ 1168#define _TXLANE(ch, lane, offset) ((ch ? 0x2400 : 0) + \
@@ -1231,7 +1255,7 @@ enum skl_disp_power_wells {
1231#define PORT_PLL_DCO_AMP_OVR_EN_H (1<<27) 1255#define PORT_PLL_DCO_AMP_OVR_EN_H (1<<27)
1232#define PORT_PLL_DCO_AMP_DEFAULT 15 1256#define PORT_PLL_DCO_AMP_DEFAULT 15
1233#define PORT_PLL_DCO_AMP_MASK 0x3c00 1257#define PORT_PLL_DCO_AMP_MASK 0x3c00
1234#define PORT_PLL_DCO_AMP(x) (x<<10) 1258#define PORT_PLL_DCO_AMP(x) ((x)<<10)
1235#define _PORT_PLL_BASE(port) _PORT3(port, _PORT_PLL_0_A, \ 1259#define _PORT_PLL_BASE(port) _PORT3(port, _PORT_PLL_0_A, \
1236 _PORT_PLL_0_B, \ 1260 _PORT_PLL_0_B, \
1237 _PORT_PLL_0_C) 1261 _PORT_PLL_0_C)
@@ -1376,7 +1400,8 @@ enum skl_disp_power_wells {
1376#define BXT_PORT_TX_DW3_LN0(port) _PORT3(port, _PORT_TX_DW3_LN0_A, \ 1400#define BXT_PORT_TX_DW3_LN0(port) _PORT3(port, _PORT_TX_DW3_LN0_A, \
1377 _PORT_TX_DW3_LN0_B, \ 1401 _PORT_TX_DW3_LN0_B, \
1378 _PORT_TX_DW3_LN0_C) 1402 _PORT_TX_DW3_LN0_C)
1379#define UNIQE_TRANGE_EN_METHOD (1 << 27) 1403#define SCALE_DCOMP_METHOD (1 << 26)
1404#define UNIQUE_TRANGE_EN_METHOD (1 << 27)
1380 1405
1381#define _PORT_TX_DW4_LN0_A 0x162510 1406#define _PORT_TX_DW4_LN0_A 0x162510
1382#define _PORT_TX_DW4_LN0_B 0x6C510 1407#define _PORT_TX_DW4_LN0_B 0x6C510
@@ -1417,9 +1442,15 @@ enum skl_disp_power_wells {
1417 1442
1418/* 1443/*
1419 * Fence registers 1444 * Fence registers
1445 * [0-7] @ 0x2000 gen2,gen3
1446 * [8-15] @ 0x3000 945,g33,pnv
1447 *
1448 * [0-15] @ 0x3000 gen4,gen5
1449 *
1450 * [0-15] @ 0x100000 gen6,vlv,chv
1451 * [0-31] @ 0x100000 gen7+
1420 */ 1452 */
1421#define FENCE_REG_830_0 0x2000 1453#define FENCE_REG(i) (0x2000 + (((i) & 8) << 9) + ((i) & 7) * 4)
1422#define FENCE_REG_945_8 0x3000
1423#define I830_FENCE_START_MASK 0x07f80000 1454#define I830_FENCE_START_MASK 0x07f80000
1424#define I830_FENCE_TILING_Y_SHIFT 12 1455#define I830_FENCE_TILING_Y_SHIFT 12
1425#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) 1456#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
@@ -1432,14 +1463,16 @@ enum skl_disp_power_wells {
1432#define I915_FENCE_START_MASK 0x0ff00000 1463#define I915_FENCE_START_MASK 0x0ff00000
1433#define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8) 1464#define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8)
1434 1465
1435#define FENCE_REG_965_0 0x03000 1466#define FENCE_REG_965_LO(i) (0x03000 + (i) * 8)
1467#define FENCE_REG_965_HI(i) (0x03000 + (i) * 8 + 4)
1436#define I965_FENCE_PITCH_SHIFT 2 1468#define I965_FENCE_PITCH_SHIFT 2
1437#define I965_FENCE_TILING_Y_SHIFT 1 1469#define I965_FENCE_TILING_Y_SHIFT 1
1438#define I965_FENCE_REG_VALID (1<<0) 1470#define I965_FENCE_REG_VALID (1<<0)
1439#define I965_FENCE_MAX_PITCH_VAL 0x0400 1471#define I965_FENCE_MAX_PITCH_VAL 0x0400
1440 1472
1441#define FENCE_REG_SANDYBRIDGE_0 0x100000 1473#define FENCE_REG_GEN6_LO(i) (0x100000 + (i) * 8)
1442#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32 1474#define FENCE_REG_GEN6_HI(i) (0x100000 + (i) * 8 + 4)
1475#define GEN6_FENCE_PITCH_SHIFT 32
1443#define GEN7_FENCE_MAX_PITCH_VAL 0x0800 1476#define GEN7_FENCE_MAX_PITCH_VAL 0x0800
1444 1477
1445 1478
@@ -1508,7 +1541,7 @@ enum skl_disp_power_wells {
1508#define GEN7_GFX_PEND_TLB0 0x4034 1541#define GEN7_GFX_PEND_TLB0 0x4034
1509#define GEN7_GFX_PEND_TLB1 0x4038 1542#define GEN7_GFX_PEND_TLB1 0x4038
1510/* L3, CVS, ZTLB, RCC, CASC LRA min, max values */ 1543/* L3, CVS, ZTLB, RCC, CASC LRA min, max values */
1511#define GEN7_LRA_LIMITS_BASE 0x403C 1544#define GEN7_LRA_LIMITS(i) (0x403C + (i) * 4)
1512#define GEN7_LRA_LIMITS_REG_NUM 13 1545#define GEN7_LRA_LIMITS_REG_NUM 13
1513#define GEN7_MEDIA_MAX_REQ_COUNT 0x4070 1546#define GEN7_MEDIA_MAX_REQ_COUNT 0x4070
1514#define GEN7_GFX_MAX_REQ_COUNT 0x4074 1547#define GEN7_GFX_MAX_REQ_COUNT 0x4074
@@ -1519,11 +1552,12 @@ enum skl_disp_power_wells {
1519#define RENDER_HWS_PGA_GEN7 (0x04080) 1552#define RENDER_HWS_PGA_GEN7 (0x04080)
1520#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id) 1553#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
1521#define RING_FAULT_GTTSEL_MASK (1<<11) 1554#define RING_FAULT_GTTSEL_MASK (1<<11)
1522#define RING_FAULT_SRCID(x) ((x >> 3) & 0xff) 1555#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
1523#define RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3) 1556#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
1524#define RING_FAULT_VALID (1<<0) 1557#define RING_FAULT_VALID (1<<0)
1525#define DONE_REG 0x40b0 1558#define DONE_REG 0x40b0
1526#define GEN8_PRIVATE_PAT 0x40e0 1559#define GEN8_PRIVATE_PAT_LO 0x40e0
1560#define GEN8_PRIVATE_PAT_HI (0x40e0 + 4)
1527#define BSD_HWS_PGA_GEN7 (0x04180) 1561#define BSD_HWS_PGA_GEN7 (0x04180)
1528#define BLT_HWS_PGA_GEN7 (0x04280) 1562#define BLT_HWS_PGA_GEN7 (0x04280)
1529#define VEBOX_HWS_PGA_GEN7 (0x04380) 1563#define VEBOX_HWS_PGA_GEN7 (0x04380)
@@ -1563,14 +1597,17 @@ enum skl_disp_power_wells {
1563#endif 1597#endif
1564#define IPEIR_I965 0x02064 1598#define IPEIR_I965 0x02064
1565#define IPEHR_I965 0x02068 1599#define IPEHR_I965 0x02068
1566#define INSTDONE_I965 0x0206c
1567#define GEN7_INSTDONE_1 0x0206c
1568#define GEN7_SC_INSTDONE 0x07100 1600#define GEN7_SC_INSTDONE 0x07100
1569#define GEN7_SAMPLER_INSTDONE 0x0e160 1601#define GEN7_SAMPLER_INSTDONE 0x0e160
1570#define GEN7_ROW_INSTDONE 0x0e164 1602#define GEN7_ROW_INSTDONE 0x0e164
1571#define I915_NUM_INSTDONE_REG 4 1603#define I915_NUM_INSTDONE_REG 4
1572#define RING_IPEIR(base) ((base)+0x64) 1604#define RING_IPEIR(base) ((base)+0x64)
1573#define RING_IPEHR(base) ((base)+0x68) 1605#define RING_IPEHR(base) ((base)+0x68)
1606/*
1607 * On GEN4, only the render ring INSTDONE exists and has a different
1608 * layout than the GEN7+ version.
1609 * The GEN2 counterpart of this register is GEN2_INSTDONE.
1610 */
1574#define RING_INSTDONE(base) ((base)+0x6c) 1611#define RING_INSTDONE(base) ((base)+0x6c)
1575#define RING_INSTPS(base) ((base)+0x70) 1612#define RING_INSTPS(base) ((base)+0x70)
1576#define RING_DMA_FADD(base) ((base)+0x78) 1613#define RING_DMA_FADD(base) ((base)+0x78)
@@ -1578,7 +1615,7 @@ enum skl_disp_power_wells {
1578#define RING_INSTPM(base) ((base)+0xc0) 1615#define RING_INSTPM(base) ((base)+0xc0)
1579#define RING_MI_MODE(base) ((base)+0x9c) 1616#define RING_MI_MODE(base) ((base)+0x9c)
1580#define INSTPS 0x02070 /* 965+ only */ 1617#define INSTPS 0x02070 /* 965+ only */
1581#define INSTDONE1 0x0207c /* 965+ only */ 1618#define GEN4_INSTDONE1 0x0207c /* 965+ only, aka INSTDONE_2 on SNB */
1582#define ACTHD_I965 0x02074 1619#define ACTHD_I965 0x02074
1583#define HWS_PGA 0x02080 1620#define HWS_PGA 0x02080
1584#define HWS_ADDRESS_MASK 0xfffff000 1621#define HWS_ADDRESS_MASK 0xfffff000
@@ -1587,7 +1624,7 @@ enum skl_disp_power_wells {
1587#define PWRCTX_EN (1<<0) 1624#define PWRCTX_EN (1<<0)
1588#define IPEIR 0x02088 1625#define IPEIR 0x02088
1589#define IPEHR 0x0208c 1626#define IPEHR 0x0208c
1590#define INSTDONE 0x02090 1627#define GEN2_INSTDONE 0x02090
1591#define NOPID 0x02094 1628#define NOPID 0x02094
1592#define HWSTAM 0x02098 1629#define HWSTAM 0x02098
1593#define DMA_FADD_I8XX 0x020d0 1630#define DMA_FADD_I8XX 0x020d0
@@ -1604,9 +1641,9 @@ enum skl_disp_power_wells {
1604#define ERR_INT_PIPE_CRC_DONE_B (1<<5) 1641#define ERR_INT_PIPE_CRC_DONE_B (1<<5)
1605#define ERR_INT_FIFO_UNDERRUN_B (1<<3) 1642#define ERR_INT_FIFO_UNDERRUN_B (1<<3)
1606#define ERR_INT_PIPE_CRC_DONE_A (1<<2) 1643#define ERR_INT_PIPE_CRC_DONE_A (1<<2)
1607#define ERR_INT_PIPE_CRC_DONE(pipe) (1<<(2 + pipe*3)) 1644#define ERR_INT_PIPE_CRC_DONE(pipe) (1<<(2 + (pipe)*3))
1608#define ERR_INT_FIFO_UNDERRUN_A (1<<0) 1645#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
1609#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3)) 1646#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
1610 1647
1611#define GEN8_FAULT_TLB_DATA0 0x04b10 1648#define GEN8_FAULT_TLB_DATA0 0x04b10
1612#define GEN8_FAULT_TLB_DATA1 0x04b14 1649#define GEN8_FAULT_TLB_DATA1 0x04b14
@@ -1667,18 +1704,25 @@ enum skl_disp_power_wells {
1667#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0) 1704#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
1668#define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1) 1705#define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1)
1669#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5) 1706#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
1670#define GEN9_IZ_HASHING_MASK(slice) (0x3 << (slice * 2)) 1707#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2))
1671#define GEN9_IZ_HASHING(slice, val) ((val) << (slice * 2)) 1708#define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2))
1672 1709
1673#define GFX_MODE 0x02520 1710#define GFX_MODE 0x02520
1674#define GFX_MODE_GEN7 0x0229c 1711#define GFX_MODE_GEN7 0x0229c
1675#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c) 1712#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
1676#define GFX_RUN_LIST_ENABLE (1<<15) 1713#define GFX_RUN_LIST_ENABLE (1<<15)
1714#define GFX_INTERRUPT_STEERING (1<<14)
1677#define GFX_TLB_INVALIDATE_EXPLICIT (1<<13) 1715#define GFX_TLB_INVALIDATE_EXPLICIT (1<<13)
1678#define GFX_SURFACE_FAULT_ENABLE (1<<12) 1716#define GFX_SURFACE_FAULT_ENABLE (1<<12)
1679#define GFX_REPLAY_MODE (1<<11) 1717#define GFX_REPLAY_MODE (1<<11)
1680#define GFX_PSMI_GRANULARITY (1<<10) 1718#define GFX_PSMI_GRANULARITY (1<<10)
1681#define GFX_PPGTT_ENABLE (1<<9) 1719#define GFX_PPGTT_ENABLE (1<<9)
1720#define GEN8_GFX_PPGTT_48B (1<<7)
1721
1722#define GFX_FORWARD_VBLANK_MASK (3<<5)
1723#define GFX_FORWARD_VBLANK_NEVER (0<<5)
1724#define GFX_FORWARD_VBLANK_ALWAYS (1<<5)
1725#define GFX_FORWARD_VBLANK_COND (2<<5)
1682 1726
1683#define VLV_DISPLAY_BASE 0x180000 1727#define VLV_DISPLAY_BASE 0x180000
1684#define VLV_MIPI_BASE VLV_DISPLAY_BASE 1728#define VLV_MIPI_BASE VLV_DISPLAY_BASE
@@ -1850,12 +1894,27 @@ enum skl_disp_power_wells {
1850#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT) 1894#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)
1851 1895
1852#define GEN8_FUSE2 0x9120 1896#define GEN8_FUSE2 0x9120
1897#define GEN8_F2_SS_DIS_SHIFT 21
1898#define GEN8_F2_SS_DIS_MASK (0x7 << GEN8_F2_SS_DIS_SHIFT)
1853#define GEN8_F2_S_ENA_SHIFT 25 1899#define GEN8_F2_S_ENA_SHIFT 25
1854#define GEN8_F2_S_ENA_MASK (0x7 << GEN8_F2_S_ENA_SHIFT) 1900#define GEN8_F2_S_ENA_MASK (0x7 << GEN8_F2_S_ENA_SHIFT)
1855 1901
1856#define GEN9_F2_SS_DIS_SHIFT 20 1902#define GEN9_F2_SS_DIS_SHIFT 20
1857#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT) 1903#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT)
1858 1904
1905#define GEN8_EU_DISABLE0 0x9134
1906#define GEN8_EU_DIS0_S0_MASK 0xffffff
1907#define GEN8_EU_DIS0_S1_SHIFT 24
1908#define GEN8_EU_DIS0_S1_MASK (0xff << GEN8_EU_DIS0_S1_SHIFT)
1909
1910#define GEN8_EU_DISABLE1 0x9138
1911#define GEN8_EU_DIS1_S1_MASK 0xffff
1912#define GEN8_EU_DIS1_S2_SHIFT 16
1913#define GEN8_EU_DIS1_S2_MASK (0xffff << GEN8_EU_DIS1_S2_SHIFT)
1914
1915#define GEN8_EU_DISABLE2 0x913c
1916#define GEN8_EU_DIS2_S2_MASK 0xff
1917
1859#define GEN9_EU_DISABLE(slice) (0x9134 + (slice)*0x4) 1918#define GEN9_EU_DISABLE(slice) (0x9134 + (slice)*0x4)
1860 1919
1861#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 1920#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
@@ -1985,7 +2044,7 @@ enum skl_disp_power_wells {
1985#define FBC_CTL_CPU_FENCE (1<<1) 2044#define FBC_CTL_CPU_FENCE (1<<1)
1986#define FBC_CTL_PLANE(plane) ((plane)<<0) 2045#define FBC_CTL_PLANE(plane) ((plane)<<0)
1987#define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */ 2046#define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */
1988#define FBC_TAG 0x03300 2047#define FBC_TAG(i) (0x03300 + (i) * 4)
1989 2048
1990#define FBC_STATUS2 0x43214 2049#define FBC_STATUS2 0x43214
1991#define FBC_COMPRESSION_MASK 0x7ff 2050#define FBC_COMPRESSION_MASK 0x7ff
@@ -2085,7 +2144,7 @@ enum skl_disp_power_wells {
2085# define GPIO_DATA_VAL_IN (1 << 12) 2144# define GPIO_DATA_VAL_IN (1 << 12)
2086# define GPIO_DATA_PULLUP_DISABLE (1 << 13) 2145# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
2087 2146
2088#define GMBUS0 0x5100 /* clock/port select */ 2147#define GMBUS0 (dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */
2089#define GMBUS_RATE_100KHZ (0<<8) 2148#define GMBUS_RATE_100KHZ (0<<8)
2090#define GMBUS_RATE_50KHZ (1<<8) 2149#define GMBUS_RATE_50KHZ (1<<8)
2091#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */ 2150#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
@@ -2104,7 +2163,7 @@ enum skl_disp_power_wells {
2104#define GMBUS_PIN_2_BXT 2 2163#define GMBUS_PIN_2_BXT 2
2105#define GMBUS_PIN_3_BXT 3 2164#define GMBUS_PIN_3_BXT 3
2106#define GMBUS_NUM_PINS 7 /* including 0 */ 2165#define GMBUS_NUM_PINS 7 /* including 0 */
2107#define GMBUS1 0x5104 /* command/status */ 2166#define GMBUS1 (dev_priv->gpio_mmio_base + 0x5104) /* command/status */
2108#define GMBUS_SW_CLR_INT (1<<31) 2167#define GMBUS_SW_CLR_INT (1<<31)
2109#define GMBUS_SW_RDY (1<<30) 2168#define GMBUS_SW_RDY (1<<30)
2110#define GMBUS_ENT (1<<29) /* enable timeout */ 2169#define GMBUS_ENT (1<<29) /* enable timeout */
@@ -2118,7 +2177,7 @@ enum skl_disp_power_wells {
2118#define GMBUS_SLAVE_ADDR_SHIFT 1 2177#define GMBUS_SLAVE_ADDR_SHIFT 1
2119#define GMBUS_SLAVE_READ (1<<0) 2178#define GMBUS_SLAVE_READ (1<<0)
2120#define GMBUS_SLAVE_WRITE (0<<0) 2179#define GMBUS_SLAVE_WRITE (0<<0)
2121#define GMBUS2 0x5108 /* status */ 2180#define GMBUS2 (dev_priv->gpio_mmio_base + 0x5108) /* status */
2122#define GMBUS_INUSE (1<<15) 2181#define GMBUS_INUSE (1<<15)
2123#define GMBUS_HW_WAIT_PHASE (1<<14) 2182#define GMBUS_HW_WAIT_PHASE (1<<14)
2124#define GMBUS_STALL_TIMEOUT (1<<13) 2183#define GMBUS_STALL_TIMEOUT (1<<13)
@@ -2126,14 +2185,14 @@ enum skl_disp_power_wells {
2126#define GMBUS_HW_RDY (1<<11) 2185#define GMBUS_HW_RDY (1<<11)
2127#define GMBUS_SATOER (1<<10) 2186#define GMBUS_SATOER (1<<10)
2128#define GMBUS_ACTIVE (1<<9) 2187#define GMBUS_ACTIVE (1<<9)
2129#define GMBUS3 0x510c /* data buffer bytes 3-0 */ 2188#define GMBUS3 (dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */
2130#define GMBUS4 0x5110 /* interrupt mask (Pineview+) */ 2189#define GMBUS4 (dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */
2131#define GMBUS_SLAVE_TIMEOUT_EN (1<<4) 2190#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
2132#define GMBUS_NAK_EN (1<<3) 2191#define GMBUS_NAK_EN (1<<3)
2133#define GMBUS_IDLE_EN (1<<2) 2192#define GMBUS_IDLE_EN (1<<2)
2134#define GMBUS_HW_WAIT_EN (1<<1) 2193#define GMBUS_HW_WAIT_EN (1<<1)
2135#define GMBUS_HW_RDY_EN (1<<0) 2194#define GMBUS_HW_RDY_EN (1<<0)
2136#define GMBUS5 0x5120 /* byte index */ 2195#define GMBUS5 (dev_priv->gpio_mmio_base + 0x5120) /* byte index */
2137#define GMBUS_2BYTE_INDEX_EN (1<<31) 2196#define GMBUS_2BYTE_INDEX_EN (1<<31)
2138 2197
2139/* 2198/*
@@ -2185,16 +2244,20 @@ enum skl_disp_power_wells {
2185#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240) 2244#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240)
2186#define DPLL_PORTD_READY_MASK (0xf) 2245#define DPLL_PORTD_READY_MASK (0xf)
2187#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100) 2246#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100)
2247#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2*(phy)+(ch)+27))
2188#define PHY_LDO_DELAY_0NS 0x0 2248#define PHY_LDO_DELAY_0NS 0x0
2189#define PHY_LDO_DELAY_200NS 0x1 2249#define PHY_LDO_DELAY_200NS 0x1
2190#define PHY_LDO_DELAY_600NS 0x2 2250#define PHY_LDO_DELAY_600NS 0x2
2191#define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2*(phy)+23)) 2251#define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2*(phy)+23))
2252#define PHY_CH_POWER_DOWN_OVRD(mask, phy, ch) ((mask) << (8*(phy)+4*(ch)+11))
2192#define PHY_CH_SU_PSR 0x1 2253#define PHY_CH_SU_PSR 0x1
2193#define PHY_CH_DEEP_PSR 0x7 2254#define PHY_CH_DEEP_PSR 0x7
2194#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6*(phy)+3*(ch)+2)) 2255#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6*(phy)+3*(ch)+2))
2195#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy)) 2256#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
2196#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104) 2257#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104)
2197#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30)) 2258#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30))
2259#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6-(6*(phy)+3*(ch))))
2260#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8-(6*(phy)+3*(ch)+(spline))))
2198 2261
2199/* 2262/*
2200 * The i830 generation, in LVDS mode, defines P1 as the bit number set within 2263 * The i830 generation, in LVDS mode, defines P1 as the bit number set within
@@ -2445,8 +2508,8 @@ enum skl_disp_power_wells {
2445#define PALETTE_A_OFFSET 0xa000 2508#define PALETTE_A_OFFSET 0xa000
2446#define PALETTE_B_OFFSET 0xa800 2509#define PALETTE_B_OFFSET 0xa800
2447#define CHV_PALETTE_C_OFFSET 0xc000 2510#define CHV_PALETTE_C_OFFSET 0xc000
2448#define PALETTE(pipe) (dev_priv->info.palette_offsets[pipe] + \ 2511#define PALETTE(pipe, i) (dev_priv->info.palette_offsets[pipe] + \
2449 dev_priv->info.display_mmio_offset) 2512 dev_priv->info.display_mmio_offset + (i) * 4)
2450 2513
2451/* MCH MMIO space */ 2514/* MCH MMIO space */
2452 2515
@@ -2464,6 +2527,11 @@ enum skl_disp_power_wells {
2464 2527
2465#define MCHBAR_MIRROR_BASE_SNB 0x140000 2528#define MCHBAR_MIRROR_BASE_SNB 0x140000
2466 2529
2530#define CTG_STOLEN_RESERVED (MCHBAR_MIRROR_BASE + 0x34)
2531#define ELK_STOLEN_RESERVED (MCHBAR_MIRROR_BASE + 0x48)
2532#define G4X_STOLEN_RESERVED_ADDR1_MASK (0xFFFF << 16)
2533#define G4X_STOLEN_RESERVED_ADDR2_MASK (0xFFF << 4)
2534
2467/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */ 2535/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */
2468#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04) 2536#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04)
2469 2537
@@ -2544,7 +2612,7 @@ enum skl_disp_power_wells {
2544#define TSFS_INTR_MASK 0x000000ff 2612#define TSFS_INTR_MASK 0x000000ff
2545 2613
2546#define CRSTANDVID 0x11100 2614#define CRSTANDVID 0x11100
2547#define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */ 2615#define PXVFREQ(i) (0x11110 + (i) * 4) /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
2548#define PXVFREQ_PX_MASK 0x7f000000 2616#define PXVFREQ_PX_MASK 0x7f000000
2549#define PXVFREQ_PX_SHIFT 24 2617#define PXVFREQ_PX_SHIFT 24
2550#define VIDFREQ_BASE 0x11110 2618#define VIDFREQ_BASE 0x11110
@@ -2728,8 +2796,8 @@ enum skl_disp_power_wells {
2728#define CSIEW0 0x11250 2796#define CSIEW0 0x11250
2729#define CSIEW1 0x11254 2797#define CSIEW1 0x11254
2730#define CSIEW2 0x11258 2798#define CSIEW2 0x11258
2731#define PEW 0x1125c 2799#define PEW(i) (0x1125c + (i) * 4) /* 5 registers */
2732#define DEW 0x11270 2800#define DEW(i) (0x11270 + (i) * 4) /* 3 registers */
2733#define MCHAFE 0x112c0 2801#define MCHAFE 0x112c0
2734#define CSIEC 0x112e0 2802#define CSIEC 0x112e0
2735#define DMIEC 0x112e4 2803#define DMIEC 0x112e4
@@ -2753,8 +2821,8 @@ enum skl_disp_power_wells {
2753#define EG5 0x11624 2821#define EG5 0x11624
2754#define EG6 0x11628 2822#define EG6 0x11628
2755#define EG7 0x1162c 2823#define EG7 0x1162c
2756#define PXW 0x11664 2824#define PXW(i) (0x11664 + (i) * 4) /* 4 registers */
2757#define PXWL 0x11680 2825#define PXWL(i) (0x11680 + (i) * 4) /* 8 registers */
2758#define LCFUSE02 0x116c0 2826#define LCFUSE02 0x116c0
2759#define LCFUSE_HIV_MASK 0x000000ff 2827#define LCFUSE_HIV_MASK 0x000000ff
2760#define CSIPLL0 0x12c10 2828#define CSIPLL0 0x12c10
@@ -2772,8 +2840,11 @@ enum skl_disp_power_wells {
2772 2840
2773#define INTERVAL_1_28_US(us) (((us) * 100) >> 7) 2841#define INTERVAL_1_28_US(us) (((us) * 100) >> 7)
2774#define INTERVAL_1_33_US(us) (((us) * 3) >> 2) 2842#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
2843#define INTERVAL_0_833_US(us) (((us) * 6) / 5)
2775#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \ 2844#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
2776 INTERVAL_1_33_US(us) : \ 2845 (IS_BROXTON(dev_priv) ? \
2846 INTERVAL_0_833_US(us) : \
2847 INTERVAL_1_33_US(us)) : \
2777 INTERVAL_1_28_US(us)) 2848 INTERVAL_1_28_US(us))
2778 2849
2779/* 2850/*
@@ -2795,21 +2866,21 @@ enum skl_disp_power_wells {
2795 * doesn't need saving on GT1 2866 * doesn't need saving on GT1
2796 */ 2867 */
2797#define CXT_SIZE 0x21a0 2868#define CXT_SIZE 0x21a0
2798#define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f) 2869#define GEN6_CXT_POWER_SIZE(cxt_reg) (((cxt_reg) >> 24) & 0x3f)
2799#define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f) 2870#define GEN6_CXT_RING_SIZE(cxt_reg) (((cxt_reg) >> 18) & 0x3f)
2800#define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f) 2871#define GEN6_CXT_RENDER_SIZE(cxt_reg) (((cxt_reg) >> 12) & 0x3f)
2801#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f) 2872#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) (((cxt_reg) >> 6) & 0x3f)
2802#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f) 2873#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) (((cxt_reg) >> 0) & 0x3f)
2803#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \ 2874#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \
2804 GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \ 2875 GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
2805 GEN6_CXT_PIPELINE_SIZE(cxt_reg)) 2876 GEN6_CXT_PIPELINE_SIZE(cxt_reg))
2806#define GEN7_CXT_SIZE 0x21a8 2877#define GEN7_CXT_SIZE 0x21a8
2807#define GEN7_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 25) & 0x7f) 2878#define GEN7_CXT_POWER_SIZE(ctx_reg) (((ctx_reg) >> 25) & 0x7f)
2808#define GEN7_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 22) & 0x7) 2879#define GEN7_CXT_RING_SIZE(ctx_reg) (((ctx_reg) >> 22) & 0x7)
2809#define GEN7_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 16) & 0x3f) 2880#define GEN7_CXT_RENDER_SIZE(ctx_reg) (((ctx_reg) >> 16) & 0x3f)
2810#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f) 2881#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) (((ctx_reg) >> 9) & 0x7f)
2811#define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7) 2882#define GEN7_CXT_GT1_SIZE(ctx_reg) (((ctx_reg) >> 6) & 0x7)
2812#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f) 2883#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) (((ctx_reg) >> 0) & 0x3f)
2813#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ 2884#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
2814 GEN7_CXT_VFSTATE_SIZE(ctx_reg)) 2885 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
2815/* Haswell does have the CXT_SIZE register however it does not appear to be 2886/* Haswell does have the CXT_SIZE register however it does not appear to be
@@ -3229,7 +3300,9 @@ enum skl_disp_power_wells {
3229#define GEN3_SDVOC 0x61160 3300#define GEN3_SDVOC 0x61160
3230#define GEN4_HDMIB GEN3_SDVOB 3301#define GEN4_HDMIB GEN3_SDVOB
3231#define GEN4_HDMIC GEN3_SDVOC 3302#define GEN4_HDMIC GEN3_SDVOC
3232#define CHV_HDMID 0x6116C 3303#define VLV_HDMIB (VLV_DISPLAY_BASE + GEN4_HDMIB)
3304#define VLV_HDMIC (VLV_DISPLAY_BASE + GEN4_HDMIC)
3305#define CHV_HDMID (VLV_DISPLAY_BASE + 0x6116C)
3233#define PCH_SDVOB 0xe1140 3306#define PCH_SDVOB 0xe1140
3234#define PCH_HDMIB PCH_SDVOB 3307#define PCH_HDMIB PCH_SDVOB
3235#define PCH_HDMIC 0xe1150 3308#define PCH_HDMIC 0xe1150
@@ -3561,17 +3634,29 @@ enum skl_disp_power_wells {
3561#define UTIL_PIN_CTL 0x48400 3634#define UTIL_PIN_CTL 0x48400
3562#define UTIL_PIN_ENABLE (1 << 31) 3635#define UTIL_PIN_ENABLE (1 << 31)
3563 3636
3637#define UTIL_PIN_PIPE(x) ((x) << 29)
3638#define UTIL_PIN_PIPE_MASK (3 << 29)
3639#define UTIL_PIN_MODE_PWM (1 << 24)
3640#define UTIL_PIN_MODE_MASK (0xf << 24)
3641#define UTIL_PIN_POLARITY (1 << 22)
3642
3564/* BXT backlight register definition. */ 3643/* BXT backlight register definition. */
3565#define BXT_BLC_PWM_CTL1 0xC8250 3644#define _BXT_BLC_PWM_CTL1 0xC8250
3566#define BXT_BLC_PWM_ENABLE (1 << 31) 3645#define BXT_BLC_PWM_ENABLE (1 << 31)
3567#define BXT_BLC_PWM_POLARITY (1 << 29) 3646#define BXT_BLC_PWM_POLARITY (1 << 29)
3568#define BXT_BLC_PWM_FREQ1 0xC8254 3647#define _BXT_BLC_PWM_FREQ1 0xC8254
3569#define BXT_BLC_PWM_DUTY1 0xC8258 3648#define _BXT_BLC_PWM_DUTY1 0xC8258
3570 3649
3571#define BXT_BLC_PWM_CTL2 0xC8350 3650#define _BXT_BLC_PWM_CTL2 0xC8350
3572#define BXT_BLC_PWM_FREQ2 0xC8354 3651#define _BXT_BLC_PWM_FREQ2 0xC8354
3573#define BXT_BLC_PWM_DUTY2 0xC8358 3652#define _BXT_BLC_PWM_DUTY2 0xC8358
3574 3653
3654#define BXT_BLC_PWM_CTL(controller) _PIPE(controller, \
3655 _BXT_BLC_PWM_CTL1, _BXT_BLC_PWM_CTL2)
3656#define BXT_BLC_PWM_FREQ(controller) _PIPE(controller, \
3657 _BXT_BLC_PWM_FREQ1, _BXT_BLC_PWM_FREQ2)
3658#define BXT_BLC_PWM_DUTY(controller) _PIPE(controller, \
3659 _BXT_BLC_PWM_DUTY1, _BXT_BLC_PWM_DUTY2)
3575 3660
3576#define PCH_GTC_CTL 0xe7000 3661#define PCH_GTC_CTL 0xe7000
3577#define PCH_GTC_ENABLE (1 << 31) 3662#define PCH_GTC_ENABLE (1 << 31)
@@ -4047,14 +4132,10 @@ enum skl_disp_power_wells {
4047# define TV_CC_DATA_1_MASK 0x0000007f 4132# define TV_CC_DATA_1_MASK 0x0000007f
4048# define TV_CC_DATA_1_SHIFT 0 4133# define TV_CC_DATA_1_SHIFT 0
4049 4134
4050#define TV_H_LUMA_0 0x68100 4135#define TV_H_LUMA(i) (0x68100 + (i) * 4) /* 60 registers */
4051#define TV_H_LUMA_59 0x681ec 4136#define TV_H_CHROMA(i) (0x68200 + (i) * 4) /* 60 registers */
4052#define TV_H_CHROMA_0 0x68200 4137#define TV_V_LUMA(i) (0x68300 + (i) * 4) /* 43 registers */
4053#define TV_H_CHROMA_59 0x682ec 4138#define TV_V_CHROMA(i) (0x68400 + (i) * 4) /* 43 registers */
4054#define TV_V_LUMA_0 0x68300
4055#define TV_V_LUMA_42 0x683a8
4056#define TV_V_CHROMA_0 0x68400
4057#define TV_V_CHROMA_42 0x684a8
4058 4139
4059/* Display Port */ 4140/* Display Port */
4060#define DP_A 0x64000 /* eDP */ 4141#define DP_A 0x64000 /* eDP */
@@ -4062,6 +4143,10 @@ enum skl_disp_power_wells {
4062#define DP_C 0x64200 4143#define DP_C 0x64200
4063#define DP_D 0x64300 4144#define DP_D 0x64300
4064 4145
4146#define VLV_DP_B (VLV_DISPLAY_BASE + DP_B)
4147#define VLV_DP_C (VLV_DISPLAY_BASE + DP_C)
4148#define CHV_DP_D (VLV_DISPLAY_BASE + DP_D)
4149
4065#define DP_PORT_EN (1 << 31) 4150#define DP_PORT_EN (1 << 31)
4066#define DP_PIPEB_SELECT (1 << 30) 4151#define DP_PIPEB_SELECT (1 << 30)
4067#define DP_PIPE_MASK (1 << 30) 4152#define DP_PIPE_MASK (1 << 30)
@@ -4107,6 +4192,7 @@ enum skl_disp_power_wells {
4107/* How many wires to use. I guess 3 was too hard */ 4192/* How many wires to use. I guess 3 was too hard */
4108#define DP_PORT_WIDTH(width) (((width) - 1) << 19) 4193#define DP_PORT_WIDTH(width) (((width) - 1) << 19)
4109#define DP_PORT_WIDTH_MASK (7 << 19) 4194#define DP_PORT_WIDTH_MASK (7 << 19)
4195#define DP_PORT_WIDTH_SHIFT 19
4110 4196
4111/* Mystic DPCD version 1.1 special mode */ 4197/* Mystic DPCD version 1.1 special mode */
4112#define DP_ENHANCED_FRAMING (1 << 18) 4198#define DP_ENHANCED_FRAMING (1 << 18)
@@ -4198,7 +4284,7 @@ enum skl_disp_power_wells {
4198#define DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL (1 << 14) 4284#define DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL (1 << 14)
4199#define DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL (1 << 13) 4285#define DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL (1 << 13)
4200#define DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL (1 << 12) 4286#define DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL (1 << 12)
4201#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL_MASK (1f << 5) 4287#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL_MASK (0x1f << 5)
4202#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(c) (((c) - 1) << 5) 4288#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(c) (((c) - 1) << 5)
4203#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1) 4289#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1)
4204 4290
@@ -4617,6 +4703,7 @@ enum skl_disp_power_wells {
4617 4703
4618#define CBR1_VLV (VLV_DISPLAY_BASE + 0x70400) 4704#define CBR1_VLV (VLV_DISPLAY_BASE + 0x70400)
4619#define CBR_PND_DEADLINE_DISABLE (1<<31) 4705#define CBR_PND_DEADLINE_DISABLE (1<<31)
4706#define CBR_PWM_CLOCK_MUX_SELECT (1<<30)
4620 4707
4621/* FIFO watermark sizes etc */ 4708/* FIFO watermark sizes etc */
4622#define G4X_FIFO_LINE_SIZE 64 4709#define G4X_FIFO_LINE_SIZE 64
@@ -4759,10 +4846,10 @@ enum skl_disp_power_wells {
4759#define PIPE_PIXEL_MASK 0x00ffffff 4846#define PIPE_PIXEL_MASK 0x00ffffff
4760#define PIPE_PIXEL_SHIFT 0 4847#define PIPE_PIXEL_SHIFT 0
4761/* GM45+ just has to be different */ 4848/* GM45+ just has to be different */
4762#define _PIPEA_FRMCOUNT_GM45 0x70040 4849#define _PIPEA_FRMCOUNT_G4X 0x70040
4763#define _PIPEA_FLIPCOUNT_GM45 0x70044 4850#define _PIPEA_FLIPCOUNT_G4X 0x70044
4764#define PIPE_FRMCOUNT_GM45(pipe) _PIPE2(pipe, _PIPEA_FRMCOUNT_GM45) 4851#define PIPE_FRMCOUNT_G4X(pipe) _PIPE2(pipe, _PIPEA_FRMCOUNT_G4X)
4765#define PIPE_FLIPCOUNT_GM45(pipe) _PIPE2(pipe, _PIPEA_FLIPCOUNT_GM45) 4852#define PIPE_FLIPCOUNT_G4X(pipe) _PIPE2(pipe, _PIPEA_FLIPCOUNT_G4X)
4766 4853
4767/* Cursor A & B regs */ 4854/* Cursor A & B regs */
4768#define _CURACNTR 0x70080 4855#define _CURACNTR 0x70080
@@ -4904,20 +4991,20 @@ enum skl_disp_power_wells {
4904#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK) 4991#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
4905#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK) 4992#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
4906 4993
4907/* VBIOS flags */ 4994/*
4908#define SWF00 (dev_priv->info.display_mmio_offset + 0x71410) 4995 * VBIOS flags
4909#define SWF01 (dev_priv->info.display_mmio_offset + 0x71414) 4996 * gen2:
4910#define SWF02 (dev_priv->info.display_mmio_offset + 0x71418) 4997 * [00:06] alm,mgm
4911#define SWF03 (dev_priv->info.display_mmio_offset + 0x7141c) 4998 * [10:16] all
4912#define SWF04 (dev_priv->info.display_mmio_offset + 0x71420) 4999 * [30:32] alm,mgm
4913#define SWF05 (dev_priv->info.display_mmio_offset + 0x71424) 5000 * gen3+:
4914#define SWF06 (dev_priv->info.display_mmio_offset + 0x71428) 5001 * [00:0f] all
4915#define SWF10 (dev_priv->info.display_mmio_offset + 0x70410) 5002 * [10:1f] all
4916#define SWF11 (dev_priv->info.display_mmio_offset + 0x70414) 5003 * [30:32] all
4917#define SWF14 (dev_priv->info.display_mmio_offset + 0x71420) 5004 */
4918#define SWF30 (dev_priv->info.display_mmio_offset + 0x72414) 5005#define SWF0(i) (dev_priv->info.display_mmio_offset + 0x70410 + (i) * 4)
4919#define SWF31 (dev_priv->info.display_mmio_offset + 0x72418) 5006#define SWF1(i) (dev_priv->info.display_mmio_offset + 0x71410 + (i) * 4)
4920#define SWF32 (dev_priv->info.display_mmio_offset + 0x7241c) 5007#define SWF3(i) (dev_priv->info.display_mmio_offset + 0x72414 + (i) * 4)
4921 5008
4922/* Pipe B */ 5009/* Pipe B */
4923#define _PIPEBDSL (dev_priv->info.display_mmio_offset + 0x71000) 5010#define _PIPEBDSL (dev_priv->info.display_mmio_offset + 0x71000)
@@ -4925,8 +5012,8 @@ enum skl_disp_power_wells {
4925#define _PIPEBSTAT (dev_priv->info.display_mmio_offset + 0x71024) 5012#define _PIPEBSTAT (dev_priv->info.display_mmio_offset + 0x71024)
4926#define _PIPEBFRAMEHIGH 0x71040 5013#define _PIPEBFRAMEHIGH 0x71040
4927#define _PIPEBFRAMEPIXEL 0x71044 5014#define _PIPEBFRAMEPIXEL 0x71044
4928#define _PIPEB_FRMCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71040) 5015#define _PIPEB_FRMCOUNT_G4X (dev_priv->info.display_mmio_offset + 0x71040)
4929#define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71044) 5016#define _PIPEB_FLIPCOUNT_G4X (dev_priv->info.display_mmio_offset + 0x71044)
4930 5017
4931 5018
4932/* Display B control */ 5019/* Display B control */
@@ -5136,18 +5223,18 @@ enum skl_disp_power_wells {
5136#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8) 5223#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
5137#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4) 5224#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4)
5138 5225
5139#define SPCNTR(pipe, plane) _PIPE(pipe * 2 + plane, _SPACNTR, _SPBCNTR) 5226#define SPCNTR(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPACNTR, _SPBCNTR)
5140#define SPLINOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPALINOFF, _SPBLINOFF) 5227#define SPLINOFF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPALINOFF, _SPBLINOFF)
5141#define SPSTRIDE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASTRIDE, _SPBSTRIDE) 5228#define SPSTRIDE(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASTRIDE, _SPBSTRIDE)
5142#define SPPOS(pipe, plane) _PIPE(pipe * 2 + plane, _SPAPOS, _SPBPOS) 5229#define SPPOS(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAPOS, _SPBPOS)
5143#define SPSIZE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASIZE, _SPBSIZE) 5230#define SPSIZE(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASIZE, _SPBSIZE)
5144#define SPKEYMINVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMINVAL, _SPBKEYMINVAL) 5231#define SPKEYMINVAL(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMINVAL, _SPBKEYMINVAL)
5145#define SPKEYMSK(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMSK, _SPBKEYMSK) 5232#define SPKEYMSK(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMSK, _SPBKEYMSK)
5146#define SPSURF(pipe, plane) _PIPE(pipe * 2 + plane, _SPASURF, _SPBSURF) 5233#define SPSURF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASURF, _SPBSURF)
5147#define SPKEYMAXVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMAXVAL, _SPBKEYMAXVAL) 5234#define SPKEYMAXVAL(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
5148#define SPTILEOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPATILEOFF, _SPBTILEOFF) 5235#define SPTILEOFF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPATILEOFF, _SPBTILEOFF)
5149#define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA) 5236#define SPCONSTALPHA(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPACONSTALPHA, _SPBCONSTALPHA)
5150#define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC) 5237#define SPGAMC(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAGAMC, _SPBGAMC)
5151 5238
5152/* 5239/*
5153 * CHV pipe B sprite CSC 5240 * CHV pipe B sprite CSC
@@ -5363,15 +5450,17 @@ enum skl_disp_power_wells {
5363 5450
5364#define CPU_VGACNTRL 0x41000 5451#define CPU_VGACNTRL 0x41000
5365 5452
5366#define DIGITAL_PORT_HOTPLUG_CNTRL 0x44030 5453#define DIGITAL_PORT_HOTPLUG_CNTRL 0x44030
5367#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4) 5454#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
5368#define DIGITAL_PORTA_SHORT_PULSE_2MS (0 << 2) 5455#define DIGITAL_PORTA_PULSE_DURATION_2ms (0 << 2) /* pre-HSW */
5369#define DIGITAL_PORTA_SHORT_PULSE_4_5MS (1 << 2) 5456#define DIGITAL_PORTA_PULSE_DURATION_4_5ms (1 << 2) /* pre-HSW */
5370#define DIGITAL_PORTA_SHORT_PULSE_6MS (2 << 2) 5457#define DIGITAL_PORTA_PULSE_DURATION_6ms (2 << 2) /* pre-HSW */
5371#define DIGITAL_PORTA_SHORT_PULSE_100MS (3 << 2) 5458#define DIGITAL_PORTA_PULSE_DURATION_100ms (3 << 2) /* pre-HSW */
5372#define DIGITAL_PORTA_NO_DETECT (0 << 0) 5459#define DIGITAL_PORTA_PULSE_DURATION_MASK (3 << 2) /* pre-HSW */
5373#define DIGITAL_PORTA_LONG_PULSE_DETECT_MASK (1 << 1) 5460#define DIGITAL_PORTA_HOTPLUG_STATUS_MASK (3 << 0)
5374#define DIGITAL_PORTA_SHORT_PULSE_DETECT_MASK (1 << 0) 5461#define DIGITAL_PORTA_HOTPLUG_NO_DETECT (0 << 0)
5462#define DIGITAL_PORTA_HOTPLUG_SHORT_DETECT (1 << 0)
5463#define DIGITAL_PORTA_HOTPLUG_LONG_DETECT (2 << 0)
5375 5464
5376/* refresh rate hardware control */ 5465/* refresh rate hardware control */
5377#define RR_HW_CTL 0x45300 5466#define RR_HW_CTL 0x45300
@@ -5491,7 +5580,7 @@ enum skl_disp_power_wells {
5491#define PS_SCALER_MODE_DYN (0 << 28) 5580#define PS_SCALER_MODE_DYN (0 << 28)
5492#define PS_SCALER_MODE_HQ (1 << 28) 5581#define PS_SCALER_MODE_HQ (1 << 28)
5493#define PS_PLANE_SEL_MASK (7 << 25) 5582#define PS_PLANE_SEL_MASK (7 << 25)
5494#define PS_PLANE_SEL(plane) ((plane + 1) << 25) 5583#define PS_PLANE_SEL(plane) (((plane) + 1) << 25)
5495#define PS_FILTER_MASK (3 << 23) 5584#define PS_FILTER_MASK (3 << 23)
5496#define PS_FILTER_MEDIUM (0 << 23) 5585#define PS_FILTER_MEDIUM (0 << 23)
5497#define PS_FILTER_EDGE_ENHANCE (2 << 23) 5586#define PS_FILTER_EDGE_ENHANCE (2 << 23)
@@ -5596,7 +5685,7 @@ enum skl_disp_power_wells {
5596/* legacy palette */ 5685/* legacy palette */
5597#define _LGC_PALETTE_A 0x4a000 5686#define _LGC_PALETTE_A 0x4a000
5598#define _LGC_PALETTE_B 0x4a800 5687#define _LGC_PALETTE_B 0x4a800
5599#define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) 5688#define LGC_PALETTE(pipe, i) (_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4)
5600 5689
5601#define _GAMMA_MODE_A 0x4a480 5690#define _GAMMA_MODE_A 0x4a480
5602#define _GAMMA_MODE_B 0x4ac80 5691#define _GAMMA_MODE_B 0x4ac80
@@ -5656,7 +5745,7 @@ enum skl_disp_power_wells {
5656#define DE_PLANEA_FLIP_DONE_IVB (1<<3) 5745#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
5657#define DE_PLANE_FLIP_DONE_IVB(plane) (1<< (3 + 5*(plane))) 5746#define DE_PLANE_FLIP_DONE_IVB(plane) (1<< (3 + 5*(plane)))
5658#define DE_PIPEA_VBLANK_IVB (1<<0) 5747#define DE_PIPEA_VBLANK_IVB (1<<0)
5659#define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5)) 5748#define DE_PIPE_VBLANK_IVB(pipe) (1 << ((pipe) * 5))
5660 5749
5661#define VLV_MASTER_IER 0x4400c /* Gunit master IER */ 5750#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
5662#define MASTER_INTERRUPT_ENABLE (1<<31) 5751#define MASTER_INTERRUPT_ENABLE (1<<31)
@@ -5680,7 +5769,7 @@ enum skl_disp_power_wells {
5680#define GEN8_DE_PIPE_C_IRQ (1<<18) 5769#define GEN8_DE_PIPE_C_IRQ (1<<18)
5681#define GEN8_DE_PIPE_B_IRQ (1<<17) 5770#define GEN8_DE_PIPE_B_IRQ (1<<17)
5682#define GEN8_DE_PIPE_A_IRQ (1<<16) 5771#define GEN8_DE_PIPE_A_IRQ (1<<16)
5683#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+pipe)) 5772#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+(pipe)))
5684#define GEN8_GT_VECS_IRQ (1<<6) 5773#define GEN8_GT_VECS_IRQ (1<<6)
5685#define GEN8_GT_PM_IRQ (1<<4) 5774#define GEN8_GT_PM_IRQ (1<<4)
5686#define GEN8_GT_VCS2_IRQ (1<<3) 5775#define GEN8_GT_VCS2_IRQ (1<<3)
@@ -5693,11 +5782,12 @@ enum skl_disp_power_wells {
5693#define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which))) 5782#define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which)))
5694#define GEN8_GT_IER(which) (0x4430c + (0x10 * (which))) 5783#define GEN8_GT_IER(which) (0x4430c + (0x10 * (which)))
5695 5784
5696#define GEN8_BCS_IRQ_SHIFT 16
5697#define GEN8_RCS_IRQ_SHIFT 0 5785#define GEN8_RCS_IRQ_SHIFT 0
5698#define GEN8_VCS2_IRQ_SHIFT 16 5786#define GEN8_BCS_IRQ_SHIFT 16
5699#define GEN8_VCS1_IRQ_SHIFT 0 5787#define GEN8_VCS1_IRQ_SHIFT 0
5788#define GEN8_VCS2_IRQ_SHIFT 16
5700#define GEN8_VECS_IRQ_SHIFT 0 5789#define GEN8_VECS_IRQ_SHIFT 0
5790#define GEN8_WD_IRQ_SHIFT 16
5701 5791
5702#define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe))) 5792#define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe)))
5703#define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe))) 5793#define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe)))
@@ -5723,7 +5813,7 @@ enum skl_disp_power_wells {
5723#define GEN9_PIPE_PLANE3_FLIP_DONE (1 << 5) 5813#define GEN9_PIPE_PLANE3_FLIP_DONE (1 << 5)
5724#define GEN9_PIPE_PLANE2_FLIP_DONE (1 << 4) 5814#define GEN9_PIPE_PLANE2_FLIP_DONE (1 << 4)
5725#define GEN9_PIPE_PLANE1_FLIP_DONE (1 << 3) 5815#define GEN9_PIPE_PLANE1_FLIP_DONE (1 << 3)
5726#define GEN9_PIPE_PLANE_FLIP_DONE(p) (1 << (3 + p)) 5816#define GEN9_PIPE_PLANE_FLIP_DONE(p) (1 << (3 + (p)))
5727#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \ 5817#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
5728 (GEN8_PIPE_CURSOR_FAULT | \ 5818 (GEN8_PIPE_CURSOR_FAULT | \
5729 GEN8_PIPE_SPRITE_FAULT | \ 5819 GEN8_PIPE_SPRITE_FAULT | \
@@ -5763,21 +5853,6 @@ enum skl_disp_power_wells {
5763#define GEN8_PCU_IIR 0x444e8 5853#define GEN8_PCU_IIR 0x444e8
5764#define GEN8_PCU_IER 0x444ec 5854#define GEN8_PCU_IER 0x444ec
5765 5855
5766/* BXT hotplug control */
5767#define BXT_HOTPLUG_CTL 0xC4030
5768#define BXT_DDIA_HPD_ENABLE (1 << 28)
5769#define BXT_DDIA_HPD_STATUS (3 << 24)
5770#define BXT_DDIC_HPD_ENABLE (1 << 12)
5771#define BXT_DDIC_HPD_STATUS (3 << 8)
5772#define BXT_DDIB_HPD_ENABLE (1 << 4)
5773#define BXT_DDIB_HPD_STATUS (3 << 0)
5774#define BXT_HOTPLUG_CTL_MASK (BXT_DDIA_HPD_ENABLE | \
5775 BXT_DDIB_HPD_ENABLE | \
5776 BXT_DDIC_HPD_ENABLE)
5777#define BXT_HPD_STATUS_MASK (BXT_DDIA_HPD_STATUS | \
5778 BXT_DDIB_HPD_STATUS | \
5779 BXT_DDIC_HPD_STATUS)
5780
5781#define ILK_DISPLAY_CHICKEN2 0x42004 5856#define ILK_DISPLAY_CHICKEN2 0x42004
5782/* Required on all Ironlake and Sandybridge according to the B-Spec. */ 5857/* Required on all Ironlake and Sandybridge according to the B-Spec. */
5783#define ILK_ELPIN_409_SELECT (1 << 25) 5858#define ILK_ELPIN_409_SELECT (1 << 25)
@@ -5950,6 +6025,7 @@ enum skl_disp_power_wells {
5950#define SDE_AUXB_CPT (1 << 25) 6025#define SDE_AUXB_CPT (1 << 25)
5951#define SDE_AUX_MASK_CPT (7 << 25) 6026#define SDE_AUX_MASK_CPT (7 << 25)
5952#define SDE_PORTE_HOTPLUG_SPT (1 << 25) 6027#define SDE_PORTE_HOTPLUG_SPT (1 << 25)
6028#define SDE_PORTA_HOTPLUG_SPT (1 << 24)
5953#define SDE_PORTD_HOTPLUG_CPT (1 << 23) 6029#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
5954#define SDE_PORTC_HOTPLUG_CPT (1 << 22) 6030#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
5955#define SDE_PORTB_HOTPLUG_CPT (1 << 21) 6031#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
@@ -5963,7 +6039,8 @@ enum skl_disp_power_wells {
5963#define SDE_HOTPLUG_MASK_SPT (SDE_PORTE_HOTPLUG_SPT | \ 6039#define SDE_HOTPLUG_MASK_SPT (SDE_PORTE_HOTPLUG_SPT | \
5964 SDE_PORTD_HOTPLUG_CPT | \ 6040 SDE_PORTD_HOTPLUG_CPT | \
5965 SDE_PORTC_HOTPLUG_CPT | \ 6041 SDE_PORTC_HOTPLUG_CPT | \
5966 SDE_PORTB_HOTPLUG_CPT) 6042 SDE_PORTB_HOTPLUG_CPT | \
6043 SDE_PORTA_HOTPLUG_SPT)
5967#define SDE_GMBUS_CPT (1 << 17) 6044#define SDE_GMBUS_CPT (1 << 17)
5968#define SDE_ERROR_CPT (1 << 16) 6045#define SDE_ERROR_CPT (1 << 16)
5969#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10) 6046#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
@@ -5995,49 +6072,49 @@ enum skl_disp_power_wells {
5995#define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6) 6072#define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6)
5996#define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3) 6073#define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3)
5997#define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0) 6074#define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0)
5998#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<(pipe*3)) 6075#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
5999 6076
6000/* digital port hotplug */ 6077/* digital port hotplug */
6001#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */ 6078#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
6002#define BXT_PORTA_HOTPLUG_ENABLE (1 << 28) 6079#define PORTA_HOTPLUG_ENABLE (1 << 28) /* LPT:LP+ & BXT */
6003#define BXT_PORTA_HOTPLUG_STATUS_MASK (0x3 << 24) 6080#define PORTA_HOTPLUG_STATUS_MASK (3 << 24) /* SPT+ & BXT */
6004#define BXT_PORTA_HOTPLUG_NO_DETECT (0 << 24) 6081#define PORTA_HOTPLUG_NO_DETECT (0 << 24) /* SPT+ & BXT */
6005#define BXT_PORTA_HOTPLUG_SHORT_DETECT (1 << 24) 6082#define PORTA_HOTPLUG_SHORT_DETECT (1 << 24) /* SPT+ & BXT */
6006#define BXT_PORTA_HOTPLUG_LONG_DETECT (2 << 24) 6083#define PORTA_HOTPLUG_LONG_DETECT (2 << 24) /* SPT+ & BXT */
6007#define PORTD_HOTPLUG_ENABLE (1 << 20) 6084#define PORTD_HOTPLUG_ENABLE (1 << 20)
6008#define PORTD_PULSE_DURATION_2ms (0) 6085#define PORTD_PULSE_DURATION_2ms (0 << 18) /* pre-LPT */
6009#define PORTD_PULSE_DURATION_4_5ms (1 << 18) 6086#define PORTD_PULSE_DURATION_4_5ms (1 << 18) /* pre-LPT */
6010#define PORTD_PULSE_DURATION_6ms (2 << 18) 6087#define PORTD_PULSE_DURATION_6ms (2 << 18) /* pre-LPT */
6011#define PORTD_PULSE_DURATION_100ms (3 << 18) 6088#define PORTD_PULSE_DURATION_100ms (3 << 18) /* pre-LPT */
6012#define PORTD_PULSE_DURATION_MASK (3 << 18) 6089#define PORTD_PULSE_DURATION_MASK (3 << 18) /* pre-LPT */
6013#define PORTD_HOTPLUG_STATUS_MASK (0x3 << 16) 6090#define PORTD_HOTPLUG_STATUS_MASK (3 << 16)
6014#define PORTD_HOTPLUG_NO_DETECT (0 << 16) 6091#define PORTD_HOTPLUG_NO_DETECT (0 << 16)
6015#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16) 6092#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
6016#define PORTD_HOTPLUG_LONG_DETECT (2 << 16) 6093#define PORTD_HOTPLUG_LONG_DETECT (2 << 16)
6017#define PORTC_HOTPLUG_ENABLE (1 << 12) 6094#define PORTC_HOTPLUG_ENABLE (1 << 12)
6018#define PORTC_PULSE_DURATION_2ms (0) 6095#define PORTC_PULSE_DURATION_2ms (0 << 10) /* pre-LPT */
6019#define PORTC_PULSE_DURATION_4_5ms (1 << 10) 6096#define PORTC_PULSE_DURATION_4_5ms (1 << 10) /* pre-LPT */
6020#define PORTC_PULSE_DURATION_6ms (2 << 10) 6097#define PORTC_PULSE_DURATION_6ms (2 << 10) /* pre-LPT */
6021#define PORTC_PULSE_DURATION_100ms (3 << 10) 6098#define PORTC_PULSE_DURATION_100ms (3 << 10) /* pre-LPT */
6022#define PORTC_PULSE_DURATION_MASK (3 << 10) 6099#define PORTC_PULSE_DURATION_MASK (3 << 10) /* pre-LPT */
6023#define PORTC_HOTPLUG_STATUS_MASK (0x3 << 8) 6100#define PORTC_HOTPLUG_STATUS_MASK (3 << 8)
6024#define PORTC_HOTPLUG_NO_DETECT (0 << 8) 6101#define PORTC_HOTPLUG_NO_DETECT (0 << 8)
6025#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8) 6102#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
6026#define PORTC_HOTPLUG_LONG_DETECT (2 << 8) 6103#define PORTC_HOTPLUG_LONG_DETECT (2 << 8)
6027#define PORTB_HOTPLUG_ENABLE (1 << 4) 6104#define PORTB_HOTPLUG_ENABLE (1 << 4)
6028#define PORTB_PULSE_DURATION_2ms (0) 6105#define PORTB_PULSE_DURATION_2ms (0 << 2) /* pre-LPT */
6029#define PORTB_PULSE_DURATION_4_5ms (1 << 2) 6106#define PORTB_PULSE_DURATION_4_5ms (1 << 2) /* pre-LPT */
6030#define PORTB_PULSE_DURATION_6ms (2 << 2) 6107#define PORTB_PULSE_DURATION_6ms (2 << 2) /* pre-LPT */
6031#define PORTB_PULSE_DURATION_100ms (3 << 2) 6108#define PORTB_PULSE_DURATION_100ms (3 << 2) /* pre-LPT */
6032#define PORTB_PULSE_DURATION_MASK (3 << 2) 6109#define PORTB_PULSE_DURATION_MASK (3 << 2) /* pre-LPT */
6033#define PORTB_HOTPLUG_STATUS_MASK (0x3 << 0) 6110#define PORTB_HOTPLUG_STATUS_MASK (3 << 0)
6034#define PORTB_HOTPLUG_NO_DETECT (0 << 0) 6111#define PORTB_HOTPLUG_NO_DETECT (0 << 0)
6035#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0) 6112#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
6036#define PORTB_HOTPLUG_LONG_DETECT (2 << 0) 6113#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
6037 6114
6038#define PCH_PORT_HOTPLUG2 0xc403C /* SHOTPLUG_CTL2 */ 6115#define PCH_PORT_HOTPLUG2 0xc403C /* SHOTPLUG_CTL2 SPT+ */
6039#define PORTE_HOTPLUG_ENABLE (1 << 4) 6116#define PORTE_HOTPLUG_ENABLE (1 << 4)
6040#define PORTE_HOTPLUG_STATUS_MASK (0x3 << 0) 6117#define PORTE_HOTPLUG_STATUS_MASK (3 << 0)
6041#define PORTE_HOTPLUG_NO_DETECT (0 << 0) 6118#define PORTE_HOTPLUG_NO_DETECT (0 << 0)
6042#define PORTE_HOTPLUG_SHORT_DETECT (1 << 0) 6119#define PORTE_HOTPLUG_SHORT_DETECT (1 << 0)
6043#define PORTE_HOTPLUG_LONG_DETECT (2 << 0) 6120#define PORTE_HOTPLUG_LONG_DETECT (2 << 0)
@@ -6106,9 +6183,9 @@ enum skl_disp_power_wells {
6106#define PCH_SSC4_AUX_PARMS 0xc6214 6183#define PCH_SSC4_AUX_PARMS 0xc6214
6107 6184
6108#define PCH_DPLL_SEL 0xc7000 6185#define PCH_DPLL_SEL 0xc7000
6109#define TRANS_DPLLB_SEL(pipe) (1 << (pipe * 4)) 6186#define TRANS_DPLLB_SEL(pipe) (1 << ((pipe) * 4))
6110#define TRANS_DPLLA_SEL(pipe) 0 6187#define TRANS_DPLLA_SEL(pipe) 0
6111#define TRANS_DPLL_ENABLE(pipe) (1 << (pipe * 4 + 3)) 6188#define TRANS_DPLL_ENABLE(pipe) (1 << ((pipe) * 4 + 3))
6112 6189
6113/* transcoder */ 6190/* transcoder */
6114 6191
@@ -6209,16 +6286,16 @@ enum skl_disp_power_wells {
6209 6286
6210#define HSW_TVIDEO_DIP_CTL(trans) \ 6287#define HSW_TVIDEO_DIP_CTL(trans) \
6211 _TRANSCODER2(trans, HSW_VIDEO_DIP_CTL_A) 6288 _TRANSCODER2(trans, HSW_VIDEO_DIP_CTL_A)
6212#define HSW_TVIDEO_DIP_AVI_DATA(trans) \ 6289#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) \
6213 _TRANSCODER2(trans, HSW_VIDEO_DIP_AVI_DATA_A) 6290 (_TRANSCODER2(trans, HSW_VIDEO_DIP_AVI_DATA_A) + (i) * 4)
6214#define HSW_TVIDEO_DIP_VS_DATA(trans) \ 6291#define HSW_TVIDEO_DIP_VS_DATA(trans, i) \
6215 _TRANSCODER2(trans, HSW_VIDEO_DIP_VS_DATA_A) 6292 (_TRANSCODER2(trans, HSW_VIDEO_DIP_VS_DATA_A) + (i) * 4)
6216#define HSW_TVIDEO_DIP_SPD_DATA(trans) \ 6293#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) \
6217 _TRANSCODER2(trans, HSW_VIDEO_DIP_SPD_DATA_A) 6294 (_TRANSCODER2(trans, HSW_VIDEO_DIP_SPD_DATA_A) + (i) * 4)
6218#define HSW_TVIDEO_DIP_GCP(trans) \ 6295#define HSW_TVIDEO_DIP_GCP(trans) \
6219 _TRANSCODER2(trans, HSW_VIDEO_DIP_GCP_A) 6296 _TRANSCODER2(trans, HSW_VIDEO_DIP_GCP_A)
6220#define HSW_TVIDEO_DIP_VSC_DATA(trans) \ 6297#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) \
6221 _TRANSCODER2(trans, HSW_VIDEO_DIP_VSC_DATA_A) 6298 (_TRANSCODER2(trans, HSW_VIDEO_DIP_VSC_DATA_A) + (i) * 4)
6222 6299
6223#define HSW_STEREO_3D_CTL_A 0x70020 6300#define HSW_STEREO_3D_CTL_A 0x70020
6224#define S3D_ENABLE (1<<31) 6301#define S3D_ENABLE (1<<31)
@@ -6304,9 +6381,11 @@ enum skl_disp_power_wells {
6304#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) 6381#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
6305#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) 6382#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
6306#define FDI_BC_BIFURCATION_SELECT (1 << 12) 6383#define FDI_BC_BIFURCATION_SELECT (1 << 12)
6384#define SPT_PWM_GRANULARITY (1<<0)
6307#define SOUTH_CHICKEN2 0xc2004 6385#define SOUTH_CHICKEN2 0xc2004
6308#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13) 6386#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
6309#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12) 6387#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
6388#define LPT_PWM_GRANULARITY (1<<5)
6310#define DPLS_EDP_PPS_FIX_DIS (1<<0) 6389#define DPLS_EDP_PPS_FIX_DIS (1<<0)
6311 6390
6312#define _FDI_RXA_CHICKEN 0xc200c 6391#define _FDI_RXA_CHICKEN 0xc200c
@@ -6508,10 +6587,10 @@ enum skl_disp_power_wells {
6508#define _BXT_PP_ON_DELAYS2 0xc7308 6587#define _BXT_PP_ON_DELAYS2 0xc7308
6509#define _BXT_PP_OFF_DELAYS2 0xc730c 6588#define _BXT_PP_OFF_DELAYS2 0xc730c
6510 6589
6511#define BXT_PP_STATUS(n) ((!n) ? PCH_PP_STATUS : _BXT_PP_STATUS2) 6590#define BXT_PP_STATUS(n) _PIPE(n, PCH_PP_STATUS, _BXT_PP_STATUS2)
6512#define BXT_PP_CONTROL(n) ((!n) ? PCH_PP_CONTROL : _BXT_PP_CONTROL2) 6591#define BXT_PP_CONTROL(n) _PIPE(n, PCH_PP_CONTROL, _BXT_PP_CONTROL2)
6513#define BXT_PP_ON_DELAYS(n) ((!n) ? PCH_PP_ON_DELAYS : _BXT_PP_ON_DELAYS2) 6592#define BXT_PP_ON_DELAYS(n) _PIPE(n, PCH_PP_ON_DELAYS, _BXT_PP_ON_DELAYS2)
6514#define BXT_PP_OFF_DELAYS(n) ((!n) ? PCH_PP_OFF_DELAYS : _BXT_PP_OFF_DELAYS2) 6593#define BXT_PP_OFF_DELAYS(n) _PIPE(n, PCH_PP_OFF_DELAYS, _BXT_PP_OFF_DELAYS2)
6515 6594
6516#define PCH_DP_B 0xe4100 6595#define PCH_DP_B 0xe4100
6517#define PCH_DPB_AUX_CH_CTL 0xe4110 6596#define PCH_DPB_AUX_CH_CTL 0xe4110
@@ -6784,7 +6863,7 @@ enum skl_disp_power_wells {
6784 GEN6_PM_RP_DOWN_THRESHOLD | \ 6863 GEN6_PM_RP_DOWN_THRESHOLD | \
6785 GEN6_PM_RP_DOWN_TIMEOUT) 6864 GEN6_PM_RP_DOWN_TIMEOUT)
6786 6865
6787#define GEN7_GT_SCRATCH_BASE 0x4F100 6866#define GEN7_GT_SCRATCH(i) (0x4F100 + (i) * 4)
6788#define GEN7_GT_SCRATCH_REG_NUM 8 6867#define GEN7_GT_SCRATCH_REG_NUM 8
6789 6868
6790#define VLV_GTLC_SURVIVABILITY_REG 0x130098 6869#define VLV_GTLC_SURVIVABILITY_REG 0x130098
@@ -6843,6 +6922,9 @@ enum skl_disp_power_wells {
6843#define GEN6_RC6 3 6922#define GEN6_RC6 3
6844#define GEN6_RC7 4 6923#define GEN6_RC7 4
6845 6924
6925#define GEN8_GT_SLICE_INFO 0x138064
6926#define GEN8_LSLICESTAT_MASK 0x7
6927
6846#define CHV_POWER_SS0_SIG1 0xa720 6928#define CHV_POWER_SS0_SIG1 0xa720
6847#define CHV_POWER_SS1_SIG1 0xa728 6929#define CHV_POWER_SS1_SIG1 0xa728
6848#define CHV_SS_PG_ENABLE (1<<1) 6930#define CHV_SS_PG_ENABLE (1<<1)
@@ -6870,7 +6952,10 @@ enum skl_disp_power_wells {
6870#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14) 6952#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14)
6871 6953
6872#define GEN7_MISCCPCTL (0x9424) 6954#define GEN7_MISCCPCTL (0x9424)
6873#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0) 6955#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
6956#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1<<2)
6957#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1<<4)
6958#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1<<6)
6874 6959
6875#define GEN8_GARBCNTL 0xB004 6960#define GEN8_GARBCNTL 0xB004
6876#define GEN9_GAPS_TSV_CREDIT_DISABLE (1<<7) 6961#define GEN9_GAPS_TSV_CREDIT_DISABLE (1<<7)
@@ -6916,6 +7001,9 @@ enum skl_disp_power_wells {
6916#define HSW_ROW_CHICKEN3 0xe49c 7001#define HSW_ROW_CHICKEN3 0xe49c
6917#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6) 7002#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
6918 7003
7004#define HALF_SLICE_CHICKEN2 0xe180
7005#define GEN8_ST_PO_DISABLE (1<<13)
7006
6919#define HALF_SLICE_CHICKEN3 0xe184 7007#define HALF_SLICE_CHICKEN3 0xe184
6920#define HSW_SAMPLE_C_PERFORMANCE (1<<9) 7008#define HSW_SAMPLE_C_PERFORMANCE (1<<9)
6921#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8) 7009#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
@@ -7159,12 +7247,15 @@ enum skl_disp_power_wells {
7159#define DDI_BUF_IS_IDLE (1<<7) 7247#define DDI_BUF_IS_IDLE (1<<7)
7160#define DDI_A_4_LANES (1<<4) 7248#define DDI_A_4_LANES (1<<4)
7161#define DDI_PORT_WIDTH(width) (((width) - 1) << 1) 7249#define DDI_PORT_WIDTH(width) (((width) - 1) << 1)
7250#define DDI_PORT_WIDTH_MASK (7 << 1)
7251#define DDI_PORT_WIDTH_SHIFT 1
7162#define DDI_INIT_DISPLAY_DETECTED (1<<0) 7252#define DDI_INIT_DISPLAY_DETECTED (1<<0)
7163 7253
7164/* DDI Buffer Translations */ 7254/* DDI Buffer Translations */
7165#define DDI_BUF_TRANS_A 0x64E00 7255#define DDI_BUF_TRANS_A 0x64E00
7166#define DDI_BUF_TRANS_B 0x64E60 7256#define DDI_BUF_TRANS_B 0x64E60
7167#define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) 7257#define DDI_BUF_TRANS_LO(port, i) (_PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) + (i) * 8)
7258#define DDI_BUF_TRANS_HI(port, i) (_PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) + (i) * 8 + 4)
7168 7259
7169/* Sideband Interface (SBI) is programmed indirectly, via 7260/* Sideband Interface (SBI) is programmed indirectly, via
7170 * SBI_ADDR, which contains the register offset; and SBI_DATA, 7261 * SBI_ADDR, which contains the register offset; and SBI_DATA,
@@ -7257,7 +7348,7 @@ enum skl_disp_power_wells {
7257#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B) 7348#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B)
7258/* For each transcoder, we need to select the corresponding port clock */ 7349/* For each transcoder, we need to select the corresponding port clock */
7259#define TRANS_CLK_SEL_DISABLED (0x0<<29) 7350#define TRANS_CLK_SEL_DISABLED (0x0<<29)
7260#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29) 7351#define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29)
7261 7352
7262#define TRANSA_MSA_MISC 0x60410 7353#define TRANSA_MSA_MISC 0x60410
7263#define TRANSB_MSA_MISC 0x61410 7354#define TRANSB_MSA_MISC 0x61410
@@ -7330,10 +7421,10 @@ enum skl_disp_power_wells {
7330 7421
7331/* DPLL control2 */ 7422/* DPLL control2 */
7332#define DPLL_CTRL2 0x6C05C 7423#define DPLL_CTRL2 0x6C05C
7333#define DPLL_CTRL2_DDI_CLK_OFF(port) (1<<(port+15)) 7424#define DPLL_CTRL2_DDI_CLK_OFF(port) (1<<((port)+15))
7334#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3<<((port)*3+1)) 7425#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3<<((port)*3+1))
7335#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port)*3+1) 7426#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port)*3+1)
7336#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) (clk<<((port)*3+1)) 7427#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) ((clk)<<((port)*3+1))
7337#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1<<((port)*3)) 7428#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1<<((port)*3))
7338 7429
7339/* DPLL Status */ 7430/* DPLL Status */
@@ -7346,31 +7437,31 @@ enum skl_disp_power_wells {
7346#define DPLL3_CFGCR1 0x6C050 7437#define DPLL3_CFGCR1 0x6C050
7347#define DPLL_CFGCR1_FREQ_ENABLE (1<<31) 7438#define DPLL_CFGCR1_FREQ_ENABLE (1<<31)
7348#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9) 7439#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9)
7349#define DPLL_CFGCR1_DCO_FRACTION(x) (x<<9) 7440#define DPLL_CFGCR1_DCO_FRACTION(x) ((x)<<9)
7350#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff) 7441#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff)
7351 7442
7352#define DPLL1_CFGCR2 0x6C044 7443#define DPLL1_CFGCR2 0x6C044
7353#define DPLL2_CFGCR2 0x6C04C 7444#define DPLL2_CFGCR2 0x6C04C
7354#define DPLL3_CFGCR2 0x6C054 7445#define DPLL3_CFGCR2 0x6C054
7355#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff<<8) 7446#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff<<8)
7356#define DPLL_CFGCR2_QDIV_RATIO(x) (x<<8) 7447#define DPLL_CFGCR2_QDIV_RATIO(x) ((x)<<8)
7357#define DPLL_CFGCR2_QDIV_MODE(x) (x<<7) 7448#define DPLL_CFGCR2_QDIV_MODE(x) ((x)<<7)
7358#define DPLL_CFGCR2_KDIV_MASK (3<<5) 7449#define DPLL_CFGCR2_KDIV_MASK (3<<5)
7359#define DPLL_CFGCR2_KDIV(x) (x<<5) 7450#define DPLL_CFGCR2_KDIV(x) ((x)<<5)
7360#define DPLL_CFGCR2_KDIV_5 (0<<5) 7451#define DPLL_CFGCR2_KDIV_5 (0<<5)
7361#define DPLL_CFGCR2_KDIV_2 (1<<5) 7452#define DPLL_CFGCR2_KDIV_2 (1<<5)
7362#define DPLL_CFGCR2_KDIV_3 (2<<5) 7453#define DPLL_CFGCR2_KDIV_3 (2<<5)
7363#define DPLL_CFGCR2_KDIV_1 (3<<5) 7454#define DPLL_CFGCR2_KDIV_1 (3<<5)
7364#define DPLL_CFGCR2_PDIV_MASK (7<<2) 7455#define DPLL_CFGCR2_PDIV_MASK (7<<2)
7365#define DPLL_CFGCR2_PDIV(x) (x<<2) 7456#define DPLL_CFGCR2_PDIV(x) ((x)<<2)
7366#define DPLL_CFGCR2_PDIV_1 (0<<2) 7457#define DPLL_CFGCR2_PDIV_1 (0<<2)
7367#define DPLL_CFGCR2_PDIV_2 (1<<2) 7458#define DPLL_CFGCR2_PDIV_2 (1<<2)
7368#define DPLL_CFGCR2_PDIV_3 (2<<2) 7459#define DPLL_CFGCR2_PDIV_3 (2<<2)
7369#define DPLL_CFGCR2_PDIV_7 (4<<2) 7460#define DPLL_CFGCR2_PDIV_7 (4<<2)
7370#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3) 7461#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
7371 7462
7372#define GET_CFG_CR1_REG(id) (DPLL1_CFGCR1 + (id - SKL_DPLL1) * 8) 7463#define DPLL_CFGCR1(id) (DPLL1_CFGCR1 + ((id) - SKL_DPLL1) * 8)
7373#define GET_CFG_CR2_REG(id) (DPLL1_CFGCR2 + (id - SKL_DPLL1) * 8) 7464#define DPLL_CFGCR2(id) (DPLL1_CFGCR2 + ((id) - SKL_DPLL1) * 8)
7374 7465
7375/* BXT display engine PLL */ 7466/* BXT display engine PLL */
7376#define BXT_DE_PLL_CTL 0x6d000 7467#define BXT_DE_PLL_CTL 0x6d000
@@ -7475,9 +7566,116 @@ enum skl_disp_power_wells {
7475 7566
7476#define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */ 7567#define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */
7477 7568
7569/* BXT MIPI clock controls */
7570#define BXT_MAX_VAR_OUTPUT_KHZ 39500
7571
7572#define BXT_MIPI_CLOCK_CTL 0x46090
7573#define BXT_MIPI1_DIV_SHIFT 26
7574#define BXT_MIPI2_DIV_SHIFT 10
7575#define BXT_MIPI_DIV_SHIFT(port) \
7576 _MIPI_PORT(port, BXT_MIPI1_DIV_SHIFT, \
7577 BXT_MIPI2_DIV_SHIFT)
7578/* Var clock divider to generate TX source. Result must be < 39.5 M */
7579#define BXT_MIPI1_ESCLK_VAR_DIV_MASK (0x3F << 26)
7580#define BXT_MIPI2_ESCLK_VAR_DIV_MASK (0x3F << 10)
7581#define BXT_MIPI_ESCLK_VAR_DIV_MASK(port) \
7582 _MIPI_PORT(port, BXT_MIPI1_ESCLK_VAR_DIV_MASK, \
7583 BXT_MIPI2_ESCLK_VAR_DIV_MASK)
7584
7585#define BXT_MIPI_ESCLK_VAR_DIV(port, val) \
7586 (val << BXT_MIPI_DIV_SHIFT(port))
7587/* TX control divider to select actual TX clock output from (8x/var) */
7588#define BXT_MIPI1_TX_ESCLK_SHIFT 21
7589#define BXT_MIPI2_TX_ESCLK_SHIFT 5
7590#define BXT_MIPI_TX_ESCLK_SHIFT(port) \
7591 _MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_SHIFT, \
7592 BXT_MIPI2_TX_ESCLK_SHIFT)
7593#define BXT_MIPI1_TX_ESCLK_FIXDIV_MASK (3 << 21)
7594#define BXT_MIPI2_TX_ESCLK_FIXDIV_MASK (3 << 5)
7595#define BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port) \
7596 _MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_FIXDIV_MASK, \
7597 BXT_MIPI2_TX_ESCLK_FIXDIV_MASK)
7598#define BXT_MIPI_TX_ESCLK_8XDIV_BY2(port) \
7599 (0x0 << BXT_MIPI_TX_ESCLK_SHIFT(port))
7600#define BXT_MIPI_TX_ESCLK_8XDIV_BY4(port) \
7601 (0x1 << BXT_MIPI_TX_ESCLK_SHIFT(port))
7602#define BXT_MIPI_TX_ESCLK_8XDIV_BY8(port) \
7603 (0x2 << BXT_MIPI_TX_ESCLK_SHIFT(port))
7604/* RX control divider to select actual RX clock output from 8x*/
7605#define BXT_MIPI1_RX_ESCLK_SHIFT 19
7606#define BXT_MIPI2_RX_ESCLK_SHIFT 3
7607#define BXT_MIPI_RX_ESCLK_SHIFT(port) \
7608 _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_SHIFT, \
7609 BXT_MIPI2_RX_ESCLK_SHIFT)
7610#define BXT_MIPI1_RX_ESCLK_FIXDIV_MASK (3 << 19)
7611#define BXT_MIPI2_RX_ESCLK_FIXDIV_MASK (3 << 3)
7612#define BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port) \
7613 (3 << BXT_MIPI_RX_ESCLK_SHIFT(port))
7614#define BXT_MIPI_RX_ESCLK_8X_BY2(port) \
7615 (1 << BXT_MIPI_RX_ESCLK_SHIFT(port))
7616#define BXT_MIPI_RX_ESCLK_8X_BY3(port) \
7617 (2 << BXT_MIPI_RX_ESCLK_SHIFT(port))
7618#define BXT_MIPI_RX_ESCLK_8X_BY4(port) \
7619 (3 << BXT_MIPI_RX_ESCLK_SHIFT(port))
7620/* BXT-A WA: Always prog DPHY dividers to 00 */
7621#define BXT_MIPI1_DPHY_DIV_SHIFT 16
7622#define BXT_MIPI2_DPHY_DIV_SHIFT 0
7623#define BXT_MIPI_DPHY_DIV_SHIFT(port) \
7624 _MIPI_PORT(port, BXT_MIPI1_DPHY_DIV_SHIFT, \
7625 BXT_MIPI2_DPHY_DIV_SHIFT)
7626#define BXT_MIPI_1_DPHY_DIVIDER_MASK (3 << 16)
7627#define BXT_MIPI_2_DPHY_DIVIDER_MASK (3 << 0)
7628#define BXT_MIPI_DPHY_DIVIDER_MASK(port) \
7629 (3 << BXT_MIPI_DPHY_DIV_SHIFT(port))
7630
7631/* BXT MIPI mode configure */
7632#define _BXT_MIPIA_TRANS_HACTIVE 0x6B0F8
7633#define _BXT_MIPIC_TRANS_HACTIVE 0x6B8F8
7634#define BXT_MIPI_TRANS_HACTIVE(tc) _MIPI_PORT(tc, \
7635 _BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE)
7636
7637#define _BXT_MIPIA_TRANS_VACTIVE 0x6B0FC
7638#define _BXT_MIPIC_TRANS_VACTIVE 0x6B8FC
7639#define BXT_MIPI_TRANS_VACTIVE(tc) _MIPI_PORT(tc, \
7640 _BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE)
7641
7642#define _BXT_MIPIA_TRANS_VTOTAL 0x6B100
7643#define _BXT_MIPIC_TRANS_VTOTAL 0x6B900
7644#define BXT_MIPI_TRANS_VTOTAL(tc) _MIPI_PORT(tc, \
7645 _BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL)
7646
7647#define BXT_DSI_PLL_CTL 0x161000
7648#define BXT_DSI_PLL_PVD_RATIO_SHIFT 16
7649#define BXT_DSI_PLL_PVD_RATIO_MASK (3 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
7650#define BXT_DSI_PLL_PVD_RATIO_1 (1 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
7651#define BXT_DSIC_16X_BY2 (1 << 10)
7652#define BXT_DSIC_16X_BY3 (2 << 10)
7653#define BXT_DSIC_16X_BY4 (3 << 10)
7654#define BXT_DSIA_16X_BY2 (1 << 8)
7655#define BXT_DSIA_16X_BY3 (2 << 8)
7656#define BXT_DSIA_16X_BY4 (3 << 8)
7657#define BXT_DSI_FREQ_SEL_SHIFT 8
7658#define BXT_DSI_FREQ_SEL_MASK (0xF << BXT_DSI_FREQ_SEL_SHIFT)
7659
7660#define BXT_DSI_PLL_RATIO_MAX 0x7D
7661#define BXT_DSI_PLL_RATIO_MIN 0x22
7662#define BXT_DSI_PLL_RATIO_MASK 0xFF
7663#define BXT_REF_CLOCK_KHZ 19500
7664
7665#define BXT_DSI_PLL_ENABLE 0x46080
7666#define BXT_DSI_PLL_DO_ENABLE (1 << 31)
7667#define BXT_DSI_PLL_LOCKED (1 << 30)
7668
7478#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190) 7669#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
7479#define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700) 7670#define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
7480#define MIPI_PORT_CTRL(port) _MIPI_PORT(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL) 7671#define MIPI_PORT_CTRL(port) _MIPI_PORT(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
7672
7673 /* BXT port control */
7674#define _BXT_MIPIA_PORT_CTRL 0x6B0C0
7675#define _BXT_MIPIC_PORT_CTRL 0x6B8C0
7676#define BXT_MIPI_PORT_CTRL(tc) _MIPI_PORT(tc, _BXT_MIPIA_PORT_CTRL, \
7677 _BXT_MIPIC_PORT_CTRL)
7678
7481#define DPI_ENABLE (1 << 31) /* A + C */ 7679#define DPI_ENABLE (1 << 31) /* A + C */
7482#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27 7680#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
7483#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27) 7681#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
@@ -7781,7 +7979,7 @@ enum skl_disp_power_wells {
7781#define VIRTUAL_CHANNEL_SHIFT 6 7979#define VIRTUAL_CHANNEL_SHIFT 6
7782#define VIRTUAL_CHANNEL_MASK (3 << 6) 7980#define VIRTUAL_CHANNEL_MASK (3 << 6)
7783#define DATA_TYPE_SHIFT 0 7981#define DATA_TYPE_SHIFT 0
7784#define DATA_TYPE_MASK (3f << 0) 7982#define DATA_TYPE_MASK (0x3f << 0)
7785/* data type values, see include/video/mipi_display.h */ 7983/* data type values, see include/video/mipi_display.h */
7786 7984
7787#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074) 7985#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074)
@@ -7888,6 +8086,11 @@ enum skl_disp_power_wells {
7888#define READ_REQUEST_PRIORITY_HIGH (3 << 3) 8086#define READ_REQUEST_PRIORITY_HIGH (3 << 3)
7889#define RGB_FLIP_TO_BGR (1 << 2) 8087#define RGB_FLIP_TO_BGR (1 << 2)
7890 8088
8089#define BXT_PIPE_SELECT_MASK (7 << 7)
8090#define BXT_PIPE_SELECT_C (2 << 7)
8091#define BXT_PIPE_SELECT_B (1 << 7)
8092#define BXT_PIPE_SELECT_A (0 << 7)
8093
7891#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108) 8094#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
7892#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908) 8095#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
7893#define MIPI_DATA_ADDRESS(port) _MIPI_PORT(port, _MIPIA_DATA_ADDRESS, \ 8096#define MIPI_DATA_ADDRESS(port) _MIPI_PORT(port, _MIPIA_DATA_ADDRESS, \
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 1ccac618468e..2d9182189422 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -122,12 +122,24 @@ int i915_save_state(struct drm_device *dev)
122 dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); 122 dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
123 123
124 /* Scratch space */ 124 /* Scratch space */
125 for (i = 0; i < 16; i++) { 125 if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) {
126 dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2)); 126 for (i = 0; i < 7; i++) {
127 dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2)); 127 dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
128 dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
129 }
130 for (i = 0; i < 3; i++)
131 dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
132 } else if (IS_GEN2(dev_priv)) {
133 for (i = 0; i < 7; i++)
134 dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
135 } else if (HAS_GMCH_DISPLAY(dev_priv)) {
136 for (i = 0; i < 16; i++) {
137 dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
138 dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
139 }
140 for (i = 0; i < 3; i++)
141 dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
128 } 142 }
129 for (i = 0; i < 3; i++)
130 dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2));
131 143
132 mutex_unlock(&dev->struct_mutex); 144 mutex_unlock(&dev->struct_mutex);
133 145
@@ -156,12 +168,25 @@ int i915_restore_state(struct drm_device *dev)
156 /* Memory arbitration state */ 168 /* Memory arbitration state */
157 I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000); 169 I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
158 170
159 for (i = 0; i < 16; i++) { 171 /* Scratch space */
160 I915_WRITE(SWF00 + (i << 2), dev_priv->regfile.saveSWF0[i]); 172 if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) {
161 I915_WRITE(SWF10 + (i << 2), dev_priv->regfile.saveSWF1[i]); 173 for (i = 0; i < 7; i++) {
174 I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
175 I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
176 }
177 for (i = 0; i < 3; i++)
178 I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
179 } else if (IS_GEN2(dev_priv)) {
180 for (i = 0; i < 7; i++)
181 I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
182 } else if (HAS_GMCH_DISPLAY(dev_priv)) {
183 for (i = 0; i < 16; i++) {
184 I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
185 I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
186 }
187 for (i = 0; i < 3; i++)
188 I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
162 } 189 }
163 for (i = 0; i < 3; i++)
164 I915_WRITE(SWF30 + (i << 2), dev_priv->regfile.saveSWF2[i]);
165 190
166 mutex_unlock(&dev->struct_mutex); 191 mutex_unlock(&dev->struct_mutex);
167 192
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 55bd04c6b939..50ce9ce2b269 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -39,7 +39,7 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
39{ 39{
40 struct drm_i915_private *dev_priv = dev->dev_private; 40 struct drm_i915_private *dev_priv = dev->dev_private;
41 u64 raw_time; /* 32b value may overflow during fixed point math */ 41 u64 raw_time; /* 32b value may overflow during fixed point math */
42 u64 units = 128ULL, div = 100000ULL, bias = 100ULL; 42 u64 units = 128ULL, div = 100000ULL;
43 u32 ret; 43 u32 ret;
44 44
45 if (!intel_enable_rc6(dev)) 45 if (!intel_enable_rc6(dev))
@@ -49,41 +49,19 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
49 49
50 /* On VLV and CHV, residency time is in CZ units rather than 1.28us */ 50 /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
51 if (IS_VALLEYVIEW(dev)) { 51 if (IS_VALLEYVIEW(dev)) {
52 u32 clk_reg, czcount_30ns; 52 units = 1;
53 53 div = dev_priv->czclk_freq;
54 if (IS_CHERRYVIEW(dev))
55 clk_reg = CHV_CLK_CTL1;
56 else
57 clk_reg = VLV_CLK_CTL2;
58
59 czcount_30ns = I915_READ(clk_reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;
60
61 if (!czcount_30ns) {
62 WARN(!czcount_30ns, "bogus CZ count value");
63 ret = 0;
64 goto out;
65 }
66
67 if (IS_CHERRYVIEW(dev) && czcount_30ns == 1) {
68 /* Special case for 320Mhz */
69 div = 10000000ULL;
70 units = 3125ULL;
71 } else {
72 czcount_30ns += 1;
73 div = 1000000ULL;
74 units = DIV_ROUND_UP_ULL(30ULL * bias, czcount_30ns);
75 }
76 54
77 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) 55 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
78 units <<= 8; 56 units <<= 8;
79 57 } else if (IS_BROXTON(dev)) {
80 div = div * bias; 58 units = 1;
59 div = 1200; /* 833.33ns */
81 } 60 }
82 61
83 raw_time = I915_READ(reg) * units; 62 raw_time = I915_READ(reg) * units;
84 ret = DIV_ROUND_UP_ULL(raw_time, div); 63 ret = DIV_ROUND_UP_ULL(raw_time, div);
85 64
86out:
87 intel_runtime_pm_put(dev_priv); 65 intel_runtime_pm_put(dev_priv);
88 return ret; 66 return ret;
89} 67}
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 2f34c47bd4bf..04fe8491c8b6 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -17,8 +17,8 @@
17/* pipe updates */ 17/* pipe updates */
18 18
19TRACE_EVENT(i915_pipe_update_start, 19TRACE_EVENT(i915_pipe_update_start,
20 TP_PROTO(struct intel_crtc *crtc, u32 min, u32 max), 20 TP_PROTO(struct intel_crtc *crtc),
21 TP_ARGS(crtc, min, max), 21 TP_ARGS(crtc),
22 22
23 TP_STRUCT__entry( 23 TP_STRUCT__entry(
24 __field(enum pipe, pipe) 24 __field(enum pipe, pipe)
@@ -33,8 +33,8 @@ TRACE_EVENT(i915_pipe_update_start,
33 __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, 33 __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
34 crtc->pipe); 34 crtc->pipe);
35 __entry->scanline = intel_get_crtc_scanline(crtc); 35 __entry->scanline = intel_get_crtc_scanline(crtc);
36 __entry->min = min; 36 __entry->min = crtc->debug.min_vbl;
37 __entry->max = max; 37 __entry->max = crtc->debug.max_vbl;
38 ), 38 ),
39 39
40 TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u", 40 TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
@@ -43,8 +43,8 @@ TRACE_EVENT(i915_pipe_update_start,
43); 43);
44 44
45TRACE_EVENT(i915_pipe_update_vblank_evaded, 45TRACE_EVENT(i915_pipe_update_vblank_evaded,
46 TP_PROTO(struct intel_crtc *crtc, u32 min, u32 max, u32 frame), 46 TP_PROTO(struct intel_crtc *crtc),
47 TP_ARGS(crtc, min, max, frame), 47 TP_ARGS(crtc),
48 48
49 TP_STRUCT__entry( 49 TP_STRUCT__entry(
50 __field(enum pipe, pipe) 50 __field(enum pipe, pipe)
@@ -56,10 +56,10 @@ TRACE_EVENT(i915_pipe_update_vblank_evaded,
56 56
57 TP_fast_assign( 57 TP_fast_assign(
58 __entry->pipe = crtc->pipe; 58 __entry->pipe = crtc->pipe;
59 __entry->frame = frame; 59 __entry->frame = crtc->debug.start_vbl_count;
60 __entry->scanline = intel_get_crtc_scanline(crtc); 60 __entry->scanline = crtc->debug.scanline_start;
61 __entry->min = min; 61 __entry->min = crtc->debug.min_vbl;
62 __entry->max = max; 62 __entry->max = crtc->debug.max_vbl;
63 ), 63 ),
64 64
65 TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u", 65 TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
@@ -68,8 +68,8 @@ TRACE_EVENT(i915_pipe_update_vblank_evaded,
68); 68);
69 69
70TRACE_EVENT(i915_pipe_update_end, 70TRACE_EVENT(i915_pipe_update_end,
71 TP_PROTO(struct intel_crtc *crtc, u32 frame), 71 TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end),
72 TP_ARGS(crtc, frame), 72 TP_ARGS(crtc, frame, scanline_end),
73 73
74 TP_STRUCT__entry( 74 TP_STRUCT__entry(
75 __field(enum pipe, pipe) 75 __field(enum pipe, pipe)
@@ -80,7 +80,7 @@ TRACE_EVENT(i915_pipe_update_end,
80 TP_fast_assign( 80 TP_fast_assign(
81 __entry->pipe = crtc->pipe; 81 __entry->pipe = crtc->pipe;
82 __entry->frame = frame; 82 __entry->frame = frame;
83 __entry->scanline = intel_get_crtc_scanline(crtc); 83 __entry->scanline = scanline_end;
84 ), 84 ),
85 85
86 TP_printk("pipe %c, frame=%u, scanline=%u", 86 TP_printk("pipe %c, frame=%u, scanline=%u",
@@ -107,6 +107,26 @@ TRACE_EVENT(i915_gem_object_create,
107 TP_printk("obj=%p, size=%u", __entry->obj, __entry->size) 107 TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
108); 108);
109 109
110TRACE_EVENT(i915_gem_shrink,
111 TP_PROTO(struct drm_i915_private *i915, unsigned long target, unsigned flags),
112 TP_ARGS(i915, target, flags),
113
114 TP_STRUCT__entry(
115 __field(int, dev)
116 __field(unsigned long, target)
117 __field(unsigned, flags)
118 ),
119
120 TP_fast_assign(
121 __entry->dev = i915->dev->primary->index;
122 __entry->target = target;
123 __entry->flags = flags;
124 ),
125
126 TP_printk("dev=%d, target=%lu, flags=%x",
127 __entry->dev, __entry->target, __entry->flags)
128);
129
110TRACE_EVENT(i915_vma_bind, 130TRACE_EVENT(i915_vma_bind,
111 TP_PROTO(struct i915_vma *vma, unsigned flags), 131 TP_PROTO(struct i915_vma *vma, unsigned flags),
112 TP_ARGS(vma, flags), 132 TP_ARGS(vma, flags),
@@ -186,33 +206,49 @@ DEFINE_EVENT(i915_va, i915_va_alloc,
186 TP_ARGS(vm, start, length, name) 206 TP_ARGS(vm, start, length, name)
187); 207);
188 208
189DECLARE_EVENT_CLASS(i915_page_table_entry, 209DECLARE_EVENT_CLASS(i915_px_entry,
190 TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift), 210 TP_PROTO(struct i915_address_space *vm, u32 px, u64 start, u64 px_shift),
191 TP_ARGS(vm, pde, start, pde_shift), 211 TP_ARGS(vm, px, start, px_shift),
192 212
193 TP_STRUCT__entry( 213 TP_STRUCT__entry(
194 __field(struct i915_address_space *, vm) 214 __field(struct i915_address_space *, vm)
195 __field(u32, pde) 215 __field(u32, px)
196 __field(u64, start) 216 __field(u64, start)
197 __field(u64, end) 217 __field(u64, end)
198 ), 218 ),
199 219
200 TP_fast_assign( 220 TP_fast_assign(
201 __entry->vm = vm; 221 __entry->vm = vm;
202 __entry->pde = pde; 222 __entry->px = px;
203 __entry->start = start; 223 __entry->start = start;
204 __entry->end = ((start + (1ULL << pde_shift)) & ~((1ULL << pde_shift)-1)) - 1; 224 __entry->end = ((start + (1ULL << px_shift)) & ~((1ULL << px_shift)-1)) - 1;
205 ), 225 ),
206 226
207 TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)", 227 TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)",
208 __entry->vm, __entry->pde, __entry->start, __entry->end) 228 __entry->vm, __entry->px, __entry->start, __entry->end)
209); 229);
210 230
211DEFINE_EVENT(i915_page_table_entry, i915_page_table_entry_alloc, 231DEFINE_EVENT(i915_px_entry, i915_page_table_entry_alloc,
212 TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift), 232 TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
213 TP_ARGS(vm, pde, start, pde_shift) 233 TP_ARGS(vm, pde, start, pde_shift)
214); 234);
215 235
236DEFINE_EVENT_PRINT(i915_px_entry, i915_page_directory_entry_alloc,
237 TP_PROTO(struct i915_address_space *vm, u32 pdpe, u64 start, u64 pdpe_shift),
238 TP_ARGS(vm, pdpe, start, pdpe_shift),
239
240 TP_printk("vm=%p, pdpe=%d (0x%llx-0x%llx)",
241 __entry->vm, __entry->px, __entry->start, __entry->end)
242);
243
244DEFINE_EVENT_PRINT(i915_px_entry, i915_page_directory_pointer_entry_alloc,
245 TP_PROTO(struct i915_address_space *vm, u32 pml4e, u64 start, u64 pml4e_shift),
246 TP_ARGS(vm, pml4e, start, pml4e_shift),
247
248 TP_printk("vm=%p, pml4e=%d (0x%llx-0x%llx)",
249 __entry->vm, __entry->px, __entry->start, __entry->end)
250);
251
216/* Avoid extra math because we only support two sizes. The format is defined by 252/* Avoid extra math because we only support two sizes. The format is defined by
217 * bitmap_scnprintf. Each 32 bits is 8 HEX digits followed by comma */ 253 * bitmap_scnprintf. Each 32 bits is 8 HEX digits followed by comma */
218#define TRACE_PT_SIZE(bits) \ 254#define TRACE_PT_SIZE(bits) \
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index 97a88b5f6a26..21c97f44d637 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -40,6 +40,19 @@
40#define INTEL_VGT_IF_VERSION \ 40#define INTEL_VGT_IF_VERSION \
41 INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR) 41 INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
42 42
43/*
44 * notifications from guest to vgpu device model
45 */
46enum vgt_g2v_type {
47 VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE = 2,
48 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY,
49 VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE,
50 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY,
51 VGT_G2V_EXECLIST_CONTEXT_CREATE,
52 VGT_G2V_EXECLIST_CONTEXT_DESTROY,
53 VGT_G2V_MAX,
54};
55
43struct vgt_if { 56struct vgt_if {
44 uint64_t magic; /* VGT_MAGIC */ 57 uint64_t magic; /* VGT_MAGIC */
45 uint16_t version_major; 58 uint16_t version_major;
@@ -70,11 +83,28 @@ struct vgt_if {
70 uint32_t rsv3[0x200 - 24]; /* pad to half page */ 83 uint32_t rsv3[0x200 - 24]; /* pad to half page */
71 /* 84 /*
72 * The bottom half page is for response from Gfx driver to hypervisor. 85 * The bottom half page is for response from Gfx driver to hypervisor.
73 * Set to reserved fields temporarily by now.
74 */ 86 */
75 uint32_t rsv4; 87 uint32_t rsv4;
76 uint32_t display_ready; /* ready for display owner switch */ 88 uint32_t display_ready; /* ready for display owner switch */
77 uint32_t rsv5[0x200 - 2]; /* pad to one page */ 89
90 uint32_t rsv5[4];
91
92 uint32_t g2v_notify;
93 uint32_t rsv6[7];
94
95 uint32_t pdp0_lo;
96 uint32_t pdp0_hi;
97 uint32_t pdp1_lo;
98 uint32_t pdp1_hi;
99 uint32_t pdp2_lo;
100 uint32_t pdp2_hi;
101 uint32_t pdp3_lo;
102 uint32_t pdp3_hi;
103
104 uint32_t execlist_context_descriptor_lo;
105 uint32_t execlist_context_descriptor_hi;
106
107 uint32_t rsv7[0x200 - 24]; /* pad to one page */
78} __packed; 108} __packed;
79 109
80#define vgtif_reg(x) \ 110#define vgtif_reg(x) \
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index d96eee1ae9c5..eb638a1e69d2 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -5,7 +5,6 @@
5 */ 5 */
6#include <linux/pci.h> 6#include <linux/pci.h>
7#include <linux/acpi.h> 7#include <linux/acpi.h>
8#include <linux/vga_switcheroo.h>
9#include <drm/drmP.h> 8#include <drm/drmP.h>
10#include "i915_drv.h" 9#include "i915_drv.h"
11 10
@@ -146,7 +145,7 @@ static bool intel_dsm_detect(void)
146 145
147 if (vga_count == 2 && has_dsm) { 146 if (vga_count == 2 && has_dsm) {
148 acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); 147 acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
149 DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n", 148 DRM_DEBUG_DRIVER("vga_switcheroo: detected DSM switching method %s handle\n",
150 acpi_method_name); 149 acpi_method_name);
151 return true; 150 return true;
152 } 151 }
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index e2531cf59266..f1975f267710 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -85,21 +85,15 @@ intel_connector_atomic_get_property(struct drm_connector *connector,
85struct drm_crtc_state * 85struct drm_crtc_state *
86intel_crtc_duplicate_state(struct drm_crtc *crtc) 86intel_crtc_duplicate_state(struct drm_crtc *crtc)
87{ 87{
88 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
89 struct intel_crtc_state *crtc_state; 88 struct intel_crtc_state *crtc_state;
90 89
91 if (WARN_ON(!intel_crtc->config)) 90 crtc_state = kmemdup(crtc->state, sizeof(*crtc_state), GFP_KERNEL);
92 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
93 else
94 crtc_state = kmemdup(intel_crtc->config,
95 sizeof(*intel_crtc->config), GFP_KERNEL);
96
97 if (!crtc_state) 91 if (!crtc_state)
98 return NULL; 92 return NULL;
99 93
100 __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base); 94 __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
101 95
102 crtc_state->base.crtc = crtc; 96 crtc_state->update_pipe = false;
103 97
104 return &crtc_state->base; 98 return &crtc_state->base;
105} 99}
@@ -149,9 +143,6 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
149 int i, j; 143 int i, j;
150 144
151 num_scalers_need = hweight32(scaler_state->scaler_users); 145 num_scalers_need = hweight32(scaler_state->scaler_users);
152 DRM_DEBUG_KMS("crtc_state = %p need = %d avail = %d scaler_users = 0x%x\n",
153 crtc_state, num_scalers_need, intel_crtc->num_scalers,
154 scaler_state->scaler_users);
155 146
156 /* 147 /*
157 * High level flow: 148 * High level flow:
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index f1ab8e4b9c11..a11980696595 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -76,11 +76,7 @@ intel_plane_duplicate_state(struct drm_plane *plane)
76 struct drm_plane_state *state; 76 struct drm_plane_state *state;
77 struct intel_plane_state *intel_state; 77 struct intel_plane_state *intel_state;
78 78
79 if (WARN_ON(!plane->state)) 79 intel_state = kmemdup(plane->state, sizeof(*intel_state), GFP_KERNEL);
80 intel_state = intel_create_plane_state(plane);
81 else
82 intel_state = kmemdup(plane->state, sizeof(*intel_state),
83 GFP_KERNEL);
84 80
85 if (!intel_state) 81 if (!intel_state)
86 return NULL; 82 return NULL;
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index ae8df0a43de6..4dccd9b003a1 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -50,27 +50,32 @@
50 * co-operation between the graphics and audio drivers is handled via audio 50 * co-operation between the graphics and audio drivers is handled via audio
51 * related registers. (The notable exception is the power management, not 51 * related registers. (The notable exception is the power management, not
52 * covered here.) 52 * covered here.)
53 *
54 * The struct i915_audio_component is used to interact between the graphics
55 * and audio drivers. The struct i915_audio_component_ops *ops in it is
56 * defined in graphics driver and called in audio driver. The
57 * struct i915_audio_component_audio_ops *audio_ops is called from i915 driver.
53 */ 58 */
54 59
55static const struct { 60static const struct {
56 int clock; 61 int clock;
57 u32 config; 62 u32 config;
58} hdmi_audio_clock[] = { 63} hdmi_audio_clock[] = {
59 { DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 }, 64 { 25175, AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
60 { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */ 65 { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
61 { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 }, 66 { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
62 { 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 }, 67 { 27027, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
63 { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 }, 68 { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
64 { 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 }, 69 { 54054, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
65 { DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 }, 70 { 74176, AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
66 { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 }, 71 { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
67 { DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 }, 72 { 148352, AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
68 { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 }, 73 { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
69}; 74};
70 75
71/* HDMI N/CTS table */ 76/* HDMI N/CTS table */
72#define TMDS_297M 297000 77#define TMDS_297M 297000
73#define TMDS_296M DIV_ROUND_UP(297000 * 1000, 1001) 78#define TMDS_296M 296703
74static const struct { 79static const struct {
75 int sample_rate; 80 int sample_rate;
76 int clock; 81 int clock;
@@ -94,17 +99,18 @@ static const struct {
94}; 99};
95 100
96/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */ 101/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
97static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode) 102static u32 audio_config_hdmi_pixel_clock(const struct drm_display_mode *adjusted_mode)
98{ 103{
99 int i; 104 int i;
100 105
101 for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) { 106 for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
102 if (mode->clock == hdmi_audio_clock[i].clock) 107 if (adjusted_mode->crtc_clock == hdmi_audio_clock[i].clock)
103 break; 108 break;
104 } 109 }
105 110
106 if (i == ARRAY_SIZE(hdmi_audio_clock)) { 111 if (i == ARRAY_SIZE(hdmi_audio_clock)) {
107 DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock); 112 DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
113 adjusted_mode->crtc_clock);
108 i = 1; 114 i = 1;
109 } 115 }
110 116
@@ -202,7 +208,7 @@ static void g4x_audio_codec_disable(struct intel_encoder *encoder)
202 208
203static void g4x_audio_codec_enable(struct drm_connector *connector, 209static void g4x_audio_codec_enable(struct drm_connector *connector,
204 struct intel_encoder *encoder, 210 struct intel_encoder *encoder,
205 struct drm_display_mode *mode) 211 const struct drm_display_mode *adjusted_mode)
206{ 212{
207 struct drm_i915_private *dev_priv = connector->dev->dev_private; 213 struct drm_i915_private *dev_priv = connector->dev->dev_private;
208 uint8_t *eld = connector->eld; 214 uint8_t *eld = connector->eld;
@@ -271,7 +277,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
271 277
272static void hsw_audio_codec_enable(struct drm_connector *connector, 278static void hsw_audio_codec_enable(struct drm_connector *connector,
273 struct intel_encoder *encoder, 279 struct intel_encoder *encoder,
274 struct drm_display_mode *mode) 280 const struct drm_display_mode *adjusted_mode)
275{ 281{
276 struct drm_i915_private *dev_priv = connector->dev->dev_private; 282 struct drm_i915_private *dev_priv = connector->dev->dev_private;
277 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 283 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
@@ -325,10 +331,10 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
325 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) 331 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
326 tmp |= AUD_CONFIG_N_VALUE_INDEX; 332 tmp |= AUD_CONFIG_N_VALUE_INDEX;
327 else 333 else
328 tmp |= audio_config_hdmi_pixel_clock(mode); 334 tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
329 335
330 tmp &= ~AUD_CONFIG_N_PROG_ENABLE; 336 tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
331 if (audio_rate_need_prog(intel_crtc, mode)) { 337 if (audio_rate_need_prog(intel_crtc, adjusted_mode)) {
332 if (!acomp) 338 if (!acomp)
333 rate = 0; 339 rate = 0;
334 else if (port >= PORT_A && port <= PORT_E) 340 else if (port >= PORT_A && port <= PORT_E)
@@ -337,7 +343,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
337 DRM_ERROR("invalid port: %d\n", port); 343 DRM_ERROR("invalid port: %d\n", port);
338 rate = 0; 344 rate = 0;
339 } 345 }
340 n = audio_config_get_n(mode, rate); 346 n = audio_config_get_n(adjusted_mode, rate);
341 if (n != 0) 347 if (n != 0)
342 tmp = audio_config_setup_n_reg(n, tmp); 348 tmp = audio_config_setup_n_reg(n, tmp);
343 else 349 else
@@ -398,7 +404,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
398 404
399static void ilk_audio_codec_enable(struct drm_connector *connector, 405static void ilk_audio_codec_enable(struct drm_connector *connector,
400 struct intel_encoder *encoder, 406 struct intel_encoder *encoder,
401 struct drm_display_mode *mode) 407 const struct drm_display_mode *adjusted_mode)
402{ 408{
403 struct drm_i915_private *dev_priv = connector->dev->dev_private; 409 struct drm_i915_private *dev_priv = connector->dev->dev_private;
404 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 410 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
@@ -475,7 +481,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
475 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) 481 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
476 tmp |= AUD_CONFIG_N_VALUE_INDEX; 482 tmp |= AUD_CONFIG_N_VALUE_INDEX;
477 else 483 else
478 tmp |= audio_config_hdmi_pixel_clock(mode); 484 tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
479 I915_WRITE(aud_config, tmp); 485 I915_WRITE(aud_config, tmp);
480} 486}
481 487
@@ -490,7 +496,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
490{ 496{
491 struct drm_encoder *encoder = &intel_encoder->base; 497 struct drm_encoder *encoder = &intel_encoder->base;
492 struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); 498 struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
493 struct drm_display_mode *mode = &crtc->config->base.adjusted_mode; 499 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
494 struct drm_connector *connector; 500 struct drm_connector *connector;
495 struct drm_device *dev = encoder->dev; 501 struct drm_device *dev = encoder->dev;
496 struct drm_i915_private *dev_priv = dev->dev_private; 502 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -498,7 +504,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
498 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 504 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
499 enum port port = intel_dig_port->port; 505 enum port port = intel_dig_port->port;
500 506
501 connector = drm_select_eld(encoder, mode); 507 connector = drm_select_eld(encoder);
502 if (!connector) 508 if (!connector)
503 return; 509 return;
504 510
@@ -513,10 +519,11 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
513 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) 519 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
514 connector->eld[5] |= (1 << 2); 520 connector->eld[5] |= (1 << 2);
515 521
516 connector->eld[6] = drm_av_sync_delay(connector, mode) / 2; 522 connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
517 523
518 if (dev_priv->display.audio_codec_enable) 524 if (dev_priv->display.audio_codec_enable)
519 dev_priv->display.audio_codec_enable(connector, intel_encoder, mode); 525 dev_priv->display.audio_codec_enable(connector, intel_encoder,
526 adjusted_mode);
520 527
521 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) 528 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
522 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port); 529 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index c19e669ffe50..ce82f9c7df24 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1231,20 +1231,13 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
1231 { } 1231 { }
1232}; 1232};
1233 1233
1234static const struct bdb_header *validate_vbt(const void __iomem *_base, 1234static const struct bdb_header *validate_vbt(const void *base,
1235 size_t size, 1235 size_t size,
1236 const void __iomem *_vbt, 1236 const void *_vbt,
1237 const char *source) 1237 const char *source)
1238{ 1238{
1239 /* 1239 size_t offset = _vbt - base;
1240 * This is the one place where we explicitly discard the address space 1240 const struct vbt_header *vbt = _vbt;
1241 * (__iomem) of the BIOS/VBT. (And this will cause a sparse complaint.)
1242 * From now on everything is based on 'base', and treated as regular
1243 * memory.
1244 */
1245 const void *base = (const void *) _base;
1246 size_t offset = _vbt - _base;
1247 const struct vbt_header *vbt = base + offset;
1248 const struct bdb_header *bdb; 1241 const struct bdb_header *bdb;
1249 1242
1250 if (offset + sizeof(struct vbt_header) > size) { 1243 if (offset + sizeof(struct vbt_header) > size) {
@@ -1282,7 +1275,15 @@ static const struct bdb_header *find_vbt(void __iomem *bios, size_t size)
1282 /* Scour memory looking for the VBT signature. */ 1275 /* Scour memory looking for the VBT signature. */
1283 for (i = 0; i + 4 < size; i++) { 1276 for (i = 0; i + 4 < size; i++) {
1284 if (ioread32(bios + i) == *((const u32 *) "$VBT")) { 1277 if (ioread32(bios + i) == *((const u32 *) "$VBT")) {
1285 bdb = validate_vbt(bios, size, bios + i, "PCI ROM"); 1278 /*
1279 * This is the one place where we explicitly discard the
1280 * address space (__iomem) of the BIOS/VBT. From now on
1281 * everything is based on 'base', and treated as regular
1282 * memory.
1283 */
1284 void *_bios = (void __force *) bios;
1285
1286 bdb = validate_vbt(_bios, size, _bios + i, "PCI ROM");
1286 break; 1287 break;
1287 } 1288 }
1288 } 1289 }
@@ -1350,21 +1351,3 @@ intel_parse_bios(struct drm_device *dev)
1350 1351
1351 return 0; 1352 return 0;
1352} 1353}
1353
1354/* Ensure that vital registers have been initialised, even if the BIOS
1355 * is absent or just failing to do its job.
1356 */
1357void intel_setup_bios(struct drm_device *dev)
1358{
1359 struct drm_i915_private *dev_priv = dev->dev_private;
1360
1361 /* Set the Panel Power On/Off timings if uninitialized. */
1362 if (!HAS_PCH_SPLIT(dev) &&
1363 I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
1364 /* Set T2 to 40ms and T5 to 200ms */
1365 I915_WRITE(PP_ON_DELAYS, 0x019007d0);
1366
1367 /* Set T3 to 35ms and Tx to 200ms */
1368 I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
1369 }
1370}
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 46cd5c7ebacd..7ec8c9aefb84 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -588,7 +588,6 @@ struct bdb_psr {
588 struct psr_table psr_table[16]; 588 struct psr_table psr_table[16];
589} __packed; 589} __packed;
590 590
591void intel_setup_bios(struct drm_device *dev);
592int intel_parse_bios(struct drm_device *dev); 591int intel_parse_bios(struct drm_device *dev);
593 592
594/* 593/*
@@ -742,7 +741,6 @@ int intel_parse_bios(struct drm_device *dev);
742 */ 741 */
743#define DEVICE_TYPE_eDP_BITS \ 742#define DEVICE_TYPE_eDP_BITS \
744 (DEVICE_TYPE_INTERNAL_CONNECTOR | \ 743 (DEVICE_TYPE_INTERNAL_CONNECTOR | \
745 DEVICE_TYPE_NOT_HDMI_OUTPUT | \
746 DEVICE_TYPE_MIPI_OUTPUT | \ 744 DEVICE_TYPE_MIPI_OUTPUT | \
747 DEVICE_TYPE_COMPOSITE_OUTPUT | \ 745 DEVICE_TYPE_COMPOSITE_OUTPUT | \
748 DEVICE_TYPE_DUAL_CHANNEL | \ 746 DEVICE_TYPE_DUAL_CHANNEL | \
@@ -750,7 +748,6 @@ int intel_parse_bios(struct drm_device *dev);
750 DEVICE_TYPE_TMDS_DVI_SIGNALING | \ 748 DEVICE_TYPE_TMDS_DVI_SIGNALING | \
751 DEVICE_TYPE_VIDEO_SIGNALING | \ 749 DEVICE_TYPE_VIDEO_SIGNALING | \
752 DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ 750 DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
753 DEVICE_TYPE_DIGITAL_OUTPUT | \
754 DEVICE_TYPE_ANALOG_OUTPUT) 751 DEVICE_TYPE_ANALOG_OUTPUT)
755 752
756/* define the DVO port for HDMI output type */ 753/* define the DVO port for HDMI output type */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index af5e43bef4a4..b84aaa0bb48a 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -158,7 +158,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
158 struct drm_i915_private *dev_priv = dev->dev_private; 158 struct drm_i915_private *dev_priv = dev->dev_private;
159 struct intel_crt *crt = intel_encoder_to_crt(encoder); 159 struct intel_crt *crt = intel_encoder_to_crt(encoder);
160 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 160 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
161 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 161 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
162 u32 adpa; 162 u32 adpa;
163 163
164 if (INTEL_INFO(dev)->gen >= 5) 164 if (INTEL_INFO(dev)->gen >= 5)
@@ -376,7 +376,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
376{ 376{
377 struct drm_device *dev = connector->dev; 377 struct drm_device *dev = connector->dev;
378 struct drm_i915_private *dev_priv = dev->dev_private; 378 struct drm_i915_private *dev_priv = dev->dev_private;
379 u32 hotplug_en, orig, stat; 379 u32 stat;
380 bool ret = false; 380 bool ret = false;
381 int i, tries = 0; 381 int i, tries = 0;
382 382
@@ -395,12 +395,12 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
395 tries = 2; 395 tries = 2;
396 else 396 else
397 tries = 1; 397 tries = 1;
398 hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN);
399 hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
400 398
401 for (i = 0; i < tries ; i++) { 399 for (i = 0; i < tries ; i++) {
402 /* turn on the FORCE_DETECT */ 400 /* turn on the FORCE_DETECT */
403 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 401 i915_hotplug_interrupt_update(dev_priv,
402 CRT_HOTPLUG_FORCE_DETECT,
403 CRT_HOTPLUG_FORCE_DETECT);
404 /* wait for FORCE_DETECT to go off */ 404 /* wait for FORCE_DETECT to go off */
405 if (wait_for((I915_READ(PORT_HOTPLUG_EN) & 405 if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
406 CRT_HOTPLUG_FORCE_DETECT) == 0, 406 CRT_HOTPLUG_FORCE_DETECT) == 0,
@@ -415,8 +415,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
415 /* clear the interrupt we just generated, if any */ 415 /* clear the interrupt we just generated, if any */
416 I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS); 416 I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
417 417
418 /* and put the bits back */ 418 i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0);
419 I915_WRITE(PORT_HOTPLUG_EN, orig);
420 419
421 return ret; 420 return ret;
422} 421}
@@ -891,7 +890,7 @@ void intel_crt_init(struct drm_device *dev)
891 u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT | 890 u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
892 FDI_RX_LINK_REVERSAL_OVERRIDE; 891 FDI_RX_LINK_REVERSAL_OVERRIDE;
893 892
894 dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config; 893 dev_priv->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config;
895 } 894 }
896 895
897 intel_crt_reset(connector); 896 intel_crt_reset(connector);
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index d0f1b8d833cd..9e530a739354 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -42,13 +42,15 @@
42 */ 42 */
43 43
44#define I915_CSR_SKL "i915/skl_dmc_ver1.bin" 44#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
45#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
45 46
46MODULE_FIRMWARE(I915_CSR_SKL); 47MODULE_FIRMWARE(I915_CSR_SKL);
48MODULE_FIRMWARE(I915_CSR_BXT);
47 49
48/* 50/*
49* SKL CSR registers for DC5 and DC6 51* SKL CSR registers for DC5 and DC6
50*/ 52*/
51#define CSR_PROGRAM_BASE 0x80000 53#define CSR_PROGRAM(i) (0x80000 + (i) * 4)
52#define CSR_SSP_BASE_ADDR_GEN9 0x00002FC0 54#define CSR_SSP_BASE_ADDR_GEN9 0x00002FC0
53#define CSR_HTP_ADDR_SKL 0x00500034 55#define CSR_HTP_ADDR_SKL 0x00500034
54#define CSR_SSP_BASE 0x8F074 56#define CSR_SSP_BASE 0x8F074
@@ -181,11 +183,19 @@ static const struct stepping_info skl_stepping_info[] = {
181 {'G', '0'}, {'H', '0'}, {'I', '0'} 183 {'G', '0'}, {'H', '0'}, {'I', '0'}
182}; 184};
183 185
186static struct stepping_info bxt_stepping_info[] = {
187 {'A', '0'}, {'A', '1'}, {'A', '2'},
188 {'B', '0'}, {'B', '1'}, {'B', '2'}
189};
190
184static char intel_get_stepping(struct drm_device *dev) 191static char intel_get_stepping(struct drm_device *dev)
185{ 192{
186 if (IS_SKYLAKE(dev) && (dev->pdev->revision < 193 if (IS_SKYLAKE(dev) && (dev->pdev->revision <
187 ARRAY_SIZE(skl_stepping_info))) 194 ARRAY_SIZE(skl_stepping_info)))
188 return skl_stepping_info[dev->pdev->revision].stepping; 195 return skl_stepping_info[dev->pdev->revision].stepping;
196 else if (IS_BROXTON(dev) && (dev->pdev->revision <
197 ARRAY_SIZE(bxt_stepping_info)))
198 return bxt_stepping_info[dev->pdev->revision].stepping;
189 else 199 else
190 return -ENODATA; 200 return -ENODATA;
191} 201}
@@ -195,6 +205,9 @@ static char intel_get_substepping(struct drm_device *dev)
195 if (IS_SKYLAKE(dev) && (dev->pdev->revision < 205 if (IS_SKYLAKE(dev) && (dev->pdev->revision <
196 ARRAY_SIZE(skl_stepping_info))) 206 ARRAY_SIZE(skl_stepping_info)))
197 return skl_stepping_info[dev->pdev->revision].substepping; 207 return skl_stepping_info[dev->pdev->revision].substepping;
208 else if (IS_BROXTON(dev) && (dev->pdev->revision <
209 ARRAY_SIZE(bxt_stepping_info)))
210 return bxt_stepping_info[dev->pdev->revision].substepping;
198 else 211 else
199 return -ENODATA; 212 return -ENODATA;
200} 213}
@@ -252,11 +265,19 @@ void intel_csr_load_program(struct drm_device *dev)
252 return; 265 return;
253 } 266 }
254 267
268 /*
269 * FIXME: Firmware gets lost on S3/S4, but not when entering system
270 * standby or suspend-to-idle (which is just like forced runtime pm).
271 * Unfortunately the ACPI subsystem doesn't yet give us a way to
272 * differentiate this, hence figure it out with this hack.
273 */
274 if (I915_READ(CSR_PROGRAM(0)))
275 return;
276
255 mutex_lock(&dev_priv->csr_lock); 277 mutex_lock(&dev_priv->csr_lock);
256 fw_size = dev_priv->csr.dmc_fw_size; 278 fw_size = dev_priv->csr.dmc_fw_size;
257 for (i = 0; i < fw_size; i++) 279 for (i = 0; i < fw_size; i++)
258 I915_WRITE(CSR_PROGRAM_BASE + i * 4, 280 I915_WRITE(CSR_PROGRAM(i), payload[i]);
259 payload[i]);
260 281
261 for (i = 0; i < dev_priv->csr.mmio_count; i++) { 282 for (i = 0; i < dev_priv->csr.mmio_count; i++) {
262 I915_WRITE(dev_priv->csr.mmioaddr[i], 283 I915_WRITE(dev_priv->csr.mmioaddr[i],
@@ -409,6 +430,8 @@ void intel_csr_ucode_init(struct drm_device *dev)
409 430
410 if (IS_SKYLAKE(dev)) 431 if (IS_SKYLAKE(dev))
411 csr->fw_path = I915_CSR_SKL; 432 csr->fw_path = I915_CSR_SKL;
433 else if (IS_BROXTON(dev_priv))
434 csr->fw_path = I915_CSR_BXT;
412 else { 435 else {
413 DRM_ERROR("Unexpected: no known CSR firmware for platform\n"); 436 DRM_ERROR("Unexpected: no known CSR firmware for platform\n");
414 intel_csr_load_status_set(dev_priv, FW_FAILED); 437 intel_csr_load_status_set(dev_priv, FW_FAILED);
@@ -454,10 +477,10 @@ void intel_csr_ucode_fini(struct drm_device *dev)
454 477
455void assert_csr_loaded(struct drm_i915_private *dev_priv) 478void assert_csr_loaded(struct drm_i915_private *dev_priv)
456{ 479{
457 WARN(intel_csr_load_status_get(dev_priv) != FW_LOADED, 480 WARN_ONCE(intel_csr_load_status_get(dev_priv) != FW_LOADED,
458 "CSR is not loaded.\n"); 481 "CSR is not loaded.\n");
459 WARN(!I915_READ(CSR_PROGRAM_BASE), 482 WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
460 "CSR program storage start is NULL\n"); 483 "CSR program storage start is NULL\n");
461 WARN(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n"); 484 WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
462 WARN(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n"); 485 WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
463} 486}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 61575f67a626..b25e99a432fb 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -256,9 +256,6 @@ struct bxt_ddi_buf_trans {
256 bool default_index; /* true if the entry represents default value */ 256 bool default_index; /* true if the entry represents default value */
257}; 257};
258 258
259/* BSpec does not define separate vswing/pre-emphasis values for eDP.
260 * Using DP values for eDP as well.
261 */
262static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = { 259static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
263 /* Idx NT mV diff db */ 260 /* Idx NT mV diff db */
264 { 52, 0x9A, 0, 128, true }, /* 0: 400 0 */ 261 { 52, 0x9A, 0, 128, true }, /* 0: 400 0 */
@@ -273,6 +270,20 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
273 { 154, 0x9A, 1, 128, false }, /* 9: 1200 0 */ 270 { 154, 0x9A, 1, 128, false }, /* 9: 1200 0 */
274}; 271};
275 272
273static const struct bxt_ddi_buf_trans bxt_ddi_translations_edp[] = {
274 /* Idx NT mV diff db */
275 { 26, 0, 0, 128, false }, /* 0: 200 0 */
276 { 38, 0, 0, 112, false }, /* 1: 200 1.5 */
277 { 48, 0, 0, 96, false }, /* 2: 200 4 */
278 { 54, 0, 0, 69, false }, /* 3: 200 6 */
279 { 32, 0, 0, 128, false }, /* 4: 250 0 */
280 { 48, 0, 0, 104, false }, /* 5: 250 1.5 */
281 { 54, 0, 0, 85, false }, /* 6: 250 4 */
282 { 43, 0, 0, 128, false }, /* 7: 300 0 */
283 { 54, 0, 0, 101, false }, /* 8: 300 1.5 */
284 { 48, 0, 0, 128, false }, /* 9: 300 0 */
285};
286
276/* BSpec has 2 recommended values - entries 0 and 8. 287/* BSpec has 2 recommended values - entries 0 and 8.
277 * Using the entry with higher vswing. 288 * Using the entry with higher vswing.
278 */ 289 */
@@ -298,21 +309,26 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
298 enum port *port) 309 enum port *port)
299{ 310{
300 struct drm_encoder *encoder = &intel_encoder->base; 311 struct drm_encoder *encoder = &intel_encoder->base;
301 int type = intel_encoder->type;
302 312
303 if (type == INTEL_OUTPUT_DP_MST) { 313 switch (intel_encoder->type) {
314 case INTEL_OUTPUT_DP_MST:
304 *dig_port = enc_to_mst(encoder)->primary; 315 *dig_port = enc_to_mst(encoder)->primary;
305 *port = (*dig_port)->port; 316 *port = (*dig_port)->port;
306 } else if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || 317 break;
307 type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) { 318 case INTEL_OUTPUT_DISPLAYPORT:
319 case INTEL_OUTPUT_EDP:
320 case INTEL_OUTPUT_HDMI:
321 case INTEL_OUTPUT_UNKNOWN:
308 *dig_port = enc_to_dig_port(encoder); 322 *dig_port = enc_to_dig_port(encoder);
309 *port = (*dig_port)->port; 323 *port = (*dig_port)->port;
310 } else if (type == INTEL_OUTPUT_ANALOG) { 324 break;
325 case INTEL_OUTPUT_ANALOG:
311 *dig_port = NULL; 326 *dig_port = NULL;
312 *port = PORT_E; 327 *port = PORT_E;
313 } else { 328 break;
314 DRM_ERROR("Invalid DDI encoder type %d\n", type); 329 default:
315 BUG(); 330 WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
331 break;
316 } 332 }
317} 333}
318 334
@@ -414,7 +430,6 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
414 bool supports_hdmi) 430 bool supports_hdmi)
415{ 431{
416 struct drm_i915_private *dev_priv = dev->dev_private; 432 struct drm_i915_private *dev_priv = dev->dev_private;
417 u32 reg;
418 u32 iboost_bit = 0; 433 u32 iboost_bit = 0;
419 int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry, 434 int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
420 size; 435 size;
@@ -505,11 +520,11 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
505 BUG(); 520 BUG();
506 } 521 }
507 522
508 for (i = 0, reg = DDI_BUF_TRANS(port); i < size; i++) { 523 for (i = 0; i < size; i++) {
509 I915_WRITE(reg, ddi_translations[i].trans1 | iboost_bit); 524 I915_WRITE(DDI_BUF_TRANS_LO(port, i),
510 reg += 4; 525 ddi_translations[i].trans1 | iboost_bit);
511 I915_WRITE(reg, ddi_translations[i].trans2); 526 I915_WRITE(DDI_BUF_TRANS_HI(port, i),
512 reg += 4; 527 ddi_translations[i].trans2);
513 } 528 }
514 529
515 if (!supports_hdmi) 530 if (!supports_hdmi)
@@ -521,10 +536,10 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
521 hdmi_level = hdmi_default_entry; 536 hdmi_level = hdmi_default_entry;
522 537
523 /* Entry 9 is for HDMI: */ 538 /* Entry 9 is for HDMI: */
524 I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit); 539 I915_WRITE(DDI_BUF_TRANS_LO(port, i),
525 reg += 4; 540 ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
526 I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans2); 541 I915_WRITE(DDI_BUF_TRANS_HI(port, i),
527 reg += 4; 542 ddi_translations_hdmi[hdmi_level].trans2);
528} 543}
529 544
530/* Program DDI buffers translations for DP. By default, program ports A-D in DP 545/* Program DDI buffers translations for DP. By default, program ports A-D in DP
@@ -543,8 +558,10 @@ void intel_prepare_ddi(struct drm_device *dev)
543 enum port port; 558 enum port port;
544 bool supports_hdmi; 559 bool supports_hdmi;
545 560
546 ddi_get_encoder_port(intel_encoder, &intel_dig_port, &port); 561 if (intel_encoder->type == INTEL_OUTPUT_DSI)
562 continue;
547 563
564 ddi_get_encoder_port(intel_encoder, &intel_dig_port, &port);
548 if (visited[port]) 565 if (visited[port])
549 continue; 566 continue;
550 567
@@ -593,7 +610,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
593 * 610 *
594 * WaFDIAutoLinkSetTimingOverrride:hsw 611 * WaFDIAutoLinkSetTimingOverrride:hsw
595 */ 612 */
596 I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) | 613 I915_WRITE(FDI_RX_MISC(PIPE_A), FDI_RX_PWRDN_LANE1_VAL(2) |
597 FDI_RX_PWRDN_LANE0_VAL(2) | 614 FDI_RX_PWRDN_LANE0_VAL(2) |
598 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 615 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
599 616
@@ -601,13 +618,13 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
601 rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE | 618 rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
602 FDI_RX_PLL_ENABLE | 619 FDI_RX_PLL_ENABLE |
603 FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes); 620 FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
604 I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); 621 I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
605 POSTING_READ(_FDI_RXA_CTL); 622 POSTING_READ(FDI_RX_CTL(PIPE_A));
606 udelay(220); 623 udelay(220);
607 624
608 /* Switch from Rawclk to PCDclk */ 625 /* Switch from Rawclk to PCDclk */
609 rx_ctl_val |= FDI_PCDCLK; 626 rx_ctl_val |= FDI_PCDCLK;
610 I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); 627 I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
611 628
612 /* Configure Port Clock Select */ 629 /* Configure Port Clock Select */
613 I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config->ddi_pll_sel); 630 I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config->ddi_pll_sel);
@@ -636,21 +653,21 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
636 udelay(600); 653 udelay(600);
637 654
638 /* Program PCH FDI Receiver TU */ 655 /* Program PCH FDI Receiver TU */
639 I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64)); 656 I915_WRITE(FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
640 657
641 /* Enable PCH FDI Receiver with auto-training */ 658 /* Enable PCH FDI Receiver with auto-training */
642 rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO; 659 rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
643 I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); 660 I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
644 POSTING_READ(_FDI_RXA_CTL); 661 POSTING_READ(FDI_RX_CTL(PIPE_A));
645 662
646 /* Wait for FDI receiver lane calibration */ 663 /* Wait for FDI receiver lane calibration */
647 udelay(30); 664 udelay(30);
648 665
649 /* Unset FDI_RX_MISC pwrdn lanes */ 666 /* Unset FDI_RX_MISC pwrdn lanes */
650 temp = I915_READ(_FDI_RXA_MISC); 667 temp = I915_READ(FDI_RX_MISC(PIPE_A));
651 temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); 668 temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
652 I915_WRITE(_FDI_RXA_MISC, temp); 669 I915_WRITE(FDI_RX_MISC(PIPE_A), temp);
653 POSTING_READ(_FDI_RXA_MISC); 670 POSTING_READ(FDI_RX_MISC(PIPE_A));
654 671
655 /* Wait for FDI auto training time */ 672 /* Wait for FDI auto training time */
656 udelay(5); 673 udelay(5);
@@ -684,15 +701,15 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
684 intel_wait_ddi_buf_idle(dev_priv, PORT_E); 701 intel_wait_ddi_buf_idle(dev_priv, PORT_E);
685 702
686 rx_ctl_val &= ~FDI_RX_ENABLE; 703 rx_ctl_val &= ~FDI_RX_ENABLE;
687 I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); 704 I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
688 POSTING_READ(_FDI_RXA_CTL); 705 POSTING_READ(FDI_RX_CTL(PIPE_A));
689 706
690 /* Reset FDI_RX_MISC pwrdn lanes */ 707 /* Reset FDI_RX_MISC pwrdn lanes */
691 temp = I915_READ(_FDI_RXA_MISC); 708 temp = I915_READ(FDI_RX_MISC(PIPE_A));
692 temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); 709 temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
693 temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); 710 temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
694 I915_WRITE(_FDI_RXA_MISC, temp); 711 I915_WRITE(FDI_RX_MISC(PIPE_A), temp);
695 POSTING_READ(_FDI_RXA_MISC); 712 POSTING_READ(FDI_RX_MISC(PIPE_A));
696 } 713 }
697 714
698 DRM_ERROR("FDI link training failed!\n"); 715 DRM_ERROR("FDI link training failed!\n");
@@ -707,7 +724,6 @@ void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
707 intel_dp->DP = intel_dig_port->saved_port_bits | 724 intel_dp->DP = intel_dig_port->saved_port_bits |
708 DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0); 725 DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
709 intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count); 726 intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
710
711} 727}
712 728
713static struct intel_encoder * 729static struct intel_encoder *
@@ -955,8 +971,8 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
955 uint32_t cfgcr1_val, cfgcr2_val; 971 uint32_t cfgcr1_val, cfgcr2_val;
956 uint32_t p0, p1, p2, dco_freq; 972 uint32_t p0, p1, p2, dco_freq;
957 973
958 cfgcr1_reg = GET_CFG_CR1_REG(dpll); 974 cfgcr1_reg = DPLL_CFGCR1(dpll);
959 cfgcr2_reg = GET_CFG_CR2_REG(dpll); 975 cfgcr2_reg = DPLL_CFGCR2(dpll);
960 976
961 cfgcr1_val = I915_READ(cfgcr1_reg); 977 cfgcr1_val = I915_READ(cfgcr1_reg);
962 cfgcr2_val = I915_READ(cfgcr2_reg); 978 cfgcr2_val = I915_READ(cfgcr2_reg);
@@ -1242,9 +1258,10 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
1242static bool 1258static bool
1243hsw_ddi_pll_select(struct intel_crtc *intel_crtc, 1259hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
1244 struct intel_crtc_state *crtc_state, 1260 struct intel_crtc_state *crtc_state,
1245 struct intel_encoder *intel_encoder, 1261 struct intel_encoder *intel_encoder)
1246 int clock)
1247{ 1262{
1263 int clock = crtc_state->port_clock;
1264
1248 if (intel_encoder->type == INTEL_OUTPUT_HDMI) { 1265 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
1249 struct intel_shared_dpll *pll; 1266 struct intel_shared_dpll *pll;
1250 uint32_t val; 1267 uint32_t val;
@@ -1523,11 +1540,11 @@ skip_remaining_dividers:
1523static bool 1540static bool
1524skl_ddi_pll_select(struct intel_crtc *intel_crtc, 1541skl_ddi_pll_select(struct intel_crtc *intel_crtc,
1525 struct intel_crtc_state *crtc_state, 1542 struct intel_crtc_state *crtc_state,
1526 struct intel_encoder *intel_encoder, 1543 struct intel_encoder *intel_encoder)
1527 int clock)
1528{ 1544{
1529 struct intel_shared_dpll *pll; 1545 struct intel_shared_dpll *pll;
1530 uint32_t ctrl1, cfgcr1, cfgcr2; 1546 uint32_t ctrl1, cfgcr1, cfgcr2;
1547 int clock = crtc_state->port_clock;
1531 1548
1532 /* 1549 /*
1533 * See comment in intel_dpll_hw_state to understand why we always use 0 1550 * See comment in intel_dpll_hw_state to understand why we always use 0
@@ -1615,14 +1632,14 @@ static const struct bxt_clk_div bxt_dp_clk_val[] = {
1615static bool 1632static bool
1616bxt_ddi_pll_select(struct intel_crtc *intel_crtc, 1633bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
1617 struct intel_crtc_state *crtc_state, 1634 struct intel_crtc_state *crtc_state,
1618 struct intel_encoder *intel_encoder, 1635 struct intel_encoder *intel_encoder)
1619 int clock)
1620{ 1636{
1621 struct intel_shared_dpll *pll; 1637 struct intel_shared_dpll *pll;
1622 struct bxt_clk_div clk_div = {0}; 1638 struct bxt_clk_div clk_div = {0};
1623 int vco = 0; 1639 int vco = 0;
1624 uint32_t prop_coef, int_coef, gain_ctl, targ_cnt; 1640 uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
1625 uint32_t lanestagger; 1641 uint32_t lanestagger;
1642 int clock = crtc_state->port_clock;
1626 1643
1627 if (intel_encoder->type == INTEL_OUTPUT_HDMI) { 1644 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
1628 intel_clock_t best_clock; 1645 intel_clock_t best_clock;
@@ -1750,17 +1767,16 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
1750 struct drm_device *dev = intel_crtc->base.dev; 1767 struct drm_device *dev = intel_crtc->base.dev;
1751 struct intel_encoder *intel_encoder = 1768 struct intel_encoder *intel_encoder =
1752 intel_ddi_get_crtc_new_encoder(crtc_state); 1769 intel_ddi_get_crtc_new_encoder(crtc_state);
1753 int clock = crtc_state->port_clock;
1754 1770
1755 if (IS_SKYLAKE(dev)) 1771 if (IS_SKYLAKE(dev))
1756 return skl_ddi_pll_select(intel_crtc, crtc_state, 1772 return skl_ddi_pll_select(intel_crtc, crtc_state,
1757 intel_encoder, clock); 1773 intel_encoder);
1758 else if (IS_BROXTON(dev)) 1774 else if (IS_BROXTON(dev))
1759 return bxt_ddi_pll_select(intel_crtc, crtc_state, 1775 return bxt_ddi_pll_select(intel_crtc, crtc_state,
1760 intel_encoder, clock); 1776 intel_encoder);
1761 else 1777 else
1762 return hsw_ddi_pll_select(intel_crtc, crtc_state, 1778 return hsw_ddi_pll_select(intel_crtc, crtc_state,
1763 intel_encoder, clock); 1779 intel_encoder);
1764} 1780}
1765 1781
1766void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) 1782void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
@@ -1893,7 +1909,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
1893 } else 1909 } else
1894 temp |= TRANS_DDI_MODE_SELECT_DP_SST; 1910 temp |= TRANS_DDI_MODE_SELECT_DP_SST;
1895 1911
1896 temp |= DDI_PORT_WIDTH(intel_dp->lane_count); 1912 temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
1897 } else if (type == INTEL_OUTPUT_DP_MST) { 1913 } else if (type == INTEL_OUTPUT_DP_MST) {
1898 struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp; 1914 struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp;
1899 1915
@@ -1902,7 +1918,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
1902 } else 1918 } else
1903 temp |= TRANS_DDI_MODE_SELECT_DP_SST; 1919 temp |= TRANS_DDI_MODE_SELECT_DP_SST;
1904 1920
1905 temp |= DDI_PORT_WIDTH(intel_dp->lane_count); 1921 temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
1906 } else { 1922 } else {
1907 WARN(1, "Invalid encoder type %d for pipe %c\n", 1923 WARN(1, "Invalid encoder type %d for pipe %c\n",
1908 intel_encoder->type, pipe_name(pipe)); 1924 intel_encoder->type, pipe_name(pipe));
@@ -2029,7 +2045,8 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
2029void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc) 2045void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
2030{ 2046{
2031 struct drm_crtc *crtc = &intel_crtc->base; 2047 struct drm_crtc *crtc = &intel_crtc->base;
2032 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 2048 struct drm_device *dev = crtc->dev;
2049 struct drm_i915_private *dev_priv = dev->dev_private;
2033 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); 2050 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
2034 enum port port = intel_ddi_get_encoder_port(intel_encoder); 2051 enum port port = intel_ddi_get_encoder_port(intel_encoder);
2035 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 2052 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
@@ -2114,7 +2131,11 @@ static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
2114 u32 n_entries, i; 2131 u32 n_entries, i;
2115 uint32_t val; 2132 uint32_t val;
2116 2133
2117 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { 2134 if (type == INTEL_OUTPUT_EDP && dev_priv->edp_low_vswing) {
2135 n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
2136 ddi_translations = bxt_ddi_translations_edp;
2137 } else if (type == INTEL_OUTPUT_DISPLAYPORT
2138 || type == INTEL_OUTPUT_EDP) {
2118 n_entries = ARRAY_SIZE(bxt_ddi_translations_dp); 2139 n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
2119 ddi_translations = bxt_ddi_translations_dp; 2140 ddi_translations = bxt_ddi_translations_dp;
2120 } else if (type == INTEL_OUTPUT_HDMI) { 2141 } else if (type == INTEL_OUTPUT_HDMI) {
@@ -2152,9 +2173,13 @@ static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
2152 I915_WRITE(BXT_PORT_TX_DW2_GRP(port), val); 2173 I915_WRITE(BXT_PORT_TX_DW2_GRP(port), val);
2153 2174
2154 val = I915_READ(BXT_PORT_TX_DW3_LN0(port)); 2175 val = I915_READ(BXT_PORT_TX_DW3_LN0(port));
2155 val &= ~UNIQE_TRANGE_EN_METHOD; 2176 val &= ~SCALE_DCOMP_METHOD;
2156 if (ddi_translations[level].enable) 2177 if (ddi_translations[level].enable)
2157 val |= UNIQE_TRANGE_EN_METHOD; 2178 val |= SCALE_DCOMP_METHOD;
2179
2180 if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
2181 DRM_ERROR("Disabled scaling while ouniqetrangenmethod was set");
2182
2158 I915_WRITE(BXT_PORT_TX_DW3_GRP(port), val); 2183 I915_WRITE(BXT_PORT_TX_DW3_GRP(port), val);
2159 2184
2160 val = I915_READ(BXT_PORT_TX_DW4_LN0(port)); 2185 val = I915_READ(BXT_PORT_TX_DW4_LN0(port));
@@ -2289,11 +2314,12 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
2289 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { 2314 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
2290 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2315 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2291 2316
2317 intel_dp_set_link_params(intel_dp, crtc->config);
2318
2292 intel_ddi_init_dp_buf_reg(intel_encoder); 2319 intel_ddi_init_dp_buf_reg(intel_encoder);
2293 2320
2294 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 2321 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2295 intel_dp_start_link_train(intel_dp); 2322 intel_dp_start_link_train(intel_dp);
2296 intel_dp_complete_link_train(intel_dp);
2297 if (port != PORT_A || INTEL_INFO(dev)->gen >= 9) 2323 if (port != PORT_A || INTEL_INFO(dev)->gen >= 9)
2298 intel_dp_stop_link_train(intel_dp); 2324 intel_dp_stop_link_train(intel_dp);
2299 } else if (type == INTEL_OUTPUT_HDMI) { 2325 } else if (type == INTEL_OUTPUT_HDMI) {
@@ -2480,20 +2506,20 @@ static const struct skl_dpll_regs skl_dpll_regs[3] = {
2480 { 2506 {
2481 /* DPLL 1 */ 2507 /* DPLL 1 */
2482 .ctl = LCPLL2_CTL, 2508 .ctl = LCPLL2_CTL,
2483 .cfgcr1 = DPLL1_CFGCR1, 2509 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
2484 .cfgcr2 = DPLL1_CFGCR2, 2510 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
2485 }, 2511 },
2486 { 2512 {
2487 /* DPLL 2 */ 2513 /* DPLL 2 */
2488 .ctl = WRPLL_CTL1, 2514 .ctl = WRPLL_CTL1,
2489 .cfgcr1 = DPLL2_CFGCR1, 2515 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
2490 .cfgcr2 = DPLL2_CFGCR2, 2516 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
2491 }, 2517 },
2492 { 2518 {
2493 /* DPLL 3 */ 2519 /* DPLL 3 */
2494 .ctl = WRPLL_CTL2, 2520 .ctl = WRPLL_CTL2,
2495 .cfgcr1 = DPLL3_CFGCR1, 2521 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
2496 .cfgcr2 = DPLL3_CFGCR2, 2522 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
2497 }, 2523 },
2498}; 2524};
2499 2525
@@ -2881,7 +2907,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2881 * here just read out lanes 0/1 and output a note if lanes 2/3 differ. 2907 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
2882 */ 2908 */
2883 hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(port)); 2909 hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
2884 if (I915_READ(BXT_PORT_PCS_DW12_LN23(port) != hw_state->pcsdw12)) 2910 if (I915_READ(BXT_PORT_PCS_DW12_LN23(port)) != hw_state->pcsdw12)
2885 DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n", 2911 DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2886 hw_state->pcsdw12, 2912 hw_state->pcsdw12,
2887 I915_READ(BXT_PORT_PCS_DW12_LN23(port))); 2913 I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
@@ -2999,22 +3025,22 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
2999 3025
3000 intel_ddi_post_disable(intel_encoder); 3026 intel_ddi_post_disable(intel_encoder);
3001 3027
3002 val = I915_READ(_FDI_RXA_CTL); 3028 val = I915_READ(FDI_RX_CTL(PIPE_A));
3003 val &= ~FDI_RX_ENABLE; 3029 val &= ~FDI_RX_ENABLE;
3004 I915_WRITE(_FDI_RXA_CTL, val); 3030 I915_WRITE(FDI_RX_CTL(PIPE_A), val);
3005 3031
3006 val = I915_READ(_FDI_RXA_MISC); 3032 val = I915_READ(FDI_RX_MISC(PIPE_A));
3007 val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); 3033 val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
3008 val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); 3034 val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
3009 I915_WRITE(_FDI_RXA_MISC, val); 3035 I915_WRITE(FDI_RX_MISC(PIPE_A), val);
3010 3036
3011 val = I915_READ(_FDI_RXA_CTL); 3037 val = I915_READ(FDI_RX_CTL(PIPE_A));
3012 val &= ~FDI_PCDCLK; 3038 val &= ~FDI_PCDCLK;
3013 I915_WRITE(_FDI_RXA_CTL, val); 3039 I915_WRITE(FDI_RX_CTL(PIPE_A), val);
3014 3040
3015 val = I915_READ(_FDI_RXA_CTL); 3041 val = I915_READ(FDI_RX_CTL(PIPE_A));
3016 val &= ~FDI_RX_PLL_ENABLE; 3042 val &= ~FDI_RX_PLL_ENABLE;
3017 I915_WRITE(_FDI_RXA_CTL, val); 3043 I915_WRITE(FDI_RX_CTL(PIPE_A), val);
3018} 3044}
3019 3045
3020void intel_ddi_get_config(struct intel_encoder *encoder, 3046void intel_ddi_get_config(struct intel_encoder *encoder,
@@ -3069,6 +3095,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
3069 case TRANS_DDI_MODE_SELECT_DP_SST: 3095 case TRANS_DDI_MODE_SELECT_DP_SST:
3070 case TRANS_DDI_MODE_SELECT_DP_MST: 3096 case TRANS_DDI_MODE_SELECT_DP_MST:
3071 pipe_config->has_dp_encoder = true; 3097 pipe_config->has_dp_encoder = true;
3098 pipe_config->lane_count =
3099 ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
3072 intel_dp_get_m_n(intel_crtc, pipe_config); 3100 intel_dp_get_m_n(intel_crtc, pipe_config);
3073 break; 3101 break;
3074 default: 3102 default:
@@ -3215,7 +3243,15 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
3215 goto err; 3243 goto err;
3216 3244
3217 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; 3245 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
3218 dev_priv->hotplug.irq_port[port] = intel_dig_port; 3246 /*
3247 * On BXT A0/A1, sw needs to activate DDIA HPD logic and
3248 * interrupts to check the external panel connection.
3249 */
3250 if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0)
3251 && port == PORT_B)
3252 dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
3253 else
3254 dev_priv->hotplug.irq_port[port] = intel_dig_port;
3219 } 3255 }
3220 3256
3221 /* In theory we don't need the encoder->type check, but leave it just in 3257 /* In theory we don't need the encoder->type check, but leave it just in
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b2270d576979..f62ffc04c21d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -72,6 +72,10 @@ static const uint32_t skl_primary_formats[] = {
72 DRM_FORMAT_ABGR8888, 72 DRM_FORMAT_ABGR8888,
73 DRM_FORMAT_XRGB2101010, 73 DRM_FORMAT_XRGB2101010,
74 DRM_FORMAT_XBGR2101010, 74 DRM_FORMAT_XBGR2101010,
75 DRM_FORMAT_YUYV,
76 DRM_FORMAT_YVYU,
77 DRM_FORMAT_UYVY,
78 DRM_FORMAT_VYUY,
75}; 79};
76 80
77/* Cursor formats */ 81/* Cursor formats */
@@ -108,6 +112,9 @@ static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_cr
108 struct intel_crtc_state *crtc_state); 112 struct intel_crtc_state *crtc_state);
109static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state, 113static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
110 int num_connectors); 114 int num_connectors);
115static void skylake_pfit_enable(struct intel_crtc *crtc);
116static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
117static void ironlake_pfit_enable(struct intel_crtc *crtc);
111static void intel_modeset_setup_hw_state(struct drm_device *dev); 118static void intel_modeset_setup_hw_state(struct drm_device *dev);
112 119
113typedef struct { 120typedef struct {
@@ -125,6 +132,42 @@ struct intel_limit {
125 intel_p2_t p2; 132 intel_p2_t p2;
126}; 133};
127 134
135/* returns HPLL frequency in kHz */
136static int valleyview_get_vco(struct drm_i915_private *dev_priv)
137{
138 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
139
140 /* Obtain SKU information */
141 mutex_lock(&dev_priv->sb_lock);
142 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
143 CCK_FUSE_HPLL_FREQ_MASK;
144 mutex_unlock(&dev_priv->sb_lock);
145
146 return vco_freq[hpll_freq] * 1000;
147}
148
149static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
150 const char *name, u32 reg)
151{
152 u32 val;
153 int divider;
154
155 if (dev_priv->hpll_freq == 0)
156 dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
157
158 mutex_lock(&dev_priv->sb_lock);
159 val = vlv_cck_read(dev_priv, reg);
160 mutex_unlock(&dev_priv->sb_lock);
161
162 divider = val & CCK_FREQUENCY_VALUES;
163
164 WARN((val & CCK_FREQUENCY_STATUS) !=
165 (divider << CCK_FREQUENCY_STATUS_SHIFT),
166 "%s change in progress\n", name);
167
168 return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
169}
170
128int 171int
129intel_pch_rawclk(struct drm_device *dev) 172intel_pch_rawclk(struct drm_device *dev)
130{ 173{
@@ -135,6 +178,50 @@ intel_pch_rawclk(struct drm_device *dev)
135 return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK; 178 return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
136} 179}
137 180
181/* hrawclock is 1/4 the FSB frequency */
182int intel_hrawclk(struct drm_device *dev)
183{
184 struct drm_i915_private *dev_priv = dev->dev_private;
185 uint32_t clkcfg;
186
187 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
188 if (IS_VALLEYVIEW(dev))
189 return 200;
190
191 clkcfg = I915_READ(CLKCFG);
192 switch (clkcfg & CLKCFG_FSB_MASK) {
193 case CLKCFG_FSB_400:
194 return 100;
195 case CLKCFG_FSB_533:
196 return 133;
197 case CLKCFG_FSB_667:
198 return 166;
199 case CLKCFG_FSB_800:
200 return 200;
201 case CLKCFG_FSB_1067:
202 return 266;
203 case CLKCFG_FSB_1333:
204 return 333;
205 /* these two are just a guess; one of them might be right */
206 case CLKCFG_FSB_1600:
207 case CLKCFG_FSB_1600_ALT:
208 return 400;
209 default:
210 return 133;
211 }
212}
213
214static void intel_update_czclk(struct drm_i915_private *dev_priv)
215{
216 if (!IS_VALLEYVIEW(dev_priv))
217 return;
218
219 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
220 CCK_CZ_CLOCK_CONTROL);
221
222 DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
223}
224
138static inline u32 /* units of 100MHz */ 225static inline u32 /* units of 100MHz */
139intel_fdi_link_freq(struct drm_device *dev) 226intel_fdi_link_freq(struct drm_device *dev)
140{ 227{
@@ -1061,54 +1148,6 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
1061 } 1148 }
1062} 1149}
1063 1150
1064/*
1065 * ibx_digital_port_connected - is the specified port connected?
1066 * @dev_priv: i915 private structure
1067 * @port: the port to test
1068 *
1069 * Returns true if @port is connected, false otherwise.
1070 */
1071bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
1072 struct intel_digital_port *port)
1073{
1074 u32 bit;
1075
1076 if (HAS_PCH_IBX(dev_priv->dev)) {
1077 switch (port->port) {
1078 case PORT_B:
1079 bit = SDE_PORTB_HOTPLUG;
1080 break;
1081 case PORT_C:
1082 bit = SDE_PORTC_HOTPLUG;
1083 break;
1084 case PORT_D:
1085 bit = SDE_PORTD_HOTPLUG;
1086 break;
1087 default:
1088 return true;
1089 }
1090 } else {
1091 switch (port->port) {
1092 case PORT_B:
1093 bit = SDE_PORTB_HOTPLUG_CPT;
1094 break;
1095 case PORT_C:
1096 bit = SDE_PORTC_HOTPLUG_CPT;
1097 break;
1098 case PORT_D:
1099 bit = SDE_PORTD_HOTPLUG_CPT;
1100 break;
1101 case PORT_E:
1102 bit = SDE_PORTE_HOTPLUG_SPT;
1103 break;
1104 default:
1105 return true;
1106 }
1107 }
1108
1109 return I915_READ(SDEISR) & bit;
1110}
1111
1112static const char *state_string(bool enabled) 1151static const char *state_string(bool enabled)
1113{ 1152{
1114 return enabled ? "on" : "off"; 1153 return enabled ? "on" : "off";
@@ -1118,12 +1157,10 @@ static const char *state_string(bool enabled)
1118void assert_pll(struct drm_i915_private *dev_priv, 1157void assert_pll(struct drm_i915_private *dev_priv,
1119 enum pipe pipe, bool state) 1158 enum pipe pipe, bool state)
1120{ 1159{
1121 int reg;
1122 u32 val; 1160 u32 val;
1123 bool cur_state; 1161 bool cur_state;
1124 1162
1125 reg = DPLL(pipe); 1163 val = I915_READ(DPLL(pipe));
1126 val = I915_READ(reg);
1127 cur_state = !!(val & DPLL_VCO_ENABLE); 1164 cur_state = !!(val & DPLL_VCO_ENABLE);
1128 I915_STATE_WARN(cur_state != state, 1165 I915_STATE_WARN(cur_state != state,
1129 "PLL state assertion failure (expected %s, current %s)\n", 1166 "PLL state assertion failure (expected %s, current %s)\n",
@@ -1180,20 +1217,16 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
1180static void assert_fdi_tx(struct drm_i915_private *dev_priv, 1217static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1181 enum pipe pipe, bool state) 1218 enum pipe pipe, bool state)
1182{ 1219{
1183 int reg;
1184 u32 val;
1185 bool cur_state; 1220 bool cur_state;
1186 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1221 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1187 pipe); 1222 pipe);
1188 1223
1189 if (HAS_DDI(dev_priv->dev)) { 1224 if (HAS_DDI(dev_priv->dev)) {
1190 /* DDI does not have a specific FDI_TX register */ 1225 /* DDI does not have a specific FDI_TX register */
1191 reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); 1226 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1192 val = I915_READ(reg);
1193 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); 1227 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1194 } else { 1228 } else {
1195 reg = FDI_TX_CTL(pipe); 1229 u32 val = I915_READ(FDI_TX_CTL(pipe));
1196 val = I915_READ(reg);
1197 cur_state = !!(val & FDI_TX_ENABLE); 1230 cur_state = !!(val & FDI_TX_ENABLE);
1198 } 1231 }
1199 I915_STATE_WARN(cur_state != state, 1232 I915_STATE_WARN(cur_state != state,
@@ -1206,12 +1239,10 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1206static void assert_fdi_rx(struct drm_i915_private *dev_priv, 1239static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1207 enum pipe pipe, bool state) 1240 enum pipe pipe, bool state)
1208{ 1241{
1209 int reg;
1210 u32 val; 1242 u32 val;
1211 bool cur_state; 1243 bool cur_state;
1212 1244
1213 reg = FDI_RX_CTL(pipe); 1245 val = I915_READ(FDI_RX_CTL(pipe));
1214 val = I915_READ(reg);
1215 cur_state = !!(val & FDI_RX_ENABLE); 1246 cur_state = !!(val & FDI_RX_ENABLE);
1216 I915_STATE_WARN(cur_state != state, 1247 I915_STATE_WARN(cur_state != state,
1217 "FDI RX state assertion failure (expected %s, current %s)\n", 1248 "FDI RX state assertion failure (expected %s, current %s)\n",
@@ -1223,7 +1254,6 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1223static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, 1254static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1224 enum pipe pipe) 1255 enum pipe pipe)
1225{ 1256{
1226 int reg;
1227 u32 val; 1257 u32 val;
1228 1258
1229 /* ILK FDI PLL is always enabled */ 1259 /* ILK FDI PLL is always enabled */
@@ -1234,20 +1264,17 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1234 if (HAS_DDI(dev_priv->dev)) 1264 if (HAS_DDI(dev_priv->dev))
1235 return; 1265 return;
1236 1266
1237 reg = FDI_TX_CTL(pipe); 1267 val = I915_READ(FDI_TX_CTL(pipe));
1238 val = I915_READ(reg);
1239 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); 1268 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1240} 1269}
1241 1270
1242void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, 1271void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1243 enum pipe pipe, bool state) 1272 enum pipe pipe, bool state)
1244{ 1273{
1245 int reg;
1246 u32 val; 1274 u32 val;
1247 bool cur_state; 1275 bool cur_state;
1248 1276
1249 reg = FDI_RX_CTL(pipe); 1277 val = I915_READ(FDI_RX_CTL(pipe));
1250 val = I915_READ(reg);
1251 cur_state = !!(val & FDI_RX_PLL_ENABLE); 1278 cur_state = !!(val & FDI_RX_PLL_ENABLE);
1252 I915_STATE_WARN(cur_state != state, 1279 I915_STATE_WARN(cur_state != state,
1253 "FDI RX PLL assertion failure (expected %s, current %s)\n", 1280 "FDI RX PLL assertion failure (expected %s, current %s)\n",
@@ -1303,7 +1330,7 @@ static void assert_cursor(struct drm_i915_private *dev_priv,
1303 bool cur_state; 1330 bool cur_state;
1304 1331
1305 if (IS_845G(dev) || IS_I865G(dev)) 1332 if (IS_845G(dev) || IS_I865G(dev))
1306 cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE; 1333 cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
1307 else 1334 else
1308 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; 1335 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1309 1336
@@ -1317,8 +1344,6 @@ static void assert_cursor(struct drm_i915_private *dev_priv,
1317void assert_pipe(struct drm_i915_private *dev_priv, 1344void assert_pipe(struct drm_i915_private *dev_priv,
1318 enum pipe pipe, bool state) 1345 enum pipe pipe, bool state)
1319{ 1346{
1320 int reg;
1321 u32 val;
1322 bool cur_state; 1347 bool cur_state;
1323 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1348 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1324 pipe); 1349 pipe);
@@ -1332,8 +1357,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
1332 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) { 1357 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
1333 cur_state = false; 1358 cur_state = false;
1334 } else { 1359 } else {
1335 reg = PIPECONF(cpu_transcoder); 1360 u32 val = I915_READ(PIPECONF(cpu_transcoder));
1336 val = I915_READ(reg);
1337 cur_state = !!(val & PIPECONF_ENABLE); 1361 cur_state = !!(val & PIPECONF_ENABLE);
1338 } 1362 }
1339 1363
@@ -1345,12 +1369,10 @@ void assert_pipe(struct drm_i915_private *dev_priv,
1345static void assert_plane(struct drm_i915_private *dev_priv, 1369static void assert_plane(struct drm_i915_private *dev_priv,
1346 enum plane plane, bool state) 1370 enum plane plane, bool state)
1347{ 1371{
1348 int reg;
1349 u32 val; 1372 u32 val;
1350 bool cur_state; 1373 bool cur_state;
1351 1374
1352 reg = DSPCNTR(plane); 1375 val = I915_READ(DSPCNTR(plane));
1353 val = I915_READ(reg);
1354 cur_state = !!(val & DISPLAY_PLANE_ENABLE); 1376 cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1355 I915_STATE_WARN(cur_state != state, 1377 I915_STATE_WARN(cur_state != state,
1356 "plane %c assertion failure (expected %s, current %s)\n", 1378 "plane %c assertion failure (expected %s, current %s)\n",
@@ -1364,14 +1386,11 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1364 enum pipe pipe) 1386 enum pipe pipe)
1365{ 1387{
1366 struct drm_device *dev = dev_priv->dev; 1388 struct drm_device *dev = dev_priv->dev;
1367 int reg, i; 1389 int i;
1368 u32 val;
1369 int cur_pipe;
1370 1390
1371 /* Primary planes are fixed to pipes on gen4+ */ 1391 /* Primary planes are fixed to pipes on gen4+ */
1372 if (INTEL_INFO(dev)->gen >= 4) { 1392 if (INTEL_INFO(dev)->gen >= 4) {
1373 reg = DSPCNTR(pipe); 1393 u32 val = I915_READ(DSPCNTR(pipe));
1374 val = I915_READ(reg);
1375 I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE, 1394 I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
1376 "plane %c assertion failure, should be disabled but not\n", 1395 "plane %c assertion failure, should be disabled but not\n",
1377 plane_name(pipe)); 1396 plane_name(pipe));
@@ -1380,9 +1399,8 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1380 1399
1381 /* Need to check both planes against the pipe */ 1400 /* Need to check both planes against the pipe */
1382 for_each_pipe(dev_priv, i) { 1401 for_each_pipe(dev_priv, i) {
1383 reg = DSPCNTR(i); 1402 u32 val = I915_READ(DSPCNTR(i));
1384 val = I915_READ(reg); 1403 enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1385 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1386 DISPPLANE_SEL_PIPE_SHIFT; 1404 DISPPLANE_SEL_PIPE_SHIFT;
1387 I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, 1405 I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1388 "plane %c assertion failure, should be off on pipe %c but is still active\n", 1406 "plane %c assertion failure, should be off on pipe %c but is still active\n",
@@ -1394,33 +1412,29 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1394 enum pipe pipe) 1412 enum pipe pipe)
1395{ 1413{
1396 struct drm_device *dev = dev_priv->dev; 1414 struct drm_device *dev = dev_priv->dev;
1397 int reg, sprite; 1415 int sprite;
1398 u32 val;
1399 1416
1400 if (INTEL_INFO(dev)->gen >= 9) { 1417 if (INTEL_INFO(dev)->gen >= 9) {
1401 for_each_sprite(dev_priv, pipe, sprite) { 1418 for_each_sprite(dev_priv, pipe, sprite) {
1402 val = I915_READ(PLANE_CTL(pipe, sprite)); 1419 u32 val = I915_READ(PLANE_CTL(pipe, sprite));
1403 I915_STATE_WARN(val & PLANE_CTL_ENABLE, 1420 I915_STATE_WARN(val & PLANE_CTL_ENABLE,
1404 "plane %d assertion failure, should be off on pipe %c but is still active\n", 1421 "plane %d assertion failure, should be off on pipe %c but is still active\n",
1405 sprite, pipe_name(pipe)); 1422 sprite, pipe_name(pipe));
1406 } 1423 }
1407 } else if (IS_VALLEYVIEW(dev)) { 1424 } else if (IS_VALLEYVIEW(dev)) {
1408 for_each_sprite(dev_priv, pipe, sprite) { 1425 for_each_sprite(dev_priv, pipe, sprite) {
1409 reg = SPCNTR(pipe, sprite); 1426 u32 val = I915_READ(SPCNTR(pipe, sprite));
1410 val = I915_READ(reg);
1411 I915_STATE_WARN(val & SP_ENABLE, 1427 I915_STATE_WARN(val & SP_ENABLE,
1412 "sprite %c assertion failure, should be off on pipe %c but is still active\n", 1428 "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1413 sprite_name(pipe, sprite), pipe_name(pipe)); 1429 sprite_name(pipe, sprite), pipe_name(pipe));
1414 } 1430 }
1415 } else if (INTEL_INFO(dev)->gen >= 7) { 1431 } else if (INTEL_INFO(dev)->gen >= 7) {
1416 reg = SPRCTL(pipe); 1432 u32 val = I915_READ(SPRCTL(pipe));
1417 val = I915_READ(reg);
1418 I915_STATE_WARN(val & SPRITE_ENABLE, 1433 I915_STATE_WARN(val & SPRITE_ENABLE,
1419 "sprite %c assertion failure, should be off on pipe %c but is still active\n", 1434 "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1420 plane_name(pipe), pipe_name(pipe)); 1435 plane_name(pipe), pipe_name(pipe));
1421 } else if (INTEL_INFO(dev)->gen >= 5) { 1436 } else if (INTEL_INFO(dev)->gen >= 5) {
1422 reg = DVSCNTR(pipe); 1437 u32 val = I915_READ(DVSCNTR(pipe));
1423 val = I915_READ(reg);
1424 I915_STATE_WARN(val & DVS_ENABLE, 1438 I915_STATE_WARN(val & DVS_ENABLE,
1425 "sprite %c assertion failure, should be off on pipe %c but is still active\n", 1439 "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1426 plane_name(pipe), pipe_name(pipe)); 1440 plane_name(pipe), pipe_name(pipe));
@@ -1449,12 +1463,10 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1449static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, 1463static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1450 enum pipe pipe) 1464 enum pipe pipe)
1451{ 1465{
1452 int reg;
1453 u32 val; 1466 u32 val;
1454 bool enabled; 1467 bool enabled;
1455 1468
1456 reg = PCH_TRANSCONF(pipe); 1469 val = I915_READ(PCH_TRANSCONF(pipe));
1457 val = I915_READ(reg);
1458 enabled = !!(val & TRANS_ENABLE); 1470 enabled = !!(val & TRANS_ENABLE);
1459 I915_STATE_WARN(enabled, 1471 I915_STATE_WARN(enabled,
1460 "transcoder assertion failed, should be off on pipe %c but is still active\n", 1472 "transcoder assertion failed, should be off on pipe %c but is still active\n",
@@ -1561,21 +1573,18 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1561static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, 1573static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1562 enum pipe pipe) 1574 enum pipe pipe)
1563{ 1575{
1564 int reg;
1565 u32 val; 1576 u32 val;
1566 1577
1567 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); 1578 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1568 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); 1579 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1569 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); 1580 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1570 1581
1571 reg = PCH_ADPA; 1582 val = I915_READ(PCH_ADPA);
1572 val = I915_READ(reg);
1573 I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val), 1583 I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1574 "PCH VGA enabled on transcoder %c, should be disabled\n", 1584 "PCH VGA enabled on transcoder %c, should be disabled\n",
1575 pipe_name(pipe)); 1585 pipe_name(pipe));
1576 1586
1577 reg = PCH_LVDS; 1587 val = I915_READ(PCH_LVDS);
1578 val = I915_READ(reg);
1579 I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val), 1588 I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1580 "PCH LVDS enabled on transcoder %c, should be disabled\n", 1589 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1581 pipe_name(pipe)); 1590 pipe_name(pipe));
@@ -1585,26 +1594,6 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1585 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); 1594 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1586} 1595}
1587 1596
1588static void intel_init_dpio(struct drm_device *dev)
1589{
1590 struct drm_i915_private *dev_priv = dev->dev_private;
1591
1592 if (!IS_VALLEYVIEW(dev))
1593 return;
1594
1595 /*
1596 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
1597 * CHV x1 PHY (DP/HDMI D)
1598 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
1599 */
1600 if (IS_CHERRYVIEW(dev)) {
1601 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
1602 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
1603 } else {
1604 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
1605 }
1606}
1607
1608static void vlv_enable_pll(struct intel_crtc *crtc, 1597static void vlv_enable_pll(struct intel_crtc *crtc,
1609 const struct intel_crtc_state *pipe_config) 1598 const struct intel_crtc_state *pipe_config)
1610{ 1599{
@@ -1840,17 +1829,6 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1840 val &= ~DPIO_DCLKP_EN; 1829 val &= ~DPIO_DCLKP_EN;
1841 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val); 1830 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1842 1831
1843 /* disable left/right clock distribution */
1844 if (pipe != PIPE_B) {
1845 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
1846 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
1847 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
1848 } else {
1849 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
1850 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
1851 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
1852 }
1853
1854 mutex_unlock(&dev_priv->sb_lock); 1832 mutex_unlock(&dev_priv->sb_lock);
1855} 1833}
1856 1834
@@ -2051,9 +2029,9 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
2051 assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); 2029 assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
2052 2030
2053 /* Workaround: set timing override bit. */ 2031 /* Workaround: set timing override bit. */
2054 val = I915_READ(_TRANSA_CHICKEN2); 2032 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
2055 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 2033 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
2056 I915_WRITE(_TRANSA_CHICKEN2, val); 2034 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
2057 2035
2058 val = TRANS_ENABLE; 2036 val = TRANS_ENABLE;
2059 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder)); 2037 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
@@ -2111,9 +2089,9 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
2111 DRM_ERROR("Failed to disable PCH transcoder\n"); 2089 DRM_ERROR("Failed to disable PCH transcoder\n");
2112 2090
2113 /* Workaround: clear timing override bit. */ 2091 /* Workaround: clear timing override bit. */
2114 val = I915_READ(_TRANSA_CHICKEN2); 2092 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
2115 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 2093 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
2116 I915_WRITE(_TRANSA_CHICKEN2, val); 2094 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
2117} 2095}
2118 2096
2119/** 2097/**
@@ -2238,7 +2216,7 @@ static bool need_vtd_wa(struct drm_device *dev)
2238 2216
2239unsigned int 2217unsigned int
2240intel_tile_height(struct drm_device *dev, uint32_t pixel_format, 2218intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
2241 uint64_t fb_format_modifier) 2219 uint64_t fb_format_modifier, unsigned int plane)
2242{ 2220{
2243 unsigned int tile_height; 2221 unsigned int tile_height;
2244 uint32_t pixel_bytes; 2222 uint32_t pixel_bytes;
@@ -2254,7 +2232,7 @@ intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
2254 tile_height = 32; 2232 tile_height = 32;
2255 break; 2233 break;
2256 case I915_FORMAT_MOD_Yf_TILED: 2234 case I915_FORMAT_MOD_Yf_TILED:
2257 pixel_bytes = drm_format_plane_cpp(pixel_format, 0); 2235 pixel_bytes = drm_format_plane_cpp(pixel_format, plane);
2258 switch (pixel_bytes) { 2236 switch (pixel_bytes) {
2259 default: 2237 default:
2260 case 1: 2238 case 1:
@@ -2288,7 +2266,7 @@ intel_fb_align_height(struct drm_device *dev, unsigned int height,
2288 uint32_t pixel_format, uint64_t fb_format_modifier) 2266 uint32_t pixel_format, uint64_t fb_format_modifier)
2289{ 2267{
2290 return ALIGN(height, intel_tile_height(dev, pixel_format, 2268 return ALIGN(height, intel_tile_height(dev, pixel_format,
2291 fb_format_modifier)); 2269 fb_format_modifier, 0));
2292} 2270}
2293 2271
2294static int 2272static int
@@ -2311,15 +2289,27 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
2311 info->height = fb->height; 2289 info->height = fb->height;
2312 info->pixel_format = fb->pixel_format; 2290 info->pixel_format = fb->pixel_format;
2313 info->pitch = fb->pitches[0]; 2291 info->pitch = fb->pitches[0];
2292 info->uv_offset = fb->offsets[1];
2314 info->fb_modifier = fb->modifier[0]; 2293 info->fb_modifier = fb->modifier[0];
2315 2294
2316 tile_height = intel_tile_height(fb->dev, fb->pixel_format, 2295 tile_height = intel_tile_height(fb->dev, fb->pixel_format,
2317 fb->modifier[0]); 2296 fb->modifier[0], 0);
2318 tile_pitch = PAGE_SIZE / tile_height; 2297 tile_pitch = PAGE_SIZE / tile_height;
2319 info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch); 2298 info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
2320 info->height_pages = DIV_ROUND_UP(fb->height, tile_height); 2299 info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
2321 info->size = info->width_pages * info->height_pages * PAGE_SIZE; 2300 info->size = info->width_pages * info->height_pages * PAGE_SIZE;
2322 2301
2302 if (info->pixel_format == DRM_FORMAT_NV12) {
2303 tile_height = intel_tile_height(fb->dev, fb->pixel_format,
2304 fb->modifier[0], 1);
2305 tile_pitch = PAGE_SIZE / tile_height;
2306 info->width_pages_uv = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
2307 info->height_pages_uv = DIV_ROUND_UP(fb->height / 2,
2308 tile_height);
2309 info->size_uv = info->width_pages_uv * info->height_pages_uv *
2310 PAGE_SIZE;
2311 }
2312
2323 return 0; 2313 return 0;
2324} 2314}
2325 2315
@@ -2408,22 +2398,24 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2408 * framebuffer compression. For simplicity, we always install 2398 * framebuffer compression. For simplicity, we always install
2409 * a fence as the cost is not that onerous. 2399 * a fence as the cost is not that onerous.
2410 */ 2400 */
2411 ret = i915_gem_object_get_fence(obj); 2401 if (view.type == I915_GGTT_VIEW_NORMAL) {
2412 if (ret == -EDEADLK) { 2402 ret = i915_gem_object_get_fence(obj);
2413 /* 2403 if (ret == -EDEADLK) {
2414 * -EDEADLK means there are no free fences 2404 /*
2415 * no pending flips. 2405 * -EDEADLK means there are no free fences
2416 * 2406 * no pending flips.
2417 * This is propagated to atomic, but it uses 2407 *
2418 * -EDEADLK to force a locking recovery, so 2408 * This is propagated to atomic, but it uses
2419 * change the returned error to -EBUSY. 2409 * -EDEADLK to force a locking recovery, so
2420 */ 2410 * change the returned error to -EBUSY.
2421 ret = -EBUSY; 2411 */
2422 goto err_unpin; 2412 ret = -EBUSY;
2423 } else if (ret) 2413 goto err_unpin;
2424 goto err_unpin; 2414 } else if (ret)
2415 goto err_unpin;
2425 2416
2426 i915_gem_object_pin_fence(obj); 2417 i915_gem_object_pin_fence(obj);
2418 }
2427 2419
2428 dev_priv->mm.interruptible = true; 2420 dev_priv->mm.interruptible = true;
2429 intel_runtime_pm_put(dev_priv); 2421 intel_runtime_pm_put(dev_priv);
@@ -2449,7 +2441,9 @@ static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
2449 ret = intel_fill_fb_ggtt_view(&view, fb, plane_state); 2441 ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
2450 WARN_ONCE(ret, "Couldn't get view from plane state!"); 2442 WARN_ONCE(ret, "Couldn't get view from plane state!");
2451 2443
2452 i915_gem_object_unpin_fence(obj); 2444 if (view.type == I915_GGTT_VIEW_NORMAL)
2445 i915_gem_object_unpin_fence(obj);
2446
2453 i915_gem_object_unpin_from_display_plane(obj, &view); 2447 i915_gem_object_unpin_from_display_plane(obj, &view);
2454} 2448}
2455 2449
@@ -2534,6 +2528,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2534 struct intel_initial_plane_config *plane_config) 2528 struct intel_initial_plane_config *plane_config)
2535{ 2529{
2536 struct drm_device *dev = crtc->base.dev; 2530 struct drm_device *dev = crtc->base.dev;
2531 struct drm_i915_private *dev_priv = to_i915(dev);
2537 struct drm_i915_gem_object *obj = NULL; 2532 struct drm_i915_gem_object *obj = NULL;
2538 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 2533 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2539 struct drm_framebuffer *fb = &plane_config->fb->base; 2534 struct drm_framebuffer *fb = &plane_config->fb->base;
@@ -2546,6 +2541,12 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2546 if (plane_config->size == 0) 2541 if (plane_config->size == 0)
2547 return false; 2542 return false;
2548 2543
2544 /* If the FB is too big, just don't use it since fbdev is not very
2545 * important and we should probably use that space with FBC or other
2546 * features. */
2547 if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
2548 return false;
2549
2549 obj = i915_gem_object_create_stolen_for_preallocated(dev, 2550 obj = i915_gem_object_create_stolen_for_preallocated(dev,
2550 base_aligned, 2551 base_aligned,
2551 base_aligned, 2552 base_aligned,
@@ -2778,6 +2779,9 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
2778 (intel_crtc->config->pipe_src_w - 1) * pixel_size; 2779 (intel_crtc->config->pipe_src_w - 1) * pixel_size;
2779 } 2780 }
2780 2781
2782 intel_crtc->adjusted_x = x;
2783 intel_crtc->adjusted_y = y;
2784
2781 I915_WRITE(reg, dspcntr); 2785 I915_WRITE(reg, dspcntr);
2782 2786
2783 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 2787 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
@@ -2878,6 +2882,9 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2878 } 2882 }
2879 } 2883 }
2880 2884
2885 intel_crtc->adjusted_x = x;
2886 intel_crtc->adjusted_y = y;
2887
2881 I915_WRITE(reg, dspcntr); 2888 I915_WRITE(reg, dspcntr);
2882 2889
2883 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 2890 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
@@ -2927,14 +2934,29 @@ u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
2927} 2934}
2928 2935
2929unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane, 2936unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
2930 struct drm_i915_gem_object *obj) 2937 struct drm_i915_gem_object *obj,
2938 unsigned int plane)
2931{ 2939{
2932 const struct i915_ggtt_view *view = &i915_ggtt_view_normal; 2940 const struct i915_ggtt_view *view = &i915_ggtt_view_normal;
2941 struct i915_vma *vma;
2942 unsigned char *offset;
2933 2943
2934 if (intel_rotation_90_or_270(intel_plane->base.state->rotation)) 2944 if (intel_rotation_90_or_270(intel_plane->base.state->rotation))
2935 view = &i915_ggtt_view_rotated; 2945 view = &i915_ggtt_view_rotated;
2936 2946
2937 return i915_gem_obj_ggtt_offset_view(obj, view); 2947 vma = i915_gem_obj_to_ggtt_view(obj, view);
2948 if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
2949 view->type))
2950 return -1;
2951
2952 offset = (unsigned char *)vma->node.start;
2953
2954 if (plane == 1) {
2955 offset += vma->ggtt_view.rotation_info.uv_start_page *
2956 PAGE_SIZE;
2957 }
2958
2959 return (unsigned long)offset;
2938} 2960}
2939 2961
2940static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) 2962static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
@@ -2945,8 +2967,6 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
2945 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0); 2967 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
2946 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); 2968 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
2947 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0); 2969 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
2948 DRM_DEBUG_KMS("CRTC:%d Disabled scaler id %u.%u\n",
2949 intel_crtc->base.base.id, intel_crtc->pipe, id);
2950} 2970}
2951 2971
2952/* 2972/*
@@ -3092,34 +3112,26 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
3092 obj = intel_fb_obj(fb); 3112 obj = intel_fb_obj(fb);
3093 stride_div = intel_fb_stride_alignment(dev, fb->modifier[0], 3113 stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
3094 fb->pixel_format); 3114 fb->pixel_format);
3095 surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj); 3115 surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
3096 3116
3097 /* 3117 WARN_ON(drm_rect_width(&plane_state->src) == 0);
3098 * FIXME: intel_plane_state->src, dst aren't set when transitional 3118
3099 * update_plane helpers are called from legacy paths. 3119 scaler_id = plane_state->scaler_id;
3100 * Once full atomic crtc is available, below check can be avoided. 3120 src_x = plane_state->src.x1 >> 16;
3101 */ 3121 src_y = plane_state->src.y1 >> 16;
3102 if (drm_rect_width(&plane_state->src)) { 3122 src_w = drm_rect_width(&plane_state->src) >> 16;
3103 scaler_id = plane_state->scaler_id; 3123 src_h = drm_rect_height(&plane_state->src) >> 16;
3104 src_x = plane_state->src.x1 >> 16; 3124 dst_x = plane_state->dst.x1;
3105 src_y = plane_state->src.y1 >> 16; 3125 dst_y = plane_state->dst.y1;
3106 src_w = drm_rect_width(&plane_state->src) >> 16; 3126 dst_w = drm_rect_width(&plane_state->dst);
3107 src_h = drm_rect_height(&plane_state->src) >> 16; 3127 dst_h = drm_rect_height(&plane_state->dst);
3108 dst_x = plane_state->dst.x1; 3128
3109 dst_y = plane_state->dst.y1; 3129 WARN_ON(x != src_x || y != src_y);
3110 dst_w = drm_rect_width(&plane_state->dst);
3111 dst_h = drm_rect_height(&plane_state->dst);
3112
3113 WARN_ON(x != src_x || y != src_y);
3114 } else {
3115 src_w = intel_crtc->config->pipe_src_w;
3116 src_h = intel_crtc->config->pipe_src_h;
3117 }
3118 3130
3119 if (intel_rotation_90_or_270(rotation)) { 3131 if (intel_rotation_90_or_270(rotation)) {
3120 /* stride = Surface height in tiles */ 3132 /* stride = Surface height in tiles */
3121 tile_height = intel_tile_height(dev, fb->pixel_format, 3133 tile_height = intel_tile_height(dev, fb->pixel_format,
3122 fb->modifier[0]); 3134 fb->modifier[0], 0);
3123 stride = DIV_ROUND_UP(fb->height, tile_height); 3135 stride = DIV_ROUND_UP(fb->height, tile_height);
3124 x_offset = stride * tile_height - y - src_h; 3136 x_offset = stride * tile_height - y - src_h;
3125 y_offset = x; 3137 y_offset = x;
@@ -3132,6 +3144,9 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
3132 } 3144 }
3133 plane_offset = y_offset << 16 | x_offset; 3145 plane_offset = y_offset << 16 | x_offset;
3134 3146
3147 intel_crtc->adjusted_x = x_offset;
3148 intel_crtc->adjusted_y = y_offset;
3149
3135 I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl); 3150 I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
3136 I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset); 3151 I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
3137 I915_WRITE(PLANE_SIZE(pipe, 0), plane_size); 3152 I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
@@ -3188,24 +3203,20 @@ static void intel_complete_page_flips(struct drm_device *dev)
3188 3203
3189static void intel_update_primary_planes(struct drm_device *dev) 3204static void intel_update_primary_planes(struct drm_device *dev)
3190{ 3205{
3191 struct drm_i915_private *dev_priv = dev->dev_private;
3192 struct drm_crtc *crtc; 3206 struct drm_crtc *crtc;
3193 3207
3194 for_each_crtc(dev, crtc) { 3208 for_each_crtc(dev, crtc) {
3195 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3209 struct intel_plane *plane = to_intel_plane(crtc->primary);
3210 struct intel_plane_state *plane_state;
3196 3211
3197 drm_modeset_lock(&crtc->mutex, NULL); 3212 drm_modeset_lock_crtc(crtc, &plane->base);
3198 /* 3213
3199 * FIXME: Once we have proper support for primary planes (and 3214 plane_state = to_intel_plane_state(plane->base.state);
3200 * disabling them without disabling the entire crtc) allow again 3215
3201 * a NULL crtc->primary->fb. 3216 if (plane_state->base.fb)
3202 */ 3217 plane->commit_plane(&plane->base, plane_state);
3203 if (intel_crtc->active && crtc->primary->fb) 3218
3204 dev_priv->display.update_primary_plane(crtc, 3219 drm_modeset_unlock_crtc(crtc);
3205 crtc->primary->fb,
3206 crtc->x,
3207 crtc->y);
3208 drm_modeset_unlock(&crtc->mutex);
3209 } 3220 }
3210} 3221}
3211 3222
@@ -3249,6 +3260,9 @@ void intel_finish_reset(struct drm_device *dev)
3249 * so update the base address of all primary 3260 * so update the base address of all primary
3250 * planes to the the last fb to make sure we're 3261 * planes to the the last fb to make sure we're
3251 * showing the correct fb after a reset. 3262 * showing the correct fb after a reset.
3263 *
3264 * FIXME: Atomic will make this obsolete since we won't schedule
3265 * CS-based flips (which might get lost in gpu resets) any more.
3252 */ 3266 */
3253 intel_update_primary_planes(dev); 3267 intel_update_primary_planes(dev);
3254 return; 3268 return;
@@ -3319,14 +3333,23 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3319 return pending; 3333 return pending;
3320} 3334}
3321 3335
3322static void intel_update_pipe_size(struct intel_crtc *crtc) 3336static void intel_update_pipe_config(struct intel_crtc *crtc,
3337 struct intel_crtc_state *old_crtc_state)
3323{ 3338{
3324 struct drm_device *dev = crtc->base.dev; 3339 struct drm_device *dev = crtc->base.dev;
3325 struct drm_i915_private *dev_priv = dev->dev_private; 3340 struct drm_i915_private *dev_priv = dev->dev_private;
3326 const struct drm_display_mode *adjusted_mode; 3341 struct intel_crtc_state *pipe_config =
3342 to_intel_crtc_state(crtc->base.state);
3327 3343
3328 if (!i915.fastboot) 3344 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
3329 return; 3345 crtc->base.mode = crtc->base.state->mode;
3346
3347 DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
3348 old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
3349 pipe_config->pipe_src_w, pipe_config->pipe_src_h);
3350
3351 if (HAS_DDI(dev))
3352 intel_set_pipe_csc(&crtc->base);
3330 3353
3331 /* 3354 /*
3332 * Update pipe size and adjust fitter if needed: the reason for this is 3355 * Update pipe size and adjust fitter if needed: the reason for this is
@@ -3335,27 +3358,24 @@ static void intel_update_pipe_size(struct intel_crtc *crtc)
3335 * fastboot case, we'll flip, but if we don't update the pipesrc and 3358 * fastboot case, we'll flip, but if we don't update the pipesrc and
3336 * pfit state, we'll end up with a big fb scanned out into the wrong 3359 * pfit state, we'll end up with a big fb scanned out into the wrong
3337 * sized surface. 3360 * sized surface.
3338 *
3339 * To fix this properly, we need to hoist the checks up into
3340 * compute_mode_changes (or above), check the actual pfit state and
3341 * whether the platform allows pfit disable with pipe active, and only
3342 * then update the pipesrc and pfit state, even on the flip path.
3343 */ 3361 */
3344 3362
3345 adjusted_mode = &crtc->config->base.adjusted_mode;
3346
3347 I915_WRITE(PIPESRC(crtc->pipe), 3363 I915_WRITE(PIPESRC(crtc->pipe),
3348 ((adjusted_mode->crtc_hdisplay - 1) << 16) | 3364 ((pipe_config->pipe_src_w - 1) << 16) |
3349 (adjusted_mode->crtc_vdisplay - 1)); 3365 (pipe_config->pipe_src_h - 1));
3350 if (!crtc->config->pch_pfit.enabled && 3366
3351 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || 3367 /* on skylake this is done by detaching scalers */
3352 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { 3368 if (INTEL_INFO(dev)->gen >= 9) {
3353 I915_WRITE(PF_CTL(crtc->pipe), 0); 3369 skl_detach_scalers(crtc);
3354 I915_WRITE(PF_WIN_POS(crtc->pipe), 0); 3370
3355 I915_WRITE(PF_WIN_SZ(crtc->pipe), 0); 3371 if (pipe_config->pch_pfit.enabled)
3372 skylake_pfit_enable(crtc);
3373 } else if (HAS_PCH_SPLIT(dev)) {
3374 if (pipe_config->pch_pfit.enabled)
3375 ironlake_pfit_enable(crtc);
3376 else if (old_crtc_state->pch_pfit.enabled)
3377 ironlake_pfit_disable(crtc, true);
3356 } 3378 }
3357 crtc->config->pipe_src_w = adjusted_mode->crtc_hdisplay;
3358 crtc->config->pipe_src_h = adjusted_mode->crtc_vdisplay;
3359} 3379}
3360 3380
3361static void intel_fdi_normal_train(struct drm_crtc *crtc) 3381static void intel_fdi_normal_train(struct drm_crtc *crtc)
@@ -4401,8 +4421,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4401int skl_update_scaler_crtc(struct intel_crtc_state *state) 4421int skl_update_scaler_crtc(struct intel_crtc_state *state)
4402{ 4422{
4403 struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc); 4423 struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
4404 struct drm_display_mode *adjusted_mode = 4424 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4405 &state->base.adjusted_mode;
4406 4425
4407 DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n", 4426 DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
4408 intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX); 4427 intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
@@ -4410,7 +4429,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
4410 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, 4429 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4411 &state->scaler_state.scaler_id, DRM_ROTATE_0, 4430 &state->scaler_state.scaler_id, DRM_ROTATE_0,
4412 state->pipe_src_w, state->pipe_src_h, 4431 state->pipe_src_w, state->pipe_src_h,
4413 adjusted_mode->hdisplay, adjusted_mode->vdisplay); 4432 adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
4414} 4433}
4415 4434
4416/** 4435/**
@@ -4603,7 +4622,6 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
4603 struct drm_i915_private *dev_priv = dev->dev_private; 4622 struct drm_i915_private *dev_priv = dev->dev_private;
4604 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4623 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4605 enum pipe pipe = intel_crtc->pipe; 4624 enum pipe pipe = intel_crtc->pipe;
4606 int palreg = PALETTE(pipe);
4607 int i; 4625 int i;
4608 bool reenable_ips = false; 4626 bool reenable_ips = false;
4609 4627
@@ -4618,10 +4636,6 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
4618 assert_pll_enabled(dev_priv, pipe); 4636 assert_pll_enabled(dev_priv, pipe);
4619 } 4637 }
4620 4638
4621 /* use legacy palette for Ironlake */
4622 if (!HAS_GMCH_DISPLAY(dev))
4623 palreg = LGC_PALETTE(pipe);
4624
4625 /* Workaround : Do not read or write the pipe palette/gamma data while 4639 /* Workaround : Do not read or write the pipe palette/gamma data while
4626 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. 4640 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
4627 */ 4641 */
@@ -4633,7 +4647,14 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
4633 } 4647 }
4634 4648
4635 for (i = 0; i < 256; i++) { 4649 for (i = 0; i < 256; i++) {
4636 I915_WRITE(palreg + 4 * i, 4650 u32 palreg;
4651
4652 if (HAS_GMCH_DISPLAY(dev))
4653 palreg = PALETTE(pipe, i);
4654 else
4655 palreg = LGC_PALETTE(pipe, i);
4656
4657 I915_WRITE(palreg,
4637 (intel_crtc->lut_r[i] << 16) | 4658 (intel_crtc->lut_r[i] << 16) |
4638 (intel_crtc->lut_g[i] << 8) | 4659 (intel_crtc->lut_g[i] << 8) |
4639 intel_crtc->lut_b[i]); 4660 intel_crtc->lut_b[i]);
@@ -4931,6 +4952,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4931 int pipe = intel_crtc->pipe, hsw_workaround_pipe; 4952 int pipe = intel_crtc->pipe, hsw_workaround_pipe;
4932 struct intel_crtc_state *pipe_config = 4953 struct intel_crtc_state *pipe_config =
4933 to_intel_crtc_state(crtc->state); 4954 to_intel_crtc_state(crtc->state);
4955 bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
4934 4956
4935 if (WARN_ON(intel_crtc->active)) 4957 if (WARN_ON(intel_crtc->active))
4936 return; 4958 return;
@@ -4960,9 +4982,12 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4960 intel_crtc->active = true; 4982 intel_crtc->active = true;
4961 4983
4962 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 4984 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4963 for_each_encoder_on_crtc(dev, crtc, encoder) 4985 for_each_encoder_on_crtc(dev, crtc, encoder) {
4986 if (encoder->pre_pll_enable)
4987 encoder->pre_pll_enable(encoder);
4964 if (encoder->pre_enable) 4988 if (encoder->pre_enable)
4965 encoder->pre_enable(encoder); 4989 encoder->pre_enable(encoder);
4990 }
4966 4991
4967 if (intel_crtc->config->has_pch_encoder) { 4992 if (intel_crtc->config->has_pch_encoder) {
4968 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, 4993 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
@@ -4970,14 +4995,13 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4970 dev_priv->display.fdi_link_train(crtc); 4995 dev_priv->display.fdi_link_train(crtc);
4971 } 4996 }
4972 4997
4973 intel_ddi_enable_pipe_clock(intel_crtc); 4998 if (!is_dsi)
4999 intel_ddi_enable_pipe_clock(intel_crtc);
4974 5000
4975 if (INTEL_INFO(dev)->gen == 9) 5001 if (INTEL_INFO(dev)->gen >= 9)
4976 skylake_pfit_enable(intel_crtc); 5002 skylake_pfit_enable(intel_crtc);
4977 else if (INTEL_INFO(dev)->gen < 9)
4978 ironlake_pfit_enable(intel_crtc);
4979 else 5003 else
4980 MISSING_CASE(INTEL_INFO(dev)->gen); 5004 ironlake_pfit_enable(intel_crtc);
4981 5005
4982 /* 5006 /*
4983 * On ILK+ LUT must be loaded before the pipe is running but with 5007 * On ILK+ LUT must be loaded before the pipe is running but with
@@ -4986,7 +5010,8 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4986 intel_crtc_load_lut(crtc); 5010 intel_crtc_load_lut(crtc);
4987 5011
4988 intel_ddi_set_pipe_settings(crtc); 5012 intel_ddi_set_pipe_settings(crtc);
4989 intel_ddi_enable_transcoder_func(crtc); 5013 if (!is_dsi)
5014 intel_ddi_enable_transcoder_func(crtc);
4990 5015
4991 intel_update_watermarks(crtc); 5016 intel_update_watermarks(crtc);
4992 intel_enable_pipe(intel_crtc); 5017 intel_enable_pipe(intel_crtc);
@@ -4994,7 +5019,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4994 if (intel_crtc->config->has_pch_encoder) 5019 if (intel_crtc->config->has_pch_encoder)
4995 lpt_pch_enable(crtc); 5020 lpt_pch_enable(crtc);
4996 5021
4997 if (intel_crtc->config->dp_encoder_is_mst) 5022 if (intel_crtc->config->dp_encoder_is_mst && !is_dsi)
4998 intel_ddi_set_vc_payload_alloc(crtc, true); 5023 intel_ddi_set_vc_payload_alloc(crtc, true);
4999 5024
5000 assert_vblank_disabled(crtc); 5025 assert_vblank_disabled(crtc);
@@ -5014,7 +5039,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
5014 } 5039 }
5015} 5040}
5016 5041
5017static void ironlake_pfit_disable(struct intel_crtc *crtc) 5042static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
5018{ 5043{
5019 struct drm_device *dev = crtc->base.dev; 5044 struct drm_device *dev = crtc->base.dev;
5020 struct drm_i915_private *dev_priv = dev->dev_private; 5045 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5022,7 +5047,7 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc)
5022 5047
5023 /* To avoid upsetting the power well on haswell only disable the pfit if 5048 /* To avoid upsetting the power well on haswell only disable the pfit if
5024 * it's in use. The hw state code will make sure we get this right. */ 5049 * it's in use. The hw state code will make sure we get this right. */
5025 if (crtc->config->pch_pfit.enabled) { 5050 if (force || crtc->config->pch_pfit.enabled) {
5026 I915_WRITE(PF_CTL(pipe), 0); 5051 I915_WRITE(PF_CTL(pipe), 0);
5027 I915_WRITE(PF_WIN_POS(pipe), 0); 5052 I915_WRITE(PF_WIN_POS(pipe), 0);
5028 I915_WRITE(PF_WIN_SZ(pipe), 0); 5053 I915_WRITE(PF_WIN_SZ(pipe), 0);
@@ -5049,7 +5074,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
5049 5074
5050 intel_disable_pipe(intel_crtc); 5075 intel_disable_pipe(intel_crtc);
5051 5076
5052 ironlake_pfit_disable(intel_crtc); 5077 ironlake_pfit_disable(intel_crtc, false);
5053 5078
5054 if (intel_crtc->config->has_pch_encoder) 5079 if (intel_crtc->config->has_pch_encoder)
5055 ironlake_fdi_disable(crtc); 5080 ironlake_fdi_disable(crtc);
@@ -5078,9 +5103,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
5078 5103
5079 ironlake_fdi_pll_disable(intel_crtc); 5104 ironlake_fdi_pll_disable(intel_crtc);
5080 } 5105 }
5081
5082 intel_crtc->active = false;
5083 intel_update_watermarks(crtc);
5084} 5106}
5085 5107
5086static void haswell_crtc_disable(struct drm_crtc *crtc) 5108static void haswell_crtc_disable(struct drm_crtc *crtc)
@@ -5090,6 +5112,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
5090 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5112 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5091 struct intel_encoder *encoder; 5113 struct intel_encoder *encoder;
5092 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 5114 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
5115 bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
5093 5116
5094 for_each_encoder_on_crtc(dev, crtc, encoder) { 5117 for_each_encoder_on_crtc(dev, crtc, encoder) {
5095 intel_opregion_notify_encoder(encoder, false); 5118 intel_opregion_notify_encoder(encoder, false);
@@ -5107,16 +5130,16 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
5107 if (intel_crtc->config->dp_encoder_is_mst) 5130 if (intel_crtc->config->dp_encoder_is_mst)
5108 intel_ddi_set_vc_payload_alloc(crtc, false); 5131 intel_ddi_set_vc_payload_alloc(crtc, false);
5109 5132
5110 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); 5133 if (!is_dsi)
5134 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
5111 5135
5112 if (INTEL_INFO(dev)->gen == 9) 5136 if (INTEL_INFO(dev)->gen >= 9)
5113 skylake_scaler_disable(intel_crtc); 5137 skylake_scaler_disable(intel_crtc);
5114 else if (INTEL_INFO(dev)->gen < 9)
5115 ironlake_pfit_disable(intel_crtc);
5116 else 5138 else
5117 MISSING_CASE(INTEL_INFO(dev)->gen); 5139 ironlake_pfit_disable(intel_crtc, false);
5118 5140
5119 intel_ddi_disable_pipe_clock(intel_crtc); 5141 if (!is_dsi)
5142 intel_ddi_disable_pipe_clock(intel_crtc);
5120 5143
5121 if (intel_crtc->config->has_pch_encoder) { 5144 if (intel_crtc->config->has_pch_encoder) {
5122 lpt_disable_pch_transcoder(dev_priv); 5145 lpt_disable_pch_transcoder(dev_priv);
@@ -5126,9 +5149,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
5126 for_each_encoder_on_crtc(dev, crtc, encoder) 5149 for_each_encoder_on_crtc(dev, crtc, encoder)
5127 if (encoder->post_disable) 5150 if (encoder->post_disable)
5128 encoder->post_disable(encoder); 5151 encoder->post_disable(encoder);
5129
5130 intel_crtc->active = false;
5131 intel_update_watermarks(crtc);
5132} 5152}
5133 5153
5134static void i9xx_pfit_enable(struct intel_crtc *crtc) 5154static void i9xx_pfit_enable(struct intel_crtc *crtc)
@@ -5286,6 +5306,21 @@ static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
5286 modeset_put_power_domains(dev_priv, put_domains[i]); 5306 modeset_put_power_domains(dev_priv, put_domains[i]);
5287} 5307}
5288 5308
5309static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5310{
5311 int max_cdclk_freq = dev_priv->max_cdclk_freq;
5312
5313 if (INTEL_INFO(dev_priv)->gen >= 9 ||
5314 IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5315 return max_cdclk_freq;
5316 else if (IS_CHERRYVIEW(dev_priv))
5317 return max_cdclk_freq*95/100;
5318 else if (INTEL_INFO(dev_priv)->gen < 4)
5319 return 2*max_cdclk_freq*90/100;
5320 else
5321 return max_cdclk_freq*90/100;
5322}
5323
5289static void intel_update_max_cdclk(struct drm_device *dev) 5324static void intel_update_max_cdclk(struct drm_device *dev)
5290{ 5325{
5291 struct drm_i915_private *dev_priv = dev->dev_private; 5326 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5325,8 +5360,13 @@ static void intel_update_max_cdclk(struct drm_device *dev)
5325 dev_priv->max_cdclk_freq = dev_priv->cdclk_freq; 5360 dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
5326 } 5361 }
5327 5362
5363 dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
5364
5328 DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n", 5365 DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
5329 dev_priv->max_cdclk_freq); 5366 dev_priv->max_cdclk_freq);
5367
5368 DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
5369 dev_priv->max_dotclk_freq);
5330} 5370}
5331 5371
5332static void intel_update_cdclk(struct drm_device *dev) 5372static void intel_update_cdclk(struct drm_device *dev)
@@ -5702,10 +5742,16 @@ void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
5702 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) 5742 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5703 DRM_ERROR("DBuf power disable timeout\n"); 5743 DRM_ERROR("DBuf power disable timeout\n");
5704 5744
5705 /* disable DPLL0 */ 5745 /*
5706 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE); 5746 * DMC assumes ownership of LCPLL and will get confused if we touch it.
5707 if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1)) 5747 */
5708 DRM_ERROR("Couldn't disable DPLL0\n"); 5748 if (dev_priv->csr.dmc_payload) {
5749 /* disable DPLL0 */
5750 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) &
5751 ~LCPLL_PLL_ENABLE);
5752 if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
5753 DRM_ERROR("Couldn't disable DPLL0\n");
5754 }
5709 5755
5710 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); 5756 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
5711} 5757}
@@ -5742,20 +5788,6 @@ void skl_init_cdclk(struct drm_i915_private *dev_priv)
5742 DRM_ERROR("DBuf power enable timeout\n"); 5788 DRM_ERROR("DBuf power enable timeout\n");
5743} 5789}
5744 5790
5745/* returns HPLL frequency in kHz */
5746static int valleyview_get_vco(struct drm_i915_private *dev_priv)
5747{
5748 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
5749
5750 /* Obtain SKU information */
5751 mutex_lock(&dev_priv->sb_lock);
5752 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
5753 CCK_FUSE_HPLL_FREQ_MASK;
5754 mutex_unlock(&dev_priv->sb_lock);
5755
5756 return vco_freq[hpll_freq] * 1000;
5757}
5758
5759/* Adjust CDclk dividers to allow high res or save power if possible */ 5791/* Adjust CDclk dividers to allow high res or save power if possible */
5760static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) 5792static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
5761{ 5793{
@@ -5793,12 +5825,12 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
5793 5825
5794 /* adjust cdclk divider */ 5826 /* adjust cdclk divider */
5795 val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL); 5827 val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5796 val &= ~DISPLAY_FREQUENCY_VALUES; 5828 val &= ~CCK_FREQUENCY_VALUES;
5797 val |= divider; 5829 val |= divider;
5798 vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val); 5830 vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
5799 5831
5800 if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) & 5832 if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
5801 DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT), 5833 CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
5802 50)) 5834 50))
5803 DRM_ERROR("timed out waiting for CDclk change\n"); 5835 DRM_ERROR("timed out waiting for CDclk change\n");
5804 } 5836 }
@@ -5976,7 +6008,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
5976 else 6008 else
5977 default_credits = PFI_CREDIT(8); 6009 default_credits = PFI_CREDIT(8);
5978 6010
5979 if (DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 1000) >= dev_priv->rps.cz_freq) { 6011 if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
5980 /* CHV suggested value is 31 or 63 */ 6012 /* CHV suggested value is 31 or 63 */
5981 if (IS_CHERRYVIEW(dev_priv)) 6013 if (IS_CHERRYVIEW(dev_priv))
5982 credits = PFI_CREDIT_63; 6014 credits = PFI_CREDIT_63;
@@ -6044,13 +6076,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
6044 6076
6045 is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI); 6077 is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
6046 6078
6047 if (!is_dsi) {
6048 if (IS_CHERRYVIEW(dev))
6049 chv_prepare_pll(intel_crtc, intel_crtc->config);
6050 else
6051 vlv_prepare_pll(intel_crtc, intel_crtc->config);
6052 }
6053
6054 if (intel_crtc->config->has_dp_encoder) 6079 if (intel_crtc->config->has_dp_encoder)
6055 intel_dp_set_m_n(intel_crtc, M1_N1); 6080 intel_dp_set_m_n(intel_crtc, M1_N1);
6056 6081
@@ -6074,10 +6099,13 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
6074 encoder->pre_pll_enable(encoder); 6099 encoder->pre_pll_enable(encoder);
6075 6100
6076 if (!is_dsi) { 6101 if (!is_dsi) {
6077 if (IS_CHERRYVIEW(dev)) 6102 if (IS_CHERRYVIEW(dev)) {
6103 chv_prepare_pll(intel_crtc, intel_crtc->config);
6078 chv_enable_pll(intel_crtc, intel_crtc->config); 6104 chv_enable_pll(intel_crtc, intel_crtc->config);
6079 else 6105 } else {
6106 vlv_prepare_pll(intel_crtc, intel_crtc->config);
6080 vlv_enable_pll(intel_crtc, intel_crtc->config); 6107 vlv_enable_pll(intel_crtc, intel_crtc->config);
6108 }
6081 } 6109 }
6082 6110
6083 for_each_encoder_on_crtc(dev, crtc, encoder) 6111 for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -6205,11 +6233,12 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
6205 i9xx_disable_pll(intel_crtc); 6233 i9xx_disable_pll(intel_crtc);
6206 } 6234 }
6207 6235
6236 for_each_encoder_on_crtc(dev, crtc, encoder)
6237 if (encoder->post_pll_disable)
6238 encoder->post_pll_disable(encoder);
6239
6208 if (!IS_GEN2(dev)) 6240 if (!IS_GEN2(dev))
6209 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6241 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6210
6211 intel_crtc->active = false;
6212 intel_update_watermarks(crtc);
6213} 6242}
6214 6243
6215static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) 6244static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
@@ -6229,6 +6258,8 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6229 6258
6230 intel_crtc_disable_planes(crtc, crtc->state->plane_mask); 6259 intel_crtc_disable_planes(crtc, crtc->state->plane_mask);
6231 dev_priv->display.crtc_disable(crtc); 6260 dev_priv->display.crtc_disable(crtc);
6261 intel_crtc->active = false;
6262 intel_update_watermarks(crtc);
6232 intel_disable_shared_dpll(intel_crtc); 6263 intel_disable_shared_dpll(intel_crtc);
6233 6264
6234 domains = intel_crtc->enabled_power_domains; 6265 domains = intel_crtc->enabled_power_domains;
@@ -6465,7 +6496,7 @@ static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
6465 struct intel_crtc_state *pipe_config) 6496 struct intel_crtc_state *pipe_config)
6466{ 6497{
6467 struct drm_device *dev = intel_crtc->base.dev; 6498 struct drm_device *dev = intel_crtc->base.dev;
6468 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 6499 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6469 int lane, link_bw, fdi_dotclock, ret; 6500 int lane, link_bw, fdi_dotclock, ret;
6470 bool needs_recompute = false; 6501 bool needs_recompute = false;
6471 6502
@@ -6544,7 +6575,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
6544{ 6575{
6545 struct drm_device *dev = crtc->base.dev; 6576 struct drm_device *dev = crtc->base.dev;
6546 struct drm_i915_private *dev_priv = dev->dev_private; 6577 struct drm_i915_private *dev_priv = dev->dev_private;
6547 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 6578 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6548 6579
6549 /* FIXME should check pixel clock limits on all platforms */ 6580 /* FIXME should check pixel clock limits on all platforms */
6550 if (INTEL_INFO(dev)->gen < 4) { 6581 if (INTEL_INFO(dev)->gen < 4) {
@@ -6581,7 +6612,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
6581 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 6612 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
6582 */ 6613 */
6583 if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) && 6614 if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
6584 adjusted_mode->hsync_start == adjusted_mode->hdisplay) 6615 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
6585 return -EINVAL; 6616 return -EINVAL;
6586 6617
6587 if (HAS_IPS(dev)) 6618 if (HAS_IPS(dev))
@@ -6708,24 +6739,8 @@ static int haswell_get_display_clock_speed(struct drm_device *dev)
6708 6739
6709static int valleyview_get_display_clock_speed(struct drm_device *dev) 6740static int valleyview_get_display_clock_speed(struct drm_device *dev)
6710{ 6741{
6711 struct drm_i915_private *dev_priv = dev->dev_private; 6742 return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
6712 u32 val; 6743 CCK_DISPLAY_CLOCK_CONTROL);
6713 int divider;
6714
6715 if (dev_priv->hpll_freq == 0)
6716 dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
6717
6718 mutex_lock(&dev_priv->sb_lock);
6719 val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
6720 mutex_unlock(&dev_priv->sb_lock);
6721
6722 divider = val & DISPLAY_FREQUENCY_VALUES;
6723
6724 WARN((val & DISPLAY_FREQUENCY_STATUS) !=
6725 (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
6726 "cdclk change in progress\n");
6727
6728 return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
6729} 6744}
6730 6745
6731static int ilk_get_display_clock_speed(struct drm_device *dev) 6746static int ilk_get_display_clock_speed(struct drm_device *dev)
@@ -7386,8 +7401,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
7386 1 << DPIO_CHV_N_DIV_SHIFT); 7401 1 << DPIO_CHV_N_DIV_SHIFT);
7387 7402
7388 /* M2 fraction division */ 7403 /* M2 fraction division */
7389 if (bestm2_frac) 7404 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
7390 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
7391 7405
7392 /* M2 fraction division enable */ 7406 /* M2 fraction division enable */
7393 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 7407 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
@@ -7613,8 +7627,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
7613 struct drm_i915_private *dev_priv = dev->dev_private; 7627 struct drm_i915_private *dev_priv = dev->dev_private;
7614 enum pipe pipe = intel_crtc->pipe; 7628 enum pipe pipe = intel_crtc->pipe;
7615 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 7629 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
7616 struct drm_display_mode *adjusted_mode = 7630 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
7617 &intel_crtc->config->base.adjusted_mode;
7618 uint32_t crtc_vtotal, crtc_vblank_end; 7631 uint32_t crtc_vtotal, crtc_vblank_end;
7619 int vsyncshift = 0; 7632 int vsyncshift = 0;
7620 7633
@@ -8128,6 +8141,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8128 else 8141 else
8129 i9xx_crtc_clock_get(crtc, pipe_config); 8142 i9xx_crtc_clock_get(crtc, pipe_config);
8130 8143
8144 /*
8145 * Normally the dotclock is filled in by the encoder .get_config()
8146 * but in case the pipe is enabled w/o any ports we need a sane
8147 * default.
8148 */
8149 pipe_config->base.adjusted_mode.crtc_clock =
8150 pipe_config->port_clock / pipe_config->pixel_multiplier;
8151
8131 return true; 8152 return true;
8132} 8153}
8133 8154
@@ -8389,8 +8410,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
8389 8410
8390 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) 8411 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
8391 with_spread = true; 8412 with_spread = true;
8392 if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE && 8413 if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
8393 with_fdi, "LP PCH doesn't have FDI\n"))
8394 with_fdi = false; 8414 with_fdi = false;
8395 8415
8396 mutex_lock(&dev_priv->sb_lock); 8416 mutex_lock(&dev_priv->sb_lock);
@@ -8413,8 +8433,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
8413 } 8433 }
8414 } 8434 }
8415 8435
8416 reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ? 8436 reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
8417 SBI_GEN0 : SBI_DBUFF0;
8418 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 8437 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8419 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; 8438 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8420 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 8439 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
@@ -8430,8 +8449,7 @@ static void lpt_disable_clkout_dp(struct drm_device *dev)
8430 8449
8431 mutex_lock(&dev_priv->sb_lock); 8450 mutex_lock(&dev_priv->sb_lock);
8432 8451
8433 reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ? 8452 reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
8434 SBI_GEN0 : SBI_DBUFF0;
8435 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 8453 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8436 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; 8454 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8437 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 8455 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
@@ -9443,7 +9461,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9443 9461
9444 DRM_DEBUG_KMS("Enabling package C8+\n"); 9462 DRM_DEBUG_KMS("Enabling package C8+\n");
9445 9463
9446 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { 9464 if (HAS_PCH_LPT_LP(dev)) {
9447 val = I915_READ(SOUTH_DSPCLK_GATE_D); 9465 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9448 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 9466 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9449 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 9467 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
@@ -9463,7 +9481,7 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
9463 hsw_restore_lcpll(dev_priv); 9481 hsw_restore_lcpll(dev_priv);
9464 lpt_init_pch_refclk(dev); 9482 lpt_init_pch_refclk(dev);
9465 9483
9466 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { 9484 if (HAS_PCH_LPT_LP(dev)) {
9467 val = I915_READ(SOUTH_DSPCLK_GATE_D); 9485 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9468 val |= PCH_LP_PARTITION_LEVEL_DISABLE; 9486 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
9469 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 9487 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
@@ -9813,12 +9831,10 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9813 } 9831 }
9814 9832
9815 if (intel_display_power_is_enabled(dev_priv, pfit_domain)) { 9833 if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
9816 if (INTEL_INFO(dev)->gen == 9) 9834 if (INTEL_INFO(dev)->gen >= 9)
9817 skylake_get_pfit_config(crtc, pipe_config); 9835 skylake_get_pfit_config(crtc, pipe_config);
9818 else if (INTEL_INFO(dev)->gen < 9)
9819 ironlake_get_pfit_config(crtc, pipe_config);
9820 else 9836 else
9821 MISSING_CASE(INTEL_INFO(dev)->gen); 9837 ironlake_get_pfit_config(crtc, pipe_config);
9822 } 9838 }
9823 9839
9824 if (IS_HASWELL(dev)) 9840 if (IS_HASWELL(dev))
@@ -9875,13 +9891,13 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
9875 /* On these chipsets we can only modify the base/size/stride 9891 /* On these chipsets we can only modify the base/size/stride
9876 * whilst the cursor is disabled. 9892 * whilst the cursor is disabled.
9877 */ 9893 */
9878 I915_WRITE(_CURACNTR, 0); 9894 I915_WRITE(CURCNTR(PIPE_A), 0);
9879 POSTING_READ(_CURACNTR); 9895 POSTING_READ(CURCNTR(PIPE_A));
9880 intel_crtc->cursor_cntl = 0; 9896 intel_crtc->cursor_cntl = 0;
9881 } 9897 }
9882 9898
9883 if (intel_crtc->cursor_base != base) { 9899 if (intel_crtc->cursor_base != base) {
9884 I915_WRITE(_CURABASE, base); 9900 I915_WRITE(CURBASE(PIPE_A), base);
9885 intel_crtc->cursor_base = base; 9901 intel_crtc->cursor_base = base;
9886 } 9902 }
9887 9903
@@ -9891,8 +9907,8 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
9891 } 9907 }
9892 9908
9893 if (intel_crtc->cursor_cntl != cntl) { 9909 if (intel_crtc->cursor_cntl != cntl) {
9894 I915_WRITE(_CURACNTR, cntl); 9910 I915_WRITE(CURCNTR(PIPE_A), cntl);
9895 POSTING_READ(_CURACNTR); 9911 POSTING_READ(CURCNTR(PIPE_A));
9896 intel_crtc->cursor_cntl = cntl; 9912 intel_crtc->cursor_cntl = cntl;
9897 } 9913 }
9898} 9914}
@@ -9924,7 +9940,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
9924 } 9940 }
9925 cntl |= pipe << 28; /* Connect to correct pipe */ 9941 cntl |= pipe << 28; /* Connect to correct pipe */
9926 9942
9927 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 9943 if (HAS_DDI(dev))
9928 cntl |= CURSOR_PIPE_CSC_ENABLE; 9944 cntl |= CURSOR_PIPE_CSC_ENABLE;
9929 } 9945 }
9930 9946
@@ -9952,8 +9968,9 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
9952 struct drm_i915_private *dev_priv = dev->dev_private; 9968 struct drm_i915_private *dev_priv = dev->dev_private;
9953 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9969 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9954 int pipe = intel_crtc->pipe; 9970 int pipe = intel_crtc->pipe;
9955 int x = crtc->cursor_x; 9971 struct drm_plane_state *cursor_state = crtc->cursor->state;
9956 int y = crtc->cursor_y; 9972 int x = cursor_state->crtc_x;
9973 int y = cursor_state->crtc_y;
9957 u32 base = 0, pos = 0; 9974 u32 base = 0, pos = 0;
9958 9975
9959 if (on) 9976 if (on)
@@ -9966,7 +9983,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
9966 base = 0; 9983 base = 0;
9967 9984
9968 if (x < 0) { 9985 if (x < 0) {
9969 if (x + intel_crtc->base.cursor->state->crtc_w <= 0) 9986 if (x + cursor_state->crtc_w <= 0)
9970 base = 0; 9987 base = 0;
9971 9988
9972 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 9989 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
@@ -9975,7 +9992,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
9975 pos |= x << CURSOR_X_SHIFT; 9992 pos |= x << CURSOR_X_SHIFT;
9976 9993
9977 if (y < 0) { 9994 if (y < 0) {
9978 if (y + intel_crtc->base.cursor->state->crtc_h <= 0) 9995 if (y + cursor_state->crtc_h <= 0)
9979 base = 0; 9996 base = 0;
9980 9997
9981 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 9998 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
@@ -9991,8 +10008,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
9991 /* ILK+ do this automagically */ 10008 /* ILK+ do this automagically */
9992 if (HAS_GMCH_DISPLAY(dev) && 10009 if (HAS_GMCH_DISPLAY(dev) &&
9993 crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) { 10010 crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
9994 base += (intel_crtc->base.cursor->state->crtc_h * 10011 base += (cursor_state->crtc_h *
9995 intel_crtc->base.cursor->state->crtc_w - 1) * 4; 10012 cursor_state->crtc_w - 1) * 4;
9996 } 10013 }
9997 10014
9998 if (IS_845G(dev) || IS_I865G(dev)) 10015 if (IS_845G(dev) || IS_I865G(dev))
@@ -10793,7 +10810,7 @@ static bool page_flip_finished(struct intel_crtc *crtc)
10793 */ 10810 */
10794 return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) == 10811 return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
10795 crtc->unpin_work->gtt_offset && 10812 crtc->unpin_work->gtt_offset &&
10796 g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)), 10813 g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
10797 crtc->unpin_work->flip_count); 10814 crtc->unpin_work->flip_count);
10798} 10815}
10799 10816
@@ -10819,11 +10836,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
10819 spin_unlock_irqrestore(&dev->event_lock, flags); 10836 spin_unlock_irqrestore(&dev->event_lock, flags);
10820} 10837}
10821 10838
10822static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc) 10839static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
10823{ 10840{
10824 /* Ensure that the work item is consistent when activating it ... */ 10841 /* Ensure that the work item is consistent when activating it ... */
10825 smp_wmb(); 10842 smp_wmb();
10826 atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING); 10843 atomic_set(&work->pending, INTEL_FLIP_PENDING);
10827 /* and that it is marked active as soon as the irq could fire. */ 10844 /* and that it is marked active as soon as the irq could fire. */
10828 smp_wmb(); 10845 smp_wmb();
10829} 10846}
@@ -10859,7 +10876,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
10859 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); 10876 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
10860 intel_ring_emit(ring, 0); /* aux display base address, unused */ 10877 intel_ring_emit(ring, 0); /* aux display base address, unused */
10861 10878
10862 intel_mark_page_flip_active(intel_crtc); 10879 intel_mark_page_flip_active(intel_crtc->unpin_work);
10863 return 0; 10880 return 0;
10864} 10881}
10865 10882
@@ -10891,7 +10908,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
10891 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); 10908 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
10892 intel_ring_emit(ring, MI_NOOP); 10909 intel_ring_emit(ring, MI_NOOP);
10893 10910
10894 intel_mark_page_flip_active(intel_crtc); 10911 intel_mark_page_flip_active(intel_crtc->unpin_work);
10895 return 0; 10912 return 0;
10896} 10913}
10897 10914
@@ -10930,7 +10947,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
10930 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 10947 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
10931 intel_ring_emit(ring, pf | pipesrc); 10948 intel_ring_emit(ring, pf | pipesrc);
10932 10949
10933 intel_mark_page_flip_active(intel_crtc); 10950 intel_mark_page_flip_active(intel_crtc->unpin_work);
10934 return 0; 10951 return 0;
10935} 10952}
10936 10953
@@ -10966,7 +10983,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
10966 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 10983 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
10967 intel_ring_emit(ring, pf | pipesrc); 10984 intel_ring_emit(ring, pf | pipesrc);
10968 10985
10969 intel_mark_page_flip_active(intel_crtc); 10986 intel_mark_page_flip_active(intel_crtc->unpin_work);
10970 return 0; 10987 return 0;
10971} 10988}
10972 10989
@@ -11043,10 +11060,10 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
11043 DERRMR_PIPEB_PRI_FLIP_DONE | 11060 DERRMR_PIPEB_PRI_FLIP_DONE |
11044 DERRMR_PIPEC_PRI_FLIP_DONE)); 11061 DERRMR_PIPEC_PRI_FLIP_DONE));
11045 if (IS_GEN8(dev)) 11062 if (IS_GEN8(dev))
11046 intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) | 11063 intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
11047 MI_SRM_LRM_GLOBAL_GTT); 11064 MI_SRM_LRM_GLOBAL_GTT);
11048 else 11065 else
11049 intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | 11066 intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
11050 MI_SRM_LRM_GLOBAL_GTT); 11067 MI_SRM_LRM_GLOBAL_GTT);
11051 intel_ring_emit(ring, DERRMR); 11068 intel_ring_emit(ring, DERRMR);
11052 intel_ring_emit(ring, ring->scratch.gtt_offset + 256); 11069 intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
@@ -11061,7 +11078,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
11061 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); 11078 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
11062 intel_ring_emit(ring, (MI_NOOP)); 11079 intel_ring_emit(ring, (MI_NOOP));
11063 11080
11064 intel_mark_page_flip_active(intel_crtc); 11081 intel_mark_page_flip_active(intel_crtc->unpin_work);
11065 return 0; 11082 return 0;
11066} 11083}
11067 11084
@@ -11092,7 +11109,8 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
11092 return ring != i915_gem_request_get_ring(obj->last_write_req); 11109 return ring != i915_gem_request_get_ring(obj->last_write_req);
11093} 11110}
11094 11111
11095static void skl_do_mmio_flip(struct intel_crtc *intel_crtc) 11112static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11113 struct intel_unpin_work *work)
11096{ 11114{
11097 struct drm_device *dev = intel_crtc->base.dev; 11115 struct drm_device *dev = intel_crtc->base.dev;
11098 struct drm_i915_private *dev_priv = dev->dev_private; 11116 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -11133,11 +11151,12 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc)
11133 I915_WRITE(PLANE_CTL(pipe, 0), ctl); 11151 I915_WRITE(PLANE_CTL(pipe, 0), ctl);
11134 I915_WRITE(PLANE_STRIDE(pipe, 0), stride); 11152 I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
11135 11153
11136 I915_WRITE(PLANE_SURF(pipe, 0), intel_crtc->unpin_work->gtt_offset); 11154 I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
11137 POSTING_READ(PLANE_SURF(pipe, 0)); 11155 POSTING_READ(PLANE_SURF(pipe, 0));
11138} 11156}
11139 11157
11140static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc) 11158static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
11159 struct intel_unpin_work *work)
11141{ 11160{
11142 struct drm_device *dev = intel_crtc->base.dev; 11161 struct drm_device *dev = intel_crtc->base.dev;
11143 struct drm_i915_private *dev_priv = dev->dev_private; 11162 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -11157,32 +11176,36 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc)
11157 11176
11158 I915_WRITE(reg, dspcntr); 11177 I915_WRITE(reg, dspcntr);
11159 11178
11160 I915_WRITE(DSPSURF(intel_crtc->plane), 11179 I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
11161 intel_crtc->unpin_work->gtt_offset);
11162 POSTING_READ(DSPSURF(intel_crtc->plane)); 11180 POSTING_READ(DSPSURF(intel_crtc->plane));
11163
11164} 11181}
11165 11182
11166/* 11183/*
11167 * XXX: This is the temporary way to update the plane registers until we get 11184 * XXX: This is the temporary way to update the plane registers until we get
11168 * around to using the usual plane update functions for MMIO flips 11185 * around to using the usual plane update functions for MMIO flips
11169 */ 11186 */
11170static void intel_do_mmio_flip(struct intel_crtc *intel_crtc) 11187static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
11171{ 11188{
11172 struct drm_device *dev = intel_crtc->base.dev; 11189 struct intel_crtc *crtc = mmio_flip->crtc;
11173 u32 start_vbl_count; 11190 struct intel_unpin_work *work;
11174 11191
11175 intel_mark_page_flip_active(intel_crtc); 11192 spin_lock_irq(&crtc->base.dev->event_lock);
11193 work = crtc->unpin_work;
11194 spin_unlock_irq(&crtc->base.dev->event_lock);
11195 if (work == NULL)
11196 return;
11176 11197
11177 intel_pipe_update_start(intel_crtc, &start_vbl_count); 11198 intel_mark_page_flip_active(work);
11178 11199
11179 if (INTEL_INFO(dev)->gen >= 9) 11200 intel_pipe_update_start(crtc);
11180 skl_do_mmio_flip(intel_crtc); 11201
11202 if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
11203 skl_do_mmio_flip(crtc, work);
11181 else 11204 else
11182 /* use_mmio_flip() retricts MMIO flips to ilk+ */ 11205 /* use_mmio_flip() retricts MMIO flips to ilk+ */
11183 ilk_do_mmio_flip(intel_crtc); 11206 ilk_do_mmio_flip(crtc, work);
11184 11207
11185 intel_pipe_update_end(intel_crtc, start_vbl_count); 11208 intel_pipe_update_end(crtc);
11186} 11209}
11187 11210
11188static void intel_mmio_flip_work_func(struct work_struct *work) 11211static void intel_mmio_flip_work_func(struct work_struct *work)
@@ -11190,15 +11213,15 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
11190 struct intel_mmio_flip *mmio_flip = 11213 struct intel_mmio_flip *mmio_flip =
11191 container_of(work, struct intel_mmio_flip, work); 11214 container_of(work, struct intel_mmio_flip, work);
11192 11215
11193 if (mmio_flip->req) 11216 if (mmio_flip->req) {
11194 WARN_ON(__i915_wait_request(mmio_flip->req, 11217 WARN_ON(__i915_wait_request(mmio_flip->req,
11195 mmio_flip->crtc->reset_counter, 11218 mmio_flip->crtc->reset_counter,
11196 false, NULL, 11219 false, NULL,
11197 &mmio_flip->i915->rps.mmioflips)); 11220 &mmio_flip->i915->rps.mmioflips));
11221 i915_gem_request_unreference__unlocked(mmio_flip->req);
11222 }
11198 11223
11199 intel_do_mmio_flip(mmio_flip->crtc); 11224 intel_do_mmio_flip(mmio_flip);
11200
11201 i915_gem_request_unreference__unlocked(mmio_flip->req);
11202 kfree(mmio_flip); 11225 kfree(mmio_flip);
11203} 11226}
11204 11227
@@ -11246,6 +11269,9 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
11246 if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE) 11269 if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
11247 return true; 11270 return true;
11248 11271
11272 if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
11273 return false;
11274
11249 if (!work->enable_stall_check) 11275 if (!work->enable_stall_check)
11250 return false; 11276 return false;
11251 11277
@@ -11396,7 +11422,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11396 intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); 11422 intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
11397 11423
11398 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 11424 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
11399 work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1; 11425 work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
11400 11426
11401 if (IS_VALLEYVIEW(dev)) { 11427 if (IS_VALLEYVIEW(dev)) {
11402 ring = &dev_priv->ring[BCS]; 11428 ring = &dev_priv->ring[BCS];
@@ -11426,8 +11452,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11426 if (ret) 11452 if (ret)
11427 goto cleanup_pending; 11453 goto cleanup_pending;
11428 11454
11429 work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), obj) 11455 work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
11430 + intel_crtc->dspaddr_offset; 11456 obj, 0);
11457 work->gtt_offset += intel_crtc->dspaddr_offset;
11431 11458
11432 if (mmio_flip) { 11459 if (mmio_flip) {
11433 ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring, 11460 ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
@@ -11636,7 +11663,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11636 intel_crtc->atomic.update_wm_pre = true; 11663 intel_crtc->atomic.update_wm_pre = true;
11637 } 11664 }
11638 11665
11639 if (visible) 11666 if (visible || was_visible)
11640 intel_crtc->atomic.fb_bits |= 11667 intel_crtc->atomic.fb_bits |=
11641 to_intel_plane(plane)->frontbuffer_bit; 11668 to_intel_plane(plane)->frontbuffer_bit;
11642 11669
@@ -11909,14 +11936,16 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
11909 pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n, 11936 pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
11910 pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n, 11937 pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
11911 pipe_config->fdi_m_n.tu); 11938 pipe_config->fdi_m_n.tu);
11912 DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", 11939 DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
11913 pipe_config->has_dp_encoder, 11940 pipe_config->has_dp_encoder,
11941 pipe_config->lane_count,
11914 pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n, 11942 pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
11915 pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n, 11943 pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
11916 pipe_config->dp_m_n.tu); 11944 pipe_config->dp_m_n.tu);
11917 11945
11918 DRM_DEBUG_KMS("dp: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n", 11946 DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
11919 pipe_config->has_dp_encoder, 11947 pipe_config->has_dp_encoder,
11948 pipe_config->lane_count,
11920 pipe_config->dp_m2_n2.gmch_m, 11949 pipe_config->dp_m2_n2.gmch_m,
11921 pipe_config->dp_m2_n2.gmch_n, 11950 pipe_config->dp_m2_n2.gmch_n,
11922 pipe_config->dp_m2_n2.link_m, 11951 pipe_config->dp_m2_n2.link_m,
@@ -12128,10 +12157,6 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
12128 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) 12157 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
12129 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; 12158 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
12130 12159
12131 /* Compute a starting value for pipe_config->pipe_bpp taking the source
12132 * plane pixel format and any sink constraints into account. Returns the
12133 * source plane bpp so that dithering can be selected on mismatches
12134 * after encoders and crtc also have had their say. */
12135 base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc), 12160 base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
12136 pipe_config); 12161 pipe_config);
12137 if (base_bpp < 0) 12162 if (base_bpp < 0)
@@ -12200,7 +12225,7 @@ encoder_retry:
12200 /* Dithering seems to not pass-through bits correctly when it should, so 12225 /* Dithering seems to not pass-through bits correctly when it should, so
12201 * only enable it on 6bpc panels. */ 12226 * only enable it on 6bpc panels. */
12202 pipe_config->dither = pipe_config->pipe_bpp == 6*3; 12227 pipe_config->dither = pipe_config->pipe_bpp == 6*3;
12203 DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n", 12228 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
12204 base_bpp, pipe_config->pipe_bpp, pipe_config->dither); 12229 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
12205 12230
12206fail: 12231fail:
@@ -12250,7 +12275,6 @@ static bool intel_fuzzy_clock_check(int clock1, int clock2)
12250 base.head) \ 12275 base.head) \
12251 if (mask & (1 <<(intel_crtc)->pipe)) 12276 if (mask & (1 <<(intel_crtc)->pipe))
12252 12277
12253
12254static bool 12278static bool
12255intel_compare_m_n(unsigned int m, unsigned int n, 12279intel_compare_m_n(unsigned int m, unsigned int n,
12256 unsigned int m2, unsigned int n2, 12280 unsigned int m2, unsigned int n2,
@@ -12423,6 +12447,7 @@ intel_pipe_config_compare(struct drm_device *dev,
12423 PIPE_CONF_CHECK_M_N(fdi_m_n); 12447 PIPE_CONF_CHECK_M_N(fdi_m_n);
12424 12448
12425 PIPE_CONF_CHECK_I(has_dp_encoder); 12449 PIPE_CONF_CHECK_I(has_dp_encoder);
12450 PIPE_CONF_CHECK_I(lane_count);
12426 12451
12427 if (INTEL_INFO(dev)->gen < 8) { 12452 if (INTEL_INFO(dev)->gen < 8) {
12428 PIPE_CONF_CHECK_M_N(dp_m_n); 12453 PIPE_CONF_CHECK_M_N(dp_m_n);
@@ -12470,22 +12495,24 @@ intel_pipe_config_compare(struct drm_device *dev,
12470 DRM_MODE_FLAG_NVSYNC); 12495 DRM_MODE_FLAG_NVSYNC);
12471 } 12496 }
12472 12497
12473 PIPE_CONF_CHECK_I(pipe_src_w); 12498 PIPE_CONF_CHECK_X(gmch_pfit.control);
12474 PIPE_CONF_CHECK_I(pipe_src_h);
12475
12476 PIPE_CONF_CHECK_I(gmch_pfit.control);
12477 /* pfit ratios are autocomputed by the hw on gen4+ */ 12499 /* pfit ratios are autocomputed by the hw on gen4+ */
12478 if (INTEL_INFO(dev)->gen < 4) 12500 if (INTEL_INFO(dev)->gen < 4)
12479 PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); 12501 PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
12480 PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits); 12502 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12481 12503
12482 PIPE_CONF_CHECK_I(pch_pfit.enabled); 12504 if (!adjust) {
12483 if (current_config->pch_pfit.enabled) { 12505 PIPE_CONF_CHECK_I(pipe_src_w);
12484 PIPE_CONF_CHECK_I(pch_pfit.pos); 12506 PIPE_CONF_CHECK_I(pipe_src_h);
12485 PIPE_CONF_CHECK_I(pch_pfit.size); 12507
12486 } 12508 PIPE_CONF_CHECK_I(pch_pfit.enabled);
12509 if (current_config->pch_pfit.enabled) {
12510 PIPE_CONF_CHECK_X(pch_pfit.pos);
12511 PIPE_CONF_CHECK_X(pch_pfit.size);
12512 }
12487 12513
12488 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 12514 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12515 }
12489 12516
12490 /* BDW+ don't expose a synchronous way to read the state */ 12517 /* BDW+ don't expose a synchronous way to read the state */
12491 if (IS_HASWELL(dev)) 12518 if (IS_HASWELL(dev))
@@ -12558,8 +12585,8 @@ static void check_wm_state(struct drm_device *dev)
12558 } 12585 }
12559 12586
12560 /* cursor */ 12587 /* cursor */
12561 hw_entry = &hw_ddb.cursor[pipe]; 12588 hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
12562 sw_entry = &sw_ddb->cursor[pipe]; 12589 sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
12563 12590
12564 if (skl_ddb_entry_equal(hw_entry, sw_entry)) 12591 if (skl_ddb_entry_equal(hw_entry, sw_entry))
12565 continue; 12592 continue;
@@ -12647,7 +12674,8 @@ check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
12647 struct intel_crtc_state *pipe_config, *sw_config; 12674 struct intel_crtc_state *pipe_config, *sw_config;
12648 bool active; 12675 bool active;
12649 12676
12650 if (!needs_modeset(crtc->state)) 12677 if (!needs_modeset(crtc->state) &&
12678 !to_intel_crtc_state(crtc->state)->update_pipe)
12651 continue; 12679 continue;
12652 12680
12653 __drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state); 12681 __drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
@@ -12801,11 +12829,11 @@ static void update_scanline_offset(struct intel_crtc *crtc)
12801 * one to the value. 12829 * one to the value.
12802 */ 12830 */
12803 if (IS_GEN2(dev)) { 12831 if (IS_GEN2(dev)) {
12804 const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode; 12832 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
12805 int vtotal; 12833 int vtotal;
12806 12834
12807 vtotal = mode->crtc_vtotal; 12835 vtotal = adjusted_mode->crtc_vtotal;
12808 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 12836 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
12809 vtotal /= 2; 12837 vtotal /= 2;
12810 12838
12811 crtc->scanline_offset = vtotal - 1; 12839 crtc->scanline_offset = vtotal - 1;
@@ -12943,7 +12971,6 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state)
12943 return ret; 12971 return ret;
12944} 12972}
12945 12973
12946
12947static int intel_modeset_checks(struct drm_atomic_state *state) 12974static int intel_modeset_checks(struct drm_atomic_state *state)
12948{ 12975{
12949 struct drm_device *dev = state->dev; 12976 struct drm_device *dev = state->dev;
@@ -13029,11 +13056,11 @@ static int intel_atomic_check(struct drm_device *dev,
13029 if (ret) 13056 if (ret)
13030 return ret; 13057 return ret;
13031 13058
13032 if (i915.fastboot && 13059 if (intel_pipe_config_compare(state->dev,
13033 intel_pipe_config_compare(state->dev,
13034 to_intel_crtc_state(crtc->state), 13060 to_intel_crtc_state(crtc->state),
13035 pipe_config, true)) { 13061 pipe_config, true)) {
13036 crtc_state->mode_changed = false; 13062 crtc_state->mode_changed = false;
13063 to_intel_crtc_state(crtc_state)->update_pipe = true;
13037 } 13064 }
13038 13065
13039 if (needs_modeset(crtc_state)) { 13066 if (needs_modeset(crtc_state)) {
@@ -13131,16 +13158,30 @@ static int intel_atomic_commit(struct drm_device *dev,
13131 for_each_crtc_in_state(state, crtc, crtc_state, i) { 13158 for_each_crtc_in_state(state, crtc, crtc_state, i) {
13132 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13159 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13133 bool modeset = needs_modeset(crtc->state); 13160 bool modeset = needs_modeset(crtc->state);
13161 bool update_pipe = !modeset &&
13162 to_intel_crtc_state(crtc->state)->update_pipe;
13163 unsigned long put_domains = 0;
13134 13164
13135 if (modeset && crtc->state->active) { 13165 if (modeset && crtc->state->active) {
13136 update_scanline_offset(to_intel_crtc(crtc)); 13166 update_scanline_offset(to_intel_crtc(crtc));
13137 dev_priv->display.crtc_enable(crtc); 13167 dev_priv->display.crtc_enable(crtc);
13138 } 13168 }
13139 13169
13170 if (update_pipe) {
13171 put_domains = modeset_get_crtc_power_domains(crtc);
13172
13173 /* make sure intel_modeset_check_state runs */
13174 any_ms = true;
13175 }
13176
13140 if (!modeset) 13177 if (!modeset)
13141 intel_pre_plane_update(intel_crtc); 13178 intel_pre_plane_update(intel_crtc);
13142 13179
13143 drm_atomic_helper_commit_planes_on_crtc(crtc_state); 13180 drm_atomic_helper_commit_planes_on_crtc(crtc_state);
13181
13182 if (put_domains)
13183 modeset_put_power_domains(dev_priv, put_domains);
13184
13144 intel_post_plane_update(intel_crtc); 13185 intel_post_plane_update(intel_crtc);
13145 } 13186 }
13146 13187
@@ -13296,8 +13337,6 @@ static void intel_shared_dpll_init(struct drm_device *dev)
13296{ 13337{
13297 struct drm_i915_private *dev_priv = dev->dev_private; 13338 struct drm_i915_private *dev_priv = dev->dev_private;
13298 13339
13299 intel_update_cdclk(dev);
13300
13301 if (HAS_DDI(dev)) 13340 if (HAS_DDI(dev))
13302 intel_ddi_pll_init(dev); 13341 intel_ddi_pll_init(dev);
13303 else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 13342 else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
@@ -13322,10 +13361,10 @@ static void intel_shared_dpll_init(struct drm_device *dev)
13322 */ 13361 */
13323int 13362int
13324intel_prepare_plane_fb(struct drm_plane *plane, 13363intel_prepare_plane_fb(struct drm_plane *plane,
13325 struct drm_framebuffer *fb,
13326 const struct drm_plane_state *new_state) 13364 const struct drm_plane_state *new_state)
13327{ 13365{
13328 struct drm_device *dev = plane->dev; 13366 struct drm_device *dev = plane->dev;
13367 struct drm_framebuffer *fb = new_state->fb;
13329 struct intel_plane *intel_plane = to_intel_plane(plane); 13368 struct intel_plane *intel_plane = to_intel_plane(plane);
13330 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 13369 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13331 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb); 13370 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
@@ -13363,19 +13402,18 @@ intel_prepare_plane_fb(struct drm_plane *plane,
13363 */ 13402 */
13364void 13403void
13365intel_cleanup_plane_fb(struct drm_plane *plane, 13404intel_cleanup_plane_fb(struct drm_plane *plane,
13366 struct drm_framebuffer *fb,
13367 const struct drm_plane_state *old_state) 13405 const struct drm_plane_state *old_state)
13368{ 13406{
13369 struct drm_device *dev = plane->dev; 13407 struct drm_device *dev = plane->dev;
13370 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 13408 struct drm_i915_gem_object *obj = intel_fb_obj(old_state->fb);
13371 13409
13372 if (WARN_ON(!obj)) 13410 if (!obj)
13373 return; 13411 return;
13374 13412
13375 if (plane->type != DRM_PLANE_TYPE_CURSOR || 13413 if (plane->type != DRM_PLANE_TYPE_CURSOR ||
13376 !INTEL_INFO(dev)->cursor_needs_physical) { 13414 !INTEL_INFO(dev)->cursor_needs_physical) {
13377 mutex_lock(&dev->struct_mutex); 13415 mutex_lock(&dev->struct_mutex);
13378 intel_unpin_fb_obj(fb, old_state); 13416 intel_unpin_fb_obj(old_state->fb, old_state);
13379 mutex_unlock(&dev->struct_mutex); 13417 mutex_unlock(&dev->struct_mutex);
13380 } 13418 }
13381} 13419}
@@ -13457,11 +13495,9 @@ intel_commit_primary_plane(struct drm_plane *plane,
13457 if (!crtc->state->active) 13495 if (!crtc->state->active)
13458 return; 13496 return;
13459 13497
13460 if (state->visible) 13498 dev_priv->display.update_primary_plane(crtc, fb,
13461 /* FIXME: kill this fastboot hack */ 13499 state->src.x1 >> 16,
13462 intel_update_pipe_size(intel_crtc); 13500 state->src.y1 >> 16);
13463
13464 dev_priv->display.update_primary_plane(crtc, fb, crtc->x, crtc->y);
13465} 13501}
13466 13502
13467static void 13503static void
@@ -13479,15 +13515,23 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
13479{ 13515{
13480 struct drm_device *dev = crtc->dev; 13516 struct drm_device *dev = crtc->dev;
13481 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13517 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13518 struct intel_crtc_state *old_intel_state =
13519 to_intel_crtc_state(old_crtc_state);
13520 bool modeset = needs_modeset(crtc->state);
13482 13521
13483 if (intel_crtc->atomic.update_wm_pre) 13522 if (intel_crtc->atomic.update_wm_pre)
13484 intel_update_watermarks(crtc); 13523 intel_update_watermarks(crtc);
13485 13524
13486 /* Perform vblank evasion around commit operation */ 13525 /* Perform vblank evasion around commit operation */
13487 if (crtc->state->active) 13526 if (crtc->state->active)
13488 intel_pipe_update_start(intel_crtc, &intel_crtc->start_vbl_count); 13527 intel_pipe_update_start(intel_crtc);
13489 13528
13490 if (!needs_modeset(crtc->state) && INTEL_INFO(dev)->gen >= 9) 13529 if (modeset)
13530 return;
13531
13532 if (to_intel_crtc_state(crtc->state)->update_pipe)
13533 intel_update_pipe_config(intel_crtc, old_intel_state);
13534 else if (INTEL_INFO(dev)->gen >= 9)
13491 skl_detach_scalers(intel_crtc); 13535 skl_detach_scalers(intel_crtc);
13492} 13536}
13493 13537
@@ -13497,7 +13541,7 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
13497 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13541 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13498 13542
13499 if (crtc->state->active) 13543 if (crtc->state->active)
13500 intel_pipe_update_end(intel_crtc, intel_crtc->start_vbl_count); 13544 intel_pipe_update_end(intel_crtc);
13501} 13545}
13502 13546
13503/** 13547/**
@@ -13666,10 +13710,6 @@ intel_commit_cursor_plane(struct drm_plane *plane,
13666 crtc = crtc ? crtc : plane->crtc; 13710 crtc = crtc ? crtc : plane->crtc;
13667 intel_crtc = to_intel_crtc(crtc); 13711 intel_crtc = to_intel_crtc(crtc);
13668 13712
13669 plane->fb = state->base.fb;
13670 crtc->cursor_x = state->base.crtc_x;
13671 crtc->cursor_y = state->base.crtc_y;
13672
13673 if (intel_crtc->cursor_bo == obj) 13713 if (intel_crtc->cursor_bo == obj)
13674 goto update; 13714 goto update;
13675 13715
@@ -13955,7 +13995,7 @@ static void intel_setup_outputs(struct drm_device *dev)
13955 * On SKL pre-D0 the strap isn't connected, so we assume 13995 * On SKL pre-D0 the strap isn't connected, so we assume
13956 * it's there. 13996 * it's there.
13957 */ 13997 */
13958 found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED; 13998 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
13959 /* WaIgnoreDDIAStrap: skl */ 13999 /* WaIgnoreDDIAStrap: skl */
13960 if (found || IS_SKYLAKE(dev)) 14000 if (found || IS_SKYLAKE(dev))
13961 intel_ddi_init(dev, PORT_A); 14001 intel_ddi_init(dev, PORT_A);
@@ -14016,29 +14056,26 @@ static void intel_setup_outputs(struct drm_device *dev)
14016 * eDP ports. Consult the VBT as well as DP_DETECTED to 14056 * eDP ports. Consult the VBT as well as DP_DETECTED to
14017 * detect eDP ports. 14057 * detect eDP ports.
14018 */ 14058 */
14019 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED && 14059 if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
14020 !intel_dp_is_edp(dev, PORT_B)) 14060 !intel_dp_is_edp(dev, PORT_B))
14021 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB, 14061 intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
14022 PORT_B); 14062 if (I915_READ(VLV_DP_B) & DP_DETECTED ||
14023 if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED ||
14024 intel_dp_is_edp(dev, PORT_B)) 14063 intel_dp_is_edp(dev, PORT_B))
14025 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B); 14064 intel_dp_init(dev, VLV_DP_B, PORT_B);
14026 14065
14027 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED && 14066 if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
14028 !intel_dp_is_edp(dev, PORT_C)) 14067 !intel_dp_is_edp(dev, PORT_C))
14029 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC, 14068 intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
14030 PORT_C); 14069 if (I915_READ(VLV_DP_C) & DP_DETECTED ||
14031 if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED ||
14032 intel_dp_is_edp(dev, PORT_C)) 14070 intel_dp_is_edp(dev, PORT_C))
14033 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C); 14071 intel_dp_init(dev, VLV_DP_C, PORT_C);
14034 14072
14035 if (IS_CHERRYVIEW(dev)) { 14073 if (IS_CHERRYVIEW(dev)) {
14036 if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED)
14037 intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
14038 PORT_D);
14039 /* eDP not supported on port D, so don't check VBT */ 14074 /* eDP not supported on port D, so don't check VBT */
14040 if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED) 14075 if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
14041 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D); 14076 intel_hdmi_init(dev, CHV_HDMID, PORT_D);
14077 if (I915_READ(CHV_DP_D) & DP_DETECTED)
14078 intel_dp_init(dev, CHV_DP_D, PORT_D);
14042 } 14079 }
14043 14080
14044 intel_dsi_init(dev); 14081 intel_dsi_init(dev);
@@ -14534,8 +14571,6 @@ static void intel_init_display(struct drm_device *dev)
14534 dev_priv->display.queue_flip = intel_default_queue_flip; 14571 dev_priv->display.queue_flip = intel_default_queue_flip;
14535 } 14572 }
14536 14573
14537 intel_panel_init_backlight_funcs(dev);
14538
14539 mutex_init(&dev_priv->pps_mutex); 14574 mutex_init(&dev_priv->pps_mutex);
14540} 14575}
14541 14576
@@ -14678,6 +14713,9 @@ static struct intel_quirk intel_quirks[] = {
14678 14713
14679 /* Dell Chromebook 11 */ 14714 /* Dell Chromebook 11 */
14680 { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present }, 14715 { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
14716
14717 /* Dell Chromebook 11 (2015 version) */
14718 { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
14681}; 14719};
14682 14720
14683static void intel_init_quirks(struct drm_device *dev) 14721static void intel_init_quirks(struct drm_device *dev)
@@ -14813,7 +14851,8 @@ void intel_modeset_init(struct drm_device *dev)
14813 } 14851 }
14814 } 14852 }
14815 14853
14816 intel_init_dpio(dev); 14854 intel_update_czclk(dev_priv);
14855 intel_update_cdclk(dev);
14817 14856
14818 intel_shared_dpll_init(dev); 14857 intel_shared_dpll_init(dev);
14819 14858
@@ -14881,13 +14920,12 @@ intel_check_plane_mapping(struct intel_crtc *crtc)
14881{ 14920{
14882 struct drm_device *dev = crtc->base.dev; 14921 struct drm_device *dev = crtc->base.dev;
14883 struct drm_i915_private *dev_priv = dev->dev_private; 14922 struct drm_i915_private *dev_priv = dev->dev_private;
14884 u32 reg, val; 14923 u32 val;
14885 14924
14886 if (INTEL_INFO(dev)->num_pipes == 1) 14925 if (INTEL_INFO(dev)->num_pipes == 1)
14887 return true; 14926 return true;
14888 14927
14889 reg = DSPCNTR(!crtc->plane); 14928 val = I915_READ(DSPCNTR(!crtc->plane));
14890 val = I915_READ(reg);
14891 14929
14892 if ((val & DISPLAY_PLANE_ENABLE) && 14930 if ((val & DISPLAY_PLANE_ENABLE) &&
14893 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe)) 14931 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
@@ -14896,13 +14934,22 @@ intel_check_plane_mapping(struct intel_crtc *crtc)
14896 return true; 14934 return true;
14897} 14935}
14898 14936
14937static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
14938{
14939 struct drm_device *dev = crtc->base.dev;
14940 struct intel_encoder *encoder;
14941
14942 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
14943 return true;
14944
14945 return false;
14946}
14947
14899static void intel_sanitize_crtc(struct intel_crtc *crtc) 14948static void intel_sanitize_crtc(struct intel_crtc *crtc)
14900{ 14949{
14901 struct drm_device *dev = crtc->base.dev; 14950 struct drm_device *dev = crtc->base.dev;
14902 struct drm_i915_private *dev_priv = dev->dev_private; 14951 struct drm_i915_private *dev_priv = dev->dev_private;
14903 struct intel_encoder *encoder;
14904 u32 reg; 14952 u32 reg;
14905 bool enable;
14906 14953
14907 /* Clear any frame start delays used for debugging left by the BIOS */ 14954 /* Clear any frame start delays used for debugging left by the BIOS */
14908 reg = PIPECONF(crtc->config->cpu_transcoder); 14955 reg = PIPECONF(crtc->config->cpu_transcoder);
@@ -14913,8 +14960,6 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
14913 if (crtc->active) { 14960 if (crtc->active) {
14914 struct intel_plane *plane; 14961 struct intel_plane *plane;
14915 14962
14916 drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
14917 update_scanline_offset(crtc);
14918 drm_crtc_vblank_on(&crtc->base); 14963 drm_crtc_vblank_on(&crtc->base);
14919 14964
14920 /* Disable everything but the primary plane */ 14965 /* Disable everything but the primary plane */
@@ -14956,16 +15001,11 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
14956 15001
14957 /* Adjust the state of the output pipe according to whether we 15002 /* Adjust the state of the output pipe according to whether we
14958 * have active connectors/encoders. */ 15003 * have active connectors/encoders. */
14959 enable = false; 15004 if (!intel_crtc_has_encoders(crtc))
14960 for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
14961 enable = true;
14962 break;
14963 }
14964
14965 if (!enable)
14966 intel_crtc_disable_noatomic(&crtc->base); 15005 intel_crtc_disable_noatomic(&crtc->base);
14967 15006
14968 if (crtc->active != crtc->base.state->active) { 15007 if (crtc->active != crtc->base.state->active) {
15008 struct intel_encoder *encoder;
14969 15009
14970 /* This can happen either due to bugs in the get_hw_state 15010 /* This can happen either due to bugs in the get_hw_state
14971 * functions or because of calls to intel_crtc_disable_noatomic, 15011 * functions or because of calls to intel_crtc_disable_noatomic,
@@ -15219,6 +15259,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
15219 * recalculation. 15259 * recalculation.
15220 */ 15260 */
15221 crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED; 15261 crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
15262
15263 drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
15264 update_scanline_offset(crtc);
15222 } 15265 }
15223 } 15266 }
15224} 15267}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 0a2e33fbf20d..09bdd94ca3ba 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -130,6 +130,11 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
130static void vlv_steal_power_sequencer(struct drm_device *dev, 130static void vlv_steal_power_sequencer(struct drm_device *dev,
131 enum pipe pipe); 131 enum pipe pipe);
132 132
133static unsigned int intel_dp_unused_lane_mask(int lane_count)
134{
135 return ~((1 << lane_count) - 1) & 0xf;
136}
137
133static int 138static int
134intel_dp_max_link_bw(struct intel_dp *intel_dp) 139intel_dp_max_link_bw(struct intel_dp *intel_dp)
135{ 140{
@@ -253,40 +258,6 @@ static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
253 dst[i] = src >> ((3-i) * 8); 258 dst[i] = src >> ((3-i) * 8);
254} 259}
255 260
256/* hrawclock is 1/4 the FSB frequency */
257static int
258intel_hrawclk(struct drm_device *dev)
259{
260 struct drm_i915_private *dev_priv = dev->dev_private;
261 uint32_t clkcfg;
262
263 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
264 if (IS_VALLEYVIEW(dev))
265 return 200;
266
267 clkcfg = I915_READ(CLKCFG);
268 switch (clkcfg & CLKCFG_FSB_MASK) {
269 case CLKCFG_FSB_400:
270 return 100;
271 case CLKCFG_FSB_533:
272 return 133;
273 case CLKCFG_FSB_667:
274 return 166;
275 case CLKCFG_FSB_800:
276 return 200;
277 case CLKCFG_FSB_1067:
278 return 266;
279 case CLKCFG_FSB_1333:
280 return 333;
281 /* these two are just a guess; one of them might be right */
282 case CLKCFG_FSB_1600:
283 case CLKCFG_FSB_1600_ALT:
284 return 400;
285 default:
286 return 133;
287 }
288}
289
290static void 261static void
291intel_dp_init_panel_power_sequencer(struct drm_device *dev, 262intel_dp_init_panel_power_sequencer(struct drm_device *dev,
292 struct intel_dp *intel_dp); 263 struct intel_dp *intel_dp);
@@ -333,7 +304,9 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
333 struct drm_device *dev = intel_dig_port->base.base.dev; 304 struct drm_device *dev = intel_dig_port->base.base.dev;
334 struct drm_i915_private *dev_priv = dev->dev_private; 305 struct drm_i915_private *dev_priv = dev->dev_private;
335 enum pipe pipe = intel_dp->pps_pipe; 306 enum pipe pipe = intel_dp->pps_pipe;
336 bool pll_enabled; 307 bool pll_enabled, release_cl_override = false;
308 enum dpio_phy phy = DPIO_PHY(pipe);
309 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
337 uint32_t DP; 310 uint32_t DP;
338 311
339 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN, 312 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
@@ -363,9 +336,13 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
363 * The DPLL for the pipe must be enabled for this to work. 336 * The DPLL for the pipe must be enabled for this to work.
364 * So enable temporarily it if it's not already enabled. 337 * So enable temporarily it if it's not already enabled.
365 */ 338 */
366 if (!pll_enabled) 339 if (!pll_enabled) {
340 release_cl_override = IS_CHERRYVIEW(dev) &&
341 !chv_phy_powergate_ch(dev_priv, phy, ch, true);
342
367 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ? 343 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
368 &chv_dpll[0].dpll : &vlv_dpll[0].dpll); 344 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
345 }
369 346
370 /* 347 /*
371 * Similar magic as in intel_dp_enable_port(). 348 * Similar magic as in intel_dp_enable_port().
@@ -382,8 +359,12 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
382 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); 359 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
383 POSTING_READ(intel_dp->output_reg); 360 POSTING_READ(intel_dp->output_reg);
384 361
385 if (!pll_enabled) 362 if (!pll_enabled) {
386 vlv_force_pll_off(dev, pipe); 363 vlv_force_pll_off(dev, pipe);
364
365 if (release_cl_override)
366 chv_phy_powergate_ch(dev_priv, phy, ch, false);
367 }
387} 368}
388 369
389static enum pipe 370static enum pipe
@@ -593,8 +574,6 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
593 edp_notifier); 574 edp_notifier);
594 struct drm_device *dev = intel_dp_to_dev(intel_dp); 575 struct drm_device *dev = intel_dp_to_dev(intel_dp);
595 struct drm_i915_private *dev_priv = dev->dev_private; 576 struct drm_i915_private *dev_priv = dev->dev_private;
596 u32 pp_div;
597 u32 pp_ctrl_reg, pp_div_reg;
598 577
599 if (!is_edp(intel_dp) || code != SYS_RESTART) 578 if (!is_edp(intel_dp) || code != SYS_RESTART)
600 return 0; 579 return 0;
@@ -603,6 +582,8 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
603 582
604 if (IS_VALLEYVIEW(dev)) { 583 if (IS_VALLEYVIEW(dev)) {
605 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); 584 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
585 u32 pp_ctrl_reg, pp_div_reg;
586 u32 pp_div;
606 587
607 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe); 588 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
608 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); 589 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
@@ -974,6 +955,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
974 switch (msg->request & ~DP_AUX_I2C_MOT) { 955 switch (msg->request & ~DP_AUX_I2C_MOT) {
975 case DP_AUX_NATIVE_WRITE: 956 case DP_AUX_NATIVE_WRITE:
976 case DP_AUX_I2C_WRITE: 957 case DP_AUX_I2C_WRITE:
958 case DP_AUX_I2C_WRITE_STATUS_UPDATE:
977 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE; 959 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
978 rxsize = 2; /* 0 or 1 data bytes */ 960 rxsize = 2; /* 0 or 1 data bytes */
979 961
@@ -1383,6 +1365,19 @@ int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1383 return rate_to_index(rate, intel_dp->sink_rates); 1365 return rate_to_index(rate, intel_dp->sink_rates);
1384} 1366}
1385 1367
1368static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1369 uint8_t *link_bw, uint8_t *rate_select)
1370{
1371 if (intel_dp->num_sink_rates) {
1372 *link_bw = 0;
1373 *rate_select =
1374 intel_dp_rate_select(intel_dp, port_clock);
1375 } else {
1376 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1377 *rate_select = 0;
1378 }
1379}
1380
1386bool 1381bool
1387intel_dp_compute_config(struct intel_encoder *encoder, 1382intel_dp_compute_config(struct intel_encoder *encoder,
1388 struct intel_crtc_state *pipe_config) 1383 struct intel_crtc_state *pipe_config)
@@ -1404,6 +1399,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
1404 int link_avail, link_clock; 1399 int link_avail, link_clock;
1405 int common_rates[DP_MAX_SUPPORTED_RATES] = {}; 1400 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1406 int common_len; 1401 int common_len;
1402 uint8_t link_bw, rate_select;
1407 1403
1408 common_len = intel_dp_common_rates(intel_dp, common_rates); 1404 common_len = intel_dp_common_rates(intel_dp, common_rates);
1409 1405
@@ -1499,32 +1495,23 @@ found:
1499 * CEA-861-E - 5.1 Default Encoding Parameters 1495 * CEA-861-E - 5.1 Default Encoding Parameters
1500 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry 1496 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1501 */ 1497 */
1502 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1) 1498 pipe_config->limited_color_range =
1503 intel_dp->color_range = DP_COLOR_RANGE_16_235; 1499 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1504 else
1505 intel_dp->color_range = 0;
1506 }
1507
1508 if (intel_dp->color_range)
1509 pipe_config->limited_color_range = true;
1510
1511 intel_dp->lane_count = lane_count;
1512
1513 if (intel_dp->num_sink_rates) {
1514 intel_dp->link_bw = 0;
1515 intel_dp->rate_select =
1516 intel_dp_rate_select(intel_dp, common_rates[clock]);
1517 } else { 1500 } else {
1518 intel_dp->link_bw = 1501 pipe_config->limited_color_range =
1519 drm_dp_link_rate_to_bw_code(common_rates[clock]); 1502 intel_dp->limited_color_range;
1520 intel_dp->rate_select = 0;
1521 } 1503 }
1522 1504
1505 pipe_config->lane_count = lane_count;
1506
1523 pipe_config->pipe_bpp = bpp; 1507 pipe_config->pipe_bpp = bpp;
1524 pipe_config->port_clock = common_rates[clock]; 1508 pipe_config->port_clock = common_rates[clock];
1525 1509
1526 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n", 1510 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1527 intel_dp->link_bw, intel_dp->lane_count, 1511 &link_bw, &rate_select);
1512
1513 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1514 link_bw, rate_select, pipe_config->lane_count,
1528 pipe_config->port_clock, bpp); 1515 pipe_config->port_clock, bpp);
1529 DRM_DEBUG_KMS("DP link bw required %i available %i\n", 1516 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1530 mode_rate, link_avail); 1517 mode_rate, link_avail);
@@ -1586,6 +1573,13 @@ static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1586 udelay(500); 1573 udelay(500);
1587} 1574}
1588 1575
1576void intel_dp_set_link_params(struct intel_dp *intel_dp,
1577 const struct intel_crtc_state *pipe_config)
1578{
1579 intel_dp->link_rate = pipe_config->port_clock;
1580 intel_dp->lane_count = pipe_config->lane_count;
1581}
1582
1589static void intel_dp_prepare(struct intel_encoder *encoder) 1583static void intel_dp_prepare(struct intel_encoder *encoder)
1590{ 1584{
1591 struct drm_device *dev = encoder->base.dev; 1585 struct drm_device *dev = encoder->base.dev;
@@ -1593,7 +1587,9 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
1593 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1587 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1594 enum port port = dp_to_dig_port(intel_dp)->port; 1588 enum port port = dp_to_dig_port(intel_dp)->port;
1595 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 1589 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1596 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 1590 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1591
1592 intel_dp_set_link_params(intel_dp, crtc->config);
1597 1593
1598 /* 1594 /*
1599 * There are four kinds of DP registers: 1595 * There are four kinds of DP registers:
@@ -1619,7 +1615,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
1619 1615
1620 /* Handle DP bits in common between all three register formats */ 1616 /* Handle DP bits in common between all three register formats */
1621 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 1617 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1622 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count); 1618 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
1623 1619
1624 if (crtc->config->has_audio) 1620 if (crtc->config->has_audio)
1625 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 1621 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
@@ -1649,8 +1645,9 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
1649 trans_dp &= ~TRANS_DP_ENH_FRAMING; 1645 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1650 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp); 1646 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1651 } else { 1647 } else {
1652 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev)) 1648 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1653 intel_dp->DP |= intel_dp->color_range; 1649 crtc->config->limited_color_range)
1650 intel_dp->DP |= DP_COLOR_RANGE_16_235;
1654 1651
1655 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 1652 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1656 intel_dp->DP |= DP_SYNC_HS_HIGH; 1653 intel_dp->DP |= DP_SYNC_HS_HIGH;
@@ -2290,13 +2287,14 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
2290 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; 2287 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2291 2288
2292 if (HAS_PCH_CPT(dev) && port != PORT_A) { 2289 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2293 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe)); 2290 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2294 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH) 2291
2292 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2295 flags |= DRM_MODE_FLAG_PHSYNC; 2293 flags |= DRM_MODE_FLAG_PHSYNC;
2296 else 2294 else
2297 flags |= DRM_MODE_FLAG_NHSYNC; 2295 flags |= DRM_MODE_FLAG_NHSYNC;
2298 2296
2299 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH) 2297 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2300 flags |= DRM_MODE_FLAG_PVSYNC; 2298 flags |= DRM_MODE_FLAG_PVSYNC;
2301 else 2299 else
2302 flags |= DRM_MODE_FLAG_NVSYNC; 2300 flags |= DRM_MODE_FLAG_NVSYNC;
@@ -2320,6 +2318,9 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
2320 2318
2321 pipe_config->has_dp_encoder = true; 2319 pipe_config->has_dp_encoder = true;
2322 2320
2321 pipe_config->lane_count =
2322 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2323
2323 intel_dp_get_m_n(crtc, pipe_config); 2324 intel_dp_get_m_n(crtc, pipe_config);
2324 2325
2325 if (port == PORT_A) { 2326 if (port == PORT_A) {
@@ -2399,38 +2400,62 @@ static void vlv_post_disable_dp(struct intel_encoder *encoder)
2399 intel_dp_link_down(intel_dp); 2400 intel_dp_link_down(intel_dp);
2400} 2401}
2401 2402
2402static void chv_post_disable_dp(struct intel_encoder *encoder) 2403static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2404 bool reset)
2403{ 2405{
2404 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2406 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2405 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 2407 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2406 struct drm_device *dev = encoder->base.dev; 2408 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2407 struct drm_i915_private *dev_priv = dev->dev_private; 2409 enum pipe pipe = crtc->pipe;
2408 struct intel_crtc *intel_crtc = 2410 uint32_t val;
2409 to_intel_crtc(encoder->base.crtc);
2410 enum dpio_channel ch = vlv_dport_to_channel(dport);
2411 enum pipe pipe = intel_crtc->pipe;
2412 u32 val;
2413 2411
2414 intel_dp_link_down(intel_dp); 2412 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2413 if (reset)
2414 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2415 else
2416 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2417 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2415 2418
2416 mutex_lock(&dev_priv->sb_lock); 2419 if (crtc->config->lane_count > 2) {
2420 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2421 if (reset)
2422 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2423 else
2424 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2425 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2426 }
2417 2427
2418 /* Propagate soft reset to data lane reset */
2419 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); 2428 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2420 val |= CHV_PCS_REQ_SOFTRESET_EN; 2429 val |= CHV_PCS_REQ_SOFTRESET_EN;
2430 if (reset)
2431 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2432 else
2433 val |= DPIO_PCS_CLK_SOFT_RESET;
2421 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); 2434 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2422 2435
2423 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); 2436 if (crtc->config->lane_count > 2) {
2424 val |= CHV_PCS_REQ_SOFTRESET_EN; 2437 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2425 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); 2438 val |= CHV_PCS_REQ_SOFTRESET_EN;
2439 if (reset)
2440 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2441 else
2442 val |= DPIO_PCS_CLK_SOFT_RESET;
2443 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2444 }
2445}
2426 2446
2427 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); 2447static void chv_post_disable_dp(struct intel_encoder *encoder)
2428 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); 2448{
2429 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); 2449 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2450 struct drm_device *dev = encoder->base.dev;
2451 struct drm_i915_private *dev_priv = dev->dev_private;
2452
2453 intel_dp_link_down(intel_dp);
2430 2454
2431 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); 2455 mutex_lock(&dev_priv->sb_lock);
2432 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); 2456
2433 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); 2457 /* Assert data lane reset */
2458 chv_data_lane_soft_reset(encoder, true);
2434 2459
2435 mutex_unlock(&dev_priv->sb_lock); 2460 mutex_unlock(&dev_priv->sb_lock);
2436} 2461}
@@ -2550,7 +2575,6 @@ static void intel_enable_dp(struct intel_encoder *encoder)
2550 struct drm_i915_private *dev_priv = dev->dev_private; 2575 struct drm_i915_private *dev_priv = dev->dev_private;
2551 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 2576 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2552 uint32_t dp_reg = I915_READ(intel_dp->output_reg); 2577 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2553 unsigned int lane_mask = 0x0;
2554 2578
2555 if (WARN_ON(dp_reg & DP_PORT_EN)) 2579 if (WARN_ON(dp_reg & DP_PORT_EN))
2556 return; 2580 return;
@@ -2568,13 +2592,18 @@ static void intel_enable_dp(struct intel_encoder *encoder)
2568 2592
2569 pps_unlock(intel_dp); 2593 pps_unlock(intel_dp);
2570 2594
2571 if (IS_VALLEYVIEW(dev)) 2595 if (IS_VALLEYVIEW(dev)) {
2596 unsigned int lane_mask = 0x0;
2597
2598 if (IS_CHERRYVIEW(dev))
2599 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2600
2572 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp), 2601 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2573 lane_mask); 2602 lane_mask);
2603 }
2574 2604
2575 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 2605 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2576 intel_dp_start_link_train(intel_dp); 2606 intel_dp_start_link_train(intel_dp);
2577 intel_dp_complete_link_train(intel_dp);
2578 intel_dp_stop_link_train(intel_dp); 2607 intel_dp_stop_link_train(intel_dp);
2579 2608
2580 if (crtc->config->has_audio) { 2609 if (crtc->config->has_audio) {
@@ -2797,31 +2826,19 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
2797 val &= ~DPIO_LANEDESKEW_STRAP_OVRD; 2826 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2798 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); 2827 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2799 2828
2800 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); 2829 if (intel_crtc->config->lane_count > 2) {
2801 val &= ~DPIO_LANEDESKEW_STRAP_OVRD; 2830 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2802 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); 2831 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2803 2832 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2804 /* Deassert soft data lane reset*/ 2833 }
2805 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2806 val |= CHV_PCS_REQ_SOFTRESET_EN;
2807 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2808
2809 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2810 val |= CHV_PCS_REQ_SOFTRESET_EN;
2811 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2812
2813 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2814 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2815 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2816
2817 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2818 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2819 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2820 2834
2821 /* Program Tx lane latency optimal setting*/ 2835 /* Program Tx lane latency optimal setting*/
2822 for (i = 0; i < 4; i++) { 2836 for (i = 0; i < intel_crtc->config->lane_count; i++) {
2823 /* Set the upar bit */ 2837 /* Set the upar bit */
2824 data = (i == 1) ? 0x0 : 0x1; 2838 if (intel_crtc->config->lane_count == 1)
2839 data = 0x0;
2840 else
2841 data = (i == 1) ? 0x0 : 0x1;
2825 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i), 2842 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2826 data << DPIO_UPAR_SHIFT); 2843 data << DPIO_UPAR_SHIFT);
2827 } 2844 }
@@ -2842,9 +2859,11 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
2842 val |= DPIO_TX2_STAGGER_MASK(0x1f); 2859 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2843 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); 2860 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2844 2861
2845 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); 2862 if (intel_crtc->config->lane_count > 2) {
2846 val |= DPIO_TX2_STAGGER_MASK(0x1f); 2863 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2847 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); 2864 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2865 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2866 }
2848 2867
2849 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch), 2868 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2850 DPIO_LANESTAGGER_STRAP(stagger) | 2869 DPIO_LANESTAGGER_STRAP(stagger) |
@@ -2853,16 +2872,27 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
2853 DPIO_TX1_STAGGER_MULT(6) | 2872 DPIO_TX1_STAGGER_MULT(6) |
2854 DPIO_TX2_STAGGER_MULT(0)); 2873 DPIO_TX2_STAGGER_MULT(0));
2855 2874
2856 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch), 2875 if (intel_crtc->config->lane_count > 2) {
2857 DPIO_LANESTAGGER_STRAP(stagger) | 2876 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2858 DPIO_LANESTAGGER_STRAP_OVRD | 2877 DPIO_LANESTAGGER_STRAP(stagger) |
2859 DPIO_TX1_STAGGER_MASK(0x1f) | 2878 DPIO_LANESTAGGER_STRAP_OVRD |
2860 DPIO_TX1_STAGGER_MULT(7) | 2879 DPIO_TX1_STAGGER_MASK(0x1f) |
2861 DPIO_TX2_STAGGER_MULT(5)); 2880 DPIO_TX1_STAGGER_MULT(7) |
2881 DPIO_TX2_STAGGER_MULT(5));
2882 }
2883
2884 /* Deassert data lane reset */
2885 chv_data_lane_soft_reset(encoder, false);
2862 2886
2863 mutex_unlock(&dev_priv->sb_lock); 2887 mutex_unlock(&dev_priv->sb_lock);
2864 2888
2865 intel_enable_dp(encoder); 2889 intel_enable_dp(encoder);
2890
2891 /* Second common lane will stay alive on its own now */
2892 if (dport->release_cl2_override) {
2893 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2894 dport->release_cl2_override = false;
2895 }
2866} 2896}
2867 2897
2868static void chv_dp_pre_pll_enable(struct intel_encoder *encoder) 2898static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
@@ -2874,12 +2904,27 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2874 to_intel_crtc(encoder->base.crtc); 2904 to_intel_crtc(encoder->base.crtc);
2875 enum dpio_channel ch = vlv_dport_to_channel(dport); 2905 enum dpio_channel ch = vlv_dport_to_channel(dport);
2876 enum pipe pipe = intel_crtc->pipe; 2906 enum pipe pipe = intel_crtc->pipe;
2907 unsigned int lane_mask =
2908 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
2877 u32 val; 2909 u32 val;
2878 2910
2879 intel_dp_prepare(encoder); 2911 intel_dp_prepare(encoder);
2880 2912
2913 /*
2914 * Must trick the second common lane into life.
2915 * Otherwise we can't even access the PLL.
2916 */
2917 if (ch == DPIO_CH0 && pipe == PIPE_B)
2918 dport->release_cl2_override =
2919 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
2920
2921 chv_phy_powergate_lanes(encoder, true, lane_mask);
2922
2881 mutex_lock(&dev_priv->sb_lock); 2923 mutex_lock(&dev_priv->sb_lock);
2882 2924
2925 /* Assert data lane reset */
2926 chv_data_lane_soft_reset(encoder, true);
2927
2883 /* program left/right clock distribution */ 2928 /* program left/right clock distribution */
2884 if (pipe != PIPE_B) { 2929 if (pipe != PIPE_B) {
2885 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); 2930 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
@@ -2908,13 +2953,15 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2908 val |= CHV_PCS_USEDCLKCHANNEL; 2953 val |= CHV_PCS_USEDCLKCHANNEL;
2909 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val); 2954 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2910 2955
2911 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch)); 2956 if (intel_crtc->config->lane_count > 2) {
2912 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; 2957 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2913 if (pipe != PIPE_B) 2958 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2914 val &= ~CHV_PCS_USEDCLKCHANNEL; 2959 if (pipe != PIPE_B)
2915 else 2960 val &= ~CHV_PCS_USEDCLKCHANNEL;
2916 val |= CHV_PCS_USEDCLKCHANNEL; 2961 else
2917 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val); 2962 val |= CHV_PCS_USEDCLKCHANNEL;
2963 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2964 }
2918 2965
2919 /* 2966 /*
2920 * This a a bit weird since generally CL 2967 * This a a bit weird since generally CL
@@ -2931,6 +2978,39 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2931 mutex_unlock(&dev_priv->sb_lock); 2978 mutex_unlock(&dev_priv->sb_lock);
2932} 2979}
2933 2980
2981static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
2982{
2983 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2984 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
2985 u32 val;
2986
2987 mutex_lock(&dev_priv->sb_lock);
2988
2989 /* disable left/right clock distribution */
2990 if (pipe != PIPE_B) {
2991 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2992 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2993 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2994 } else {
2995 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2996 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2997 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2998 }
2999
3000 mutex_unlock(&dev_priv->sb_lock);
3001
3002 /*
3003 * Leave the power down bit cleared for at least one
3004 * lane so that chv_powergate_phy_ch() will power
3005 * on something when the channel is otherwise unused.
3006 * When the port is off and the override is removed
3007 * the lanes power down anyway, so otherwise it doesn't
3008 * really matter what the state of power down bits is
3009 * after this.
3010 */
3011 chv_phy_powergate_lanes(encoder, false, 0x0);
3012}
3013
2934/* 3014/*
2935 * Native read with retry for link status and receiver capability reads for 3015 * Native read with retry for link status and receiver capability reads for
2936 * cases where the sink may still be asleep. 3016 * cases where the sink may still be asleep.
@@ -3167,6 +3247,12 @@ static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3167 return 0; 3247 return 0;
3168} 3248}
3169 3249
3250static bool chv_need_uniq_trans_scale(uint8_t train_set)
3251{
3252 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3253 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3254}
3255
3170static uint32_t chv_signal_levels(struct intel_dp *intel_dp) 3256static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3171{ 3257{
3172 struct drm_device *dev = intel_dp_to_dev(intel_dp); 3258 struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -3258,24 +3344,28 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3258 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; 3344 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3259 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); 3345 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3260 3346
3261 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); 3347 if (intel_crtc->config->lane_count > 2) {
3262 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); 3348 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3263 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); 3349 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3264 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; 3350 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3265 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); 3351 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3352 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3353 }
3266 3354
3267 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch)); 3355 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3268 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); 3356 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3269 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; 3357 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3270 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val); 3358 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3271 3359
3272 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch)); 3360 if (intel_crtc->config->lane_count > 2) {
3273 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); 3361 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3274 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; 3362 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3275 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val); 3363 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3364 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3365 }
3276 3366
3277 /* Program swing deemph */ 3367 /* Program swing deemph */
3278 for (i = 0; i < 4; i++) { 3368 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3279 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i)); 3369 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3280 val &= ~DPIO_SWING_DEEMPH9P5_MASK; 3370 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3281 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT; 3371 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
@@ -3283,43 +3373,36 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3283 } 3373 }
3284 3374
3285 /* Program swing margin */ 3375 /* Program swing margin */
3286 for (i = 0; i < 4; i++) { 3376 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3287 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); 3377 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3378
3288 val &= ~DPIO_SWING_MARGIN000_MASK; 3379 val &= ~DPIO_SWING_MARGIN000_MASK;
3289 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT; 3380 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3381
3382 /*
3383 * Supposedly this value shouldn't matter when unique transition
3384 * scale is disabled, but in fact it does matter. Let's just
3385 * always program the same value and hope it's OK.
3386 */
3387 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3388 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3389
3290 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); 3390 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3291 } 3391 }
3292 3392
3293 /* Disable unique transition scale */ 3393 /*
3294 for (i = 0; i < 4; i++) { 3394 * The document said it needs to set bit 27 for ch0 and bit 26
3395 * for ch1. Might be a typo in the doc.
3396 * For now, for this unique transition scale selection, set bit
3397 * 27 for ch0 and ch1.
3398 */
3399 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3295 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i)); 3400 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3296 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN; 3401 if (chv_need_uniq_trans_scale(train_set))
3297 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3298 }
3299
3300 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3301 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3302 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3303 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3304
3305 /*
3306 * The document said it needs to set bit 27 for ch0 and bit 26
3307 * for ch1. Might be a typo in the doc.
3308 * For now, for this unique transition scale selection, set bit
3309 * 27 for ch0 and ch1.
3310 */
3311 for (i = 0; i < 4; i++) {
3312 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3313 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN; 3402 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3314 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val); 3403 else
3315 } 3404 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3316 3405 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3317 for (i = 0; i < 4; i++) {
3318 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3319 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3320 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3321 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3322 }
3323 } 3406 }
3324 3407
3325 /* Start swing calculation */ 3408 /* Start swing calculation */
@@ -3327,14 +3410,11 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3327 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; 3410 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3328 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); 3411 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3329 3412
3330 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); 3413 if (intel_crtc->config->lane_count > 2) {
3331 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; 3414 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3332 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); 3415 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3333 3416 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3334 /* LRC Bypass */ 3417 }
3335 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3336 val |= DPIO_LRC_BYPASS;
3337 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3338 3418
3339 mutex_unlock(&dev_priv->sb_lock); 3419 mutex_unlock(&dev_priv->sb_lock);
3340 3420
@@ -3520,8 +3600,8 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
3520 uint8_t dp_train_pat) 3600 uint8_t dp_train_pat)
3521{ 3601{
3522 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3602 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3523 struct drm_device *dev = intel_dig_port->base.base.dev; 3603 struct drm_i915_private *dev_priv =
3524 struct drm_i915_private *dev_priv = dev->dev_private; 3604 to_i915(intel_dig_port->base.base.dev);
3525 uint8_t buf[sizeof(intel_dp->train_set) + 1]; 3605 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3526 int ret, len; 3606 int ret, len;
3527 3607
@@ -3562,8 +3642,8 @@ intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3562 const uint8_t link_status[DP_LINK_STATUS_SIZE]) 3642 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3563{ 3643{
3564 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3644 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3565 struct drm_device *dev = intel_dig_port->base.base.dev; 3645 struct drm_i915_private *dev_priv =
3566 struct drm_i915_private *dev_priv = dev->dev_private; 3646 to_i915(intel_dig_port->base.base.dev);
3567 int ret; 3647 int ret;
3568 3648
3569 intel_get_adjust_train(intel_dp, link_status); 3649 intel_get_adjust_train(intel_dp, link_status);
@@ -3610,8 +3690,8 @@ static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3610} 3690}
3611 3691
3612/* Enable corresponding port and start training pattern 1 */ 3692/* Enable corresponding port and start training pattern 1 */
3613void 3693static void
3614intel_dp_start_link_train(struct intel_dp *intel_dp) 3694intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
3615{ 3695{
3616 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base; 3696 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3617 struct drm_device *dev = encoder->dev; 3697 struct drm_device *dev = encoder->dev;
@@ -3620,19 +3700,23 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
3620 int voltage_tries, loop_tries; 3700 int voltage_tries, loop_tries;
3621 uint32_t DP = intel_dp->DP; 3701 uint32_t DP = intel_dp->DP;
3622 uint8_t link_config[2]; 3702 uint8_t link_config[2];
3703 uint8_t link_bw, rate_select;
3623 3704
3624 if (HAS_DDI(dev)) 3705 if (HAS_DDI(dev))
3625 intel_ddi_prepare_link_retrain(encoder); 3706 intel_ddi_prepare_link_retrain(encoder);
3626 3707
3708 intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
3709 &link_bw, &rate_select);
3710
3627 /* Write the link configuration data */ 3711 /* Write the link configuration data */
3628 link_config[0] = intel_dp->link_bw; 3712 link_config[0] = link_bw;
3629 link_config[1] = intel_dp->lane_count; 3713 link_config[1] = intel_dp->lane_count;
3630 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 3714 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3631 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 3715 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3632 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2); 3716 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3633 if (intel_dp->num_sink_rates) 3717 if (intel_dp->num_sink_rates)
3634 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET, 3718 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3635 &intel_dp->rate_select, 1); 3719 &rate_select, 1);
3636 3720
3637 link_config[0] = 0; 3721 link_config[0] = 0;
3638 link_config[1] = DP_SET_ANSI_8B10B; 3722 link_config[1] = DP_SET_ANSI_8B10B;
@@ -3720,17 +3804,30 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
3720 intel_dp->DP = DP; 3804 intel_dp->DP = DP;
3721} 3805}
3722 3806
3723void 3807static void
3724intel_dp_complete_link_train(struct intel_dp *intel_dp) 3808intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
3725{ 3809{
3810 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3811 struct drm_device *dev = dig_port->base.base.dev;
3726 bool channel_eq = false; 3812 bool channel_eq = false;
3727 int tries, cr_tries; 3813 int tries, cr_tries;
3728 uint32_t DP = intel_dp->DP; 3814 uint32_t DP = intel_dp->DP;
3729 uint32_t training_pattern = DP_TRAINING_PATTERN_2; 3815 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3730 3816
3731 /* Training Pattern 3 for HBR2 ot 1.2 devices that support it*/ 3817 /*
3732 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3) 3818 * Training Pattern 3 for HBR2 or 1.2 devices that support it.
3819 *
3820 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
3821 * also mandatory for downstream devices that support HBR2.
3822 *
3823 * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
3824 * supported but still not enabled.
3825 */
3826 if (intel_dp_source_supports_hbr2(dev) &&
3827 drm_dp_tps3_supported(intel_dp->dpcd))
3733 training_pattern = DP_TRAINING_PATTERN_3; 3828 training_pattern = DP_TRAINING_PATTERN_3;
3829 else if (intel_dp->link_rate == 540000)
3830 DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
3734 3831
3735 /* channel equalization */ 3832 /* channel equalization */
3736 if (!intel_dp_set_link_train(intel_dp, &DP, 3833 if (!intel_dp_set_link_train(intel_dp, &DP,
@@ -3758,9 +3855,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
3758 } 3855 }
3759 3856
3760 /* Make sure clock is still ok */ 3857 /* Make sure clock is still ok */
3761 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { 3858 if (!drm_dp_clock_recovery_ok(link_status,
3859 intel_dp->lane_count)) {
3762 intel_dp->train_set_valid = false; 3860 intel_dp->train_set_valid = false;
3763 intel_dp_start_link_train(intel_dp); 3861 intel_dp_link_training_clock_recovery(intel_dp);
3764 intel_dp_set_link_train(intel_dp, &DP, 3862 intel_dp_set_link_train(intel_dp, &DP,
3765 training_pattern | 3863 training_pattern |
3766 DP_LINK_SCRAMBLING_DISABLE); 3864 DP_LINK_SCRAMBLING_DISABLE);
@@ -3768,7 +3866,8 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
3768 continue; 3866 continue;
3769 } 3867 }
3770 3868
3771 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { 3869 if (drm_dp_channel_eq_ok(link_status,
3870 intel_dp->lane_count)) {
3772 channel_eq = true; 3871 channel_eq = true;
3773 break; 3872 break;
3774 } 3873 }
@@ -3776,7 +3875,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
3776 /* Try 5 times, then try clock recovery if that fails */ 3875 /* Try 5 times, then try clock recovery if that fails */
3777 if (tries > 5) { 3876 if (tries > 5) {
3778 intel_dp->train_set_valid = false; 3877 intel_dp->train_set_valid = false;
3779 intel_dp_start_link_train(intel_dp); 3878 intel_dp_link_training_clock_recovery(intel_dp);
3780 intel_dp_set_link_train(intel_dp, &DP, 3879 intel_dp_set_link_train(intel_dp, &DP,
3781 training_pattern | 3880 training_pattern |
3782 DP_LINK_SCRAMBLING_DISABLE); 3881 DP_LINK_SCRAMBLING_DISABLE);
@@ -3809,6 +3908,13 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3809 DP_TRAINING_PATTERN_DISABLE); 3908 DP_TRAINING_PATTERN_DISABLE);
3810} 3909}
3811 3910
3911void
3912intel_dp_start_link_train(struct intel_dp *intel_dp)
3913{
3914 intel_dp_link_training_clock_recovery(intel_dp);
3915 intel_dp_link_training_channel_equalization(intel_dp);
3916}
3917
3812static void 3918static void
3813intel_dp_link_down(struct intel_dp *intel_dp) 3919intel_dp_link_down(struct intel_dp *intel_dp)
3814{ 3920{
@@ -3909,19 +4015,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
3909 } 4015 }
3910 } 4016 }
3911 4017
3912 /* Training Pattern 3 support, Intel platforms that support HBR2 alone 4018 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
3913 * have support for TP3 hence that check is used along with dpcd check 4019 yesno(intel_dp_source_supports_hbr2(dev)),
3914 * to ensure TP3 can be enabled. 4020 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
3915 * SKL < B0: due it's WaDisableHBR2 is the only exception where TP3 is
3916 * supported but still not enabled.
3917 */
3918 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3919 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3920 intel_dp_source_supports_hbr2(dev)) {
3921 intel_dp->use_tps3 = true;
3922 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3923 } else
3924 intel_dp->use_tps3 = false;
3925 4021
3926 /* Intermediate frequency support */ 4022 /* Intermediate frequency support */
3927 if (is_edp(intel_dp) && 4023 if (is_edp(intel_dp) &&
@@ -4007,22 +4103,30 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
4007 return intel_dp->is_mst; 4103 return intel_dp->is_mst;
4008} 4104}
4009 4105
4010static void intel_dp_sink_crc_stop(struct intel_dp *intel_dp) 4106static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
4011{ 4107{
4012 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 4108 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4013 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc); 4109 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4014 u8 buf; 4110 u8 buf;
4111 int ret = 0;
4015 4112
4016 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) { 4113 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4017 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n"); 4114 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4018 return; 4115 ret = -EIO;
4116 goto out;
4019 } 4117 }
4020 4118
4021 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 4119 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4022 buf & ~DP_TEST_SINK_START) < 0) 4120 buf & ~DP_TEST_SINK_START) < 0) {
4023 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n"); 4121 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4122 ret = -EIO;
4123 goto out;
4124 }
4024 4125
4126 intel_dp->sink_crc.started = false;
4127 out:
4025 hsw_enable_ips(intel_crtc); 4128 hsw_enable_ips(intel_crtc);
4129 return ret;
4026} 4130}
4027 4131
4028static int intel_dp_sink_crc_start(struct intel_dp *intel_dp) 4132static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
@@ -4030,6 +4134,13 @@ static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4030 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 4134 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4031 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc); 4135 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4032 u8 buf; 4136 u8 buf;
4137 int ret;
4138
4139 if (intel_dp->sink_crc.started) {
4140 ret = intel_dp_sink_crc_stop(intel_dp);
4141 if (ret)
4142 return ret;
4143 }
4033 4144
4034 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) 4145 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4035 return -EIO; 4146 return -EIO;
@@ -4037,6 +4148,8 @@ static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4037 if (!(buf & DP_TEST_CRC_SUPPORTED)) 4148 if (!(buf & DP_TEST_CRC_SUPPORTED))
4038 return -ENOTTY; 4149 return -ENOTTY;
4039 4150
4151 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4152
4040 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) 4153 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4041 return -EIO; 4154 return -EIO;
4042 4155
@@ -4048,6 +4161,7 @@ static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4048 return -EIO; 4161 return -EIO;
4049 } 4162 }
4050 4163
4164 intel_dp->sink_crc.started = true;
4051 return 0; 4165 return 0;
4052} 4166}
4053 4167
@@ -4057,38 +4171,55 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4057 struct drm_device *dev = dig_port->base.base.dev; 4171 struct drm_device *dev = dig_port->base.base.dev;
4058 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc); 4172 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4059 u8 buf; 4173 u8 buf;
4060 int test_crc_count; 4174 int count, ret;
4061 int attempts = 6; 4175 int attempts = 6;
4062 int ret; 4176 bool old_equal_new;
4063 4177
4064 ret = intel_dp_sink_crc_start(intel_dp); 4178 ret = intel_dp_sink_crc_start(intel_dp);
4065 if (ret) 4179 if (ret)
4066 return ret; 4180 return ret;
4067 4181
4068 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
4069 ret = -EIO;
4070 goto stop;
4071 }
4072
4073 test_crc_count = buf & DP_TEST_COUNT_MASK;
4074
4075 do { 4182 do {
4183 intel_wait_for_vblank(dev, intel_crtc->pipe);
4184
4076 if (drm_dp_dpcd_readb(&intel_dp->aux, 4185 if (drm_dp_dpcd_readb(&intel_dp->aux,
4077 DP_TEST_SINK_MISC, &buf) < 0) { 4186 DP_TEST_SINK_MISC, &buf) < 0) {
4078 ret = -EIO; 4187 ret = -EIO;
4079 goto stop; 4188 goto stop;
4080 } 4189 }
4081 intel_wait_for_vblank(dev, intel_crtc->pipe); 4190 count = buf & DP_TEST_COUNT_MASK;
4082 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count); 4191
4192 /*
4193 * Count might be reset during the loop. In this case
4194 * last known count needs to be reset as well.
4195 */
4196 if (count == 0)
4197 intel_dp->sink_crc.last_count = 0;
4198
4199 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4200 ret = -EIO;
4201 goto stop;
4202 }
4203
4204 old_equal_new = (count == intel_dp->sink_crc.last_count &&
4205 !memcmp(intel_dp->sink_crc.last_crc, crc,
4206 6 * sizeof(u8)));
4207
4208 } while (--attempts && (count == 0 || old_equal_new));
4209
4210 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4211 memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
4083 4212
4084 if (attempts == 0) { 4213 if (attempts == 0) {
4085 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n"); 4214 if (old_equal_new) {
4086 ret = -ETIMEDOUT; 4215 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
4087 goto stop; 4216 } else {
4217 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4218 ret = -ETIMEDOUT;
4219 goto stop;
4220 }
4088 } 4221 }
4089 4222
4090 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
4091 ret = -EIO;
4092stop: 4223stop:
4093 intel_dp_sink_crc_stop(intel_dp); 4224 intel_dp_sink_crc_stop(intel_dp);
4094 return ret; 4225 return ret;
@@ -4248,10 +4379,10 @@ go_again:
4248 if (bret == true) { 4379 if (bret == true) {
4249 4380
4250 /* check link status - esi[10] = 0x200c */ 4381 /* check link status - esi[10] = 0x200c */
4251 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { 4382 if (intel_dp->active_mst_links &&
4383 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4252 DRM_DEBUG_KMS("channel EQ not ok, retraining\n"); 4384 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4253 intel_dp_start_link_train(intel_dp); 4385 intel_dp_start_link_train(intel_dp);
4254 intel_dp_complete_link_train(intel_dp);
4255 intel_dp_stop_link_train(intel_dp); 4386 intel_dp_stop_link_train(intel_dp);
4256 } 4387 }
4257 4388
@@ -4342,7 +4473,6 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
4342 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", 4473 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4343 intel_encoder->base.name); 4474 intel_encoder->base.name);
4344 intel_dp_start_link_train(intel_dp); 4475 intel_dp_start_link_train(intel_dp);
4345 intel_dp_complete_link_train(intel_dp);
4346 intel_dp_stop_link_train(intel_dp); 4476 intel_dp_stop_link_train(intel_dp);
4347 } 4477 }
4348} 4478}
@@ -4410,58 +4540,164 @@ edp_detect(struct intel_dp *intel_dp)
4410 return status; 4540 return status;
4411} 4541}
4412 4542
4413static enum drm_connector_status 4543static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4414ironlake_dp_detect(struct intel_dp *intel_dp) 4544 struct intel_digital_port *port)
4415{ 4545{
4416 struct drm_device *dev = intel_dp_to_dev(intel_dp); 4546 u32 bit;
4417 struct drm_i915_private *dev_priv = dev->dev_private;
4418 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4419 4547
4420 if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) 4548 switch (port->port) {
4421 return connector_status_disconnected; 4549 case PORT_A:
4550 return true;
4551 case PORT_B:
4552 bit = SDE_PORTB_HOTPLUG;
4553 break;
4554 case PORT_C:
4555 bit = SDE_PORTC_HOTPLUG;
4556 break;
4557 case PORT_D:
4558 bit = SDE_PORTD_HOTPLUG;
4559 break;
4560 default:
4561 MISSING_CASE(port->port);
4562 return false;
4563 }
4422 4564
4423 return intel_dp_detect_dpcd(intel_dp); 4565 return I915_READ(SDEISR) & bit;
4566}
4567
4568static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4569 struct intel_digital_port *port)
4570{
4571 u32 bit;
4572
4573 switch (port->port) {
4574 case PORT_A:
4575 return true;
4576 case PORT_B:
4577 bit = SDE_PORTB_HOTPLUG_CPT;
4578 break;
4579 case PORT_C:
4580 bit = SDE_PORTC_HOTPLUG_CPT;
4581 break;
4582 case PORT_D:
4583 bit = SDE_PORTD_HOTPLUG_CPT;
4584 break;
4585 case PORT_E:
4586 bit = SDE_PORTE_HOTPLUG_SPT;
4587 break;
4588 default:
4589 MISSING_CASE(port->port);
4590 return false;
4591 }
4592
4593 return I915_READ(SDEISR) & bit;
4594}
4595
4596static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4597 struct intel_digital_port *port)
4598{
4599 u32 bit;
4600
4601 switch (port->port) {
4602 case PORT_B:
4603 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4604 break;
4605 case PORT_C:
4606 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4607 break;
4608 case PORT_D:
4609 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4610 break;
4611 default:
4612 MISSING_CASE(port->port);
4613 return false;
4614 }
4615
4616 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4424} 4617}
4425 4618
4426static int g4x_digital_port_connected(struct drm_device *dev, 4619static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4620 struct intel_digital_port *port)
4621{
4622 u32 bit;
4623
4624 switch (port->port) {
4625 case PORT_B:
4626 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4627 break;
4628 case PORT_C:
4629 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4630 break;
4631 case PORT_D:
4632 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4633 break;
4634 default:
4635 MISSING_CASE(port->port);
4636 return false;
4637 }
4638
4639 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4640}
4641
4642static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4427 struct intel_digital_port *intel_dig_port) 4643 struct intel_digital_port *intel_dig_port)
4428{ 4644{
4429 struct drm_i915_private *dev_priv = dev->dev_private; 4645 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4430 uint32_t bit; 4646 enum port port;
4647 u32 bit;
4431 4648
4432 if (IS_VALLEYVIEW(dev)) { 4649 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4433 switch (intel_dig_port->port) { 4650 switch (port) {
4434 case PORT_B: 4651 case PORT_A:
4435 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV; 4652 bit = BXT_DE_PORT_HP_DDIA;
4436 break; 4653 break;
4437 case PORT_C: 4654 case PORT_B:
4438 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV; 4655 bit = BXT_DE_PORT_HP_DDIB;
4439 break; 4656 break;
4440 case PORT_D: 4657 case PORT_C:
4441 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV; 4658 bit = BXT_DE_PORT_HP_DDIC;
4442 break; 4659 break;
4443 default: 4660 default:
4444 return -EINVAL; 4661 MISSING_CASE(port);
4445 } 4662 return false;
4446 } else {
4447 switch (intel_dig_port->port) {
4448 case PORT_B:
4449 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4450 break;
4451 case PORT_C:
4452 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4453 break;
4454 case PORT_D:
4455 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4456 break;
4457 default:
4458 return -EINVAL;
4459 }
4460 } 4663 }
4461 4664
4462 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) 4665 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4463 return 0; 4666}
4464 return 1; 4667
4668/*
4669 * intel_digital_port_connected - is the specified port connected?
4670 * @dev_priv: i915 private structure
4671 * @port: the port to test
4672 *
4673 * Return %true if @port is connected, %false otherwise.
4674 */
4675bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4676 struct intel_digital_port *port)
4677{
4678 if (HAS_PCH_IBX(dev_priv))
4679 return ibx_digital_port_connected(dev_priv, port);
4680 if (HAS_PCH_SPLIT(dev_priv))
4681 return cpt_digital_port_connected(dev_priv, port);
4682 else if (IS_BROXTON(dev_priv))
4683 return bxt_digital_port_connected(dev_priv, port);
4684 else if (IS_VALLEYVIEW(dev_priv))
4685 return vlv_digital_port_connected(dev_priv, port);
4686 else
4687 return g4x_digital_port_connected(dev_priv, port);
4688}
4689
4690static enum drm_connector_status
4691ironlake_dp_detect(struct intel_dp *intel_dp)
4692{
4693 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4694 struct drm_i915_private *dev_priv = dev->dev_private;
4695 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4696
4697 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4698 return connector_status_disconnected;
4699
4700 return intel_dp_detect_dpcd(intel_dp);
4465} 4701}
4466 4702
4467static enum drm_connector_status 4703static enum drm_connector_status
@@ -4469,7 +4705,6 @@ g4x_dp_detect(struct intel_dp *intel_dp)
4469{ 4705{
4470 struct drm_device *dev = intel_dp_to_dev(intel_dp); 4706 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4471 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4707 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4472 int ret;
4473 4708
4474 /* Can't disconnect eDP, but you can close the lid... */ 4709 /* Can't disconnect eDP, but you can close the lid... */
4475 if (is_edp(intel_dp)) { 4710 if (is_edp(intel_dp)) {
@@ -4481,10 +4716,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
4481 return status; 4716 return status;
4482 } 4717 }
4483 4718
4484 ret = g4x_digital_port_connected(dev, intel_dig_port); 4719 if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
4485 if (ret == -EINVAL)
4486 return connector_status_unknown;
4487 else if (ret == 0)
4488 return connector_status_disconnected; 4720 return connector_status_disconnected;
4489 4721
4490 return intel_dp_detect_dpcd(intel_dp); 4722 return intel_dp_detect_dpcd(intel_dp);
@@ -4728,7 +4960,7 @@ intel_dp_set_property(struct drm_connector *connector,
4728 4960
4729 if (property == dev_priv->broadcast_rgb_property) { 4961 if (property == dev_priv->broadcast_rgb_property) {
4730 bool old_auto = intel_dp->color_range_auto; 4962 bool old_auto = intel_dp->color_range_auto;
4731 uint32_t old_range = intel_dp->color_range; 4963 bool old_range = intel_dp->limited_color_range;
4732 4964
4733 switch (val) { 4965 switch (val) {
4734 case INTEL_BROADCAST_RGB_AUTO: 4966 case INTEL_BROADCAST_RGB_AUTO:
@@ -4736,18 +4968,18 @@ intel_dp_set_property(struct drm_connector *connector,
4736 break; 4968 break;
4737 case INTEL_BROADCAST_RGB_FULL: 4969 case INTEL_BROADCAST_RGB_FULL:
4738 intel_dp->color_range_auto = false; 4970 intel_dp->color_range_auto = false;
4739 intel_dp->color_range = 0; 4971 intel_dp->limited_color_range = false;
4740 break; 4972 break;
4741 case INTEL_BROADCAST_RGB_LIMITED: 4973 case INTEL_BROADCAST_RGB_LIMITED:
4742 intel_dp->color_range_auto = false; 4974 intel_dp->color_range_auto = false;
4743 intel_dp->color_range = DP_COLOR_RANGE_16_235; 4975 intel_dp->limited_color_range = true;
4744 break; 4976 break;
4745 default: 4977 default:
4746 return -EINVAL; 4978 return -EINVAL;
4747 } 4979 }
4748 4980
4749 if (old_auto == intel_dp->color_range_auto && 4981 if (old_auto == intel_dp->color_range_auto &&
4750 old_range == intel_dp->color_range) 4982 old_range == intel_dp->limited_color_range)
4751 return 0; 4983 return 0;
4752 4984
4753 goto done; 4985 goto done;
@@ -4947,13 +5179,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4947 /* indicate that we need to restart link training */ 5179 /* indicate that we need to restart link training */
4948 intel_dp->train_set_valid = false; 5180 intel_dp->train_set_valid = false;
4949 5181
4950 if (HAS_PCH_SPLIT(dev)) { 5182 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4951 if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) 5183 goto mst_fail;
4952 goto mst_fail;
4953 } else {
4954 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4955 goto mst_fail;
4956 }
4957 5184
4958 if (!intel_dp_get_dpcd(intel_dp)) { 5185 if (!intel_dp_get_dpcd(intel_dp)) {
4959 goto mst_fail; 5186 goto mst_fail;
@@ -5028,6 +5255,13 @@ bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5028 [PORT_E] = DVO_PORT_DPE, 5255 [PORT_E] = DVO_PORT_DPE,
5029 }; 5256 };
5030 5257
5258 /*
5259 * eDP not supported on g4x. so bail out early just
5260 * for a bit extra safety in case the VBT is bonkers.
5261 */
5262 if (INTEL_INFO(dev)->gen < 5)
5263 return false;
5264
5031 if (port == PORT_A) 5265 if (port == PORT_A)
5032 return true; 5266 return true;
5033 5267
@@ -5302,7 +5536,6 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5302 struct intel_dp *intel_dp = dev_priv->drrs.dp; 5536 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5303 struct intel_crtc_state *config = NULL; 5537 struct intel_crtc_state *config = NULL;
5304 struct intel_crtc *intel_crtc = NULL; 5538 struct intel_crtc *intel_crtc = NULL;
5305 u32 reg, val;
5306 enum drrs_refresh_rate_type index = DRRS_HIGH_RR; 5539 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5307 5540
5308 if (refresh_rate <= 0) { 5541 if (refresh_rate <= 0) {
@@ -5364,9 +5597,10 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5364 DRM_ERROR("Unsupported refreshrate type\n"); 5597 DRM_ERROR("Unsupported refreshrate type\n");
5365 } 5598 }
5366 } else if (INTEL_INFO(dev)->gen > 6) { 5599 } else if (INTEL_INFO(dev)->gen > 6) {
5367 reg = PIPECONF(intel_crtc->config->cpu_transcoder); 5600 u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5368 val = I915_READ(reg); 5601 u32 val;
5369 5602
5603 val = I915_READ(reg);
5370 if (index > DRRS_HIGH_RR) { 5604 if (index > DRRS_HIGH_RR) {
5371 if (IS_VALLEYVIEW(dev)) 5605 if (IS_VALLEYVIEW(dev))
5372 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; 5606 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
@@ -5765,7 +5999,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5765 } 5999 }
5766 6000
5767 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); 6001 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5768 intel_connector->panel.backlight_power = intel_edp_backlight_power; 6002 intel_connector->panel.backlight.power = intel_edp_backlight_power;
5769 intel_panel_setup_backlight(connector, pipe); 6003 intel_panel_setup_backlight(connector, pipe);
5770 6004
5771 return true; 6005 return true;
@@ -5853,6 +6087,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5853 break; 6087 break;
5854 case PORT_B: 6088 case PORT_B:
5855 intel_encoder->hpd_pin = HPD_PORT_B; 6089 intel_encoder->hpd_pin = HPD_PORT_B;
6090 if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
6091 intel_encoder->hpd_pin = HPD_PORT_A;
5856 break; 6092 break;
5857 case PORT_C: 6093 case PORT_C:
5858 intel_encoder->hpd_pin = HPD_PORT_C; 6094 intel_encoder->hpd_pin = HPD_PORT_C;
@@ -5932,10 +6168,8 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5932 return; 6168 return;
5933 6169
5934 intel_connector = intel_connector_alloc(); 6170 intel_connector = intel_connector_alloc();
5935 if (!intel_connector) { 6171 if (!intel_connector)
5936 kfree(intel_dig_port); 6172 goto err_connector_alloc;
5937 return;
5938 }
5939 6173
5940 intel_encoder = &intel_dig_port->base; 6174 intel_encoder = &intel_dig_port->base;
5941 encoder = &intel_encoder->base; 6175 encoder = &intel_encoder->base;
@@ -5953,6 +6187,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5953 intel_encoder->pre_enable = chv_pre_enable_dp; 6187 intel_encoder->pre_enable = chv_pre_enable_dp;
5954 intel_encoder->enable = vlv_enable_dp; 6188 intel_encoder->enable = vlv_enable_dp;
5955 intel_encoder->post_disable = chv_post_disable_dp; 6189 intel_encoder->post_disable = chv_post_disable_dp;
6190 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
5956 } else if (IS_VALLEYVIEW(dev)) { 6191 } else if (IS_VALLEYVIEW(dev)) {
5957 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable; 6192 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5958 intel_encoder->pre_enable = vlv_pre_enable_dp; 6193 intel_encoder->pre_enable = vlv_pre_enable_dp;
@@ -5982,11 +6217,18 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5982 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; 6217 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5983 dev_priv->hotplug.irq_port[port] = intel_dig_port; 6218 dev_priv->hotplug.irq_port[port] = intel_dig_port;
5984 6219
5985 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) { 6220 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
5986 drm_encoder_cleanup(encoder); 6221 goto err_init_connector;
5987 kfree(intel_dig_port); 6222
5988 kfree(intel_connector); 6223 return;
5989 } 6224
6225err_init_connector:
6226 drm_encoder_cleanup(encoder);
6227 kfree(intel_connector);
6228err_connector_alloc:
6229 kfree(intel_dig_port);
6230
6231 return;
5990} 6232}
5991 6233
5992void intel_dp_mst_suspend(struct drm_device *dev) 6234void intel_dp_mst_suspend(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 6ade06888432..0639275fc471 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -39,8 +39,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
39 struct intel_dp *intel_dp = &intel_dig_port->dp; 39 struct intel_dp *intel_dp = &intel_dig_port->dp;
40 struct drm_atomic_state *state; 40 struct drm_atomic_state *state;
41 int bpp, i; 41 int bpp, i;
42 int lane_count, slots, rate; 42 int lane_count, slots;
43 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 43 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
44 struct drm_connector *drm_connector; 44 struct drm_connector *drm_connector;
45 struct intel_connector *connector, *found = NULL; 45 struct intel_connector *connector, *found = NULL;
46 struct drm_connector_state *connector_state; 46 struct drm_connector_state *connector_state;
@@ -56,20 +56,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
56 */ 56 */
57 lane_count = drm_dp_max_lane_count(intel_dp->dpcd); 57 lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
58 58
59 rate = intel_dp_max_link_rate(intel_dp);
60 59
61 if (intel_dp->num_sink_rates) { 60 pipe_config->lane_count = lane_count;
62 intel_dp->link_bw = 0;
63 intel_dp->rate_select = intel_dp_rate_select(intel_dp, rate);
64 } else {
65 intel_dp->link_bw = drm_dp_link_rate_to_bw_code(rate);
66 intel_dp->rate_select = 0;
67 }
68
69 intel_dp->lane_count = lane_count;
70 61
71 pipe_config->pipe_bpp = 24; 62 pipe_config->pipe_bpp = 24;
72 pipe_config->port_clock = rate; 63 pipe_config->port_clock = intel_dp_max_link_rate(intel_dp);
73 64
74 state = pipe_config->base.state; 65 state = pipe_config->base.state;
75 66
@@ -87,7 +78,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
87 return false; 78 return false;
88 } 79 }
89 80
90 mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp); 81 mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
91 82
92 pipe_config->pbn = mst_pbn; 83 pipe_config->pbn = mst_pbn;
93 slots = drm_dp_find_vcpi_slots(&intel_dp->mst_mgr, mst_pbn); 84 slots = drm_dp_find_vcpi_slots(&intel_dp->mst_mgr, mst_pbn);
@@ -184,6 +175,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
184 if (intel_dp->active_mst_links == 0) { 175 if (intel_dp->active_mst_links == 0) {
185 enum port port = intel_ddi_get_encoder_port(encoder); 176 enum port port = intel_ddi_get_encoder_port(encoder);
186 177
178 intel_dp_set_link_params(intel_dp, intel_crtc->config);
179
187 /* FIXME: add support for SKL */ 180 /* FIXME: add support for SKL */
188 if (INTEL_INFO(dev)->gen < 9) 181 if (INTEL_INFO(dev)->gen < 9)
189 I915_WRITE(PORT_CLK_SEL(port), 182 I915_WRITE(PORT_CLK_SEL(port),
@@ -195,7 +188,6 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
195 188
196 189
197 intel_dp_start_link_train(intel_dp); 190 intel_dp_start_link_train(intel_dp);
198 intel_dp_complete_link_train(intel_dp);
199 intel_dp_stop_link_train(intel_dp); 191 intel_dp_stop_link_train(intel_dp);
200 } 192 }
201 193
@@ -286,6 +278,10 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
286 break; 278 break;
287 } 279 }
288 pipe_config->base.adjusted_mode.flags |= flags; 280 pipe_config->base.adjusted_mode.flags |= flags;
281
282 pipe_config->lane_count =
283 ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
284
289 intel_dp_get_m_n(crtc, pipe_config); 285 intel_dp_get_m_n(crtc, pipe_config);
290 286
291 intel_ddi_clock_get(&intel_dig_port->base, pipe_config); 287 intel_ddi_clock_get(&intel_dig_port->base, pipe_config);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 2b9e6f9775c5..0598932ce623 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -142,6 +142,7 @@ struct intel_encoder {
142 void (*mode_set)(struct intel_encoder *intel_encoder); 142 void (*mode_set)(struct intel_encoder *intel_encoder);
143 void (*disable)(struct intel_encoder *); 143 void (*disable)(struct intel_encoder *);
144 void (*post_disable)(struct intel_encoder *); 144 void (*post_disable)(struct intel_encoder *);
145 void (*post_pll_disable)(struct intel_encoder *);
145 /* Read out the current hw state of this connector, returning true if 146 /* Read out the current hw state of this connector, returning true if
146 * the encoder is active. If the encoder is enabled it also set the pipe 147 * the encoder is active. If the encoder is enabled it also set the pipe
147 * it is connected to in the pipe parameter. */ 148 * it is connected to in the pipe parameter. */
@@ -178,12 +179,22 @@ struct intel_panel {
178 bool active_low_pwm; 179 bool active_low_pwm;
179 180
180 /* PWM chip */ 181 /* PWM chip */
182 bool util_pin_active_low; /* bxt+ */
183 u8 controller; /* bxt+ only */
181 struct pwm_device *pwm; 184 struct pwm_device *pwm;
182 185
183 struct backlight_device *device; 186 struct backlight_device *device;
184 } backlight;
185 187
186 void (*backlight_power)(struct intel_connector *, bool enable); 188 /* Connector and platform specific backlight functions */
189 int (*setup)(struct intel_connector *connector, enum pipe pipe);
190 uint32_t (*get)(struct intel_connector *connector);
191 void (*set)(struct intel_connector *connector, uint32_t level);
192 void (*disable)(struct intel_connector *connector);
193 void (*enable)(struct intel_connector *connector);
194 uint32_t (*hz_to_pwm)(struct intel_connector *connector,
195 uint32_t hz);
196 void (*power)(struct intel_connector *, bool enable);
197 } backlight;
187}; 198};
188 199
189struct intel_connector { 200struct intel_connector {
@@ -337,6 +348,8 @@ struct intel_crtc_state {
337#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ 348#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
338 unsigned long quirks; 349 unsigned long quirks;
339 350
351 bool update_pipe;
352
340 /* Pipe source size (ie. panel fitter input size) 353 /* Pipe source size (ie. panel fitter input size)
341 * All planes will be positioned inside this space, 354 * All planes will be positioned inside this space,
342 * and get clipped at the edges. */ 355 * and get clipped at the edges. */
@@ -423,6 +436,8 @@ struct intel_crtc_state {
423 /* Used by SDVO (and if we ever fix it, HDMI). */ 436 /* Used by SDVO (and if we ever fix it, HDMI). */
424 unsigned pixel_multiplier; 437 unsigned pixel_multiplier;
425 438
439 uint8_t lane_count;
440
426 /* Panel fitter controls for gen2-gen4 + VLV */ 441 /* Panel fitter controls for gen2-gen4 + VLV */
427 struct { 442 struct {
428 u32 control; 443 u32 control;
@@ -532,6 +547,8 @@ struct intel_crtc {
532 * gen4+ this only adjusts up to a tile, offsets within a tile are 547 * gen4+ this only adjusts up to a tile, offsets within a tile are
533 * handled in the hw itself (with the TILEOFF register). */ 548 * handled in the hw itself (with the TILEOFF register). */
534 unsigned long dspaddr_offset; 549 unsigned long dspaddr_offset;
550 int adjusted_x;
551 int adjusted_y;
535 552
536 struct drm_i915_gem_object *cursor_bo; 553 struct drm_i915_gem_object *cursor_bo;
537 uint32_t cursor_addr; 554 uint32_t cursor_addr;
@@ -560,7 +577,13 @@ struct intel_crtc {
560 577
561 int scanline_offset; 578 int scanline_offset;
562 579
563 unsigned start_vbl_count; 580 struct {
581 unsigned start_vbl_count;
582 ktime_t start_vbl_time;
583 int min_vbl, max_vbl;
584 int scanline_start;
585 } debug;
586
564 struct intel_crtc_atomic_commit atomic; 587 struct intel_crtc_atomic_commit atomic;
565 588
566 /* scalers available on this crtc */ 589 /* scalers available on this crtc */
@@ -657,19 +680,20 @@ struct cxsr_latency {
657struct intel_hdmi { 680struct intel_hdmi {
658 u32 hdmi_reg; 681 u32 hdmi_reg;
659 int ddc_bus; 682 int ddc_bus;
660 uint32_t color_range; 683 bool limited_color_range;
661 bool color_range_auto; 684 bool color_range_auto;
662 bool has_hdmi_sink; 685 bool has_hdmi_sink;
663 bool has_audio; 686 bool has_audio;
664 enum hdmi_force_audio force_audio; 687 enum hdmi_force_audio force_audio;
665 bool rgb_quant_range_selectable; 688 bool rgb_quant_range_selectable;
666 enum hdmi_picture_aspect aspect_ratio; 689 enum hdmi_picture_aspect aspect_ratio;
690 struct intel_connector *attached_connector;
667 void (*write_infoframe)(struct drm_encoder *encoder, 691 void (*write_infoframe)(struct drm_encoder *encoder,
668 enum hdmi_infoframe_type type, 692 enum hdmi_infoframe_type type,
669 const void *frame, ssize_t len); 693 const void *frame, ssize_t len);
670 void (*set_infoframes)(struct drm_encoder *encoder, 694 void (*set_infoframes)(struct drm_encoder *encoder,
671 bool enable, 695 bool enable,
672 struct drm_display_mode *adjusted_mode); 696 const struct drm_display_mode *adjusted_mode);
673 bool (*infoframe_enabled)(struct drm_encoder *encoder); 697 bool (*infoframe_enabled)(struct drm_encoder *encoder);
674}; 698};
675 699
@@ -696,23 +720,29 @@ enum link_m_n_set {
696 M2_N2 720 M2_N2
697}; 721};
698 722
723struct sink_crc {
724 bool started;
725 u8 last_crc[6];
726 int last_count;
727};
728
699struct intel_dp { 729struct intel_dp {
700 uint32_t output_reg; 730 uint32_t output_reg;
701 uint32_t aux_ch_ctl_reg; 731 uint32_t aux_ch_ctl_reg;
702 uint32_t DP; 732 uint32_t DP;
733 int link_rate;
734 uint8_t lane_count;
703 bool has_audio; 735 bool has_audio;
704 enum hdmi_force_audio force_audio; 736 enum hdmi_force_audio force_audio;
705 uint32_t color_range; 737 bool limited_color_range;
706 bool color_range_auto; 738 bool color_range_auto;
707 uint8_t link_bw;
708 uint8_t rate_select;
709 uint8_t lane_count;
710 uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; 739 uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
711 uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; 740 uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
712 uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; 741 uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
713 /* sink rates as reported by DP_SUPPORTED_LINK_RATES */ 742 /* sink rates as reported by DP_SUPPORTED_LINK_RATES */
714 uint8_t num_sink_rates; 743 uint8_t num_sink_rates;
715 int sink_rates[DP_MAX_SUPPORTED_RATES]; 744 int sink_rates[DP_MAX_SUPPORTED_RATES];
745 struct sink_crc sink_crc;
716 struct drm_dp_aux aux; 746 struct drm_dp_aux aux;
717 uint8_t train_set[4]; 747 uint8_t train_set[4];
718 int panel_power_up_delay; 748 int panel_power_up_delay;
@@ -735,7 +765,6 @@ struct intel_dp {
735 enum pipe pps_pipe; 765 enum pipe pps_pipe;
736 struct edp_power_seq pps_delays; 766 struct edp_power_seq pps_delays;
737 767
738 bool use_tps3;
739 bool can_mst; /* this port supports mst */ 768 bool can_mst; /* this port supports mst */
740 bool is_mst; 769 bool is_mst;
741 int active_mst_links; 770 int active_mst_links;
@@ -770,6 +799,7 @@ struct intel_digital_port {
770 struct intel_dp dp; 799 struct intel_dp dp;
771 struct intel_hdmi hdmi; 800 struct intel_hdmi hdmi;
772 enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool); 801 enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
802 bool release_cl2_override;
773}; 803};
774 804
775struct intel_dp_mst_encoder { 805struct intel_dp_mst_encoder {
@@ -779,7 +809,7 @@ struct intel_dp_mst_encoder {
779 void *port; /* store this opaque as its illegal to dereference it */ 809 void *port; /* store this opaque as its illegal to dereference it */
780}; 810};
781 811
782static inline int 812static inline enum dpio_channel
783vlv_dport_to_channel(struct intel_digital_port *dport) 813vlv_dport_to_channel(struct intel_digital_port *dport)
784{ 814{
785 switch (dport->port) { 815 switch (dport->port) {
@@ -793,7 +823,21 @@ vlv_dport_to_channel(struct intel_digital_port *dport)
793 } 823 }
794} 824}
795 825
796static inline int 826static inline enum dpio_phy
827vlv_dport_to_phy(struct intel_digital_port *dport)
828{
829 switch (dport->port) {
830 case PORT_B:
831 case PORT_C:
832 return DPIO_PHY0;
833 case PORT_D:
834 return DPIO_PHY1;
835 default:
836 BUG();
837 }
838}
839
840static inline enum dpio_channel
797vlv_pipe_to_channel(enum pipe pipe) 841vlv_pipe_to_channel(enum pipe pipe)
798{ 842{
799 switch (pipe) { 843 switch (pipe) {
@@ -834,8 +878,8 @@ struct intel_unpin_work {
834 u32 flip_count; 878 u32 flip_count;
835 u32 gtt_offset; 879 u32 gtt_offset;
836 struct drm_i915_gem_request *flip_queued_req; 880 struct drm_i915_gem_request *flip_queued_req;
837 int flip_queued_vblank; 881 u32 flip_queued_vblank;
838 int flip_ready_vblank; 882 u32 flip_ready_vblank;
839 bool enable_stall_check; 883 bool enable_stall_check;
840}; 884};
841 885
@@ -987,6 +1031,7 @@ void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
987extern const struct drm_plane_funcs intel_plane_funcs; 1031extern const struct drm_plane_funcs intel_plane_funcs;
988bool intel_has_pending_fb_unpin(struct drm_device *dev); 1032bool intel_has_pending_fb_unpin(struct drm_device *dev);
989int intel_pch_rawclk(struct drm_device *dev); 1033int intel_pch_rawclk(struct drm_device *dev);
1034int intel_hrawclk(struct drm_device *dev);
990void intel_mark_busy(struct drm_device *dev); 1035void intel_mark_busy(struct drm_device *dev);
991void intel_mark_idle(struct drm_device *dev); 1036void intel_mark_idle(struct drm_device *dev);
992void intel_crtc_restore_mode(struct drm_crtc *crtc); 1037void intel_crtc_restore_mode(struct drm_crtc *crtc);
@@ -995,8 +1040,6 @@ void intel_encoder_destroy(struct drm_encoder *encoder);
995int intel_connector_init(struct intel_connector *); 1040int intel_connector_init(struct intel_connector *);
996struct intel_connector *intel_connector_alloc(void); 1041struct intel_connector *intel_connector_alloc(void);
997bool intel_connector_get_hw_state(struct intel_connector *connector); 1042bool intel_connector_get_hw_state(struct intel_connector *connector);
998bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
999 struct intel_digital_port *port);
1000void intel_connector_attach_encoder(struct intel_connector *connector, 1043void intel_connector_attach_encoder(struct intel_connector *connector,
1001 struct intel_encoder *encoder); 1044 struct intel_encoder *encoder);
1002struct drm_encoder *intel_best_encoder(struct drm_connector *connector); 1045struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@ -1038,10 +1081,8 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe);
1038void intel_finish_page_flip_plane(struct drm_device *dev, int plane); 1081void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
1039void intel_check_page_flip(struct drm_device *dev, int pipe); 1082void intel_check_page_flip(struct drm_device *dev, int pipe);
1040int intel_prepare_plane_fb(struct drm_plane *plane, 1083int intel_prepare_plane_fb(struct drm_plane *plane,
1041 struct drm_framebuffer *fb,
1042 const struct drm_plane_state *new_state); 1084 const struct drm_plane_state *new_state);
1043void intel_cleanup_plane_fb(struct drm_plane *plane, 1085void intel_cleanup_plane_fb(struct drm_plane *plane,
1044 struct drm_framebuffer *fb,
1045 const struct drm_plane_state *old_state); 1086 const struct drm_plane_state *old_state);
1046int intel_plane_atomic_get_property(struct drm_plane *plane, 1087int intel_plane_atomic_get_property(struct drm_plane *plane,
1047 const struct drm_plane_state *state, 1088 const struct drm_plane_state *state,
@@ -1056,7 +1097,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
1056 1097
1057unsigned int 1098unsigned int
1058intel_tile_height(struct drm_device *dev, uint32_t pixel_format, 1099intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
1059 uint64_t fb_format_modifier); 1100 uint64_t fb_format_modifier, unsigned int plane);
1060 1101
1061static inline bool 1102static inline bool
1062intel_rotation_90_or_270(unsigned int rotation) 1103intel_rotation_90_or_270(unsigned int rotation)
@@ -1137,7 +1178,9 @@ int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
1137int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); 1178int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
1138 1179
1139unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane, 1180unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
1140 struct drm_i915_gem_object *obj); 1181 struct drm_i915_gem_object *obj,
1182 unsigned int plane);
1183
1141u32 skl_plane_ctl_format(uint32_t pixel_format); 1184u32 skl_plane_ctl_format(uint32_t pixel_format);
1142u32 skl_plane_ctl_tiling(uint64_t fb_modifier); 1185u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
1143u32 skl_plane_ctl_rotation(unsigned int rotation); 1186u32 skl_plane_ctl_rotation(unsigned int rotation);
@@ -1155,8 +1198,9 @@ void assert_csr_loaded(struct drm_i915_private *dev_priv);
1155void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); 1198void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
1156bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 1199bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
1157 struct intel_connector *intel_connector); 1200 struct intel_connector *intel_connector);
1201void intel_dp_set_link_params(struct intel_dp *intel_dp,
1202 const struct intel_crtc_state *pipe_config);
1158void intel_dp_start_link_train(struct intel_dp *intel_dp); 1203void intel_dp_start_link_train(struct intel_dp *intel_dp);
1159void intel_dp_complete_link_train(struct intel_dp *intel_dp);
1160void intel_dp_stop_link_train(struct intel_dp *intel_dp); 1204void intel_dp_stop_link_train(struct intel_dp *intel_dp);
1161void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); 1205void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
1162void intel_dp_encoder_destroy(struct drm_encoder *encoder); 1206void intel_dp_encoder_destroy(struct drm_encoder *encoder);
@@ -1185,6 +1229,8 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp);
1185void intel_edp_drrs_invalidate(struct drm_device *dev, 1229void intel_edp_drrs_invalidate(struct drm_device *dev,
1186 unsigned frontbuffer_bits); 1230 unsigned frontbuffer_bits);
1187void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits); 1231void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
1232bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
1233 struct intel_digital_port *port);
1188void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config); 1234void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config);
1189 1235
1190/* intel_dp_mst.c */ 1236/* intel_dp_mst.c */
@@ -1263,6 +1309,7 @@ int intel_connector_update_modes(struct drm_connector *connector,
1263int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); 1309int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
1264void intel_attach_force_audio_property(struct drm_connector *connector); 1310void intel_attach_force_audio_property(struct drm_connector *connector);
1265void intel_attach_broadcast_rgb_property(struct drm_connector *connector); 1311void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
1312void intel_attach_aspect_ratio_property(struct drm_connector *connector);
1266 1313
1267 1314
1268/* intel_overlay.c */ 1315/* intel_overlay.c */
@@ -1295,7 +1342,6 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
1295void intel_panel_enable_backlight(struct intel_connector *connector); 1342void intel_panel_enable_backlight(struct intel_connector *connector);
1296void intel_panel_disable_backlight(struct intel_connector *connector); 1343void intel_panel_disable_backlight(struct intel_connector *connector);
1297void intel_panel_destroy_backlight(struct drm_connector *connector); 1344void intel_panel_destroy_backlight(struct drm_connector *connector);
1298void intel_panel_init_backlight_funcs(struct drm_device *dev);
1299enum drm_connector_status intel_panel_detect(struct drm_device *dev); 1345enum drm_connector_status intel_panel_detect(struct drm_device *dev);
1300extern struct drm_display_mode *intel_find_panel_downclock( 1346extern struct drm_display_mode *intel_find_panel_downclock(
1301 struct drm_device *dev, 1347 struct drm_device *dev,
@@ -1339,6 +1385,12 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
1339 1385
1340void intel_display_set_init_power(struct drm_i915_private *dev, bool enable); 1386void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
1341 1387
1388void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1389 bool override, unsigned int mask);
1390bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1391 enum dpio_channel ch, bool override);
1392
1393
1342/* intel_pm.c */ 1394/* intel_pm.c */
1343void intel_init_clock_gating(struct drm_device *dev); 1395void intel_init_clock_gating(struct drm_device *dev);
1344void intel_suspend_hw(struct drm_device *dev); 1396void intel_suspend_hw(struct drm_device *dev);
@@ -1384,9 +1436,8 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
1384int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane); 1436int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
1385int intel_sprite_set_colorkey(struct drm_device *dev, void *data, 1437int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
1386 struct drm_file *file_priv); 1438 struct drm_file *file_priv);
1387void intel_pipe_update_start(struct intel_crtc *crtc, 1439void intel_pipe_update_start(struct intel_crtc *crtc);
1388 uint32_t *start_vbl_count); 1440void intel_pipe_update_end(struct intel_crtc *crtc);
1389void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
1390 1441
1391/* intel_tv.c */ 1442/* intel_tv.c */
1392void intel_tv_init(struct drm_device *dev); 1443void intel_tv_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 32a6c7184ca4..170ae6f4866e 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -282,58 +282,46 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
282 return true; 282 return true;
283} 283}
284 284
285static void intel_dsi_port_enable(struct intel_encoder *encoder) 285static void bxt_dsi_device_ready(struct intel_encoder *encoder)
286{ 286{
287 struct drm_device *dev = encoder->base.dev; 287 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
288 struct drm_i915_private *dev_priv = dev->dev_private;
289 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
290 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 288 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
291 enum port port; 289 enum port port;
292 u32 temp; 290 u32 val;
293 291
294 if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { 292 DRM_DEBUG_KMS("\n");
295 temp = I915_READ(VLV_CHICKEN_3);
296 temp &= ~PIXEL_OVERLAP_CNT_MASK |
297 intel_dsi->pixel_overlap <<
298 PIXEL_OVERLAP_CNT_SHIFT;
299 I915_WRITE(VLV_CHICKEN_3, temp);
300 }
301 293
294 /* Exit Low power state in 4 steps*/
302 for_each_dsi_port(port, intel_dsi->ports) { 295 for_each_dsi_port(port, intel_dsi->ports) {
303 temp = I915_READ(MIPI_PORT_CTRL(port));
304 temp &= ~LANE_CONFIGURATION_MASK;
305 temp &= ~DUAL_LINK_MODE_MASK;
306 296
307 if (intel_dsi->ports == ((1 << PORT_A) | (1 << PORT_C))) { 297 /* 1. Enable MIPI PHY transparent latch */
308 temp |= (intel_dsi->dual_link - 1) 298 val = I915_READ(BXT_MIPI_PORT_CTRL(port));
309 << DUAL_LINK_MODE_SHIFT; 299 I915_WRITE(BXT_MIPI_PORT_CTRL(port), val | LP_OUTPUT_HOLD);
310 temp |= intel_crtc->pipe ? 300 usleep_range(2000, 2500);
311 LANE_CONFIGURATION_DUAL_LINK_B :
312 LANE_CONFIGURATION_DUAL_LINK_A;
313 }
314 /* assert ip_tg_enable signal */
315 I915_WRITE(MIPI_PORT_CTRL(port), temp | DPI_ENABLE);
316 POSTING_READ(MIPI_PORT_CTRL(port));
317 }
318}
319 301
320static void intel_dsi_port_disable(struct intel_encoder *encoder) 302 /* 2. Enter ULPS */
321{ 303 val = I915_READ(MIPI_DEVICE_READY(port));
322 struct drm_device *dev = encoder->base.dev; 304 val &= ~ULPS_STATE_MASK;
323 struct drm_i915_private *dev_priv = dev->dev_private; 305 val |= (ULPS_STATE_ENTER | DEVICE_READY);
324 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 306 I915_WRITE(MIPI_DEVICE_READY(port), val);
325 enum port port; 307 usleep_range(2, 3);
326 u32 temp; 308
309 /* 3. Exit ULPS */
310 val = I915_READ(MIPI_DEVICE_READY(port));
311 val &= ~ULPS_STATE_MASK;
312 val |= (ULPS_STATE_EXIT | DEVICE_READY);
313 I915_WRITE(MIPI_DEVICE_READY(port), val);
314 usleep_range(1000, 1500);
327 315
328 for_each_dsi_port(port, intel_dsi->ports) { 316 /* Clear ULPS and set device ready */
329 /* de-assert ip_tg_enable signal */ 317 val = I915_READ(MIPI_DEVICE_READY(port));
330 temp = I915_READ(MIPI_PORT_CTRL(port)); 318 val &= ~ULPS_STATE_MASK;
331 I915_WRITE(MIPI_PORT_CTRL(port), temp & ~DPI_ENABLE); 319 val |= DEVICE_READY;
332 POSTING_READ(MIPI_PORT_CTRL(port)); 320 I915_WRITE(MIPI_DEVICE_READY(port), val);
333 } 321 }
334} 322}
335 323
336static void intel_dsi_device_ready(struct intel_encoder *encoder) 324static void vlv_dsi_device_ready(struct intel_encoder *encoder)
337{ 325{
338 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 326 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
339 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 327 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -372,6 +360,75 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
372 } 360 }
373} 361}
374 362
363static void intel_dsi_device_ready(struct intel_encoder *encoder)
364{
365 struct drm_device *dev = encoder->base.dev;
366
367 if (IS_VALLEYVIEW(dev))
368 vlv_dsi_device_ready(encoder);
369 else if (IS_BROXTON(dev))
370 bxt_dsi_device_ready(encoder);
371}
372
373static void intel_dsi_port_enable(struct intel_encoder *encoder)
374{
375 struct drm_device *dev = encoder->base.dev;
376 struct drm_i915_private *dev_priv = dev->dev_private;
377 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
378 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
379 enum port port;
380 u32 temp;
381 u32 port_ctrl;
382
383 if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
384 temp = I915_READ(VLV_CHICKEN_3);
385 temp &= ~PIXEL_OVERLAP_CNT_MASK |
386 intel_dsi->pixel_overlap <<
387 PIXEL_OVERLAP_CNT_SHIFT;
388 I915_WRITE(VLV_CHICKEN_3, temp);
389 }
390
391 for_each_dsi_port(port, intel_dsi->ports) {
392 port_ctrl = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
393 MIPI_PORT_CTRL(port);
394
395 temp = I915_READ(port_ctrl);
396
397 temp &= ~LANE_CONFIGURATION_MASK;
398 temp &= ~DUAL_LINK_MODE_MASK;
399
400 if (intel_dsi->ports == ((1 << PORT_A) | (1 << PORT_C))) {
401 temp |= (intel_dsi->dual_link - 1)
402 << DUAL_LINK_MODE_SHIFT;
403 temp |= intel_crtc->pipe ?
404 LANE_CONFIGURATION_DUAL_LINK_B :
405 LANE_CONFIGURATION_DUAL_LINK_A;
406 }
407 /* assert ip_tg_enable signal */
408 I915_WRITE(port_ctrl, temp | DPI_ENABLE);
409 POSTING_READ(port_ctrl);
410 }
411}
412
413static void intel_dsi_port_disable(struct intel_encoder *encoder)
414{
415 struct drm_device *dev = encoder->base.dev;
416 struct drm_i915_private *dev_priv = dev->dev_private;
417 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
418 enum port port;
419 u32 temp;
420 u32 port_ctrl;
421
422 for_each_dsi_port(port, intel_dsi->ports) {
423 /* de-assert ip_tg_enable signal */
424 port_ctrl = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
425 MIPI_PORT_CTRL(port);
426 temp = I915_READ(port_ctrl);
427 I915_WRITE(port_ctrl, temp & ~DPI_ENABLE);
428 POSTING_READ(port_ctrl);
429 }
430}
431
375static void intel_dsi_enable(struct intel_encoder *encoder) 432static void intel_dsi_enable(struct intel_encoder *encoder)
376{ 433{
377 struct drm_device *dev = encoder->base.dev; 434 struct drm_device *dev = encoder->base.dev;
@@ -419,19 +476,24 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
419 476
420 msleep(intel_dsi->panel_on_delay); 477 msleep(intel_dsi->panel_on_delay);
421 478
422 /* Disable DPOunit clock gating, can stall pipe 479 if (IS_VALLEYVIEW(dev)) {
423 * and we need DPLL REFA always enabled */ 480 /*
424 tmp = I915_READ(DPLL(pipe)); 481 * Disable DPOunit clock gating, can stall pipe
425 tmp |= DPLL_REF_CLK_ENABLE_VLV; 482 * and we need DPLL REFA always enabled
426 I915_WRITE(DPLL(pipe), tmp); 483 */
427 484 tmp = I915_READ(DPLL(pipe));
428 /* update the hw state for DPLL */ 485 tmp |= DPLL_REF_CLK_ENABLE_VLV;
429 intel_crtc->config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV | 486 I915_WRITE(DPLL(pipe), tmp);
430 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 487
431 488 /* update the hw state for DPLL */
432 tmp = I915_READ(DSPCLK_GATE_D); 489 intel_crtc->config->dpll_hw_state.dpll =
433 tmp |= DPOUNIT_CLOCK_GATE_DISABLE; 490 DPLL_INTEGRATED_REF_CLK_VLV |
434 I915_WRITE(DSPCLK_GATE_D, tmp); 491 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
492
493 tmp = I915_READ(DSPCLK_GATE_D);
494 tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
495 I915_WRITE(DSPCLK_GATE_D, tmp);
496 }
435 497
436 /* put device in ready state */ 498 /* put device in ready state */
437 intel_dsi_device_ready(encoder); 499 intel_dsi_device_ready(encoder);
@@ -495,12 +557,7 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
495 /* Panel commands can be sent when clock is in LP11 */ 557 /* Panel commands can be sent when clock is in LP11 */
496 I915_WRITE(MIPI_DEVICE_READY(port), 0x0); 558 I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
497 559
498 temp = I915_READ(MIPI_CTRL(port)); 560 intel_dsi_reset_clocks(encoder, port);
499 temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
500 I915_WRITE(MIPI_CTRL(port), temp |
501 intel_dsi->escape_clk_div <<
502 ESCAPE_CLOCK_DIVIDER_SHIFT);
503
504 I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP); 561 I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
505 562
506 temp = I915_READ(MIPI_DSI_FUNC_PRG(port)); 563 temp = I915_READ(MIPI_DSI_FUNC_PRG(port));
@@ -519,10 +576,12 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
519 576
520static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) 577static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
521{ 578{
579 struct drm_device *dev = encoder->base.dev;
522 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 580 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
523 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 581 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
524 enum port port; 582 enum port port;
525 u32 val; 583 u32 val;
584 u32 port_ctrl = 0;
526 585
527 DRM_DEBUG_KMS("\n"); 586 DRM_DEBUG_KMS("\n");
528 for_each_dsi_port(port, intel_dsi->ports) { 587 for_each_dsi_port(port, intel_dsi->ports) {
@@ -539,25 +598,29 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
539 ULPS_STATE_ENTER); 598 ULPS_STATE_ENTER);
540 usleep_range(2000, 2500); 599 usleep_range(2000, 2500);
541 600
601 if (IS_BROXTON(dev))
602 port_ctrl = BXT_MIPI_PORT_CTRL(port);
603 else if (IS_VALLEYVIEW(dev))
604 /* Common bit for both MIPI Port A & MIPI Port C */
605 port_ctrl = MIPI_PORT_CTRL(PORT_A);
606
542 /* Wait till Clock lanes are in LP-00 state for MIPI Port A 607 /* Wait till Clock lanes are in LP-00 state for MIPI Port A
543 * only. MIPI Port C has no similar bit for checking 608 * only. MIPI Port C has no similar bit for checking
544 */ 609 */
545 if (wait_for(((I915_READ(MIPI_PORT_CTRL(PORT_A)) & AFE_LATCHOUT) 610 if (wait_for(((I915_READ(port_ctrl) & AFE_LATCHOUT)
546 == 0x00000), 30)) 611 == 0x00000), 30))
547 DRM_ERROR("DSI LP not going Low\n"); 612 DRM_ERROR("DSI LP not going Low\n");
548 613
549 /* Disable MIPI PHY transparent latch 614 /* Disable MIPI PHY transparent latch */
550 * Common bit for both MIPI Port A & MIPI Port C 615 val = I915_READ(port_ctrl);
551 */ 616 I915_WRITE(port_ctrl, val & ~LP_OUTPUT_HOLD);
552 val = I915_READ(MIPI_PORT_CTRL(PORT_A));
553 I915_WRITE(MIPI_PORT_CTRL(PORT_A), val & ~LP_OUTPUT_HOLD);
554 usleep_range(1000, 1500); 617 usleep_range(1000, 1500);
555 618
556 I915_WRITE(MIPI_DEVICE_READY(port), 0x00); 619 I915_WRITE(MIPI_DEVICE_READY(port), 0x00);
557 usleep_range(2000, 2500); 620 usleep_range(2000, 2500);
558 } 621 }
559 622
560 vlv_disable_dsi_pll(encoder); 623 intel_disable_dsi_pll(encoder);
561} 624}
562 625
563static void intel_dsi_post_disable(struct intel_encoder *encoder) 626static void intel_dsi_post_disable(struct intel_encoder *encoder)
@@ -593,7 +656,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
593 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 656 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
594 struct drm_device *dev = encoder->base.dev; 657 struct drm_device *dev = encoder->base.dev;
595 enum intel_display_power_domain power_domain; 658 enum intel_display_power_domain power_domain;
596 u32 dpi_enabled, func; 659 u32 dpi_enabled, func, ctrl_reg;
597 enum port port; 660 enum port port;
598 661
599 DRM_DEBUG_KMS("\n"); 662 DRM_DEBUG_KMS("\n");
@@ -605,8 +668,9 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
605 /* XXX: this only works for one DSI output */ 668 /* XXX: this only works for one DSI output */
606 for_each_dsi_port(port, intel_dsi->ports) { 669 for_each_dsi_port(port, intel_dsi->ports) {
607 func = I915_READ(MIPI_DSI_FUNC_PRG(port)); 670 func = I915_READ(MIPI_DSI_FUNC_PRG(port));
608 dpi_enabled = I915_READ(MIPI_PORT_CTRL(port)) & 671 ctrl_reg = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
609 DPI_ENABLE; 672 MIPI_PORT_CTRL(port);
673 dpi_enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
610 674
611 /* Due to some hardware limitations on BYT, MIPI Port C DPI 675 /* Due to some hardware limitations on BYT, MIPI Port C DPI
612 * Enable bit does not get set. To check whether DSI Port C 676 * Enable bit does not get set. To check whether DSI Port C
@@ -631,7 +695,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
631static void intel_dsi_get_config(struct intel_encoder *encoder, 695static void intel_dsi_get_config(struct intel_encoder *encoder,
632 struct intel_crtc_state *pipe_config) 696 struct intel_crtc_state *pipe_config)
633{ 697{
634 u32 pclk; 698 u32 pclk = 0;
635 DRM_DEBUG_KMS("\n"); 699 DRM_DEBUG_KMS("\n");
636 700
637 /* 701 /*
@@ -640,7 +704,11 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
640 */ 704 */
641 pipe_config->dpll_hw_state.dpll_md = 0; 705 pipe_config->dpll_hw_state.dpll_md = 0;
642 706
643 pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp); 707 if (IS_BROXTON(encoder->base.dev))
708 pclk = bxt_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
709 else if (IS_VALLEYVIEW(encoder->base.dev))
710 pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
711
644 if (!pclk) 712 if (!pclk)
645 return; 713 return;
646 714
@@ -654,6 +722,7 @@ intel_dsi_mode_valid(struct drm_connector *connector,
654{ 722{
655 struct intel_connector *intel_connector = to_intel_connector(connector); 723 struct intel_connector *intel_connector = to_intel_connector(connector);
656 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 724 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
725 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
657 726
658 DRM_DEBUG_KMS("\n"); 727 DRM_DEBUG_KMS("\n");
659 728
@@ -667,6 +736,8 @@ intel_dsi_mode_valid(struct drm_connector *connector,
667 return MODE_PANEL; 736 return MODE_PANEL;
668 if (mode->vdisplay > fixed_mode->vdisplay) 737 if (mode->vdisplay > fixed_mode->vdisplay)
669 return MODE_PANEL; 738 return MODE_PANEL;
739 if (fixed_mode->clock > max_dotclk)
740 return MODE_CLOCK_HIGH;
670 } 741 }
671 742
672 return MODE_OK; 743 return MODE_OK;
@@ -695,7 +766,7 @@ static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
695} 766}
696 767
697static void set_dsi_timings(struct drm_encoder *encoder, 768static void set_dsi_timings(struct drm_encoder *encoder,
698 const struct drm_display_mode *mode) 769 const struct drm_display_mode *adjusted_mode)
699{ 770{
700 struct drm_device *dev = encoder->dev; 771 struct drm_device *dev = encoder->dev;
701 struct drm_i915_private *dev_priv = dev->dev_private; 772 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -707,10 +778,10 @@ static void set_dsi_timings(struct drm_encoder *encoder,
707 778
708 u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp; 779 u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
709 780
710 hactive = mode->hdisplay; 781 hactive = adjusted_mode->crtc_hdisplay;
711 hfp = mode->hsync_start - mode->hdisplay; 782 hfp = adjusted_mode->crtc_hsync_start - adjusted_mode->crtc_hdisplay;
712 hsync = mode->hsync_end - mode->hsync_start; 783 hsync = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
713 hbp = mode->htotal - mode->hsync_end; 784 hbp = adjusted_mode->crtc_htotal - adjusted_mode->crtc_hsync_end;
714 785
715 if (intel_dsi->dual_link) { 786 if (intel_dsi->dual_link) {
716 hactive /= 2; 787 hactive /= 2;
@@ -721,9 +792,9 @@ static void set_dsi_timings(struct drm_encoder *encoder,
721 hbp /= 2; 792 hbp /= 2;
722 } 793 }
723 794
724 vfp = mode->vsync_start - mode->vdisplay; 795 vfp = adjusted_mode->crtc_vsync_start - adjusted_mode->crtc_vdisplay;
725 vsync = mode->vsync_end - mode->vsync_start; 796 vsync = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
726 vbp = mode->vtotal - mode->vsync_end; 797 vbp = adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vsync_end;
727 798
728 /* horizontal values are in terms of high speed byte clock */ 799 /* horizontal values are in terms of high speed byte clock */
729 hactive = txbyteclkhs(hactive, bpp, lane_count, 800 hactive = txbyteclkhs(hactive, bpp, lane_count,
@@ -734,6 +805,21 @@ static void set_dsi_timings(struct drm_encoder *encoder,
734 hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio); 805 hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
735 806
736 for_each_dsi_port(port, intel_dsi->ports) { 807 for_each_dsi_port(port, intel_dsi->ports) {
808 if (IS_BROXTON(dev)) {
809 /*
810 * Program hdisplay and vdisplay on MIPI transcoder.
811 * This is different from calculated hactive and
812 * vactive, as they are calculated per channel basis,
813 * whereas these values should be based on resolution.
814 */
815 I915_WRITE(BXT_MIPI_TRANS_HACTIVE(port),
816 adjusted_mode->crtc_hdisplay);
817 I915_WRITE(BXT_MIPI_TRANS_VACTIVE(port),
818 adjusted_mode->crtc_vdisplay);
819 I915_WRITE(BXT_MIPI_TRANS_VTOTAL(port),
820 adjusted_mode->crtc_vtotal);
821 }
822
737 I915_WRITE(MIPI_HACTIVE_AREA_COUNT(port), hactive); 823 I915_WRITE(MIPI_HACTIVE_AREA_COUNT(port), hactive);
738 I915_WRITE(MIPI_HFP_COUNT(port), hfp); 824 I915_WRITE(MIPI_HFP_COUNT(port), hfp);
739 825
@@ -756,8 +842,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
756 struct drm_i915_private *dev_priv = dev->dev_private; 842 struct drm_i915_private *dev_priv = dev->dev_private;
757 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 843 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
758 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 844 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
759 struct drm_display_mode *adjusted_mode = 845 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
760 &intel_crtc->config->base.adjusted_mode;
761 enum port port; 846 enum port port;
762 unsigned int bpp = intel_crtc->config->pipe_bpp; 847 unsigned int bpp = intel_crtc->config->pipe_bpp;
763 u32 val, tmp; 848 u32 val, tmp;
@@ -765,7 +850,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
765 850
766 DRM_DEBUG_KMS("pipe %c\n", pipe_name(intel_crtc->pipe)); 851 DRM_DEBUG_KMS("pipe %c\n", pipe_name(intel_crtc->pipe));
767 852
768 mode_hdisplay = adjusted_mode->hdisplay; 853 mode_hdisplay = adjusted_mode->crtc_hdisplay;
769 854
770 if (intel_dsi->dual_link) { 855 if (intel_dsi->dual_link) {
771 mode_hdisplay /= 2; 856 mode_hdisplay /= 2;
@@ -774,16 +859,39 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
774 } 859 }
775 860
776 for_each_dsi_port(port, intel_dsi->ports) { 861 for_each_dsi_port(port, intel_dsi->ports) {
777 /* escape clock divider, 20MHz, shared for A and C. 862 if (IS_VALLEYVIEW(dev)) {
778 * device ready must be off when doing this! txclkesc? */ 863 /*
779 tmp = I915_READ(MIPI_CTRL(PORT_A)); 864 * escape clock divider, 20MHz, shared for A and C.
780 tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK; 865 * device ready must be off when doing this! txclkesc?
781 I915_WRITE(MIPI_CTRL(PORT_A), tmp | ESCAPE_CLOCK_DIVIDER_1); 866 */
782 867 tmp = I915_READ(MIPI_CTRL(PORT_A));
783 /* read request priority is per pipe */ 868 tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
784 tmp = I915_READ(MIPI_CTRL(port)); 869 I915_WRITE(MIPI_CTRL(PORT_A), tmp |
785 tmp &= ~READ_REQUEST_PRIORITY_MASK; 870 ESCAPE_CLOCK_DIVIDER_1);
786 I915_WRITE(MIPI_CTRL(port), tmp | READ_REQUEST_PRIORITY_HIGH); 871
872 /* read request priority is per pipe */
873 tmp = I915_READ(MIPI_CTRL(port));
874 tmp &= ~READ_REQUEST_PRIORITY_MASK;
875 I915_WRITE(MIPI_CTRL(port), tmp |
876 READ_REQUEST_PRIORITY_HIGH);
877 } else if (IS_BROXTON(dev)) {
878 /*
879 * FIXME:
880 * BXT can connect any PIPE to any MIPI port.
881 * Select the pipe based on the MIPI port read from
882 * VBT for now. Pick PIPE A for MIPI port A and C
883 * for port C.
884 */
885 tmp = I915_READ(MIPI_CTRL(port));
886 tmp &= ~BXT_PIPE_SELECT_MASK;
887
888 if (port == PORT_A)
889 tmp |= BXT_PIPE_SELECT_A;
890 else if (port == PORT_C)
891 tmp |= BXT_PIPE_SELECT_C;
892
893 I915_WRITE(MIPI_CTRL(port), tmp);
894 }
787 895
788 /* XXX: why here, why like this? handling in irq handler?! */ 896 /* XXX: why here, why like this? handling in irq handler?! */
789 I915_WRITE(MIPI_INTR_STAT(port), 0xffffffff); 897 I915_WRITE(MIPI_INTR_STAT(port), 0xffffffff);
@@ -792,7 +900,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
792 I915_WRITE(MIPI_DPHY_PARAM(port), intel_dsi->dphy_reg); 900 I915_WRITE(MIPI_DPHY_PARAM(port), intel_dsi->dphy_reg);
793 901
794 I915_WRITE(MIPI_DPI_RESOLUTION(port), 902 I915_WRITE(MIPI_DPI_RESOLUTION(port),
795 adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT | 903 adjusted_mode->crtc_vdisplay << VERTICAL_ADDRESS_SHIFT |
796 mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT); 904 mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT);
797 } 905 }
798 906
@@ -838,15 +946,15 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
838 if (is_vid_mode(intel_dsi) && 946 if (is_vid_mode(intel_dsi) &&
839 intel_dsi->video_mode_format == VIDEO_MODE_BURST) { 947 intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
840 I915_WRITE(MIPI_HS_TX_TIMEOUT(port), 948 I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
841 txbyteclkhs(adjusted_mode->htotal, bpp, 949 txbyteclkhs(adjusted_mode->crtc_htotal, bpp,
842 intel_dsi->lane_count, 950 intel_dsi->lane_count,
843 intel_dsi->burst_mode_ratio) + 1); 951 intel_dsi->burst_mode_ratio) + 1);
844 } else { 952 } else {
845 I915_WRITE(MIPI_HS_TX_TIMEOUT(port), 953 I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
846 txbyteclkhs(adjusted_mode->vtotal * 954 txbyteclkhs(adjusted_mode->crtc_vtotal *
847 adjusted_mode->htotal, 955 adjusted_mode->crtc_htotal,
848 bpp, intel_dsi->lane_count, 956 bpp, intel_dsi->lane_count,
849 intel_dsi->burst_mode_ratio) + 1); 957 intel_dsi->burst_mode_ratio) + 1);
850 } 958 }
851 I915_WRITE(MIPI_LP_RX_TIMEOUT(port), intel_dsi->lp_rx_timeout); 959 I915_WRITE(MIPI_LP_RX_TIMEOUT(port), intel_dsi->lp_rx_timeout);
852 I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(port), 960 I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(port),
@@ -860,6 +968,17 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
860 I915_WRITE(MIPI_INIT_COUNT(port), 968 I915_WRITE(MIPI_INIT_COUNT(port),
861 txclkesc(intel_dsi->escape_clk_div, 100)); 969 txclkesc(intel_dsi->escape_clk_div, 100));
862 970
971 if (IS_BROXTON(dev) && (!intel_dsi->dual_link)) {
972 /*
973 * BXT spec says write MIPI_INIT_COUNT for
974 * both the ports, even if only one is
975 * getting used. So write the other port
976 * if not in dual link mode.
977 */
978 I915_WRITE(MIPI_INIT_COUNT(port ==
979 PORT_A ? PORT_C : PORT_A),
980 intel_dsi->init_count);
981 }
863 982
864 /* recovery disables */ 983 /* recovery disables */
865 I915_WRITE(MIPI_EOT_DISABLE(port), tmp); 984 I915_WRITE(MIPI_EOT_DISABLE(port), tmp);
@@ -911,8 +1030,8 @@ static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
911 DRM_DEBUG_KMS("\n"); 1030 DRM_DEBUG_KMS("\n");
912 1031
913 intel_dsi_prepare(encoder); 1032 intel_dsi_prepare(encoder);
1033 intel_enable_dsi_pll(encoder);
914 1034
915 vlv_enable_dsi_pll(encoder);
916} 1035}
917 1036
918static enum drm_connector_status 1037static enum drm_connector_status
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 42a68593e32a..e6cb25239941 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -124,9 +124,12 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
124 return container_of(encoder, struct intel_dsi, base.base); 124 return container_of(encoder, struct intel_dsi, base.base);
125} 125}
126 126
127extern void vlv_enable_dsi_pll(struct intel_encoder *encoder); 127extern void intel_enable_dsi_pll(struct intel_encoder *encoder);
128extern void vlv_disable_dsi_pll(struct intel_encoder *encoder); 128extern void intel_disable_dsi_pll(struct intel_encoder *encoder);
129extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp); 129extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
130extern u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
131extern void intel_dsi_reset_clocks(struct intel_encoder *encoder,
132 enum port port);
130 133
131struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id); 134struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id);
132 135
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index c6a8975b128f..cb3cf3986212 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -246,7 +246,7 @@ static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
246 vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl); 246 vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl);
247} 247}
248 248
249void vlv_enable_dsi_pll(struct intel_encoder *encoder) 249static void vlv_enable_dsi_pll(struct intel_encoder *encoder)
250{ 250{
251 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 251 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
252 u32 tmp; 252 u32 tmp;
@@ -276,7 +276,7 @@ void vlv_enable_dsi_pll(struct intel_encoder *encoder)
276 DRM_DEBUG_KMS("DSI PLL locked\n"); 276 DRM_DEBUG_KMS("DSI PLL locked\n");
277} 277}
278 278
279void vlv_disable_dsi_pll(struct intel_encoder *encoder) 279static void vlv_disable_dsi_pll(struct intel_encoder *encoder)
280{ 280{
281 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 281 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
282 u32 tmp; 282 u32 tmp;
@@ -293,6 +293,26 @@ void vlv_disable_dsi_pll(struct intel_encoder *encoder)
293 mutex_unlock(&dev_priv->sb_lock); 293 mutex_unlock(&dev_priv->sb_lock);
294} 294}
295 295
296static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
297{
298 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
299 u32 val;
300
301 DRM_DEBUG_KMS("\n");
302
303 val = I915_READ(BXT_DSI_PLL_ENABLE);
304 val &= ~BXT_DSI_PLL_DO_ENABLE;
305 I915_WRITE(BXT_DSI_PLL_ENABLE, val);
306
307 /*
308 * PLL lock should deassert within 200us.
309 * Wait up to 1ms before timing out.
310 */
311 if (wait_for((I915_READ(BXT_DSI_PLL_ENABLE)
312 & BXT_DSI_PLL_LOCKED) == 0, 1))
313 DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
314}
315
296static void assert_bpp_mismatch(int pixel_format, int pipe_bpp) 316static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
297{ 317{
298 int bpp = dsi_pixel_format_bpp(pixel_format); 318 int bpp = dsi_pixel_format_bpp(pixel_format);
@@ -363,3 +383,222 @@ u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
363 383
364 return pclk; 384 return pclk;
365} 385}
386
387u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
388{
389 u32 pclk;
390 u32 dsi_clk;
391 u32 dsi_ratio;
392 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
393 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
394
395 /* Divide by zero */
396 if (!pipe_bpp) {
397 DRM_ERROR("Invalid BPP(0)\n");
398 return 0;
399 }
400
401 dsi_ratio = I915_READ(BXT_DSI_PLL_CTL) &
402 BXT_DSI_PLL_RATIO_MASK;
403
404 /* Invalid DSI ratio ? */
405 if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN ||
406 dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
407 DRM_ERROR("Invalid DSI pll ratio(%u) programmed\n", dsi_ratio);
408 return 0;
409 }
410
411 dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2;
412
413 /* pixel_format and pipe_bpp should agree */
414 assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp);
415
416 pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, pipe_bpp);
417
418 DRM_DEBUG_DRIVER("Calculated pclk=%u\n", pclk);
419 return pclk;
420}
421
422static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
423{
424 u32 temp;
425 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
426 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
427
428 temp = I915_READ(MIPI_CTRL(port));
429 temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
430 I915_WRITE(MIPI_CTRL(port), temp |
431 intel_dsi->escape_clk_div <<
432 ESCAPE_CLOCK_DIVIDER_SHIFT);
433}
434
435/* Program BXT Mipi clocks and dividers */
436static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port)
437{
438 u32 tmp;
439 u32 divider;
440 u32 dsi_rate;
441 u32 pll_ratio;
442 struct drm_i915_private *dev_priv = dev->dev_private;
443
444 /* Clear old configurations */
445 tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
446 tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
447 tmp &= ~(BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port));
448 tmp &= ~(BXT_MIPI_ESCLK_VAR_DIV_MASK(port));
449 tmp &= ~(BXT_MIPI_DPHY_DIVIDER_MASK(port));
450
451 /* Get the current DSI rate(actual) */
452 pll_ratio = I915_READ(BXT_DSI_PLL_CTL) &
453 BXT_DSI_PLL_RATIO_MASK;
454 dsi_rate = (BXT_REF_CLOCK_KHZ * pll_ratio) / 2;
455
456 /* Max possible output of clock is 39.5 MHz, program value -1 */
457 divider = (dsi_rate / BXT_MAX_VAR_OUTPUT_KHZ) - 1;
458 tmp |= BXT_MIPI_ESCLK_VAR_DIV(port, divider);
459
460 /*
461 * Tx escape clock must be as close to 20MHz possible, but should
462 * not exceed it. Hence select divide by 2
463 */
464 tmp |= BXT_MIPI_TX_ESCLK_8XDIV_BY2(port);
465
466 tmp |= BXT_MIPI_RX_ESCLK_8X_BY3(port);
467
468 I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
469}
470
471static bool bxt_configure_dsi_pll(struct intel_encoder *encoder)
472{
473 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
474 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
475 u8 dsi_ratio;
476 u32 dsi_clk;
477 u32 val;
478
479 dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
480 intel_dsi->lane_count);
481
482 /*
483 * From clock diagram, to get PLL ratio divider, divide double of DSI
484 * link rate (i.e., 2*8x=16x frequency value) by ref clock. Make sure to
485 * round 'up' the result
486 */
487 dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ);
488 if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN ||
489 dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
490 DRM_ERROR("Cant get a suitable ratio from DSI PLL ratios\n");
491 return false;
492 }
493
494 /*
495 * Program DSI ratio and Select MIPIC and MIPIA PLL output as 8x
496 * Spec says both have to be programmed, even if one is not getting
497 * used. Configure MIPI_CLOCK_CTL dividers in modeset
498 */
499 val = I915_READ(BXT_DSI_PLL_CTL);
500 val &= ~BXT_DSI_PLL_PVD_RATIO_MASK;
501 val &= ~BXT_DSI_FREQ_SEL_MASK;
502 val &= ~BXT_DSI_PLL_RATIO_MASK;
503 val |= (dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2);
504
505 /* As per recommendation from hardware team,
506 * Prog PVD ratio =1 if dsi ratio <= 50
507 */
508 if (dsi_ratio <= 50) {
509 val &= ~BXT_DSI_PLL_PVD_RATIO_MASK;
510 val |= BXT_DSI_PLL_PVD_RATIO_1;
511 }
512
513 I915_WRITE(BXT_DSI_PLL_CTL, val);
514 POSTING_READ(BXT_DSI_PLL_CTL);
515
516 return true;
517}
518
519static void bxt_enable_dsi_pll(struct intel_encoder *encoder)
520{
521 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
522 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
523 enum port port;
524 u32 val;
525
526 DRM_DEBUG_KMS("\n");
527
528 val = I915_READ(BXT_DSI_PLL_ENABLE);
529
530 if (val & BXT_DSI_PLL_DO_ENABLE) {
531 WARN(1, "DSI PLL already enabled. Disabling it.\n");
532 val &= ~BXT_DSI_PLL_DO_ENABLE;
533 I915_WRITE(BXT_DSI_PLL_ENABLE, val);
534 }
535
536 /* Configure PLL vales */
537 if (!bxt_configure_dsi_pll(encoder)) {
538 DRM_ERROR("Configure DSI PLL failed, abort PLL enable\n");
539 return;
540 }
541
542 /* Program TX, RX, Dphy clocks */
543 for_each_dsi_port(port, intel_dsi->ports)
544 bxt_dsi_program_clocks(encoder->base.dev, port);
545
546 /* Enable DSI PLL */
547 val = I915_READ(BXT_DSI_PLL_ENABLE);
548 val |= BXT_DSI_PLL_DO_ENABLE;
549 I915_WRITE(BXT_DSI_PLL_ENABLE, val);
550
551 /* Timeout and fail if PLL not locked */
552 if (wait_for(I915_READ(BXT_DSI_PLL_ENABLE) & BXT_DSI_PLL_LOCKED, 1)) {
553 DRM_ERROR("Timed out waiting for DSI PLL to lock\n");
554 return;
555 }
556
557 DRM_DEBUG_KMS("DSI PLL locked\n");
558}
559
560void intel_enable_dsi_pll(struct intel_encoder *encoder)
561{
562 struct drm_device *dev = encoder->base.dev;
563
564 if (IS_VALLEYVIEW(dev))
565 vlv_enable_dsi_pll(encoder);
566 else if (IS_BROXTON(dev))
567 bxt_enable_dsi_pll(encoder);
568}
569
570void intel_disable_dsi_pll(struct intel_encoder *encoder)
571{
572 struct drm_device *dev = encoder->base.dev;
573
574 if (IS_VALLEYVIEW(dev))
575 vlv_disable_dsi_pll(encoder);
576 else if (IS_BROXTON(dev))
577 bxt_disable_dsi_pll(encoder);
578}
579
580static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
581{
582 u32 tmp;
583 struct drm_device *dev = encoder->base.dev;
584 struct drm_i915_private *dev_priv = dev->dev_private;
585
586 /* Clear old configurations */
587 tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
588 tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
589 tmp &= ~(BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port));
590 tmp &= ~(BXT_MIPI_ESCLK_VAR_DIV_MASK(port));
591 tmp &= ~(BXT_MIPI_DPHY_DIVIDER_MASK(port));
592 I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
593 I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
594}
595
596void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
597{
598 struct drm_device *dev = encoder->base.dev;
599
600 if (IS_BROXTON(dev))
601 bxt_dsi_reset_clocks(encoder, port);
602 else if (IS_VALLEYVIEW(dev))
603 vlv_dsi_reset_clocks(encoder, port);
604}
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index dc532bb61d22..8492053e0ff0 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -97,7 +97,8 @@ struct intel_dvo {
97 97
98 struct intel_dvo_device dev; 98 struct intel_dvo_device dev;
99 99
100 struct drm_display_mode *panel_fixed_mode; 100 struct intel_connector *attached_connector;
101
101 bool panel_wants_dither; 102 bool panel_wants_dither;
102}; 103};
103 104
@@ -201,19 +202,28 @@ intel_dvo_mode_valid(struct drm_connector *connector,
201 struct drm_display_mode *mode) 202 struct drm_display_mode *mode)
202{ 203{
203 struct intel_dvo *intel_dvo = intel_attached_dvo(connector); 204 struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
205 const struct drm_display_mode *fixed_mode =
206 to_intel_connector(connector)->panel.fixed_mode;
207 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
208 int target_clock = mode->clock;
204 209
205 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 210 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
206 return MODE_NO_DBLESCAN; 211 return MODE_NO_DBLESCAN;
207 212
208 /* XXX: Validate clock range */ 213 /* XXX: Validate clock range */
209 214
210 if (intel_dvo->panel_fixed_mode) { 215 if (fixed_mode) {
211 if (mode->hdisplay > intel_dvo->panel_fixed_mode->hdisplay) 216 if (mode->hdisplay > fixed_mode->hdisplay)
212 return MODE_PANEL; 217 return MODE_PANEL;
213 if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay) 218 if (mode->vdisplay > fixed_mode->vdisplay)
214 return MODE_PANEL; 219 return MODE_PANEL;
220
221 target_clock = fixed_mode->clock;
215 } 222 }
216 223
224 if (target_clock > max_dotclk)
225 return MODE_CLOCK_HIGH;
226
217 return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode); 227 return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
218} 228}
219 229
@@ -221,6 +231,8 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
221 struct intel_crtc_state *pipe_config) 231 struct intel_crtc_state *pipe_config)
222{ 232{
223 struct intel_dvo *intel_dvo = enc_to_dvo(encoder); 233 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
234 const struct drm_display_mode *fixed_mode =
235 intel_dvo->attached_connector->panel.fixed_mode;
224 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 236 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
225 237
226 /* If we have timings from the BIOS for the panel, put them in 238 /* If we have timings from the BIOS for the panel, put them in
@@ -228,21 +240,8 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
228 * with the panel scaling set up to source from the H/VDisplay 240 * with the panel scaling set up to source from the H/VDisplay
229 * of the original mode. 241 * of the original mode.
230 */ 242 */
231 if (intel_dvo->panel_fixed_mode != NULL) { 243 if (fixed_mode)
232#define C(x) adjusted_mode->x = intel_dvo->panel_fixed_mode->x 244 intel_fixed_panel_mode(fixed_mode, adjusted_mode);
233 C(hdisplay);
234 C(hsync_start);
235 C(hsync_end);
236 C(htotal);
237 C(vdisplay);
238 C(vsync_start);
239 C(vsync_end);
240 C(vtotal);
241 C(clock);
242#undef C
243
244 drm_mode_set_crtcinfo(adjusted_mode, 0);
245 }
246 245
247 return true; 246 return true;
248} 247}
@@ -252,7 +251,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder)
252 struct drm_device *dev = encoder->base.dev; 251 struct drm_device *dev = encoder->base.dev;
253 struct drm_i915_private *dev_priv = dev->dev_private; 252 struct drm_i915_private *dev_priv = dev->dev_private;
254 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 253 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
255 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 254 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
256 struct intel_dvo *intel_dvo = enc_to_dvo(encoder); 255 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
257 int pipe = crtc->pipe; 256 int pipe = crtc->pipe;
258 u32 dvo_val; 257 u32 dvo_val;
@@ -286,11 +285,11 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder)
286 dvo_val |= DVO_VSYNC_ACTIVE_HIGH; 285 dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
287 286
288 /*I915_WRITE(DVOB_SRCDIM, 287 /*I915_WRITE(DVOB_SRCDIM,
289 (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | 288 (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
290 (adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/ 289 (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
291 I915_WRITE(dvo_srcdim_reg, 290 I915_WRITE(dvo_srcdim_reg,
292 (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | 291 (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
293 (adjusted_mode->vdisplay << DVO_SRCDIM_VERTICAL_SHIFT)); 292 (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
294 /*I915_WRITE(DVOB, dvo_val);*/ 293 /*I915_WRITE(DVOB, dvo_val);*/
295 I915_WRITE(dvo_reg, dvo_val); 294 I915_WRITE(dvo_reg, dvo_val);
296} 295}
@@ -311,8 +310,9 @@ intel_dvo_detect(struct drm_connector *connector, bool force)
311 310
312static int intel_dvo_get_modes(struct drm_connector *connector) 311static int intel_dvo_get_modes(struct drm_connector *connector)
313{ 312{
314 struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
315 struct drm_i915_private *dev_priv = connector->dev->dev_private; 313 struct drm_i915_private *dev_priv = connector->dev->dev_private;
314 const struct drm_display_mode *fixed_mode =
315 to_intel_connector(connector)->panel.fixed_mode;
316 316
317 /* We should probably have an i2c driver get_modes function for those 317 /* We should probably have an i2c driver get_modes function for those
318 * devices which will have a fixed set of modes determined by the chip 318 * devices which will have a fixed set of modes determined by the chip
@@ -324,9 +324,9 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
324 if (!list_empty(&connector->probed_modes)) 324 if (!list_empty(&connector->probed_modes))
325 return 1; 325 return 1;
326 326
327 if (intel_dvo->panel_fixed_mode != NULL) { 327 if (fixed_mode) {
328 struct drm_display_mode *mode; 328 struct drm_display_mode *mode;
329 mode = drm_mode_duplicate(connector->dev, intel_dvo->panel_fixed_mode); 329 mode = drm_mode_duplicate(connector->dev, fixed_mode);
330 if (mode) { 330 if (mode) {
331 drm_mode_probed_add(connector, mode); 331 drm_mode_probed_add(connector, mode);
332 return 1; 332 return 1;
@@ -339,6 +339,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
339static void intel_dvo_destroy(struct drm_connector *connector) 339static void intel_dvo_destroy(struct drm_connector *connector)
340{ 340{
341 drm_connector_cleanup(connector); 341 drm_connector_cleanup(connector);
342 intel_panel_fini(&to_intel_connector(connector)->panel);
342 kfree(connector); 343 kfree(connector);
343} 344}
344 345
@@ -365,8 +366,6 @@ static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
365 if (intel_dvo->dev.dev_ops->destroy) 366 if (intel_dvo->dev.dev_ops->destroy)
366 intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev); 367 intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
367 368
368 kfree(intel_dvo->panel_fixed_mode);
369
370 intel_encoder_destroy(encoder); 369 intel_encoder_destroy(encoder);
371} 370}
372 371
@@ -431,6 +430,8 @@ void intel_dvo_init(struct drm_device *dev)
431 return; 430 return;
432 } 431 }
433 432
433 intel_dvo->attached_connector = intel_connector;
434
434 intel_encoder = &intel_dvo->base; 435 intel_encoder = &intel_dvo->base;
435 drm_encoder_init(dev, &intel_encoder->base, 436 drm_encoder_init(dev, &intel_encoder->base,
436 &intel_dvo_enc_funcs, encoder_type); 437 &intel_dvo_enc_funcs, encoder_type);
@@ -535,8 +536,9 @@ void intel_dvo_init(struct drm_device *dev)
535 * headers, likely), so for now, just get the current 536 * headers, likely), so for now, just get the current
536 * mode being output through DVO. 537 * mode being output through DVO.
537 */ 538 */
538 intel_dvo->panel_fixed_mode = 539 intel_panel_init(&intel_connector->panel,
539 intel_dvo_get_current_mode(connector); 540 intel_dvo_get_current_mode(connector),
541 NULL);
540 intel_dvo->panel_wants_dither = true; 542 intel_dvo->panel_wants_dither = true;
541 } 543 }
542 544
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 1f97fb548c2a..cf47352b7b8e 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -41,6 +41,24 @@
41#include "intel_drv.h" 41#include "intel_drv.h"
42#include "i915_drv.h" 42#include "i915_drv.h"
43 43
44static inline bool fbc_supported(struct drm_i915_private *dev_priv)
45{
46 return dev_priv->fbc.enable_fbc != NULL;
47}
48
49/*
50 * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
51 * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
52 * origin so the x and y offsets can actually fit the registers. As a
53 * consequence, the fence doesn't really start exactly at the display plane
54 * address we program because it starts at the real start of the buffer, so we
55 * have to take this into consideration here.
56 */
57static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
58{
59 return crtc->base.y - crtc->adjusted_y;
60}
61
44static void i8xx_fbc_disable(struct drm_i915_private *dev_priv) 62static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
45{ 63{
46 u32 fbc_ctl; 64 u32 fbc_ctl;
@@ -88,7 +106,7 @@ static void i8xx_fbc_enable(struct intel_crtc *crtc)
88 106
89 /* Clear old tags */ 107 /* Clear old tags */
90 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) 108 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
91 I915_WRITE(FBC_TAG + (i * 4), 0); 109 I915_WRITE(FBC_TAG(i), 0);
92 110
93 if (IS_GEN4(dev_priv)) { 111 if (IS_GEN4(dev_priv)) {
94 u32 fbc_ctl2; 112 u32 fbc_ctl2;
@@ -97,7 +115,7 @@ static void i8xx_fbc_enable(struct intel_crtc *crtc)
97 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; 115 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
98 fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane); 116 fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane);
99 I915_WRITE(FBC_CONTROL2, fbc_ctl2); 117 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
100 I915_WRITE(FBC_FENCE_OFF, crtc->base.y); 118 I915_WRITE(FBC_FENCE_OFF, get_crtc_fence_y_offset(crtc));
101 } 119 }
102 120
103 /* enable it... */ 121 /* enable it... */
@@ -135,7 +153,7 @@ static void g4x_fbc_enable(struct intel_crtc *crtc)
135 dpfc_ctl |= DPFC_CTL_LIMIT_1X; 153 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
136 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; 154 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
137 155
138 I915_WRITE(DPFC_FENCE_YOFF, crtc->base.y); 156 I915_WRITE(DPFC_FENCE_YOFF, get_crtc_fence_y_offset(crtc));
139 157
140 /* enable it... */ 158 /* enable it... */
141 I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); 159 I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -177,6 +195,7 @@ static void ilk_fbc_enable(struct intel_crtc *crtc)
177 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 195 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
178 u32 dpfc_ctl; 196 u32 dpfc_ctl;
179 int threshold = dev_priv->fbc.threshold; 197 int threshold = dev_priv->fbc.threshold;
198 unsigned int y_offset;
180 199
181 dev_priv->fbc.enabled = true; 200 dev_priv->fbc.enabled = true;
182 201
@@ -200,7 +219,8 @@ static void ilk_fbc_enable(struct intel_crtc *crtc)
200 if (IS_GEN5(dev_priv)) 219 if (IS_GEN5(dev_priv))
201 dpfc_ctl |= obj->fence_reg; 220 dpfc_ctl |= obj->fence_reg;
202 221
203 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->base.y); 222 y_offset = get_crtc_fence_y_offset(crtc);
223 I915_WRITE(ILK_DPFC_FENCE_YOFF, y_offset);
204 I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID); 224 I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
205 /* enable it... */ 225 /* enable it... */
206 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); 226 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -208,7 +228,7 @@ static void ilk_fbc_enable(struct intel_crtc *crtc)
208 if (IS_GEN6(dev_priv)) { 228 if (IS_GEN6(dev_priv)) {
209 I915_WRITE(SNB_DPFC_CTL_SA, 229 I915_WRITE(SNB_DPFC_CTL_SA,
210 SNB_CPU_FENCE_ENABLE | obj->fence_reg); 230 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
211 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y); 231 I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset);
212 } 232 }
213 233
214 intel_fbc_nuke(dev_priv); 234 intel_fbc_nuke(dev_priv);
@@ -272,23 +292,23 @@ static void gen7_fbc_enable(struct intel_crtc *crtc)
272 if (dev_priv->fbc.false_color) 292 if (dev_priv->fbc.false_color)
273 dpfc_ctl |= FBC_CTL_FALSE_COLOR; 293 dpfc_ctl |= FBC_CTL_FALSE_COLOR;
274 294
275 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
276
277 if (IS_IVYBRIDGE(dev_priv)) { 295 if (IS_IVYBRIDGE(dev_priv)) {
278 /* WaFbcAsynchFlipDisableFbcQueue:ivb */ 296 /* WaFbcAsynchFlipDisableFbcQueue:ivb */
279 I915_WRITE(ILK_DISPLAY_CHICKEN1, 297 I915_WRITE(ILK_DISPLAY_CHICKEN1,
280 I915_READ(ILK_DISPLAY_CHICKEN1) | 298 I915_READ(ILK_DISPLAY_CHICKEN1) |
281 ILK_FBCQ_DIS); 299 ILK_FBCQ_DIS);
282 } else { 300 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
283 /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ 301 /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
284 I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe), 302 I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
285 I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) | 303 I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
286 HSW_FBCQ_DIS); 304 HSW_FBCQ_DIS);
287 } 305 }
288 306
307 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
308
289 I915_WRITE(SNB_DPFC_CTL_SA, 309 I915_WRITE(SNB_DPFC_CTL_SA,
290 SNB_CPU_FENCE_ENABLE | obj->fence_reg); 310 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
291 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y); 311 I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));
292 312
293 intel_fbc_nuke(dev_priv); 313 intel_fbc_nuke(dev_priv);
294 314
@@ -308,6 +328,18 @@ bool intel_fbc_enabled(struct drm_i915_private *dev_priv)
308 return dev_priv->fbc.enabled; 328 return dev_priv->fbc.enabled;
309} 329}
310 330
331static void intel_fbc_enable(struct intel_crtc *crtc,
332 const struct drm_framebuffer *fb)
333{
334 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
335
336 dev_priv->fbc.enable_fbc(crtc);
337
338 dev_priv->fbc.crtc = crtc;
339 dev_priv->fbc.fb_id = fb->base.id;
340 dev_priv->fbc.y = crtc->base.y;
341}
342
311static void intel_fbc_work_fn(struct work_struct *__work) 343static void intel_fbc_work_fn(struct work_struct *__work)
312{ 344{
313 struct intel_fbc_work *work = 345 struct intel_fbc_work *work =
@@ -321,13 +353,8 @@ static void intel_fbc_work_fn(struct work_struct *__work)
321 /* Double check that we haven't switched fb without cancelling 353 /* Double check that we haven't switched fb without cancelling
322 * the prior work. 354 * the prior work.
323 */ 355 */
324 if (crtc_fb == work->fb) { 356 if (crtc_fb == work->fb)
325 dev_priv->fbc.enable_fbc(work->crtc); 357 intel_fbc_enable(work->crtc, work->fb);
326
327 dev_priv->fbc.crtc = work->crtc;
328 dev_priv->fbc.fb_id = crtc_fb->base.id;
329 dev_priv->fbc.y = work->crtc->base.y;
330 }
331 358
332 dev_priv->fbc.fbc_work = NULL; 359 dev_priv->fbc.fbc_work = NULL;
333 } 360 }
@@ -361,7 +388,7 @@ static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
361 dev_priv->fbc.fbc_work = NULL; 388 dev_priv->fbc.fbc_work = NULL;
362} 389}
363 390
364static void intel_fbc_enable(struct intel_crtc *crtc) 391static void intel_fbc_schedule_enable(struct intel_crtc *crtc)
365{ 392{
366 struct intel_fbc_work *work; 393 struct intel_fbc_work *work;
367 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 394 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
@@ -373,7 +400,7 @@ static void intel_fbc_enable(struct intel_crtc *crtc)
373 work = kzalloc(sizeof(*work), GFP_KERNEL); 400 work = kzalloc(sizeof(*work), GFP_KERNEL);
374 if (work == NULL) { 401 if (work == NULL) {
375 DRM_ERROR("Failed to allocate FBC work structure\n"); 402 DRM_ERROR("Failed to allocate FBC work structure\n");
376 dev_priv->fbc.enable_fbc(crtc); 403 intel_fbc_enable(crtc, crtc->base.primary->fb);
377 return; 404 return;
378 } 405 }
379 406
@@ -417,7 +444,7 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
417 */ 444 */
418void intel_fbc_disable(struct drm_i915_private *dev_priv) 445void intel_fbc_disable(struct drm_i915_private *dev_priv)
419{ 446{
420 if (!dev_priv->fbc.enable_fbc) 447 if (!fbc_supported(dev_priv))
421 return; 448 return;
422 449
423 mutex_lock(&dev_priv->fbc.lock); 450 mutex_lock(&dev_priv->fbc.lock);
@@ -435,7 +462,7 @@ void intel_fbc_disable_crtc(struct intel_crtc *crtc)
435{ 462{
436 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 463 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
437 464
438 if (!dev_priv->fbc.enable_fbc) 465 if (!fbc_supported(dev_priv))
439 return; 466 return;
440 467
441 mutex_lock(&dev_priv->fbc.lock); 468 mutex_lock(&dev_priv->fbc.lock);
@@ -473,6 +500,12 @@ const char *intel_no_fbc_reason_str(enum no_fbc_reason reason)
473 return "rotation unsupported"; 500 return "rotation unsupported";
474 case FBC_IN_DBG_MASTER: 501 case FBC_IN_DBG_MASTER:
475 return "Kernel debugger is active"; 502 return "Kernel debugger is active";
503 case FBC_BAD_STRIDE:
504 return "framebuffer stride not supported";
505 case FBC_PIXEL_RATE:
506 return "pixel rate is too big";
507 case FBC_PIXEL_FORMAT:
508 return "pixel format is invalid";
476 default: 509 default:
477 MISSING_CASE(reason); 510 MISSING_CASE(reason);
478 return "unknown reason"; 511 return "unknown reason";
@@ -542,6 +575,16 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
542{ 575{
543 int compression_threshold = 1; 576 int compression_threshold = 1;
544 int ret; 577 int ret;
578 u64 end;
579
580 /* The FBC hardware for BDW/SKL doesn't have access to the stolen
581 * reserved range size, so it always assumes the maximum (8mb) is used.
582 * If we enable FBC using a CFB on that memory range we'll get FIFO
583 * underruns, even if that range is not reserved by the BIOS. */
584 if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
585 end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
586 else
587 end = dev_priv->gtt.stolen_usable_size;
545 588
546 /* HACK: This code depends on what we will do in *_enable_fbc. If that 589 /* HACK: This code depends on what we will do in *_enable_fbc. If that
547 * code changes, this code needs to change as well. 590 * code changes, this code needs to change as well.
@@ -551,7 +594,8 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
551 */ 594 */
552 595
553 /* Try to over-allocate to reduce reallocations and fragmentation. */ 596 /* Try to over-allocate to reduce reallocations and fragmentation. */
554 ret = i915_gem_stolen_insert_node(dev_priv, node, size <<= 1, 4096); 597 ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
598 4096, 0, end);
555 if (ret == 0) 599 if (ret == 0)
556 return compression_threshold; 600 return compression_threshold;
557 601
@@ -561,7 +605,8 @@ again:
561 (fb_cpp == 2 && compression_threshold == 2)) 605 (fb_cpp == 2 && compression_threshold == 2))
562 return 0; 606 return 0;
563 607
564 ret = i915_gem_stolen_insert_node(dev_priv, node, size >>= 1, 4096); 608 ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
609 4096, 0, end);
565 if (ret && INTEL_INFO(dev_priv)->gen <= 4) { 610 if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
566 return 0; 611 return 0;
567 } else if (ret) { 612 } else if (ret) {
@@ -613,8 +658,9 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
613 658
614 dev_priv->fbc.uncompressed_size = size; 659 dev_priv->fbc.uncompressed_size = size;
615 660
616 DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n", 661 DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
617 size); 662 dev_priv->fbc.compressed_fb.size,
663 dev_priv->fbc.threshold);
618 664
619 return 0; 665 return 0;
620 666
@@ -644,7 +690,7 @@ static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
644 690
645void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) 691void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
646{ 692{
647 if (!dev_priv->fbc.enable_fbc) 693 if (!fbc_supported(dev_priv))
648 return; 694 return;
649 695
650 mutex_lock(&dev_priv->fbc.lock); 696 mutex_lock(&dev_priv->fbc.lock);
@@ -652,16 +698,134 @@ void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
652 mutex_unlock(&dev_priv->fbc.lock); 698 mutex_unlock(&dev_priv->fbc.lock);
653} 699}
654 700
655static int intel_fbc_setup_cfb(struct drm_i915_private *dev_priv, int size, 701/*
656 int fb_cpp) 702 * For SKL+, the plane source size used by the hardware is based on the value we
703 * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
704 * we wrote to PIPESRC.
705 */
706static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
707 int *width, int *height)
657{ 708{
709 struct intel_plane_state *plane_state =
710 to_intel_plane_state(crtc->base.primary->state);
711 int w, h;
712
713 if (intel_rotation_90_or_270(plane_state->base.rotation)) {
714 w = drm_rect_height(&plane_state->src) >> 16;
715 h = drm_rect_width(&plane_state->src) >> 16;
716 } else {
717 w = drm_rect_width(&plane_state->src) >> 16;
718 h = drm_rect_height(&plane_state->src) >> 16;
719 }
720
721 if (width)
722 *width = w;
723 if (height)
724 *height = h;
725}
726
727static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc)
728{
729 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
730 struct drm_framebuffer *fb = crtc->base.primary->fb;
731 int lines;
732
733 intel_fbc_get_plane_source_size(crtc, NULL, &lines);
734 if (INTEL_INFO(dev_priv)->gen >= 7)
735 lines = min(lines, 2048);
736
737 return lines * fb->pitches[0];
738}
739
740static int intel_fbc_setup_cfb(struct intel_crtc *crtc)
741{
742 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
743 struct drm_framebuffer *fb = crtc->base.primary->fb;
744 int size, cpp;
745
746 size = intel_fbc_calculate_cfb_size(crtc);
747 cpp = drm_format_plane_cpp(fb->pixel_format, 0);
748
658 if (size <= dev_priv->fbc.uncompressed_size) 749 if (size <= dev_priv->fbc.uncompressed_size)
659 return 0; 750 return 0;
660 751
661 /* Release any current block */ 752 /* Release any current block */
662 __intel_fbc_cleanup_cfb(dev_priv); 753 __intel_fbc_cleanup_cfb(dev_priv);
663 754
664 return intel_fbc_alloc_cfb(dev_priv, size, fb_cpp); 755 return intel_fbc_alloc_cfb(dev_priv, size, cpp);
756}
757
758static bool stride_is_valid(struct drm_i915_private *dev_priv,
759 unsigned int stride)
760{
761 /* These should have been caught earlier. */
762 WARN_ON(stride < 512);
763 WARN_ON((stride & (64 - 1)) != 0);
764
765 /* Below are the additional FBC restrictions. */
766
767 if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
768 return stride == 4096 || stride == 8192;
769
770 if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048)
771 return false;
772
773 if (stride > 16384)
774 return false;
775
776 return true;
777}
778
779static bool pixel_format_is_valid(struct drm_framebuffer *fb)
780{
781 struct drm_device *dev = fb->dev;
782 struct drm_i915_private *dev_priv = dev->dev_private;
783
784 switch (fb->pixel_format) {
785 case DRM_FORMAT_XRGB8888:
786 case DRM_FORMAT_XBGR8888:
787 return true;
788 case DRM_FORMAT_XRGB1555:
789 case DRM_FORMAT_RGB565:
790 /* 16bpp not supported on gen2 */
791 if (IS_GEN2(dev))
792 return false;
793 /* WaFbcOnly1to1Ratio:ctg */
794 if (IS_G4X(dev_priv))
795 return false;
796 return true;
797 default:
798 return false;
799 }
800}
801
802/*
803 * For some reason, the hardware tracking starts looking at whatever we
804 * programmed as the display plane base address register. It does not look at
805 * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
806 * variables instead of just looking at the pipe/plane size.
807 */
808static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
809{
810 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
811 unsigned int effective_w, effective_h, max_w, max_h;
812
813 if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
814 max_w = 4096;
815 max_h = 4096;
816 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
817 max_w = 4096;
818 max_h = 2048;
819 } else {
820 max_w = 2048;
821 max_h = 1536;
822 }
823
824 intel_fbc_get_plane_source_size(crtc, &effective_w, &effective_h);
825 effective_w += crtc->adjusted_x;
826 effective_h += crtc->adjusted_y;
827
828 return effective_w <= max_w && effective_h <= max_h;
665} 829}
666 830
667/** 831/**
@@ -690,7 +854,6 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
690 struct drm_framebuffer *fb; 854 struct drm_framebuffer *fb;
691 struct drm_i915_gem_object *obj; 855 struct drm_i915_gem_object *obj;
692 const struct drm_display_mode *adjusted_mode; 856 const struct drm_display_mode *adjusted_mode;
693 unsigned int max_width, max_height;
694 857
695 WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); 858 WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
696 859
@@ -739,21 +902,11 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
739 goto out_disable; 902 goto out_disable;
740 } 903 }
741 904
742 if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) { 905 if (!intel_fbc_hw_tracking_covers_screen(intel_crtc)) {
743 max_width = 4096;
744 max_height = 4096;
745 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
746 max_width = 4096;
747 max_height = 2048;
748 } else {
749 max_width = 2048;
750 max_height = 1536;
751 }
752 if (intel_crtc->config->pipe_src_w > max_width ||
753 intel_crtc->config->pipe_src_h > max_height) {
754 set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE); 906 set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE);
755 goto out_disable; 907 goto out_disable;
756 } 908 }
909
757 if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) && 910 if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) &&
758 intel_crtc->plane != PLANE_A) { 911 intel_crtc->plane != PLANE_A) {
759 set_no_fbc_reason(dev_priv, FBC_BAD_PLANE); 912 set_no_fbc_reason(dev_priv, FBC_BAD_PLANE);
@@ -774,14 +927,31 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
774 goto out_disable; 927 goto out_disable;
775 } 928 }
776 929
930 if (!stride_is_valid(dev_priv, fb->pitches[0])) {
931 set_no_fbc_reason(dev_priv, FBC_BAD_STRIDE);
932 goto out_disable;
933 }
934
935 if (!pixel_format_is_valid(fb)) {
936 set_no_fbc_reason(dev_priv, FBC_PIXEL_FORMAT);
937 goto out_disable;
938 }
939
777 /* If the kernel debugger is active, always disable compression */ 940 /* If the kernel debugger is active, always disable compression */
778 if (in_dbg_master()) { 941 if (in_dbg_master()) {
779 set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER); 942 set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER);
780 goto out_disable; 943 goto out_disable;
781 } 944 }
782 945
783 if (intel_fbc_setup_cfb(dev_priv, obj->base.size, 946 /* WaFbcExceedCdClockThreshold:hsw,bdw */
784 drm_format_plane_cpp(fb->pixel_format, 0))) { 947 if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
948 ilk_pipe_pixel_rate(intel_crtc->config) >=
949 dev_priv->cdclk_freq * 95 / 100) {
950 set_no_fbc_reason(dev_priv, FBC_PIXEL_RATE);
951 goto out_disable;
952 }
953
954 if (intel_fbc_setup_cfb(intel_crtc)) {
785 set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL); 955 set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL);
786 goto out_disable; 956 goto out_disable;
787 } 957 }
@@ -824,7 +994,7 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
824 __intel_fbc_disable(dev_priv); 994 __intel_fbc_disable(dev_priv);
825 } 995 }
826 996
827 intel_fbc_enable(intel_crtc); 997 intel_fbc_schedule_enable(intel_crtc);
828 dev_priv->fbc.no_fbc_reason = FBC_OK; 998 dev_priv->fbc.no_fbc_reason = FBC_OK;
829 return; 999 return;
830 1000
@@ -845,7 +1015,7 @@ out_disable:
845 */ 1015 */
846void intel_fbc_update(struct drm_i915_private *dev_priv) 1016void intel_fbc_update(struct drm_i915_private *dev_priv)
847{ 1017{
848 if (!dev_priv->fbc.enable_fbc) 1018 if (!fbc_supported(dev_priv))
849 return; 1019 return;
850 1020
851 mutex_lock(&dev_priv->fbc.lock); 1021 mutex_lock(&dev_priv->fbc.lock);
@@ -859,7 +1029,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
859{ 1029{
860 unsigned int fbc_bits; 1030 unsigned int fbc_bits;
861 1031
862 if (!dev_priv->fbc.enable_fbc) 1032 if (!fbc_supported(dev_priv))
863 return; 1033 return;
864 1034
865 if (origin == ORIGIN_GTT) 1035 if (origin == ORIGIN_GTT)
@@ -886,7 +1056,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
886void intel_fbc_flush(struct drm_i915_private *dev_priv, 1056void intel_fbc_flush(struct drm_i915_private *dev_priv,
887 unsigned int frontbuffer_bits, enum fb_op_origin origin) 1057 unsigned int frontbuffer_bits, enum fb_op_origin origin)
888{ 1058{
889 if (!dev_priv->fbc.enable_fbc) 1059 if (!fbc_supported(dev_priv))
890 return; 1060 return;
891 1061
892 if (origin == ORIGIN_GTT) 1062 if (origin == ORIGIN_GTT)
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 8c6a6fa46005..4fd5fdfef6bd 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -121,8 +121,9 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
121 container_of(helper, struct intel_fbdev, helper); 121 container_of(helper, struct intel_fbdev, helper);
122 struct drm_framebuffer *fb; 122 struct drm_framebuffer *fb;
123 struct drm_device *dev = helper->dev; 123 struct drm_device *dev = helper->dev;
124 struct drm_i915_private *dev_priv = to_i915(dev);
124 struct drm_mode_fb_cmd2 mode_cmd = {}; 125 struct drm_mode_fb_cmd2 mode_cmd = {};
125 struct drm_i915_gem_object *obj; 126 struct drm_i915_gem_object *obj = NULL;
126 int size, ret; 127 int size, ret;
127 128
128 /* we don't do packed 24bpp */ 129 /* we don't do packed 24bpp */
@@ -139,7 +140,12 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
139 140
140 size = mode_cmd.pitches[0] * mode_cmd.height; 141 size = mode_cmd.pitches[0] * mode_cmd.height;
141 size = PAGE_ALIGN(size); 142 size = PAGE_ALIGN(size);
142 obj = i915_gem_object_create_stolen(dev, size); 143
144 /* If the FB is too big, just don't use it since fbdev is not very
145 * important and we should probably use that space with FBC or other
146 * features. */
147 if (size * 2 < dev_priv->gtt.stolen_usable_size)
148 obj = i915_gem_object_create_stolen(dev, size);
143 if (obj == NULL) 149 if (obj == NULL)
144 obj = i915_gem_alloc_object(dev, size); 150 obj = i915_gem_alloc_object(dev, size);
145 if (!obj) { 151 if (!obj) {
@@ -263,7 +269,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
263 269
264 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ 270 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
265 271
266 DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n", 272 DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08llx, bo %p\n",
267 fb->width, fb->height, 273 fb->width, fb->height,
268 i915_gem_obj_ggtt_offset(obj), obj); 274 i915_gem_obj_ggtt_offset(obj), obj);
269 275
@@ -541,16 +547,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
541 struct intel_crtc *intel_crtc; 547 struct intel_crtc *intel_crtc;
542 unsigned int max_size = 0; 548 unsigned int max_size = 0;
543 549
544 if (!i915.fastboot)
545 return false;
546
547 /* Find the largest fb */ 550 /* Find the largest fb */
548 for_each_crtc(dev, crtc) { 551 for_each_crtc(dev, crtc) {
549 struct drm_i915_gem_object *obj = 552 struct drm_i915_gem_object *obj =
550 intel_fb_obj(crtc->primary->state->fb); 553 intel_fb_obj(crtc->primary->state->fb);
551 intel_crtc = to_intel_crtc(crtc); 554 intel_crtc = to_intel_crtc(crtc);
552 555
553 if (!intel_crtc->active || !obj) { 556 if (!crtc->state->active || !obj) {
554 DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n", 557 DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
555 pipe_name(intel_crtc->pipe)); 558 pipe_name(intel_crtc->pipe));
556 continue; 559 continue;
@@ -575,7 +578,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
575 578
576 intel_crtc = to_intel_crtc(crtc); 579 intel_crtc = to_intel_crtc(crtc);
577 580
578 if (!intel_crtc->active) { 581 if (!crtc->state->active) {
579 DRM_DEBUG_KMS("pipe %c not active, skipping\n", 582 DRM_DEBUG_KMS("pipe %c not active, skipping\n",
580 pipe_name(intel_crtc->pipe)); 583 pipe_name(intel_crtc->pipe));
581 continue; 584 continue;
@@ -638,7 +641,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
638 for_each_crtc(dev, crtc) { 641 for_each_crtc(dev, crtc) {
639 intel_crtc = to_intel_crtc(crtc); 642 intel_crtc = to_intel_crtc(crtc);
640 643
641 if (!intel_crtc->active) 644 if (!crtc->state->active)
642 continue; 645 continue;
643 646
644 WARN(!crtc->primary->fb, 647 WARN(!crtc->primary->fb,
@@ -689,6 +692,8 @@ int intel_fbdev_init(struct drm_device *dev)
689 return ret; 692 return ret;
690 } 693 }
691 694
695 ifbdev->helper.atomic = true;
696
692 dev_priv->fbdev = ifbdev; 697 dev_priv->fbdev = ifbdev;
693 INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker); 698 INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
694 699
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
new file mode 100644
index 000000000000..081d5f648d26
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -0,0 +1,124 @@
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24#ifndef _INTEL_GUC_H_
25#define _INTEL_GUC_H_
26
27#include "intel_guc_fwif.h"
28#include "i915_guc_reg.h"
29
30struct i915_guc_client {
31 struct drm_i915_gem_object *client_obj;
32 struct intel_context *owner;
33 struct intel_guc *guc;
34 uint32_t priority;
35 uint32_t ctx_index;
36
37 uint32_t proc_desc_offset;
38 uint32_t doorbell_offset;
39 uint32_t cookie;
40 uint16_t doorbell_id;
41 uint16_t padding; /* Maintain alignment */
42
43 uint32_t wq_offset;
44 uint32_t wq_size;
45
46 spinlock_t wq_lock; /* Protects all data below */
47 uint32_t wq_tail;
48
49 /* GuC submission statistics & status */
50 uint64_t submissions[I915_NUM_RINGS];
51 uint32_t q_fail;
52 uint32_t b_fail;
53 int retcode;
54};
55
56enum intel_guc_fw_status {
57 GUC_FIRMWARE_FAIL = -1,
58 GUC_FIRMWARE_NONE = 0,
59 GUC_FIRMWARE_PENDING,
60 GUC_FIRMWARE_SUCCESS
61};
62
63/*
64 * This structure encapsulates all the data needed during the process
65 * of fetching, caching, and loading the firmware image into the GuC.
66 */
67struct intel_guc_fw {
68 struct drm_device * guc_dev;
69 const char * guc_fw_path;
70 size_t guc_fw_size;
71 struct drm_i915_gem_object * guc_fw_obj;
72 enum intel_guc_fw_status guc_fw_fetch_status;
73 enum intel_guc_fw_status guc_fw_load_status;
74
75 uint16_t guc_fw_major_wanted;
76 uint16_t guc_fw_minor_wanted;
77 uint16_t guc_fw_major_found;
78 uint16_t guc_fw_minor_found;
79};
80
81struct intel_guc {
82 struct intel_guc_fw guc_fw;
83
84 uint32_t log_flags;
85 struct drm_i915_gem_object *log_obj;
86
87 struct drm_i915_gem_object *ctx_pool_obj;
88 struct ida ctx_ids;
89
90 struct i915_guc_client *execbuf_client;
91
92 spinlock_t host2guc_lock; /* Protects all data below */
93
94 DECLARE_BITMAP(doorbell_bitmap, GUC_MAX_DOORBELLS);
95 uint32_t db_cacheline; /* Cyclic counter mod pagesize */
96
97 /* Action status & statistics */
98 uint64_t action_count; /* Total commands issued */
99 uint32_t action_cmd; /* Last command word */
100 uint32_t action_status; /* Last return status */
101 uint32_t action_fail; /* Total number of failures */
102 int32_t action_err; /* Last error code */
103
104 uint64_t submissions[I915_NUM_RINGS];
105 uint32_t last_seqno[I915_NUM_RINGS];
106};
107
108/* intel_guc_loader.c */
109extern void intel_guc_ucode_init(struct drm_device *dev);
110extern int intel_guc_ucode_load(struct drm_device *dev);
111extern void intel_guc_ucode_fini(struct drm_device *dev);
112extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status);
113extern int intel_guc_suspend(struct drm_device *dev);
114extern int intel_guc_resume(struct drm_device *dev);
115
116/* i915_guc_submission.c */
117int i915_guc_submission_init(struct drm_device *dev);
118int i915_guc_submission_enable(struct drm_device *dev);
119int i915_guc_submit(struct i915_guc_client *client,
120 struct drm_i915_gem_request *rq);
121void i915_guc_submission_disable(struct drm_device *dev);
122void i915_guc_submission_fini(struct drm_device *dev);
123
124#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 18d7f20936c8..593d2f585978 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -32,17 +32,16 @@
32 * EDITING THIS FILE IS THEREFORE NOT RECOMMENDED - YOUR CHANGES MAY BE LOST. 32 * EDITING THIS FILE IS THEREFORE NOT RECOMMENDED - YOUR CHANGES MAY BE LOST.
33 */ 33 */
34 34
35#define GFXCORE_FAMILY_GEN8 11
36#define GFXCORE_FAMILY_GEN9 12 35#define GFXCORE_FAMILY_GEN9 12
37#define GFXCORE_FAMILY_FORCE_ULONG 0x7fffffff 36#define GFXCORE_FAMILY_UNKNOWN 0x7fffffff
38 37
39#define GUC_CTX_PRIORITY_CRITICAL 0 38#define GUC_CTX_PRIORITY_KMD_HIGH 0
40#define GUC_CTX_PRIORITY_HIGH 1 39#define GUC_CTX_PRIORITY_HIGH 1
41#define GUC_CTX_PRIORITY_NORMAL 2 40#define GUC_CTX_PRIORITY_KMD_NORMAL 2
42#define GUC_CTX_PRIORITY_LOW 3 41#define GUC_CTX_PRIORITY_NORMAL 3
43 42
44#define GUC_MAX_GPU_CONTEXTS 1024 43#define GUC_MAX_GPU_CONTEXTS 1024
45#define GUC_INVALID_CTX_ID (GUC_MAX_GPU_CONTEXTS + 1) 44#define GUC_INVALID_CTX_ID GUC_MAX_GPU_CONTEXTS
46 45
47/* Work queue item header definitions */ 46/* Work queue item header definitions */
48#define WQ_STATUS_ACTIVE 1 47#define WQ_STATUS_ACTIVE 1
@@ -76,6 +75,7 @@
76#define GUC_CTX_DESC_ATTR_RESET (1 << 4) 75#define GUC_CTX_DESC_ATTR_RESET (1 << 4)
77#define GUC_CTX_DESC_ATTR_WQLOCKED (1 << 5) 76#define GUC_CTX_DESC_ATTR_WQLOCKED (1 << 5)
78#define GUC_CTX_DESC_ATTR_PCH (1 << 6) 77#define GUC_CTX_DESC_ATTR_PCH (1 << 6)
78#define GUC_CTX_DESC_ATTR_TERMINATED (1 << 7)
79 79
80/* The guc control data is 10 DWORDs */ 80/* The guc control data is 10 DWORDs */
81#define GUC_CTL_CTXINFO 0 81#define GUC_CTL_CTXINFO 0
@@ -108,6 +108,7 @@
108#define GUC_CTL_DISABLE_SCHEDULER (1 << 4) 108#define GUC_CTL_DISABLE_SCHEDULER (1 << 4)
109#define GUC_CTL_PREEMPTION_LOG (1 << 5) 109#define GUC_CTL_PREEMPTION_LOG (1 << 5)
110#define GUC_CTL_ENABLE_SLPC (1 << 7) 110#define GUC_CTL_ENABLE_SLPC (1 << 7)
111#define GUC_CTL_RESET_ON_PREMPT_FAILURE (1 << 8)
111#define GUC_CTL_DEBUG 8 112#define GUC_CTL_DEBUG 8
112#define GUC_LOG_VERBOSITY_SHIFT 0 113#define GUC_LOG_VERBOSITY_SHIFT 0
113#define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT) 114#define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT)
@@ -117,8 +118,9 @@
117/* Verbosity range-check limits, without the shift */ 118/* Verbosity range-check limits, without the shift */
118#define GUC_LOG_VERBOSITY_MIN 0 119#define GUC_LOG_VERBOSITY_MIN 0
119#define GUC_LOG_VERBOSITY_MAX 3 120#define GUC_LOG_VERBOSITY_MAX 3
121#define GUC_CTL_RSRVD 9
120 122
121#define GUC_CTL_MAX_DWORDS (GUC_CTL_DEBUG + 1) 123#define GUC_CTL_MAX_DWORDS (GUC_CTL_RSRVD + 1)
122 124
123struct guc_doorbell_info { 125struct guc_doorbell_info {
124 u32 db_status; 126 u32 db_status;
@@ -208,18 +210,31 @@ struct guc_context_desc {
208 210
209 u32 engine_presence; 211 u32 engine_presence;
210 212
211 u32 reserved0[1]; 213 u8 engine_suspended;
214
215 u8 reserved0[3];
212 u64 reserved1[1]; 216 u64 reserved1[1];
213 217
214 u64 desc_private; 218 u64 desc_private;
215} __packed; 219} __packed;
216 220
221#define GUC_FORCEWAKE_RENDER (1 << 0)
222#define GUC_FORCEWAKE_MEDIA (1 << 1)
223
224#define GUC_POWER_UNSPECIFIED 0
225#define GUC_POWER_D0 1
226#define GUC_POWER_D1 2
227#define GUC_POWER_D2 3
228#define GUC_POWER_D3 4
229
217/* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */ 230/* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
218enum host2guc_action { 231enum host2guc_action {
219 HOST2GUC_ACTION_DEFAULT = 0x0, 232 HOST2GUC_ACTION_DEFAULT = 0x0,
220 HOST2GUC_ACTION_SAMPLE_FORCEWAKE = 0x6, 233 HOST2GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
221 HOST2GUC_ACTION_ALLOCATE_DOORBELL = 0x10, 234 HOST2GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
222 HOST2GUC_ACTION_DEALLOCATE_DOORBELL = 0x20, 235 HOST2GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
236 HOST2GUC_ACTION_ENTER_S_STATE = 0x501,
237 HOST2GUC_ACTION_EXIT_S_STATE = 0x502,
223 HOST2GUC_ACTION_SLPC_REQUEST = 0x3003, 238 HOST2GUC_ACTION_SLPC_REQUEST = 0x3003,
224 HOST2GUC_ACTION_LIMIT 239 HOST2GUC_ACTION_LIMIT
225}; 240};
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
new file mode 100644
index 000000000000..3541f76c65a7
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -0,0 +1,608 @@
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Vinit Azad <vinit.azad@intel.com>
25 * Ben Widawsky <ben@bwidawsk.net>
26 * Dave Gordon <david.s.gordon@intel.com>
27 * Alex Dai <yu.dai@intel.com>
28 */
29#include <linux/firmware.h>
30#include "i915_drv.h"
31#include "intel_guc.h"
32
33/**
34 * DOC: GuC
35 *
36 * intel_guc:
37 * Top level structure of guc. It handles firmware loading and manages client
38 * pool and doorbells. intel_guc owns a i915_guc_client to replace the legacy
39 * ExecList submission.
40 *
41 * Firmware versioning:
42 * The firmware build process will generate a version header file with major and
43 * minor version defined. The versions are built into CSS header of firmware.
44 * i915 kernel driver set the minimal firmware version required per platform.
45 * The firmware installation package will install (symbolic link) proper version
46 * of firmware.
47 *
48 * GuC address space:
49 * GuC does not allow any gfx GGTT address that falls into range [0, WOPCM_TOP),
50 * which is reserved for Boot ROM, SRAM and WOPCM. Currently this top address is
51 * 512K. In order to exclude 0-512K address space from GGTT, all gfx objects
52 * used by GuC is pinned with PIN_OFFSET_BIAS along with size of WOPCM.
53 *
54 * Firmware log:
55 * Firmware log is enabled by setting i915.guc_log_level to non-negative level.
56 * Log data is printed out via reading debugfs i915_guc_log_dump. Reading from
57 * i915_guc_load_status will print out firmware loading status and scratch
58 * registers value.
59 *
60 */
61
62#define I915_SKL_GUC_UCODE "i915/skl_guc_ver4.bin"
63MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
64
65/* User-friendly representation of an enum */
66const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
67{
68 switch (status) {
69 case GUC_FIRMWARE_FAIL:
70 return "FAIL";
71 case GUC_FIRMWARE_NONE:
72 return "NONE";
73 case GUC_FIRMWARE_PENDING:
74 return "PENDING";
75 case GUC_FIRMWARE_SUCCESS:
76 return "SUCCESS";
77 default:
78 return "UNKNOWN!";
79 }
80};
81
82static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
83{
84 struct intel_engine_cs *ring;
85 int i, irqs;
86
87 /* tell all command streamers NOT to forward interrupts and vblank to GuC */
88 irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
89 irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
90 for_each_ring(ring, dev_priv, i)
91 I915_WRITE(RING_MODE_GEN7(ring), irqs);
92
93 /* route all GT interrupts to the host */
94 I915_WRITE(GUC_BCS_RCS_IER, 0);
95 I915_WRITE(GUC_VCS2_VCS1_IER, 0);
96 I915_WRITE(GUC_WD_VECS_IER, 0);
97}
98
99static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
100{
101 struct intel_engine_cs *ring;
102 int i, irqs;
103
104 /* tell all command streamers to forward interrupts and vblank to GuC */
105 irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
106 irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
107 for_each_ring(ring, dev_priv, i)
108 I915_WRITE(RING_MODE_GEN7(ring), irqs);
109
110 /* route USER_INTERRUPT to Host, all others are sent to GuC. */
111 irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
112 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
113 /* These three registers have the same bit definitions */
114 I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
115 I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
116 I915_WRITE(GUC_WD_VECS_IER, ~irqs);
117}
118
119static u32 get_gttype(struct drm_i915_private *dev_priv)
120{
121 /* XXX: GT type based on PCI device ID? field seems unused by fw */
122 return 0;
123}
124
125static u32 get_core_family(struct drm_i915_private *dev_priv)
126{
127 switch (INTEL_INFO(dev_priv)->gen) {
128 case 9:
129 return GFXCORE_FAMILY_GEN9;
130
131 default:
132 DRM_ERROR("GUC: unsupported core family\n");
133 return GFXCORE_FAMILY_UNKNOWN;
134 }
135}
136
137static void set_guc_init_params(struct drm_i915_private *dev_priv)
138{
139 struct intel_guc *guc = &dev_priv->guc;
140 u32 params[GUC_CTL_MAX_DWORDS];
141 int i;
142
143 memset(&params, 0, sizeof(params));
144
145 params[GUC_CTL_DEVICE_INFO] |=
146 (get_gttype(dev_priv) << GUC_CTL_GTTYPE_SHIFT) |
147 (get_core_family(dev_priv) << GUC_CTL_COREFAMILY_SHIFT);
148
149 /*
150 * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
151 * second. This ARAR is calculated by:
152 * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
153 */
154 params[GUC_CTL_ARAT_HIGH] = 0;
155 params[GUC_CTL_ARAT_LOW] = 100000000;
156
157 params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
158
159 params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
160 GUC_CTL_VCS2_ENABLED;
161
162 if (i915.guc_log_level >= 0) {
163 params[GUC_CTL_LOG_PARAMS] = guc->log_flags;
164 params[GUC_CTL_DEBUG] =
165 i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
166 }
167
168 /* If GuC submission is enabled, set up additional parameters here */
169 if (i915.enable_guc_submission) {
170 u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj);
171 u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;
172
173 pgs >>= PAGE_SHIFT;
174 params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
175 (ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
176
177 params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
178
179 /* Unmask this bit to enable the GuC's internal scheduler */
180 params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
181 }
182
183 I915_WRITE(SOFT_SCRATCH(0), 0);
184
185 for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
186 I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
187}
188
189/*
190 * Read the GuC status register (GUC_STATUS) and store it in the
191 * specified location; then return a boolean indicating whether
192 * the value matches either of two values representing completion
193 * of the GuC boot process.
194 *
195 * This is used for polling the GuC status in a wait_for_atomic()
196 * loop below.
197 */
198static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
199 u32 *status)
200{
201 u32 val = I915_READ(GUC_STATUS);
202 u32 uk_val = val & GS_UKERNEL_MASK;
203 *status = val;
204 return (uk_val == GS_UKERNEL_READY ||
205 ((val & GS_MIA_CORE_STATE) && uk_val == GS_UKERNEL_LAPIC_DONE));
206}
207
208/*
209 * Transfer the firmware image to RAM for execution by the microcontroller.
210 *
211 * GuC Firmware layout:
212 * +-------------------------------+ ----
213 * | CSS header | 128B
214 * | contains major/minor version |
215 * +-------------------------------+ ----
216 * | uCode |
217 * +-------------------------------+ ----
218 * | RSA signature | 256B
219 * +-------------------------------+ ----
220 *
221 * Architecturally, the DMA engine is bidirectional, and can potentially even
222 * transfer between GTT locations. This functionality is left out of the API
223 * for now as there is no need for it.
224 *
225 * Note that GuC needs the CSS header plus uKernel code to be copied by the
226 * DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
227 */
228
229#define UOS_CSS_HEADER_OFFSET 0
230#define UOS_VER_MINOR_OFFSET 0x44
231#define UOS_VER_MAJOR_OFFSET 0x46
232#define UOS_CSS_HEADER_SIZE 0x80
233#define UOS_RSA_SIG_SIZE 0x100
234
/*
 * DMA the firmware image (CSS header + uCode) from its GEM object into the
 * GuC's WOPCM, after first loading the RSA signature into the scratch
 * registers via MMIO.  Then poll until the GuC reports that it has booted.
 *
 * Called with forcewake held.  Returns 0 on success, -ETIMEDOUT if the
 * GuC never came up, or -ENOEXEC if signature verification failed.
 */
static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
{
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj;
	unsigned long offset;
	struct sg_table *sg = fw_obj->pages;
	u32 status, ucode_size, rsa[UOS_RSA_SIG_SIZE / sizeof(u32)];
	int i, ret = 0;

	/* uCode size, also is where RSA signature starts */
	offset = ucode_size = guc_fw->guc_fw_size - UOS_RSA_SIG_SIZE;
	I915_WRITE(DMA_COPY_SIZE, ucode_size);

	/* Copy RSA signature from the fw image to HW for verification */
	sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, UOS_RSA_SIG_SIZE, offset);
	for (i = 0; i < UOS_RSA_SIG_SIZE / sizeof(u32); i++)
		I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);

	/* Set the source address for the new blob */
	offset = i915_gem_obj_ggtt_offset(fw_obj);
	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
	/*
	 * NOTE(review): the high dword is masked to 16 bits, presumably
	 * because the DMA engine only takes a 48-bit source address —
	 * confirm against the h/w spec.
	 */
	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);

	/*
	 * Set the DMA destination. Current uCode expects the code to be
	 * loaded at 8k; locations below this are used for the stack.
	 */
	I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	/* Finally start the DMA */
	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));

	/*
	 * Spin-wait for the DMA to complete & the GuC to start up.
	 * NB: Docs recommend not using the interrupt for completion.
	 * Measurements indicate this should take no more than 20ms, so a
	 * timeout here indicates that the GuC has failed and is unusable.
	 * (Higher levels of the driver will attempt to fall back to
	 * execlist mode if this happens.)
	 */
	ret = wait_for_atomic(guc_ucode_response(dev_priv, &status), 100);

	DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
			I915_READ(DMA_CTRL), status);

	/* RSA failure takes precedence over a mere timeout in the report */
	if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
		DRM_ERROR("GuC firmware signature verification failed\n");
		ret = -ENOEXEC;
	}

	DRM_DEBUG_DRIVER("returning %d\n", ret);

	return ret;
}
290
291/*
292 * Load the GuC firmware blob into the MinuteIA.
293 */
294static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
295{
296 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
297 struct drm_device *dev = dev_priv->dev;
298 int ret;
299
300 ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
301 if (ret) {
302 DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
303 return ret;
304 }
305
306 ret = i915_gem_obj_ggtt_pin(guc_fw->guc_fw_obj, 0, 0);
307 if (ret) {
308 DRM_DEBUG_DRIVER("pin failed %d\n", ret);
309 return ret;
310 }
311
312 /* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
313 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
314
315 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
316
317 /* init WOPCM */
318 I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE);
319 I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);
320
321 /* Enable MIA caching. GuC clock gating is disabled. */
322 I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
323
324 /* WaDisableMinuteIaClockGating:skl,bxt */
325 if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
326 (IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) {
327 I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
328 ~GUC_ENABLE_MIA_CLOCK_GATING));
329 }
330
331 /* WaC6DisallowByGfxPause*/
332 I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
333
334 if (IS_BROXTON(dev))
335 I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
336 else
337 I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
338
339 if (IS_GEN9(dev)) {
340 /* DOP Clock Gating Enable for GuC clocks */
341 I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
342 I915_READ(GEN7_MISCCPCTL)));
343
344 /* allows for 5us before GT can go to RC6 */
345 I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
346 }
347
348 set_guc_init_params(dev_priv);
349
350 ret = guc_ucode_xfer_dma(dev_priv);
351
352 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
353
354 /*
355 * We keep the object pages for reuse during resume. But we can unpin it
356 * now that DMA has completed, so it doesn't continue to take up space.
357 */
358 i915_gem_object_ggtt_unpin(guc_fw->guc_fw_obj);
359
360 return ret;
361}
362
363/**
364 * intel_guc_ucode_load() - load GuC uCode into the device
365 * @dev: drm device
366 *
367 * Called from gem_init_hw() during driver loading and also after a GPU reset.
368 *
369 * The firmware image should have already been fetched into memory by the
370 * earlier call to intel_guc_ucode_init(), so here we need only check that
371 * is succeeded, and then transfer the image to the h/w.
372 *
373 * Return: non-zero code on error
374 */
375int intel_guc_ucode_load(struct drm_device *dev)
376{
377 struct drm_i915_private *dev_priv = dev->dev_private;
378 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
379 int err = 0;
380
381 DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
382 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
383 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
384
385 direct_interrupts_to_host(dev_priv);
386
387 if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE)
388 return 0;
389
390 if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_SUCCESS &&
391 guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL)
392 return -ENOEXEC;
393
394 guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
395
396 DRM_DEBUG_DRIVER("GuC fw fetch status %s\n",
397 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
398
399 switch (guc_fw->guc_fw_fetch_status) {
400 case GUC_FIRMWARE_FAIL:
401 /* something went wrong :( */
402 err = -EIO;
403 goto fail;
404
405 case GUC_FIRMWARE_NONE:
406 case GUC_FIRMWARE_PENDING:
407 default:
408 /* "can't happen" */
409 WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n",
410 guc_fw->guc_fw_path,
411 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
412 guc_fw->guc_fw_fetch_status);
413 err = -ENXIO;
414 goto fail;
415
416 case GUC_FIRMWARE_SUCCESS:
417 break;
418 }
419
420 err = i915_guc_submission_init(dev);
421 if (err)
422 goto fail;
423
424 err = guc_ucode_xfer(dev_priv);
425 if (err)
426 goto fail;
427
428 guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
429
430 DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
431 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
432 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
433
434 if (i915.enable_guc_submission) {
435 /* The execbuf_client will be recreated. Release it first. */
436 i915_guc_submission_disable(dev);
437
438 err = i915_guc_submission_enable(dev);
439 if (err)
440 goto fail;
441 direct_interrupts_to_guc(dev_priv);
442 }
443
444 return 0;
445
446fail:
447 if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
448 guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
449
450 direct_interrupts_to_host(dev_priv);
451 i915_guc_submission_disable(dev);
452
453 return err;
454}
455
456static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
457{
458 struct drm_i915_gem_object *obj;
459 const struct firmware *fw;
460 const u8 *css_header;
461 const size_t minsize = UOS_CSS_HEADER_SIZE + UOS_RSA_SIG_SIZE;
462 const size_t maxsize = GUC_WOPCM_SIZE_VALUE + UOS_RSA_SIG_SIZE
463 - 0x8000; /* 32k reserved (8K stack + 24k context) */
464 int err;
465
466 DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
467 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
468
469 err = request_firmware(&fw, guc_fw->guc_fw_path, &dev->pdev->dev);
470 if (err)
471 goto fail;
472 if (!fw)
473 goto fail;
474
475 DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n",
476 guc_fw->guc_fw_path, fw);
477 DRM_DEBUG_DRIVER("firmware file size %zu (minimum %zu, maximum %zu)\n",
478 fw->size, minsize, maxsize);
479
480 /* Check the size of the blob befoe examining buffer contents */
481 if (fw->size < minsize || fw->size > maxsize)
482 goto fail;
483
484 /*
485 * The GuC firmware image has the version number embedded at a well-known
486 * offset within the firmware blob; note that major / minor version are
487 * TWO bytes each (i.e. u16), although all pointers and offsets are defined
488 * in terms of bytes (u8).
489 */
490 css_header = fw->data + UOS_CSS_HEADER_OFFSET;
491 guc_fw->guc_fw_major_found = *(u16 *)(css_header + UOS_VER_MAJOR_OFFSET);
492 guc_fw->guc_fw_minor_found = *(u16 *)(css_header + UOS_VER_MINOR_OFFSET);
493
494 if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
495 guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
496 DRM_ERROR("GuC firmware version %d.%d, required %d.%d\n",
497 guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
498 guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
499 err = -ENOEXEC;
500 goto fail;
501 }
502
503 DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
504 guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
505 guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
506
507 mutex_lock(&dev->struct_mutex);
508 obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
509 mutex_unlock(&dev->struct_mutex);
510 if (IS_ERR_OR_NULL(obj)) {
511 err = obj ? PTR_ERR(obj) : -ENOMEM;
512 goto fail;
513 }
514
515 guc_fw->guc_fw_obj = obj;
516 guc_fw->guc_fw_size = fw->size;
517
518 DRM_DEBUG_DRIVER("GuC fw fetch status SUCCESS, obj %p\n",
519 guc_fw->guc_fw_obj);
520
521 release_firmware(fw);
522 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_SUCCESS;
523 return;
524
525fail:
526 DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n",
527 err, fw, guc_fw->guc_fw_obj);
528 DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
529 guc_fw->guc_fw_path, err);
530
531 obj = guc_fw->guc_fw_obj;
532 if (obj)
533 drm_gem_object_unreference(&obj->base);
534 guc_fw->guc_fw_obj = NULL;
535
536 release_firmware(fw); /* OK even if fw is NULL */
537 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
538}
539
540/**
541 * intel_guc_ucode_init() - define parameters and fetch firmware
542 * @dev: drm device
543 *
544 * Called early during driver load, but after GEM is initialised.
545 *
546 * The firmware will be transferred to the GuC's memory later,
547 * when intel_guc_ucode_load() is called.
548 */
549void intel_guc_ucode_init(struct drm_device *dev)
550{
551 struct drm_i915_private *dev_priv = dev->dev_private;
552 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
553 const char *fw_path;
554
555 if (!HAS_GUC_SCHED(dev))
556 i915.enable_guc_submission = false;
557
558 if (!HAS_GUC_UCODE(dev)) {
559 fw_path = NULL;
560 } else if (IS_SKYLAKE(dev)) {
561 fw_path = I915_SKL_GUC_UCODE;
562 guc_fw->guc_fw_major_wanted = 4;
563 guc_fw->guc_fw_minor_wanted = 3;
564 } else {
565 i915.enable_guc_submission = false;
566 fw_path = ""; /* unknown device */
567 }
568
569 guc_fw->guc_dev = dev;
570 guc_fw->guc_fw_path = fw_path;
571 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
572 guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;
573
574 if (fw_path == NULL)
575 return;
576
577 if (*fw_path == '\0') {
578 DRM_ERROR("No GuC firmware known for this platform\n");
579 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
580 return;
581 }
582
583 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
584 DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
585 guc_fw_fetch(dev, guc_fw);
586 /* status must now be FAIL or SUCCESS */
587}
588
589/**
590 * intel_guc_ucode_fini() - clean up all allocated resources
591 * @dev: drm device
592 */
593void intel_guc_ucode_fini(struct drm_device *dev)
594{
595 struct drm_i915_private *dev_priv = dev->dev_private;
596 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
597
598 direct_interrupts_to_host(dev_priv);
599 i915_guc_submission_fini(dev);
600
601 mutex_lock(&dev->struct_mutex);
602 if (guc_fw->guc_fw_obj)
603 drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
604 guc_fw->guc_fw_obj = NULL;
605 mutex_unlock(&dev->struct_mutex);
606
607 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
608}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index dcd336bcdfe7..9eafa191cee2 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -113,17 +113,18 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
113 } 113 }
114} 114}
115 115
116static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type, 116static u32 hsw_dip_data_reg(struct drm_i915_private *dev_priv,
117 enum transcoder cpu_transcoder, 117 enum transcoder cpu_transcoder,
118 struct drm_i915_private *dev_priv) 118 enum hdmi_infoframe_type type,
119 int i)
119{ 120{
120 switch (type) { 121 switch (type) {
121 case HDMI_INFOFRAME_TYPE_AVI: 122 case HDMI_INFOFRAME_TYPE_AVI:
122 return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder); 123 return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i);
123 case HDMI_INFOFRAME_TYPE_SPD: 124 case HDMI_INFOFRAME_TYPE_SPD:
124 return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder); 125 return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder, i);
125 case HDMI_INFOFRAME_TYPE_VENDOR: 126 case HDMI_INFOFRAME_TYPE_VENDOR:
126 return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder); 127 return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder, i);
127 default: 128 default:
128 DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); 129 DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
129 return 0; 130 return 0;
@@ -365,14 +366,13 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
365 struct drm_device *dev = encoder->dev; 366 struct drm_device *dev = encoder->dev;
366 struct drm_i915_private *dev_priv = dev->dev_private; 367 struct drm_i915_private *dev_priv = dev->dev_private;
367 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 368 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
368 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder); 369 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
370 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
369 u32 data_reg; 371 u32 data_reg;
370 int i; 372 int i;
371 u32 val = I915_READ(ctl_reg); 373 u32 val = I915_READ(ctl_reg);
372 374
373 data_reg = hsw_infoframe_data_reg(type, 375 data_reg = hsw_dip_data_reg(dev_priv, cpu_transcoder, type, 0);
374 intel_crtc->config->cpu_transcoder,
375 dev_priv);
376 if (data_reg == 0) 376 if (data_reg == 0)
377 return; 377 return;
378 378
@@ -381,12 +381,14 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
381 381
382 mmiowb(); 382 mmiowb();
383 for (i = 0; i < len; i += 4) { 383 for (i = 0; i < len; i += 4) {
384 I915_WRITE(data_reg + i, *data); 384 I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
385 type, i >> 2), *data);
385 data++; 386 data++;
386 } 387 }
387 /* Write every possible data byte to force correct ECC calculation. */ 388 /* Write every possible data byte to force correct ECC calculation. */
388 for (; i < VIDEO_DIP_DATA_SIZE; i += 4) 389 for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
389 I915_WRITE(data_reg + i, 0); 390 I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
391 type, i >> 2), 0);
390 mmiowb(); 392 mmiowb();
391 393
392 val |= hsw_infoframe_enable(type); 394 val |= hsw_infoframe_enable(type);
@@ -447,16 +449,13 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
447} 449}
448 450
449static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, 451static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
450 struct drm_display_mode *adjusted_mode) 452 const struct drm_display_mode *adjusted_mode)
451{ 453{
452 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 454 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
453 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 455 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
454 union hdmi_infoframe frame; 456 union hdmi_infoframe frame;
455 int ret; 457 int ret;
456 458
457 /* Set user selected PAR to incoming mode's member */
458 adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio;
459
460 ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, 459 ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
461 adjusted_mode); 460 adjusted_mode);
462 if (ret < 0) { 461 if (ret < 0) {
@@ -494,7 +493,7 @@ static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
494 493
495static void 494static void
496intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder, 495intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
497 struct drm_display_mode *adjusted_mode) 496 const struct drm_display_mode *adjusted_mode)
498{ 497{
499 union hdmi_infoframe frame; 498 union hdmi_infoframe frame;
500 int ret; 499 int ret;
@@ -509,7 +508,7 @@ intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
509 508
510static void g4x_set_infoframes(struct drm_encoder *encoder, 509static void g4x_set_infoframes(struct drm_encoder *encoder,
511 bool enable, 510 bool enable,
512 struct drm_display_mode *adjusted_mode) 511 const struct drm_display_mode *adjusted_mode)
513{ 512{
514 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 513 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
515 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 514 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
@@ -661,7 +660,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
661 660
662static void ibx_set_infoframes(struct drm_encoder *encoder, 661static void ibx_set_infoframes(struct drm_encoder *encoder,
663 bool enable, 662 bool enable,
664 struct drm_display_mode *adjusted_mode) 663 const struct drm_display_mode *adjusted_mode)
665{ 664{
666 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 665 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
667 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 666 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
@@ -713,7 +712,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
713 712
714static void cpt_set_infoframes(struct drm_encoder *encoder, 713static void cpt_set_infoframes(struct drm_encoder *encoder,
715 bool enable, 714 bool enable,
716 struct drm_display_mode *adjusted_mode) 715 const struct drm_display_mode *adjusted_mode)
717{ 716{
718 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 717 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
719 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 718 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
@@ -755,7 +754,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
755 754
756static void vlv_set_infoframes(struct drm_encoder *encoder, 755static void vlv_set_infoframes(struct drm_encoder *encoder,
757 bool enable, 756 bool enable,
758 struct drm_display_mode *adjusted_mode) 757 const struct drm_display_mode *adjusted_mode)
759{ 758{
760 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 759 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
761 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 760 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
@@ -807,7 +806,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
807 806
808static void hsw_set_infoframes(struct drm_encoder *encoder, 807static void hsw_set_infoframes(struct drm_encoder *encoder,
809 bool enable, 808 bool enable,
810 struct drm_display_mode *adjusted_mode) 809 const struct drm_display_mode *adjusted_mode)
811{ 810{
812 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 811 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
813 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 812 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
@@ -844,12 +843,12 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
844 struct drm_i915_private *dev_priv = dev->dev_private; 843 struct drm_i915_private *dev_priv = dev->dev_private;
845 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 844 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
846 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 845 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
847 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 846 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
848 u32 hdmi_val; 847 u32 hdmi_val;
849 848
850 hdmi_val = SDVO_ENCODING_HDMI; 849 hdmi_val = SDVO_ENCODING_HDMI;
851 if (!HAS_PCH_SPLIT(dev)) 850 if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
852 hdmi_val |= intel_hdmi->color_range; 851 hdmi_val |= HDMI_COLOR_RANGE_16_235;
853 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 852 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
854 hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH; 853 hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
855 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 854 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1260,11 +1259,12 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
1260 1259
1261 if (intel_hdmi->color_range_auto) { 1260 if (intel_hdmi->color_range_auto) {
1262 /* See CEA-861-E - 5.1 Default Encoding Parameters */ 1261 /* See CEA-861-E - 5.1 Default Encoding Parameters */
1263 if (pipe_config->has_hdmi_sink && 1262 pipe_config->limited_color_range =
1264 drm_match_cea_mode(adjusted_mode) > 1) 1263 pipe_config->has_hdmi_sink &&
1265 intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235; 1264 drm_match_cea_mode(adjusted_mode) > 1;
1266 else 1265 } else {
1267 intel_hdmi->color_range = 0; 1266 pipe_config->limited_color_range =
1267 intel_hdmi->limited_color_range;
1268 } 1268 }
1269 1269
1270 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) { 1270 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
@@ -1273,9 +1273,6 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
1273 clock_12bpc *= 2; 1273 clock_12bpc *= 2;
1274 } 1274 }
1275 1275
1276 if (intel_hdmi->color_range)
1277 pipe_config->limited_color_range = true;
1278
1279 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev)) 1276 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev))
1280 pipe_config->has_pch_encoder = true; 1277 pipe_config->has_pch_encoder = true;
1281 1278
@@ -1314,6 +1311,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
1314 return false; 1311 return false;
1315 } 1312 }
1316 1313
1314 /* Set user selected PAR to incoming mode's member */
1315 adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio;
1316
1317 return true; 1317 return true;
1318} 1318}
1319 1319
@@ -1331,22 +1331,23 @@ intel_hdmi_unset_edid(struct drm_connector *connector)
1331} 1331}
1332 1332
1333static bool 1333static bool
1334intel_hdmi_set_edid(struct drm_connector *connector) 1334intel_hdmi_set_edid(struct drm_connector *connector, bool force)
1335{ 1335{
1336 struct drm_i915_private *dev_priv = to_i915(connector->dev); 1336 struct drm_i915_private *dev_priv = to_i915(connector->dev);
1337 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 1337 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
1338 struct intel_encoder *intel_encoder = 1338 struct intel_encoder *intel_encoder =
1339 &hdmi_to_dig_port(intel_hdmi)->base; 1339 &hdmi_to_dig_port(intel_hdmi)->base;
1340 enum intel_display_power_domain power_domain; 1340 enum intel_display_power_domain power_domain;
1341 struct edid *edid; 1341 struct edid *edid = NULL;
1342 bool connected = false; 1342 bool connected = false;
1343 1343
1344 power_domain = intel_display_port_power_domain(intel_encoder); 1344 power_domain = intel_display_port_power_domain(intel_encoder);
1345 intel_display_power_get(dev_priv, power_domain); 1345 intel_display_power_get(dev_priv, power_domain);
1346 1346
1347 edid = drm_get_edid(connector, 1347 if (force)
1348 intel_gmbus_get_adapter(dev_priv, 1348 edid = drm_get_edid(connector,
1349 intel_hdmi->ddc_bus)); 1349 intel_gmbus_get_adapter(dev_priv,
1350 intel_hdmi->ddc_bus));
1350 1351
1351 intel_display_power_put(dev_priv, power_domain); 1352 intel_display_power_put(dev_priv, power_domain);
1352 1353
@@ -1374,13 +1375,26 @@ static enum drm_connector_status
1374intel_hdmi_detect(struct drm_connector *connector, bool force) 1375intel_hdmi_detect(struct drm_connector *connector, bool force)
1375{ 1376{
1376 enum drm_connector_status status; 1377 enum drm_connector_status status;
1378 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
1379 struct drm_i915_private *dev_priv = to_i915(connector->dev);
1380 bool live_status = false;
1381 unsigned int retry = 3;
1377 1382
1378 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 1383 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1379 connector->base.id, connector->name); 1384 connector->base.id, connector->name);
1380 1385
1386 while (!live_status && --retry) {
1387 live_status = intel_digital_port_connected(dev_priv,
1388 hdmi_to_dig_port(intel_hdmi));
1389 mdelay(10);
1390 }
1391
1392 if (!live_status)
1393 DRM_DEBUG_KMS("Live status not up!");
1394
1381 intel_hdmi_unset_edid(connector); 1395 intel_hdmi_unset_edid(connector);
1382 1396
1383 if (intel_hdmi_set_edid(connector)) { 1397 if (intel_hdmi_set_edid(connector, live_status)) {
1384 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 1398 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
1385 1399
1386 hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI; 1400 hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
@@ -1404,7 +1418,7 @@ intel_hdmi_force(struct drm_connector *connector)
1404 if (connector->status != connector_status_connected) 1418 if (connector->status != connector_status_connected)
1405 return; 1419 return;
1406 1420
1407 intel_hdmi_set_edid(connector); 1421 intel_hdmi_set_edid(connector, true);
1408 hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI; 1422 hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
1409} 1423}
1410 1424
@@ -1470,7 +1484,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
1470 1484
1471 if (property == dev_priv->broadcast_rgb_property) { 1485 if (property == dev_priv->broadcast_rgb_property) {
1472 bool old_auto = intel_hdmi->color_range_auto; 1486 bool old_auto = intel_hdmi->color_range_auto;
1473 uint32_t old_range = intel_hdmi->color_range; 1487 bool old_range = intel_hdmi->limited_color_range;
1474 1488
1475 switch (val) { 1489 switch (val) {
1476 case INTEL_BROADCAST_RGB_AUTO: 1490 case INTEL_BROADCAST_RGB_AUTO:
@@ -1478,18 +1492,18 @@ intel_hdmi_set_property(struct drm_connector *connector,
1478 break; 1492 break;
1479 case INTEL_BROADCAST_RGB_FULL: 1493 case INTEL_BROADCAST_RGB_FULL:
1480 intel_hdmi->color_range_auto = false; 1494 intel_hdmi->color_range_auto = false;
1481 intel_hdmi->color_range = 0; 1495 intel_hdmi->limited_color_range = false;
1482 break; 1496 break;
1483 case INTEL_BROADCAST_RGB_LIMITED: 1497 case INTEL_BROADCAST_RGB_LIMITED:
1484 intel_hdmi->color_range_auto = false; 1498 intel_hdmi->color_range_auto = false;
1485 intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235; 1499 intel_hdmi->limited_color_range = true;
1486 break; 1500 break;
1487 default: 1501 default:
1488 return -EINVAL; 1502 return -EINVAL;
1489 } 1503 }
1490 1504
1491 if (old_auto == intel_hdmi->color_range_auto && 1505 if (old_auto == intel_hdmi->color_range_auto &&
1492 old_range == intel_hdmi->color_range) 1506 old_range == intel_hdmi->limited_color_range)
1493 return 0; 1507 return 0;
1494 1508
1495 goto done; 1509 goto done;
@@ -1525,8 +1539,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
1525{ 1539{
1526 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 1540 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
1527 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 1541 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
1528 struct drm_display_mode *adjusted_mode = 1542 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
1529 &intel_crtc->config->base.adjusted_mode;
1530 1543
1531 intel_hdmi_prepare(encoder); 1544 intel_hdmi_prepare(encoder);
1532 1545
@@ -1543,8 +1556,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
1543 struct drm_i915_private *dev_priv = dev->dev_private; 1556 struct drm_i915_private *dev_priv = dev->dev_private;
1544 struct intel_crtc *intel_crtc = 1557 struct intel_crtc *intel_crtc =
1545 to_intel_crtc(encoder->base.crtc); 1558 to_intel_crtc(encoder->base.crtc);
1546 struct drm_display_mode *adjusted_mode = 1559 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
1547 &intel_crtc->config->base.adjusted_mode;
1548 enum dpio_channel port = vlv_dport_to_channel(dport); 1560 enum dpio_channel port = vlv_dport_to_channel(dport);
1549 int pipe = intel_crtc->pipe; 1561 int pipe = intel_crtc->pipe;
1550 u32 val; 1562 u32 val;
@@ -1617,6 +1629,50 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1617 mutex_unlock(&dev_priv->sb_lock); 1629 mutex_unlock(&dev_priv->sb_lock);
1618} 1630}
1619 1631
1632static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
1633 bool reset)
1634{
1635 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1636 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1637 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1638 enum pipe pipe = crtc->pipe;
1639 uint32_t val;
1640
1641 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
1642 if (reset)
1643 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
1644 else
1645 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
1646 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
1647
1648 if (crtc->config->lane_count > 2) {
1649 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
1650 if (reset)
1651 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
1652 else
1653 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
1654 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
1655 }
1656
1657 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
1658 val |= CHV_PCS_REQ_SOFTRESET_EN;
1659 if (reset)
1660 val &= ~DPIO_PCS_CLK_SOFT_RESET;
1661 else
1662 val |= DPIO_PCS_CLK_SOFT_RESET;
1663 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
1664
1665 if (crtc->config->lane_count > 2) {
1666 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
1667 val |= CHV_PCS_REQ_SOFTRESET_EN;
1668 if (reset)
1669 val &= ~DPIO_PCS_CLK_SOFT_RESET;
1670 else
1671 val |= DPIO_PCS_CLK_SOFT_RESET;
1672 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
1673 }
1674}
1675
1620static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder) 1676static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1621{ 1677{
1622 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 1678 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
@@ -1630,8 +1686,21 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1630 1686
1631 intel_hdmi_prepare(encoder); 1687 intel_hdmi_prepare(encoder);
1632 1688
1689 /*
1690 * Must trick the second common lane into life.
1691 * Otherwise we can't even access the PLL.
1692 */
1693 if (ch == DPIO_CH0 && pipe == PIPE_B)
1694 dport->release_cl2_override =
1695 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
1696
1697 chv_phy_powergate_lanes(encoder, true, 0x0);
1698
1633 mutex_lock(&dev_priv->sb_lock); 1699 mutex_lock(&dev_priv->sb_lock);
1634 1700
1701 /* Assert data lane reset */
1702 chv_data_lane_soft_reset(encoder, true);
1703
1635 /* program left/right clock distribution */ 1704 /* program left/right clock distribution */
1636 if (pipe != PIPE_B) { 1705 if (pipe != PIPE_B) {
1637 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); 1706 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
@@ -1683,6 +1752,39 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1683 mutex_unlock(&dev_priv->sb_lock); 1752 mutex_unlock(&dev_priv->sb_lock);
1684} 1753}
1685 1754
1755static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder)
1756{
1757 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1758 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
1759 u32 val;
1760
1761 mutex_lock(&dev_priv->sb_lock);
1762
1763 /* disable left/right clock distribution */
1764 if (pipe != PIPE_B) {
1765 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
1766 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
1767 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
1768 } else {
1769 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
1770 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
1771 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
1772 }
1773
1774 mutex_unlock(&dev_priv->sb_lock);
1775
1776 /*
1777 * Leave the power down bit cleared for at least one
1778 * lane so that chv_powergate_phy_ch() will power
1779 * on something when the channel is otherwise unused.
1780 * When the port is off and the override is removed
1781 * the lanes power down anyway, so otherwise it doesn't
1782 * really matter what the state of power down bits is
1783 * after this.
1784 */
1785 chv_phy_powergate_lanes(encoder, false, 0x0);
1786}
1787
1686static void vlv_hdmi_post_disable(struct intel_encoder *encoder) 1788static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
1687{ 1789{
1688 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 1790 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
@@ -1701,33 +1803,13 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
1701 1803
1702static void chv_hdmi_post_disable(struct intel_encoder *encoder) 1804static void chv_hdmi_post_disable(struct intel_encoder *encoder)
1703{ 1805{
1704 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1705 struct drm_device *dev = encoder->base.dev; 1806 struct drm_device *dev = encoder->base.dev;
1706 struct drm_i915_private *dev_priv = dev->dev_private; 1807 struct drm_i915_private *dev_priv = dev->dev_private;
1707 struct intel_crtc *intel_crtc =
1708 to_intel_crtc(encoder->base.crtc);
1709 enum dpio_channel ch = vlv_dport_to_channel(dport);
1710 enum pipe pipe = intel_crtc->pipe;
1711 u32 val;
1712 1808
1713 mutex_lock(&dev_priv->sb_lock); 1809 mutex_lock(&dev_priv->sb_lock);
1714 1810
1715 /* Propagate soft reset to data lane reset */ 1811 /* Assert data lane reset */
1716 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); 1812 chv_data_lane_soft_reset(encoder, true);
1717 val |= CHV_PCS_REQ_SOFTRESET_EN;
1718 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
1719
1720 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
1721 val |= CHV_PCS_REQ_SOFTRESET_EN;
1722 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
1723
1724 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
1725 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
1726 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
1727
1728 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
1729 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
1730 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
1731 1813
1732 mutex_unlock(&dev_priv->sb_lock); 1814 mutex_unlock(&dev_priv->sb_lock);
1733} 1815}
@@ -1740,8 +1822,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
1740 struct drm_i915_private *dev_priv = dev->dev_private; 1822 struct drm_i915_private *dev_priv = dev->dev_private;
1741 struct intel_crtc *intel_crtc = 1823 struct intel_crtc *intel_crtc =
1742 to_intel_crtc(encoder->base.crtc); 1824 to_intel_crtc(encoder->base.crtc);
1743 struct drm_display_mode *adjusted_mode = 1825 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
1744 &intel_crtc->config->base.adjusted_mode;
1745 enum dpio_channel ch = vlv_dport_to_channel(dport); 1826 enum dpio_channel ch = vlv_dport_to_channel(dport);
1746 int pipe = intel_crtc->pipe; 1827 int pipe = intel_crtc->pipe;
1747 int data, i, stagger; 1828 int data, i, stagger;
@@ -1758,23 +1839,6 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
1758 val &= ~DPIO_LANEDESKEW_STRAP_OVRD; 1839 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
1759 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); 1840 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
1760 1841
1761 /* Deassert soft data lane reset*/
1762 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
1763 val |= CHV_PCS_REQ_SOFTRESET_EN;
1764 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
1765
1766 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
1767 val |= CHV_PCS_REQ_SOFTRESET_EN;
1768 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
1769
1770 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
1771 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
1772 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
1773
1774 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
1775 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
1776 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
1777
1778 /* Program Tx latency optimal setting */ 1842 /* Program Tx latency optimal setting */
1779 for (i = 0; i < 4; i++) { 1843 for (i = 0; i < 4; i++) {
1780 /* Set the upar bit */ 1844 /* Set the upar bit */
@@ -1817,6 +1881,9 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
1817 DPIO_TX1_STAGGER_MULT(7) | 1881 DPIO_TX1_STAGGER_MULT(7) |
1818 DPIO_TX2_STAGGER_MULT(5)); 1882 DPIO_TX2_STAGGER_MULT(5));
1819 1883
1884 /* Deassert data lane reset */
1885 chv_data_lane_soft_reset(encoder, false);
1886
1820 /* Clear calc init */ 1887 /* Clear calc init */
1821 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); 1888 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
1822 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); 1889 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
@@ -1851,31 +1918,33 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
1851 1918
1852 for (i = 0; i < 4; i++) { 1919 for (i = 0; i < 4; i++) {
1853 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); 1920 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1921
1854 val &= ~DPIO_SWING_MARGIN000_MASK; 1922 val &= ~DPIO_SWING_MARGIN000_MASK;
1855 val |= 102 << DPIO_SWING_MARGIN000_SHIFT; 1923 val |= 102 << DPIO_SWING_MARGIN000_SHIFT;
1924
1925 /*
1926 * Supposedly this value shouldn't matter when unique transition
1927 * scale is disabled, but in fact it does matter. Let's just
1928 * always program the same value and hope it's OK.
1929 */
1930 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
1931 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
1932
1856 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); 1933 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
1857 } 1934 }
1858 1935
1859 /* Disable unique transition scale */ 1936 /*
1937 * The document said it needs to set bit 27 for ch0 and bit 26
1938 * for ch1. Might be a typo in the doc.
1939 * For now, for this unique transition scale selection, set bit
1940 * 27 for ch0 and ch1.
1941 */
1860 for (i = 0; i < 4; i++) { 1942 for (i = 0; i < 4; i++) {
1861 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i)); 1943 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
1862 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN; 1944 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
1863 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val); 1945 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
1864 } 1946 }
1865 1947
1866 /* Additional steps for 1200mV-0dB */
1867#if 0
1868 val = vlv_dpio_read(dev_priv, pipe, VLV_TX_DW3(ch));
1869 if (ch)
1870 val |= DPIO_TX_UNIQ_TRANS_SCALE_CH1;
1871 else
1872 val |= DPIO_TX_UNIQ_TRANS_SCALE_CH0;
1873 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(ch), val);
1874
1875 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(ch),
1876 vlv_dpio_read(dev_priv, pipe, VLV_TX_DW2(ch)) |
1877 (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT));
1878#endif
1879 /* Start swing calculation */ 1948 /* Start swing calculation */
1880 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); 1949 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
1881 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; 1950 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
@@ -1885,11 +1954,6 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
1885 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; 1954 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
1886 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); 1955 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
1887 1956
1888 /* LRC Bypass */
1889 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1890 val |= DPIO_LRC_BYPASS;
1891 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
1892
1893 mutex_unlock(&dev_priv->sb_lock); 1957 mutex_unlock(&dev_priv->sb_lock);
1894 1958
1895 intel_hdmi->set_infoframes(&encoder->base, 1959 intel_hdmi->set_infoframes(&encoder->base,
@@ -1899,6 +1963,12 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
1899 g4x_enable_hdmi(encoder); 1963 g4x_enable_hdmi(encoder);
1900 1964
1901 vlv_wait_port_ready(dev_priv, dport, 0x0); 1965 vlv_wait_port_ready(dev_priv, dport, 0x0);
1966
1967 /* Second common lane will stay alive on its own now */
1968 if (dport->release_cl2_override) {
1969 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
1970 dport->release_cl2_override = false;
1971 }
1902} 1972}
1903 1973
1904static void intel_hdmi_destroy(struct drm_connector *connector) 1974static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -1931,15 +2001,6 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
1931}; 2001};
1932 2002
1933static void 2003static void
1934intel_attach_aspect_ratio_property(struct drm_connector *connector)
1935{
1936 if (!drm_mode_create_aspect_ratio_property(connector->dev))
1937 drm_object_attach_property(&connector->base,
1938 connector->dev->mode_config.aspect_ratio_property,
1939 DRM_MODE_PICTURE_ASPECT_NONE);
1940}
1941
1942static void
1943intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) 2004intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
1944{ 2005{
1945 intel_attach_force_audio_property(connector); 2006 intel_attach_force_audio_property(connector);
@@ -1974,7 +2035,14 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1974 intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT; 2035 intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT;
1975 else 2036 else
1976 intel_hdmi->ddc_bus = GMBUS_PIN_DPB; 2037 intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
1977 intel_encoder->hpd_pin = HPD_PORT_B; 2038 /*
2039 * On BXT A0/A1, sw needs to activate DDIA HPD logic and
2040 * interrupts to check the external panel connection.
2041 */
2042 if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
2043 intel_encoder->hpd_pin = HPD_PORT_A;
2044 else
2045 intel_encoder->hpd_pin = HPD_PORT_B;
1978 break; 2046 break;
1979 case PORT_C: 2047 case PORT_C:
1980 if (IS_BROXTON(dev_priv)) 2048 if (IS_BROXTON(dev_priv))
@@ -2051,6 +2119,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
2051 2119
2052 intel_connector_attach_encoder(intel_connector, intel_encoder); 2120 intel_connector_attach_encoder(intel_connector, intel_encoder);
2053 drm_connector_register(connector); 2121 drm_connector_register(connector);
2122 intel_hdmi->attached_connector = intel_connector;
2054 2123
2055 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 2124 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
2056 * 0xd. Failure to do so will result in spurious interrupts being 2125 * 0xd. Failure to do so will result in spurious interrupts being
@@ -2097,6 +2166,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
2097 intel_encoder->pre_enable = chv_hdmi_pre_enable; 2166 intel_encoder->pre_enable = chv_hdmi_pre_enable;
2098 intel_encoder->enable = vlv_enable_hdmi; 2167 intel_encoder->enable = vlv_enable_hdmi;
2099 intel_encoder->post_disable = chv_hdmi_post_disable; 2168 intel_encoder->post_disable = chv_hdmi_post_disable;
2169 intel_encoder->post_pll_disable = chv_hdmi_post_pll_disable;
2100 } else if (IS_VALLEYVIEW(dev)) { 2170 } else if (IS_VALLEYVIEW(dev)) {
2101 intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable; 2171 intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
2102 intel_encoder->pre_enable = vlv_hdmi_pre_enable; 2172 intel_encoder->pre_enable = vlv_hdmi_pre_enable;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index a64f26c670af..1369fc41d039 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -114,8 +114,8 @@ intel_i2c_reset(struct drm_device *dev)
114{ 114{
115 struct drm_i915_private *dev_priv = dev->dev_private; 115 struct drm_i915_private *dev_priv = dev->dev_private;
116 116
117 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0); 117 I915_WRITE(GMBUS0, 0);
118 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0); 118 I915_WRITE(GMBUS4, 0);
119} 119}
120 120
121static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable) 121static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
@@ -261,7 +261,6 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
261 u32 gmbus4_irq_en) 261 u32 gmbus4_irq_en)
262{ 262{
263 int i; 263 int i;
264 int reg_offset = dev_priv->gpio_mmio_base;
265 u32 gmbus2 = 0; 264 u32 gmbus2 = 0;
266 DEFINE_WAIT(wait); 265 DEFINE_WAIT(wait);
267 266
@@ -271,13 +270,13 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
271 /* Important: The hw handles only the first bit, so set only one! Since 270 /* Important: The hw handles only the first bit, so set only one! Since
272 * we also need to check for NAKs besides the hw ready/idle signal, we 271 * we also need to check for NAKs besides the hw ready/idle signal, we
273 * need to wake up periodically and check that ourselves. */ 272 * need to wake up periodically and check that ourselves. */
274 I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en); 273 I915_WRITE(GMBUS4, gmbus4_irq_en);
275 274
276 for (i = 0; i < msecs_to_jiffies_timeout(50); i++) { 275 for (i = 0; i < msecs_to_jiffies_timeout(50); i++) {
277 prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait, 276 prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait,
278 TASK_UNINTERRUPTIBLE); 277 TASK_UNINTERRUPTIBLE);
279 278
280 gmbus2 = I915_READ_NOTRACE(GMBUS2 + reg_offset); 279 gmbus2 = I915_READ_NOTRACE(GMBUS2);
281 if (gmbus2 & (GMBUS_SATOER | gmbus2_status)) 280 if (gmbus2 & (GMBUS_SATOER | gmbus2_status))
282 break; 281 break;
283 282
@@ -285,7 +284,7 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
285 } 284 }
286 finish_wait(&dev_priv->gmbus_wait_queue, &wait); 285 finish_wait(&dev_priv->gmbus_wait_queue, &wait);
287 286
288 I915_WRITE(GMBUS4 + reg_offset, 0); 287 I915_WRITE(GMBUS4, 0);
289 288
290 if (gmbus2 & GMBUS_SATOER) 289 if (gmbus2 & GMBUS_SATOER)
291 return -ENXIO; 290 return -ENXIO;
@@ -298,20 +297,19 @@ static int
298gmbus_wait_idle(struct drm_i915_private *dev_priv) 297gmbus_wait_idle(struct drm_i915_private *dev_priv)
299{ 298{
300 int ret; 299 int ret;
301 int reg_offset = dev_priv->gpio_mmio_base;
302 300
303#define C ((I915_READ_NOTRACE(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0) 301#define C ((I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0)
304 302
305 if (!HAS_GMBUS_IRQ(dev_priv->dev)) 303 if (!HAS_GMBUS_IRQ(dev_priv->dev))
306 return wait_for(C, 10); 304 return wait_for(C, 10);
307 305
308 /* Important: The hw handles only the first bit, so set only one! */ 306 /* Important: The hw handles only the first bit, so set only one! */
309 I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN); 307 I915_WRITE(GMBUS4, GMBUS_IDLE_EN);
310 308
311 ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 309 ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
312 msecs_to_jiffies_timeout(10)); 310 msecs_to_jiffies_timeout(10));
313 311
314 I915_WRITE(GMBUS4 + reg_offset, 0); 312 I915_WRITE(GMBUS4, 0);
315 313
316 if (ret) 314 if (ret)
317 return 0; 315 return 0;
@@ -325,9 +323,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
325 unsigned short addr, u8 *buf, unsigned int len, 323 unsigned short addr, u8 *buf, unsigned int len,
326 u32 gmbus1_index) 324 u32 gmbus1_index)
327{ 325{
328 int reg_offset = dev_priv->gpio_mmio_base; 326 I915_WRITE(GMBUS1,
329
330 I915_WRITE(GMBUS1 + reg_offset,
331 gmbus1_index | 327 gmbus1_index |
332 GMBUS_CYCLE_WAIT | 328 GMBUS_CYCLE_WAIT |
333 (len << GMBUS_BYTE_COUNT_SHIFT) | 329 (len << GMBUS_BYTE_COUNT_SHIFT) |
@@ -342,7 +338,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
342 if (ret) 338 if (ret)
343 return ret; 339 return ret;
344 340
345 val = I915_READ(GMBUS3 + reg_offset); 341 val = I915_READ(GMBUS3);
346 do { 342 do {
347 *buf++ = val & 0xff; 343 *buf++ = val & 0xff;
348 val >>= 8; 344 val >>= 8;
@@ -380,7 +376,6 @@ static int
380gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, 376gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
381 unsigned short addr, u8 *buf, unsigned int len) 377 unsigned short addr, u8 *buf, unsigned int len)
382{ 378{
383 int reg_offset = dev_priv->gpio_mmio_base;
384 unsigned int chunk_size = len; 379 unsigned int chunk_size = len;
385 u32 val, loop; 380 u32 val, loop;
386 381
@@ -390,8 +385,8 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
390 len -= 1; 385 len -= 1;
391 } 386 }
392 387
393 I915_WRITE(GMBUS3 + reg_offset, val); 388 I915_WRITE(GMBUS3, val);
394 I915_WRITE(GMBUS1 + reg_offset, 389 I915_WRITE(GMBUS1,
395 GMBUS_CYCLE_WAIT | 390 GMBUS_CYCLE_WAIT |
396 (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | 391 (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
397 (addr << GMBUS_SLAVE_ADDR_SHIFT) | 392 (addr << GMBUS_SLAVE_ADDR_SHIFT) |
@@ -404,7 +399,7 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
404 val |= *buf++ << (8 * loop); 399 val |= *buf++ << (8 * loop);
405 } while (--len && ++loop < 4); 400 } while (--len && ++loop < 4);
406 401
407 I915_WRITE(GMBUS3 + reg_offset, val); 402 I915_WRITE(GMBUS3, val);
408 403
409 ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY, 404 ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
410 GMBUS_HW_RDY_EN); 405 GMBUS_HW_RDY_EN);
@@ -452,7 +447,6 @@ gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
452static int 447static int
453gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs) 448gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
454{ 449{
455 int reg_offset = dev_priv->gpio_mmio_base;
456 u32 gmbus1_index = 0; 450 u32 gmbus1_index = 0;
457 u32 gmbus5 = 0; 451 u32 gmbus5 = 0;
458 int ret; 452 int ret;
@@ -466,13 +460,13 @@ gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
466 460
467 /* GMBUS5 holds 16-bit index */ 461 /* GMBUS5 holds 16-bit index */
468 if (gmbus5) 462 if (gmbus5)
469 I915_WRITE(GMBUS5 + reg_offset, gmbus5); 463 I915_WRITE(GMBUS5, gmbus5);
470 464
471 ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index); 465 ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
472 466
473 /* Clear GMBUS5 after each index transfer */ 467 /* Clear GMBUS5 after each index transfer */
474 if (gmbus5) 468 if (gmbus5)
475 I915_WRITE(GMBUS5 + reg_offset, 0); 469 I915_WRITE(GMBUS5, 0);
476 470
477 return ret; 471 return ret;
478} 472}
@@ -486,7 +480,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
486 struct intel_gmbus, 480 struct intel_gmbus,
487 adapter); 481 adapter);
488 struct drm_i915_private *dev_priv = bus->dev_priv; 482 struct drm_i915_private *dev_priv = bus->dev_priv;
489 int i = 0, inc, try = 0, reg_offset; 483 int i = 0, inc, try = 0;
490 int ret = 0; 484 int ret = 0;
491 485
492 intel_aux_display_runtime_get(dev_priv); 486 intel_aux_display_runtime_get(dev_priv);
@@ -497,10 +491,8 @@ gmbus_xfer(struct i2c_adapter *adapter,
497 goto out; 491 goto out;
498 } 492 }
499 493
500 reg_offset = dev_priv->gpio_mmio_base;
501
502retry: 494retry:
503 I915_WRITE(GMBUS0 + reg_offset, bus->reg0); 495 I915_WRITE(GMBUS0, bus->reg0);
504 496
505 for (; i < num; i += inc) { 497 for (; i < num; i += inc) {
506 inc = 1; 498 inc = 1;
@@ -530,7 +522,7 @@ retry:
530 * a STOP on the very first cycle. To simplify the code we 522 * a STOP on the very first cycle. To simplify the code we
531 * unconditionally generate the STOP condition with an additional gmbus 523 * unconditionally generate the STOP condition with an additional gmbus
532 * cycle. */ 524 * cycle. */
533 I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_STOP | GMBUS_SW_RDY); 525 I915_WRITE(GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
534 526
535 /* Mark the GMBUS interface as disabled after waiting for idle. 527 /* Mark the GMBUS interface as disabled after waiting for idle.
536 * We will re-enable it at the start of the next xfer, 528 * We will re-enable it at the start of the next xfer,
@@ -541,7 +533,7 @@ retry:
541 adapter->name); 533 adapter->name);
542 ret = -ETIMEDOUT; 534 ret = -ETIMEDOUT;
543 } 535 }
544 I915_WRITE(GMBUS0 + reg_offset, 0); 536 I915_WRITE(GMBUS0, 0);
545 ret = ret ?: i; 537 ret = ret ?: i;
546 goto out; 538 goto out;
547 539
@@ -570,9 +562,9 @@ clear_err:
570 * of resetting the GMBUS controller and so clearing the 562 * of resetting the GMBUS controller and so clearing the
571 * BUS_ERROR raised by the slave's NAK. 563 * BUS_ERROR raised by the slave's NAK.
572 */ 564 */
573 I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT); 565 I915_WRITE(GMBUS1, GMBUS_SW_CLR_INT);
574 I915_WRITE(GMBUS1 + reg_offset, 0); 566 I915_WRITE(GMBUS1, 0);
575 I915_WRITE(GMBUS0 + reg_offset, 0); 567 I915_WRITE(GMBUS0, 0);
576 568
577 DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n", 569 DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
578 adapter->name, msgs[i].addr, 570 adapter->name, msgs[i].addr,
@@ -595,7 +587,7 @@ clear_err:
595timeout: 587timeout:
596 DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n", 588 DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
597 bus->adapter.name, bus->reg0 & 0xff); 589 bus->adapter.name, bus->reg0 & 0xff);
598 I915_WRITE(GMBUS0 + reg_offset, 0); 590 I915_WRITE(GMBUS0, 0);
599 591
600 /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ 592 /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
601 bus->force_bit = 1; 593 bus->force_bit = 1;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 29dd4488dc49..88e12bdf79e2 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -196,13 +196,21 @@
196 reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \ 196 reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
197} 197}
198 198
199#define ASSIGN_CTX_PML4(ppgtt, reg_state) { \
200 reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
201 reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
202}
203
199enum { 204enum {
200 ADVANCED_CONTEXT = 0, 205 ADVANCED_CONTEXT = 0,
201 LEGACY_CONTEXT, 206 LEGACY_32B_CONTEXT,
202 ADVANCED_AD_CONTEXT, 207 ADVANCED_AD_CONTEXT,
203 LEGACY_64B_CONTEXT 208 LEGACY_64B_CONTEXT
204}; 209};
205#define GEN8_CTX_MODE_SHIFT 3 210#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
211#define GEN8_CTX_ADDRESSING_MODE(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
212 LEGACY_64B_CONTEXT :\
213 LEGACY_32B_CONTEXT)
206enum { 214enum {
207 FAULT_AND_HANG = 0, 215 FAULT_AND_HANG = 0,
208 FAULT_AND_HALT, /* Debug only */ 216 FAULT_AND_HALT, /* Debug only */
@@ -213,6 +221,9 @@ enum {
213#define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 221#define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
214 222
215static int intel_lr_context_pin(struct drm_i915_gem_request *rq); 223static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
224static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
225 struct drm_i915_gem_object *default_ctx_obj);
226
216 227
217/** 228/**
218 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists 229 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -228,6 +239,12 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
228{ 239{
229 WARN_ON(i915.enable_ppgtt == -1); 240 WARN_ON(i915.enable_ppgtt == -1);
230 241
242 /* On platforms with execlist available, vGPU will only
243 * support execlist mode, no ring buffer mode.
244 */
245 if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev))
246 return 1;
247
231 if (INTEL_INFO(dev)->gen >= 9) 248 if (INTEL_INFO(dev)->gen >= 9)
232 return 1; 249 return 1;
233 250
@@ -255,25 +272,35 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
255 */ 272 */
256u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj) 273u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
257{ 274{
258 u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj); 275 u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
276 LRC_PPHWSP_PN * PAGE_SIZE;
259 277
260 /* LRCA is required to be 4K aligned so the more significant 20 bits 278 /* LRCA is required to be 4K aligned so the more significant 20 bits
261 * are globally unique */ 279 * are globally unique */
262 return lrca >> 12; 280 return lrca >> 12;
263} 281}
264 282
265static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_request *rq) 283static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
266{ 284{
267 struct intel_engine_cs *ring = rq->ring;
268 struct drm_device *dev = ring->dev; 285 struct drm_device *dev = ring->dev;
269 struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state; 286
287 return ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
288 (IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) &&
289 (ring->id == VCS || ring->id == VCS2);
290}
291
292uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
293 struct intel_engine_cs *ring)
294{
295 struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
270 uint64_t desc; 296 uint64_t desc;
271 uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj); 297 uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
298 LRC_PPHWSP_PN * PAGE_SIZE;
272 299
273 WARN_ON(lrca & 0xFFFFFFFF00000FFFULL); 300 WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);
274 301
275 desc = GEN8_CTX_VALID; 302 desc = GEN8_CTX_VALID;
276 desc |= LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT; 303 desc |= GEN8_CTX_ADDRESSING_MODE(dev) << GEN8_CTX_ADDRESSING_MODE_SHIFT;
277 if (IS_GEN8(ctx_obj->base.dev)) 304 if (IS_GEN8(ctx_obj->base.dev))
278 desc |= GEN8_CTX_L3LLC_COHERENT; 305 desc |= GEN8_CTX_L3LLC_COHERENT;
279 desc |= GEN8_CTX_PRIVILEGE; 306 desc |= GEN8_CTX_PRIVILEGE;
@@ -285,10 +312,8 @@ static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_request *rq)
285 /* desc |= GEN8_CTX_FORCE_RESTORE; */ 312 /* desc |= GEN8_CTX_FORCE_RESTORE; */
286 313
287 /* WaEnableForceRestoreInCtxtDescForVCS:skl */ 314 /* WaEnableForceRestoreInCtxtDescForVCS:skl */
288 if (IS_GEN9(dev) && 315 /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
289 INTEL_REVID(dev) <= SKL_REVID_B0 && 316 if (disable_lite_restore_wa(ring))
290 (ring->id == BCS || ring->id == VCS ||
291 ring->id == VECS || ring->id == VCS2))
292 desc |= GEN8_CTX_FORCE_RESTORE; 317 desc |= GEN8_CTX_FORCE_RESTORE;
293 318
294 return desc; 319 return desc;
@@ -304,13 +329,13 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
304 uint64_t desc[2]; 329 uint64_t desc[2];
305 330
306 if (rq1) { 331 if (rq1) {
307 desc[1] = execlists_ctx_descriptor(rq1); 332 desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->ring);
308 rq1->elsp_submitted++; 333 rq1->elsp_submitted++;
309 } else { 334 } else {
310 desc[1] = 0; 335 desc[1] = 0;
311 } 336 }
312 337
313 desc[0] = execlists_ctx_descriptor(rq0); 338 desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->ring);
314 rq0->elsp_submitted++; 339 rq0->elsp_submitted++;
315 340
316 /* You must always write both descriptors in the order below. */ 341 /* You must always write both descriptors in the order below. */
@@ -324,7 +349,7 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
324 I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0])); 349 I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));
325 350
326 /* ELSP is a wo register, use another nearby reg for posting */ 351 /* ELSP is a wo register, use another nearby reg for posting */
327 POSTING_READ_FW(RING_EXECLIST_STATUS(ring)); 352 POSTING_READ_FW(RING_EXECLIST_STATUS_LO(ring));
328 intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL); 353 intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
329 spin_unlock(&dev_priv->uncore.lock); 354 spin_unlock(&dev_priv->uncore.lock);
330} 355}
@@ -342,16 +367,18 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
342 WARN_ON(!i915_gem_obj_is_pinned(ctx_obj)); 367 WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
343 WARN_ON(!i915_gem_obj_is_pinned(rb_obj)); 368 WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
344 369
345 page = i915_gem_object_get_page(ctx_obj, 1); 370 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
346 reg_state = kmap_atomic(page); 371 reg_state = kmap_atomic(page);
347 372
348 reg_state[CTX_RING_TAIL+1] = rq->tail; 373 reg_state[CTX_RING_TAIL+1] = rq->tail;
349 reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj); 374 reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
350 375
351 /* True PPGTT with dynamic page allocation: update PDP registers and 376 if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
352 * point the unallocated PDPs to the scratch page 377 /* True 32b PPGTT with dynamic page allocation: update PDP
353 */ 378 * registers and point the unallocated PDPs to scratch page.
354 if (ppgtt) { 379 * PML4 is allocated during ppgtt init, so this is not needed
380 * in 48-bit mode.
381 */
355 ASSIGN_CTX_PDP(ppgtt, reg_state, 3); 382 ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
356 ASSIGN_CTX_PDP(ppgtt, reg_state, 2); 383 ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
357 ASSIGN_CTX_PDP(ppgtt, reg_state, 1); 384 ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
@@ -477,7 +504,7 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
477 u32 status_pointer; 504 u32 status_pointer;
478 u8 read_pointer; 505 u8 read_pointer;
479 u8 write_pointer; 506 u8 write_pointer;
480 u32 status; 507 u32 status = 0;
481 u32 status_id; 508 u32 status_id;
482 u32 submit_contexts = 0; 509 u32 submit_contexts = 0;
483 510
@@ -492,10 +519,8 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
492 519
493 while (read_pointer < write_pointer) { 520 while (read_pointer < write_pointer) {
494 read_pointer++; 521 read_pointer++;
495 status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 522 status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer % GEN8_CSB_ENTRIES));
496 (read_pointer % GEN8_CSB_ENTRIES) * 8); 523 status_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer % GEN8_CSB_ENTRIES));
497 status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
498 (read_pointer % GEN8_CSB_ENTRIES) * 8 + 4);
499 524
500 if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) 525 if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
501 continue; 526 continue;
@@ -515,8 +540,14 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
515 } 540 }
516 } 541 }
517 542
518 if (submit_contexts != 0) 543 if (disable_lite_restore_wa(ring)) {
544 /* Prevent a ctx to preempt itself */
545 if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
546 (submit_contexts != 0))
547 execlists_context_unqueue(ring);
548 } else if (submit_contexts != 0) {
519 execlists_context_unqueue(ring); 549 execlists_context_unqueue(ring);
550 }
520 551
521 spin_unlock(&ring->execlist_lock); 552 spin_unlock(&ring->execlist_lock);
522 553
@@ -540,8 +571,6 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
540 571
541 i915_gem_request_reference(request); 572 i915_gem_request_reference(request);
542 573
543 request->tail = request->ringbuf->tail;
544
545 spin_lock_irq(&ring->execlist_lock); 574 spin_lock_irq(&ring->execlist_lock);
546 575
547 list_for_each_entry(cursor, &ring->execlist_queue, execlist_link) 576 list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
@@ -694,13 +723,19 @@ static void
694intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) 723intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
695{ 724{
696 struct intel_engine_cs *ring = request->ring; 725 struct intel_engine_cs *ring = request->ring;
726 struct drm_i915_private *dev_priv = request->i915;
697 727
698 intel_logical_ring_advance(request->ringbuf); 728 intel_logical_ring_advance(request->ringbuf);
699 729
730 request->tail = request->ringbuf->tail;
731
700 if (intel_ring_stopped(ring)) 732 if (intel_ring_stopped(ring))
701 return; 733 return;
702 734
703 execlists_context_queue(request); 735 if (dev_priv->guc.execbuf_client)
736 i915_guc_submit(dev_priv->guc.execbuf_client, request);
737 else
738 execlists_context_queue(request);
704} 739}
705 740
706static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf) 741static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
@@ -767,8 +802,7 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
767/** 802/**
768 * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands 803 * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
769 * 804 *
770 * @request: The request to start some new work for 805 * @req: The request to start some new work for
771 * @ctx: Logical ring context whose ringbuffer is being prepared.
772 * @num_dwords: number of DWORDs that we plan to write to the ringbuffer. 806 * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
773 * 807 *
774 * The ringbuffer might not be ready to accept the commands right away (maybe it needs to 808 * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
@@ -870,21 +904,6 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
870 return -EINVAL; 904 return -EINVAL;
871 } 905 }
872 906
873 if (args->num_cliprects != 0) {
874 DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
875 return -EINVAL;
876 } else {
877 if (args->DR4 == 0xffffffff) {
878 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
879 args->DR4 = 0;
880 }
881
882 if (args->DR1 || args->DR4 || args->cliprects_ptr) {
883 DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
884 return -EINVAL;
885 }
886 }
887
888 if (args->flags & I915_EXEC_GEN7_SOL_RESET) { 907 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
889 DRM_DEBUG("sol reset is gen7 only\n"); 908 DRM_DEBUG("sol reset is gen7 only\n");
890 return -EINVAL; 909 return -EINVAL;
@@ -988,34 +1007,54 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
988 return 0; 1007 return 0;
989} 1008}
990 1009
1010static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
1011 struct drm_i915_gem_object *ctx_obj,
1012 struct intel_ringbuffer *ringbuf)
1013{
1014 struct drm_device *dev = ring->dev;
1015 struct drm_i915_private *dev_priv = dev->dev_private;
1016 int ret = 0;
1017
1018 WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
1019 ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
1020 PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
1021 if (ret)
1022 return ret;
1023
1024 ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
1025 if (ret)
1026 goto unpin_ctx_obj;
1027
1028 ctx_obj->dirty = true;
1029
1030 /* Invalidate GuC TLB. */
1031 if (i915.enable_guc_submission)
1032 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
1033
1034 return ret;
1035
1036unpin_ctx_obj:
1037 i915_gem_object_ggtt_unpin(ctx_obj);
1038
1039 return ret;
1040}
1041
991static int intel_lr_context_pin(struct drm_i915_gem_request *rq) 1042static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
992{ 1043{
1044 int ret = 0;
993 struct intel_engine_cs *ring = rq->ring; 1045 struct intel_engine_cs *ring = rq->ring;
994 struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state; 1046 struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
995 struct intel_ringbuffer *ringbuf = rq->ringbuf; 1047 struct intel_ringbuffer *ringbuf = rq->ringbuf;
996 int ret = 0;
997 1048
998 WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
999 if (rq->ctx->engine[ring->id].pin_count++ == 0) { 1049 if (rq->ctx->engine[ring->id].pin_count++ == 0) {
1000 ret = i915_gem_obj_ggtt_pin(ctx_obj, 1050 ret = intel_lr_context_do_pin(ring, ctx_obj, ringbuf);
1001 GEN8_LR_CONTEXT_ALIGN, 0);
1002 if (ret) 1051 if (ret)
1003 goto reset_pin_count; 1052 goto reset_pin_count;
1004
1005 ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
1006 if (ret)
1007 goto unpin_ctx_obj;
1008
1009 ctx_obj->dirty = true;
1010 } 1053 }
1011
1012 return ret; 1054 return ret;
1013 1055
1014unpin_ctx_obj:
1015 i915_gem_object_ggtt_unpin(ctx_obj);
1016reset_pin_count: 1056reset_pin_count:
1017 rq->ctx->engine[ring->id].pin_count = 0; 1057 rq->ctx->engine[ring->id].pin_count = 0;
1018
1019 return ret; 1058 return ret;
1020} 1059}
1021 1060
@@ -1113,7 +1152,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
1113 if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0) 1152 if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0)
1114 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; 1153 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
1115 1154
1116 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8(1) | 1155 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
1117 MI_SRM_LRM_GLOBAL_GTT)); 1156 MI_SRM_LRM_GLOBAL_GTT));
1118 wa_ctx_emit(batch, index, GEN8_L3SQCREG4); 1157 wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
1119 wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256); 1158 wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
@@ -1131,7 +1170,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
1131 wa_ctx_emit(batch, index, 0); 1170 wa_ctx_emit(batch, index, 0);
1132 wa_ctx_emit(batch, index, 0); 1171 wa_ctx_emit(batch, index, 0);
1133 1172
1134 wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8(1) | 1173 wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
1135 MI_SRM_LRM_GLOBAL_GTT)); 1174 MI_SRM_LRM_GLOBAL_GTT));
1136 wa_ctx_emit(batch, index, GEN8_L3SQCREG4); 1175 wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
1137 wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256); 1176 wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
@@ -1200,9 +1239,10 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
1200 1239
1201 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */ 1240 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
1202 if (IS_BROADWELL(ring->dev)) { 1241 if (IS_BROADWELL(ring->dev)) {
1203 index = gen8_emit_flush_coherentl3_wa(ring, batch, index); 1242 int rc = gen8_emit_flush_coherentl3_wa(ring, batch, index);
1204 if (index < 0) 1243 if (rc < 0)
1205 return index; 1244 return rc;
1245 index = rc;
1206 } 1246 }
1207 1247
1208 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */ 1248 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
@@ -1426,6 +1466,9 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
1426 struct drm_i915_private *dev_priv = dev->dev_private; 1466 struct drm_i915_private *dev_priv = dev->dev_private;
1427 u8 next_context_status_buffer_hw; 1467 u8 next_context_status_buffer_hw;
1428 1468
1469 lrc_setup_hardware_status_page(ring,
1470 ring->default_context->engine[ring->id].state);
1471
1429 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); 1472 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
1430 I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); 1473 I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
1431 1474
@@ -1542,12 +1585,16 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
1542 * Ideally, we should set Force PD Restore in ctx descriptor, 1585 * Ideally, we should set Force PD Restore in ctx descriptor,
1543 * but we can't. Force Restore would be a second option, but 1586 * but we can't. Force Restore would be a second option, but
1544 * it is unsafe in case of lite-restore (because the ctx is 1587 * it is unsafe in case of lite-restore (because the ctx is
1545 * not idle). */ 1588 * not idle). PML4 is allocated during ppgtt init so this is
1589 * not needed in 48-bit.*/
1546 if (req->ctx->ppgtt && 1590 if (req->ctx->ppgtt &&
1547 (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) { 1591 (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) {
1548 ret = intel_logical_ring_emit_pdps(req); 1592 if (!USES_FULL_48BIT_PPGTT(req->i915) &&
1549 if (ret) 1593 !intel_vgpu_active(req->i915->dev)) {
1550 return ret; 1594 ret = intel_logical_ring_emit_pdps(req);
1595 if (ret)
1596 return ret;
1597 }
1551 1598
1552 req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring); 1599 req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
1553 } 1600 }
@@ -1714,6 +1761,34 @@ static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
1714 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); 1761 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
1715} 1762}
1716 1763
1764static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
1765{
1766
1767 /*
1768 * On BXT A steppings there is a HW coherency issue whereby the
1769 * MI_STORE_DATA_IMM storing the completed request's seqno
1770 * occasionally doesn't invalidate the CPU cache. Work around this by
1771 * clflushing the corresponding cacheline whenever the caller wants
1772 * the coherency to be guaranteed. Note that this cacheline is known
1773 * to be clean at this point, since we only write it in
1774 * bxt_a_set_seqno(), where we also do a clflush after the write. So
1775 * this clflush in practice becomes an invalidate operation.
1776 */
1777
1778 if (!lazy_coherency)
1779 intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
1780
1781 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
1782}
1783
1784static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno)
1785{
1786 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
1787
1788 /* See bxt_a_get_seqno() explaining the reason for the clflush. */
1789 intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
1790}
1791
1717static int gen8_emit_request(struct drm_i915_gem_request *request) 1792static int gen8_emit_request(struct drm_i915_gem_request *request)
1718{ 1793{
1719 struct intel_ringbuffer *ringbuf = request->ringbuf; 1794 struct intel_ringbuffer *ringbuf = request->ringbuf;
@@ -1856,7 +1931,21 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
1856 if (ret) 1931 if (ret)
1857 return ret; 1932 return ret;
1858 1933
1859 ret = intel_lr_context_deferred_create(ring->default_context, ring); 1934 ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
1935 if (ret)
1936 return ret;
1937
1938 /* As this is the default context, always pin it */
1939 ret = intel_lr_context_do_pin(
1940 ring,
1941 ring->default_context->engine[ring->id].state,
1942 ring->default_context->engine[ring->id].ringbuf);
1943 if (ret) {
1944 DRM_ERROR(
1945 "Failed to pin and map ringbuffer %s: %d\n",
1946 ring->name, ret);
1947 return ret;
1948 }
1860 1949
1861 return ret; 1950 return ret;
1862} 1951}
@@ -1883,8 +1972,13 @@ static int logical_render_ring_init(struct drm_device *dev)
1883 ring->init_hw = gen8_init_render_ring; 1972 ring->init_hw = gen8_init_render_ring;
1884 ring->init_context = gen8_init_rcs_context; 1973 ring->init_context = gen8_init_rcs_context;
1885 ring->cleanup = intel_fini_pipe_control; 1974 ring->cleanup = intel_fini_pipe_control;
1886 ring->get_seqno = gen8_get_seqno; 1975 if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
1887 ring->set_seqno = gen8_set_seqno; 1976 ring->get_seqno = bxt_a_get_seqno;
1977 ring->set_seqno = bxt_a_set_seqno;
1978 } else {
1979 ring->get_seqno = gen8_get_seqno;
1980 ring->set_seqno = gen8_set_seqno;
1981 }
1888 ring->emit_request = gen8_emit_request; 1982 ring->emit_request = gen8_emit_request;
1889 ring->emit_flush = gen8_emit_flush_render; 1983 ring->emit_flush = gen8_emit_flush_render;
1890 ring->irq_get = gen8_logical_ring_get_irq; 1984 ring->irq_get = gen8_logical_ring_get_irq;
@@ -1930,8 +2024,13 @@ static int logical_bsd_ring_init(struct drm_device *dev)
1930 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; 2024 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
1931 2025
1932 ring->init_hw = gen8_init_common_ring; 2026 ring->init_hw = gen8_init_common_ring;
1933 ring->get_seqno = gen8_get_seqno; 2027 if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
1934 ring->set_seqno = gen8_set_seqno; 2028 ring->get_seqno = bxt_a_get_seqno;
2029 ring->set_seqno = bxt_a_set_seqno;
2030 } else {
2031 ring->get_seqno = gen8_get_seqno;
2032 ring->set_seqno = gen8_set_seqno;
2033 }
1935 ring->emit_request = gen8_emit_request; 2034 ring->emit_request = gen8_emit_request;
1936 ring->emit_flush = gen8_emit_flush; 2035 ring->emit_flush = gen8_emit_flush;
1937 ring->irq_get = gen8_logical_ring_get_irq; 2036 ring->irq_get = gen8_logical_ring_get_irq;
@@ -1980,8 +2079,13 @@ static int logical_blt_ring_init(struct drm_device *dev)
1980 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT; 2079 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
1981 2080
1982 ring->init_hw = gen8_init_common_ring; 2081 ring->init_hw = gen8_init_common_ring;
1983 ring->get_seqno = gen8_get_seqno; 2082 if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
1984 ring->set_seqno = gen8_set_seqno; 2083 ring->get_seqno = bxt_a_get_seqno;
2084 ring->set_seqno = bxt_a_set_seqno;
2085 } else {
2086 ring->get_seqno = gen8_get_seqno;
2087 ring->set_seqno = gen8_set_seqno;
2088 }
1985 ring->emit_request = gen8_emit_request; 2089 ring->emit_request = gen8_emit_request;
1986 ring->emit_flush = gen8_emit_flush; 2090 ring->emit_flush = gen8_emit_flush;
1987 ring->irq_get = gen8_logical_ring_get_irq; 2091 ring->irq_get = gen8_logical_ring_get_irq;
@@ -2005,8 +2109,13 @@ static int logical_vebox_ring_init(struct drm_device *dev)
2005 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT; 2109 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
2006 2110
2007 ring->init_hw = gen8_init_common_ring; 2111 ring->init_hw = gen8_init_common_ring;
2008 ring->get_seqno = gen8_get_seqno; 2112 if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
2009 ring->set_seqno = gen8_set_seqno; 2113 ring->get_seqno = bxt_a_get_seqno;
2114 ring->set_seqno = bxt_a_set_seqno;
2115 } else {
2116 ring->get_seqno = gen8_get_seqno;
2117 ring->set_seqno = gen8_set_seqno;
2118 }
2010 ring->emit_request = gen8_emit_request; 2119 ring->emit_request = gen8_emit_request;
2011 ring->emit_flush = gen8_emit_flush; 2120 ring->emit_flush = gen8_emit_flush;
2012 ring->irq_get = gen8_logical_ring_get_irq; 2121 ring->irq_get = gen8_logical_ring_get_irq;
@@ -2059,14 +2168,8 @@ int intel_logical_rings_init(struct drm_device *dev)
2059 goto cleanup_vebox_ring; 2168 goto cleanup_vebox_ring;
2060 } 2169 }
2061 2170
2062 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
2063 if (ret)
2064 goto cleanup_bsd2_ring;
2065
2066 return 0; 2171 return 0;
2067 2172
2068cleanup_bsd2_ring:
2069 intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
2070cleanup_vebox_ring: 2173cleanup_vebox_ring:
2071 intel_logical_ring_cleanup(&dev_priv->ring[VECS]); 2174 intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
2072cleanup_blt_ring: 2175cleanup_blt_ring:
@@ -2152,7 +2255,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
2152 2255
2153 /* The second page of the context object contains some fields which must 2256 /* The second page of the context object contains some fields which must
2154 * be set up prior to the first execution. */ 2257 * be set up prior to the first execution. */
2155 page = i915_gem_object_get_page(ctx_obj, 1); 2258 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
2156 reg_state = kmap_atomic(page); 2259 reg_state = kmap_atomic(page);
2157 2260
2158 /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM 2261 /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
@@ -2229,13 +2332,24 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
2229 reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0); 2332 reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
2230 reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0); 2333 reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
2231 2334
2232 /* With dynamic page allocation, PDPs may not be allocated at this point, 2335 if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
2233 * Point the unallocated PDPs to the scratch page 2336 /* 64b PPGTT (48bit canonical)
2234 */ 2337 * PDP0_DESCRIPTOR contains the base address to PML4 and
2235 ASSIGN_CTX_PDP(ppgtt, reg_state, 3); 2338 * other PDP Descriptors are ignored.
2236 ASSIGN_CTX_PDP(ppgtt, reg_state, 2); 2339 */
2237 ASSIGN_CTX_PDP(ppgtt, reg_state, 1); 2340 ASSIGN_CTX_PML4(ppgtt, reg_state);
2238 ASSIGN_CTX_PDP(ppgtt, reg_state, 0); 2341 } else {
2342 /* 32b PPGTT
2343 * PDP*_DESCRIPTOR contains the base address of space supported.
2344 * With dynamic page allocation, PDPs may not be allocated at
2345 * this point. Point the unallocated PDPs to the scratch page
2346 */
2347 ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
2348 ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
2349 ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
2350 ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
2351 }
2352
2239 if (ring->id == RCS) { 2353 if (ring->id == RCS) {
2240 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); 2354 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
2241 reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE; 2355 reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE;
@@ -2276,8 +2390,7 @@ void intel_lr_context_free(struct intel_context *ctx)
2276 i915_gem_object_ggtt_unpin(ctx_obj); 2390 i915_gem_object_ggtt_unpin(ctx_obj);
2277 } 2391 }
2278 WARN_ON(ctx->engine[ring->id].pin_count); 2392 WARN_ON(ctx->engine[ring->id].pin_count);
2279 intel_destroy_ringbuffer_obj(ringbuf); 2393 intel_ringbuffer_free(ringbuf);
2280 kfree(ringbuf);
2281 drm_gem_object_unreference(&ctx_obj->base); 2394 drm_gem_object_unreference(&ctx_obj->base);
2282 } 2395 }
2283 } 2396 }
@@ -2311,12 +2424,13 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
2311 struct drm_i915_gem_object *default_ctx_obj) 2424 struct drm_i915_gem_object *default_ctx_obj)
2312{ 2425{
2313 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2426 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2427 struct page *page;
2314 2428
2315 /* The status page is offset 0 from the default context object 2429 /* The HWSP is part of the default context object in LRC mode. */
2316 * in LRC mode. */ 2430 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
2317 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj); 2431 + LRC_PPHWSP_PN * PAGE_SIZE;
2318 ring->status_page.page_addr = 2432 page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
2319 kmap(sg_page(default_ctx_obj->pages->sgl)); 2433 ring->status_page.page_addr = kmap(page);
2320 ring->status_page.obj = default_ctx_obj; 2434 ring->status_page.obj = default_ctx_obj;
2321 2435
2322 I915_WRITE(RING_HWS_PGA(ring->mmio_base), 2436 I915_WRITE(RING_HWS_PGA(ring->mmio_base),
@@ -2325,7 +2439,7 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
2325} 2439}
2326 2440
2327/** 2441/**
2328 * intel_lr_context_deferred_create() - create the LRC specific bits of a context 2442 * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
2329 * @ctx: LR context to create. 2443 * @ctx: LR context to create.
2330 * @ring: engine to be used with the context. 2444 * @ring: engine to be used with the context.
2331 * 2445 *
@@ -2337,10 +2451,10 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
2337 * 2451 *
2338 * Return: non-zero on error. 2452 * Return: non-zero on error.
2339 */ 2453 */
2340int intel_lr_context_deferred_create(struct intel_context *ctx, 2454
2455int intel_lr_context_deferred_alloc(struct intel_context *ctx,
2341 struct intel_engine_cs *ring) 2456 struct intel_engine_cs *ring)
2342{ 2457{
2343 const bool is_global_default_ctx = (ctx == ring->default_context);
2344 struct drm_device *dev = ring->dev; 2458 struct drm_device *dev = ring->dev;
2345 struct drm_i915_gem_object *ctx_obj; 2459 struct drm_i915_gem_object *ctx_obj;
2346 uint32_t context_size; 2460 uint32_t context_size;
@@ -2352,107 +2466,58 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
2352 2466
2353 context_size = round_up(get_lr_context_size(ring), 4096); 2467 context_size = round_up(get_lr_context_size(ring), 4096);
2354 2468
2469 /* One extra page as the sharing data between driver and GuC */
2470 context_size += PAGE_SIZE * LRC_PPHWSP_PN;
2471
2355 ctx_obj = i915_gem_alloc_object(dev, context_size); 2472 ctx_obj = i915_gem_alloc_object(dev, context_size);
2356 if (!ctx_obj) { 2473 if (!ctx_obj) {
2357 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n"); 2474 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
2358 return -ENOMEM; 2475 return -ENOMEM;
2359 } 2476 }
2360 2477
2361 if (is_global_default_ctx) { 2478 ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE);
2362 ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0); 2479 if (IS_ERR(ringbuf)) {
2363 if (ret) { 2480 ret = PTR_ERR(ringbuf);
2364 DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", 2481 goto error_deref_obj;
2365 ret);
2366 drm_gem_object_unreference(&ctx_obj->base);
2367 return ret;
2368 }
2369 }
2370
2371 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
2372 if (!ringbuf) {
2373 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
2374 ring->name);
2375 ret = -ENOMEM;
2376 goto error_unpin_ctx;
2377 }
2378
2379 ringbuf->ring = ring;
2380
2381 ringbuf->size = 32 * PAGE_SIZE;
2382 ringbuf->effective_size = ringbuf->size;
2383 ringbuf->head = 0;
2384 ringbuf->tail = 0;
2385 ringbuf->last_retired_head = -1;
2386 intel_ring_update_space(ringbuf);
2387
2388 if (ringbuf->obj == NULL) {
2389 ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
2390 if (ret) {
2391 DRM_DEBUG_DRIVER(
2392 "Failed to allocate ringbuffer obj %s: %d\n",
2393 ring->name, ret);
2394 goto error_free_rbuf;
2395 }
2396
2397 if (is_global_default_ctx) {
2398 ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
2399 if (ret) {
2400 DRM_ERROR(
2401 "Failed to pin and map ringbuffer %s: %d\n",
2402 ring->name, ret);
2403 goto error_destroy_rbuf;
2404 }
2405 }
2406
2407 } 2482 }
2408 2483
2409 ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf); 2484 ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
2410 if (ret) { 2485 if (ret) {
2411 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret); 2486 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
2412 goto error; 2487 goto error_ringbuf;
2413 } 2488 }
2414 2489
2415 ctx->engine[ring->id].ringbuf = ringbuf; 2490 ctx->engine[ring->id].ringbuf = ringbuf;
2416 ctx->engine[ring->id].state = ctx_obj; 2491 ctx->engine[ring->id].state = ctx_obj;
2417 2492
2418 if (ctx == ring->default_context) 2493 if (ctx != ring->default_context && ring->init_context) {
2419 lrc_setup_hardware_status_page(ring, ctx_obj); 2494 struct drm_i915_gem_request *req;
2420 else if (ring->id == RCS && !ctx->rcs_initialized) {
2421 if (ring->init_context) {
2422 struct drm_i915_gem_request *req;
2423 2495
2424 ret = i915_gem_request_alloc(ring, ctx, &req); 2496 ret = i915_gem_request_alloc(ring,
2425 if (ret) 2497 ctx, &req);
2426 return ret; 2498 if (ret) {
2427 2499 DRM_ERROR("ring create req: %d\n",
2428 ret = ring->init_context(req); 2500 ret);
2429 if (ret) { 2501 goto error_ringbuf;
2430 DRM_ERROR("ring init context: %d\n", ret);
2431 i915_gem_request_cancel(req);
2432 ctx->engine[ring->id].ringbuf = NULL;
2433 ctx->engine[ring->id].state = NULL;
2434 goto error;
2435 }
2436
2437 i915_add_request_no_flush(req);
2438 } 2502 }
2439 2503
2440 ctx->rcs_initialized = true; 2504 ret = ring->init_context(req);
2505 if (ret) {
2506 DRM_ERROR("ring init context: %d\n",
2507 ret);
2508 i915_gem_request_cancel(req);
2509 goto error_ringbuf;
2510 }
2511 i915_add_request_no_flush(req);
2441 } 2512 }
2442
2443 return 0; 2513 return 0;
2444 2514
2445error: 2515error_ringbuf:
2446 if (is_global_default_ctx) 2516 intel_ringbuffer_free(ringbuf);
2447 intel_unpin_ringbuffer_obj(ringbuf); 2517error_deref_obj:
2448error_destroy_rbuf:
2449 intel_destroy_ringbuffer_obj(ringbuf);
2450error_free_rbuf:
2451 kfree(ringbuf);
2452error_unpin_ctx:
2453 if (is_global_default_ctx)
2454 i915_gem_object_ggtt_unpin(ctx_obj);
2455 drm_gem_object_unreference(&ctx_obj->base); 2518 drm_gem_object_unreference(&ctx_obj->base);
2519 ctx->engine[ring->id].ringbuf = NULL;
2520 ctx->engine[ring->id].state = NULL;
2456 return ret; 2521 return ret;
2457} 2522}
2458 2523
@@ -2478,7 +2543,7 @@ void intel_lr_context_reset(struct drm_device *dev,
2478 WARN(1, "Failed get_pages for context obj\n"); 2543 WARN(1, "Failed get_pages for context obj\n");
2479 continue; 2544 continue;
2480 } 2545 }
2481 page = i915_gem_object_get_page(ctx_obj, 1); 2546 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
2482 reg_state = kmap_atomic(page); 2547 reg_state = kmap_atomic(page);
2483 2548
2484 reg_state[CTX_RING_HEAD+1] = 0; 2549 reg_state[CTX_RING_HEAD+1] = 0;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 3c63bb32ad81..4e60d54ba66d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -30,12 +30,14 @@
30 30
31/* Execlists regs */ 31/* Execlists regs */
32#define RING_ELSP(ring) ((ring)->mmio_base+0x230) 32#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
33#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234) 33#define RING_EXECLIST_STATUS_LO(ring) ((ring)->mmio_base+0x234)
34#define RING_EXECLIST_STATUS_HI(ring) ((ring)->mmio_base+0x234 + 4)
34#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244) 35#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244)
35#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3) 36#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
36#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0) 37#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
37#define CTX_CTRL_RS_CTX_ENABLE (1 << 1) 38#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
38#define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370) 39#define RING_CONTEXT_STATUS_BUF_LO(ring, i) ((ring)->mmio_base+0x370 + (i) * 8)
40#define RING_CONTEXT_STATUS_BUF_HI(ring, i) ((ring)->mmio_base+0x370 + (i) * 8 + 4)
39#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0) 41#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0)
40 42
41/* Logical Rings */ 43/* Logical Rings */
@@ -70,12 +72,20 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
70} 72}
71 73
72/* Logical Ring Contexts */ 74/* Logical Ring Contexts */
75
76/* One extra page is added before LRC for GuC as shared data */
77#define LRC_GUCSHR_PN (0)
78#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1)
79#define LRC_STATE_PN (LRC_PPHWSP_PN + 1)
80
73void intel_lr_context_free(struct intel_context *ctx); 81void intel_lr_context_free(struct intel_context *ctx);
74int intel_lr_context_deferred_create(struct intel_context *ctx, 82int intel_lr_context_deferred_alloc(struct intel_context *ctx,
75 struct intel_engine_cs *ring); 83 struct intel_engine_cs *ring);
76void intel_lr_context_unpin(struct drm_i915_gem_request *req); 84void intel_lr_context_unpin(struct drm_i915_gem_request *req);
77void intel_lr_context_reset(struct drm_device *dev, 85void intel_lr_context_reset(struct drm_device *dev,
78 struct intel_context *ctx); 86 struct intel_context *ctx);
87uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
88 struct intel_engine_cs *ring);
79 89
80/* Execlists */ 90/* Execlists */
81int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists); 91int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 881b5d13592e..7f39b8ad88ae 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -98,15 +98,11 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
98{ 98{
99 struct drm_device *dev = encoder->base.dev; 99 struct drm_device *dev = encoder->base.dev;
100 struct drm_i915_private *dev_priv = dev->dev_private; 100 struct drm_i915_private *dev_priv = dev->dev_private;
101 u32 lvds_reg, tmp, flags = 0; 101 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
102 u32 tmp, flags = 0;
102 int dotclock; 103 int dotclock;
103 104
104 if (HAS_PCH_SPLIT(dev)) 105 tmp = I915_READ(lvds_encoder->reg);
105 lvds_reg = PCH_LVDS;
106 else
107 lvds_reg = LVDS;
108
109 tmp = I915_READ(lvds_reg);
110 if (tmp & LVDS_HSYNC_POLARITY) 106 if (tmp & LVDS_HSYNC_POLARITY)
111 flags |= DRM_MODE_FLAG_NHSYNC; 107 flags |= DRM_MODE_FLAG_NHSYNC;
112 else 108 else
@@ -139,8 +135,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
139 struct drm_device *dev = encoder->base.dev; 135 struct drm_device *dev = encoder->base.dev;
140 struct drm_i915_private *dev_priv = dev->dev_private; 136 struct drm_i915_private *dev_priv = dev->dev_private;
141 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 137 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
142 const struct drm_display_mode *adjusted_mode = 138 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
143 &crtc->config->base.adjusted_mode;
144 int pipe = crtc->pipe; 139 int pipe = crtc->pipe;
145 u32 temp; 140 u32 temp;
146 141
@@ -289,11 +284,14 @@ intel_lvds_mode_valid(struct drm_connector *connector,
289{ 284{
290 struct intel_connector *intel_connector = to_intel_connector(connector); 285 struct intel_connector *intel_connector = to_intel_connector(connector);
291 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 286 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
287 int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
292 288
293 if (mode->hdisplay > fixed_mode->hdisplay) 289 if (mode->hdisplay > fixed_mode->hdisplay)
294 return MODE_PANEL; 290 return MODE_PANEL;
295 if (mode->vdisplay > fixed_mode->vdisplay) 291 if (mode->vdisplay > fixed_mode->vdisplay)
296 return MODE_PANEL; 292 return MODE_PANEL;
293 if (fixed_mode->clock > max_pixclk)
294 return MODE_CLOCK_HIGH;
297 295
298 return MODE_OK; 296 return MODE_OK;
299} 297}
@@ -941,6 +939,7 @@ void intel_lvds_init(struct drm_device *dev)
941 struct drm_display_mode *downclock_mode = NULL; 939 struct drm_display_mode *downclock_mode = NULL;
942 struct edid *edid; 940 struct edid *edid;
943 struct drm_crtc *crtc; 941 struct drm_crtc *crtc;
942 u32 lvds_reg;
944 u32 lvds; 943 u32 lvds;
945 int pipe; 944 int pipe;
946 u8 pin; 945 u8 pin;
@@ -952,7 +951,7 @@ void intel_lvds_init(struct drm_device *dev)
952 if (HAS_PCH_SPLIT(dev)) { 951 if (HAS_PCH_SPLIT(dev)) {
953 I915_WRITE(PCH_PP_CONTROL, 952 I915_WRITE(PCH_PP_CONTROL,
954 I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); 953 I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
955 } else { 954 } else if (INTEL_INFO(dev_priv)->gen < 5) {
956 I915_WRITE(PP_CONTROL, 955 I915_WRITE(PP_CONTROL,
957 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); 956 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
958 } 957 }
@@ -963,8 +962,15 @@ void intel_lvds_init(struct drm_device *dev)
963 if (dmi_check_system(intel_no_lvds)) 962 if (dmi_check_system(intel_no_lvds))
964 return; 963 return;
965 964
965 if (HAS_PCH_SPLIT(dev))
966 lvds_reg = PCH_LVDS;
967 else
968 lvds_reg = LVDS;
969
970 lvds = I915_READ(lvds_reg);
971
966 if (HAS_PCH_SPLIT(dev)) { 972 if (HAS_PCH_SPLIT(dev)) {
967 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) 973 if ((lvds & LVDS_DETECTED) == 0)
968 return; 974 return;
969 if (dev_priv->vbt.edp_support) { 975 if (dev_priv->vbt.edp_support) {
970 DRM_DEBUG_KMS("disable LVDS for eDP support\n"); 976 DRM_DEBUG_KMS("disable LVDS for eDP support\n");
@@ -974,14 +980,25 @@ void intel_lvds_init(struct drm_device *dev)
974 980
975 pin = GMBUS_PIN_PANEL; 981 pin = GMBUS_PIN_PANEL;
976 if (!lvds_is_present_in_vbt(dev, &pin)) { 982 if (!lvds_is_present_in_vbt(dev, &pin)) {
977 u32 reg = HAS_PCH_SPLIT(dev) ? PCH_LVDS : LVDS; 983 if ((lvds & LVDS_PORT_EN) == 0) {
978 if ((I915_READ(reg) & LVDS_PORT_EN) == 0) {
979 DRM_DEBUG_KMS("LVDS is not present in VBT\n"); 984 DRM_DEBUG_KMS("LVDS is not present in VBT\n");
980 return; 985 return;
981 } 986 }
982 DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n"); 987 DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n");
983 } 988 }
984 989
990 /* Set the Panel Power On/Off timings if uninitialized. */
991 if (INTEL_INFO(dev_priv)->gen < 5 &&
992 I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
993 /* Set T2 to 40ms and T5 to 200ms */
994 I915_WRITE(PP_ON_DELAYS, 0x019007d0);
995
996 /* Set T3 to 35ms and Tx to 200ms */
997 I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
998
999 DRM_DEBUG_KMS("Panel power timings uninitialized, setting defaults\n");
1000 }
1001
985 lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL); 1002 lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
986 if (!lvds_encoder) 1003 if (!lvds_encoder)
987 return; 1004 return;
@@ -1040,11 +1057,7 @@ void intel_lvds_init(struct drm_device *dev)
1040 connector->interlace_allowed = false; 1057 connector->interlace_allowed = false;
1041 connector->doublescan_allowed = false; 1058 connector->doublescan_allowed = false;
1042 1059
1043 if (HAS_PCH_SPLIT(dev)) { 1060 lvds_encoder->reg = lvds_reg;
1044 lvds_encoder->reg = PCH_LVDS;
1045 } else {
1046 lvds_encoder->reg = LVDS;
1047 }
1048 1061
1049 /* create the scaling mode property */ 1062 /* create the scaling mode property */
1050 drm_mode_create_scaling_mode_property(dev); 1063 drm_mode_create_scaling_mode_property(dev);
@@ -1125,7 +1138,6 @@ void intel_lvds_init(struct drm_device *dev)
1125 if (HAS_PCH_SPLIT(dev)) 1138 if (HAS_PCH_SPLIT(dev))
1126 goto failed; 1139 goto failed;
1127 1140
1128 lvds = I915_READ(LVDS);
1129 pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0; 1141 pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
1130 crtc = intel_get_crtc_for_pipe(dev, pipe); 1142 crtc = intel_get_crtc_for_pipe(dev, pipe);
1131 1143
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 0e860f39933d..38a4c8ce7e63 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -126,3 +126,12 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
126 126
127 drm_object_attach_property(&connector->base, prop, 0); 127 drm_object_attach_property(&connector->base, prop, 0);
128} 128}
129
130void
131intel_attach_aspect_ratio_property(struct drm_connector *connector)
132{
133 if (!drm_mode_create_aspect_ratio_property(connector->dev))
134 drm_object_attach_property(&connector->base,
135 connector->dev->mode_config.aspect_ratio_property,
136 DRM_MODE_PICTURE_ASPECT_NONE);
137}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index cb1c65739425..6dc13c02c28e 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -239,7 +239,7 @@ struct opregion_asle {
239static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) 239static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
240{ 240{
241 struct drm_i915_private *dev_priv = dev->dev_private; 241 struct drm_i915_private *dev_priv = dev->dev_private;
242 struct opregion_swsci __iomem *swsci = dev_priv->opregion.swsci; 242 struct opregion_swsci *swsci = dev_priv->opregion.swsci;
243 u32 main_function, sub_function, scic; 243 u32 main_function, sub_function, scic;
244 u16 pci_swsci; 244 u16 pci_swsci;
245 u32 dslp; 245 u32 dslp;
@@ -264,7 +264,7 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
264 } 264 }
265 265
266 /* Driver sleep timeout in ms. */ 266 /* Driver sleep timeout in ms. */
267 dslp = ioread32(&swsci->dslp); 267 dslp = swsci->dslp;
268 if (!dslp) { 268 if (!dslp) {
269 /* The spec says 2ms should be the default, but it's too small 269 /* The spec says 2ms should be the default, but it's too small
270 * for some machines. */ 270 * for some machines. */
@@ -277,7 +277,7 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
277 } 277 }
278 278
279 /* The spec tells us to do this, but we are the only user... */ 279 /* The spec tells us to do this, but we are the only user... */
280 scic = ioread32(&swsci->scic); 280 scic = swsci->scic;
281 if (scic & SWSCI_SCIC_INDICATOR) { 281 if (scic & SWSCI_SCIC_INDICATOR) {
282 DRM_DEBUG_DRIVER("SWSCI request already in progress\n"); 282 DRM_DEBUG_DRIVER("SWSCI request already in progress\n");
283 return -EBUSY; 283 return -EBUSY;
@@ -285,8 +285,8 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
285 285
286 scic = function | SWSCI_SCIC_INDICATOR; 286 scic = function | SWSCI_SCIC_INDICATOR;
287 287
288 iowrite32(parm, &swsci->parm); 288 swsci->parm = parm;
289 iowrite32(scic, &swsci->scic); 289 swsci->scic = scic;
290 290
291 /* Ensure SCI event is selected and event trigger is cleared. */ 291 /* Ensure SCI event is selected and event trigger is cleared. */
292 pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci); 292 pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci);
@@ -301,7 +301,7 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
301 pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci); 301 pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
302 302
303 /* Poll for the result. */ 303 /* Poll for the result. */
304#define C (((scic = ioread32(&swsci->scic)) & SWSCI_SCIC_INDICATOR) == 0) 304#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
305 if (wait_for(C, dslp)) { 305 if (wait_for(C, dslp)) {
306 DRM_DEBUG_DRIVER("SWSCI request timed out\n"); 306 DRM_DEBUG_DRIVER("SWSCI request timed out\n");
307 return -ETIMEDOUT; 307 return -ETIMEDOUT;
@@ -317,7 +317,7 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
317 } 317 }
318 318
319 if (parm_out) 319 if (parm_out)
320 *parm_out = ioread32(&swsci->parm); 320 *parm_out = swsci->parm;
321 321
322 return 0; 322 return 0;
323 323
@@ -341,8 +341,12 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
341 if (!HAS_DDI(dev)) 341 if (!HAS_DDI(dev))
342 return 0; 342 return 0;
343 343
344 port = intel_ddi_get_encoder_port(intel_encoder); 344 if (intel_encoder->type == INTEL_OUTPUT_DSI)
345 if (port == PORT_E) { 345 port = 0;
346 else
347 port = intel_ddi_get_encoder_port(intel_encoder);
348
349 if (port == PORT_E) {
346 port = 0; 350 port = 0;
347 } else { 351 } else {
348 parm |= 1 << port; 352 parm |= 1 << port;
@@ -363,6 +367,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
363 type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL; 367 type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
364 break; 368 break;
365 case INTEL_OUTPUT_EDP: 369 case INTEL_OUTPUT_EDP:
370 case INTEL_OUTPUT_DSI:
366 type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL; 371 type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
367 break; 372 break;
368 default: 373 default:
@@ -407,7 +412,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
407{ 412{
408 struct drm_i915_private *dev_priv = dev->dev_private; 413 struct drm_i915_private *dev_priv = dev->dev_private;
409 struct intel_connector *intel_connector; 414 struct intel_connector *intel_connector;
410 struct opregion_asle __iomem *asle = dev_priv->opregion.asle; 415 struct opregion_asle *asle = dev_priv->opregion.asle;
411 416
412 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); 417 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
413 418
@@ -432,7 +437,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
432 DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp); 437 DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
433 list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) 438 list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head)
434 intel_panel_set_backlight_acpi(intel_connector, bclp, 255); 439 intel_panel_set_backlight_acpi(intel_connector, bclp, 255);
435 iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv); 440 asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID;
436 441
437 drm_modeset_unlock(&dev->mode_config.connection_mutex); 442 drm_modeset_unlock(&dev->mode_config.connection_mutex);
438 443
@@ -519,14 +524,14 @@ static void asle_work(struct work_struct *work)
519 struct drm_i915_private *dev_priv = 524 struct drm_i915_private *dev_priv =
520 container_of(opregion, struct drm_i915_private, opregion); 525 container_of(opregion, struct drm_i915_private, opregion);
521 struct drm_device *dev = dev_priv->dev; 526 struct drm_device *dev = dev_priv->dev;
522 struct opregion_asle __iomem *asle = dev_priv->opregion.asle; 527 struct opregion_asle *asle = dev_priv->opregion.asle;
523 u32 aslc_stat = 0; 528 u32 aslc_stat = 0;
524 u32 aslc_req; 529 u32 aslc_req;
525 530
526 if (!asle) 531 if (!asle)
527 return; 532 return;
528 533
529 aslc_req = ioread32(&asle->aslc); 534 aslc_req = asle->aslc;
530 535
531 if (!(aslc_req & ASLC_REQ_MSK)) { 536 if (!(aslc_req & ASLC_REQ_MSK)) {
532 DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n", 537 DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n",
@@ -535,34 +540,34 @@ static void asle_work(struct work_struct *work)
535 } 540 }
536 541
537 if (aslc_req & ASLC_SET_ALS_ILLUM) 542 if (aslc_req & ASLC_SET_ALS_ILLUM)
538 aslc_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi)); 543 aslc_stat |= asle_set_als_illum(dev, asle->alsi);
539 544
540 if (aslc_req & ASLC_SET_BACKLIGHT) 545 if (aslc_req & ASLC_SET_BACKLIGHT)
541 aslc_stat |= asle_set_backlight(dev, ioread32(&asle->bclp)); 546 aslc_stat |= asle_set_backlight(dev, asle->bclp);
542 547
543 if (aslc_req & ASLC_SET_PFIT) 548 if (aslc_req & ASLC_SET_PFIT)
544 aslc_stat |= asle_set_pfit(dev, ioread32(&asle->pfit)); 549 aslc_stat |= asle_set_pfit(dev, asle->pfit);
545 550
546 if (aslc_req & ASLC_SET_PWM_FREQ) 551 if (aslc_req & ASLC_SET_PWM_FREQ)
547 aslc_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb)); 552 aslc_stat |= asle_set_pwm_freq(dev, asle->pfmb);
548 553
549 if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES) 554 if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
550 aslc_stat |= asle_set_supported_rotation_angles(dev, 555 aslc_stat |= asle_set_supported_rotation_angles(dev,
551 ioread32(&asle->srot)); 556 asle->srot);
552 557
553 if (aslc_req & ASLC_BUTTON_ARRAY) 558 if (aslc_req & ASLC_BUTTON_ARRAY)
554 aslc_stat |= asle_set_button_array(dev, ioread32(&asle->iuer)); 559 aslc_stat |= asle_set_button_array(dev, asle->iuer);
555 560
556 if (aslc_req & ASLC_CONVERTIBLE_INDICATOR) 561 if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
557 aslc_stat |= asle_set_convertible(dev, ioread32(&asle->iuer)); 562 aslc_stat |= asle_set_convertible(dev, asle->iuer);
558 563
559 if (aslc_req & ASLC_DOCKING_INDICATOR) 564 if (aslc_req & ASLC_DOCKING_INDICATOR)
560 aslc_stat |= asle_set_docking(dev, ioread32(&asle->iuer)); 565 aslc_stat |= asle_set_docking(dev, asle->iuer);
561 566
562 if (aslc_req & ASLC_ISCT_STATE_CHANGE) 567 if (aslc_req & ASLC_ISCT_STATE_CHANGE)
563 aslc_stat |= asle_isct_state(dev); 568 aslc_stat |= asle_isct_state(dev);
564 569
565 iowrite32(aslc_stat, &asle->aslc); 570 asle->aslc = aslc_stat;
566} 571}
567 572
568void intel_opregion_asle_intr(struct drm_device *dev) 573void intel_opregion_asle_intr(struct drm_device *dev)
@@ -587,8 +592,8 @@ static int intel_opregion_video_event(struct notifier_block *nb,
587 Linux, these are handled by the dock, button and video drivers. 592 Linux, these are handled by the dock, button and video drivers.
588 */ 593 */
589 594
590 struct opregion_acpi __iomem *acpi;
591 struct acpi_bus_event *event = data; 595 struct acpi_bus_event *event = data;
596 struct opregion_acpi *acpi;
592 int ret = NOTIFY_OK; 597 int ret = NOTIFY_OK;
593 598
594 if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) 599 if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
@@ -599,11 +604,10 @@ static int intel_opregion_video_event(struct notifier_block *nb,
599 604
600 acpi = system_opregion->acpi; 605 acpi = system_opregion->acpi;
601 606
602 if (event->type == 0x80 && 607 if (event->type == 0x80 && ((acpi->cevt & 1) == 0))
603 (ioread32(&acpi->cevt) & 1) == 0)
604 ret = NOTIFY_BAD; 608 ret = NOTIFY_BAD;
605 609
606 iowrite32(0, &acpi->csts); 610 acpi->csts = 0;
607 611
608 return ret; 612 return ret;
609} 613}
@@ -623,14 +627,14 @@ static u32 get_did(struct intel_opregion *opregion, int i)
623 u32 did; 627 u32 did;
624 628
625 if (i < ARRAY_SIZE(opregion->acpi->didl)) { 629 if (i < ARRAY_SIZE(opregion->acpi->didl)) {
626 did = ioread32(&opregion->acpi->didl[i]); 630 did = opregion->acpi->didl[i];
627 } else { 631 } else {
628 i -= ARRAY_SIZE(opregion->acpi->didl); 632 i -= ARRAY_SIZE(opregion->acpi->didl);
629 633
630 if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2))) 634 if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
631 return 0; 635 return 0;
632 636
633 did = ioread32(&opregion->acpi->did2[i]); 637 did = opregion->acpi->did2[i];
634 } 638 }
635 639
636 return did; 640 return did;
@@ -639,14 +643,14 @@ static u32 get_did(struct intel_opregion *opregion, int i)
639static void set_did(struct intel_opregion *opregion, int i, u32 val) 643static void set_did(struct intel_opregion *opregion, int i, u32 val)
640{ 644{
641 if (i < ARRAY_SIZE(opregion->acpi->didl)) { 645 if (i < ARRAY_SIZE(opregion->acpi->didl)) {
642 iowrite32(val, &opregion->acpi->didl[i]); 646 opregion->acpi->didl[i] = val;
643 } else { 647 } else {
644 i -= ARRAY_SIZE(opregion->acpi->didl); 648 i -= ARRAY_SIZE(opregion->acpi->didl);
645 649
646 if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2))) 650 if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
647 return; 651 return;
648 652
649 iowrite32(val, &opregion->acpi->did2[i]); 653 opregion->acpi->did2[i] = val;
650 } 654 }
651} 655}
652 656
@@ -768,7 +772,7 @@ static void intel_setup_cadls(struct drm_device *dev)
768 * there are less than eight devices. */ 772 * there are less than eight devices. */
769 do { 773 do {
770 disp_id = get_did(opregion, i); 774 disp_id = get_did(opregion, i);
771 iowrite32(disp_id, &opregion->acpi->cadl[i]); 775 opregion->acpi->cadl[i] = disp_id;
772 } while (++i < 8 && disp_id != 0); 776 } while (++i < 8 && disp_id != 0);
773} 777}
774 778
@@ -787,16 +791,16 @@ void intel_opregion_init(struct drm_device *dev)
787 /* Notify BIOS we are ready to handle ACPI video ext notifs. 791 /* Notify BIOS we are ready to handle ACPI video ext notifs.
788 * Right now, all the events are handled by the ACPI video module. 792 * Right now, all the events are handled by the ACPI video module.
789 * We don't actually need to do anything with them. */ 793 * We don't actually need to do anything with them. */
790 iowrite32(0, &opregion->acpi->csts); 794 opregion->acpi->csts = 0;
791 iowrite32(1, &opregion->acpi->drdy); 795 opregion->acpi->drdy = 1;
792 796
793 system_opregion = opregion; 797 system_opregion = opregion;
794 register_acpi_notifier(&intel_opregion_notifier); 798 register_acpi_notifier(&intel_opregion_notifier);
795 } 799 }
796 800
797 if (opregion->asle) { 801 if (opregion->asle) {
798 iowrite32(ASLE_TCHE_BLC_EN, &opregion->asle->tche); 802 opregion->asle->tche = ASLE_TCHE_BLC_EN;
799 iowrite32(ASLE_ARDY_READY, &opregion->asle->ardy); 803 opregion->asle->ardy = ASLE_ARDY_READY;
800 } 804 }
801} 805}
802 806
@@ -809,19 +813,19 @@ void intel_opregion_fini(struct drm_device *dev)
809 return; 813 return;
810 814
811 if (opregion->asle) 815 if (opregion->asle)
812 iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy); 816 opregion->asle->ardy = ASLE_ARDY_NOT_READY;
813 817
814 cancel_work_sync(&dev_priv->opregion.asle_work); 818 cancel_work_sync(&dev_priv->opregion.asle_work);
815 819
816 if (opregion->acpi) { 820 if (opregion->acpi) {
817 iowrite32(0, &opregion->acpi->drdy); 821 opregion->acpi->drdy = 0;
818 822
819 system_opregion = NULL; 823 system_opregion = NULL;
820 unregister_acpi_notifier(&intel_opregion_notifier); 824 unregister_acpi_notifier(&intel_opregion_notifier);
821 } 825 }
822 826
823 /* just clear all opregion memory pointers now */ 827 /* just clear all opregion memory pointers now */
824 iounmap(opregion->header); 828 memunmap(opregion->header);
825 opregion->header = NULL; 829 opregion->header = NULL;
826 opregion->acpi = NULL; 830 opregion->acpi = NULL;
827 opregion->swsci = NULL; 831 opregion->swsci = NULL;
@@ -894,10 +898,10 @@ int intel_opregion_setup(struct drm_device *dev)
894{ 898{
895 struct drm_i915_private *dev_priv = dev->dev_private; 899 struct drm_i915_private *dev_priv = dev->dev_private;
896 struct intel_opregion *opregion = &dev_priv->opregion; 900 struct intel_opregion *opregion = &dev_priv->opregion;
897 void __iomem *base;
898 u32 asls, mboxes; 901 u32 asls, mboxes;
899 char buf[sizeof(OPREGION_SIGNATURE)]; 902 char buf[sizeof(OPREGION_SIGNATURE)];
900 int err = 0; 903 int err = 0;
904 void *base;
901 905
902 BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100); 906 BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100);
903 BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100); 907 BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100);
@@ -915,11 +919,11 @@ int intel_opregion_setup(struct drm_device *dev)
915 INIT_WORK(&opregion->asle_work, asle_work); 919 INIT_WORK(&opregion->asle_work, asle_work);
916#endif 920#endif
917 921
918 base = acpi_os_ioremap(asls, OPREGION_SIZE); 922 base = memremap(asls, OPREGION_SIZE, MEMREMAP_WB);
919 if (!base) 923 if (!base)
920 return -ENOMEM; 924 return -ENOMEM;
921 925
922 memcpy_fromio(buf, base, sizeof(buf)); 926 memcpy(buf, base, sizeof(buf));
923 927
924 if (memcmp(buf, OPREGION_SIGNATURE, 16)) { 928 if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
925 DRM_DEBUG_DRIVER("opregion signature mismatch\n"); 929 DRM_DEBUG_DRIVER("opregion signature mismatch\n");
@@ -931,7 +935,7 @@ int intel_opregion_setup(struct drm_device *dev)
931 935
932 opregion->lid_state = base + ACPI_CLID; 936 opregion->lid_state = base + ACPI_CLID;
933 937
934 mboxes = ioread32(&opregion->header->mboxes); 938 mboxes = opregion->header->mboxes;
935 if (mboxes & MBOX_ACPI) { 939 if (mboxes & MBOX_ACPI) {
936 DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); 940 DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
937 opregion->acpi = base + OPREGION_ACPI_OFFSET; 941 opregion->acpi = base + OPREGION_ACPI_OFFSET;
@@ -946,12 +950,12 @@ int intel_opregion_setup(struct drm_device *dev)
946 DRM_DEBUG_DRIVER("ASLE supported\n"); 950 DRM_DEBUG_DRIVER("ASLE supported\n");
947 opregion->asle = base + OPREGION_ASLE_OFFSET; 951 opregion->asle = base + OPREGION_ASLE_OFFSET;
948 952
949 iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy); 953 opregion->asle->ardy = ASLE_ARDY_NOT_READY;
950 } 954 }
951 955
952 return 0; 956 return 0;
953 957
954err_out: 958err_out:
955 iounmap(base); 959 memunmap(base);
956 return err; 960 return err;
957} 961}
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e2ab3f6ed022..a24df35e11e7 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -105,59 +105,55 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
105 struct intel_crtc_state *pipe_config, 105 struct intel_crtc_state *pipe_config,
106 int fitting_mode) 106 int fitting_mode)
107{ 107{
108 struct drm_display_mode *adjusted_mode; 108 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
109 int x, y, width, height; 109 int x = 0, y = 0, width = 0, height = 0;
110
111 adjusted_mode = &pipe_config->base.adjusted_mode;
112
113 x = y = width = height = 0;
114 110
115 /* Native modes don't need fitting */ 111 /* Native modes don't need fitting */
116 if (adjusted_mode->hdisplay == pipe_config->pipe_src_w && 112 if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
117 adjusted_mode->vdisplay == pipe_config->pipe_src_h) 113 adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h)
118 goto done; 114 goto done;
119 115
120 switch (fitting_mode) { 116 switch (fitting_mode) {
121 case DRM_MODE_SCALE_CENTER: 117 case DRM_MODE_SCALE_CENTER:
122 width = pipe_config->pipe_src_w; 118 width = pipe_config->pipe_src_w;
123 height = pipe_config->pipe_src_h; 119 height = pipe_config->pipe_src_h;
124 x = (adjusted_mode->hdisplay - width + 1)/2; 120 x = (adjusted_mode->crtc_hdisplay - width + 1)/2;
125 y = (adjusted_mode->vdisplay - height + 1)/2; 121 y = (adjusted_mode->crtc_vdisplay - height + 1)/2;
126 break; 122 break;
127 123
128 case DRM_MODE_SCALE_ASPECT: 124 case DRM_MODE_SCALE_ASPECT:
129 /* Scale but preserve the aspect ratio */ 125 /* Scale but preserve the aspect ratio */
130 { 126 {
131 u32 scaled_width = adjusted_mode->hdisplay 127 u32 scaled_width = adjusted_mode->crtc_hdisplay
132 * pipe_config->pipe_src_h; 128 * pipe_config->pipe_src_h;
133 u32 scaled_height = pipe_config->pipe_src_w 129 u32 scaled_height = pipe_config->pipe_src_w
134 * adjusted_mode->vdisplay; 130 * adjusted_mode->crtc_vdisplay;
135 if (scaled_width > scaled_height) { /* pillar */ 131 if (scaled_width > scaled_height) { /* pillar */
136 width = scaled_height / pipe_config->pipe_src_h; 132 width = scaled_height / pipe_config->pipe_src_h;
137 if (width & 1) 133 if (width & 1)
138 width++; 134 width++;
139 x = (adjusted_mode->hdisplay - width + 1) / 2; 135 x = (adjusted_mode->crtc_hdisplay - width + 1) / 2;
140 y = 0; 136 y = 0;
141 height = adjusted_mode->vdisplay; 137 height = adjusted_mode->crtc_vdisplay;
142 } else if (scaled_width < scaled_height) { /* letter */ 138 } else if (scaled_width < scaled_height) { /* letter */
143 height = scaled_width / pipe_config->pipe_src_w; 139 height = scaled_width / pipe_config->pipe_src_w;
144 if (height & 1) 140 if (height & 1)
145 height++; 141 height++;
146 y = (adjusted_mode->vdisplay - height + 1) / 2; 142 y = (adjusted_mode->crtc_vdisplay - height + 1) / 2;
147 x = 0; 143 x = 0;
148 width = adjusted_mode->hdisplay; 144 width = adjusted_mode->crtc_hdisplay;
149 } else { 145 } else {
150 x = y = 0; 146 x = y = 0;
151 width = adjusted_mode->hdisplay; 147 width = adjusted_mode->crtc_hdisplay;
152 height = adjusted_mode->vdisplay; 148 height = adjusted_mode->crtc_vdisplay;
153 } 149 }
154 } 150 }
155 break; 151 break;
156 152
157 case DRM_MODE_SCALE_FULLSCREEN: 153 case DRM_MODE_SCALE_FULLSCREEN:
158 x = y = 0; 154 x = y = 0;
159 width = adjusted_mode->hdisplay; 155 width = adjusted_mode->crtc_hdisplay;
160 height = adjusted_mode->vdisplay; 156 height = adjusted_mode->crtc_vdisplay;
161 break; 157 break;
162 158
163 default: 159 default:
@@ -172,46 +168,46 @@ done:
172} 168}
173 169
174static void 170static void
175centre_horizontally(struct drm_display_mode *mode, 171centre_horizontally(struct drm_display_mode *adjusted_mode,
176 int width) 172 int width)
177{ 173{
178 u32 border, sync_pos, blank_width, sync_width; 174 u32 border, sync_pos, blank_width, sync_width;
179 175
180 /* keep the hsync and hblank widths constant */ 176 /* keep the hsync and hblank widths constant */
181 sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start; 177 sync_width = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
182 blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start; 178 blank_width = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
183 sync_pos = (blank_width - sync_width + 1) / 2; 179 sync_pos = (blank_width - sync_width + 1) / 2;
184 180
185 border = (mode->hdisplay - width + 1) / 2; 181 border = (adjusted_mode->crtc_hdisplay - width + 1) / 2;
186 border += border & 1; /* make the border even */ 182 border += border & 1; /* make the border even */
187 183
188 mode->crtc_hdisplay = width; 184 adjusted_mode->crtc_hdisplay = width;
189 mode->crtc_hblank_start = width + border; 185 adjusted_mode->crtc_hblank_start = width + border;
190 mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width; 186 adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_start + blank_width;
191 187
192 mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos; 188 adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hblank_start + sync_pos;
193 mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width; 189 adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + sync_width;
194} 190}
195 191
196static void 192static void
197centre_vertically(struct drm_display_mode *mode, 193centre_vertically(struct drm_display_mode *adjusted_mode,
198 int height) 194 int height)
199{ 195{
200 u32 border, sync_pos, blank_width, sync_width; 196 u32 border, sync_pos, blank_width, sync_width;
201 197
202 /* keep the vsync and vblank widths constant */ 198 /* keep the vsync and vblank widths constant */
203 sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start; 199 sync_width = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
204 blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start; 200 blank_width = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start;
205 sync_pos = (blank_width - sync_width + 1) / 2; 201 sync_pos = (blank_width - sync_width + 1) / 2;
206 202
207 border = (mode->vdisplay - height + 1) / 2; 203 border = (adjusted_mode->crtc_vdisplay - height + 1) / 2;
208 204
209 mode->crtc_vdisplay = height; 205 adjusted_mode->crtc_vdisplay = height;
210 mode->crtc_vblank_start = height + border; 206 adjusted_mode->crtc_vblank_start = height + border;
211 mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width; 207 adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vblank_start + blank_width;
212 208
213 mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos; 209 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vblank_start + sync_pos;
214 mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width; 210 adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + sync_width;
215} 211}
216 212
217static inline u32 panel_fitter_scaling(u32 source, u32 target) 213static inline u32 panel_fitter_scaling(u32 source, u32 target)
@@ -230,11 +226,11 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
230static void i965_scale_aspect(struct intel_crtc_state *pipe_config, 226static void i965_scale_aspect(struct intel_crtc_state *pipe_config,
231 u32 *pfit_control) 227 u32 *pfit_control)
232{ 228{
233 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 229 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
234 u32 scaled_width = adjusted_mode->hdisplay * 230 u32 scaled_width = adjusted_mode->crtc_hdisplay *
235 pipe_config->pipe_src_h; 231 pipe_config->pipe_src_h;
236 u32 scaled_height = pipe_config->pipe_src_w * 232 u32 scaled_height = pipe_config->pipe_src_w *
237 adjusted_mode->vdisplay; 233 adjusted_mode->crtc_vdisplay;
238 234
239 /* 965+ is easy, it does everything in hw */ 235 /* 965+ is easy, it does everything in hw */
240 if (scaled_width > scaled_height) 236 if (scaled_width > scaled_height)
@@ -243,7 +239,7 @@ static void i965_scale_aspect(struct intel_crtc_state *pipe_config,
243 else if (scaled_width < scaled_height) 239 else if (scaled_width < scaled_height)
244 *pfit_control |= PFIT_ENABLE | 240 *pfit_control |= PFIT_ENABLE |
245 PFIT_SCALING_LETTER; 241 PFIT_SCALING_LETTER;
246 else if (adjusted_mode->hdisplay != pipe_config->pipe_src_w) 242 else if (adjusted_mode->crtc_hdisplay != pipe_config->pipe_src_w)
247 *pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO; 243 *pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
248} 244}
249 245
@@ -252,10 +248,10 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
252 u32 *border) 248 u32 *border)
253{ 249{
254 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 250 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
255 u32 scaled_width = adjusted_mode->hdisplay * 251 u32 scaled_width = adjusted_mode->crtc_hdisplay *
256 pipe_config->pipe_src_h; 252 pipe_config->pipe_src_h;
257 u32 scaled_height = pipe_config->pipe_src_w * 253 u32 scaled_height = pipe_config->pipe_src_w *
258 adjusted_mode->vdisplay; 254 adjusted_mode->crtc_vdisplay;
259 u32 bits; 255 u32 bits;
260 256
261 /* 257 /*
@@ -269,9 +265,9 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
269 pipe_config->pipe_src_h); 265 pipe_config->pipe_src_h);
270 266
271 *border = LVDS_BORDER_ENABLE; 267 *border = LVDS_BORDER_ENABLE;
272 if (pipe_config->pipe_src_h != adjusted_mode->vdisplay) { 268 if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay) {
273 bits = panel_fitter_scaling(pipe_config->pipe_src_h, 269 bits = panel_fitter_scaling(pipe_config->pipe_src_h,
274 adjusted_mode->vdisplay); 270 adjusted_mode->crtc_vdisplay);
275 271
276 *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT | 272 *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
277 bits << PFIT_VERT_SCALE_SHIFT); 273 bits << PFIT_VERT_SCALE_SHIFT);
@@ -285,9 +281,9 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
285 pipe_config->pipe_src_w); 281 pipe_config->pipe_src_w);
286 282
287 *border = LVDS_BORDER_ENABLE; 283 *border = LVDS_BORDER_ENABLE;
288 if (pipe_config->pipe_src_w != adjusted_mode->hdisplay) { 284 if (pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
289 bits = panel_fitter_scaling(pipe_config->pipe_src_w, 285 bits = panel_fitter_scaling(pipe_config->pipe_src_w,
290 adjusted_mode->hdisplay); 286 adjusted_mode->crtc_hdisplay);
291 287
292 *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT | 288 *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
293 bits << PFIT_VERT_SCALE_SHIFT); 289 bits << PFIT_VERT_SCALE_SHIFT);
@@ -310,13 +306,11 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
310{ 306{
311 struct drm_device *dev = intel_crtc->base.dev; 307 struct drm_device *dev = intel_crtc->base.dev;
312 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; 308 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
313 struct drm_display_mode *adjusted_mode; 309 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
314
315 adjusted_mode = &pipe_config->base.adjusted_mode;
316 310
317 /* Native modes don't need fitting */ 311 /* Native modes don't need fitting */
318 if (adjusted_mode->hdisplay == pipe_config->pipe_src_w && 312 if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
319 adjusted_mode->vdisplay == pipe_config->pipe_src_h) 313 adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h)
320 goto out; 314 goto out;
321 315
322 switch (fitting_mode) { 316 switch (fitting_mode) {
@@ -342,8 +336,8 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
342 * Full scaling, even if it changes the aspect ratio. 336 * Full scaling, even if it changes the aspect ratio.
343 * Fortunately this is all done for us in hw. 337 * Fortunately this is all done for us in hw.
344 */ 338 */
345 if (pipe_config->pipe_src_h != adjusted_mode->vdisplay || 339 if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay ||
346 pipe_config->pipe_src_w != adjusted_mode->hdisplay) { 340 pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
347 pfit_control |= PFIT_ENABLE; 341 pfit_control |= PFIT_ENABLE;
348 if (INTEL_INFO(dev)->gen >= 4) 342 if (INTEL_INFO(dev)->gen >= 4)
349 pfit_control |= PFIT_SCALING_AUTO; 343 pfit_control |= PFIT_SCALING_AUTO;
@@ -387,7 +381,7 @@ intel_panel_detect(struct drm_device *dev)
387 381
388 /* Assume that the BIOS does not lie through the OpRegion... */ 382 /* Assume that the BIOS does not lie through the OpRegion... */
389 if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) { 383 if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
390 return ioread32(dev_priv->opregion.lid_state) & 0x1 ? 384 return *dev_priv->opregion.lid_state & 0x1 ?
391 connector_status_connected : 385 connector_status_connected :
392 connector_status_disconnected; 386 connector_status_disconnected;
393 } 387 }
@@ -484,7 +478,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
484 return val; 478 return val;
485} 479}
486 480
487static u32 bdw_get_backlight(struct intel_connector *connector) 481static u32 lpt_get_backlight(struct intel_connector *connector)
488{ 482{
489 struct drm_device *dev = connector->base.dev; 483 struct drm_device *dev = connector->base.dev;
490 struct drm_i915_private *dev_priv = dev->dev_private; 484 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -542,9 +536,10 @@ static u32 vlv_get_backlight(struct intel_connector *connector)
542static u32 bxt_get_backlight(struct intel_connector *connector) 536static u32 bxt_get_backlight(struct intel_connector *connector)
543{ 537{
544 struct drm_device *dev = connector->base.dev; 538 struct drm_device *dev = connector->base.dev;
539 struct intel_panel *panel = &connector->panel;
545 struct drm_i915_private *dev_priv = dev->dev_private; 540 struct drm_i915_private *dev_priv = dev->dev_private;
546 541
547 return I915_READ(BXT_BLC_PWM_DUTY1); 542 return I915_READ(BXT_BLC_PWM_DUTY(panel->backlight.controller));
548} 543}
549 544
550static u32 pwm_get_backlight(struct intel_connector *connector) 545static u32 pwm_get_backlight(struct intel_connector *connector)
@@ -566,7 +561,7 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
566 mutex_lock(&dev_priv->backlight_lock); 561 mutex_lock(&dev_priv->backlight_lock);
567 562
568 if (panel->backlight.enabled) { 563 if (panel->backlight.enabled) {
569 val = dev_priv->display.get_backlight(connector); 564 val = panel->backlight.get(connector);
570 val = intel_panel_compute_brightness(connector, val); 565 val = intel_panel_compute_brightness(connector, val);
571 } 566 }
572 567
@@ -576,7 +571,7 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
576 return val; 571 return val;
577} 572}
578 573
579static void bdw_set_backlight(struct intel_connector *connector, u32 level) 574static void lpt_set_backlight(struct intel_connector *connector, u32 level)
580{ 575{
581 struct drm_device *dev = connector->base.dev; 576 struct drm_device *dev = connector->base.dev;
582 struct drm_i915_private *dev_priv = dev->dev_private; 577 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -640,8 +635,9 @@ static void bxt_set_backlight(struct intel_connector *connector, u32 level)
640{ 635{
641 struct drm_device *dev = connector->base.dev; 636 struct drm_device *dev = connector->base.dev;
642 struct drm_i915_private *dev_priv = dev->dev_private; 637 struct drm_i915_private *dev_priv = dev->dev_private;
638 struct intel_panel *panel = &connector->panel;
643 639
644 I915_WRITE(BXT_BLC_PWM_DUTY1, level); 640 I915_WRITE(BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
645} 641}
646 642
647static void pwm_set_backlight(struct intel_connector *connector, u32 level) 643static void pwm_set_backlight(struct intel_connector *connector, u32 level)
@@ -655,13 +651,12 @@ static void pwm_set_backlight(struct intel_connector *connector, u32 level)
655static void 651static void
656intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level) 652intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
657{ 653{
658 struct drm_device *dev = connector->base.dev; 654 struct intel_panel *panel = &connector->panel;
659 struct drm_i915_private *dev_priv = dev->dev_private;
660 655
661 DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level); 656 DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
662 657
663 level = intel_panel_compute_brightness(connector, level); 658 level = intel_panel_compute_brightness(connector, level);
664 dev_priv->display.set_backlight(connector, level); 659 panel->backlight.set(connector, level);
665} 660}
666 661
667/* set backlight brightness to level in range [0..max], scaling wrt hw min */ 662/* set backlight brightness to level in range [0..max], scaling wrt hw min */
@@ -729,6 +724,32 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
729 mutex_unlock(&dev_priv->backlight_lock); 724 mutex_unlock(&dev_priv->backlight_lock);
730} 725}
731 726
727static void lpt_disable_backlight(struct intel_connector *connector)
728{
729 struct drm_device *dev = connector->base.dev;
730 struct drm_i915_private *dev_priv = dev->dev_private;
731 u32 tmp;
732
733 intel_panel_actually_set_backlight(connector, 0);
734
735 /*
736 * Although we don't support or enable CPU PWM with LPT/SPT based
737 * systems, it may have been enabled prior to loading the
738 * driver. Disable to avoid warnings on LCPLL disable.
739 *
740 * This needs rework if we need to add support for CPU PWM on PCH split
741 * platforms.
742 */
743 tmp = I915_READ(BLC_PWM_CPU_CTL2);
744 if (tmp & BLM_PWM_ENABLE) {
745 DRM_DEBUG_KMS("cpu backlight was enabled, disabling\n");
746 I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
747 }
748
749 tmp = I915_READ(BLC_PWM_PCH_CTL1);
750 I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
751}
752
732static void pch_disable_backlight(struct intel_connector *connector) 753static void pch_disable_backlight(struct intel_connector *connector)
733{ 754{
734 struct drm_device *dev = connector->base.dev; 755 struct drm_device *dev = connector->base.dev;
@@ -781,12 +802,20 @@ static void bxt_disable_backlight(struct intel_connector *connector)
781{ 802{
782 struct drm_device *dev = connector->base.dev; 803 struct drm_device *dev = connector->base.dev;
783 struct drm_i915_private *dev_priv = dev->dev_private; 804 struct drm_i915_private *dev_priv = dev->dev_private;
784 u32 tmp; 805 struct intel_panel *panel = &connector->panel;
806 u32 tmp, val;
785 807
786 intel_panel_actually_set_backlight(connector, 0); 808 intel_panel_actually_set_backlight(connector, 0);
787 809
788 tmp = I915_READ(BXT_BLC_PWM_CTL1); 810 tmp = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
789 I915_WRITE(BXT_BLC_PWM_CTL1, tmp & ~BXT_BLC_PWM_ENABLE); 811 I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
812 tmp & ~BXT_BLC_PWM_ENABLE);
813
814 if (panel->backlight.controller == 1) {
815 val = I915_READ(UTIL_PIN_CTL);
816 val &= ~UTIL_PIN_ENABLE;
817 I915_WRITE(UTIL_PIN_CTL, val);
818 }
790} 819}
791 820
792static void pwm_disable_backlight(struct intel_connector *connector) 821static void pwm_disable_backlight(struct intel_connector *connector)
@@ -809,7 +838,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
809 return; 838 return;
810 839
811 /* 840 /*
812 * Do not disable backlight on the vgaswitcheroo path. When switching 841 * Do not disable backlight on the vga_switcheroo path. When switching
813 * away from i915, the other client may depend on i915 to handle the 842 * away from i915, the other client may depend on i915 to handle the
814 * backlight. This will leave the backlight on unnecessarily when 843 * backlight. This will leave the backlight on unnecessarily when
815 * another client is not activated. 844 * another client is not activated.
@@ -824,12 +853,12 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
824 if (panel->backlight.device) 853 if (panel->backlight.device)
825 panel->backlight.device->props.power = FB_BLANK_POWERDOWN; 854 panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
826 panel->backlight.enabled = false; 855 panel->backlight.enabled = false;
827 dev_priv->display.disable_backlight(connector); 856 panel->backlight.disable(connector);
828 857
829 mutex_unlock(&dev_priv->backlight_lock); 858 mutex_unlock(&dev_priv->backlight_lock);
830} 859}
831 860
832static void bdw_enable_backlight(struct intel_connector *connector) 861static void lpt_enable_backlight(struct intel_connector *connector)
833{ 862{
834 struct drm_device *dev = connector->base.dev; 863 struct drm_device *dev = connector->base.dev;
835 struct drm_i915_private *dev_priv = dev->dev_private; 864 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1018,16 +1047,38 @@ static void bxt_enable_backlight(struct intel_connector *connector)
1018 struct drm_device *dev = connector->base.dev; 1047 struct drm_device *dev = connector->base.dev;
1019 struct drm_i915_private *dev_priv = dev->dev_private; 1048 struct drm_i915_private *dev_priv = dev->dev_private;
1020 struct intel_panel *panel = &connector->panel; 1049 struct intel_panel *panel = &connector->panel;
1021 u32 pwm_ctl; 1050 enum pipe pipe = intel_get_pipe_from_connector(connector);
1051 u32 pwm_ctl, val;
1052
1053 /* To use 2nd set of backlight registers, utility pin has to be
1054 * enabled with PWM mode.
1055 * The field should only be changed when the utility pin is disabled
1056 */
1057 if (panel->backlight.controller == 1) {
1058 val = I915_READ(UTIL_PIN_CTL);
1059 if (val & UTIL_PIN_ENABLE) {
1060 DRM_DEBUG_KMS("util pin already enabled\n");
1061 val &= ~UTIL_PIN_ENABLE;
1062 I915_WRITE(UTIL_PIN_CTL, val);
1063 }
1022 1064
1023 pwm_ctl = I915_READ(BXT_BLC_PWM_CTL1); 1065 val = 0;
1066 if (panel->backlight.util_pin_active_low)
1067 val |= UTIL_PIN_POLARITY;
1068 I915_WRITE(UTIL_PIN_CTL, val | UTIL_PIN_PIPE(pipe) |
1069 UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE);
1070 }
1071
1072 pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
1024 if (pwm_ctl & BXT_BLC_PWM_ENABLE) { 1073 if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
1025 DRM_DEBUG_KMS("backlight already enabled\n"); 1074 DRM_DEBUG_KMS("backlight already enabled\n");
1026 pwm_ctl &= ~BXT_BLC_PWM_ENABLE; 1075 pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
1027 I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl); 1076 I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
1077 pwm_ctl);
1028 } 1078 }
1029 1079
1030 I915_WRITE(BXT_BLC_PWM_FREQ1, panel->backlight.max); 1080 I915_WRITE(BXT_BLC_PWM_FREQ(panel->backlight.controller),
1081 panel->backlight.max);
1031 1082
1032 intel_panel_actually_set_backlight(connector, panel->backlight.level); 1083 intel_panel_actually_set_backlight(connector, panel->backlight.level);
1033 1084
@@ -1035,9 +1086,10 @@ static void bxt_enable_backlight(struct intel_connector *connector)
1035 if (panel->backlight.active_low_pwm) 1086 if (panel->backlight.active_low_pwm)
1036 pwm_ctl |= BXT_BLC_PWM_POLARITY; 1087 pwm_ctl |= BXT_BLC_PWM_POLARITY;
1037 1088
1038 I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl); 1089 I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
1039 POSTING_READ(BXT_BLC_PWM_CTL1); 1090 POSTING_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
1040 I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl | BXT_BLC_PWM_ENABLE); 1091 I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
1092 pwm_ctl | BXT_BLC_PWM_ENABLE);
1041} 1093}
1042 1094
1043static void pwm_enable_backlight(struct intel_connector *connector) 1095static void pwm_enable_backlight(struct intel_connector *connector)
@@ -1073,7 +1125,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
1073 panel->backlight.device->props.max_brightness); 1125 panel->backlight.device->props.max_brightness);
1074 } 1126 }
1075 1127
1076 dev_priv->display.enable_backlight(connector); 1128 panel->backlight.enable(connector);
1077 panel->backlight.enabled = true; 1129 panel->backlight.enabled = true;
1078 if (panel->backlight.device) 1130 if (panel->backlight.device)
1079 panel->backlight.device->props.power = FB_BLANK_UNBLANK; 1131 panel->backlight.device->props.power = FB_BLANK_UNBLANK;
@@ -1101,10 +1153,10 @@ static int intel_backlight_device_update_status(struct backlight_device *bd)
1101 * callback needs to take this into account. 1153 * callback needs to take this into account.
1102 */ 1154 */
1103 if (panel->backlight.enabled) { 1155 if (panel->backlight.enabled) {
1104 if (panel->backlight_power) { 1156 if (panel->backlight.power) {
1105 bool enable = bd->props.power == FB_BLANK_UNBLANK && 1157 bool enable = bd->props.power == FB_BLANK_UNBLANK &&
1106 bd->props.brightness != 0; 1158 bd->props.brightness != 0;
1107 panel->backlight_power(connector, enable); 1159 panel->backlight.power(connector, enable);
1108 } 1160 }
1109 } else { 1161 } else {
1110 bd->props.power = FB_BLANK_POWERDOWN; 1162 bd->props.power = FB_BLANK_POWERDOWN;
@@ -1212,10 +1264,150 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
1212#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */ 1264#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
1213 1265
1214/* 1266/*
1215 * Note: The setup hooks can't assume pipe is set! 1267 * SPT: This value represents the period of the PWM stream in clock periods
1268 * multiplied by 16 (default increment) or 128 (alternate increment selected in
1269 * SCHICKEN_1 bit 0). PWM clock is 24 MHz.
1270 */
1271static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
1272{
1273 struct drm_device *dev = connector->base.dev;
1274 struct drm_i915_private *dev_priv = dev->dev_private;
1275 u32 mul, clock;
1276
1277 if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY)
1278 mul = 128;
1279 else
1280 mul = 16;
1281
1282 clock = MHz(24);
1283
1284 return clock / (pwm_freq_hz * mul);
1285}
1286
1287/*
1288 * LPT: This value represents the period of the PWM stream in clock periods
1289 * multiplied by 128 (default increment) or 16 (alternate increment, selected in
1290 * LPT SOUTH_CHICKEN2 register bit 5).
1291 */
1292static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
1293{
1294 struct drm_device *dev = connector->base.dev;
1295 struct drm_i915_private *dev_priv = dev->dev_private;
1296 u32 mul, clock;
1297
1298 if (I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY)
1299 mul = 16;
1300 else
1301 mul = 128;
1302
1303 if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
1304 clock = MHz(135); /* LPT:H */
1305 else
1306 clock = MHz(24); /* LPT:LP */
1307
1308 return clock / (pwm_freq_hz * mul);
1309}
1310
1311/*
1312 * ILK/SNB/IVB: This value represents the period of the PWM stream in PCH
1313 * display raw clocks multiplied by 128.
1314 */
1315static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
1316{
1317 struct drm_device *dev = connector->base.dev;
1318 int clock = MHz(intel_pch_rawclk(dev));
1319
1320 return clock / (pwm_freq_hz * 128);
1321}
1322
1323/*
1324 * Gen2: This field determines the number of time base events (display core
1325 * clock frequency/32) in total for a complete cycle of modulated backlight
1326 * control.
1216 * 1327 *
1217 * XXX: Query mode clock or hardware clock and program PWM modulation frequency 1328 * Gen3: A time base event equals the display core clock ([DevPNV] HRAW clock)
1218 * appropriately when it's 0. Use VBT and/or sane defaults. 1329 * divided by 32.
1330 */
1331static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
1332{
1333 struct drm_device *dev = connector->base.dev;
1334 struct drm_i915_private *dev_priv = dev->dev_private;
1335 int clock;
1336
1337 if (IS_PINEVIEW(dev))
1338 clock = intel_hrawclk(dev);
1339 else
1340 clock = 1000 * dev_priv->display.get_display_clock_speed(dev);
1341
1342 return clock / (pwm_freq_hz * 32);
1343}
1344
1345/*
1346 * Gen4: This value represents the period of the PWM stream in display core
1347 * clocks multiplied by 128.
1348 */
1349static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
1350{
1351 struct drm_device *dev = connector->base.dev;
1352 struct drm_i915_private *dev_priv = dev->dev_private;
1353 int clock = 1000 * dev_priv->display.get_display_clock_speed(dev);
1354
1355 return clock / (pwm_freq_hz * 128);
1356}
1357
1358/*
1359 * VLV: This value represents the period of the PWM stream in display core
1360 * clocks ([DevCTG] 200MHz HRAW clocks) multiplied by 128 or 25MHz S0IX clocks
1361 * multiplied by 16. CHV uses a 19.2MHz S0IX clock.
1362 */
1363static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
1364{
1365 struct drm_device *dev = connector->base.dev;
1366 struct drm_i915_private *dev_priv = dev->dev_private;
1367 int clock;
1368
1369 if ((I915_READ(CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
1370 if (IS_CHERRYVIEW(dev))
1371 return KHz(19200) / (pwm_freq_hz * 16);
1372 else
1373 return MHz(25) / (pwm_freq_hz * 16);
1374 } else {
1375 clock = intel_hrawclk(dev);
1376 return MHz(clock) / (pwm_freq_hz * 128);
1377 }
1378}
1379
1380static u32 get_backlight_max_vbt(struct intel_connector *connector)
1381{
1382 struct drm_device *dev = connector->base.dev;
1383 struct drm_i915_private *dev_priv = dev->dev_private;
1384 struct intel_panel *panel = &connector->panel;
1385 u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz;
1386 u32 pwm;
1387
1388 if (!pwm_freq_hz) {
1389 DRM_DEBUG_KMS("backlight frequency not specified in VBT\n");
1390 return 0;
1391 }
1392
1393 if (!panel->backlight.hz_to_pwm) {
1394 DRM_DEBUG_KMS("backlight frequency setting from VBT currently not supported on this platform\n");
1395 return 0;
1396 }
1397
1398 pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz);
1399 if (!pwm) {
1400 DRM_DEBUG_KMS("backlight frequency conversion failed\n");
1401 return 0;
1402 }
1403
1404 DRM_DEBUG_KMS("backlight frequency %u Hz from VBT\n", pwm_freq_hz);
1405
1406 return pwm;
1407}
1408
1409/*
1410 * Note: The setup hooks can't assume pipe is set!
1219 */ 1411 */
1220static u32 get_backlight_min_vbt(struct intel_connector *connector) 1412static u32 get_backlight_min_vbt(struct intel_connector *connector)
1221{ 1413{
@@ -1243,7 +1435,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
1243 return scale(min, 0, 255, 0, panel->backlight.max); 1435 return scale(min, 0, 255, 0, panel->backlight.max);
1244} 1436}
1245 1437
1246static int bdw_setup_backlight(struct intel_connector *connector, enum pipe unused) 1438static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
1247{ 1439{
1248 struct drm_device *dev = connector->base.dev; 1440 struct drm_device *dev = connector->base.dev;
1249 struct drm_i915_private *dev_priv = dev->dev_private; 1441 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1255,12 +1447,16 @@ static int bdw_setup_backlight(struct intel_connector *connector, enum pipe unus
1255 1447
1256 pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2); 1448 pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
1257 panel->backlight.max = pch_ctl2 >> 16; 1449 panel->backlight.max = pch_ctl2 >> 16;
1450
1451 if (!panel->backlight.max)
1452 panel->backlight.max = get_backlight_max_vbt(connector);
1453
1258 if (!panel->backlight.max) 1454 if (!panel->backlight.max)
1259 return -ENODEV; 1455 return -ENODEV;
1260 1456
1261 panel->backlight.min = get_backlight_min_vbt(connector); 1457 panel->backlight.min = get_backlight_min_vbt(connector);
1262 1458
1263 val = bdw_get_backlight(connector); 1459 val = lpt_get_backlight(connector);
1264 panel->backlight.level = intel_panel_compute_brightness(connector, val); 1460 panel->backlight.level = intel_panel_compute_brightness(connector, val);
1265 1461
1266 panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) && 1462 panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) &&
@@ -1281,6 +1477,10 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
1281 1477
1282 pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2); 1478 pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
1283 panel->backlight.max = pch_ctl2 >> 16; 1479 panel->backlight.max = pch_ctl2 >> 16;
1480
1481 if (!panel->backlight.max)
1482 panel->backlight.max = get_backlight_max_vbt(connector);
1483
1284 if (!panel->backlight.max) 1484 if (!panel->backlight.max)
1285 return -ENODEV; 1485 return -ENODEV;
1286 1486
@@ -1312,12 +1512,18 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
1312 panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV; 1512 panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
1313 1513
1314 panel->backlight.max = ctl >> 17; 1514 panel->backlight.max = ctl >> 17;
1315 if (panel->backlight.combination_mode) 1515
1316 panel->backlight.max *= 0xff; 1516 if (!panel->backlight.max) {
1517 panel->backlight.max = get_backlight_max_vbt(connector);
1518 panel->backlight.max >>= 1;
1519 }
1317 1520
1318 if (!panel->backlight.max) 1521 if (!panel->backlight.max)
1319 return -ENODEV; 1522 return -ENODEV;
1320 1523
1524 if (panel->backlight.combination_mode)
1525 panel->backlight.max *= 0xff;
1526
1321 panel->backlight.min = get_backlight_min_vbt(connector); 1527 panel->backlight.min = get_backlight_min_vbt(connector);
1322 1528
1323 val = i9xx_get_backlight(connector); 1529 val = i9xx_get_backlight(connector);
@@ -1341,12 +1547,16 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
1341 1547
1342 ctl = I915_READ(BLC_PWM_CTL); 1548 ctl = I915_READ(BLC_PWM_CTL);
1343 panel->backlight.max = ctl >> 16; 1549 panel->backlight.max = ctl >> 16;
1344 if (panel->backlight.combination_mode) 1550
1345 panel->backlight.max *= 0xff; 1551 if (!panel->backlight.max)
1552 panel->backlight.max = get_backlight_max_vbt(connector);
1346 1553
1347 if (!panel->backlight.max) 1554 if (!panel->backlight.max)
1348 return -ENODEV; 1555 return -ENODEV;
1349 1556
1557 if (panel->backlight.combination_mode)
1558 panel->backlight.max *= 0xff;
1559
1350 panel->backlight.min = get_backlight_min_vbt(connector); 1560 panel->backlight.min = get_backlight_min_vbt(connector);
1351 1561
1352 val = i9xx_get_backlight(connector); 1562 val = i9xx_get_backlight(connector);
@@ -1363,21 +1573,8 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
1363 struct drm_device *dev = connector->base.dev; 1573 struct drm_device *dev = connector->base.dev;
1364 struct drm_i915_private *dev_priv = dev->dev_private; 1574 struct drm_i915_private *dev_priv = dev->dev_private;
1365 struct intel_panel *panel = &connector->panel; 1575 struct intel_panel *panel = &connector->panel;
1366 enum pipe p;
1367 u32 ctl, ctl2, val; 1576 u32 ctl, ctl2, val;
1368 1577
1369 for_each_pipe(dev_priv, p) {
1370 u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(p));
1371
1372 /* Skip if the modulation freq is already set */
1373 if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
1374 continue;
1375
1376 cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
1377 I915_WRITE(VLV_BLC_PWM_CTL(p), (0xf42 << 16) |
1378 cur_val);
1379 }
1380
1381 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) 1578 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
1382 return -ENODEV; 1579 return -ENODEV;
1383 1580
@@ -1386,6 +1583,10 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
1386 1583
1387 ctl = I915_READ(VLV_BLC_PWM_CTL(pipe)); 1584 ctl = I915_READ(VLV_BLC_PWM_CTL(pipe));
1388 panel->backlight.max = ctl >> 16; 1585 panel->backlight.max = ctl >> 16;
1586
1587 if (!panel->backlight.max)
1588 panel->backlight.max = get_backlight_max_vbt(connector);
1589
1389 if (!panel->backlight.max) 1590 if (!panel->backlight.max)
1390 return -ENODEV; 1591 return -ENODEV;
1391 1592
@@ -1408,10 +1609,32 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
1408 struct intel_panel *panel = &connector->panel; 1609 struct intel_panel *panel = &connector->panel;
1409 u32 pwm_ctl, val; 1610 u32 pwm_ctl, val;
1410 1611
1411 pwm_ctl = I915_READ(BXT_BLC_PWM_CTL1); 1612 /*
1613 * For BXT hard coding the Backlight controller to 0.
1614 * TODO : Read the controller value from VBT and generalize
1615 */
1616 panel->backlight.controller = 0;
1617
1618 pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
1619
1620 /* Keeping the check if controller 1 is to be programmed.
1621 * This will come into affect once the VBT parsing
1622 * is fixed for controller selection, and controller 1 is used
1623 * for a prticular display configuration.
1624 */
1625 if (panel->backlight.controller == 1) {
1626 val = I915_READ(UTIL_PIN_CTL);
1627 panel->backlight.util_pin_active_low =
1628 val & UTIL_PIN_POLARITY;
1629 }
1630
1412 panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY; 1631 panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
1632 panel->backlight.max =
1633 I915_READ(BXT_BLC_PWM_FREQ(panel->backlight.controller));
1634
1635 if (!panel->backlight.max)
1636 panel->backlight.max = get_backlight_max_vbt(connector);
1413 1637
1414 panel->backlight.max = I915_READ(BXT_BLC_PWM_FREQ1);
1415 if (!panel->backlight.max) 1638 if (!panel->backlight.max)
1416 return -ENODEV; 1639 return -ENODEV;
1417 1640
@@ -1475,9 +1698,13 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
1475 } 1698 }
1476 } 1699 }
1477 1700
1701 /* ensure intel_panel has been initialized first */
1702 if (WARN_ON(!panel->backlight.setup))
1703 return -ENODEV;
1704
1478 /* set level and max in panel struct */ 1705 /* set level and max in panel struct */
1479 mutex_lock(&dev_priv->backlight_lock); 1706 mutex_lock(&dev_priv->backlight_lock);
1480 ret = dev_priv->display.setup_backlight(intel_connector, pipe); 1707 ret = panel->backlight.setup(intel_connector, pipe);
1481 mutex_unlock(&dev_priv->backlight_lock); 1708 mutex_unlock(&dev_priv->backlight_lock);
1482 1709
1483 if (ret) { 1710 if (ret) {
@@ -1509,54 +1736,66 @@ void intel_panel_destroy_backlight(struct drm_connector *connector)
1509} 1736}
1510 1737
1511/* Set up chip specific backlight functions */ 1738/* Set up chip specific backlight functions */
1512void intel_panel_init_backlight_funcs(struct drm_device *dev) 1739static void
1740intel_panel_init_backlight_funcs(struct intel_panel *panel)
1513{ 1741{
1742 struct intel_connector *intel_connector =
1743 container_of(panel, struct intel_connector, panel);
1744 struct drm_device *dev = intel_connector->base.dev;
1514 struct drm_i915_private *dev_priv = dev->dev_private; 1745 struct drm_i915_private *dev_priv = dev->dev_private;
1515 1746
1516 if (IS_BROXTON(dev)) { 1747 if (IS_BROXTON(dev)) {
1517 dev_priv->display.setup_backlight = bxt_setup_backlight; 1748 panel->backlight.setup = bxt_setup_backlight;
1518 dev_priv->display.enable_backlight = bxt_enable_backlight; 1749 panel->backlight.enable = bxt_enable_backlight;
1519 dev_priv->display.disable_backlight = bxt_disable_backlight; 1750 panel->backlight.disable = bxt_disable_backlight;
1520 dev_priv->display.set_backlight = bxt_set_backlight; 1751 panel->backlight.set = bxt_set_backlight;
1521 dev_priv->display.get_backlight = bxt_get_backlight; 1752 panel->backlight.get = bxt_get_backlight;
1522 } else if (IS_BROADWELL(dev) || IS_SKYLAKE(dev)) { 1753 } else if (HAS_PCH_LPT(dev) || HAS_PCH_SPT(dev)) {
1523 dev_priv->display.setup_backlight = bdw_setup_backlight; 1754 panel->backlight.setup = lpt_setup_backlight;
1524 dev_priv->display.enable_backlight = bdw_enable_backlight; 1755 panel->backlight.enable = lpt_enable_backlight;
1525 dev_priv->display.disable_backlight = pch_disable_backlight; 1756 panel->backlight.disable = lpt_disable_backlight;
1526 dev_priv->display.set_backlight = bdw_set_backlight; 1757 panel->backlight.set = lpt_set_backlight;
1527 dev_priv->display.get_backlight = bdw_get_backlight; 1758 panel->backlight.get = lpt_get_backlight;
1759 if (HAS_PCH_LPT(dev))
1760 panel->backlight.hz_to_pwm = lpt_hz_to_pwm;
1761 else
1762 panel->backlight.hz_to_pwm = spt_hz_to_pwm;
1528 } else if (HAS_PCH_SPLIT(dev)) { 1763 } else if (HAS_PCH_SPLIT(dev)) {
1529 dev_priv->display.setup_backlight = pch_setup_backlight; 1764 panel->backlight.setup = pch_setup_backlight;
1530 dev_priv->display.enable_backlight = pch_enable_backlight; 1765 panel->backlight.enable = pch_enable_backlight;
1531 dev_priv->display.disable_backlight = pch_disable_backlight; 1766 panel->backlight.disable = pch_disable_backlight;
1532 dev_priv->display.set_backlight = pch_set_backlight; 1767 panel->backlight.set = pch_set_backlight;
1533 dev_priv->display.get_backlight = pch_get_backlight; 1768 panel->backlight.get = pch_get_backlight;
1769 panel->backlight.hz_to_pwm = pch_hz_to_pwm;
1534 } else if (IS_VALLEYVIEW(dev)) { 1770 } else if (IS_VALLEYVIEW(dev)) {
1535 if (dev_priv->vbt.has_mipi) { 1771 if (dev_priv->vbt.has_mipi) {
1536 dev_priv->display.setup_backlight = pwm_setup_backlight; 1772 panel->backlight.setup = pwm_setup_backlight;
1537 dev_priv->display.enable_backlight = pwm_enable_backlight; 1773 panel->backlight.enable = pwm_enable_backlight;
1538 dev_priv->display.disable_backlight = pwm_disable_backlight; 1774 panel->backlight.disable = pwm_disable_backlight;
1539 dev_priv->display.set_backlight = pwm_set_backlight; 1775 panel->backlight.set = pwm_set_backlight;
1540 dev_priv->display.get_backlight = pwm_get_backlight; 1776 panel->backlight.get = pwm_get_backlight;
1541 } else { 1777 } else {
1542 dev_priv->display.setup_backlight = vlv_setup_backlight; 1778 panel->backlight.setup = vlv_setup_backlight;
1543 dev_priv->display.enable_backlight = vlv_enable_backlight; 1779 panel->backlight.enable = vlv_enable_backlight;
1544 dev_priv->display.disable_backlight = vlv_disable_backlight; 1780 panel->backlight.disable = vlv_disable_backlight;
1545 dev_priv->display.set_backlight = vlv_set_backlight; 1781 panel->backlight.set = vlv_set_backlight;
1546 dev_priv->display.get_backlight = vlv_get_backlight; 1782 panel->backlight.get = vlv_get_backlight;
1783 panel->backlight.hz_to_pwm = vlv_hz_to_pwm;
1547 } 1784 }
1548 } else if (IS_GEN4(dev)) { 1785 } else if (IS_GEN4(dev)) {
1549 dev_priv->display.setup_backlight = i965_setup_backlight; 1786 panel->backlight.setup = i965_setup_backlight;
1550 dev_priv->display.enable_backlight = i965_enable_backlight; 1787 panel->backlight.enable = i965_enable_backlight;
1551 dev_priv->display.disable_backlight = i965_disable_backlight; 1788 panel->backlight.disable = i965_disable_backlight;
1552 dev_priv->display.set_backlight = i9xx_set_backlight; 1789 panel->backlight.set = i9xx_set_backlight;
1553 dev_priv->display.get_backlight = i9xx_get_backlight; 1790 panel->backlight.get = i9xx_get_backlight;
1791 panel->backlight.hz_to_pwm = i965_hz_to_pwm;
1554 } else { 1792 } else {
1555 dev_priv->display.setup_backlight = i9xx_setup_backlight; 1793 panel->backlight.setup = i9xx_setup_backlight;
1556 dev_priv->display.enable_backlight = i9xx_enable_backlight; 1794 panel->backlight.enable = i9xx_enable_backlight;
1557 dev_priv->display.disable_backlight = i9xx_disable_backlight; 1795 panel->backlight.disable = i9xx_disable_backlight;
1558 dev_priv->display.set_backlight = i9xx_set_backlight; 1796 panel->backlight.set = i9xx_set_backlight;
1559 dev_priv->display.get_backlight = i9xx_get_backlight; 1797 panel->backlight.get = i9xx_get_backlight;
1798 panel->backlight.hz_to_pwm = i9xx_hz_to_pwm;
1560 } 1799 }
1561} 1800}
1562 1801
@@ -1564,6 +1803,8 @@ int intel_panel_init(struct intel_panel *panel,
1564 struct drm_display_mode *fixed_mode, 1803 struct drm_display_mode *fixed_mode,
1565 struct drm_display_mode *downclock_mode) 1804 struct drm_display_mode *downclock_mode)
1566{ 1805{
1806 intel_panel_init_backlight_funcs(panel);
1807
1567 panel->fixed_mode = fixed_mode; 1808 panel->fixed_mode = fixed_mode;
1568 panel->downclock_mode = downclock_mode; 1809 panel->downclock_mode = downclock_mode;
1569 1810
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ddbb7ed0a193..d52a15df6917 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -52,82 +52,20 @@
52#define INTEL_RC6p_ENABLE (1<<1) 52#define INTEL_RC6p_ENABLE (1<<1)
53#define INTEL_RC6pp_ENABLE (1<<2) 53#define INTEL_RC6pp_ENABLE (1<<2)
54 54
55static void gen9_init_clock_gating(struct drm_device *dev)
56{
57 struct drm_i915_private *dev_priv = dev->dev_private;
58
59 /* WaEnableLbsSlaRetryTimerDecrement:skl */
60 I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
61 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
62
63 /* WaDisableKillLogic:bxt,skl */
64 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
65 ECOCHK_DIS_TLB);
66}
67
68static void skl_init_clock_gating(struct drm_device *dev)
69{
70 struct drm_i915_private *dev_priv = dev->dev_private;
71
72 gen9_init_clock_gating(dev);
73
74 if (INTEL_REVID(dev) <= SKL_REVID_B0) {
75 /*
76 * WaDisableSDEUnitClockGating:skl
77 * WaSetGAPSunitClckGateDisable:skl
78 */
79 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
80 GEN8_GAPSUNIT_CLOCK_GATE_DISABLE |
81 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
82
83 /* WaDisableVFUnitClockGating:skl */
84 I915_WRITE(GEN6_UCGCTL2, I915_READ(GEN6_UCGCTL2) |
85 GEN6_VFUNIT_CLOCK_GATE_DISABLE);
86 }
87
88 if (INTEL_REVID(dev) <= SKL_REVID_D0) {
89 /* WaDisableHDCInvalidation:skl */
90 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
91 BDW_DISABLE_HDC_INVALIDATION);
92
93 /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
94 I915_WRITE(FF_SLICE_CS_CHICKEN2,
95 _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
96 }
97
98 /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
99 * involving this register should also be added to WA batch as required.
100 */
101 if (INTEL_REVID(dev) <= SKL_REVID_E0)
102 /* WaDisableLSQCROPERFforOCL:skl */
103 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
104 GEN8_LQSC_RO_PERF_DIS);
105
106 /* WaEnableGapsTsvCreditFix:skl */
107 if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) {
108 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
109 GEN9_GAPS_TSV_CREDIT_DISABLE));
110 }
111}
112
113static void bxt_init_clock_gating(struct drm_device *dev) 55static void bxt_init_clock_gating(struct drm_device *dev)
114{ 56{
115 struct drm_i915_private *dev_priv = dev->dev_private; 57 struct drm_i915_private *dev_priv = dev->dev_private;
116 58
117 gen9_init_clock_gating(dev); 59 /* WaDisableSDEUnitClockGating:bxt */
60 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
61 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
118 62
119 /* 63 /*
120 * FIXME: 64 * FIXME:
121 * GEN8_SDEUNIT_CLOCK_GATE_DISABLE applies on A0 only.
122 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only. 65 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
123 */ 66 */
124 /* WaDisableSDEUnitClockGating:bxt */
125 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 67 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
126 GEN8_SDEUNIT_CLOCK_GATE_DISABLE |
127 GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ); 68 GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
128
129 /* FIXME: apply on A0 only */
130 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
131} 69}
132 70
133static void i915_pineview_get_mem_freq(struct drm_device *dev) 71static void i915_pineview_get_mem_freq(struct drm_device *dev)
@@ -691,12 +629,9 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
691 629
692 crtc = single_enabled_crtc(dev); 630 crtc = single_enabled_crtc(dev);
693 if (crtc) { 631 if (crtc) {
694 const struct drm_display_mode *adjusted_mode; 632 const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
695 int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; 633 int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
696 int clock; 634 int clock = adjusted_mode->crtc_clock;
697
698 adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
699 clock = adjusted_mode->crtc_clock;
700 635
701 /* Display SR */ 636 /* Display SR */
702 wm = intel_calculate_wm(clock, &pineview_display_wm, 637 wm = intel_calculate_wm(clock, &pineview_display_wm,
@@ -1200,7 +1135,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc)
1200 case DRM_PLANE_TYPE_CURSOR: 1135 case DRM_PLANE_TYPE_CURSOR:
1201 for (level = 0; level < wm_state->num_levels; level++) 1136 for (level = 0; level < wm_state->num_levels; level++)
1202 wm_state->sr[level].cursor = 1137 wm_state->sr[level].cursor =
1203 wm_state->sr[level].cursor; 1138 wm_state->wm[level].cursor;
1204 break; 1139 break;
1205 case DRM_PLANE_TYPE_PRIMARY: 1140 case DRM_PLANE_TYPE_PRIMARY:
1206 for (level = 0; level < wm_state->num_levels; level++) 1141 for (level = 0; level < wm_state->num_levels; level++)
@@ -1490,8 +1425,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
1490 if (crtc) { 1425 if (crtc) {
1491 /* self-refresh has much higher latency */ 1426 /* self-refresh has much higher latency */
1492 static const int sr_latency_ns = 12000; 1427 static const int sr_latency_ns = 12000;
1493 const struct drm_display_mode *adjusted_mode = 1428 const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
1494 &to_intel_crtc(crtc)->config->base.adjusted_mode;
1495 int clock = adjusted_mode->crtc_clock; 1429 int clock = adjusted_mode->crtc_clock;
1496 int htotal = adjusted_mode->crtc_htotal; 1430 int htotal = adjusted_mode->crtc_htotal;
1497 int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w; 1431 int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
@@ -1638,8 +1572,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1638 if (HAS_FW_BLC(dev) && enabled) { 1572 if (HAS_FW_BLC(dev) && enabled) {
1639 /* self-refresh has much higher latency */ 1573 /* self-refresh has much higher latency */
1640 static const int sr_latency_ns = 6000; 1574 static const int sr_latency_ns = 6000;
1641 const struct drm_display_mode *adjusted_mode = 1575 const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode;
1642 &to_intel_crtc(enabled)->config->base.adjusted_mode;
1643 int clock = adjusted_mode->crtc_clock; 1576 int clock = adjusted_mode->crtc_clock;
1644 int htotal = adjusted_mode->crtc_htotal; 1577 int htotal = adjusted_mode->crtc_htotal;
1645 int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w; 1578 int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
@@ -1780,16 +1713,6 @@ struct skl_pipe_wm_parameters {
1780 uint32_t pipe_htotal; 1713 uint32_t pipe_htotal;
1781 uint32_t pixel_rate; /* in KHz */ 1714 uint32_t pixel_rate; /* in KHz */
1782 struct intel_plane_wm_parameters plane[I915_MAX_PLANES]; 1715 struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
1783 struct intel_plane_wm_parameters cursor;
1784};
1785
1786struct ilk_pipe_wm_parameters {
1787 bool active;
1788 uint32_t pipe_htotal;
1789 uint32_t pixel_rate;
1790 struct intel_plane_wm_parameters pri;
1791 struct intel_plane_wm_parameters spr;
1792 struct intel_plane_wm_parameters cur;
1793}; 1716};
1794 1717
1795struct ilk_wm_maximums { 1718struct ilk_wm_maximums {
@@ -1810,26 +1733,26 @@ struct intel_wm_config {
1810 * For both WM_PIPE and WM_LP. 1733 * For both WM_PIPE and WM_LP.
1811 * mem_value must be in 0.1us units. 1734 * mem_value must be in 0.1us units.
1812 */ 1735 */
1813static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params, 1736static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
1737 const struct intel_plane_state *pstate,
1814 uint32_t mem_value, 1738 uint32_t mem_value,
1815 bool is_lp) 1739 bool is_lp)
1816{ 1740{
1741 int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
1817 uint32_t method1, method2; 1742 uint32_t method1, method2;
1818 1743
1819 if (!params->active || !params->pri.enabled) 1744 if (!cstate->base.active || !pstate->visible)
1820 return 0; 1745 return 0;
1821 1746
1822 method1 = ilk_wm_method1(params->pixel_rate, 1747 method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);
1823 params->pri.bytes_per_pixel,
1824 mem_value);
1825 1748
1826 if (!is_lp) 1749 if (!is_lp)
1827 return method1; 1750 return method1;
1828 1751
1829 method2 = ilk_wm_method2(params->pixel_rate, 1752 method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
1830 params->pipe_htotal, 1753 cstate->base.adjusted_mode.crtc_htotal,
1831 params->pri.horiz_pixels, 1754 drm_rect_width(&pstate->dst),
1832 params->pri.bytes_per_pixel, 1755 bpp,
1833 mem_value); 1756 mem_value);
1834 1757
1835 return min(method1, method2); 1758 return min(method1, method2);
@@ -1839,21 +1762,21 @@ static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
1839 * For both WM_PIPE and WM_LP. 1762 * For both WM_PIPE and WM_LP.
1840 * mem_value must be in 0.1us units. 1763 * mem_value must be in 0.1us units.
1841 */ 1764 */
1842static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params, 1765static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
1766 const struct intel_plane_state *pstate,
1843 uint32_t mem_value) 1767 uint32_t mem_value)
1844{ 1768{
1769 int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
1845 uint32_t method1, method2; 1770 uint32_t method1, method2;
1846 1771
1847 if (!params->active || !params->spr.enabled) 1772 if (!cstate->base.active || !pstate->visible)
1848 return 0; 1773 return 0;
1849 1774
1850 method1 = ilk_wm_method1(params->pixel_rate, 1775 method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);
1851 params->spr.bytes_per_pixel, 1776 method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
1852 mem_value); 1777 cstate->base.adjusted_mode.crtc_htotal,
1853 method2 = ilk_wm_method2(params->pixel_rate, 1778 drm_rect_width(&pstate->dst),
1854 params->pipe_htotal, 1779 bpp,
1855 params->spr.horiz_pixels,
1856 params->spr.bytes_per_pixel,
1857 mem_value); 1780 mem_value);
1858 return min(method1, method2); 1781 return min(method1, method2);
1859} 1782}
@@ -1862,29 +1785,33 @@ static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
1862 * For both WM_PIPE and WM_LP. 1785 * For both WM_PIPE and WM_LP.
1863 * mem_value must be in 0.1us units. 1786 * mem_value must be in 0.1us units.
1864 */ 1787 */
1865static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params, 1788static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
1789 const struct intel_plane_state *pstate,
1866 uint32_t mem_value) 1790 uint32_t mem_value)
1867{ 1791{
1868 if (!params->active || !params->cur.enabled) 1792 int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
1793
1794 if (!cstate->base.active || !pstate->visible)
1869 return 0; 1795 return 0;
1870 1796
1871 return ilk_wm_method2(params->pixel_rate, 1797 return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
1872 params->pipe_htotal, 1798 cstate->base.adjusted_mode.crtc_htotal,
1873 params->cur.horiz_pixels, 1799 drm_rect_width(&pstate->dst),
1874 params->cur.bytes_per_pixel, 1800 bpp,
1875 mem_value); 1801 mem_value);
1876} 1802}
1877 1803
1878/* Only for WM_LP. */ 1804/* Only for WM_LP. */
1879static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params, 1805static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
1806 const struct intel_plane_state *pstate,
1880 uint32_t pri_val) 1807 uint32_t pri_val)
1881{ 1808{
1882 if (!params->active || !params->pri.enabled) 1809 int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
1810
1811 if (!cstate->base.active || !pstate->visible)
1883 return 0; 1812 return 0;
1884 1813
1885 return ilk_wm_fbc(pri_val, 1814 return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), bpp);
1886 params->pri.horiz_pixels,
1887 params->pri.bytes_per_pixel);
1888} 1815}
1889 1816
1890static unsigned int ilk_display_fifo_size(const struct drm_device *dev) 1817static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
@@ -2049,10 +1976,12 @@ static bool ilk_validate_wm_level(int level,
2049} 1976}
2050 1977
2051static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, 1978static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
1979 const struct intel_crtc *intel_crtc,
2052 int level, 1980 int level,
2053 const struct ilk_pipe_wm_parameters *p, 1981 struct intel_crtc_state *cstate,
2054 struct intel_wm_level *result) 1982 struct intel_wm_level *result)
2055{ 1983{
1984 struct intel_plane *intel_plane;
2056 uint16_t pri_latency = dev_priv->wm.pri_latency[level]; 1985 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
2057 uint16_t spr_latency = dev_priv->wm.spr_latency[level]; 1986 uint16_t spr_latency = dev_priv->wm.spr_latency[level];
2058 uint16_t cur_latency = dev_priv->wm.cur_latency[level]; 1987 uint16_t cur_latency = dev_priv->wm.cur_latency[level];
@@ -2064,10 +1993,29 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2064 cur_latency *= 5; 1993 cur_latency *= 5;
2065 } 1994 }
2066 1995
2067 result->pri_val = ilk_compute_pri_wm(p, pri_latency, level); 1996 for_each_intel_plane_on_crtc(dev_priv->dev, intel_crtc, intel_plane) {
2068 result->spr_val = ilk_compute_spr_wm(p, spr_latency); 1997 struct intel_plane_state *pstate =
2069 result->cur_val = ilk_compute_cur_wm(p, cur_latency); 1998 to_intel_plane_state(intel_plane->base.state);
2070 result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val); 1999
2000 switch (intel_plane->base.type) {
2001 case DRM_PLANE_TYPE_PRIMARY:
2002 result->pri_val = ilk_compute_pri_wm(cstate, pstate,
2003 pri_latency,
2004 level);
2005 result->fbc_val = ilk_compute_fbc_wm(cstate, pstate,
2006 result->pri_val);
2007 break;
2008 case DRM_PLANE_TYPE_OVERLAY:
2009 result->spr_val = ilk_compute_spr_wm(cstate, pstate,
2010 spr_latency);
2011 break;
2012 case DRM_PLANE_TYPE_CURSOR:
2013 result->cur_val = ilk_compute_cur_wm(cstate, pstate,
2014 cur_latency);
2015 break;
2016 }
2017 }
2018
2071 result->enable = true; 2019 result->enable = true;
2072} 2020}
2073 2021
@@ -2076,7 +2024,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2076{ 2024{
2077 struct drm_i915_private *dev_priv = dev->dev_private; 2025 struct drm_i915_private *dev_priv = dev->dev_private;
2078 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2026 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2079 struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode; 2027 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
2080 u32 linetime, ips_linetime; 2028 u32 linetime, ips_linetime;
2081 2029
2082 if (!intel_crtc->active) 2030 if (!intel_crtc->active)
@@ -2085,9 +2033,9 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2085 /* The WM are computed with base on how long it takes to fill a single 2033 /* The WM are computed with base on how long it takes to fill a single
2086 * row at the given clock rate, multiplied by 8. 2034 * row at the given clock rate, multiplied by 8.
2087 * */ 2035 * */
2088 linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8, 2036 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2089 mode->crtc_clock); 2037 adjusted_mode->crtc_clock);
2090 ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8, 2038 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2091 dev_priv->cdclk_freq); 2039 dev_priv->cdclk_freq);
2092 2040
2093 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | 2041 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
@@ -2326,48 +2274,6 @@ static void skl_setup_wm_latency(struct drm_device *dev)
2326 intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency); 2274 intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
2327} 2275}
2328 2276
2329static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2330 struct ilk_pipe_wm_parameters *p)
2331{
2332 struct drm_device *dev = crtc->dev;
2333 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2334 enum pipe pipe = intel_crtc->pipe;
2335 struct drm_plane *plane;
2336
2337 if (!intel_crtc->active)
2338 return;
2339
2340 p->active = true;
2341 p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
2342 p->pixel_rate = ilk_pipe_pixel_rate(intel_crtc->config);
2343
2344 if (crtc->primary->state->fb)
2345 p->pri.bytes_per_pixel =
2346 crtc->primary->state->fb->bits_per_pixel / 8;
2347 else
2348 p->pri.bytes_per_pixel = 4;
2349
2350 p->cur.bytes_per_pixel = 4;
2351 /*
2352 * TODO: for now, assume primary and cursor planes are always enabled.
2353 * Setting them to false makes the screen flicker.
2354 */
2355 p->pri.enabled = true;
2356 p->cur.enabled = true;
2357
2358 p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
2359 p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w;
2360
2361 drm_for_each_legacy_plane(plane, dev) {
2362 struct intel_plane *intel_plane = to_intel_plane(plane);
2363
2364 if (intel_plane->pipe == pipe) {
2365 p->spr = intel_plane->wm;
2366 break;
2367 }
2368 }
2369}
2370
2371static void ilk_compute_wm_config(struct drm_device *dev, 2277static void ilk_compute_wm_config(struct drm_device *dev,
2372 struct intel_wm_config *config) 2278 struct intel_wm_config *config)
2373{ 2279{
@@ -2387,34 +2293,47 @@ static void ilk_compute_wm_config(struct drm_device *dev,
2387} 2293}
2388 2294
2389/* Compute new watermarks for the pipe */ 2295/* Compute new watermarks for the pipe */
2390static bool intel_compute_pipe_wm(struct drm_crtc *crtc, 2296static bool intel_compute_pipe_wm(struct intel_crtc_state *cstate,
2391 const struct ilk_pipe_wm_parameters *params,
2392 struct intel_pipe_wm *pipe_wm) 2297 struct intel_pipe_wm *pipe_wm)
2393{ 2298{
2299 struct drm_crtc *crtc = cstate->base.crtc;
2394 struct drm_device *dev = crtc->dev; 2300 struct drm_device *dev = crtc->dev;
2395 const struct drm_i915_private *dev_priv = dev->dev_private; 2301 const struct drm_i915_private *dev_priv = dev->dev_private;
2302 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2303 struct intel_plane *intel_plane;
2304 struct intel_plane_state *sprstate = NULL;
2396 int level, max_level = ilk_wm_max_level(dev); 2305 int level, max_level = ilk_wm_max_level(dev);
2397 /* LP0 watermark maximums depend on this pipe alone */ 2306 /* LP0 watermark maximums depend on this pipe alone */
2398 struct intel_wm_config config = { 2307 struct intel_wm_config config = {
2399 .num_pipes_active = 1, 2308 .num_pipes_active = 1,
2400 .sprites_enabled = params->spr.enabled,
2401 .sprites_scaled = params->spr.scaled,
2402 }; 2309 };
2403 struct ilk_wm_maximums max; 2310 struct ilk_wm_maximums max;
2404 2311
2405 pipe_wm->pipe_enabled = params->active; 2312 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2406 pipe_wm->sprites_enabled = params->spr.enabled; 2313 if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY) {
2407 pipe_wm->sprites_scaled = params->spr.scaled; 2314 sprstate = to_intel_plane_state(intel_plane->base.state);
2315 break;
2316 }
2317 }
2318
2319 config.sprites_enabled = sprstate->visible;
2320 config.sprites_scaled = sprstate->visible &&
2321 (drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
2322 drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
2323
2324 pipe_wm->pipe_enabled = cstate->base.active;
2325 pipe_wm->sprites_enabled = sprstate->visible;
2326 pipe_wm->sprites_scaled = config.sprites_scaled;
2408 2327
2409 /* ILK/SNB: LP2+ watermarks only w/o sprites */ 2328 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2410 if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled) 2329 if (INTEL_INFO(dev)->gen <= 6 && sprstate->visible)
2411 max_level = 1; 2330 max_level = 1;
2412 2331
2413 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */ 2332 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2414 if (params->spr.scaled) 2333 if (config.sprites_scaled)
2415 max_level = 0; 2334 max_level = 0;
2416 2335
2417 ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]); 2336 ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate, &pipe_wm->wm[0]);
2418 2337
2419 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2338 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2420 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc); 2339 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
@@ -2431,7 +2350,7 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2431 for (level = 1; level <= max_level; level++) { 2350 for (level = 1; level <= max_level; level++) {
2432 struct intel_wm_level wm = {}; 2351 struct intel_wm_level wm = {};
2433 2352
2434 ilk_compute_wm_level(dev_priv, level, params, &wm); 2353 ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate, &wm);
2435 2354
2436 /* 2355 /*
2437 * Disable any watermark level that exceeds the 2356 * Disable any watermark level that exceeds the
@@ -2899,7 +2818,12 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2899 int plane; 2818 int plane;
2900 u32 val; 2819 u32 val;
2901 2820
2821 memset(ddb, 0, sizeof(*ddb));
2822
2902 for_each_pipe(dev_priv, pipe) { 2823 for_each_pipe(dev_priv, pipe) {
2824 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe)))
2825 continue;
2826
2903 for_each_plane(dev_priv, pipe, plane) { 2827 for_each_plane(dev_priv, pipe, plane) {
2904 val = I915_READ(PLANE_BUF_CFG(pipe, plane)); 2828 val = I915_READ(PLANE_BUF_CFG(pipe, plane));
2905 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane], 2829 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
@@ -2907,7 +2831,8 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2907 } 2831 }
2908 2832
2909 val = I915_READ(CUR_BUF_CFG(pipe)); 2833 val = I915_READ(CUR_BUF_CFG(pipe));
2910 skl_ddb_entry_init_from_hw(&ddb->cursor[pipe], val); 2834 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
2835 val);
2911 } 2836 }
2912} 2837}
2913 2838
@@ -2976,13 +2901,14 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
2976 alloc_size = skl_ddb_entry_size(alloc); 2901 alloc_size = skl_ddb_entry_size(alloc);
2977 if (alloc_size == 0) { 2902 if (alloc_size == 0) {
2978 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); 2903 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
2979 memset(&ddb->cursor[pipe], 0, sizeof(ddb->cursor[pipe])); 2904 memset(&ddb->plane[pipe][PLANE_CURSOR], 0,
2905 sizeof(ddb->plane[pipe][PLANE_CURSOR]));
2980 return; 2906 return;
2981 } 2907 }
2982 2908
2983 cursor_blocks = skl_cursor_allocation(config); 2909 cursor_blocks = skl_cursor_allocation(config);
2984 ddb->cursor[pipe].start = alloc->end - cursor_blocks; 2910 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
2985 ddb->cursor[pipe].end = alloc->end; 2911 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
2986 2912
2987 alloc_size -= cursor_blocks; 2913 alloc_size -= cursor_blocks;
2988 alloc->end -= cursor_blocks; 2914 alloc->end -= cursor_blocks;
@@ -3121,8 +3047,8 @@ static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
3121 sizeof(new_ddb->plane[pipe]))) 3047 sizeof(new_ddb->plane[pipe])))
3122 return true; 3048 return true;
3123 3049
3124 if (memcmp(&new_ddb->cursor[pipe], &cur_ddb->cursor[pipe], 3050 if (memcmp(&new_ddb->plane[pipe][PLANE_CURSOR], &cur_ddb->plane[pipe][PLANE_CURSOR],
3125 sizeof(new_ddb->cursor[pipe]))) 3051 sizeof(new_ddb->plane[pipe][PLANE_CURSOR])))
3126 return true; 3052 return true;
3127 3053
3128 return false; 3054 return false;
@@ -3166,7 +3092,8 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
3166 if (fb) { 3092 if (fb) {
3167 p->plane[0].enabled = true; 3093 p->plane[0].enabled = true;
3168 p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ? 3094 p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
3169 drm_format_plane_cpp(fb->pixel_format, 1) : fb->bits_per_pixel / 8; 3095 drm_format_plane_cpp(fb->pixel_format, 1) :
3096 drm_format_plane_cpp(fb->pixel_format, 0);
3170 p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ? 3097 p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
3171 drm_format_plane_cpp(fb->pixel_format, 0) : 0; 3098 drm_format_plane_cpp(fb->pixel_format, 0) : 0;
3172 p->plane[0].tiling = fb->modifier[0]; 3099 p->plane[0].tiling = fb->modifier[0];
@@ -3181,17 +3108,17 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
3181 p->plane[0].rotation = crtc->primary->state->rotation; 3108 p->plane[0].rotation = crtc->primary->state->rotation;
3182 3109
3183 fb = crtc->cursor->state->fb; 3110 fb = crtc->cursor->state->fb;
3184 p->cursor.y_bytes_per_pixel = 0; 3111 p->plane[PLANE_CURSOR].y_bytes_per_pixel = 0;
3185 if (fb) { 3112 if (fb) {
3186 p->cursor.enabled = true; 3113 p->plane[PLANE_CURSOR].enabled = true;
3187 p->cursor.bytes_per_pixel = fb->bits_per_pixel / 8; 3114 p->plane[PLANE_CURSOR].bytes_per_pixel = fb->bits_per_pixel / 8;
3188 p->cursor.horiz_pixels = crtc->cursor->state->crtc_w; 3115 p->plane[PLANE_CURSOR].horiz_pixels = crtc->cursor->state->crtc_w;
3189 p->cursor.vert_pixels = crtc->cursor->state->crtc_h; 3116 p->plane[PLANE_CURSOR].vert_pixels = crtc->cursor->state->crtc_h;
3190 } else { 3117 } else {
3191 p->cursor.enabled = false; 3118 p->plane[PLANE_CURSOR].enabled = false;
3192 p->cursor.bytes_per_pixel = 0; 3119 p->plane[PLANE_CURSOR].bytes_per_pixel = 0;
3193 p->cursor.horiz_pixels = 64; 3120 p->plane[PLANE_CURSOR].horiz_pixels = 64;
3194 p->cursor.vert_pixels = 64; 3121 p->plane[PLANE_CURSOR].vert_pixels = 64;
3195 } 3122 }
3196 } 3123 }
3197 3124
@@ -3305,11 +3232,12 @@ static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
3305 &result->plane_res_l[i]); 3232 &result->plane_res_l[i]);
3306 } 3233 }
3307 3234
3308 ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]); 3235 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][PLANE_CURSOR]);
3309 result->cursor_en = skl_compute_plane_wm(dev_priv, p, &p->cursor, 3236 result->plane_en[PLANE_CURSOR] = skl_compute_plane_wm(dev_priv, p,
3237 &p->plane[PLANE_CURSOR],
3310 ddb_blocks, level, 3238 ddb_blocks, level,
3311 &result->cursor_res_b, 3239 &result->plane_res_b[PLANE_CURSOR],
3312 &result->cursor_res_l); 3240 &result->plane_res_l[PLANE_CURSOR]);
3313} 3241}
3314 3242
3315static uint32_t 3243static uint32_t
@@ -3337,7 +3265,7 @@ static void skl_compute_transition_wm(struct drm_crtc *crtc,
3337 /* Until we know more, just disable transition WMs */ 3265 /* Until we know more, just disable transition WMs */
3338 for (i = 0; i < intel_num_planes(intel_crtc); i++) 3266 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3339 trans_wm->plane_en[i] = false; 3267 trans_wm->plane_en[i] = false;
3340 trans_wm->cursor_en = false; 3268 trans_wm->plane_en[PLANE_CURSOR] = false;
3341} 3269}
3342 3270
3343static void skl_compute_pipe_wm(struct drm_crtc *crtc, 3271static void skl_compute_pipe_wm(struct drm_crtc *crtc,
@@ -3386,13 +3314,13 @@ static void skl_compute_wm_results(struct drm_device *dev,
3386 3314
3387 temp = 0; 3315 temp = 0;
3388 3316
3389 temp |= p_wm->wm[level].cursor_res_l << PLANE_WM_LINES_SHIFT; 3317 temp |= p_wm->wm[level].plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
3390 temp |= p_wm->wm[level].cursor_res_b; 3318 temp |= p_wm->wm[level].plane_res_b[PLANE_CURSOR];
3391 3319
3392 if (p_wm->wm[level].cursor_en) 3320 if (p_wm->wm[level].plane_en[PLANE_CURSOR])
3393 temp |= PLANE_WM_EN; 3321 temp |= PLANE_WM_EN;
3394 3322
3395 r->cursor[pipe][level] = temp; 3323 r->plane[pipe][PLANE_CURSOR][level] = temp;
3396 3324
3397 } 3325 }
3398 3326
@@ -3408,12 +3336,12 @@ static void skl_compute_wm_results(struct drm_device *dev,
3408 } 3336 }
3409 3337
3410 temp = 0; 3338 temp = 0;
3411 temp |= p_wm->trans_wm.cursor_res_l << PLANE_WM_LINES_SHIFT; 3339 temp |= p_wm->trans_wm.plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
3412 temp |= p_wm->trans_wm.cursor_res_b; 3340 temp |= p_wm->trans_wm.plane_res_b[PLANE_CURSOR];
3413 if (p_wm->trans_wm.cursor_en) 3341 if (p_wm->trans_wm.plane_en[PLANE_CURSOR])
3414 temp |= PLANE_WM_EN; 3342 temp |= PLANE_WM_EN;
3415 3343
3416 r->cursor_trans[pipe] = temp; 3344 r->plane_trans[pipe][PLANE_CURSOR] = temp;
3417 3345
3418 r->wm_linetime[pipe] = p_wm->linetime; 3346 r->wm_linetime[pipe] = p_wm->linetime;
3419} 3347}
@@ -3447,12 +3375,13 @@ static void skl_write_wm_values(struct drm_i915_private *dev_priv,
3447 I915_WRITE(PLANE_WM(pipe, i, level), 3375 I915_WRITE(PLANE_WM(pipe, i, level),
3448 new->plane[pipe][i][level]); 3376 new->plane[pipe][i][level]);
3449 I915_WRITE(CUR_WM(pipe, level), 3377 I915_WRITE(CUR_WM(pipe, level),
3450 new->cursor[pipe][level]); 3378 new->plane[pipe][PLANE_CURSOR][level]);
3451 } 3379 }
3452 for (i = 0; i < intel_num_planes(crtc); i++) 3380 for (i = 0; i < intel_num_planes(crtc); i++)
3453 I915_WRITE(PLANE_WM_TRANS(pipe, i), 3381 I915_WRITE(PLANE_WM_TRANS(pipe, i),
3454 new->plane_trans[pipe][i]); 3382 new->plane_trans[pipe][i]);
3455 I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]); 3383 I915_WRITE(CUR_WM_TRANS(pipe),
3384 new->plane_trans[pipe][PLANE_CURSOR]);
3456 3385
3457 for (i = 0; i < intel_num_planes(crtc); i++) { 3386 for (i = 0; i < intel_num_planes(crtc); i++) {
3458 skl_ddb_entry_write(dev_priv, 3387 skl_ddb_entry_write(dev_priv,
@@ -3464,7 +3393,7 @@ static void skl_write_wm_values(struct drm_i915_private *dev_priv,
3464 } 3393 }
3465 3394
3466 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), 3395 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
3467 &new->ddb.cursor[pipe]); 3396 &new->ddb.plane[pipe][PLANE_CURSOR]);
3468 } 3397 }
3469} 3398}
3470 3399
@@ -3672,6 +3601,26 @@ static void skl_update_other_pipe_wm(struct drm_device *dev,
3672 } 3601 }
3673} 3602}
3674 3603
3604static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe)
3605{
3606 watermarks->wm_linetime[pipe] = 0;
3607 memset(watermarks->plane[pipe], 0,
3608 sizeof(uint32_t) * 8 * I915_MAX_PLANES);
3609 memset(watermarks->plane_trans[pipe],
3610 0, sizeof(uint32_t) * I915_MAX_PLANES);
3611 watermarks->plane_trans[pipe][PLANE_CURSOR] = 0;
3612
3613 /* Clear ddb entries for pipe */
3614 memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry));
3615 memset(&watermarks->ddb.plane[pipe], 0,
3616 sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
3617 memset(&watermarks->ddb.y_plane[pipe], 0,
3618 sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
3619 memset(&watermarks->ddb.plane[pipe][PLANE_CURSOR], 0,
3620 sizeof(struct skl_ddb_entry));
3621
3622}
3623
3675static void skl_update_wm(struct drm_crtc *crtc) 3624static void skl_update_wm(struct drm_crtc *crtc)
3676{ 3625{
3677 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3626 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -3682,7 +3631,11 @@ static void skl_update_wm(struct drm_crtc *crtc)
3682 struct skl_pipe_wm pipe_wm = {}; 3631 struct skl_pipe_wm pipe_wm = {};
3683 struct intel_wm_config config = {}; 3632 struct intel_wm_config config = {};
3684 3633
3685 memset(results, 0, sizeof(*results)); 3634
3635 /* Clear all dirty flags */
3636 memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES);
3637
3638 skl_clear_wm(results, intel_crtc->pipe);
3686 3639
3687 skl_compute_wm_global_parameters(dev, &config); 3640 skl_compute_wm_global_parameters(dev, &config);
3688 3641
@@ -3737,19 +3690,19 @@ skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
3737static void ilk_update_wm(struct drm_crtc *crtc) 3690static void ilk_update_wm(struct drm_crtc *crtc)
3738{ 3691{
3739 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3692 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3693 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3740 struct drm_device *dev = crtc->dev; 3694 struct drm_device *dev = crtc->dev;
3741 struct drm_i915_private *dev_priv = dev->dev_private; 3695 struct drm_i915_private *dev_priv = dev->dev_private;
3742 struct ilk_wm_maximums max; 3696 struct ilk_wm_maximums max;
3743 struct ilk_pipe_wm_parameters params = {};
3744 struct ilk_wm_values results = {}; 3697 struct ilk_wm_values results = {};
3745 enum intel_ddb_partitioning partitioning; 3698 enum intel_ddb_partitioning partitioning;
3746 struct intel_pipe_wm pipe_wm = {}; 3699 struct intel_pipe_wm pipe_wm = {};
3747 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; 3700 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
3748 struct intel_wm_config config = {}; 3701 struct intel_wm_config config = {};
3749 3702
3750 ilk_compute_wm_parameters(crtc, &params); 3703 WARN_ON(cstate->base.active != intel_crtc->active);
3751 3704
3752 intel_compute_pipe_wm(crtc, &params, &pipe_wm); 3705 intel_compute_pipe_wm(cstate, &pipe_wm);
3753 3706
3754 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm))) 3707 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
3755 return; 3708 return;
@@ -3789,12 +3742,6 @@ ilk_update_sprite_wm(struct drm_plane *plane,
3789 struct drm_device *dev = plane->dev; 3742 struct drm_device *dev = plane->dev;
3790 struct intel_plane *intel_plane = to_intel_plane(plane); 3743 struct intel_plane *intel_plane = to_intel_plane(plane);
3791 3744
3792 intel_plane->wm.enabled = enabled;
3793 intel_plane->wm.scaled = scaled;
3794 intel_plane->wm.horiz_pixels = sprite_width;
3795 intel_plane->wm.vert_pixels = sprite_width;
3796 intel_plane->wm.bytes_per_pixel = pixel_size;
3797
3798 /* 3745 /*
3799 * IVB workaround: must disable low power watermarks for at least 3746 * IVB workaround: must disable low power watermarks for at least
3800 * one frame before enabling scaling. LP watermarks can be re-enabled 3747 * one frame before enabling scaling. LP watermarks can be re-enabled
@@ -3826,10 +3773,10 @@ static void skl_pipe_wm_active_state(uint32_t val,
3826 (val >> PLANE_WM_LINES_SHIFT) & 3773 (val >> PLANE_WM_LINES_SHIFT) &
3827 PLANE_WM_LINES_MASK; 3774 PLANE_WM_LINES_MASK;
3828 } else { 3775 } else {
3829 active->wm[level].cursor_en = is_enabled; 3776 active->wm[level].plane_en[PLANE_CURSOR] = is_enabled;
3830 active->wm[level].cursor_res_b = 3777 active->wm[level].plane_res_b[PLANE_CURSOR] =
3831 val & PLANE_WM_BLOCKS_MASK; 3778 val & PLANE_WM_BLOCKS_MASK;
3832 active->wm[level].cursor_res_l = 3779 active->wm[level].plane_res_l[PLANE_CURSOR] =
3833 (val >> PLANE_WM_LINES_SHIFT) & 3780 (val >> PLANE_WM_LINES_SHIFT) &
3834 PLANE_WM_LINES_MASK; 3781 PLANE_WM_LINES_MASK;
3835 } 3782 }
@@ -3842,10 +3789,10 @@ static void skl_pipe_wm_active_state(uint32_t val,
3842 (val >> PLANE_WM_LINES_SHIFT) & 3789 (val >> PLANE_WM_LINES_SHIFT) &
3843 PLANE_WM_LINES_MASK; 3790 PLANE_WM_LINES_MASK;
3844 } else { 3791 } else {
3845 active->trans_wm.cursor_en = is_enabled; 3792 active->trans_wm.plane_en[PLANE_CURSOR] = is_enabled;
3846 active->trans_wm.cursor_res_b = 3793 active->trans_wm.plane_res_b[PLANE_CURSOR] =
3847 val & PLANE_WM_BLOCKS_MASK; 3794 val & PLANE_WM_BLOCKS_MASK;
3848 active->trans_wm.cursor_res_l = 3795 active->trans_wm.plane_res_l[PLANE_CURSOR] =
3849 (val >> PLANE_WM_LINES_SHIFT) & 3796 (val >> PLANE_WM_LINES_SHIFT) &
3850 PLANE_WM_LINES_MASK; 3797 PLANE_WM_LINES_MASK;
3851 } 3798 }
@@ -3871,12 +3818,12 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3871 for (i = 0; i < intel_num_planes(intel_crtc); i++) 3818 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3872 hw->plane[pipe][i][level] = 3819 hw->plane[pipe][i][level] =
3873 I915_READ(PLANE_WM(pipe, i, level)); 3820 I915_READ(PLANE_WM(pipe, i, level));
3874 hw->cursor[pipe][level] = I915_READ(CUR_WM(pipe, level)); 3821 hw->plane[pipe][PLANE_CURSOR][level] = I915_READ(CUR_WM(pipe, level));
3875 } 3822 }
3876 3823
3877 for (i = 0; i < intel_num_planes(intel_crtc); i++) 3824 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3878 hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i)); 3825 hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
3879 hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe)); 3826 hw->plane_trans[pipe][PLANE_CURSOR] = I915_READ(CUR_WM_TRANS(pipe));
3880 3827
3881 if (!intel_crtc->active) 3828 if (!intel_crtc->active)
3882 return; 3829 return;
@@ -3891,7 +3838,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3891 skl_pipe_wm_active_state(temp, active, false, 3838 skl_pipe_wm_active_state(temp, active, false,
3892 false, i, level); 3839 false, i, level);
3893 } 3840 }
3894 temp = hw->cursor[pipe][level]; 3841 temp = hw->plane[pipe][PLANE_CURSOR][level];
3895 skl_pipe_wm_active_state(temp, active, false, true, i, level); 3842 skl_pipe_wm_active_state(temp, active, false, true, i, level);
3896 } 3843 }
3897 3844
@@ -3900,7 +3847,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3900 skl_pipe_wm_active_state(temp, active, true, false, i, 0); 3847 skl_pipe_wm_active_state(temp, active, true, false, i, 0);
3901 } 3848 }
3902 3849
3903 temp = hw->cursor_trans[pipe]; 3850 temp = hw->plane_trans[pipe][PLANE_CURSOR];
3904 skl_pipe_wm_active_state(temp, active, true, true, i, 0); 3851 skl_pipe_wm_active_state(temp, active, true, true, i, 0);
3905} 3852}
3906 3853
@@ -4261,7 +4208,7 @@ static void ironlake_enable_drps(struct drm_device *dev)
4261 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 4208 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
4262 MEMMODE_FSTART_SHIFT; 4209 MEMMODE_FSTART_SHIFT;
4263 4210
4264 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> 4211 vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
4265 PXVFREQ_PX_SHIFT; 4212 PXVFREQ_PX_SHIFT;
4266 4213
4267 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */ 4214 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
@@ -4292,10 +4239,10 @@ static void ironlake_enable_drps(struct drm_device *dev)
4292 4239
4293 ironlake_set_drps(dev, fstart); 4240 ironlake_set_drps(dev, fstart);
4294 4241
4295 dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + 4242 dev_priv->ips.last_count1 = I915_READ(DMIEC) +
4296 I915_READ(0x112e0); 4243 I915_READ(DDREC) + I915_READ(CSIEC);
4297 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies); 4244 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
4298 dev_priv->ips.last_count2 = I915_READ(0x112f4); 4245 dev_priv->ips.last_count2 = I915_READ(GFXEC);
4299 dev_priv->ips.last_time2 = ktime_get_raw_ns(); 4246 dev_priv->ips.last_time2 = ktime_get_raw_ns();
4300 4247
4301 spin_unlock_irq(&mchdev_lock); 4248 spin_unlock_irq(&mchdev_lock);
@@ -4466,6 +4413,10 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
4466{ 4413{
4467 struct drm_i915_private *dev_priv = dev->dev_private; 4414 struct drm_i915_private *dev_priv = dev->dev_private;
4468 4415
4416 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
4417 if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0))
4418 return;
4419
4469 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4420 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4470 WARN_ON(val > dev_priv->rps.max_freq); 4421 WARN_ON(val > dev_priv->rps.max_freq);
4471 WARN_ON(val < dev_priv->rps.min_freq); 4422 WARN_ON(val < dev_priv->rps.min_freq);
@@ -4786,6 +4737,12 @@ static void gen9_enable_rps(struct drm_device *dev)
4786 4737
4787 gen6_init_rps_frequencies(dev); 4738 gen6_init_rps_frequencies(dev);
4788 4739
4740 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
4741 if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) {
4742 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4743 return;
4744 }
4745
4789 /* Program defaults and thresholds for RPS*/ 4746 /* Program defaults and thresholds for RPS*/
4790 I915_WRITE(GEN6_RC_VIDEO_FREQ, 4747 I915_WRITE(GEN6_RC_VIDEO_FREQ,
4791 GEN9_FREQUENCY(dev_priv->rps.rp1_freq)); 4748 GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
@@ -4823,13 +4780,22 @@ static void gen9_enable_rc6(struct drm_device *dev)
4823 I915_WRITE(GEN6_RC_CONTROL, 0); 4780 I915_WRITE(GEN6_RC_CONTROL, 0);
4824 4781
4825 /* 2b: Program RC6 thresholds.*/ 4782 /* 2b: Program RC6 thresholds.*/
4826 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); 4783
4784 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
4785 if (IS_SKYLAKE(dev) && !((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) &&
4786 (INTEL_REVID(dev) <= SKL_REVID_E0)))
4787 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
4788 else
4789 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
4827 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ 4790 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
4828 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ 4791 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
4829 for_each_ring(ring, dev_priv, unused) 4792 for_each_ring(ring, dev_priv, unused)
4830 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); 4793 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4794
4795 if (HAS_GUC_UCODE(dev))
4796 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
4797
4831 I915_WRITE(GEN6_RC_SLEEP, 0); 4798 I915_WRITE(GEN6_RC_SLEEP, 0);
4832 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
4833 4799
4834 /* 2c: Program Coarse Power Gating Policies. */ 4800 /* 2c: Program Coarse Power Gating Policies. */
4835 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25); 4801 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
@@ -4840,17 +4806,30 @@ static void gen9_enable_rc6(struct drm_device *dev)
4840 rc6_mask = GEN6_RC_CTL_RC6_ENABLE; 4806 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
4841 DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 4807 DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
4842 "on" : "off"); 4808 "on" : "off");
4843 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 4809 /* WaRsUseTimeoutMode */
4844 GEN6_RC_CTL_EI_MODE(1) | 4810 if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_D0) ||
4845 rc6_mask); 4811 (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A0)) {
4812 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
4813 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4814 GEN7_RC_CTL_TO_MODE |
4815 rc6_mask);
4816 } else {
4817 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
4818 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4819 GEN6_RC_CTL_EI_MODE(1) |
4820 rc6_mask);
4821 }
4846 4822
4847 /* 4823 /*
4848 * 3b: Enable Coarse Power Gating only when RC6 is enabled. 4824 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
4849 * WaDisableRenderPowerGating:skl,bxt - Render PG need to be disabled with RC6. 4825 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
4850 */ 4826 */
4851 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 4827 if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
4852 GEN9_MEDIA_PG_ENABLE : 0); 4828 ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_E0)))
4853 4829 I915_WRITE(GEN9_PG_ENABLE, 0);
4830 else
4831 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
4832 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
4854 4833
4855 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4834 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4856 4835
@@ -5148,32 +5127,27 @@ static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
5148 struct drm_device *dev = dev_priv->dev; 5127 struct drm_device *dev = dev_priv->dev;
5149 u32 val, rp0; 5128 u32 val, rp0;
5150 5129
5151 if (dev->pdev->revision >= 0x20) { 5130 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
5152 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
5153 5131
5154 switch (INTEL_INFO(dev)->eu_total) { 5132 switch (INTEL_INFO(dev)->eu_total) {
5155 case 8: 5133 case 8:
5156 /* (2 * 4) config */ 5134 /* (2 * 4) config */
5157 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT); 5135 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
5158 break; 5136 break;
5159 case 12: 5137 case 12:
5160 /* (2 * 6) config */ 5138 /* (2 * 6) config */
5161 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT); 5139 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
5162 break; 5140 break;
5163 case 16: 5141 case 16:
5164 /* (2 * 8) config */ 5142 /* (2 * 8) config */
5165 default: 5143 default:
5166 /* Setting (2 * 8) Min RP0 for any other combination */ 5144 /* Setting (2 * 8) Min RP0 for any other combination */
5167 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT); 5145 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
5168 break; 5146 break;
5169 }
5170 rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
5171 } else {
5172 /* For pre-production hardware */
5173 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
5174 rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
5175 PUNIT_GPU_STATUS_MAX_FREQ_MASK;
5176 } 5147 }
5148
5149 rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
5150
5177 return rp0; 5151 return rp0;
5178} 5152}
5179 5153
@@ -5189,18 +5163,11 @@ static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5189 5163
5190static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv) 5164static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
5191{ 5165{
5192 struct drm_device *dev = dev_priv->dev;
5193 u32 val, rp1; 5166 u32 val, rp1;
5194 5167
5195 if (dev->pdev->revision >= 0x20) { 5168 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
5196 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); 5169 rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
5197 rp1 = (val & FB_GFX_FREQ_FUSE_MASK); 5170
5198 } else {
5199 /* For pre-production hardware */
5200 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5201 rp1 = ((val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
5202 PUNIT_GPU_STATUS_MAX_FREQ_MASK);
5203 }
5204 return rp1; 5171 return rp1;
5205} 5172}
5206 5173
@@ -5415,25 +5382,10 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
5415 mutex_unlock(&dev_priv->sb_lock); 5382 mutex_unlock(&dev_priv->sb_lock);
5416 5383
5417 switch ((val >> 2) & 0x7) { 5384 switch ((val >> 2) & 0x7) {
5418 case 0:
5419 case 1:
5420 dev_priv->rps.cz_freq = 200;
5421 dev_priv->mem_freq = 1600;
5422 break;
5423 case 2:
5424 dev_priv->rps.cz_freq = 267;
5425 dev_priv->mem_freq = 1600;
5426 break;
5427 case 3: 5385 case 3:
5428 dev_priv->rps.cz_freq = 333;
5429 dev_priv->mem_freq = 2000; 5386 dev_priv->mem_freq = 2000;
5430 break; 5387 break;
5431 case 4: 5388 default:
5432 dev_priv->rps.cz_freq = 320;
5433 dev_priv->mem_freq = 1600;
5434 break;
5435 case 5:
5436 dev_priv->rps.cz_freq = 400;
5437 dev_priv->mem_freq = 1600; 5389 dev_priv->mem_freq = 1600;
5438 break; 5390 break;
5439 } 5391 }
@@ -5565,7 +5517,7 @@ static void cherryview_enable_rps(struct drm_device *dev)
5565 /* RPS code assumes GPLL is used */ 5517 /* RPS code assumes GPLL is used */
5566 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); 5518 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5567 5519
5568 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no"); 5520 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
5569 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); 5521 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5570 5522
5571 dev_priv->rps.cur_freq = (val >> 8) & 0xff; 5523 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
@@ -5655,7 +5607,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
5655 /* RPS code assumes GPLL is used */ 5607 /* RPS code assumes GPLL is used */
5656 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); 5608 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5657 5609
5658 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no"); 5610 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
5659 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); 5611 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5660 5612
5661 dev_priv->rps.cur_freq = (val >> 8) & 0xff; 5613 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
@@ -5864,7 +5816,7 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
5864 5816
5865 assert_spin_locked(&mchdev_lock); 5817 assert_spin_locked(&mchdev_lock);
5866 5818
5867 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4)); 5819 pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
5868 pxvid = (pxvid >> 24) & 0x7f; 5820 pxvid = (pxvid >> 24) & 0x7f;
5869 ext_v = pvid_to_extvid(dev_priv, pxvid); 5821 ext_v = pvid_to_extvid(dev_priv, pxvid);
5870 5822
@@ -6107,13 +6059,13 @@ static void intel_init_emon(struct drm_device *dev)
6107 I915_WRITE(CSIEW2, 0x04000004); 6059 I915_WRITE(CSIEW2, 0x04000004);
6108 6060
6109 for (i = 0; i < 5; i++) 6061 for (i = 0; i < 5; i++)
6110 I915_WRITE(PEW + (i * 4), 0); 6062 I915_WRITE(PEW(i), 0);
6111 for (i = 0; i < 3; i++) 6063 for (i = 0; i < 3; i++)
6112 I915_WRITE(DEW + (i * 4), 0); 6064 I915_WRITE(DEW(i), 0);
6113 6065
6114 /* Program P-state weights to account for frequency power adjustment */ 6066 /* Program P-state weights to account for frequency power adjustment */
6115 for (i = 0; i < 16; i++) { 6067 for (i = 0; i < 16; i++) {
6116 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4)); 6068 u32 pxvidfreq = I915_READ(PXVFREQ(i));
6117 unsigned long freq = intel_pxfreq(pxvidfreq); 6069 unsigned long freq = intel_pxfreq(pxvidfreq);
6118 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> 6070 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
6119 PXVFREQ_PX_SHIFT; 6071 PXVFREQ_PX_SHIFT;
@@ -6134,7 +6086,7 @@ static void intel_init_emon(struct drm_device *dev)
6134 for (i = 0; i < 4; i++) { 6086 for (i = 0; i < 4; i++) {
6135 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) | 6087 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
6136 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]); 6088 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
6137 I915_WRITE(PXW + (i * 4), val); 6089 I915_WRITE(PXW(i), val);
6138 } 6090 }
6139 6091
6140 /* Adjust magic regs to magic values (more experimental results) */ 6092 /* Adjust magic regs to magic values (more experimental results) */
@@ -6150,7 +6102,7 @@ static void intel_init_emon(struct drm_device *dev)
6150 I915_WRITE(EG7, 0); 6102 I915_WRITE(EG7, 0);
6151 6103
6152 for (i = 0; i < 8; i++) 6104 for (i = 0; i < 8; i++)
6153 I915_WRITE(PXWL + (i * 4), 0); 6105 I915_WRITE(PXWL(i), 0);
6154 6106
6155 /* Enable PMON + select events */ 6107 /* Enable PMON + select events */
6156 I915_WRITE(ECR, 0x80000019); 6108 I915_WRITE(ECR, 0x80000019);
@@ -6604,14 +6556,14 @@ static void lpt_init_clock_gating(struct drm_device *dev)
6604 * TODO: this bit should only be enabled when really needed, then 6556 * TODO: this bit should only be enabled when really needed, then
6605 * disabled when not needed anymore in order to save power. 6557 * disabled when not needed anymore in order to save power.
6606 */ 6558 */
6607 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) 6559 if (HAS_PCH_LPT_LP(dev))
6608 I915_WRITE(SOUTH_DSPCLK_GATE_D, 6560 I915_WRITE(SOUTH_DSPCLK_GATE_D,
6609 I915_READ(SOUTH_DSPCLK_GATE_D) | 6561 I915_READ(SOUTH_DSPCLK_GATE_D) |
6610 PCH_LP_PARTITION_LEVEL_DISABLE); 6562 PCH_LP_PARTITION_LEVEL_DISABLE);
6611 6563
6612 /* WADPOClockGatingDisable:hsw */ 6564 /* WADPOClockGatingDisable:hsw */
6613 I915_WRITE(_TRANSA_CHICKEN1, 6565 I915_WRITE(TRANS_CHICKEN1(PIPE_A),
6614 I915_READ(_TRANSA_CHICKEN1) | 6566 I915_READ(TRANS_CHICKEN1(PIPE_A)) |
6615 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); 6567 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
6616} 6568}
6617 6569
@@ -6619,7 +6571,7 @@ static void lpt_suspend_hw(struct drm_device *dev)
6619{ 6571{
6620 struct drm_i915_private *dev_priv = dev->dev_private; 6572 struct drm_i915_private *dev_priv = dev->dev_private;
6621 6573
6622 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { 6574 if (HAS_PCH_LPT_LP(dev)) {
6623 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D); 6575 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
6624 6576
6625 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 6577 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
@@ -7105,9 +7057,6 @@ void intel_init_pm(struct drm_device *dev)
7105 if (IS_BROXTON(dev)) 7057 if (IS_BROXTON(dev))
7106 dev_priv->display.init_clock_gating = 7058 dev_priv->display.init_clock_gating =
7107 bxt_init_clock_gating; 7059 bxt_init_clock_gating;
7108 else if (IS_SKYLAKE(dev))
7109 dev_priv->display.init_clock_gating =
7110 skl_init_clock_gating;
7111 dev_priv->display.update_wm = skl_update_wm; 7060 dev_priv->display.update_wm = skl_update_wm;
7112 dev_priv->display.update_sprite_wm = skl_update_sprite_wm; 7061 dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
7113 } else if (HAS_PCH_SPLIT(dev)) { 7062 } else if (HAS_PCH_SPLIT(dev)) {
@@ -7260,7 +7209,7 @@ static int vlv_gpu_freq_div(unsigned int czclk_freq)
7260 7209
7261static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val) 7210static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
7262{ 7211{
7263 int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4); 7212 int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
7264 7213
7265 div = vlv_gpu_freq_div(czclk_freq); 7214 div = vlv_gpu_freq_div(czclk_freq);
7266 if (div < 0) 7215 if (div < 0)
@@ -7271,7 +7220,7 @@ static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
7271 7220
7272static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val) 7221static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
7273{ 7222{
7274 int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4); 7223 int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
7275 7224
7276 mul = vlv_gpu_freq_div(czclk_freq); 7225 mul = vlv_gpu_freq_div(czclk_freq);
7277 if (mul < 0) 7226 if (mul < 0)
@@ -7282,7 +7231,7 @@ static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
7282 7231
7283static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val) 7232static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
7284{ 7233{
7285 int div, czclk_freq = dev_priv->rps.cz_freq; 7234 int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
7286 7235
7287 div = vlv_gpu_freq_div(czclk_freq) / 2; 7236 div = vlv_gpu_freq_div(czclk_freq) / 2;
7288 if (div < 0) 7237 if (div < 0)
@@ -7293,7 +7242,7 @@ static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
7293 7242
7294static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val) 7243static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
7295{ 7244{
7296 int mul, czclk_freq = dev_priv->rps.cz_freq; 7245 int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
7297 7246
7298 mul = vlv_gpu_freq_div(czclk_freq) / 2; 7247 mul = vlv_gpu_freq_div(czclk_freq) / 2;
7299 if (mul < 0) 7248 if (mul < 0)
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index a04b4dc5ed9b..213581c215b3 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -73,14 +73,14 @@ static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
73} 73}
74 74
75static void intel_psr_write_vsc(struct intel_dp *intel_dp, 75static void intel_psr_write_vsc(struct intel_dp *intel_dp,
76 struct edp_vsc_psr *vsc_psr) 76 const struct edp_vsc_psr *vsc_psr)
77{ 77{
78 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 78 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
79 struct drm_device *dev = dig_port->base.base.dev; 79 struct drm_device *dev = dig_port->base.base.dev;
80 struct drm_i915_private *dev_priv = dev->dev_private; 80 struct drm_i915_private *dev_priv = dev->dev_private;
81 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 81 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
82 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config->cpu_transcoder); 82 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
83 u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config->cpu_transcoder); 83 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
84 uint32_t *data = (uint32_t *) vsc_psr; 84 uint32_t *data = (uint32_t *) vsc_psr;
85 unsigned int i; 85 unsigned int i;
86 86
@@ -90,12 +90,14 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp,
90 I915_WRITE(ctl_reg, 0); 90 I915_WRITE(ctl_reg, 0);
91 POSTING_READ(ctl_reg); 91 POSTING_READ(ctl_reg);
92 92
93 for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) { 93 for (i = 0; i < sizeof(*vsc_psr); i += 4) {
94 if (i < sizeof(struct edp_vsc_psr)) 94 I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
95 I915_WRITE(data_reg + i, *data++); 95 i >> 2), *data);
96 else 96 data++;
97 I915_WRITE(data_reg + i, 0);
98 } 97 }
98 for (; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4)
99 I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
100 i >> 2), 0);
99 101
100 I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW); 102 I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
101 POSTING_READ(ctl_reg); 103 POSTING_READ(ctl_reg);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 61b451fbd09e..9461a238f5d5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -719,7 +719,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
719 struct drm_i915_private *dev_priv = dev->dev_private; 719 struct drm_i915_private *dev_priv = dev->dev_private;
720 struct i915_workarounds *w = &dev_priv->workarounds; 720 struct i915_workarounds *w = &dev_priv->workarounds;
721 721
722 if (WARN_ON_ONCE(w->count == 0)) 722 if (w->count == 0)
723 return 0; 723 return 0;
724 724
725 ring->gpu_caches_dirty = true; 725 ring->gpu_caches_dirty = true;
@@ -802,42 +802,29 @@ static int wa_add(struct drm_i915_private *dev_priv,
802 802
803#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val) 803#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
804 804
805static int bdw_init_workarounds(struct intel_engine_cs *ring) 805static int gen8_init_workarounds(struct intel_engine_cs *ring)
806{ 806{
807 struct drm_device *dev = ring->dev; 807 struct drm_device *dev = ring->dev;
808 struct drm_i915_private *dev_priv = dev->dev_private; 808 struct drm_i915_private *dev_priv = dev->dev_private;
809 809
810 WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); 810 WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
811 811
812 /* WaDisableAsyncFlipPerfMode:bdw */ 812 /* WaDisableAsyncFlipPerfMode:bdw,chv */
813 WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE); 813 WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
814 814
815 /* WaDisablePartialInstShootdown:bdw */ 815 /* WaDisablePartialInstShootdown:bdw,chv */
816 /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
817 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, 816 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
818 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE | 817 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
819 STALL_DOP_GATING_DISABLE);
820
821 /* WaDisableDopClockGating:bdw */
822 WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
823 DOP_CLOCK_GATING_DISABLE);
824
825 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
826 GEN8_SAMPLER_POWER_BYPASS_DIS);
827 818
828 /* Use Force Non-Coherent whenever executing a 3D context. This is a 819 /* Use Force Non-Coherent whenever executing a 3D context. This is a
829 * workaround for for a possible hang in the unlikely event a TLB 820 * workaround for for a possible hang in the unlikely event a TLB
830 * invalidation occurs during a PSD flush. 821 * invalidation occurs during a PSD flush.
831 */ 822 */
823 /* WaForceEnableNonCoherent:bdw,chv */
824 /* WaHdcDisableFetchWhenMasked:bdw,chv */
832 WA_SET_BIT_MASKED(HDC_CHICKEN0, 825 WA_SET_BIT_MASKED(HDC_CHICKEN0,
833 /* WaForceEnableNonCoherent:bdw */
834 HDC_FORCE_NON_COHERENT |
835 /* WaForceContextSaveRestoreNonCoherent:bdw */
836 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
837 /* WaHdcDisableFetchWhenMasked:bdw */
838 HDC_DONOT_FETCH_MEM_WHEN_MASKED | 826 HDC_DONOT_FETCH_MEM_WHEN_MASKED |
839 /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */ 827 HDC_FORCE_NON_COHERENT);
840 (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
841 828
842 /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0: 829 /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
843 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping 830 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
@@ -845,13 +832,12 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
845 * stalling waiting for the earlier ones to write to Hierarchical Z 832 * stalling waiting for the earlier ones to write to Hierarchical Z
846 * buffer." 833 * buffer."
847 * 834 *
848 * This optimization is off by default for Broadwell; turn it on. 835 * This optimization is off by default for BDW and CHV; turn it on.
849 */ 836 */
850 WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE); 837 WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
851 838
852 /* Wa4x4STCOptimizationDisable:bdw */ 839 /* Wa4x4STCOptimizationDisable:bdw,chv */
853 WA_SET_BIT_MASKED(CACHE_MODE_1, 840 WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
854 GEN8_4x4_STC_OPTIMIZATION_DISABLE);
855 841
856 /* 842 /*
857 * BSpec recommends 8x4 when MSAA is used, 843 * BSpec recommends 8x4 when MSAA is used,
@@ -868,56 +854,51 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
868 return 0; 854 return 0;
869} 855}
870 856
871static int chv_init_workarounds(struct intel_engine_cs *ring) 857static int bdw_init_workarounds(struct intel_engine_cs *ring)
872{ 858{
859 int ret;
873 struct drm_device *dev = ring->dev; 860 struct drm_device *dev = ring->dev;
874 struct drm_i915_private *dev_priv = dev->dev_private; 861 struct drm_i915_private *dev_priv = dev->dev_private;
875 862
876 WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); 863 ret = gen8_init_workarounds(ring);
864 if (ret)
865 return ret;
877 866
878 /* WaDisableAsyncFlipPerfMode:chv */ 867 /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
879 WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE); 868 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
880 869
881 /* WaDisablePartialInstShootdown:chv */ 870 /* WaDisableDopClockGating:bdw */
882 /* WaDisableThreadStallDopClockGating:chv */ 871 WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
883 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, 872 DOP_CLOCK_GATING_DISABLE);
884 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE | 873
885 STALL_DOP_GATING_DISABLE); 874 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
875 GEN8_SAMPLER_POWER_BYPASS_DIS);
886 876
887 /* Use Force Non-Coherent whenever executing a 3D context. This is a
888 * workaround for a possible hang in the unlikely event a TLB
889 * invalidation occurs during a PSD flush.
890 */
891 /* WaForceEnableNonCoherent:chv */
892 /* WaHdcDisableFetchWhenMasked:chv */
893 WA_SET_BIT_MASKED(HDC_CHICKEN0, 877 WA_SET_BIT_MASKED(HDC_CHICKEN0,
894 HDC_FORCE_NON_COHERENT | 878 /* WaForceContextSaveRestoreNonCoherent:bdw */
895 HDC_DONOT_FETCH_MEM_WHEN_MASKED); 879 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
880 /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
881 (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
896 882
897 /* According to the CACHE_MODE_0 default value documentation, some 883 return 0;
898 * CHV platforms disable this optimization by default. Turn it on. 884}
899 */
900 WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
901 885
902 /* Wa4x4STCOptimizationDisable:chv */ 886static int chv_init_workarounds(struct intel_engine_cs *ring)
903 WA_SET_BIT_MASKED(CACHE_MODE_1, 887{
904 GEN8_4x4_STC_OPTIMIZATION_DISABLE); 888 int ret;
889 struct drm_device *dev = ring->dev;
890 struct drm_i915_private *dev_priv = dev->dev_private;
891
892 ret = gen8_init_workarounds(ring);
893 if (ret)
894 return ret;
895
896 /* WaDisableThreadStallDopClockGating:chv */
897 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
905 898
906 /* Improve HiZ throughput on CHV. */ 899 /* Improve HiZ throughput on CHV. */
907 WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X); 900 WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
908 901
909 /*
910 * BSpec recommends 8x4 when MSAA is used,
911 * however in practice 16x4 seems fastest.
912 *
913 * Note that PS/WM thread counts depend on the WIZ hashing
914 * disable bit, which we don't touch here, but it's good
915 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
916 */
917 WA_SET_FIELD_MASKED(GEN7_GT_MODE,
918 GEN6_WIZ_HASHING_MASK,
919 GEN6_WIZ_HASHING_16x4);
920
921 return 0; 902 return 0;
922} 903}
923 904
@@ -927,6 +908,14 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
927 struct drm_i915_private *dev_priv = dev->dev_private; 908 struct drm_i915_private *dev_priv = dev->dev_private;
928 uint32_t tmp; 909 uint32_t tmp;
929 910
911 /* WaEnableLbsSlaRetryTimerDecrement:skl */
912 I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
913 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
914
915 /* WaDisableKillLogic:bxt,skl */
916 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
917 ECOCHK_DIS_TLB);
918
930 /* WaDisablePartialInstShootdown:skl,bxt */ 919 /* WaDisablePartialInstShootdown:skl,bxt */
931 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, 920 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
932 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); 921 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
@@ -963,10 +952,9 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
963 } 952 }
964 953
965 /* Wa4x4STCOptimizationDisable:skl,bxt */ 954 /* Wa4x4STCOptimizationDisable:skl,bxt */
966 WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
967
968 /* WaDisablePartialResolveInVc:skl,bxt */ 955 /* WaDisablePartialResolveInVc:skl,bxt */
969 WA_SET_BIT_MASKED(CACHE_MODE_1, GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE); 956 WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
957 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
970 958
971 /* WaCcsTlbPrefetchDisable:skl,bxt */ 959 /* WaCcsTlbPrefetchDisable:skl,bxt */
972 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, 960 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
@@ -985,6 +973,16 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
985 tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE; 973 tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
986 WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp); 974 WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
987 975
976 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
977 if (IS_SKYLAKE(dev) ||
978 (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_B0)) {
979 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
980 GEN8_SAMPLER_POWER_BYPASS_DIS);
981 }
982
983 /* WaDisableSTUnitPowerOptimization:skl,bxt */
984 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
985
988 return 0; 986 return 0;
989} 987}
990 988
@@ -1030,13 +1028,39 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
1030 return 0; 1028 return 0;
1031} 1029}
1032 1030
1033
1034static int skl_init_workarounds(struct intel_engine_cs *ring) 1031static int skl_init_workarounds(struct intel_engine_cs *ring)
1035{ 1032{
1033 int ret;
1036 struct drm_device *dev = ring->dev; 1034 struct drm_device *dev = ring->dev;
1037 struct drm_i915_private *dev_priv = dev->dev_private; 1035 struct drm_i915_private *dev_priv = dev->dev_private;
1038 1036
1039 gen9_init_workarounds(ring); 1037 ret = gen9_init_workarounds(ring);
1038 if (ret)
1039 return ret;
1040
1041 if (INTEL_REVID(dev) <= SKL_REVID_D0) {
1042 /* WaDisableHDCInvalidation:skl */
1043 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
1044 BDW_DISABLE_HDC_INVALIDATION);
1045
1046 /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
1047 I915_WRITE(FF_SLICE_CS_CHICKEN2,
1048 _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
1049 }
1050
1051 /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
1052 * involving this register should also be added to WA batch as required.
1053 */
1054 if (INTEL_REVID(dev) <= SKL_REVID_E0)
1055 /* WaDisableLSQCROPERFforOCL:skl */
1056 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
1057 GEN8_LQSC_RO_PERF_DIS);
1058
1059 /* WaEnableGapsTsvCreditFix:skl */
1060 if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) {
1061 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1062 GEN9_GAPS_TSV_CREDIT_DISABLE));
1063 }
1040 1064
1041 /* WaDisablePowerCompilerClockGating:skl */ 1065 /* WaDisablePowerCompilerClockGating:skl */
1042 if (INTEL_REVID(dev) == SKL_REVID_B0) 1066 if (INTEL_REVID(dev) == SKL_REVID_B0)
@@ -1073,10 +1097,24 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
1073 1097
1074static int bxt_init_workarounds(struct intel_engine_cs *ring) 1098static int bxt_init_workarounds(struct intel_engine_cs *ring)
1075{ 1099{
1100 int ret;
1076 struct drm_device *dev = ring->dev; 1101 struct drm_device *dev = ring->dev;
1077 struct drm_i915_private *dev_priv = dev->dev_private; 1102 struct drm_i915_private *dev_priv = dev->dev_private;
1078 1103
1079 gen9_init_workarounds(ring); 1104 ret = gen9_init_workarounds(ring);
1105 if (ret)
1106 return ret;
1107
1108 /* WaStoreMultiplePTEenable:bxt */
1109 /* This is a requirement according to Hardware specification */
1110 if (INTEL_REVID(dev) == BXT_REVID_A0)
1111 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
1112
1113 /* WaSetClckGatingDisableMedia:bxt */
1114 if (INTEL_REVID(dev) == BXT_REVID_A0) {
1115 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
1116 ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
1117 }
1080 1118
1081 /* WaDisableThreadStallDopClockGating:bxt */ 1119 /* WaDisableThreadStallDopClockGating:bxt */
1082 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, 1120 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -1998,14 +2036,14 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
1998 return 0; 2036 return 0;
1999} 2037}
2000 2038
2001void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) 2039static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
2002{ 2040{
2003 drm_gem_object_unreference(&ringbuf->obj->base); 2041 drm_gem_object_unreference(&ringbuf->obj->base);
2004 ringbuf->obj = NULL; 2042 ringbuf->obj = NULL;
2005} 2043}
2006 2044
2007int intel_alloc_ringbuffer_obj(struct drm_device *dev, 2045static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
2008 struct intel_ringbuffer *ringbuf) 2046 struct intel_ringbuffer *ringbuf)
2009{ 2047{
2010 struct drm_i915_gem_object *obj; 2048 struct drm_i915_gem_object *obj;
2011 2049
@@ -2025,6 +2063,48 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
2025 return 0; 2063 return 0;
2026} 2064}
2027 2065
2066struct intel_ringbuffer *
2067intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
2068{
2069 struct intel_ringbuffer *ring;
2070 int ret;
2071
2072 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2073 if (ring == NULL)
2074 return ERR_PTR(-ENOMEM);
2075
2076 ring->ring = engine;
2077
2078 ring->size = size;
2079 /* Workaround an erratum on the i830 which causes a hang if
2080 * the TAIL pointer points to within the last 2 cachelines
2081 * of the buffer.
2082 */
2083 ring->effective_size = size;
2084 if (IS_I830(engine->dev) || IS_845G(engine->dev))
2085 ring->effective_size -= 2 * CACHELINE_BYTES;
2086
2087 ring->last_retired_head = -1;
2088 intel_ring_update_space(ring);
2089
2090 ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
2091 if (ret) {
2092 DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
2093 engine->name, ret);
2094 kfree(ring);
2095 return ERR_PTR(ret);
2096 }
2097
2098 return ring;
2099}
2100
2101void
2102intel_ringbuffer_free(struct intel_ringbuffer *ring)
2103{
2104 intel_destroy_ringbuffer_obj(ring);
2105 kfree(ring);
2106}
2107
2028static int intel_init_ring_buffer(struct drm_device *dev, 2108static int intel_init_ring_buffer(struct drm_device *dev,
2029 struct intel_engine_cs *ring) 2109 struct intel_engine_cs *ring)
2030{ 2110{
@@ -2033,22 +2113,20 @@ static int intel_init_ring_buffer(struct drm_device *dev,
2033 2113
2034 WARN_ON(ring->buffer); 2114 WARN_ON(ring->buffer);
2035 2115
2036 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
2037 if (!ringbuf)
2038 return -ENOMEM;
2039 ring->buffer = ringbuf;
2040
2041 ring->dev = dev; 2116 ring->dev = dev;
2042 INIT_LIST_HEAD(&ring->active_list); 2117 INIT_LIST_HEAD(&ring->active_list);
2043 INIT_LIST_HEAD(&ring->request_list); 2118 INIT_LIST_HEAD(&ring->request_list);
2044 INIT_LIST_HEAD(&ring->execlist_queue); 2119 INIT_LIST_HEAD(&ring->execlist_queue);
2045 i915_gem_batch_pool_init(dev, &ring->batch_pool); 2120 i915_gem_batch_pool_init(dev, &ring->batch_pool);
2046 ringbuf->size = 32 * PAGE_SIZE;
2047 ringbuf->ring = ring;
2048 memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno)); 2121 memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
2049 2122
2050 init_waitqueue_head(&ring->irq_queue); 2123 init_waitqueue_head(&ring->irq_queue);
2051 2124
2125 ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE);
2126 if (IS_ERR(ringbuf))
2127 return PTR_ERR(ringbuf);
2128 ring->buffer = ringbuf;
2129
2052 if (I915_NEED_GFX_HWS(dev)) { 2130 if (I915_NEED_GFX_HWS(dev)) {
2053 ret = init_status_page(ring); 2131 ret = init_status_page(ring);
2054 if (ret) 2132 if (ret)
@@ -2060,15 +2138,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
2060 goto error; 2138 goto error;
2061 } 2139 }
2062 2140
2063 WARN_ON(ringbuf->obj);
2064
2065 ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
2066 if (ret) {
2067 DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
2068 ring->name, ret);
2069 goto error;
2070 }
2071
2072 ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); 2141 ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
2073 if (ret) { 2142 if (ret) {
2074 DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", 2143 DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
@@ -2077,14 +2146,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
2077 goto error; 2146 goto error;
2078 } 2147 }
2079 2148
2080 /* Workaround an erratum on the i830 which causes a hang if
2081 * the TAIL pointer points to within the last 2 cachelines
2082 * of the buffer.
2083 */
2084 ringbuf->effective_size = ringbuf->size;
2085 if (IS_I830(dev) || IS_845G(dev))
2086 ringbuf->effective_size -= 2 * CACHELINE_BYTES;
2087
2088 ret = i915_cmd_parser_init_ring(ring); 2149 ret = i915_cmd_parser_init_ring(ring);
2089 if (ret) 2150 if (ret)
2090 goto error; 2151 goto error;
@@ -2092,7 +2153,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
2092 return 0; 2153 return 0;
2093 2154
2094error: 2155error:
2095 kfree(ringbuf); 2156 intel_ringbuffer_free(ringbuf);
2096 ring->buffer = NULL; 2157 ring->buffer = NULL;
2097 return ret; 2158 return ret;
2098} 2159}
@@ -2100,19 +2161,18 @@ error:
2100void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) 2161void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
2101{ 2162{
2102 struct drm_i915_private *dev_priv; 2163 struct drm_i915_private *dev_priv;
2103 struct intel_ringbuffer *ringbuf;
2104 2164
2105 if (!intel_ring_initialized(ring)) 2165 if (!intel_ring_initialized(ring))
2106 return; 2166 return;
2107 2167
2108 dev_priv = to_i915(ring->dev); 2168 dev_priv = to_i915(ring->dev);
2109 ringbuf = ring->buffer;
2110 2169
2111 intel_stop_ring_buffer(ring); 2170 intel_stop_ring_buffer(ring);
2112 WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0); 2171 WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
2113 2172
2114 intel_unpin_ringbuffer_obj(ringbuf); 2173 intel_unpin_ringbuffer_obj(ring->buffer);
2115 intel_destroy_ringbuffer_obj(ringbuf); 2174 intel_ringbuffer_free(ring->buffer);
2175 ring->buffer = NULL;
2116 2176
2117 if (ring->cleanup) 2177 if (ring->cleanup)
2118 ring->cleanup(ring); 2178 ring->cleanup(ring);
@@ -2121,9 +2181,6 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
2121 2181
2122 i915_cmd_parser_fini_ring(ring); 2182 i915_cmd_parser_fini_ring(ring);
2123 i915_gem_batch_pool_fini(&ring->batch_pool); 2183 i915_gem_batch_pool_fini(&ring->batch_pool);
2124
2125 kfree(ringbuf);
2126 ring->buffer = NULL;
2127} 2184}
2128 2185
2129static int ring_wait_for_space(struct intel_engine_cs *ring, int n) 2186static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
@@ -2610,6 +2667,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2610 GEN8_RING_SEMAPHORE_INIT; 2667 GEN8_RING_SEMAPHORE_INIT;
2611 } 2668 }
2612 } else if (INTEL_INFO(dev)->gen >= 6) { 2669 } else if (INTEL_INFO(dev)->gen >= 6) {
2670 ring->init_context = intel_rcs_ctx_init;
2613 ring->add_request = gen6_add_request; 2671 ring->add_request = gen6_add_request;
2614 ring->flush = gen7_render_ring_flush; 2672 ring->flush = gen7_render_ring_flush;
2615 if (INTEL_INFO(dev)->gen == 6) 2673 if (INTEL_INFO(dev)->gen == 6)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2e85fda94963..49fa41dc0eb6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -377,6 +377,13 @@ intel_ring_sync_index(struct intel_engine_cs *ring,
377 return idx; 377 return idx;
378} 378}
379 379
380static inline void
381intel_flush_status_page(struct intel_engine_cs *ring, int reg)
382{
383 drm_clflush_virt_range(&ring->status_page.page_addr[reg],
384 sizeof(uint32_t));
385}
386
380static inline u32 387static inline u32
381intel_read_status_page(struct intel_engine_cs *ring, 388intel_read_status_page(struct intel_engine_cs *ring,
382 int reg) 389 int reg)
@@ -413,12 +420,12 @@ intel_write_status_page(struct intel_engine_cs *ring,
413#define I915_GEM_HWS_SCRATCH_INDEX 0x40 420#define I915_GEM_HWS_SCRATCH_INDEX 0x40
414#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) 421#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
415 422
416void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf); 423struct intel_ringbuffer *
424intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
417int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, 425int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
418 struct intel_ringbuffer *ringbuf); 426 struct intel_ringbuffer *ringbuf);
419void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf); 427void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
420int intel_alloc_ringbuffer_obj(struct drm_device *dev, 428void intel_ringbuffer_free(struct intel_ringbuffer *ring);
421 struct intel_ringbuffer *ringbuf);
422 429
423void intel_stop_ring_buffer(struct intel_engine_cs *ring); 430void intel_stop_ring_buffer(struct intel_engine_cs *ring);
424void intel_cleanup_ring_buffer(struct intel_engine_cs *ring); 431void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 7401cf90b0db..d89c1d0aa1b7 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -464,14 +464,14 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
464 bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv, 464 bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
465 SKL_DISP_PW_2); 465 SKL_DISP_PW_2);
466 466
467 WARN(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n"); 467 WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
468 WARN(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n"); 468 WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
469 WARN(pg2_enabled, "PG2 not disabled to enable DC5.\n"); 469 WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
470 470
471 WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5), 471 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
472 "DC5 already programmed to be enabled.\n"); 472 "DC5 already programmed to be enabled.\n");
473 WARN(dev_priv->pm.suspended, 473 WARN_ONCE(dev_priv->pm.suspended,
474 "DC5 cannot be enabled, if platform is runtime-suspended.\n"); 474 "DC5 cannot be enabled, if platform is runtime-suspended.\n");
475 475
476 assert_csr_loaded(dev_priv); 476 assert_csr_loaded(dev_priv);
477} 477}
@@ -487,8 +487,8 @@ static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
487 if (dev_priv->power_domains.initializing) 487 if (dev_priv->power_domains.initializing)
488 return; 488 return;
489 489
490 WARN(!pg2_enabled, "PG2 not enabled to disable DC5.\n"); 490 WARN_ONCE(!pg2_enabled, "PG2 not enabled to disable DC5.\n");
491 WARN(dev_priv->pm.suspended, 491 WARN_ONCE(dev_priv->pm.suspended,
492 "Disabling of DC5 while platform is runtime-suspended should never happen.\n"); 492 "Disabling of DC5 while platform is runtime-suspended should never happen.\n");
493} 493}
494 494
@@ -527,12 +527,12 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
527{ 527{
528 struct drm_device *dev = dev_priv->dev; 528 struct drm_device *dev = dev_priv->dev;
529 529
530 WARN(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n"); 530 WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
531 WARN(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n"); 531 WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
532 WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 532 WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
533 "Backlight is not disabled.\n"); 533 "Backlight is not disabled.\n");
534 WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6), 534 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
535 "DC6 already programmed to be enabled.\n"); 535 "DC6 already programmed to be enabled.\n");
536 536
537 assert_csr_loaded(dev_priv); 537 assert_csr_loaded(dev_priv);
538} 538}
@@ -547,8 +547,8 @@ static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
547 return; 547 return;
548 548
549 assert_csr_loaded(dev_priv); 549 assert_csr_loaded(dev_priv);
550 WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6), 550 WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
551 "DC6 already programmed to be disabled.\n"); 551 "DC6 already programmed to be disabled.\n");
552} 552}
553 553
554static void skl_enable_dc6(struct drm_i915_private *dev_priv) 554static void skl_enable_dc6(struct drm_i915_private *dev_priv)
@@ -657,9 +657,15 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
657 } 657 }
658 } else { 658 } else {
659 if (enable_requested) { 659 if (enable_requested) {
660 I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask); 660 if (IS_SKYLAKE(dev) &&
661 POSTING_READ(HSW_PWR_WELL_DRIVER); 661 (power_well->data == SKL_DISP_PW_1) &&
662 DRM_DEBUG_KMS("Disabling %s\n", power_well->name); 662 (intel_csr_load_status_get(dev_priv) == FW_LOADED))
663 DRM_DEBUG_KMS("Not Disabling PW1, dmc will handle\n");
664 else {
665 I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
666 POSTING_READ(HSW_PWR_WELL_DRIVER);
667 DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
668 }
663 669
664 if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) && 670 if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
665 power_well->data == SKL_DISP_PW_2) { 671 power_well->data == SKL_DISP_PW_2) {
@@ -671,7 +677,7 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
671 wait_for((state = intel_csr_load_status_get(dev_priv)) != 677 wait_for((state = intel_csr_load_status_get(dev_priv)) !=
672 FW_UNINITIALIZED, 1000); 678 FW_UNINITIALIZED, 1000);
673 if (state != FW_LOADED) 679 if (state != FW_LOADED)
674 DRM_ERROR("CSR firmware not ready (%d)\n", 680 DRM_DEBUG("CSR firmware not ready (%d)\n",
675 state); 681 state);
676 else 682 else
677 if (SKL_ENABLE_DC6(dev)) 683 if (SKL_ENABLE_DC6(dev))
@@ -856,6 +862,25 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
856 862
857static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) 863static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
858{ 864{
865 enum pipe pipe;
866
867 /*
868 * Enable the CRI clock source so we can get at the
869 * display and the reference clock for VGA
870 * hotplug / manual detection. Supposedly DSI also
871 * needs the ref clock up and running.
872 *
873 * CHV DPLL B/C have some issues if VGA mode is enabled.
874 */
875 for_each_pipe(dev_priv->dev, pipe) {
876 u32 val = I915_READ(DPLL(pipe));
877
878 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
879 if (pipe != PIPE_A)
880 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
881
882 I915_WRITE(DPLL(pipe), val);
883 }
859 884
860 spin_lock_irq(&dev_priv->irq_lock); 885 spin_lock_irq(&dev_priv->irq_lock);
861 valleyview_enable_display_irqs(dev_priv); 886 valleyview_enable_display_irqs(dev_priv);
@@ -907,13 +932,7 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
907{ 932{
908 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC); 933 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
909 934
910 /* 935 /* since ref/cri clock was enabled */
911 * Enable the CRI clock source so we can get at the
912 * display and the reference clock for VGA
913 * hotplug / manual detection.
914 */
915 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
916 DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
917 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ 936 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
918 937
919 vlv_set_power_well(dev_priv, power_well, true); 938 vlv_set_power_well(dev_priv, power_well, true);
@@ -948,30 +967,149 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
948 vlv_set_power_well(dev_priv, power_well, false); 967 vlv_set_power_well(dev_priv, power_well, false);
949} 968}
950 969
970#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
971
972static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
973 int power_well_id)
974{
975 struct i915_power_domains *power_domains = &dev_priv->power_domains;
976 struct i915_power_well *power_well;
977 int i;
978
979 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
980 if (power_well->data == power_well_id)
981 return power_well;
982 }
983
984 return NULL;
985}
986
987#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
988
989static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
990{
991 struct i915_power_well *cmn_bc =
992 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
993 struct i915_power_well *cmn_d =
994 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
995 u32 phy_control = dev_priv->chv_phy_control;
996 u32 phy_status = 0;
997 u32 phy_status_mask = 0xffffffff;
998 u32 tmp;
999
1000 /*
1001 * The BIOS can leave the PHY is some weird state
1002 * where it doesn't fully power down some parts.
1003 * Disable the asserts until the PHY has been fully
1004 * reset (ie. the power well has been disabled at
1005 * least once).
1006 */
1007 if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1008 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1009 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1010 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1011 PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1012 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1013 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1014
1015 if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1016 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1017 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1018 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1019
1020 if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
1021 phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1022
1023 /* this assumes override is only used to enable lanes */
1024 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1025 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1026
1027 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1028 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1029
1030 /* CL1 is on whenever anything is on in either channel */
1031 if (BITS_SET(phy_control,
1032 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1033 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1034 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1035
1036 /*
1037 * The DPLLB check accounts for the pipe B + port A usage
1038 * with CL2 powered up but all the lanes in the second channel
1039 * powered down.
1040 */
1041 if (BITS_SET(phy_control,
1042 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1043 (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1044 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
1045
1046 if (BITS_SET(phy_control,
1047 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1048 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1049 if (BITS_SET(phy_control,
1050 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1051 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1052
1053 if (BITS_SET(phy_control,
1054 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1055 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1056 if (BITS_SET(phy_control,
1057 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1058 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1059 }
1060
1061 if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
1062 phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1063
1064 /* this assumes override is only used to enable lanes */
1065 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1066 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1067
1068 if (BITS_SET(phy_control,
1069 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1070 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1071
1072 if (BITS_SET(phy_control,
1073 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1074 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1075 if (BITS_SET(phy_control,
1076 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1077 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1078 }
1079
1080 phy_status &= phy_status_mask;
1081
1082 /*
1083 * The PHY may be busy with some initial calibration and whatnot,
1084 * so the power state can take a while to actually change.
1085 */
1086 if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10))
1087 WARN(phy_status != tmp,
1088 "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1089 tmp, phy_status, dev_priv->chv_phy_control);
1090}
1091
1092#undef BITS_SET
1093
951static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, 1094static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
952 struct i915_power_well *power_well) 1095 struct i915_power_well *power_well)
953{ 1096{
954 enum dpio_phy phy; 1097 enum dpio_phy phy;
1098 enum pipe pipe;
1099 uint32_t tmp;
955 1100
956 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC && 1101 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
957 power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D); 1102 power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
958 1103
959 /*
960 * Enable the CRI clock source so we can get at the
961 * display and the reference clock for VGA
962 * hotplug / manual detection.
963 */
964 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) { 1104 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
1105 pipe = PIPE_A;
965 phy = DPIO_PHY0; 1106 phy = DPIO_PHY0;
966 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
967 DPLL_REF_CLK_ENABLE_VLV);
968 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
969 DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
970 } else { 1107 } else {
1108 pipe = PIPE_C;
971 phy = DPIO_PHY1; 1109 phy = DPIO_PHY1;
972 I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) | DPLL_VGA_MODE_DIS |
973 DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
974 } 1110 }
1111
1112 /* since ref/cri clock was enabled */
975 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ 1113 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
976 vlv_set_power_well(dev_priv, power_well, true); 1114 vlv_set_power_well(dev_priv, power_well, true);
977 1115
@@ -979,8 +1117,38 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
979 if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1)) 1117 if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
980 DRM_ERROR("Display PHY %d is not power up\n", phy); 1118 DRM_ERROR("Display PHY %d is not power up\n", phy);
981 1119
1120 mutex_lock(&dev_priv->sb_lock);
1121
1122 /* Enable dynamic power down */
1123 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1124 tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1125 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1126 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1127
1128 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
1129 tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1130 tmp |= DPIO_DYNPWRDOWNEN_CH1;
1131 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1132 } else {
1133 /*
1134 * Force the non-existing CL2 off. BXT does this
1135 * too, so maybe it saves some power even though
1136 * CL2 doesn't exist?
1137 */
1138 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1139 tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1140 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1141 }
1142
1143 mutex_unlock(&dev_priv->sb_lock);
1144
982 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); 1145 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
983 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); 1146 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1147
1148 DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1149 phy, dev_priv->chv_phy_control);
1150
1151 assert_chv_phy_status(dev_priv);
984} 1152}
985 1153
986static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 1154static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
@@ -1004,6 +1172,137 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1004 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); 1172 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1005 1173
1006 vlv_set_power_well(dev_priv, power_well, false); 1174 vlv_set_power_well(dev_priv, power_well, false);
1175
1176 DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1177 phy, dev_priv->chv_phy_control);
1178
1179 /* PHY is fully reset now, so we can enable the PHY state asserts */
1180 dev_priv->chv_phy_assert[phy] = true;
1181
1182 assert_chv_phy_status(dev_priv);
1183}
1184
1185static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1186 enum dpio_channel ch, bool override, unsigned int mask)
1187{
1188 enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1189 u32 reg, val, expected, actual;
1190
1191 /*
1192 * The BIOS can leave the PHY is some weird state
1193 * where it doesn't fully power down some parts.
1194 * Disable the asserts until the PHY has been fully
1195 * reset (ie. the power well has been disabled at
1196 * least once).
1197 */
1198 if (!dev_priv->chv_phy_assert[phy])
1199 return;
1200
1201 if (ch == DPIO_CH0)
1202 reg = _CHV_CMN_DW0_CH0;
1203 else
1204 reg = _CHV_CMN_DW6_CH1;
1205
1206 mutex_lock(&dev_priv->sb_lock);
1207 val = vlv_dpio_read(dev_priv, pipe, reg);
1208 mutex_unlock(&dev_priv->sb_lock);
1209
1210 /*
1211 * This assumes !override is only used when the port is disabled.
1212 * All lanes should power down even without the override when
1213 * the port is disabled.
1214 */
1215 if (!override || mask == 0xf) {
1216 expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1217 /*
1218 * If CH1 common lane is not active anymore
1219 * (eg. for pipe B DPLL) the entire channel will
1220 * shut down, which causes the common lane registers
1221 * to read as 0. That means we can't actually check
1222 * the lane power down status bits, but as the entire
1223 * register reads as 0 it's a good indication that the
1224 * channel is indeed entirely powered down.
1225 */
1226 if (ch == DPIO_CH1 && val == 0)
1227 expected = 0;
1228 } else if (mask != 0x0) {
1229 expected = DPIO_ANYDL_POWERDOWN;
1230 } else {
1231 expected = 0;
1232 }
1233
1234 if (ch == DPIO_CH0)
1235 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1236 else
1237 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1238 actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1239
1240 WARN(actual != expected,
1241 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1242 !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
1243 !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
1244 reg, val);
1245}
1246
1247bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1248 enum dpio_channel ch, bool override)
1249{
1250 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1251 bool was_override;
1252
1253 mutex_lock(&power_domains->lock);
1254
1255 was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1256
1257 if (override == was_override)
1258 goto out;
1259
1260 if (override)
1261 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1262 else
1263 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1264
1265 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1266
1267 DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1268 phy, ch, dev_priv->chv_phy_control);
1269
1270 assert_chv_phy_status(dev_priv);
1271
1272out:
1273 mutex_unlock(&power_domains->lock);
1274
1275 return was_override;
1276}
1277
1278void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1279 bool override, unsigned int mask)
1280{
1281 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1282 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1283 enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
1284 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1285
1286 mutex_lock(&power_domains->lock);
1287
1288 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1289 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1290
1291 if (override)
1292 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1293 else
1294 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1295
1296 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1297
1298 DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1299 phy, ch, mask, dev_priv->chv_phy_control);
1300
1301 assert_chv_phy_status(dev_priv);
1302
1303 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1304
1305 mutex_unlock(&power_domains->lock);
1007} 1306}
1008 1307
1009static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, 1308static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
@@ -1167,8 +1466,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
1167 intel_runtime_pm_put(dev_priv); 1466 intel_runtime_pm_put(dev_priv);
1168} 1467}
1169 1468
1170#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
1171
1172#define HSW_ALWAYS_ON_POWER_DOMAINS ( \ 1469#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
1173 BIT(POWER_DOMAIN_PIPE_A) | \ 1470 BIT(POWER_DOMAIN_PIPE_A) | \
1174 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \ 1471 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
@@ -1430,21 +1727,6 @@ static struct i915_power_well chv_power_wells[] = {
1430 }, 1727 },
1431}; 1728};
1432 1729
1433static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
1434 int power_well_id)
1435{
1436 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1437 struct i915_power_well *power_well;
1438 int i;
1439
1440 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
1441 if (power_well->data == power_well_id)
1442 return power_well;
1443 }
1444
1445 return NULL;
1446}
1447
1448bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, 1730bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
1449 int power_well_id) 1731 int power_well_id)
1450{ 1732{
@@ -1529,6 +1811,21 @@ static struct i915_power_well bxt_power_wells[] = {
1529 } 1811 }
1530}; 1812};
1531 1813
1814static int
1815sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
1816 int disable_power_well)
1817{
1818 if (disable_power_well >= 0)
1819 return !!disable_power_well;
1820
1821 if (IS_SKYLAKE(dev_priv)) {
1822 DRM_DEBUG_KMS("Disabling display power well support\n");
1823 return 0;
1824 }
1825
1826 return 1;
1827}
1828
1532#define set_power_wells(power_domains, __power_wells) ({ \ 1829#define set_power_wells(power_domains, __power_wells) ({ \
1533 (power_domains)->power_wells = (__power_wells); \ 1830 (power_domains)->power_wells = (__power_wells); \
1534 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \ 1831 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
@@ -1545,6 +1842,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
1545{ 1842{
1546 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1843 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1547 1844
1845 i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
1846 i915.disable_power_well);
1847
1548 mutex_init(&power_domains->lock); 1848 mutex_init(&power_domains->lock);
1549 1849
1550 /* 1850 /*
@@ -1583,7 +1883,6 @@ static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
1583 1883
1584 /* Make sure we're not suspended first. */ 1884 /* Make sure we're not suspended first. */
1585 pm_runtime_get_sync(device); 1885 pm_runtime_get_sync(device);
1586 pm_runtime_disable(device);
1587} 1886}
1588 1887
1589/** 1888/**
@@ -1630,19 +1929,80 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
1630 * DISPLAY_PHY_CONTROL can get corrupted if read. As a 1929 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
1631 * workaround never ever read DISPLAY_PHY_CONTROL, and 1930 * workaround never ever read DISPLAY_PHY_CONTROL, and
1632 * instead maintain a shadow copy ourselves. Use the actual 1931 * instead maintain a shadow copy ourselves. Use the actual
1633 * power well state to reconstruct the expected initial 1932 * power well state and lane status to reconstruct the
1634 * value. 1933 * expected initial value.
1635 */ 1934 */
1636 dev_priv->chv_phy_control = 1935 dev_priv->chv_phy_control =
1637 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) | 1936 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
1638 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) | 1937 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
1639 PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY0, DPIO_CH0) | 1938 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
1640 PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY0, DPIO_CH1) | 1939 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
1641 PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY1, DPIO_CH0); 1940 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
1642 if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) 1941
1942 /*
1943 * If all lanes are disabled we leave the override disabled
1944 * with all power down bits cleared to match the state we
1945 * would use after disabling the port. Otherwise enable the
1946 * override and set the lane powerdown bits accding to the
1947 * current lane status.
1948 */
1949 if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
1950 uint32_t status = I915_READ(DPLL(PIPE_A));
1951 unsigned int mask;
1952
1953 mask = status & DPLL_PORTB_READY_MASK;
1954 if (mask == 0xf)
1955 mask = 0x0;
1956 else
1957 dev_priv->chv_phy_control |=
1958 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
1959
1960 dev_priv->chv_phy_control |=
1961 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
1962
1963 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
1964 if (mask == 0xf)
1965 mask = 0x0;
1966 else
1967 dev_priv->chv_phy_control |=
1968 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
1969
1970 dev_priv->chv_phy_control |=
1971 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
1972
1643 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0); 1973 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
1644 if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) 1974
1975 dev_priv->chv_phy_assert[DPIO_PHY0] = false;
1976 } else {
1977 dev_priv->chv_phy_assert[DPIO_PHY0] = true;
1978 }
1979
1980 if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
1981 uint32_t status = I915_READ(DPIO_PHY_STATUS);
1982 unsigned int mask;
1983
1984 mask = status & DPLL_PORTD_READY_MASK;
1985
1986 if (mask == 0xf)
1987 mask = 0x0;
1988 else
1989 dev_priv->chv_phy_control |=
1990 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
1991
1992 dev_priv->chv_phy_control |=
1993 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
1994
1645 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1); 1995 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
1996
1997 dev_priv->chv_phy_assert[DPIO_PHY1] = false;
1998 } else {
1999 dev_priv->chv_phy_assert[DPIO_PHY1] = true;
2000 }
2001
2002 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
2003
2004 DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
2005 dev_priv->chv_phy_control);
1646} 2006}
1647 2007
1648static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) 2008static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
@@ -1688,7 +2048,9 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
1688 power_domains->initializing = true; 2048 power_domains->initializing = true;
1689 2049
1690 if (IS_CHERRYVIEW(dev)) { 2050 if (IS_CHERRYVIEW(dev)) {
2051 mutex_lock(&power_domains->lock);
1691 chv_phy_control_init(dev_priv); 2052 chv_phy_control_init(dev_priv);
2053 mutex_unlock(&power_domains->lock);
1692 } else if (IS_VALLEYVIEW(dev)) { 2054 } else if (IS_VALLEYVIEW(dev)) {
1693 mutex_lock(&power_domains->lock); 2055 mutex_lock(&power_domains->lock);
1694 vlv_cmnlane_wa(dev_priv); 2056 vlv_cmnlane_wa(dev_priv);
@@ -1820,8 +2182,6 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
1820 if (!HAS_RUNTIME_PM(dev)) 2182 if (!HAS_RUNTIME_PM(dev))
1821 return; 2183 return;
1822 2184
1823 pm_runtime_set_active(device);
1824
1825 /* 2185 /*
1826 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a 2186 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
1827 * requirement. 2187 * requirement.
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index c98098e884cc..c42b636c2087 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -53,7 +53,7 @@
53#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK)) 53#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
54 54
55 55
56static const char *tv_format_names[] = { 56static const char * const tv_format_names[] = {
57 "NTSC_M" , "NTSC_J" , "NTSC_443", 57 "NTSC_M" , "NTSC_J" , "NTSC_443",
58 "PAL_B" , "PAL_D" , "PAL_G" , 58 "PAL_B" , "PAL_D" , "PAL_G" ,
59 "PAL_H" , "PAL_I" , "PAL_M" , 59 "PAL_H" , "PAL_I" , "PAL_M" ,
@@ -63,7 +63,7 @@ static const char *tv_format_names[] = {
63 "SECAM_60" 63 "SECAM_60"
64}; 64};
65 65
66#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names)) 66#define TV_FORMAT_NUM ARRAY_SIZE(tv_format_names)
67 67
68struct intel_sdvo { 68struct intel_sdvo {
69 struct intel_encoder base; 69 struct intel_encoder base;
@@ -107,6 +107,11 @@ struct intel_sdvo {
107 bool color_range_auto; 107 bool color_range_auto;
108 108
109 /** 109 /**
110 * HDMI user specified aspect ratio
111 */
112 enum hdmi_picture_aspect aspect_ratio;
113
114 /**
110 * This is set if we're going to treat the device as TV-out. 115 * This is set if we're going to treat the device as TV-out.
111 * 116 *
112 * While we have these nice friendly flags for output types that ought 117 * While we have these nice friendly flags for output types that ought
@@ -452,7 +457,7 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
452 DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer); 457 DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer);
453} 458}
454 459
455static const char *cmd_status_names[] = { 460static const char * const cmd_status_names[] = {
456 "Power on", 461 "Power on",
457 "Success", 462 "Success",
458 "Not supported", 463 "Not supported",
@@ -603,11 +608,11 @@ log_fail:
603 return false; 608 return false;
604} 609}
605 610
606static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) 611static int intel_sdvo_get_pixel_multiplier(const struct drm_display_mode *adjusted_mode)
607{ 612{
608 if (mode->clock >= 100000) 613 if (adjusted_mode->crtc_clock >= 100000)
609 return 1; 614 return 1;
610 else if (mode->clock >= 50000) 615 else if (adjusted_mode->crtc_clock >= 50000)
611 return 2; 616 return 2;
612 else 617 else
613 return 4; 618 return 4;
@@ -1181,6 +1186,10 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1181 if (intel_sdvo->is_tv) 1186 if (intel_sdvo->is_tv)
1182 i9xx_adjust_sdvo_tv_clock(pipe_config); 1187 i9xx_adjust_sdvo_tv_clock(pipe_config);
1183 1188
1189 /* Set user selected PAR to incoming mode's member */
1190 if (intel_sdvo->is_hdmi)
1191 adjusted_mode->picture_aspect_ratio = intel_sdvo->aspect_ratio;
1192
1184 return true; 1193 return true;
1185} 1194}
1186 1195
@@ -1189,8 +1198,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
1189 struct drm_device *dev = intel_encoder->base.dev; 1198 struct drm_device *dev = intel_encoder->base.dev;
1190 struct drm_i915_private *dev_priv = dev->dev_private; 1199 struct drm_i915_private *dev_priv = dev->dev_private;
1191 struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc); 1200 struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
1192 struct drm_display_mode *adjusted_mode = 1201 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1193 &crtc->config->base.adjusted_mode;
1194 struct drm_display_mode *mode = &crtc->config->base.mode; 1202 struct drm_display_mode *mode = &crtc->config->base.mode;
1195 struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder); 1203 struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
1196 u32 sdvox; 1204 u32 sdvox;
@@ -2044,6 +2052,23 @@ intel_sdvo_set_property(struct drm_connector *connector,
2044 goto done; 2052 goto done;
2045 } 2053 }
2046 2054
2055 if (property == connector->dev->mode_config.aspect_ratio_property) {
2056 switch (val) {
2057 case DRM_MODE_PICTURE_ASPECT_NONE:
2058 intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
2059 break;
2060 case DRM_MODE_PICTURE_ASPECT_4_3:
2061 intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_4_3;
2062 break;
2063 case DRM_MODE_PICTURE_ASPECT_16_9:
2064 intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_16_9;
2065 break;
2066 default:
2067 return -EINVAL;
2068 }
2069 goto done;
2070 }
2071
2047#define CHECK_PROPERTY(name, NAME) \ 2072#define CHECK_PROPERTY(name, NAME) \
2048 if (intel_sdvo_connector->name == property) { \ 2073 if (intel_sdvo_connector->name == property) { \
2049 if (intel_sdvo_connector->cur_##name == temp_value) return 0; \ 2074 if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
@@ -2222,7 +2247,7 @@ intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
2222 */ 2247 */
2223static void 2248static void
2224intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, 2249intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
2225 struct intel_sdvo *sdvo, u32 reg) 2250 struct intel_sdvo *sdvo)
2226{ 2251{
2227 struct sdvo_device_mapping *mapping; 2252 struct sdvo_device_mapping *mapping;
2228 2253
@@ -2239,7 +2264,7 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
2239 2264
2240static void 2265static void
2241intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, 2266intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
2242 struct intel_sdvo *sdvo, u32 reg) 2267 struct intel_sdvo *sdvo)
2243{ 2268{
2244 struct sdvo_device_mapping *mapping; 2269 struct sdvo_device_mapping *mapping;
2245 u8 pin; 2270 u8 pin;
@@ -2383,6 +2408,8 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
2383 intel_attach_broadcast_rgb_property(&connector->base.base); 2408 intel_attach_broadcast_rgb_property(&connector->base.base);
2384 intel_sdvo->color_range_auto = true; 2409 intel_sdvo->color_range_auto = true;
2385 } 2410 }
2411 intel_attach_aspect_ratio_property(&connector->base.base);
2412 intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
2386} 2413}
2387 2414
2388static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void) 2415static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
@@ -2925,7 +2952,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2925 intel_sdvo->sdvo_reg = sdvo_reg; 2952 intel_sdvo->sdvo_reg = sdvo_reg;
2926 intel_sdvo->is_sdvob = is_sdvob; 2953 intel_sdvo->is_sdvob = is_sdvob;
2927 intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1; 2954 intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
2928 intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg); 2955 intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo);
2929 if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) 2956 if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
2930 goto err_i2c_bus; 2957 goto err_i2c_bus;
2931 2958
@@ -2987,7 +3014,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2987 */ 3014 */
2988 intel_sdvo->base.cloneable = 0; 3015 intel_sdvo->base.cloneable = 0;
2989 3016
2990 intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); 3017 intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo);
2991 3018
2992 /* Set the input timing to the screen. Assume always input 0. */ 3019 /* Set the input timing to the screen. Assume always input 0. */
2993 if (!intel_sdvo_set_target_input(intel_sdvo)) 3020 if (!intel_sdvo_set_target_input(intel_sdvo))
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 9d8af2f8a875..56dc132e8e20 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -53,13 +53,15 @@ format_is_yuv(uint32_t format)
53 } 53 }
54} 54}
55 55
56static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs) 56static int usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
57 int usecs)
57{ 58{
58 /* paranoia */ 59 /* paranoia */
59 if (!mode->crtc_htotal) 60 if (!adjusted_mode->crtc_htotal)
60 return 1; 61 return 1;
61 62
62 return DIV_ROUND_UP(usecs * mode->crtc_clock, 1000 * mode->crtc_htotal); 63 return DIV_ROUND_UP(usecs * adjusted_mode->crtc_clock,
64 1000 * adjusted_mode->crtc_htotal);
63} 65}
64 66
65/** 67/**
@@ -76,26 +78,25 @@ static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
76 * avoid random delays. The value written to @start_vbl_count should be 78 * avoid random delays. The value written to @start_vbl_count should be
77 * supplied to intel_pipe_update_end() for error checking. 79 * supplied to intel_pipe_update_end() for error checking.
78 */ 80 */
79void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count) 81void intel_pipe_update_start(struct intel_crtc *crtc)
80{ 82{
81 struct drm_device *dev = crtc->base.dev; 83 struct drm_device *dev = crtc->base.dev;
82 const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode; 84 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
83 enum pipe pipe = crtc->pipe; 85 enum pipe pipe = crtc->pipe;
84 long timeout = msecs_to_jiffies_timeout(1); 86 long timeout = msecs_to_jiffies_timeout(1);
85 int scanline, min, max, vblank_start; 87 int scanline, min, max, vblank_start;
86 wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); 88 wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
87 DEFINE_WAIT(wait); 89 DEFINE_WAIT(wait);
88 90
89 vblank_start = mode->crtc_vblank_start; 91 vblank_start = adjusted_mode->crtc_vblank_start;
90 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 92 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
91 vblank_start = DIV_ROUND_UP(vblank_start, 2); 93 vblank_start = DIV_ROUND_UP(vblank_start, 2);
92 94
93 /* FIXME needs to be calibrated sensibly */ 95 /* FIXME needs to be calibrated sensibly */
94 min = vblank_start - usecs_to_scanlines(mode, 100); 96 min = vblank_start - usecs_to_scanlines(adjusted_mode, 100);
95 max = vblank_start - 1; 97 max = vblank_start - 1;
96 98
97 local_irq_disable(); 99 local_irq_disable();
98 *start_vbl_count = 0;
99 100
100 if (min <= 0 || max <= 0) 101 if (min <= 0 || max <= 0)
101 return; 102 return;
@@ -103,7 +104,9 @@ void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
103 if (WARN_ON(drm_crtc_vblank_get(&crtc->base))) 104 if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
104 return; 105 return;
105 106
106 trace_i915_pipe_update_start(crtc, min, max); 107 crtc->debug.min_vbl = min;
108 crtc->debug.max_vbl = max;
109 trace_i915_pipe_update_start(crtc);
107 110
108 for (;;) { 111 for (;;) {
109 /* 112 /*
@@ -134,9 +137,12 @@ void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
134 137
135 drm_crtc_vblank_put(&crtc->base); 138 drm_crtc_vblank_put(&crtc->base);
136 139
137 *start_vbl_count = dev->driver->get_vblank_counter(dev, pipe); 140 crtc->debug.scanline_start = scanline;
141 crtc->debug.start_vbl_time = ktime_get();
142 crtc->debug.start_vbl_count =
143 dev->driver->get_vblank_counter(dev, pipe);
138 144
139 trace_i915_pipe_update_vblank_evaded(crtc, min, max, *start_vbl_count); 145 trace_i915_pipe_update_vblank_evaded(crtc);
140} 146}
141 147
142/** 148/**
@@ -148,19 +154,27 @@ void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
148 * re-enables interrupts and verifies the update was actually completed 154 * re-enables interrupts and verifies the update was actually completed
149 * before a vblank using the value of @start_vbl_count. 155 * before a vblank using the value of @start_vbl_count.
150 */ 156 */
151void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count) 157void intel_pipe_update_end(struct intel_crtc *crtc)
152{ 158{
153 struct drm_device *dev = crtc->base.dev; 159 struct drm_device *dev = crtc->base.dev;
154 enum pipe pipe = crtc->pipe; 160 enum pipe pipe = crtc->pipe;
161 int scanline_end = intel_get_crtc_scanline(crtc);
155 u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe); 162 u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
163 ktime_t end_vbl_time = ktime_get();
156 164
157 trace_i915_pipe_update_end(crtc, end_vbl_count); 165 trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
158 166
159 local_irq_enable(); 167 local_irq_enable();
160 168
161 if (start_vbl_count && start_vbl_count != end_vbl_count) 169 if (crtc->debug.start_vbl_count &&
162 DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u)\n", 170 crtc->debug.start_vbl_count != end_vbl_count) {
163 pipe_name(pipe), start_vbl_count, end_vbl_count); 171 DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
172 pipe_name(pipe), crtc->debug.start_vbl_count,
173 end_vbl_count,
174 ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
175 crtc->debug.min_vbl, crtc->debug.max_vbl,
176 crtc->debug.scanline_start, scanline_end);
177 }
164} 178}
165 179
166static void 180static void
@@ -189,6 +203,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
189 int scaler_id; 203 int scaler_id;
190 204
191 plane_ctl = PLANE_CTL_ENABLE | 205 plane_ctl = PLANE_CTL_ENABLE |
206 PLANE_CTL_PIPE_GAMMA_ENABLE |
192 PLANE_CTL_PIPE_CSC_ENABLE; 207 PLANE_CTL_PIPE_CSC_ENABLE;
193 208
194 plane_ctl |= skl_plane_ctl_format(fb->pixel_format); 209 plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
@@ -223,12 +238,12 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
223 else if (key->flags & I915_SET_COLORKEY_SOURCE) 238 else if (key->flags & I915_SET_COLORKEY_SOURCE)
224 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE; 239 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
225 240
226 surf_addr = intel_plane_obj_offset(intel_plane, obj); 241 surf_addr = intel_plane_obj_offset(intel_plane, obj, 0);
227 242
228 if (intel_rotation_90_or_270(rotation)) { 243 if (intel_rotation_90_or_270(rotation)) {
229 /* stride: Surface height in tiles */ 244 /* stride: Surface height in tiles */
230 tile_height = intel_tile_height(dev, fb->pixel_format, 245 tile_height = intel_tile_height(dev, fb->pixel_format,
231 fb->modifier[0]); 246 fb->modifier[0], 0);
232 stride = DIV_ROUND_UP(fb->height, tile_height); 247 stride = DIV_ROUND_UP(fb->height, tile_height);
233 plane_size = (src_w << 16) | src_h; 248 plane_size = (src_w << 16) | src_h;
234 x_offset = stride * tile_height - y - (src_h + 1); 249 x_offset = stride * tile_height - y - (src_h + 1);
@@ -598,7 +613,7 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
598 struct intel_plane *intel_plane = to_intel_plane(plane); 613 struct intel_plane *intel_plane = to_intel_plane(plane);
599 int pipe = intel_plane->pipe; 614 int pipe = intel_plane->pipe;
600 615
601 I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE); 616 I915_WRITE(SPRCTL(pipe), 0);
602 /* Can't leave the scaler enabled... */ 617 /* Can't leave the scaler enabled... */
603 if (intel_plane->can_scale) 618 if (intel_plane->can_scale)
604 I915_WRITE(SPRSCALE(pipe), 0); 619 I915_WRITE(SPRSCALE(pipe), 0);
@@ -923,8 +938,6 @@ intel_commit_sprite_plane(struct drm_plane *plane,
923 938
924 crtc = crtc ? crtc : plane->crtc; 939 crtc = crtc ? crtc : plane->crtc;
925 940
926 plane->fb = fb;
927
928 if (!crtc->state->active) 941 if (!crtc->state->active)
929 return; 942 return;
930 943
@@ -1121,7 +1134,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
1121 1134
1122 intel_plane->pipe = pipe; 1135 intel_plane->pipe = pipe;
1123 intel_plane->plane = plane; 1136 intel_plane->plane = plane;
1124 intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe); 1137 intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane);
1125 intel_plane->check_plane = intel_check_sprite_plane; 1138 intel_plane->check_plane = intel_check_sprite_plane;
1126 intel_plane->commit_plane = intel_commit_sprite_plane; 1139 intel_plane->commit_plane = intel_commit_sprite_plane;
1127 possible_crtcs = (1 << pipe); 1140 possible_crtcs = (1 << pipe);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 0568ae6ec9dd..6bea78944cd6 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1138,13 +1138,13 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder)
1138 1138
1139 j = 0; 1139 j = 0;
1140 for (i = 0; i < 60; i++) 1140 for (i = 0; i < 60; i++)
1141 I915_WRITE(TV_H_LUMA_0 + (i<<2), tv_mode->filter_table[j++]); 1141 I915_WRITE(TV_H_LUMA(i), tv_mode->filter_table[j++]);
1142 for (i = 0; i < 60; i++) 1142 for (i = 0; i < 60; i++)
1143 I915_WRITE(TV_H_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]); 1143 I915_WRITE(TV_H_CHROMA(i), tv_mode->filter_table[j++]);
1144 for (i = 0; i < 43; i++) 1144 for (i = 0; i < 43; i++)
1145 I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]); 1145 I915_WRITE(TV_V_LUMA(i), tv_mode->filter_table[j++]);
1146 for (i = 0; i < 43; i++) 1146 for (i = 0; i < 43; i++)
1147 I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]); 1147 I915_WRITE(TV_V_CHROMA(i), tv_mode->filter_table[j++]);
1148 I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE); 1148 I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
1149 I915_WRITE(TV_CTL, tv_ctl); 1149 I915_WRITE(TV_CTL, tv_ctl);
1150} 1150}
@@ -1291,7 +1291,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
1291 return; 1291 return;
1292 1292
1293 1293
1294 for (i = 0; i < sizeof(tv_modes) / sizeof(*tv_modes); i++) { 1294 for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
1295 tv_mode = tv_modes + i; 1295 tv_mode = tv_modes + i;
1296 1296
1297 if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) == 1297 if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
@@ -1579,7 +1579,7 @@ intel_tv_init(struct drm_device *dev)
1579 struct intel_encoder *intel_encoder; 1579 struct intel_encoder *intel_encoder;
1580 struct intel_connector *intel_connector; 1580 struct intel_connector *intel_connector;
1581 u32 tv_dac_on, tv_dac_off, save_tv_dac; 1581 u32 tv_dac_on, tv_dac_off, save_tv_dac;
1582 char *tv_format_names[ARRAY_SIZE(tv_modes)]; 1582 const char *tv_format_names[ARRAY_SIZE(tv_modes)];
1583 int i, initial_mode = 0; 1583 int i, initial_mode = 0;
1584 1584
1585 if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED) 1585 if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
@@ -1677,7 +1677,7 @@ intel_tv_init(struct drm_device *dev)
1677 1677
1678 /* Create TV properties then attach current values */ 1678 /* Create TV properties then attach current values */
1679 for (i = 0; i < ARRAY_SIZE(tv_modes); i++) 1679 for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
1680 tv_format_names[i] = (char *)tv_modes[i].name; 1680 tv_format_names[i] = tv_modes[i].name;
1681 drm_mode_create_tv_properties(dev, 1681 drm_mode_create_tv_properties(dev,
1682 ARRAY_SIZE(tv_modes), 1682 ARRAY_SIZE(tv_modes),
1683 tv_format_names); 1683 tv_format_names);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 9d3c2e420d2b..43cba129a0c0 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -27,7 +27,7 @@
27 27
28#include <linux/pm_runtime.h> 28#include <linux/pm_runtime.h>
29 29
30#define FORCEWAKE_ACK_TIMEOUT_MS 2 30#define FORCEWAKE_ACK_TIMEOUT_MS 50
31 31
32#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__)) 32#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
33#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__)) 33#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))
@@ -52,8 +52,7 @@ static const char * const forcewake_domain_names[] = {
52const char * 52const char *
53intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id) 53intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
54{ 54{
55 BUILD_BUG_ON((sizeof(forcewake_domain_names)/sizeof(const char *)) != 55 BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
56 FW_DOMAIN_ID_COUNT);
57 56
58 if (id >= 0 && id < FW_DOMAIN_ID_COUNT) 57 if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
59 return forcewake_domain_names[id]; 58 return forcewake_domain_names[id];
@@ -526,7 +525,7 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
526} 525}
527 526
528/* We give fast paths for the really cool registers */ 527/* We give fast paths for the really cool registers */
529#define NEEDS_FORCE_WAKE(dev_priv, reg) \ 528#define NEEDS_FORCE_WAKE(reg) \
530 ((reg) < 0x40000 && (reg) != FORCEWAKE) 529 ((reg) < 0x40000 && (reg) != FORCEWAKE)
531 530
532#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end)) 531#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
@@ -728,7 +727,7 @@ static u##x \
728gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ 727gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
729 GEN6_READ_HEADER(x); \ 728 GEN6_READ_HEADER(x); \
730 hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \ 729 hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
731 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) \ 730 if (NEEDS_FORCE_WAKE(reg)) \
732 __force_wake_get(dev_priv, FORCEWAKE_RENDER); \ 731 __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
733 val = __raw_i915_read##x(dev_priv, reg); \ 732 val = __raw_i915_read##x(dev_priv, reg); \
734 hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \ 733 hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
@@ -762,7 +761,7 @@ chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
762 GEN6_READ_FOOTER; \ 761 GEN6_READ_FOOTER; \
763} 762}
764 763
765#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \ 764#define SKL_NEEDS_FORCE_WAKE(reg) \
766 ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg)) 765 ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
767 766
768#define __gen9_read(x) \ 767#define __gen9_read(x) \
@@ -770,9 +769,10 @@ static u##x \
770gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ 769gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
771 enum forcewake_domains fw_engine; \ 770 enum forcewake_domains fw_engine; \
772 GEN6_READ_HEADER(x); \ 771 GEN6_READ_HEADER(x); \
773 if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) \ 772 hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
773 if (!SKL_NEEDS_FORCE_WAKE(reg)) \
774 fw_engine = 0; \ 774 fw_engine = 0; \
775 else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \ 775 else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
776 fw_engine = FORCEWAKE_RENDER; \ 776 fw_engine = FORCEWAKE_RENDER; \
777 else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \ 777 else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
778 fw_engine = FORCEWAKE_MEDIA; \ 778 fw_engine = FORCEWAKE_MEDIA; \
@@ -783,6 +783,7 @@ gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
783 if (fw_engine) \ 783 if (fw_engine) \
784 __force_wake_get(dev_priv, fw_engine); \ 784 __force_wake_get(dev_priv, fw_engine); \
785 val = __raw_i915_read##x(dev_priv, reg); \ 785 val = __raw_i915_read##x(dev_priv, reg); \
786 hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
786 GEN6_READ_FOOTER; \ 787 GEN6_READ_FOOTER; \
787} 788}
788 789
@@ -867,7 +868,7 @@ static void \
867gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ 868gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
868 u32 __fifo_ret = 0; \ 869 u32 __fifo_ret = 0; \
869 GEN6_WRITE_HEADER; \ 870 GEN6_WRITE_HEADER; \
870 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 871 if (NEEDS_FORCE_WAKE(reg)) { \
871 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ 872 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
872 } \ 873 } \
873 __raw_i915_write##x(dev_priv, reg, val); \ 874 __raw_i915_write##x(dev_priv, reg, val); \
@@ -882,7 +883,7 @@ static void \
882hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ 883hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
883 u32 __fifo_ret = 0; \ 884 u32 __fifo_ret = 0; \
884 GEN6_WRITE_HEADER; \ 885 GEN6_WRITE_HEADER; \
885 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 886 if (NEEDS_FORCE_WAKE(reg)) { \
886 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ 887 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
887 } \ 888 } \
888 hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \ 889 hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
@@ -983,7 +984,8 @@ gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
983 bool trace) { \ 984 bool trace) { \
984 enum forcewake_domains fw_engine; \ 985 enum forcewake_domains fw_engine; \
985 GEN6_WRITE_HEADER; \ 986 GEN6_WRITE_HEADER; \
986 if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \ 987 hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
988 if (!SKL_NEEDS_FORCE_WAKE(reg) || \
987 is_gen9_shadowed(dev_priv, reg)) \ 989 is_gen9_shadowed(dev_priv, reg)) \
988 fw_engine = 0; \ 990 fw_engine = 0; \
989 else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \ 991 else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
@@ -997,6 +999,8 @@ gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
997 if (fw_engine) \ 999 if (fw_engine) \
998 __force_wake_get(dev_priv, fw_engine); \ 1000 __force_wake_get(dev_priv, fw_engine); \
999 __raw_i915_write##x(dev_priv, reg, val); \ 1001 __raw_i915_write##x(dev_priv, reg, val); \
1002 hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
1003 hsw_unclaimed_reg_detect(dev_priv); \
1000 GEN6_WRITE_FOOTER; \ 1004 GEN6_WRITE_FOOTER; \
1001} 1005}
1002 1006
@@ -1198,8 +1202,6 @@ void intel_uncore_init(struct drm_device *dev)
1198 1202
1199 switch (INTEL_INFO(dev)->gen) { 1203 switch (INTEL_INFO(dev)->gen) {
1200 default: 1204 default:
1201 MISSING_CASE(INTEL_INFO(dev)->gen);
1202 return;
1203 case 9: 1205 case 9:
1204 ASSIGN_WRITE_MMIO_VFUNCS(gen9); 1206 ASSIGN_WRITE_MMIO_VFUNCS(gen9);
1205 ASSIGN_READ_MMIO_VFUNCS(gen9); 1207 ASSIGN_READ_MMIO_VFUNCS(gen9);
@@ -1427,21 +1429,21 @@ static int ironlake_do_reset(struct drm_device *dev)
1427 struct drm_i915_private *dev_priv = dev->dev_private; 1429 struct drm_i915_private *dev_priv = dev->dev_private;
1428 int ret; 1430 int ret;
1429 1431
1430 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 1432 I915_WRITE(ILK_GDSR,
1431 ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE); 1433 ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
1432 ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 1434 ret = wait_for((I915_READ(ILK_GDSR) &
1433 ILK_GRDOM_RESET_ENABLE) == 0, 500); 1435 ILK_GRDOM_RESET_ENABLE) == 0, 500);
1434 if (ret) 1436 if (ret)
1435 return ret; 1437 return ret;
1436 1438
1437 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 1439 I915_WRITE(ILK_GDSR,
1438 ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE); 1440 ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
1439 ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 1441 ret = wait_for((I915_READ(ILK_GDSR) &
1440 ILK_GRDOM_RESET_ENABLE) == 0, 500); 1442 ILK_GRDOM_RESET_ENABLE) == 0, 500);
1441 if (ret) 1443 if (ret)
1442 return ret; 1444 return ret;
1443 1445
1444 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0); 1446 I915_WRITE(ILK_GDSR, 0);
1445 1447
1446 return 0; 1448 return 0;
1447} 1449}
@@ -1529,13 +1531,22 @@ static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
1529 1531
1530int intel_gpu_reset(struct drm_device *dev) 1532int intel_gpu_reset(struct drm_device *dev)
1531{ 1533{
1534 struct drm_i915_private *dev_priv = to_i915(dev);
1532 int (*reset)(struct drm_device *); 1535 int (*reset)(struct drm_device *);
1536 int ret;
1533 1537
1534 reset = intel_get_gpu_reset(dev); 1538 reset = intel_get_gpu_reset(dev);
1535 if (reset == NULL) 1539 if (reset == NULL)
1536 return -ENODEV; 1540 return -ENODEV;
1537 1541
1538 return reset(dev); 1542 /* If the power well sleeps during the reset, the reset
1543 * request may be dropped and never completes (causing -EIO).
1544 */
1545 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1546 ret = reset(dev);
1547 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1548
1549 return ret;
1539} 1550}
1540 1551
1541bool intel_has_gpu_reset(struct drm_device *dev) 1552bool intel_has_gpu_reset(struct drm_device *dev)
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 644edf65dbe0..98605ea2ad9d 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -48,11 +48,17 @@ static const struct dw_hdmi_mpll_config imx_mpll_cfg[] = {
48 { 0x40a2, 0x000a }, 48 { 0x40a2, 0x000a },
49 }, 49 },
50 }, { 50 }, {
51 ~0UL, { 51 216000000, {
52 { 0x00a0, 0x000a }, 52 { 0x00a0, 0x000a },
53 { 0x2001, 0x000f }, 53 { 0x2001, 0x000f },
54 { 0x4002, 0x000f }, 54 { 0x4002, 0x000f },
55 }, 55 },
56 }, {
57 ~0UL, {
58 { 0x0000, 0x0000 },
59 { 0x0000, 0x0000 },
60 { 0x0000, 0x0000 },
61 },
56 } 62 }
57}; 63};
58 64
@@ -82,7 +88,7 @@ static const struct dw_hdmi_curr_ctrl imx_cur_ctr[] = {
82 */ 88 */
83static const struct dw_hdmi_phy_config imx_phy_config[] = { 89static const struct dw_hdmi_phy_config imx_phy_config[] = {
84 /*pixelclk symbol term vlev */ 90 /*pixelclk symbol term vlev */
85 { 148500000, 0x800d, 0x0005, 0x01ad}, 91 { 216000000, 0x800d, 0x0005, 0x01ad},
86 { ~0UL, 0x0000, 0x0000, 0x0000} 92 { ~0UL, 0x0000, 0x0000, 0x0000}
87}; 93};
88 94
@@ -148,7 +154,8 @@ static enum drm_mode_status imx6q_hdmi_mode_valid(struct drm_connector *con,
148{ 154{
149 if (mode->clock < 13500) 155 if (mode->clock < 13500)
150 return MODE_CLOCK_LOW; 156 return MODE_CLOCK_LOW;
151 if (mode->clock > 266000) 157 /* FIXME: Hardware is capable of 266MHz, but setup data is missing. */
158 if (mode->clock > 216000)
152 return MODE_CLOCK_HIGH; 159 return MODE_CLOCK_HIGH;
153 160
154 return MODE_OK; 161 return MODE_OK;
@@ -159,7 +166,8 @@ static enum drm_mode_status imx6dl_hdmi_mode_valid(struct drm_connector *con,
159{ 166{
160 if (mode->clock < 13500) 167 if (mode->clock < 13500)
161 return MODE_CLOCK_LOW; 168 return MODE_CLOCK_LOW;
162 if (mode->clock > 270000) 169 /* FIXME: Hardware is capable of 270MHz, but setup data is missing. */
170 if (mode->clock > 216000)
163 return MODE_CLOCK_HIGH; 171 return MODE_CLOCK_HIGH;
164 172
165 return MODE_OK; 173 return MODE_OK;
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 74f505b0dd02..64f16ea779ef 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -145,10 +145,10 @@ void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc)
145} 145}
146EXPORT_SYMBOL_GPL(imx_drm_handle_vblank); 146EXPORT_SYMBOL_GPL(imx_drm_handle_vblank);
147 147
148static int imx_drm_enable_vblank(struct drm_device *drm, int crtc) 148static int imx_drm_enable_vblank(struct drm_device *drm, unsigned int pipe)
149{ 149{
150 struct imx_drm_device *imxdrm = drm->dev_private; 150 struct imx_drm_device *imxdrm = drm->dev_private;
151 struct imx_drm_crtc *imx_drm_crtc = imxdrm->crtc[crtc]; 151 struct imx_drm_crtc *imx_drm_crtc = imxdrm->crtc[pipe];
152 int ret; 152 int ret;
153 153
154 if (!imx_drm_crtc) 154 if (!imx_drm_crtc)
@@ -163,10 +163,10 @@ static int imx_drm_enable_vblank(struct drm_device *drm, int crtc)
163 return ret; 163 return ret;
164} 164}
165 165
166static void imx_drm_disable_vblank(struct drm_device *drm, int crtc) 166static void imx_drm_disable_vblank(struct drm_device *drm, unsigned int pipe)
167{ 167{
168 struct imx_drm_device *imxdrm = drm->dev_private; 168 struct imx_drm_device *imxdrm = drm->dev_private;
169 struct imx_drm_crtc *imx_drm_crtc = imxdrm->crtc[crtc]; 169 struct imx_drm_crtc *imx_drm_crtc = imxdrm->crtc[pipe];
170 170
171 if (!imx_drm_crtc) 171 if (!imx_drm_crtc)
172 return; 172 return;
@@ -487,7 +487,7 @@ static struct drm_driver imx_drm_driver = {
487 .gem_prime_vmap = drm_gem_cma_prime_vmap, 487 .gem_prime_vmap = drm_gem_cma_prime_vmap,
488 .gem_prime_vunmap = drm_gem_cma_prime_vunmap, 488 .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
489 .gem_prime_mmap = drm_gem_cma_prime_mmap, 489 .gem_prime_mmap = drm_gem_cma_prime_mmap,
490 .get_vblank_counter = drm_vblank_count, 490 .get_vblank_counter = drm_vblank_no_hw_counter,
491 .enable_vblank = imx_drm_enable_vblank, 491 .enable_vblank = imx_drm_enable_vblank,
492 .disable_vblank = imx_drm_disable_vblank, 492 .disable_vblank = imx_drm_disable_vblank,
493 .ioctls = imx_drm_ioctls, 493 .ioctls = imx_drm_ioctls,
@@ -531,59 +531,12 @@ static const struct component_master_ops imx_drm_ops = {
531 531
532static int imx_drm_platform_probe(struct platform_device *pdev) 532static int imx_drm_platform_probe(struct platform_device *pdev)
533{ 533{
534 struct device_node *ep, *port, *remote; 534 int ret = drm_of_component_probe(&pdev->dev, compare_of, &imx_drm_ops);
535 struct component_match *match = NULL;
536 int ret;
537 int i;
538
539 /*
540 * Bind the IPU display interface ports first, so that
541 * imx_drm_encoder_parse_of called from encoder .bind callbacks
542 * works as expected.
543 */
544 for (i = 0; ; i++) {
545 port = of_parse_phandle(pdev->dev.of_node, "ports", i);
546 if (!port)
547 break;
548
549 component_match_add(&pdev->dev, &match, compare_of, port);
550 }
551 535
552 if (i == 0) { 536 if (!ret)
553 dev_err(&pdev->dev, "missing 'ports' property\n"); 537 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
554 return -ENODEV;
555 }
556 538
557 /* Then bind all encoders */ 539 return ret;
558 for (i = 0; ; i++) {
559 port = of_parse_phandle(pdev->dev.of_node, "ports", i);
560 if (!port)
561 break;
562
563 for_each_child_of_node(port, ep) {
564 remote = of_graph_get_remote_port_parent(ep);
565 if (!remote || !of_device_is_available(remote)) {
566 of_node_put(remote);
567 continue;
568 } else if (!of_device_is_available(remote->parent)) {
569 dev_warn(&pdev->dev, "parent device of %s is not available\n",
570 remote->full_name);
571 of_node_put(remote);
572 continue;
573 }
574
575 component_match_add(&pdev->dev, &match, compare_of,
576 remote);
577 of_node_put(remote);
578 }
579 of_node_put(port);
580 }
581
582 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
583 if (ret)
584 return ret;
585
586 return component_master_add_with_match(&pdev->dev, &imx_drm_ops, match);
587} 540}
588 541
589static int imx_drm_platform_remove(struct platform_device *pdev) 542static int imx_drm_platform_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 878a643d72e4..575f4c84388f 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -23,12 +23,21 @@
23#define to_ipu_plane(x) container_of(x, struct ipu_plane, base) 23#define to_ipu_plane(x) container_of(x, struct ipu_plane, base)
24 24
25static const uint32_t ipu_plane_formats[] = { 25static const uint32_t ipu_plane_formats[] = {
26 DRM_FORMAT_ARGB1555,
26 DRM_FORMAT_XRGB1555, 27 DRM_FORMAT_XRGB1555,
28 DRM_FORMAT_ABGR1555,
27 DRM_FORMAT_XBGR1555, 29 DRM_FORMAT_XBGR1555,
30 DRM_FORMAT_RGBA5551,
31 DRM_FORMAT_BGRA5551,
32 DRM_FORMAT_ARGB4444,
28 DRM_FORMAT_ARGB8888, 33 DRM_FORMAT_ARGB8888,
29 DRM_FORMAT_XRGB8888, 34 DRM_FORMAT_XRGB8888,
30 DRM_FORMAT_ABGR8888, 35 DRM_FORMAT_ABGR8888,
31 DRM_FORMAT_XBGR8888, 36 DRM_FORMAT_XBGR8888,
37 DRM_FORMAT_RGBA8888,
38 DRM_FORMAT_RGBX8888,
39 DRM_FORMAT_BGRA8888,
40 DRM_FORMAT_BGRA8888,
32 DRM_FORMAT_YUYV, 41 DRM_FORMAT_YUYV,
33 DRM_FORMAT_YVYU, 42 DRM_FORMAT_YVYU,
34 DRM_FORMAT_YUV420, 43 DRM_FORMAT_YUV420,
@@ -175,8 +184,15 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
175 ipu_dp_set_window_pos(ipu_plane->dp, crtc_x, crtc_y); 184 ipu_dp_set_window_pos(ipu_plane->dp, crtc_x, crtc_y);
176 /* Enable local alpha on partial plane */ 185 /* Enable local alpha on partial plane */
177 switch (fb->pixel_format) { 186 switch (fb->pixel_format) {
187 case DRM_FORMAT_ARGB1555:
188 case DRM_FORMAT_ABGR1555:
189 case DRM_FORMAT_RGBA5551:
190 case DRM_FORMAT_BGRA5551:
191 case DRM_FORMAT_ARGB4444:
178 case DRM_FORMAT_ARGB8888: 192 case DRM_FORMAT_ARGB8888:
179 case DRM_FORMAT_ABGR8888: 193 case DRM_FORMAT_ABGR8888:
194 case DRM_FORMAT_RGBA8888:
195 case DRM_FORMAT_BGRA8888:
180 ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false); 196 ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
181 break; 197 break;
182 default: 198 default:
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index 8cfa9cb74c86..1f2f9ca25901 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -416,7 +416,7 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags)
416 return 0; 416 return 0;
417} 417}
418 418
419#if __OS_HAS_AGP 419#if IS_ENABLED(CONFIG_AGP)
420/** 420/**
421 * Bootstrap the driver for AGP DMA. 421 * Bootstrap the driver for AGP DMA.
422 * 422 *
@@ -947,7 +947,7 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
947 drm_legacy_ioremapfree(dev->agp_buffer_map, dev); 947 drm_legacy_ioremapfree(dev->agp_buffer_map, dev);
948 948
949 if (dev_priv->used_new_dma_init) { 949 if (dev_priv->used_new_dma_init) {
950#if __OS_HAS_AGP 950#if IS_ENABLED(CONFIG_AGP)
951 if (dev_priv->agp_handle != 0) { 951 if (dev_priv->agp_handle != 0) {
952 struct drm_agp_binding unbind_req; 952 struct drm_agp_binding unbind_req;
953 struct drm_agp_buffer free_req; 953 struct drm_agp_buffer free_req;
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index b4a2014917e5..bb312339e0b0 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -183,9 +183,9 @@ extern int mga_warp_install_microcode(drm_mga_private_t *dev_priv);
183extern int mga_warp_init(drm_mga_private_t *dev_priv); 183extern int mga_warp_init(drm_mga_private_t *dev_priv);
184 184
185 /* mga_irq.c */ 185 /* mga_irq.c */
186extern int mga_enable_vblank(struct drm_device *dev, int crtc); 186extern int mga_enable_vblank(struct drm_device *dev, unsigned int pipe);
187extern void mga_disable_vblank(struct drm_device *dev, int crtc); 187extern void mga_disable_vblank(struct drm_device *dev, unsigned int pipe);
188extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc); 188extern u32 mga_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
189extern int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence); 189extern int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence);
190extern int mga_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence); 190extern int mga_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
191extern irqreturn_t mga_driver_irq_handler(int irq, void *arg); 191extern irqreturn_t mga_driver_irq_handler(int irq, void *arg);
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index 1b071b8ff9dc..693ba708cfed 100644
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -35,12 +35,12 @@
35#include <drm/mga_drm.h> 35#include <drm/mga_drm.h>
36#include "mga_drv.h" 36#include "mga_drv.h"
37 37
38u32 mga_get_vblank_counter(struct drm_device *dev, int crtc) 38u32 mga_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
39{ 39{
40 const drm_mga_private_t *const dev_priv = 40 const drm_mga_private_t *const dev_priv =
41 (drm_mga_private_t *) dev->dev_private; 41 (drm_mga_private_t *) dev->dev_private;
42 42
43 if (crtc != 0) 43 if (pipe != 0)
44 return 0; 44 return 0;
45 45
46 return atomic_read(&dev_priv->vbl_received); 46 return atomic_read(&dev_priv->vbl_received);
@@ -88,13 +88,13 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
88 return IRQ_NONE; 88 return IRQ_NONE;
89} 89}
90 90
91int mga_enable_vblank(struct drm_device *dev, int crtc) 91int mga_enable_vblank(struct drm_device *dev, unsigned int pipe)
92{ 92{
93 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; 93 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
94 94
95 if (crtc != 0) { 95 if (pipe != 0) {
96 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", 96 DRM_ERROR("tried to enable vblank on non-existent crtc %u\n",
97 crtc); 97 pipe);
98 return 0; 98 return 0;
99 } 99 }
100 100
@@ -103,11 +103,11 @@ int mga_enable_vblank(struct drm_device *dev, int crtc)
103} 103}
104 104
105 105
106void mga_disable_vblank(struct drm_device *dev, int crtc) 106void mga_disable_vblank(struct drm_device *dev, unsigned int pipe)
107{ 107{
108 if (crtc != 0) { 108 if (pipe != 0) {
109 DRM_ERROR("tried to disable vblank on non-existent crtc %d\n", 109 DRM_ERROR("tried to disable vblank on non-existent crtc %u\n",
110 crtc); 110 pipe);
111 } 111 }
112 112
113 /* Do *NOT* disable the vertical refresh interrupt. MGA doesn't have 113 /* Do *NOT* disable the vertical refresh interrupt. MGA doesn't have
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 8e6c7c638e24..84d3ec98e6b9 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -14,20 +14,6 @@ config DRM_MSM
14 help 14 help
15 DRM/KMS driver for MSM/snapdragon. 15 DRM/KMS driver for MSM/snapdragon.
16 16
17config DRM_MSM_FBDEV
18 bool "Enable legacy fbdev support for MSM modesetting driver"
19 depends on DRM_MSM
20 select DRM_KMS_FB_HELPER
21 select FB_SYS_FILLRECT
22 select FB_SYS_COPYAREA
23 select FB_SYS_IMAGEBLIT
24 select FB_SYS_FOPS
25 default y
26 help
27 Choose this option if you have a need for the legacy fbdev
28 support. Note that this support also provide the linux console
29 support on top of the MSM modesetting driver.
30
31config DRM_MSM_REGISTER_LOGGING 17config DRM_MSM_REGISTER_LOGGING
32 bool "MSM DRM register logging" 18 bool "MSM DRM register logging"
33 depends on DRM_MSM 19 depends on DRM_MSM
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 0a543eb5e5d7..1c90290be716 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -50,7 +50,7 @@ msm-y := \
50 msm_rd.o \ 50 msm_rd.o \
51 msm_ringbuffer.o 51 msm_ringbuffer.o
52 52
53msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o 53msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
54msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o 54msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
55 55
56msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \ 56msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 0261f0d31612..9e2aceb4ffe6 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,13 +8,14 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14) 13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03) 16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06) 17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
18- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
18 19
19Copyright (C) 2013-2015 by the following authors: 20Copyright (C) 2013-2015 by the following authors:
20- Rob Clark <robdclark@gmail.com> (robclark) 21- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 48d133711487..97dc1c6ec107 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,13 +8,14 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14) 13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03) 16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06) 17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
18- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
18 19
19Copyright (C) 2013-2015 by the following authors: 20Copyright (C) 2013-2015 by the following authors:
20- Rob Clark <robdclark@gmail.com> (robclark) 21- Rob Clark <robdclark@gmail.com> (robclark)
@@ -280,6 +281,8 @@ enum a3xx_rb_blend_opcode {
280enum a3xx_intp_mode { 281enum a3xx_intp_mode {
281 SMOOTH = 0, 282 SMOOTH = 0,
282 FLAT = 1, 283 FLAT = 1,
284 ZERO = 2,
285 ONE = 3,
283}; 286};
284 287
285enum a3xx_repl_mode { 288enum a3xx_repl_mode {
@@ -680,9 +683,16 @@ static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460
680#define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000 683#define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000
681#define A3XX_GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 0x00100000 684#define A3XX_GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 0x00100000
682#define A3XX_GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 0x00200000 685#define A3XX_GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 0x00200000
686#define A3XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000
683#define A3XX_GRAS_CL_CLIP_CNTL_ZCOORD 0x00800000 687#define A3XX_GRAS_CL_CLIP_CNTL_ZCOORD 0x00800000
684#define A3XX_GRAS_CL_CLIP_CNTL_WCOORD 0x01000000 688#define A3XX_GRAS_CL_CLIP_CNTL_WCOORD 0x01000000
685#define A3XX_GRAS_CL_CLIP_CNTL_ZCLIP_DISABLE 0x02000000 689#define A3XX_GRAS_CL_CLIP_CNTL_ZCLIP_DISABLE 0x02000000
690#define A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__MASK 0x1c000000
691#define A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__SHIFT 26
692static inline uint32_t A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES(uint32_t val)
693{
694 return ((val) << A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__SHIFT) & A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__MASK;
695}
686 696
687#define REG_A3XX_GRAS_CL_GB_CLIP_ADJ 0x00002044 697#define REG_A3XX_GRAS_CL_GB_CLIP_ADJ 0x00002044
688#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff 698#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
@@ -773,7 +783,7 @@ static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val)
773#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT 0 783#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT 0
774static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val) 784static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
775{ 785{
776 return ((((int32_t)(val * 16384.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK; 786 return ((((int32_t)(val * 1048576.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
777} 787}
778 788
779#define REG_A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000206d 789#define REG_A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000206d
@@ -894,6 +904,9 @@ static inline uint32_t A3XX_RB_MODE_CONTROL_MRT(uint32_t val)
894#define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000 904#define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000
895 905
896#define REG_A3XX_RB_RENDER_CONTROL 0x000020c1 906#define REG_A3XX_RB_RENDER_CONTROL 0x000020c1
907#define A3XX_RB_RENDER_CONTROL_DUAL_COLOR_IN_ENABLE 0x00000001
908#define A3XX_RB_RENDER_CONTROL_YUV_IN_ENABLE 0x00000002
909#define A3XX_RB_RENDER_CONTROL_COV_VALUE_INPUT_ENABLE 0x00000004
897#define A3XX_RB_RENDER_CONTROL_FACENESS 0x00000008 910#define A3XX_RB_RENDER_CONTROL_FACENESS 0x00000008
898#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK 0x00000ff0 911#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK 0x00000ff0
899#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4 912#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4
@@ -907,6 +920,8 @@ static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
907#define A3XX_RB_RENDER_CONTROL_YCOORD 0x00008000 920#define A3XX_RB_RENDER_CONTROL_YCOORD 0x00008000
908#define A3XX_RB_RENDER_CONTROL_ZCOORD 0x00010000 921#define A3XX_RB_RENDER_CONTROL_ZCOORD 0x00010000
909#define A3XX_RB_RENDER_CONTROL_WCOORD 0x00020000 922#define A3XX_RB_RENDER_CONTROL_WCOORD 0x00020000
923#define A3XX_RB_RENDER_CONTROL_I_CLAMP_ENABLE 0x00080000
924#define A3XX_RB_RENDER_CONTROL_COV_VALUE_OUTPUT_ENABLE 0x00100000
910#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST 0x00400000 925#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST 0x00400000
911#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000 926#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000
912#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24 927#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24
@@ -914,6 +929,8 @@ static inline uint32_t A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(enum adreno_compar
914{ 929{
915 return ((val) << A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK; 930 return ((val) << A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK;
916} 931}
932#define A3XX_RB_RENDER_CONTROL_ALPHA_TO_COVERAGE 0x40000000
933#define A3XX_RB_RENDER_CONTROL_ALPHA_TO_ONE 0x80000000
917 934
918#define REG_A3XX_RB_MSAA_CONTROL 0x000020c2 935#define REG_A3XX_RB_MSAA_CONTROL 0x000020c2
919#define A3XX_RB_MSAA_CONTROL_DISABLE 0x00000400 936#define A3XX_RB_MSAA_CONTROL_DISABLE 0x00000400
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index ac55066db3b0..99de8271dba8 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -8,13 +8,14 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14) 13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03) 16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06) 17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
18- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
18 19
19Copyright (C) 2013-2015 by the following authors: 20Copyright (C) 2013-2015 by the following authors:
20- Rob Clark <robdclark@gmail.com> (robclark) 21- Rob Clark <robdclark@gmail.com> (robclark)
@@ -162,10 +163,13 @@ enum a4xx_tex_fmt {
162 TFMT4_8_UNORM = 4, 163 TFMT4_8_UNORM = 4,
163 TFMT4_8_8_UNORM = 14, 164 TFMT4_8_8_UNORM = 14,
164 TFMT4_8_8_8_8_UNORM = 28, 165 TFMT4_8_8_8_8_UNORM = 28,
166 TFMT4_8_SNORM = 5,
165 TFMT4_8_8_SNORM = 15, 167 TFMT4_8_8_SNORM = 15,
166 TFMT4_8_8_8_8_SNORM = 29, 168 TFMT4_8_8_8_8_SNORM = 29,
169 TFMT4_8_UINT = 6,
167 TFMT4_8_8_UINT = 16, 170 TFMT4_8_8_UINT = 16,
168 TFMT4_8_8_8_8_UINT = 30, 171 TFMT4_8_8_8_8_UINT = 30,
172 TFMT4_8_SINT = 7,
169 TFMT4_8_8_SINT = 17, 173 TFMT4_8_8_SINT = 17,
170 TFMT4_8_8_8_8_SINT = 31, 174 TFMT4_8_8_8_8_SINT = 31,
171 TFMT4_16_UINT = 21, 175 TFMT4_16_UINT = 21,
@@ -246,7 +250,8 @@ enum a4xx_tex_clamp {
246 A4XX_TEX_REPEAT = 0, 250 A4XX_TEX_REPEAT = 0,
247 A4XX_TEX_CLAMP_TO_EDGE = 1, 251 A4XX_TEX_CLAMP_TO_EDGE = 1,
248 A4XX_TEX_MIRROR_REPEAT = 2, 252 A4XX_TEX_MIRROR_REPEAT = 2,
249 A4XX_TEX_CLAMP_NONE = 3, 253 A4XX_TEX_CLAMP_TO_BORDER = 3,
254 A4XX_TEX_MIRROR_CLAMP = 4,
250}; 255};
251 256
252enum a4xx_tex_aniso { 257enum a4xx_tex_aniso {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 399a9e528139..c304468cf2bd 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,13 +8,14 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14) 13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03) 16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06) 17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
18- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
18 19
19Copyright (C) 2013-2015 by the following authors: 20Copyright (C) 2013-2015 by the following authors:
20- Rob Clark <robdclark@gmail.com> (robclark) 21- Rob Clark <robdclark@gmail.com> (robclark)
@@ -85,6 +86,10 @@ enum adreno_rb_blend_factor {
85 FACTOR_CONSTANT_ALPHA = 14, 86 FACTOR_CONSTANT_ALPHA = 14,
86 FACTOR_ONE_MINUS_CONSTANT_ALPHA = 15, 87 FACTOR_ONE_MINUS_CONSTANT_ALPHA = 15,
87 FACTOR_SRC_ALPHA_SATURATE = 16, 88 FACTOR_SRC_ALPHA_SATURATE = 16,
89 FACTOR_SRC1_COLOR = 20,
90 FACTOR_ONE_MINUS_SRC1_COLOR = 21,
91 FACTOR_SRC1_ALPHA = 22,
92 FACTOR_ONE_MINUS_SRC1_ALPHA = 23,
88}; 93};
89 94
90enum adreno_rb_surface_endian { 95enum adreno_rb_surface_endian {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 41904fed1350..a22fef569499 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,13 +8,14 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14) 13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03) 16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06) 17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
18- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
18 19
19Copyright (C) 2013-2015 by the following authors: 20Copyright (C) 2013-2015 by the following authors:
20- Rob Clark <robdclark@gmail.com> (robclark) 21- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 1d2e32f0817b..b2b5f3dd1b4c 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
@@ -567,114 +567,234 @@ static inline uint32_t DSI_VERSION_MAJOR(uint32_t val)
567#define REG_DSI_8x60_PHY_CAL_STATUS 0x000000fc 567#define REG_DSI_8x60_PHY_CAL_STATUS 0x000000fc
568#define DSI_8x60_PHY_CAL_STATUS_CAL_BUSY 0x10000000 568#define DSI_8x60_PHY_CAL_STATUS_CAL_BUSY 0x10000000
569 569
570static inline uint32_t REG_DSI_8960_LN(uint32_t i0) { return 0x00000300 + 0x40*i0; } 570static inline uint32_t REG_DSI_28nm_8960_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
571 571
572static inline uint32_t REG_DSI_8960_LN_CFG_0(uint32_t i0) { return 0x00000300 + 0x40*i0; } 572static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
573 573
574static inline uint32_t REG_DSI_8960_LN_CFG_1(uint32_t i0) { return 0x00000304 + 0x40*i0; } 574static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
575 575
576static inline uint32_t REG_DSI_8960_LN_CFG_2(uint32_t i0) { return 0x00000308 + 0x40*i0; } 576static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
577 577
578static inline uint32_t REG_DSI_8960_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000030c + 0x40*i0; } 578static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000000c + 0x40*i0; }
579 579
580static inline uint32_t REG_DSI_8960_LN_TEST_STR_0(uint32_t i0) { return 0x00000314 + 0x40*i0; } 580static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x00000014 + 0x40*i0; }
581 581
582static inline uint32_t REG_DSI_8960_LN_TEST_STR_1(uint32_t i0) { return 0x00000318 + 0x40*i0; } 582static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000018 + 0x40*i0; }
583 583
584#define REG_DSI_8960_PHY_LNCK_CFG_0 0x00000400 584#define REG_DSI_28nm_8960_PHY_LNCK_CFG_0 0x00000100
585 585
586#define REG_DSI_8960_PHY_LNCK_CFG_1 0x00000404 586#define REG_DSI_28nm_8960_PHY_LNCK_CFG_1 0x00000104
587 587
588#define REG_DSI_8960_PHY_LNCK_CFG_2 0x00000408 588#define REG_DSI_28nm_8960_PHY_LNCK_CFG_2 0x00000108
589 589
590#define REG_DSI_8960_PHY_LNCK_TEST_DATAPATH 0x0000040c 590#define REG_DSI_28nm_8960_PHY_LNCK_TEST_DATAPATH 0x0000010c
591 591
592#define REG_DSI_8960_PHY_LNCK_TEST_STR0 0x00000414 592#define REG_DSI_28nm_8960_PHY_LNCK_TEST_STR0 0x00000114
593 593
594#define REG_DSI_8960_PHY_LNCK_TEST_STR1 0x00000418 594#define REG_DSI_28nm_8960_PHY_LNCK_TEST_STR1 0x00000118
595 595
596#define REG_DSI_8960_PHY_TIMING_CTRL_0 0x00000440 596#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_0 0x00000140
597#define DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff
598#define DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0
599static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
600{
601 return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
602}
603
604#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_1 0x00000144
605#define DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff
606#define DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0
607static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
608{
609 return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
610}
611
612#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_2 0x00000148
613#define DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff
614#define DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0
615static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
616{
617 return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
618}
619
620#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_3 0x0000014c
621
622#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_4 0x00000150
623#define DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
624#define DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0
625static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
626{
627 return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
628}
629
630#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_5 0x00000154
631#define DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
632#define DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0
633static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
634{
635 return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
636}
637
638#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_6 0x00000158
639#define DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
640#define DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
641static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
642{
643 return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
644}
645
646#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_7 0x0000015c
647#define DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
648#define DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
649static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
650{
651 return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
652}
653
654#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_8 0x00000160
655#define DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
656#define DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0
657static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
658{
659 return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__MASK;
660}
661
662#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_9 0x00000164
663#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007
664#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0
665static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
666{
667 return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__MASK;
668}
669#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
670#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4
671static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
672{
673 return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__MASK;
674}
675
676#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_10 0x00000168
677#define DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007
678#define DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0
679static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
680{
681 return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__MASK;
682}
683
684#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_11 0x0000016c
685#define DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
686#define DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
687static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
688{
689 return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
690}
691
692#define REG_DSI_28nm_8960_PHY_CTRL_0 0x00000170
693
694#define REG_DSI_28nm_8960_PHY_CTRL_1 0x00000174
695
696#define REG_DSI_28nm_8960_PHY_CTRL_2 0x00000178
697
698#define REG_DSI_28nm_8960_PHY_CTRL_3 0x0000017c
699
700#define REG_DSI_28nm_8960_PHY_STRENGTH_0 0x00000180
701
702#define REG_DSI_28nm_8960_PHY_STRENGTH_1 0x00000184
703
704#define REG_DSI_28nm_8960_PHY_STRENGTH_2 0x00000188
705
706#define REG_DSI_28nm_8960_PHY_BIST_CTRL_0 0x0000018c
707
708#define REG_DSI_28nm_8960_PHY_BIST_CTRL_1 0x00000190
709
710#define REG_DSI_28nm_8960_PHY_BIST_CTRL_2 0x00000194
711
712#define REG_DSI_28nm_8960_PHY_BIST_CTRL_3 0x00000198
713
714#define REG_DSI_28nm_8960_PHY_BIST_CTRL_4 0x0000019c
597 715
598#define REG_DSI_8960_PHY_TIMING_CTRL_1 0x00000444 716#define REG_DSI_28nm_8960_PHY_LDO_CTRL 0x000001b0
599 717
600#define REG_DSI_8960_PHY_TIMING_CTRL_2 0x00000448 718#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0 0x00000000
601 719
602#define REG_DSI_8960_PHY_TIMING_CTRL_3 0x0000044c 720#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1 0x00000004
603 721
604#define REG_DSI_8960_PHY_TIMING_CTRL_4 0x00000450 722#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2 0x00000008
605 723
606#define REG_DSI_8960_PHY_TIMING_CTRL_5 0x00000454 724#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3 0x0000000c
607 725
608#define REG_DSI_8960_PHY_TIMING_CTRL_6 0x00000458 726#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4 0x00000010
609 727
610#define REG_DSI_8960_PHY_TIMING_CTRL_7 0x0000045c 728#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_5 0x00000014
611 729
612#define REG_DSI_8960_PHY_TIMING_CTRL_8 0x00000460 730#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG 0x00000018
613 731
614#define REG_DSI_8960_PHY_TIMING_CTRL_9 0x00000464 732#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER 0x00000028
615 733
616#define REG_DSI_8960_PHY_TIMING_CTRL_10 0x00000468 734#define REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_0 0x0000002c
617 735
618#define REG_DSI_8960_PHY_TIMING_CTRL_11 0x0000046c 736#define REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_1 0x00000030
619 737
620#define REG_DSI_8960_PHY_CTRL_0 0x00000470 738#define REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2 0x00000034
621 739
622#define REG_DSI_8960_PHY_CTRL_1 0x00000474 740#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_0 0x00000038
623 741
624#define REG_DSI_8960_PHY_CTRL_2 0x00000478 742#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1 0x0000003c
625 743
626#define REG_DSI_8960_PHY_CTRL_3 0x0000047c 744#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_2 0x00000040
627 745
628#define REG_DSI_8960_PHY_STRENGTH_0 0x00000480 746#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_3 0x00000044
629 747
630#define REG_DSI_8960_PHY_STRENGTH_1 0x00000484 748#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_4 0x00000048
631 749
632#define REG_DSI_8960_PHY_STRENGTH_2 0x00000488 750#define REG_DSI_28nm_8960_PHY_MISC_CAL_STATUS 0x00000050
751#define DSI_28nm_8960_PHY_MISC_CAL_STATUS_CAL_BUSY 0x00000010
633 752
634#define REG_DSI_8960_PHY_BIST_CTRL_0 0x0000048c 753#define REG_DSI_28nm_8960_PHY_PLL_CTRL_0 0x00000000
754#define DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE 0x00000001
635 755
636#define REG_DSI_8960_PHY_BIST_CTRL_1 0x00000490 756#define REG_DSI_28nm_8960_PHY_PLL_CTRL_1 0x00000004
637 757
638#define REG_DSI_8960_PHY_BIST_CTRL_2 0x00000494 758#define REG_DSI_28nm_8960_PHY_PLL_CTRL_2 0x00000008
639 759
640#define REG_DSI_8960_PHY_BIST_CTRL_3 0x00000498 760#define REG_DSI_28nm_8960_PHY_PLL_CTRL_3 0x0000000c
641 761
642#define REG_DSI_8960_PHY_BIST_CTRL_4 0x0000049c 762#define REG_DSI_28nm_8960_PHY_PLL_CTRL_4 0x00000010
643 763
644#define REG_DSI_8960_PHY_LDO_CTRL 0x000004b0 764#define REG_DSI_28nm_8960_PHY_PLL_CTRL_5 0x00000014
645 765
646#define REG_DSI_8960_PHY_REGULATOR_CTRL_0 0x00000500 766#define REG_DSI_28nm_8960_PHY_PLL_CTRL_6 0x00000018
647 767
648#define REG_DSI_8960_PHY_REGULATOR_CTRL_1 0x00000504 768#define REG_DSI_28nm_8960_PHY_PLL_CTRL_7 0x0000001c
649 769
650#define REG_DSI_8960_PHY_REGULATOR_CTRL_2 0x00000508 770#define REG_DSI_28nm_8960_PHY_PLL_CTRL_8 0x00000020
651 771
652#define REG_DSI_8960_PHY_REGULATOR_CTRL_3 0x0000050c 772#define REG_DSI_28nm_8960_PHY_PLL_CTRL_9 0x00000024
653 773
654#define REG_DSI_8960_PHY_REGULATOR_CTRL_4 0x00000510 774#define REG_DSI_28nm_8960_PHY_PLL_CTRL_10 0x00000028
655 775
656#define REG_DSI_8960_PHY_REGULATOR_CAL_PWR_CFG 0x00000518 776#define REG_DSI_28nm_8960_PHY_PLL_CTRL_11 0x0000002c
657 777
658#define REG_DSI_8960_PHY_CAL_HW_TRIGGER 0x00000528 778#define REG_DSI_28nm_8960_PHY_PLL_CTRL_12 0x00000030
659 779
660#define REG_DSI_8960_PHY_CAL_SW_CFG_0 0x0000052c 780#define REG_DSI_28nm_8960_PHY_PLL_CTRL_13 0x00000034
661 781
662#define REG_DSI_8960_PHY_CAL_SW_CFG_1 0x00000530 782#define REG_DSI_28nm_8960_PHY_PLL_CTRL_14 0x00000038
663 783
664#define REG_DSI_8960_PHY_CAL_SW_CFG_2 0x00000534 784#define REG_DSI_28nm_8960_PHY_PLL_CTRL_15 0x0000003c
665 785
666#define REG_DSI_8960_PHY_CAL_HW_CFG_0 0x00000538 786#define REG_DSI_28nm_8960_PHY_PLL_CTRL_16 0x00000040
667 787
668#define REG_DSI_8960_PHY_CAL_HW_CFG_1 0x0000053c 788#define REG_DSI_28nm_8960_PHY_PLL_CTRL_17 0x00000044
669 789
670#define REG_DSI_8960_PHY_CAL_HW_CFG_2 0x00000540 790#define REG_DSI_28nm_8960_PHY_PLL_CTRL_18 0x00000048
671 791
672#define REG_DSI_8960_PHY_CAL_HW_CFG_3 0x00000544 792#define REG_DSI_28nm_8960_PHY_PLL_CTRL_19 0x0000004c
673 793
674#define REG_DSI_8960_PHY_CAL_HW_CFG_4 0x00000548 794#define REG_DSI_28nm_8960_PHY_PLL_CTRL_20 0x00000050
675 795
676#define REG_DSI_8960_PHY_CAL_STATUS 0x00000550 796#define REG_DSI_28nm_8960_PHY_PLL_RDY 0x00000080
677#define DSI_8960_PHY_CAL_STATUS_CAL_BUSY 0x00000010 797#define DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY 0x00000001
678 798
679static inline uint32_t REG_DSI_28nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; } 799static inline uint32_t REG_DSI_28nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
680 800
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 8d82973fe9db..4c49868efcda 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -278,7 +278,7 @@ static int dsi_regulator_init(struct msm_dsi_host *msm_host)
278 } 278 }
279 279
280 for (i = 0; i < num; i++) { 280 for (i = 0; i < num; i++) {
281 if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) { 281 if (regulator_can_change_voltage(s[i].consumer)) {
282 ret = regulator_set_voltage(s[i].consumer, 282 ret = regulator_set_voltage(s[i].consumer,
283 regs[i].min_voltage, regs[i].max_voltage); 283 regs[i].min_voltage, regs[i].max_voltage);
284 if (ret < 0) { 284 if (ret < 0) {
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 5de505e627be..80ec65e47468 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 401ff58d6893..f1f955f571fa 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -178,7 +178,7 @@ static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
178 } 178 }
179 179
180 for (i = 0; i < num; i++) { 180 for (i = 0; i < num; i++) {
181 if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) { 181 if (regulator_can_change_voltage(s[i].consumer)) {
182 ret = regulator_set_voltage(s[i].consumer, 182 ret = regulator_set_voltage(s[i].consumer,
183 regs[i].min_voltage, regs[i].max_voltage); 183 regs[i].min_voltage, regs[i].max_voltage);
184 if (ret < 0) { 184 if (ret < 0) {
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index f1a7c7b46420..edf74110ced7 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -99,16 +99,14 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
99 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0); 99 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
100 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0); 100 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
101 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0); 101 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
102 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(i), 0);
102 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0); 103 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
103 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0); 104 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
104 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1); 105 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
105 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97); 106 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
106 } 107 }
107 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0);
108 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5);
109 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa);
110 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf);
111 108
109 dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_4, 0);
112 dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0); 110 dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
113 dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1); 111 dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
114 dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb); 112 dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 06cbddfc914f..7d7662e69e11 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
@@ -45,7 +45,18 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
45*/ 45*/
46 46
47 47
48#define REG_SFPB_CFG 0x00000058 48enum sfpb_ahb_arb_master_port_en {
49 SFPB_MASTER_PORT_ENABLE = 3,
50 SFPB_MASTER_PORT_DISABLE = 0,
51};
52
53#define REG_SFPB_GPREG 0x00000058
54#define SFPB_GPREG_MASTER_PORT_EN__MASK 0x00001800
55#define SFPB_GPREG_MASTER_PORT_EN__SHIFT 11
56static inline uint32_t SFPB_GPREG_MASTER_PORT_EN(enum sfpb_ahb_arb_master_port_en val)
57{
58 return ((val) << SFPB_GPREG_MASTER_PORT_EN__SHIFT) & SFPB_GPREG_MASTER_PORT_EN__MASK;
59}
49 60
50 61
51#endif /* SFPB_XML */ 62#endif /* SFPB_XML */
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
index bef1d65fe28c..90bf5ed46746 100644
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 101b324cdeef..1f4a95eeb348 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -328,6 +328,9 @@ fail:
328 .item ## _names = item ##_names_ ## entry, \ 328 .item ## _names = item ##_names_ ## entry, \
329 .item ## _cnt = ARRAY_SIZE(item ## _names_ ## entry) 329 .item ## _cnt = ARRAY_SIZE(item ## _names_ ## entry)
330 330
331static const char *pwr_reg_names_none[] = {};
332static const char *hpd_reg_names_none[] = {};
333
331static struct hdmi_platform_config hdmi_tx_8660_config = { 334static struct hdmi_platform_config hdmi_tx_8660_config = {
332 .phy_init = hdmi_phy_8x60_init, 335 .phy_init = hdmi_phy_8x60_init,
333}; 336};
@@ -367,18 +370,26 @@ static struct hdmi_platform_config hdmi_tx_8084_config = {
367 .hpd_freq = hpd_clk_freq_8x74, 370 .hpd_freq = hpd_clk_freq_8x74,
368}; 371};
369 372
370static const char *hpd_reg_names_8x94[] = {};
371
372static struct hdmi_platform_config hdmi_tx_8994_config = { 373static struct hdmi_platform_config hdmi_tx_8994_config = {
373 .phy_init = NULL, /* nothing to do for this HDMI PHY 20nm */ 374 .phy_init = NULL, /* nothing to do for this HDMI PHY 20nm */
374 HDMI_CFG(pwr_reg, 8x74), 375 HDMI_CFG(pwr_reg, 8x74),
375 HDMI_CFG(hpd_reg, 8x94), 376 HDMI_CFG(hpd_reg, none),
377 HDMI_CFG(pwr_clk, 8x74),
378 HDMI_CFG(hpd_clk, 8x74),
379 .hpd_freq = hpd_clk_freq_8x74,
380};
381
382static struct hdmi_platform_config hdmi_tx_8996_config = {
383 .phy_init = NULL,
384 HDMI_CFG(pwr_reg, none),
385 HDMI_CFG(hpd_reg, none),
376 HDMI_CFG(pwr_clk, 8x74), 386 HDMI_CFG(pwr_clk, 8x74),
377 HDMI_CFG(hpd_clk, 8x74), 387 HDMI_CFG(hpd_clk, 8x74),
378 .hpd_freq = hpd_clk_freq_8x74, 388 .hpd_freq = hpd_clk_freq_8x74,
379}; 389};
380 390
381static const struct of_device_id dt_match[] = { 391static const struct of_device_id dt_match[] = {
392 { .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8996_config },
382 { .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config }, 393 { .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
383 { .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config }, 394 { .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
384 { .compatible = "qcom,hdmi-tx-8974", .data = &hdmi_tx_8974_config }, 395 { .compatible = "qcom,hdmi-tx-8974", .data = &hdmi_tx_8974_config },
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index 0b1b5586ff35..10c45700aefe 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index 2aa23b98f8aa..dbd9cc4daf2e 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 74b86734fef5..d5d94575fa1b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index e9dee367b597..30d57e74c42f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -99,22 +99,28 @@ static const struct drm_plane_funcs mdp4_plane_funcs = {
99}; 99};
100 100
101static int mdp4_plane_prepare_fb(struct drm_plane *plane, 101static int mdp4_plane_prepare_fb(struct drm_plane *plane,
102 struct drm_framebuffer *fb,
103 const struct drm_plane_state *new_state) 102 const struct drm_plane_state *new_state)
104{ 103{
105 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); 104 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
106 struct mdp4_kms *mdp4_kms = get_kms(plane); 105 struct mdp4_kms *mdp4_kms = get_kms(plane);
106 struct drm_framebuffer *fb = new_state->fb;
107
108 if (!fb)
109 return 0;
107 110
108 DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id); 111 DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
109 return msm_framebuffer_prepare(fb, mdp4_kms->id); 112 return msm_framebuffer_prepare(fb, mdp4_kms->id);
110} 113}
111 114
112static void mdp4_plane_cleanup_fb(struct drm_plane *plane, 115static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
113 struct drm_framebuffer *fb,
114 const struct drm_plane_state *old_state) 116 const struct drm_plane_state *old_state)
115{ 117{
116 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); 118 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
117 struct mdp4_kms *mdp4_kms = get_kms(plane); 119 struct mdp4_kms *mdp4_kms = get_kms(plane);
120 struct drm_framebuffer *fb = old_state->fb;
121
122 if (!fb)
123 return;
118 124
119 DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id); 125 DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
120 msm_framebuffer_cleanup(fb, mdp4_kms->id); 126 msm_framebuffer_cleanup(fb, mdp4_kms->id);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index 3469f50d5590..c37da9c61e29 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
@@ -895,6 +895,7 @@ static inline uint32_t MDP5_PIPE_SRC_OP_MODE_BWC(enum mdp5_pipe_bwc val)
895#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_1 0x00040000 895#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_1 0x00040000
896#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE 0x00400000 896#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE 0x00400000
897#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD 0x00800000 897#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD 0x00800000
898#define MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE 0x80000000
898 899
899static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000003c + __offset_PIPE(i0); } 900static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000003c + __offset_PIPE(i0); }
900 901
@@ -932,6 +933,83 @@ static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val)
932 return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK; 933 return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK;
933} 934}
934 935
936static inline uint32_t __offset_SW_PIX_EXT(enum mdp_component_type idx)
937{
938 switch (idx) {
939 case COMP_0: return 0x00000100;
940 case COMP_1_2: return 0x00000110;
941 case COMP_3: return 0x00000120;
942 default: return INVALID_IDX(idx);
943 }
944}
945static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000000 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); }
946
947static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_LR(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000000 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); }
948#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__MASK 0x000000ff
949#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__SHIFT 0
950static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT(uint32_t val)
951{
952 return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__MASK;
953}
954#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__MASK 0x0000ff00
955#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__SHIFT 8
956static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF(int32_t val)
957{
958 return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__MASK;
959}
960#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__MASK 0x00ff0000
961#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__SHIFT 16
962static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT(uint32_t val)
963{
964 return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__MASK;
965}
966#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__MASK 0xff000000
967#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__SHIFT 24
968static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF(int32_t val)
969{
970 return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__MASK;
971}
972
973static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_TB(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000004 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); }
974#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__MASK 0x000000ff
975#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__SHIFT 0
976static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT(uint32_t val)
977{
978 return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__MASK;
979}
980#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__MASK 0x0000ff00
981#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__SHIFT 8
982static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF(int32_t val)
983{
984 return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__MASK;
985}
986#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__MASK 0x00ff0000
987#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__SHIFT 16
988static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT(uint32_t val)
989{
990 return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__MASK;
991}
992#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__MASK 0xff000000
993#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__SHIFT 24
994static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF(int32_t val)
995{
996 return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__MASK;
997}
998
999static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000008 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); }
1000#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__MASK 0x0000ffff
1001#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__SHIFT 0
1002static inline uint32_t MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT(uint32_t val)
1003{
1004 return ((val) << MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__MASK;
1005}
1006#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__MASK 0xffff0000
1007#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__SHIFT 16
1008static inline uint32_t MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM(uint32_t val)
1009{
1010 return ((val) << MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__SHIFT) & MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__MASK;
1011}
1012
935static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00000204 + __offset_PIPE(i0); } 1013static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00000204 + __offset_PIPE(i0); }
936#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001 1014#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001
937#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002 1015#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index a1e26f23c7cc..bb1225aa2f75 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -27,6 +27,8 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
27 .mdp = { 27 .mdp = {
28 .count = 1, 28 .count = 1,
29 .base = { 0x00100 }, 29 .base = { 0x00100 },
30 .caps = MDP_CAP_SMP |
31 0,
30 }, 32 },
31 .smp = { 33 .smp = {
32 .mmb_count = 22, 34 .mmb_count = 22,
@@ -96,6 +98,8 @@ const struct mdp5_cfg_hw msm8x74v2_config = {
96 .mdp = { 98 .mdp = {
97 .count = 1, 99 .count = 1,
98 .base = { 0x00100 }, 100 .base = { 0x00100 },
101 .caps = MDP_CAP_SMP |
102 0,
99 }, 103 },
100 .smp = { 104 .smp = {
101 .mmb_count = 22, 105 .mmb_count = 22,
@@ -165,6 +169,8 @@ const struct mdp5_cfg_hw apq8084_config = {
165 .mdp = { 169 .mdp = {
166 .count = 1, 170 .count = 1,
167 .base = { 0x00100 }, 171 .base = { 0x00100 },
172 .caps = MDP_CAP_SMP |
173 0,
168 }, 174 },
169 .smp = { 175 .smp = {
170 .mmb_count = 44, 176 .mmb_count = 44,
@@ -242,6 +248,8 @@ const struct mdp5_cfg_hw msm8x16_config = {
242 .mdp = { 248 .mdp = {
243 .count = 1, 249 .count = 1,
244 .base = { 0x01000 }, 250 .base = { 0x01000 },
251 .caps = MDP_CAP_SMP |
252 0,
245 }, 253 },
246 .smp = { 254 .smp = {
247 .mmb_count = 8, 255 .mmb_count = 8,
@@ -301,6 +309,8 @@ const struct mdp5_cfg_hw msm8x94_config = {
301 .mdp = { 309 .mdp = {
302 .count = 1, 310 .count = 1,
303 .base = { 0x01000 }, 311 .base = { 0x01000 },
312 .caps = MDP_CAP_SMP |
313 0,
304 }, 314 },
305 .smp = { 315 .smp = {
306 .mmb_count = 44, 316 .mmb_count = 44,
@@ -370,7 +380,89 @@ const struct mdp5_cfg_hw msm8x94_config = {
370 [3] = INTF_HDMI, 380 [3] = INTF_HDMI,
371 }, 381 },
372 }, 382 },
373 .max_clk = 320000000, 383 .max_clk = 400000000,
384};
385
386const struct mdp5_cfg_hw msm8x96_config = {
387 .name = "msm8x96",
388 .mdp = {
389 .count = 1,
390 .base = { 0x01000 },
391 .caps = MDP_CAP_DSC |
392 MDP_CAP_CDM |
393 0,
394 },
395 .ctl = {
396 .count = 5,
397 .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
398 .flush_hw_mask = 0xf4ffffff,
399 },
400 .pipe_vig = {
401 .count = 4,
402 .base = { 0x05000, 0x07000, 0x09000, 0x0b000 },
403 .caps = MDP_PIPE_CAP_HFLIP |
404 MDP_PIPE_CAP_VFLIP |
405 MDP_PIPE_CAP_SCALE |
406 MDP_PIPE_CAP_CSC |
407 MDP_PIPE_CAP_DECIMATION |
408 MDP_PIPE_CAP_SW_PIX_EXT |
409 0,
410 },
411 .pipe_rgb = {
412 .count = 4,
413 .base = { 0x15000, 0x17000, 0x19000, 0x1b000 },
414 .caps = MDP_PIPE_CAP_HFLIP |
415 MDP_PIPE_CAP_VFLIP |
416 MDP_PIPE_CAP_SCALE |
417 MDP_PIPE_CAP_DECIMATION |
418 MDP_PIPE_CAP_SW_PIX_EXT |
419 0,
420 },
421 .pipe_dma = {
422 .count = 2,
423 .base = { 0x25000, 0x27000 },
424 .caps = MDP_PIPE_CAP_HFLIP |
425 MDP_PIPE_CAP_VFLIP |
426 MDP_PIPE_CAP_SW_PIX_EXT |
427 0,
428 },
429 .lm = {
430 .count = 6,
431 .base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 },
432 .nb_stages = 8,
433 .max_width = 2560,
434 .max_height = 0xFFFF,
435 },
436 .dspp = {
437 .count = 2,
438 .base = { 0x55000, 0x57000 },
439 },
440 .ad = {
441 .count = 3,
442 .base = { 0x79000, 0x79800, 0x7a000 },
443 },
444 .pp = {
445 .count = 4,
446 .base = { 0x71000, 0x71800, 0x72000, 0x72800 },
447 },
448 .cdm = {
449 .count = 1,
450 .base = { 0x7a200 },
451 },
452 .dsc = {
453 .count = 2,
454 .base = { 0x81000, 0x81400 },
455 },
456 .intf = {
457 .base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 },
458 .connect = {
459 [0] = INTF_DISABLED,
460 [1] = INTF_DSI,
461 [2] = INTF_DSI,
462 [3] = INTF_HDMI,
463 },
464 },
465 .max_clk = 412500000,
374}; 466};
375 467
376static const struct mdp5_cfg_handler cfg_handlers[] = { 468static const struct mdp5_cfg_handler cfg_handlers[] = {
@@ -379,6 +471,7 @@ static const struct mdp5_cfg_handler cfg_handlers[] = {
379 { .revision = 3, .config = { .hw = &apq8084_config } }, 471 { .revision = 3, .config = { .hw = &apq8084_config } },
380 { .revision = 6, .config = { .hw = &msm8x16_config } }, 472 { .revision = 6, .config = { .hw = &msm8x16_config } },
381 { .revision = 9, .config = { .hw = &msm8x94_config } }, 473 { .revision = 9, .config = { .hw = &msm8x94_config } },
474 { .revision = 7, .config = { .hw = &msm8x96_config } },
382}; 475};
383 476
384static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev); 477static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
index efb918d9f68b..050e1618c836 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -61,7 +61,12 @@ struct mdp5_smp_block {
61 int mmb_size; /* MMB: size in bytes */ 61 int mmb_size; /* MMB: size in bytes */
62 uint32_t clients[MAX_CLIENTS]; /* SMP port allocation /pipe */ 62 uint32_t clients[MAX_CLIENTS]; /* SMP port allocation /pipe */
63 mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */ 63 mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */
64 int reserved[MAX_CLIENTS]; /* # of MMBs allocated per client */ 64 uint8_t reserved[MAX_CLIENTS]; /* # of MMBs allocated per client */
65};
66
67struct mdp5_mdp_block {
68 MDP5_SUB_BLOCK_DEFINITION;
69 uint32_t caps; /* MDP capabilities: MDP_CAP_xxx bits */
65}; 70};
66 71
67#define MDP5_INTF_NUM_MAX 5 72#define MDP5_INTF_NUM_MAX 5
@@ -74,7 +79,7 @@ struct mdp5_intf_block {
74struct mdp5_cfg_hw { 79struct mdp5_cfg_hw {
75 char *name; 80 char *name;
76 81
77 struct mdp5_sub_block mdp; 82 struct mdp5_mdp_block mdp;
78 struct mdp5_smp_block smp; 83 struct mdp5_smp_block smp;
79 struct mdp5_ctl_block ctl; 84 struct mdp5_ctl_block ctl;
80 struct mdp5_pipe_block pipe_vig; 85 struct mdp5_pipe_block pipe_vig;
@@ -84,6 +89,8 @@ struct mdp5_cfg_hw {
84 struct mdp5_sub_block dspp; 89 struct mdp5_sub_block dspp;
85 struct mdp5_sub_block ad; 90 struct mdp5_sub_block ad;
86 struct mdp5_sub_block pp; 91 struct mdp5_sub_block pp;
92 struct mdp5_sub_block dsc;
93 struct mdp5_sub_block cdm;
87 struct mdp5_intf_block intf; 94 struct mdp5_intf_block intf;
88 95
89 uint32_t max_clk; 96 uint32_t max_clk;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 047cb0433ccb..b532faa8026d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -452,15 +452,19 @@ static void read_hw_revision(struct mdp5_kms *mdp5_kms,
452} 452}
453 453
454static int get_clk(struct platform_device *pdev, struct clk **clkp, 454static int get_clk(struct platform_device *pdev, struct clk **clkp,
455 const char *name) 455 const char *name, bool mandatory)
456{ 456{
457 struct device *dev = &pdev->dev; 457 struct device *dev = &pdev->dev;
458 struct clk *clk = devm_clk_get(dev, name); 458 struct clk *clk = devm_clk_get(dev, name);
459 if (IS_ERR(clk)) { 459 if (IS_ERR(clk) && mandatory) {
460 dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk)); 460 dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
461 return PTR_ERR(clk); 461 return PTR_ERR(clk);
462 } 462 }
463 *clkp = clk; 463 if (IS_ERR(clk))
464 DBG("skipping %s", name);
465 else
466 *clkp = clk;
467
464 return 0; 468 return 0;
465} 469}
466 470
@@ -514,25 +518,26 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
514 goto fail; 518 goto fail;
515 } 519 }
516 520
517 ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk"); 521 /* mandatory clocks: */
522 ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true);
518 if (ret) 523 if (ret)
519 goto fail; 524 goto fail;
520 ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk"); 525 ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);
521 if (ret) 526 if (ret)
522 goto fail; 527 goto fail;
523 ret = get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src"); 528 ret = get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src", true);
524 if (ret) 529 if (ret)
525 goto fail; 530 goto fail;
526 ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk"); 531 ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);
527 if (ret) 532 if (ret)
528 goto fail; 533 goto fail;
529 ret = get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk"); 534 ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true);
530 if (ret)
531 DBG("failed to get (optional) lut_clk clock");
532 ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk");
533 if (ret) 535 if (ret)
534 goto fail; 536 goto fail;
535 537
538 /* optional clocks: */
539 get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false);
540
536 /* we need to set a default rate before enabling. Set a safe 541 /* we need to set a default rate before enabling. Set a safe
537 * rate first, then figure out hw revision, and then set a 542 * rate first, then figure out hw revision, and then set a
538 * more optimal rate: 543 * more optimal rate:
@@ -549,15 +554,23 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
549 } 554 }
550 555
551 config = mdp5_cfg_get_config(mdp5_kms->cfg); 556 config = mdp5_cfg_get_config(mdp5_kms->cfg);
557 mdp5_kms->caps = config->hw->mdp.caps;
552 558
553 /* TODO: compute core clock rate at runtime */ 559 /* TODO: compute core clock rate at runtime */
554 clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk); 560 clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk);
555 561
556 mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp); 562 /*
557 if (IS_ERR(mdp5_kms->smp)) { 563 * Some chipsets have a Shared Memory Pool (SMP), while others
558 ret = PTR_ERR(mdp5_kms->smp); 564 * have dedicated latency buffering per source pipe instead;
559 mdp5_kms->smp = NULL; 565 * this section initializes the SMP:
560 goto fail; 566 */
567 if (mdp5_kms->caps & MDP_CAP_SMP) {
568 mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp);
569 if (IS_ERR(mdp5_kms->smp)) {
570 ret = PTR_ERR(mdp5_kms->smp);
571 mdp5_kms->smp = NULL;
572 goto fail;
573 }
561 } 574 }
562 575
563 mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg); 576 mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg);
@@ -586,6 +599,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
586 if (IS_ERR(mmu)) { 599 if (IS_ERR(mmu)) {
587 ret = PTR_ERR(mmu); 600 ret = PTR_ERR(mmu);
588 dev_err(dev->dev, "failed to init iommu: %d\n", ret); 601 dev_err(dev->dev, "failed to init iommu: %d\n", ret);
602 iommu_domain_free(config->platform.iommu);
589 goto fail; 603 goto fail;
590 } 604 }
591 605
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 0bb62423586e..84f65d415598 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -32,6 +32,8 @@ struct mdp5_kms {
32 struct drm_device *dev; 32 struct drm_device *dev;
33 33
34 struct mdp5_cfg_handler *cfg; 34 struct mdp5_cfg_handler *cfg;
35 uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */
36
35 37
36 /* mapper-id used to request GEM buffer mapped for scanout: */ 38 /* mapper-id used to request GEM buffer mapped for scanout: */
37 int id; 39 int id;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 07fb62fea6dc..81cd49045ffc 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -250,22 +250,28 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
250}; 250};
251 251
252static int mdp5_plane_prepare_fb(struct drm_plane *plane, 252static int mdp5_plane_prepare_fb(struct drm_plane *plane,
253 struct drm_framebuffer *fb,
254 const struct drm_plane_state *new_state) 253 const struct drm_plane_state *new_state)
255{ 254{
256 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 255 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
257 struct mdp5_kms *mdp5_kms = get_kms(plane); 256 struct mdp5_kms *mdp5_kms = get_kms(plane);
257 struct drm_framebuffer *fb = new_state->fb;
258
259 if (!new_state->fb)
260 return 0;
258 261
259 DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id); 262 DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
260 return msm_framebuffer_prepare(fb, mdp5_kms->id); 263 return msm_framebuffer_prepare(fb, mdp5_kms->id);
261} 264}
262 265
263static void mdp5_plane_cleanup_fb(struct drm_plane *plane, 266static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
264 struct drm_framebuffer *fb,
265 const struct drm_plane_state *old_state) 267 const struct drm_plane_state *old_state)
266{ 268{
267 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 269 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
268 struct mdp5_kms *mdp5_kms = get_kms(plane); 270 struct mdp5_kms *mdp5_kms = get_kms(plane);
271 struct drm_framebuffer *fb = old_state->fb;
272
273 if (!fb)
274 return;
269 275
270 DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id); 276 DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
271 msm_framebuffer_cleanup(fb, mdp5_kms->id); 277 msm_framebuffer_cleanup(fb, mdp5_kms->id);
@@ -494,7 +500,7 @@ static int calc_phase_step(uint32_t src, uint32_t dst, uint32_t *out_phase)
494 500
495static int calc_scalex_steps(struct drm_plane *plane, 501static int calc_scalex_steps(struct drm_plane *plane,
496 uint32_t pixel_format, uint32_t src, uint32_t dest, 502 uint32_t pixel_format, uint32_t src, uint32_t dest,
497 uint32_t phasex_steps[2]) 503 uint32_t phasex_steps[COMP_MAX])
498{ 504{
499 struct mdp5_kms *mdp5_kms = get_kms(plane); 505 struct mdp5_kms *mdp5_kms = get_kms(plane);
500 struct device *dev = mdp5_kms->dev->dev; 506 struct device *dev = mdp5_kms->dev->dev;
@@ -510,15 +516,16 @@ static int calc_scalex_steps(struct drm_plane *plane,
510 516
511 hsub = drm_format_horz_chroma_subsampling(pixel_format); 517 hsub = drm_format_horz_chroma_subsampling(pixel_format);
512 518
513 phasex_steps[0] = phasex_step; 519 phasex_steps[COMP_0] = phasex_step;
514 phasex_steps[1] = phasex_step / hsub; 520 phasex_steps[COMP_3] = phasex_step;
521 phasex_steps[COMP_1_2] = phasex_step / hsub;
515 522
516 return 0; 523 return 0;
517} 524}
518 525
519static int calc_scaley_steps(struct drm_plane *plane, 526static int calc_scaley_steps(struct drm_plane *plane,
520 uint32_t pixel_format, uint32_t src, uint32_t dest, 527 uint32_t pixel_format, uint32_t src, uint32_t dest,
521 uint32_t phasey_steps[2]) 528 uint32_t phasey_steps[COMP_MAX])
522{ 529{
523 struct mdp5_kms *mdp5_kms = get_kms(plane); 530 struct mdp5_kms *mdp5_kms = get_kms(plane);
524 struct device *dev = mdp5_kms->dev->dev; 531 struct device *dev = mdp5_kms->dev->dev;
@@ -534,46 +541,127 @@ static int calc_scaley_steps(struct drm_plane *plane,
534 541
535 vsub = drm_format_vert_chroma_subsampling(pixel_format); 542 vsub = drm_format_vert_chroma_subsampling(pixel_format);
536 543
537 phasey_steps[0] = phasey_step; 544 phasey_steps[COMP_0] = phasey_step;
538 phasey_steps[1] = phasey_step / vsub; 545 phasey_steps[COMP_3] = phasey_step;
546 phasey_steps[COMP_1_2] = phasey_step / vsub;
539 547
540 return 0; 548 return 0;
541} 549}
542 550
543static uint32_t get_scale_config(enum mdp_chroma_samp_type chroma_sample, 551static uint32_t get_scale_config(const struct mdp_format *format,
544 uint32_t src, uint32_t dest, bool hor) 552 uint32_t src, uint32_t dst, bool horz)
545{ 553{
546 uint32_t y_filter = (src <= dest) ? SCALE_FILTER_CA : SCALE_FILTER_PCMN; 554 bool scaling = format->is_yuv ? true : (src != dst);
547 uint32_t y_a_filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN; 555 uint32_t sub, pix_fmt = format->base.pixel_format;
548 uint32_t uv_filter = ((src / 2) <= dest) ? /* 2x upsample */ 556 uint32_t ya_filter, uv_filter;
549 SCALE_FILTER_BIL : SCALE_FILTER_PCMN; 557 bool yuv = format->is_yuv;
550 uint32_t value = 0; 558
551 559 if (!scaling)
552 if (chroma_sample == CHROMA_420 || chroma_sample == CHROMA_H2V1) { 560 return 0;
553 if (hor) 561
554 value = MDP5_PIPE_SCALE_CONFIG_SCALEX_EN | 562 if (yuv) {
555 MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(y_filter) | 563 sub = horz ? drm_format_horz_chroma_subsampling(pix_fmt) :
556 MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(y_a_filter) | 564 drm_format_vert_chroma_subsampling(pix_fmt);
557 MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(uv_filter); 565 uv_filter = ((src / sub) <= dst) ?
558 else 566 SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
559 value = MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
560 MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(y_filter) |
561 MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(y_a_filter) |
562 MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(uv_filter);
563 } else if (src != dest) {
564 if (hor)
565 value = MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
566 MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(y_a_filter) |
567 MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(y_a_filter);
568 else
569 value = MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
570 MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(y_a_filter) |
571 MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(y_a_filter);
572 } 567 }
568 ya_filter = (src <= dst) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
573 569
574 return value; 570 if (horz)
571 return MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
572 MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(ya_filter) |
573 MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(ya_filter) |
574 COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(uv_filter));
575 else
576 return MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
577 MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(ya_filter) |
578 MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(ya_filter) |
579 COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(uv_filter));
575} 580}
576 581
582static void calc_pixel_ext(const struct mdp_format *format,
583 uint32_t src, uint32_t dst, uint32_t phase_step[2],
584 int pix_ext_edge1[COMP_MAX], int pix_ext_edge2[COMP_MAX],
585 bool horz)
586{
587 bool scaling = format->is_yuv ? true : (src != dst);
588 int i;
589
590 /*
591 * Note:
592 * We assume here that:
593 * 1. PCMN filter is used for downscale
594 * 2. bilinear filter is used for upscale
595 * 3. we are in a single pipe configuration
596 */
597
598 for (i = 0; i < COMP_MAX; i++) {
599 pix_ext_edge1[i] = 0;
600 pix_ext_edge2[i] = scaling ? 1 : 0;
601 }
602}
603
604static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
605 const struct mdp_format *format,
606 uint32_t src_w, int pe_left[COMP_MAX], int pe_right[COMP_MAX],
607 uint32_t src_h, int pe_top[COMP_MAX], int pe_bottom[COMP_MAX])
608{
609 uint32_t pix_fmt = format->base.pixel_format;
610 uint32_t lr, tb, req;
611 int i;
612
613 for (i = 0; i < COMP_MAX; i++) {
614 uint32_t roi_w = src_w;
615 uint32_t roi_h = src_h;
616
617 if (format->is_yuv && i == COMP_1_2) {
618 roi_w /= drm_format_horz_chroma_subsampling(pix_fmt);
619 roi_h /= drm_format_vert_chroma_subsampling(pix_fmt);
620 }
621
622 lr = (pe_left[i] >= 0) ?
623 MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT(pe_left[i]) :
624 MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF(pe_left[i]);
625
626 lr |= (pe_right[i] >= 0) ?
627 MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT(pe_right[i]) :
628 MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF(pe_right[i]);
629
630 tb = (pe_top[i] >= 0) ?
631 MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT(pe_top[i]) :
632 MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF(pe_top[i]);
633
634 tb |= (pe_bottom[i] >= 0) ?
635 MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT(pe_bottom[i]) :
636 MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF(pe_bottom[i]);
637
638 req = MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT(roi_w +
639 pe_left[i] + pe_right[i]);
640
641 req |= MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM(roi_h +
642 pe_top[i] + pe_bottom[i]);
643
644 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_LR(pipe, i), lr);
645 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_TB(pipe, i), tb);
646 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS(pipe, i), req);
647
648 DBG("comp-%d (L/R): rpt=%d/%d, ovf=%d/%d, req=%d", i,
649 FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT),
650 FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT),
651 FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF),
652 FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF),
653 FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT));
654
655 DBG("comp-%d (T/B): rpt=%d/%d, ovf=%d/%d, req=%d", i,
656 FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT),
657 FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT),
658 FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF),
659 FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF),
660 FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM));
661 }
662}
663
664
577static int mdp5_plane_mode_set(struct drm_plane *plane, 665static int mdp5_plane_mode_set(struct drm_plane *plane,
578 struct drm_crtc *crtc, struct drm_framebuffer *fb, 666 struct drm_crtc *crtc, struct drm_framebuffer *fb,
579 int crtc_x, int crtc_y, 667 int crtc_x, int crtc_y,
@@ -587,8 +675,10 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
587 enum mdp5_pipe pipe = mdp5_plane->pipe; 675 enum mdp5_pipe pipe = mdp5_plane->pipe;
588 const struct mdp_format *format; 676 const struct mdp_format *format;
589 uint32_t nplanes, config = 0; 677 uint32_t nplanes, config = 0;
590 /* below array -> index 0: comp 0/3 ; index 1: comp 1/2 */ 678 uint32_t phasex_step[COMP_MAX] = {0,}, phasey_step[COMP_MAX] = {0,};
591 uint32_t phasex_step[2] = {0,}, phasey_step[2] = {0,}; 679 bool pe = mdp5_plane->caps & MDP_PIPE_CAP_SW_PIX_EXT;
680 int pe_left[COMP_MAX], pe_right[COMP_MAX];
681 int pe_top[COMP_MAX], pe_bottom[COMP_MAX];
592 uint32_t hdecm = 0, vdecm = 0; 682 uint32_t hdecm = 0, vdecm = 0;
593 uint32_t pix_format; 683 uint32_t pix_format;
594 bool vflip, hflip; 684 bool vflip, hflip;
@@ -615,10 +705,12 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
615 crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h); 705 crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
616 706
617 /* Request some memory from the SMP: */ 707 /* Request some memory from the SMP: */
618 ret = mdp5_smp_request(mdp5_kms->smp, 708 if (mdp5_kms->smp) {
619 mdp5_plane->pipe, format, src_w, false); 709 ret = mdp5_smp_request(mdp5_kms->smp,
620 if (ret) 710 mdp5_plane->pipe, format, src_w, false);
621 return ret; 711 if (ret)
712 return ret;
713 }
622 714
623 /* 715 /*
624 * Currently we update the hw for allocations/requests immediately, 716 * Currently we update the hw for allocations/requests immediately,
@@ -626,7 +718,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
626 * would move into atomic->check_plane_state(), while updating the 718 * would move into atomic->check_plane_state(), while updating the
627 * hw would remain here: 719 * hw would remain here:
628 */ 720 */
629 mdp5_smp_configure(mdp5_kms->smp, pipe); 721 if (mdp5_kms->smp)
722 mdp5_smp_configure(mdp5_kms->smp, pipe);
630 723
631 ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, phasex_step); 724 ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, phasex_step);
632 if (ret) 725 if (ret)
@@ -636,11 +729,18 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
636 if (ret) 729 if (ret)
637 return ret; 730 return ret;
638 731
732 if (mdp5_plane->caps & MDP_PIPE_CAP_SW_PIX_EXT) {
733 calc_pixel_ext(format, src_w, crtc_w, phasex_step,
734 pe_left, pe_right, true);
735 calc_pixel_ext(format, src_h, crtc_h, phasey_step,
736 pe_top, pe_bottom, false);
737 }
738
639 /* TODO calc hdecm, vdecm */ 739 /* TODO calc hdecm, vdecm */
640 740
641 /* SCALE is used to both scale and up-sample chroma components */ 741 /* SCALE is used to both scale and up-sample chroma components */
642 config |= get_scale_config(format->chroma_sample, src_w, crtc_w, true); 742 config |= get_scale_config(format, src_w, crtc_w, true);
643 config |= get_scale_config(format->chroma_sample, src_h, crtc_h, false); 743 config |= get_scale_config(format, src_h, crtc_h, false);
644 DBG("scale config = %x", config); 744 DBG("scale config = %x", config);
645 745
646 hflip = !!(pstate->rotation & BIT(DRM_REFLECT_X)); 746 hflip = !!(pstate->rotation & BIT(DRM_REFLECT_X));
@@ -689,20 +789,26 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
689 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe), 789 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
690 (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) | 790 (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
691 (vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) | 791 (vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
792 COND(pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) |
692 MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS)); 793 MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
693 794
694 /* not using secure mode: */ 795 /* not using secure mode: */
695 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0); 796 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
696 797
798 if (mdp5_plane->caps & MDP_PIPE_CAP_SW_PIX_EXT)
799 mdp5_write_pixel_ext(mdp5_kms, pipe, format,
800 src_w, pe_left, pe_right,
801 src_h, pe_top, pe_bottom);
802
697 if (mdp5_plane->caps & MDP_PIPE_CAP_SCALE) { 803 if (mdp5_plane->caps & MDP_PIPE_CAP_SCALE) {
698 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), 804 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
699 phasex_step[0]); 805 phasex_step[COMP_0]);
700 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), 806 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
701 phasey_step[0]); 807 phasey_step[COMP_0]);
702 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe), 808 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
703 phasex_step[1]); 809 phasex_step[COMP_1_2]);
704 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe), 810 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
705 phasey_step[1]); 811 phasey_step[COMP_1_2]);
706 mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe), 812 mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
707 MDP5_PIPE_DECIMATION_VERT(vdecm) | 813 MDP5_PIPE_DECIMATION_VERT(vdecm) |
708 MDP5_PIPE_DECIMATION_HORZ(hdecm)); 814 MDP5_PIPE_DECIMATION_HORZ(hdecm));
@@ -732,7 +838,8 @@ void mdp5_plane_complete_flip(struct drm_plane *plane)
732 838
733 DBG("%s: complete flip", mdp5_plane->name); 839 DBG("%s: complete flip", mdp5_plane->name);
734 840
735 mdp5_smp_commit(mdp5_kms->smp, pipe); 841 if (mdp5_kms->smp)
842 mdp5_smp_commit(mdp5_kms->smp, pipe);
736 843
737 to_mdp5_plane_state(plane->state)->pending = false; 844 to_mdp5_plane_state(plane->state)->pending = false;
738} 845}
@@ -758,7 +865,7 @@ void mdp5_plane_complete_commit(struct drm_plane *plane,
758 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 865 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
759 enum mdp5_pipe pipe = mdp5_plane->pipe; 866 enum mdp5_pipe pipe = mdp5_plane->pipe;
760 867
761 if (!plane_enabled(plane->state)) { 868 if (!plane_enabled(plane->state) && mdp5_kms->smp) {
762 DBG("%s: free SMP", mdp5_plane->name); 869 DBG("%s: free SMP", mdp5_plane->name);
763 mdp5_smp_release(mdp5_kms->smp, pipe); 870 mdp5_smp_release(mdp5_kms->smp, pipe);
764 } 871 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 563cca972dcb..6f425c25d9fe 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -90,7 +90,7 @@
90struct mdp5_smp { 90struct mdp5_smp {
91 struct drm_device *dev; 91 struct drm_device *dev;
92 92
93 const struct mdp5_smp_block *cfg; 93 uint8_t reserved[MAX_CLIENTS]; /* fixed MMBs allocation per client */
94 94
95 int blk_cnt; 95 int blk_cnt;
96 int blk_size; 96 int blk_size;
@@ -141,10 +141,10 @@ static int smp_request_block(struct mdp5_smp *smp,
141 struct mdp5_kms *mdp5_kms = get_kms(smp); 141 struct mdp5_kms *mdp5_kms = get_kms(smp);
142 struct mdp5_client_smp_state *ps = &smp->client_state[cid]; 142 struct mdp5_client_smp_state *ps = &smp->client_state[cid];
143 int i, ret, avail, cur_nblks, cnt = smp->blk_cnt; 143 int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
144 int reserved; 144 uint8_t reserved;
145 unsigned long flags; 145 unsigned long flags;
146 146
147 reserved = smp->cfg->reserved[cid]; 147 reserved = smp->reserved[cid];
148 148
149 spin_lock_irqsave(&smp->state_lock, flags); 149 spin_lock_irqsave(&smp->state_lock, flags);
150 150
@@ -405,12 +405,12 @@ struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_blo
405 } 405 }
406 406
407 smp->dev = dev; 407 smp->dev = dev;
408 smp->cfg = cfg;
409 smp->blk_cnt = cfg->mmb_count; 408 smp->blk_cnt = cfg->mmb_count;
410 smp->blk_size = cfg->mmb_size; 409 smp->blk_size = cfg->mmb_size;
411 410
412 /* statically tied MMBs cannot be re-allocated: */ 411 /* statically tied MMBs cannot be re-allocated: */
413 bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt); 412 bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt);
413 memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));
414 spin_lock_init(&smp->state_lock); 414 spin_lock_init(&smp->state_lock);
415 415
416 return smp; 416 return smp;
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
index 4f792c4e40f4..0aec1ac1f6d0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
@@ -78,6 +78,13 @@ enum mdp_alpha_type {
78 BG_PIXEL = 3, 78 BG_PIXEL = 3,
79}; 79};
80 80
81enum mdp_component_type {
82 COMP_0 = 0,
83 COMP_1_2 = 1,
84 COMP_3 = 2,
85 COMP_MAX = 3,
86};
87
81enum mdp_bpc { 88enum mdp_bpc {
82 BPC1 = 0, 89 BPC1 = 0,
83 BPC5 = 1, 90 BPC5 = 1,
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index 46a94e7d50e2..303130320748 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -100,12 +100,18 @@ struct mdp_format {
100uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only); 100uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only);
101const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format); 101const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
102 102
103/* MDP capabilities */
104#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */
105#define MDP_CAP_DSC BIT(1) /* VESA Display Stream Compression */
106#define MDP_CAP_CDM BIT(2) /* Chroma Down Module (HDMI 2.0 YUV) */
107
103/* MDP pipe capabilities */ 108/* MDP pipe capabilities */
104#define MDP_PIPE_CAP_HFLIP BIT(0) 109#define MDP_PIPE_CAP_HFLIP BIT(0)
105#define MDP_PIPE_CAP_VFLIP BIT(1) 110#define MDP_PIPE_CAP_VFLIP BIT(1)
106#define MDP_PIPE_CAP_SCALE BIT(2) 111#define MDP_PIPE_CAP_SCALE BIT(2)
107#define MDP_PIPE_CAP_CSC BIT(3) 112#define MDP_PIPE_CAP_CSC BIT(3)
108#define MDP_PIPE_CAP_DECIMATION BIT(4) 113#define MDP_PIPE_CAP_DECIMATION BIT(4)
114#define MDP_PIPE_CAP_SW_PIX_EXT BIT(5)
109 115
110static inline bool pipe_supports_yuv(uint32_t pipe_caps) 116static inline bool pipe_supports_yuv(uint32_t pipe_caps)
111{ 117{
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 1ceb4f22dd89..7eb253bc24df 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -125,7 +125,7 @@ static void complete_commit(struct msm_commit *c)
125 125
126 drm_atomic_helper_commit_modeset_disables(dev, state); 126 drm_atomic_helper_commit_modeset_disables(dev, state);
127 127
128 drm_atomic_helper_commit_planes(dev, state); 128 drm_atomic_helper_commit_planes(dev, state, false);
129 129
130 drm_atomic_helper_commit_modeset_enables(dev, state); 130 drm_atomic_helper_commit_modeset_enables(dev, state);
131 131
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 0339c5d82d37..b88ce514eb8e 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -21,11 +21,9 @@
21 21
22static void msm_fb_output_poll_changed(struct drm_device *dev) 22static void msm_fb_output_poll_changed(struct drm_device *dev)
23{ 23{
24#ifdef CONFIG_DRM_MSM_FBDEV
25 struct msm_drm_private *priv = dev->dev_private; 24 struct msm_drm_private *priv = dev->dev_private;
26 if (priv->fbdev) 25 if (priv->fbdev)
27 drm_fb_helper_hotplug_event(priv->fbdev); 26 drm_fb_helper_hotplug_event(priv->fbdev);
28#endif
29} 27}
30 28
31static const struct drm_mode_config_funcs mode_config_funcs = { 29static const struct drm_mode_config_funcs mode_config_funcs = {
@@ -56,7 +54,7 @@ module_param(reglog, bool, 0600);
56#define reglog 0 54#define reglog 0
57#endif 55#endif
58 56
59#ifdef CONFIG_DRM_MSM_FBDEV 57#ifdef CONFIG_DRM_FBDEV_EMULATION
60static bool fbdev = true; 58static bool fbdev = true;
61MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer"); 59MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
62module_param(fbdev, bool, 0600); 60module_param(fbdev, bool, 0600);
@@ -423,7 +421,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
423 421
424 drm_mode_config_reset(dev); 422 drm_mode_config_reset(dev);
425 423
426#ifdef CONFIG_DRM_MSM_FBDEV 424#ifdef CONFIG_DRM_FBDEV_EMULATION
427 if (fbdev) 425 if (fbdev)
428 priv->fbdev = msm_fbdev_init(dev); 426 priv->fbdev = msm_fbdev_init(dev);
429#endif 427#endif
@@ -491,11 +489,9 @@ static void msm_preclose(struct drm_device *dev, struct drm_file *file)
491 489
492static void msm_lastclose(struct drm_device *dev) 490static void msm_lastclose(struct drm_device *dev)
493{ 491{
494#ifdef CONFIG_DRM_MSM_FBDEV
495 struct msm_drm_private *priv = dev->dev_private; 492 struct msm_drm_private *priv = dev->dev_private;
496 if (priv->fbdev) 493 if (priv->fbdev)
497 drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev); 494 drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
498#endif
499} 495}
500 496
501static irqreturn_t msm_irq(int irq, void *arg) 497static irqreturn_t msm_irq(int irq, void *arg)
@@ -531,24 +527,24 @@ static void msm_irq_uninstall(struct drm_device *dev)
531 kms->funcs->irq_uninstall(kms); 527 kms->funcs->irq_uninstall(kms);
532} 528}
533 529
534static int msm_enable_vblank(struct drm_device *dev, int crtc_id) 530static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
535{ 531{
536 struct msm_drm_private *priv = dev->dev_private; 532 struct msm_drm_private *priv = dev->dev_private;
537 struct msm_kms *kms = priv->kms; 533 struct msm_kms *kms = priv->kms;
538 if (!kms) 534 if (!kms)
539 return -ENXIO; 535 return -ENXIO;
540 DBG("dev=%p, crtc=%d", dev, crtc_id); 536 DBG("dev=%p, crtc=%u", dev, pipe);
541 return vblank_ctrl_queue_work(priv, crtc_id, true); 537 return vblank_ctrl_queue_work(priv, pipe, true);
542} 538}
543 539
544static void msm_disable_vblank(struct drm_device *dev, int crtc_id) 540static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
545{ 541{
546 struct msm_drm_private *priv = dev->dev_private; 542 struct msm_drm_private *priv = dev->dev_private;
547 struct msm_kms *kms = priv->kms; 543 struct msm_kms *kms = priv->kms;
548 if (!kms) 544 if (!kms)
549 return; 545 return;
550 DBG("dev=%p, crtc=%d", dev, crtc_id); 546 DBG("dev=%p, crtc=%u", dev, pipe);
551 vblank_ctrl_queue_work(priv, crtc_id, false); 547 vblank_ctrl_queue_work(priv, pipe, false);
552} 548}
553 549
554/* 550/*
@@ -932,13 +928,13 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
932} 928}
933 929
934static const struct drm_ioctl_desc msm_ioctls[] = { 930static const struct drm_ioctl_desc msm_ioctls[] = {
935 DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), 931 DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
936 DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), 932 DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
937 DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), 933 DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
938 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), 934 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
939 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), 935 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
940 DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), 936 DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
941 DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), 937 DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
942}; 938};
943 939
944static const struct vm_operations_struct vm_ops = { 940static const struct vm_operations_struct vm_ops = {
@@ -978,7 +974,7 @@ static struct drm_driver msm_driver = {
978 .irq_preinstall = msm_irq_preinstall, 974 .irq_preinstall = msm_irq_preinstall,
979 .irq_postinstall = msm_irq_postinstall, 975 .irq_postinstall = msm_irq_postinstall,
980 .irq_uninstall = msm_irq_uninstall, 976 .irq_uninstall = msm_irq_uninstall,
981 .get_vblank_counter = drm_vblank_count, 977 .get_vblank_counter = drm_vblank_no_hw_counter,
982 .enable_vblank = msm_enable_vblank, 978 .enable_vblank = msm_enable_vblank,
983 .disable_vblank = msm_disable_vblank, 979 .disable_vblank = msm_disable_vblank,
984 .gem_free_object = msm_gem_free_object, 980 .gem_free_object = msm_gem_free_object,
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index f97a1964ef39..3f6ec077b51d 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -68,12 +68,7 @@ static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
68 if (drm_device_is_unplugged(dev)) 68 if (drm_device_is_unplugged(dev))
69 return -ENODEV; 69 return -ENODEV;
70 70
71 mutex_lock(&dev->struct_mutex);
72
73 ret = drm_gem_mmap_obj(drm_obj, drm_obj->size, vma); 71 ret = drm_gem_mmap_obj(drm_obj, drm_obj->size, vma);
74
75 mutex_unlock(&dev->struct_mutex);
76
77 if (ret) { 72 if (ret) {
78 pr_err("%s:drm_gem_mmap_obj fail\n", __func__); 73 pr_err("%s:drm_gem_mmap_obj fail\n", __func__);
79 return ret; 74 return ret;
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 831461bc98a5..121975b07cd4 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -45,9 +45,7 @@ int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
45{ 45{
46 int ret; 46 int ret;
47 47
48 mutex_lock(&obj->dev->struct_mutex);
49 ret = drm_gem_mmap_obj(obj, obj->size, vma); 48 ret = drm_gem_mmap_obj(obj, obj->size, vma);
50 mutex_unlock(&obj->dev->struct_mutex);
51 if (ret < 0) 49 if (ret < 0)
52 return ret; 50 return ret;
53 51
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 8f70d9248ac5..6b02ada6579a 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -651,6 +651,14 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
651 if (iommu) { 651 if (iommu) {
652 dev_info(drm->dev, "%s: using IOMMU\n", name); 652 dev_info(drm->dev, "%s: using IOMMU\n", name);
653 gpu->mmu = msm_iommu_new(&pdev->dev, iommu); 653 gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
654 if (IS_ERR(gpu->mmu)) {
655 ret = PTR_ERR(gpu->mmu);
656 dev_err(drm->dev, "failed to init iommu: %d\n", ret);
657 gpu->mmu = NULL;
658 iommu_domain_free(iommu);
659 goto fail;
660 }
661
654 } else { 662 } else {
655 dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name); 663 dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
656 } 664 }
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
index 08c6f5e50610..903c473d266f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
@@ -32,7 +32,7 @@
32#include "hw.h" 32#include "hw.h"
33#include "tvnv17.h" 33#include "tvnv17.h"
34 34
35char *nv17_tv_norm_names[NUM_TV_NORMS] = { 35const char * const nv17_tv_norm_names[NUM_TV_NORMS] = {
36 [TV_NORM_PAL] = "PAL", 36 [TV_NORM_PAL] = "PAL",
37 [TV_NORM_PAL_M] = "PAL-M", 37 [TV_NORM_PAL_M] = "PAL-M",
38 [TV_NORM_PAL_N] = "PAL-N", 38 [TV_NORM_PAL_N] = "PAL-N",
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
index 459910b6bb32..1b07521cde0d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
@@ -85,7 +85,7 @@ struct nv17_tv_encoder {
85#define to_tv_enc(x) container_of(nouveau_encoder(x), \ 85#define to_tv_enc(x) container_of(nouveau_encoder(x), \
86 struct nv17_tv_encoder, base) 86 struct nv17_tv_encoder, base)
87 87
88extern char *nv17_tv_norm_names[NUM_TV_NORMS]; 88extern const char * const nv17_tv_norm_names[NUM_TV_NORMS];
89 89
90extern struct nv17_tv_norm_params { 90extern struct nv17_tv_norm_params {
91 enum { 91 enum {
diff --git a/drivers/gpu/drm/nouveau/include/nvif/os.h b/drivers/gpu/drm/nouveau/include/nvif/os.h
index 3accc99d8e0b..9fcab67c8557 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/os.h
@@ -27,6 +27,7 @@
27#include <linux/agp_backend.h> 27#include <linux/agp_backend.h>
28#include <linux/reset.h> 28#include <linux/reset.h>
29#include <linux/iommu.h> 29#include <linux/iommu.h>
30#include <linux/of_device.h>
30 31
31#include <asm/unaligned.h> 32#include <asm/unaligned.h>
32 33
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
index 5aa2480da25f..16641cec18a2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -4,6 +4,7 @@
4#include <core/mm.h> 4#include <core/mm.h>
5 5
6struct nvkm_device_tegra { 6struct nvkm_device_tegra {
7 const struct nvkm_device_tegra_func *func;
7 struct nvkm_device device; 8 struct nvkm_device device;
8 struct platform_device *pdev; 9 struct platform_device *pdev;
9 int irq; 10 int irq;
@@ -28,7 +29,17 @@ struct nvkm_device_tegra {
28 int gpu_speedo; 29 int gpu_speedo;
29}; 30};
30 31
31int nvkm_device_tegra_new(struct platform_device *, 32struct nvkm_device_tegra_func {
33 /*
34 * If an IOMMU is used, indicates which address bit will trigger a
35 * IOMMU translation when set (when this bit is not set, IOMMU is
36 * bypassed). A value of 0 means an IOMMU is never used.
37 */
38 u8 iommu_bit;
39};
40
41int nvkm_device_tegra_new(const struct nvkm_device_tegra_func *,
42 struct platform_device *,
32 const char *cfg, const char *dbg, 43 const char *cfg, const char *dbg,
33 bool detect, bool mmio, u64 subdev_mask, 44 bool detect, bool mmio, u64 subdev_mask,
34 struct nvkm_device **); 45 struct nvkm_device **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
index 33be260ddd38..a47d46dda704 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
@@ -15,6 +15,7 @@ enum dcb_gpio_func_name {
15 DCB_GPIO_VID5 = 0x74, 15 DCB_GPIO_VID5 = 0x74,
16 DCB_GPIO_VID6 = 0x75, 16 DCB_GPIO_VID6 = 0x75,
17 DCB_GPIO_VID7 = 0x76, 17 DCB_GPIO_VID7 = 0x76,
18 DCB_GPIO_VID_PWM = 0x81,
18}; 19};
19 20
20#define DCB_GPIO_LOG_DIR 0x02 21#define DCB_GPIO_LOG_DIR 0x02
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h
index d606875c125a..3a643df6de04 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h
@@ -4,8 +4,6 @@ struct nvbios_pmuT {
4}; 4};
5 5
6u32 nvbios_pmuTe(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len); 6u32 nvbios_pmuTe(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
7u32 nvbios_pmuTp(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
8 struct nvbios_pmuT *);
9 7
10struct nvbios_pmuE { 8struct nvbios_pmuE {
11 u8 type; 9 u8 type;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
index 3a9abd38aca8..dca6c060a24f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
@@ -39,6 +39,7 @@ struct nvbios_ramcfg {
39 unsigned ramcfg_timing; 39 unsigned ramcfg_timing;
40 unsigned ramcfg_DLLoff; 40 unsigned ramcfg_DLLoff;
41 unsigned ramcfg_RON; 41 unsigned ramcfg_RON;
42 unsigned ramcfg_FBVDDQ;
42 union { 43 union {
43 struct { 44 struct {
44 unsigned ramcfg_00_03_01:1; 45 unsigned ramcfg_00_03_01:1;
@@ -78,7 +79,6 @@ struct nvbios_ramcfg {
78 unsigned ramcfg_11_01_04:1; 79 unsigned ramcfg_11_01_04:1;
79 unsigned ramcfg_11_01_08:1; 80 unsigned ramcfg_11_01_08:1;
80 unsigned ramcfg_11_01_10:1; 81 unsigned ramcfg_11_01_10:1;
81 unsigned ramcfg_11_01_20:1;
82 unsigned ramcfg_11_01_40:1; 82 unsigned ramcfg_11_01_40:1;
83 unsigned ramcfg_11_01_80:1; 83 unsigned ramcfg_11_01_80:1;
84 unsigned ramcfg_11_02_03:2; 84 unsigned ramcfg_11_02_03:2;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
index eb2de4b85bbd..b0df610cec2b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
@@ -1,11 +1,24 @@
1#ifndef __NVBIOS_VOLT_H__ 1#ifndef __NVBIOS_VOLT_H__
2#define __NVBIOS_VOLT_H__ 2#define __NVBIOS_VOLT_H__
3
4enum nvbios_volt_type {
5 NVBIOS_VOLT_GPIO = 0,
6 NVBIOS_VOLT_PWM,
7};
8
3struct nvbios_volt { 9struct nvbios_volt {
4 u8 vidmask; 10 enum nvbios_volt_type type;
5 u32 min; 11 u32 min;
6 u32 max; 12 u32 max;
7 u32 base; 13 u32 base;
14
15 /* GPIO mode */
16 u8 vidmask;
8 s16 step; 17 s16 step;
18
19 /* PWM mode */
20 u32 pwm_freq;
21 u32 pwm_range;
9}; 22};
10 23
11u16 nvbios_volt_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len); 24u16 nvbios_volt_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
index 6a04d9c07944..33a057c334f2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
@@ -14,6 +14,7 @@ int nvkm_hwsq_fini(struct nvkm_hwsq **, bool exec);
14void nvkm_hwsq_wr32(struct nvkm_hwsq *, u32 addr, u32 data); 14void nvkm_hwsq_wr32(struct nvkm_hwsq *, u32 addr, u32 data);
15void nvkm_hwsq_setf(struct nvkm_hwsq *, u8 flag, int data); 15void nvkm_hwsq_setf(struct nvkm_hwsq *, u8 flag, int data);
16void nvkm_hwsq_wait(struct nvkm_hwsq *, u8 flag, u8 data); 16void nvkm_hwsq_wait(struct nvkm_hwsq *, u8 flag, u8 data);
17void nvkm_hwsq_wait_vblank(struct nvkm_hwsq *);
17void nvkm_hwsq_nsec(struct nvkm_hwsq *, u32 nsec); 18void nvkm_hwsq_nsec(struct nvkm_hwsq *, u32 nsec);
18 19
19int nv04_bus_new(struct nvkm_device *, int, struct nvkm_bus **); 20int nv04_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
index 9d512cd5a0a7..c4dcd2680fe1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
@@ -3,6 +3,7 @@
3#include <core/subdev.h> 3#include <core/subdev.h>
4 4
5int gf100_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **); 5int gf100_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
6int gf117_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
6int gk104_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **); 7int gk104_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
7int gk20a_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **); 8int gk20a_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
8#endif 9#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index c773b5e958b4..3d4dbbf9aab3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -30,7 +30,11 @@ void nvkm_ltc_tags_clear(struct nvkm_ltc *, u32 first, u32 count);
30int nvkm_ltc_zbc_color_get(struct nvkm_ltc *, int index, const u32[4]); 30int nvkm_ltc_zbc_color_get(struct nvkm_ltc *, int index, const u32[4]);
31int nvkm_ltc_zbc_depth_get(struct nvkm_ltc *, int index, const u32); 31int nvkm_ltc_zbc_depth_get(struct nvkm_ltc *, int index, const u32);
32 32
33void nvkm_ltc_invalidate(struct nvkm_ltc *);
34void nvkm_ltc_flush(struct nvkm_ltc *);
35
33int gf100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); 36int gf100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
34int gk104_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); 37int gk104_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
38int gk20a_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
35int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); 39int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
36#endif 40#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
index 5b3c054f3b55..fee0a97c44c5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
@@ -24,11 +24,14 @@ struct nvkm_pci {
24u32 nvkm_pci_rd32(struct nvkm_pci *, u16 addr); 24u32 nvkm_pci_rd32(struct nvkm_pci *, u16 addr);
25void nvkm_pci_wr08(struct nvkm_pci *, u16 addr, u8 data); 25void nvkm_pci_wr08(struct nvkm_pci *, u16 addr, u8 data);
26void nvkm_pci_wr32(struct nvkm_pci *, u16 addr, u32 data); 26void nvkm_pci_wr32(struct nvkm_pci *, u16 addr, u32 data);
27u32 nvkm_pci_mask(struct nvkm_pci *, u16 addr, u32 mask, u32 value);
27void nvkm_pci_rom_shadow(struct nvkm_pci *, bool shadow); 28void nvkm_pci_rom_shadow(struct nvkm_pci *, bool shadow);
28 29
29int nv04_pci_new(struct nvkm_device *, int, struct nvkm_pci **); 30int nv04_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
30int nv40_pci_new(struct nvkm_device *, int, struct nvkm_pci **); 31int nv40_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
32int nv46_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
31int nv4c_pci_new(struct nvkm_device *, int, struct nvkm_pci **); 33int nv4c_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
32int nv50_pci_new(struct nvkm_device *, int, struct nvkm_pci **); 34int g84_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
35int g94_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
33int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **); 36int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
34#endif 37#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index 62ed0880b0e1..82d3e28918fd 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -59,6 +59,16 @@ void nvkm_timer_alarm_cancel(struct nvkm_timer *, struct nvkm_alarm *);
59#define nvkm_usec(d,u,cond...) nvkm_nsec((d), (u) * 1000, ##cond) 59#define nvkm_usec(d,u,cond...) nvkm_nsec((d), (u) * 1000, ##cond)
60#define nvkm_msec(d,m,cond...) nvkm_usec((d), (m) * 1000, ##cond) 60#define nvkm_msec(d,m,cond...) nvkm_usec((d), (m) * 1000, ##cond)
61 61
62#define nvkm_wait_nsec(d,n,addr,mask,data) \
63 nvkm_nsec(d, n, \
64 if ((nvkm_rd32(d, (addr)) & (mask)) == (data)) \
65 break; \
66 )
67#define nvkm_wait_usec(d,u,addr,mask,data) \
68 nvkm_wait_nsec((d), (u) * 1000, (addr), (mask), (data))
69#define nvkm_wait_msec(d,m,addr,mask,data) \
70 nvkm_wait_usec((d), (m) * 1000, (addr), (mask), (data))
71
62int nv04_timer_new(struct nvkm_device *, int, struct nvkm_timer **); 72int nv04_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
63int nv40_timer_new(struct nvkm_device *, int, struct nvkm_timer **); 73int nv40_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
64int nv41_timer_new(struct nvkm_device *, int, struct nvkm_timer **); 74int nv41_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index 5c8a3f1196de..b458d046dba7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -18,5 +18,6 @@ int nvkm_volt_get(struct nvkm_volt *);
18int nvkm_volt_set_id(struct nvkm_volt *, u8 id, int condition); 18int nvkm_volt_set_id(struct nvkm_volt *, u8 id, int condition);
19 19
20int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **); 20int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
21int gk104_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
21int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **); 22int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
22#endif 23#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index d336c2247d6a..7f50cf5f929e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -25,6 +25,7 @@
25#include <nvif/driver.h> 25#include <nvif/driver.h>
26#include <nvif/ioctl.h> 26#include <nvif/ioctl.h>
27#include <nvif/class.h> 27#include <nvif/class.h>
28#include <nvif/unpack.h>
28 29
29#include "nouveau_drm.h" 30#include "nouveau_drm.h"
30#include "nouveau_dma.h" 31#include "nouveau_dma.h"
@@ -32,11 +33,10 @@
32#include "nouveau_chan.h" 33#include "nouveau_chan.h"
33#include "nouveau_abi16.h" 34#include "nouveau_abi16.h"
34 35
35struct nouveau_abi16 * 36static struct nouveau_abi16 *
36nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev) 37nouveau_abi16(struct drm_file *file_priv)
37{ 38{
38 struct nouveau_cli *cli = nouveau_cli(file_priv); 39 struct nouveau_cli *cli = nouveau_cli(file_priv);
39 mutex_lock(&cli->mutex);
40 if (!cli->abi16) { 40 if (!cli->abi16) {
41 struct nouveau_abi16 *abi16; 41 struct nouveau_abi16 *abi16;
42 cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL); 42 cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
@@ -51,8 +51,7 @@ nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
51 * device (ie. the one that belongs to the fd it 51 * device (ie. the one that belongs to the fd it
52 * opened) 52 * opened)
53 */ 53 */
54 if (nvif_device_init(&cli->base.object, 54 if (nvif_device_init(&cli->base.object, 0, NV_DEVICE,
55 NOUVEAU_ABI16_DEVICE, NV_DEVICE,
56 &args, sizeof(args), 55 &args, sizeof(args),
57 &abi16->device) == 0) 56 &abi16->device) == 0)
58 return cli->abi16; 57 return cli->abi16;
@@ -60,12 +59,21 @@ nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
60 kfree(cli->abi16); 59 kfree(cli->abi16);
61 cli->abi16 = NULL; 60 cli->abi16 = NULL;
62 } 61 }
63
64 mutex_unlock(&cli->mutex);
65 } 62 }
66 return cli->abi16; 63 return cli->abi16;
67} 64}
68 65
66struct nouveau_abi16 *
67nouveau_abi16_get(struct drm_file *file_priv)
68{
69 struct nouveau_cli *cli = nouveau_cli(file_priv);
70 mutex_lock(&cli->mutex);
71 if (nouveau_abi16(file_priv))
72 return cli->abi16;
73 mutex_unlock(&cli->mutex);
74 return NULL;
75}
76
69int 77int
70nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret) 78nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
71{ 79{
@@ -133,7 +141,6 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
133 141
134 /* destroy channel object, all children will be killed too */ 142 /* destroy channel object, all children will be killed too */
135 if (chan->chan) { 143 if (chan->chan) {
136 abi16->handles &= ~(1ULL << (chan->chan->user.handle & 0xffff));
137 nouveau_channel_idle(chan->chan); 144 nouveau_channel_idle(chan->chan);
138 nouveau_channel_del(&chan->chan); 145 nouveau_channel_del(&chan->chan);
139 } 146 }
@@ -238,7 +245,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
238 struct drm_nouveau_channel_alloc *init = data; 245 struct drm_nouveau_channel_alloc *init = data;
239 struct nouveau_cli *cli = nouveau_cli(file_priv); 246 struct nouveau_cli *cli = nouveau_cli(file_priv);
240 struct nouveau_drm *drm = nouveau_drm(dev); 247 struct nouveau_drm *drm = nouveau_drm(dev);
241 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); 248 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
242 struct nouveau_abi16_chan *chan; 249 struct nouveau_abi16_chan *chan;
243 struct nvif_device *device; 250 struct nvif_device *device;
244 int ret; 251 int ret;
@@ -268,26 +275,21 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
268 return nouveau_abi16_put(abi16, -EINVAL); 275 return nouveau_abi16_put(abi16, -EINVAL);
269 276
270 /* allocate "abi16 channel" data and make up a handle for it */ 277 /* allocate "abi16 channel" data and make up a handle for it */
271 init->channel = __ffs64(~abi16->handles);
272 if (~abi16->handles == 0)
273 return nouveau_abi16_put(abi16, -ENOSPC);
274
275 chan = kzalloc(sizeof(*chan), GFP_KERNEL); 278 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
276 if (!chan) 279 if (!chan)
277 return nouveau_abi16_put(abi16, -ENOMEM); 280 return nouveau_abi16_put(abi16, -ENOMEM);
278 281
279 INIT_LIST_HEAD(&chan->notifiers); 282 INIT_LIST_HEAD(&chan->notifiers);
280 list_add(&chan->head, &abi16->channels); 283 list_add(&chan->head, &abi16->channels);
281 abi16->handles |= (1ULL << init->channel);
282 284
283 /* create channel object and initialise dma and fence management */ 285 /* create channel object and initialise dma and fence management */
284 ret = nouveau_channel_new(drm, device, 286 ret = nouveau_channel_new(drm, device, init->fb_ctxdma_handle,
285 NOUVEAU_ABI16_CHAN(init->channel),
286 init->fb_ctxdma_handle,
287 init->tt_ctxdma_handle, &chan->chan); 287 init->tt_ctxdma_handle, &chan->chan);
288 if (ret) 288 if (ret)
289 goto done; 289 goto done;
290 290
291 init->channel = chan->chan->chid;
292
291 if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) 293 if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
292 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM | 294 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
293 NOUVEAU_GEM_DOMAIN_GART; 295 NOUVEAU_GEM_DOMAIN_GART;
@@ -338,7 +340,7 @@ nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel)
338 struct nouveau_abi16_chan *chan; 340 struct nouveau_abi16_chan *chan;
339 341
340 list_for_each_entry(chan, &abi16->channels, head) { 342 list_for_each_entry(chan, &abi16->channels, head) {
341 if (chan->chan->user.handle == NOUVEAU_ABI16_CHAN(channel)) 343 if (chan->chan->chid == channel)
342 return chan; 344 return chan;
343 } 345 }
344 346
@@ -346,10 +348,48 @@ nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel)
346} 348}
347 349
348int 350int
351nouveau_abi16_usif(struct drm_file *file_priv, void *data, u32 size)
352{
353 union {
354 struct nvif_ioctl_v0 v0;
355 } *args = data;
356 struct nouveau_abi16_chan *chan;
357 struct nouveau_abi16 *abi16;
358 int ret;
359
360 if (nvif_unpack(args->v0, 0, 0, true)) {
361 switch (args->v0.type) {
362 case NVIF_IOCTL_V0_NEW:
363 case NVIF_IOCTL_V0_MTHD:
364 case NVIF_IOCTL_V0_SCLASS:
365 break;
366 default:
367 return -EACCES;
368 }
369 } else
370 return ret;
371
372 if (!(abi16 = nouveau_abi16(file_priv)))
373 return -ENOMEM;
374
375 if (args->v0.token != ~0ULL) {
376 if (!(chan = nouveau_abi16_chan(abi16, args->v0.token)))
377 return -EINVAL;
378 args->v0.object = nvif_handle(&chan->chan->user);
379 args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
380 return 0;
381 }
382
383 args->v0.object = nvif_handle(&abi16->device.object);
384 args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
385 return 0;
386}
387
388int
349nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS) 389nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
350{ 390{
351 struct drm_nouveau_channel_free *req = data; 391 struct drm_nouveau_channel_free *req = data;
352 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); 392 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
353 struct nouveau_abi16_chan *chan; 393 struct nouveau_abi16_chan *chan;
354 394
355 if (unlikely(!abi16)) 395 if (unlikely(!abi16))
@@ -366,7 +406,7 @@ int
366nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS) 406nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
367{ 407{
368 struct drm_nouveau_grobj_alloc *init = data; 408 struct drm_nouveau_grobj_alloc *init = data;
369 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); 409 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
370 struct nouveau_abi16_chan *chan; 410 struct nouveau_abi16_chan *chan;
371 struct nouveau_abi16_ntfy *ntfy; 411 struct nouveau_abi16_ntfy *ntfy;
372 struct nvif_client *client; 412 struct nvif_client *client;
@@ -459,7 +499,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
459{ 499{
460 struct drm_nouveau_notifierobj_alloc *info = data; 500 struct drm_nouveau_notifierobj_alloc *info = data;
461 struct nouveau_drm *drm = nouveau_drm(dev); 501 struct nouveau_drm *drm = nouveau_drm(dev);
462 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); 502 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
463 struct nouveau_abi16_chan *chan; 503 struct nouveau_abi16_chan *chan;
464 struct nouveau_abi16_ntfy *ntfy; 504 struct nouveau_abi16_ntfy *ntfy;
465 struct nvif_device *device = &abi16->device; 505 struct nvif_device *device = &abi16->device;
@@ -531,7 +571,7 @@ int
531nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS) 571nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
532{ 572{
533 struct drm_nouveau_gpuobj_free *fini = data; 573 struct drm_nouveau_gpuobj_free *fini = data;
534 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); 574 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
535 struct nouveau_abi16_chan *chan; 575 struct nouveau_abi16_chan *chan;
536 struct nouveau_abi16_ntfy *ntfy; 576 struct nouveau_abi16_ntfy *ntfy;
537 int ret = -ENOENT; 577 int ret = -ENOENT;
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 6584557afa40..841cc556fad8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -33,11 +33,11 @@ struct nouveau_abi16 {
33 u64 handles; 33 u64 handles;
34}; 34};
35 35
36struct nouveau_drm; 36struct nouveau_abi16 *nouveau_abi16_get(struct drm_file *);
37struct nouveau_abi16 *nouveau_abi16_get(struct drm_file *, struct drm_device *);
38int nouveau_abi16_put(struct nouveau_abi16 *, int); 37int nouveau_abi16_put(struct nouveau_abi16 *, int);
39void nouveau_abi16_fini(struct nouveau_abi16 *); 38void nouveau_abi16_fini(struct nouveau_abi16 *);
40s32 nouveau_abi16_swclass(struct nouveau_drm *); 39s32 nouveau_abi16_swclass(struct nouveau_drm *);
40int nouveau_abi16_usif(struct drm_file *, void *data, u32 size);
41 41
42#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1) 42#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
43#define NOUVEAU_GEM_DOMAIN_GART (1 << 2) 43#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index df2d9818aba3..8b8332e46f24 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -206,7 +206,7 @@ static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
206 return VGA_SWITCHEROO_DIS; 206 return VGA_SWITCHEROO_DIS;
207} 207}
208 208
209static struct vga_switcheroo_handler nouveau_dsm_handler = { 209static const struct vga_switcheroo_handler nouveau_dsm_handler = {
210 .switchto = nouveau_dsm_switchto, 210 .switchto = nouveau_dsm_switchto,
211 .power_state = nouveau_dsm_power_state, 211 .power_state = nouveau_dsm_power_state,
212 .get_client_id = nouveau_dsm_get_client_id, 212 .get_client_id = nouveau_dsm_get_client_id,
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 15057b39491c..78f520d05de9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -574,7 +574,7 @@ static struct ttm_tt *
574nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size, 574nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
575 uint32_t page_flags, struct page *dummy_read) 575 uint32_t page_flags, struct page *dummy_read)
576{ 576{
577#if __OS_HAS_AGP 577#if IS_ENABLED(CONFIG_AGP)
578 struct nouveau_drm *drm = nouveau_bdev(bdev); 578 struct nouveau_drm *drm = nouveau_bdev(bdev);
579 579
580 if (drm->agp.bridge) { 580 if (drm->agp.bridge) {
@@ -1366,7 +1366,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1366 /* System memory */ 1366 /* System memory */
1367 return 0; 1367 return 0;
1368 case TTM_PL_TT: 1368 case TTM_PL_TT:
1369#if __OS_HAS_AGP 1369#if IS_ENABLED(CONFIG_AGP)
1370 if (drm->agp.bridge) { 1370 if (drm->agp.bridge) {
1371 mem->bus.offset = mem->start << PAGE_SHIFT; 1371 mem->bus.offset = mem->start << PAGE_SHIFT;
1372 mem->bus.base = drm->agp.base; 1372 mem->bus.base = drm->agp.base;
@@ -1496,7 +1496,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1496 ttm->caching_state == tt_uncached) 1496 ttm->caching_state == tt_uncached)
1497 return ttm_dma_populate(ttm_dma, dev->dev); 1497 return ttm_dma_populate(ttm_dma, dev->dev);
1498 1498
1499#if __OS_HAS_AGP 1499#if IS_ENABLED(CONFIG_AGP)
1500 if (drm->agp.bridge) { 1500 if (drm->agp.bridge) {
1501 return ttm_agp_tt_populate(ttm); 1501 return ttm_agp_tt_populate(ttm);
1502 } 1502 }
@@ -1563,7 +1563,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1563 return; 1563 return;
1564 } 1564 }
1565 1565
1566#if __OS_HAS_AGP 1566#if IS_ENABLED(CONFIG_AGP)
1567 if (drm->agp.bridge) { 1567 if (drm->agp.bridge) {
1568 ttm_agp_tt_unpopulate(ttm); 1568 ttm_agp_tt_unpopulate(ttm);
1569 return; 1569 return;
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index ff5e59db49db..1860f389f21f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -55,10 +55,8 @@ nouveau_channel_idle(struct nouveau_channel *chan)
55 } 55 }
56 56
57 if (ret) { 57 if (ret) {
58 NV_PRINTK(err, cli, "failed to idle channel " 58 NV_PRINTK(err, cli, "failed to idle channel %d [%s]\n",
59 "0x%08x [%s]\n", 59 chan->chid, nvxx_client(&cli->base)->name);
60 chan->user.handle,
61 nvxx_client(&cli->base)->name);
62 return ret; 60 return ret;
63 } 61 }
64 } 62 }
@@ -89,7 +87,7 @@ nouveau_channel_del(struct nouveau_channel **pchan)
89 87
90static int 88static int
91nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device, 89nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
92 u32 handle, u32 size, struct nouveau_channel **pchan) 90 u32 size, struct nouveau_channel **pchan)
93{ 91{
94 struct nouveau_cli *cli = (void *)device->object.client; 92 struct nouveau_cli *cli = (void *)device->object.client;
95 struct nvkm_mmu *mmu = nvxx_mmu(device); 93 struct nvkm_mmu *mmu = nvxx_mmu(device);
@@ -174,8 +172,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
174 } 172 }
175 } 173 }
176 174
177 ret = nvif_object_init(&device->object, NVDRM_PUSH | 175 ret = nvif_object_init(&device->object, 0, NV_DMA_FROM_MEMORY,
178 (handle & 0xffff), NV_DMA_FROM_MEMORY,
179 &args, sizeof(args), &chan->push.ctxdma); 176 &args, sizeof(args), &chan->push.ctxdma);
180 if (ret) { 177 if (ret) {
181 nouveau_channel_del(pchan); 178 nouveau_channel_del(pchan);
@@ -187,7 +184,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
187 184
188static int 185static int
189nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device, 186nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
190 u32 handle, u32 engine, struct nouveau_channel **pchan) 187 u32 engine, struct nouveau_channel **pchan)
191{ 188{
192 static const u16 oclasses[] = { MAXWELL_CHANNEL_GPFIFO_A, 189 static const u16 oclasses[] = { MAXWELL_CHANNEL_GPFIFO_A,
193 KEPLER_CHANNEL_GPFIFO_A, 190 KEPLER_CHANNEL_GPFIFO_A,
@@ -206,7 +203,7 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
206 int ret; 203 int ret;
207 204
208 /* allocate dma push buffer */ 205 /* allocate dma push buffer */
209 ret = nouveau_channel_prep(drm, device, handle, 0x12000, &chan); 206 ret = nouveau_channel_prep(drm, device, 0x12000, &chan);
210 *pchan = chan; 207 *pchan = chan;
211 if (ret) 208 if (ret)
212 return ret; 209 return ret;
@@ -236,7 +233,7 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
236 size = sizeof(args.nv50); 233 size = sizeof(args.nv50);
237 } 234 }
238 235
239 ret = nvif_object_init(&device->object, handle, *oclass++, 236 ret = nvif_object_init(&device->object, 0, *oclass++,
240 &args, size, &chan->user); 237 &args, size, &chan->user);
241 if (ret == 0) { 238 if (ret == 0) {
242 if (chan->user.oclass >= KEPLER_CHANNEL_GPFIFO_A) 239 if (chan->user.oclass >= KEPLER_CHANNEL_GPFIFO_A)
@@ -256,7 +253,7 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
256 253
257static int 254static int
258nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device, 255nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
259 u32 handle, struct nouveau_channel **pchan) 256 struct nouveau_channel **pchan)
260{ 257{
261 static const u16 oclasses[] = { NV40_CHANNEL_DMA, 258 static const u16 oclasses[] = { NV40_CHANNEL_DMA,
262 NV17_CHANNEL_DMA, 259 NV17_CHANNEL_DMA,
@@ -269,7 +266,7 @@ nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
269 int ret; 266 int ret;
270 267
271 /* allocate dma push buffer */ 268 /* allocate dma push buffer */
272 ret = nouveau_channel_prep(drm, device, handle, 0x10000, &chan); 269 ret = nouveau_channel_prep(drm, device, 0x10000, &chan);
273 *pchan = chan; 270 *pchan = chan;
274 if (ret) 271 if (ret)
275 return ret; 272 return ret;
@@ -280,7 +277,7 @@ nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
280 args.offset = chan->push.vma.offset; 277 args.offset = chan->push.vma.offset;
281 278
282 do { 279 do {
283 ret = nvif_object_init(&device->object, handle, *oclass++, 280 ret = nvif_object_init(&device->object, 0, *oclass++,
284 &args, sizeof(args), &chan->user); 281 &args, sizeof(args), &chan->user);
285 if (ret == 0) { 282 if (ret == 0) {
286 chan->chid = args.chid; 283 chan->chid = args.chid;
@@ -401,8 +398,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
401 398
402int 399int
403nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device, 400nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
404 u32 handle, u32 arg0, u32 arg1, 401 u32 arg0, u32 arg1, struct nouveau_channel **pchan)
405 struct nouveau_channel **pchan)
406{ 402{
407 struct nouveau_cli *cli = (void *)device->object.client; 403 struct nouveau_cli *cli = (void *)device->object.client;
408 bool super; 404 bool super;
@@ -412,10 +408,10 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
412 super = cli->base.super; 408 super = cli->base.super;
413 cli->base.super = true; 409 cli->base.super = true;
414 410
415 ret = nouveau_channel_ind(drm, device, handle, arg0, pchan); 411 ret = nouveau_channel_ind(drm, device, arg0, pchan);
416 if (ret) { 412 if (ret) {
417 NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret); 413 NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret);
418 ret = nouveau_channel_dma(drm, device, handle, pchan); 414 ret = nouveau_channel_dma(drm, device, pchan);
419 if (ret) { 415 if (ret) {
420 NV_PRINTK(dbg, cli, "dma channel create, %d\n", ret); 416 NV_PRINTK(dbg, cli, "dma channel create, %d\n", ret);
421 goto done; 417 goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 2ed32414cb69..48062c94f36d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -42,8 +42,7 @@ struct nouveau_channel {
42 42
43 43
44int nouveau_channel_new(struct nouveau_drm *, struct nvif_device *, 44int nouveau_channel_new(struct nouveau_drm *, struct nvif_device *,
45 u32 handle, u32 arg0, u32 arg1, 45 u32 arg0, u32 arg1, struct nouveau_channel **);
46 struct nouveau_channel **);
47void nouveau_channel_del(struct nouveau_channel **); 46void nouveau_channel_del(struct nouveau_channel **);
48int nouveau_channel_idle(struct nouveau_channel *); 47int nouveau_channel_idle(struct nouveau_channel *);
49 48
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index e905c00acf1a..db6bc6760545 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -51,12 +51,12 @@ nouveau_display_vblank_handler(struct nvif_notify *notify)
51} 51}
52 52
53int 53int
54nouveau_display_vblank_enable(struct drm_device *dev, int head) 54nouveau_display_vblank_enable(struct drm_device *dev, unsigned int pipe)
55{ 55{
56 struct drm_crtc *crtc; 56 struct drm_crtc *crtc;
57 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 57 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
58 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 58 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
59 if (nv_crtc->index == head) { 59 if (nv_crtc->index == pipe) {
60 nvif_notify_get(&nv_crtc->vblank); 60 nvif_notify_get(&nv_crtc->vblank);
61 return 0; 61 return 0;
62 } 62 }
@@ -65,12 +65,12 @@ nouveau_display_vblank_enable(struct drm_device *dev, int head)
65} 65}
66 66
67void 67void
68nouveau_display_vblank_disable(struct drm_device *dev, int head) 68nouveau_display_vblank_disable(struct drm_device *dev, unsigned int pipe)
69{ 69{
70 struct drm_crtc *crtc; 70 struct drm_crtc *crtc;
71 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 71 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
72 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 72 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
73 if (nv_crtc->index == head) { 73 if (nv_crtc->index == pipe) {
74 nvif_notify_put(&nv_crtc->vblank); 74 nvif_notify_put(&nv_crtc->vblank);
75 return; 75 return;
76 } 76 }
@@ -103,6 +103,7 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
103 .base.head = nouveau_crtc(crtc)->index, 103 .base.head = nouveau_crtc(crtc)->index,
104 }; 104 };
105 struct nouveau_display *disp = nouveau_display(crtc->dev); 105 struct nouveau_display *disp = nouveau_display(crtc->dev);
106 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
106 int ret, retry = 1; 107 int ret, retry = 1;
107 108
108 do { 109 do {
@@ -116,7 +117,7 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
116 break; 117 break;
117 } 118 }
118 119
119 if (retry) ndelay(crtc->linedur_ns); 120 if (retry) ndelay(vblank->linedur_ns);
120 } while (retry--); 121 } while (retry--);
121 122
122 *hpos = args.scan.hline; 123 *hpos = args.scan.hline;
@@ -131,13 +132,15 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
131} 132}
132 133
133int 134int
134nouveau_display_scanoutpos(struct drm_device *dev, int head, unsigned int flags, 135nouveau_display_scanoutpos(struct drm_device *dev, unsigned int pipe,
135 int *vpos, int *hpos, ktime_t *stime, ktime_t *etime) 136 unsigned int flags, int *vpos, int *hpos,
137 ktime_t *stime, ktime_t *etime,
138 const struct drm_display_mode *mode)
136{ 139{
137 struct drm_crtc *crtc; 140 struct drm_crtc *crtc;
138 141
139 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 142 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
140 if (nouveau_crtc(crtc)->index == head) { 143 if (nouveau_crtc(crtc)->index == pipe) {
141 return nouveau_display_scanoutpos_head(crtc, vpos, hpos, 144 return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
142 stime, etime); 145 stime, etime);
143 } 146 }
@@ -147,15 +150,15 @@ nouveau_display_scanoutpos(struct drm_device *dev, int head, unsigned int flags,
147} 150}
148 151
149int 152int
150nouveau_display_vblstamp(struct drm_device *dev, int head, int *max_error, 153nouveau_display_vblstamp(struct drm_device *dev, unsigned int pipe,
151 struct timeval *time, unsigned flags) 154 int *max_error, struct timeval *time, unsigned flags)
152{ 155{
153 struct drm_crtc *crtc; 156 struct drm_crtc *crtc;
154 157
155 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 158 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
156 if (nouveau_crtc(crtc)->index == head) { 159 if (nouveau_crtc(crtc)->index == pipe) {
157 return drm_calc_vbltimestamp_from_scanoutpos(dev, 160 return drm_calc_vbltimestamp_from_scanoutpos(dev,
158 head, max_error, time, flags, crtc, 161 pipe, max_error, time, flags,
159 &crtc->hwmode); 162 &crtc->hwmode);
160 } 163 }
161 } 164 }
@@ -506,9 +509,8 @@ nouveau_display_create(struct drm_device *dev)
506 int i; 509 int i;
507 510
508 for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) { 511 for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) {
509 ret = nvif_object_init(&drm->device.object, 512 ret = nvif_object_init(&drm->device.object, 0,
510 NVDRM_DISPLAY, oclass[i], 513 oclass[i], NULL, 0, &disp->disp);
511 NULL, 0, &disp->disp);
512 } 514 }
513 515
514 if (ret == 0) { 516 if (ret == 0) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index a6213e2425c5..856abe0f070d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -65,11 +65,12 @@ int nouveau_display_init(struct drm_device *dev);
65void nouveau_display_fini(struct drm_device *dev); 65void nouveau_display_fini(struct drm_device *dev);
66int nouveau_display_suspend(struct drm_device *dev, bool runtime); 66int nouveau_display_suspend(struct drm_device *dev, bool runtime);
67void nouveau_display_resume(struct drm_device *dev, bool runtime); 67void nouveau_display_resume(struct drm_device *dev, bool runtime);
68int nouveau_display_vblank_enable(struct drm_device *, int); 68int nouveau_display_vblank_enable(struct drm_device *, unsigned int);
69void nouveau_display_vblank_disable(struct drm_device *, int); 69void nouveau_display_vblank_disable(struct drm_device *, unsigned int);
70int nouveau_display_scanoutpos(struct drm_device *, int, unsigned int, 70int nouveau_display_scanoutpos(struct drm_device *, unsigned int,
71 int *, int *, ktime_t *, ktime_t *); 71 unsigned int, int *, int *, ktime_t *,
72int nouveau_display_vblstamp(struct drm_device *, int, int *, 72 ktime_t *, const struct drm_display_mode *);
73int nouveau_display_vblstamp(struct drm_device *, unsigned int, int *,
73 struct timeval *, unsigned); 74 struct timeval *, unsigned);
74 75
75int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, 76int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index ccefb645fd55..1d3ee5179ab8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -208,7 +208,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
208 } 208 }
209 209
210 if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) { 210 if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
211 ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN + 1, 211 ret = nouveau_channel_new(drm, &drm->device,
212 KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0| 212 KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0|
213 KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1, 213 KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1,
214 0, &drm->cechan); 214 0, &drm->cechan);
@@ -221,7 +221,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
221 if (device->info.chipset >= 0xa3 && 221 if (device->info.chipset >= 0xa3 &&
222 device->info.chipset != 0xaa && 222 device->info.chipset != 0xaa &&
223 device->info.chipset != 0xac) { 223 device->info.chipset != 0xac) {
224 ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN + 1, 224 ret = nouveau_channel_new(drm, &drm->device,
225 NvDmaFB, NvDmaTT, &drm->cechan); 225 NvDmaFB, NvDmaTT, &drm->cechan);
226 if (ret) 226 if (ret)
227 NV_ERROR(drm, "failed to create ce channel, %d\n", ret); 227 NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
@@ -233,8 +233,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
233 arg1 = NvDmaTT; 233 arg1 = NvDmaTT;
234 } 234 }
235 235
236 ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN, arg0, arg1, 236 ret = nouveau_channel_new(drm, &drm->device, arg0, arg1, &drm->channel);
237 &drm->channel);
238 if (ret) { 237 if (ret) {
239 NV_ERROR(drm, "failed to create kernel channel, %d\n", ret); 238 NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
240 nouveau_accel_fini(drm); 239 nouveau_accel_fini(drm);
@@ -403,8 +402,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
403 402
404 nouveau_get_hdmi_dev(drm); 403 nouveau_get_hdmi_dev(drm);
405 404
406 ret = nvif_device_init(&drm->client.base.object, 405 ret = nvif_device_init(&drm->client.base.object, 0, NV_DEVICE,
407 NVDRM_DEVICE, NV_DEVICE,
408 &(struct nv_device_v0) { 406 &(struct nv_device_v0) {
409 .device = ~0, 407 .device = ~0,
410 }, sizeof(struct nv_device_v0), 408 }, sizeof(struct nv_device_v0),
@@ -862,18 +860,18 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
862 860
863static const struct drm_ioctl_desc 861static const struct drm_ioctl_desc
864nouveau_ioctls[] = { 862nouveau_ioctls[] = {
865 DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), 863 DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
866 DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 864 DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
867 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), 865 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
868 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), 866 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_AUTH|DRM_RENDER_ALLOW),
869 DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), 867 DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
870 DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), 868 DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
871 DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), 869 DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_AUTH|DRM_RENDER_ALLOW),
872 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), 870 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH|DRM_RENDER_ALLOW),
873 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), 871 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH|DRM_RENDER_ALLOW),
874 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), 872 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
875 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), 873 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
876 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), 874 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH|DRM_RENDER_ALLOW),
877}; 875};
878 876
879long 877long
@@ -934,7 +932,7 @@ driver_stub = {
934 .debugfs_cleanup = nouveau_debugfs_takedown, 932 .debugfs_cleanup = nouveau_debugfs_takedown,
935#endif 933#endif
936 934
937 .get_vblank_counter = drm_vblank_count, 935 .get_vblank_counter = drm_vblank_no_hw_counter,
938 .enable_vblank = nouveau_display_vblank_enable, 936 .enable_vblank = nouveau_display_vblank_enable,
939 .disable_vblank = nouveau_display_vblank_disable, 937 .disable_vblank = nouveau_display_vblank_disable,
940 .get_scanout_position = nouveau_display_scanoutpos, 938 .get_scanout_position = nouveau_display_scanoutpos,
@@ -1030,13 +1028,14 @@ nouveau_drm_pci_driver = {
1030}; 1028};
1031 1029
1032struct drm_device * 1030struct drm_device *
1033nouveau_platform_device_create(struct platform_device *pdev, 1031nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
1032 struct platform_device *pdev,
1034 struct nvkm_device **pdevice) 1033 struct nvkm_device **pdevice)
1035{ 1034{
1036 struct drm_device *drm; 1035 struct drm_device *drm;
1037 int err; 1036 int err;
1038 1037
1039 err = nvkm_device_tegra_new(pdev, nouveau_config, nouveau_debug, 1038 err = nvkm_device_tegra_new(func, pdev, nouveau_config, nouveau_debug,
1040 true, true, ~0ULL, pdevice); 1039 true, true, ~0ULL, pdevice);
1041 if (err) 1040 if (err)
1042 goto err_free; 1041 goto err_free;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 3c902c24a8dd..3050042e6c6d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -10,7 +10,7 @@
10 10
11#define DRIVER_MAJOR 1 11#define DRIVER_MAJOR 1
12#define DRIVER_MINOR 3 12#define DRIVER_MINOR 3
13#define DRIVER_PATCHLEVEL 0 13#define DRIVER_PATCHLEVEL 1
14 14
15/* 15/*
16 * 1.1.1: 16 * 1.1.1:
@@ -33,6 +33,8 @@
33 * 1.3.0: 33 * 1.3.0:
34 * - NVIF ABI modified, safe because only (current) users are test 34 * - NVIF ABI modified, safe because only (current) users are test
35 * programs that get directly linked with NVKM. 35 * programs that get directly linked with NVKM.
36 * 1.3.1:
37 * - implemented limited ABI16/NVIF interop
36 */ 38 */
37 39
38#include <nvif/client.h> 40#include <nvif/client.h>
@@ -74,11 +76,6 @@ enum nouveau_drm_notify_route {
74}; 76};
75 77
76enum nouveau_drm_handle { 78enum nouveau_drm_handle {
77 NVDRM_CLIENT = 0xffffffff,
78 NVDRM_DEVICE = 0xdddddddd,
79 NVDRM_CONTROL = 0xdddddddc,
80 NVDRM_DISPLAY = 0xd1500000,
81 NVDRM_PUSH = 0xbbbb0000, /* |= client chid */
82 NVDRM_CHAN = 0xcccc0000, /* |= client chid */ 79 NVDRM_CHAN = 0xcccc0000, /* |= client chid */
83 NVDRM_NVSW = 0x55550000, 80 NVDRM_NVSW = 0x55550000,
84}; 81};
@@ -183,8 +180,11 @@ nouveau_drm(struct drm_device *dev)
183int nouveau_pmops_suspend(struct device *); 180int nouveau_pmops_suspend(struct device *);
184int nouveau_pmops_resume(struct device *); 181int nouveau_pmops_resume(struct device *);
185 182
183#include <nvkm/core/tegra.h>
184
186struct drm_device * 185struct drm_device *
187nouveau_platform_device_create(struct platform_device *, struct nvkm_device **); 186nouveau_platform_device_create(const struct nvkm_device_tegra_func *,
187 struct platform_device *, struct nvkm_device **);
188void nouveau_drm_device_remove(struct drm_device *dev); 188void nouveau_drm_device_remove(struct drm_device *dev);
189 189
190#define NV_PRINTK(l,c,f,a...) do { \ 190#define NV_PRINTK(l,c,f,a...) do { \
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 41be584147b9..a0865c49ec83 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -84,8 +84,10 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
84 } 84 }
85 85
86 ret = pm_runtime_get_sync(dev); 86 ret = pm_runtime_get_sync(dev);
87 if (ret < 0 && ret != -EACCES) 87 if (ret < 0 && ret != -EACCES) {
88 kfree(vma);
88 goto out; 89 goto out;
90 }
89 91
90 ret = nouveau_bo_vma_add(nvbo, cli->vm, vma); 92 ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
91 if (ret) 93 if (ret)
@@ -666,7 +668,7 @@ int
666nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, 668nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
667 struct drm_file *file_priv) 669 struct drm_file *file_priv)
668{ 670{
669 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); 671 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
670 struct nouveau_cli *cli = nouveau_cli(file_priv); 672 struct nouveau_cli *cli = nouveau_cli(file_priv);
671 struct nouveau_abi16_chan *temp; 673 struct nouveau_abi16_chan *temp;
672 struct nouveau_drm *drm = nouveau_drm(dev); 674 struct nouveau_drm *drm = nouveau_drm(dev);
@@ -682,7 +684,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
682 return -ENOMEM; 684 return -ENOMEM;
683 685
684 list_for_each_entry(temp, &abi16->channels, head) { 686 list_for_each_entry(temp, &abi16->channels, head) {
685 if (temp->chan->user.handle == (NVDRM_CHAN | req->channel)) { 687 if (temp->chan->chid == req->channel) {
686 chan = temp->chan; 688 chan = temp->chan;
687 break; 689 break;
688 } 690 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 3eb665453165..60e32c4e4e49 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -23,11 +23,14 @@
23 23
24static int nouveau_platform_probe(struct platform_device *pdev) 24static int nouveau_platform_probe(struct platform_device *pdev)
25{ 25{
26 const struct nvkm_device_tegra_func *func;
26 struct nvkm_device *device; 27 struct nvkm_device *device;
27 struct drm_device *drm; 28 struct drm_device *drm;
28 int ret; 29 int ret;
29 30
30 drm = nouveau_platform_device_create(pdev, &device); 31 func = of_device_get_match_data(&pdev->dev);
32
33 drm = nouveau_platform_device_create(func, pdev, &device);
31 if (IS_ERR(drm)) 34 if (IS_ERR(drm))
32 return PTR_ERR(drm); 35 return PTR_ERR(drm);
33 36
@@ -48,9 +51,19 @@ static int nouveau_platform_remove(struct platform_device *pdev)
48} 51}
49 52
50#if IS_ENABLED(CONFIG_OF) 53#if IS_ENABLED(CONFIG_OF)
54static const struct nvkm_device_tegra_func gk20a_platform_data = {
55 .iommu_bit = 34,
56};
57
51static const struct of_device_id nouveau_platform_match[] = { 58static const struct of_device_id nouveau_platform_match[] = {
52 { .compatible = "nvidia,gk20a" }, 59 {
53 { .compatible = "nvidia,gm20b" }, 60 .compatible = "nvidia,gk20a",
61 .data = &gk20a_platform_data,
62 },
63 {
64 .compatible = "nvidia,gm20b",
65 .data = &gk20a_platform_data,
66 },
54 { } 67 { }
55}; 68};
56 69
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.c b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
index d12a5faee047..5dac3546c1b8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sysfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
@@ -188,9 +188,8 @@ nouveau_sysfs_init(struct drm_device *dev)
188 if (!sysfs) 188 if (!sysfs)
189 return -ENOMEM; 189 return -ENOMEM;
190 190
191 ret = nvif_object_init(&device->object, NVDRM_CONTROL, 191 ret = nvif_object_init(&device->object, 0, NVIF_IOCTL_NEW_V0_CONTROL,
192 NVIF_IOCTL_NEW_V0_CONTROL, NULL, 0, 192 NULL, 0, &sysfs->ctrl);
193 &sysfs->ctrl);
194 if (ret == 0) 193 if (ret == 0)
195 device_create_file(nvxx_device(device)->dev, &dev_attr_pstate); 194 device_create_file(nvxx_device(device)->dev, &dev_attr_pstate);
196 195
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 3f0fb55cb473..3f713c1b5dc1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -29,6 +29,9 @@
29#include "nouveau_gem.h" 29#include "nouveau_gem.h"
30 30
31#include "drm_legacy.h" 31#include "drm_legacy.h"
32
33#include <core/tegra.h>
34
32static int 35static int
33nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) 36nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
34{ 37{
@@ -338,7 +341,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
338 struct nvkm_device *device = nvxx_device(&drm->device); 341 struct nvkm_device *device = nvxx_device(&drm->device);
339 struct nvkm_pci *pci = device->pci; 342 struct nvkm_pci *pci = device->pci;
340 struct drm_device *dev = drm->dev; 343 struct drm_device *dev = drm->dev;
341 u32 bits; 344 u8 bits;
342 int ret; 345 int ret;
343 346
344 if (pci && pci->agp.bridge) { 347 if (pci && pci->agp.bridge) {
@@ -351,20 +354,28 @@ nouveau_ttm_init(struct nouveau_drm *drm)
351 bits = nvxx_mmu(&drm->device)->dma_bits; 354 bits = nvxx_mmu(&drm->device)->dma_bits;
352 if (nvxx_device(&drm->device)->func->pci) { 355 if (nvxx_device(&drm->device)->func->pci) {
353 if (drm->agp.bridge || 356 if (drm->agp.bridge ||
354 !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits))) 357 !dma_supported(dev->dev, DMA_BIT_MASK(bits)))
355 bits = 32; 358 bits = 32;
359 } else if (device->func->tegra) {
360 struct nvkm_device_tegra *tegra = device->func->tegra(device);
356 361
357 ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits)); 362 /*
358 if (ret) 363 * If the platform can use a IOMMU, then the addressable DMA
359 return ret; 364 * space is constrained by the IOMMU bit
365 */
366 if (tegra->func->iommu_bit)
367 bits = min(bits, tegra->func->iommu_bit);
360 368
361 ret = pci_set_consistent_dma_mask(dev->pdev,
362 DMA_BIT_MASK(bits));
363 if (ret)
364 pci_set_consistent_dma_mask(dev->pdev,
365 DMA_BIT_MASK(32));
366 } 369 }
367 370
371 ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
372 if (ret)
373 return ret;
374
375 ret = dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(bits));
376 if (ret)
377 dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(32));
378
368 ret = nouveau_ttm_global_init(drm); 379 ret = nouveau_ttm_global_init(drm);
369 if (ret) 380 if (ret)
370 return ret; 381 return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index cb1182d7e80e..89dc4ce63490 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -24,6 +24,7 @@
24 24
25#include "nouveau_drm.h" 25#include "nouveau_drm.h"
26#include "nouveau_usif.h" 26#include "nouveau_usif.h"
27#include "nouveau_abi16.h"
27 28
28#include <nvif/notify.h> 29#include <nvif/notify.h>
29#include <nvif/unpack.h> 30#include <nvif/unpack.h>
@@ -316,11 +317,21 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
316 } else 317 } else
317 goto done; 318 goto done;
318 319
320 /* USIF slightly abuses some return-only ioctl members in order
321 * to provide interoperability with the older ABI16 objects
322 */
319 mutex_lock(&cli->mutex); 323 mutex_lock(&cli->mutex);
324 if (argv->v0.route) {
325 if (ret = -EINVAL, argv->v0.route == 0xff)
326 ret = nouveau_abi16_usif(filp, argv, argc);
327 if (ret) {
328 mutex_unlock(&cli->mutex);
329 goto done;
330 }
331 }
332
320 switch (argv->v0.type) { 333 switch (argv->v0.type) {
321 case NVIF_IOCTL_V0_NEW: 334 case NVIF_IOCTL_V0_NEW:
322 /* ... except if we're creating children */
323 argv->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
324 ret = usif_object_new(filp, data, size, argv, argc); 335 ret = usif_object_new(filp, data, size, argv, argc);
325 break; 336 break;
326 case NVIF_IOCTL_V0_NTFY_NEW: 337 case NVIF_IOCTL_V0_NTFY_NEW:
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 4ae87aed4505..c053c50b346a 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -68,7 +68,6 @@ nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
68 const s32 *oclass, u8 head, void *data, u32 size, 68 const s32 *oclass, u8 head, void *data, u32 size,
69 struct nv50_chan *chan) 69 struct nv50_chan *chan)
70{ 70{
71 const u32 handle = (oclass[0] << 16) | head;
72 struct nvif_sclass *sclass; 71 struct nvif_sclass *sclass;
73 int ret, i, n; 72 int ret, i, n;
74 73
@@ -81,7 +80,7 @@ nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
81 while (oclass[0]) { 80 while (oclass[0]) {
82 for (i = 0; i < n; i++) { 81 for (i = 0; i < n; i++) {
83 if (sclass[i].oclass == oclass[0]) { 82 if (sclass[i].oclass == oclass[0]) {
84 ret = nvif_object_init(disp, handle, oclass[0], 83 ret = nvif_object_init(disp, 0, oclass[0],
85 data, size, &chan->user); 84 data, size, &chan->user);
86 if (ret == 0) 85 if (ret == 0)
87 nvif_object_map(&chan->user); 86 nvif_object_map(&chan->user);
@@ -231,8 +230,8 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
231 if (!dmac->ptr) 230 if (!dmac->ptr)
232 return -ENOMEM; 231 return -ENOMEM;
233 232
234 ret = nvif_object_init(&device->object, 0xd0000000, 233 ret = nvif_object_init(&device->object, 0, NV_DMA_FROM_MEMORY,
235 NV_DMA_FROM_MEMORY, &(struct nv_dma_v0) { 234 &(struct nv_dma_v0) {
236 .target = NV_DMA_V0_TARGET_PCI_US, 235 .target = NV_DMA_V0_TARGET_PCI_US,
237 .access = NV_DMA_V0_ACCESS_RD, 236 .access = NV_DMA_V0_ACCESS_RD,
238 .start = dmac->handle + 0x0000, 237 .start = dmac->handle + 0x0000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 94a906b8cb88..bbc9824af6e0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -637,7 +637,7 @@ nv46_chipset = {
637 .imem = nv40_instmem_new, 637 .imem = nv40_instmem_new,
638 .mc = nv44_mc_new, 638 .mc = nv44_mc_new,
639 .mmu = nv44_mmu_new, 639 .mmu = nv44_mmu_new,
640 .pci = nv4c_pci_new, 640 .pci = nv46_pci_new,
641 .therm = nv40_therm_new, 641 .therm = nv40_therm_new,
642 .timer = nv41_timer_new, 642 .timer = nv41_timer_new,
643 .volt = nv40_volt_new, 643 .volt = nv40_volt_new,
@@ -822,7 +822,7 @@ nv50_chipset = {
822 .mc = nv50_mc_new, 822 .mc = nv50_mc_new,
823 .mmu = nv50_mmu_new, 823 .mmu = nv50_mmu_new,
824 .mxm = nv50_mxm_new, 824 .mxm = nv50_mxm_new,
825 .pci = nv50_pci_new, 825 .pci = nv46_pci_new,
826 .therm = nv50_therm_new, 826 .therm = nv50_therm_new,
827 .timer = nv41_timer_new, 827 .timer = nv41_timer_new,
828 .volt = nv40_volt_new, 828 .volt = nv40_volt_new,
@@ -929,7 +929,7 @@ nv84_chipset = {
929 .mc = nv50_mc_new, 929 .mc = nv50_mc_new,
930 .mmu = nv50_mmu_new, 930 .mmu = nv50_mmu_new,
931 .mxm = nv50_mxm_new, 931 .mxm = nv50_mxm_new,
932 .pci = nv50_pci_new, 932 .pci = g84_pci_new,
933 .therm = g84_therm_new, 933 .therm = g84_therm_new,
934 .timer = nv41_timer_new, 934 .timer = nv41_timer_new,
935 .volt = nv40_volt_new, 935 .volt = nv40_volt_new,
@@ -961,7 +961,7 @@ nv86_chipset = {
961 .mc = nv50_mc_new, 961 .mc = nv50_mc_new,
962 .mmu = nv50_mmu_new, 962 .mmu = nv50_mmu_new,
963 .mxm = nv50_mxm_new, 963 .mxm = nv50_mxm_new,
964 .pci = nv50_pci_new, 964 .pci = g84_pci_new,
965 .therm = g84_therm_new, 965 .therm = g84_therm_new,
966 .timer = nv41_timer_new, 966 .timer = nv41_timer_new,
967 .volt = nv40_volt_new, 967 .volt = nv40_volt_new,
@@ -993,7 +993,7 @@ nv92_chipset = {
993 .mc = nv50_mc_new, 993 .mc = nv50_mc_new,
994 .mmu = nv50_mmu_new, 994 .mmu = nv50_mmu_new,
995 .mxm = nv50_mxm_new, 995 .mxm = nv50_mxm_new,
996 .pci = nv50_pci_new, 996 .pci = g84_pci_new,
997 .therm = g84_therm_new, 997 .therm = g84_therm_new,
998 .timer = nv41_timer_new, 998 .timer = nv41_timer_new,
999 .volt = nv40_volt_new, 999 .volt = nv40_volt_new,
@@ -1025,7 +1025,7 @@ nv94_chipset = {
1025 .mc = nv50_mc_new, 1025 .mc = nv50_mc_new,
1026 .mmu = nv50_mmu_new, 1026 .mmu = nv50_mmu_new,
1027 .mxm = nv50_mxm_new, 1027 .mxm = nv50_mxm_new,
1028 .pci = nv40_pci_new, 1028 .pci = g94_pci_new,
1029 .therm = g84_therm_new, 1029 .therm = g84_therm_new,
1030 .timer = nv41_timer_new, 1030 .timer = nv41_timer_new,
1031 .volt = nv40_volt_new, 1031 .volt = nv40_volt_new,
@@ -1057,7 +1057,7 @@ nv96_chipset = {
1057 .mc = nv50_mc_new, 1057 .mc = nv50_mc_new,
1058 .mmu = nv50_mmu_new, 1058 .mmu = nv50_mmu_new,
1059 .mxm = nv50_mxm_new, 1059 .mxm = nv50_mxm_new,
1060 .pci = nv40_pci_new, 1060 .pci = g94_pci_new,
1061 .therm = g84_therm_new, 1061 .therm = g84_therm_new,
1062 .timer = nv41_timer_new, 1062 .timer = nv41_timer_new,
1063 .volt = nv40_volt_new, 1063 .volt = nv40_volt_new,
@@ -1089,7 +1089,7 @@ nv98_chipset = {
1089 .mc = g98_mc_new, 1089 .mc = g98_mc_new,
1090 .mmu = nv50_mmu_new, 1090 .mmu = nv50_mmu_new,
1091 .mxm = nv50_mxm_new, 1091 .mxm = nv50_mxm_new,
1092 .pci = nv40_pci_new, 1092 .pci = g94_pci_new,
1093 .therm = g84_therm_new, 1093 .therm = g84_therm_new,
1094 .timer = nv41_timer_new, 1094 .timer = nv41_timer_new,
1095 .volt = nv40_volt_new, 1095 .volt = nv40_volt_new,
@@ -1121,7 +1121,7 @@ nva0_chipset = {
1121 .mc = g98_mc_new, 1121 .mc = g98_mc_new,
1122 .mmu = nv50_mmu_new, 1122 .mmu = nv50_mmu_new,
1123 .mxm = nv50_mxm_new, 1123 .mxm = nv50_mxm_new,
1124 .pci = nv40_pci_new, 1124 .pci = g94_pci_new,
1125 .therm = g84_therm_new, 1125 .therm = g84_therm_new,
1126 .timer = nv41_timer_new, 1126 .timer = nv41_timer_new,
1127 .volt = nv40_volt_new, 1127 .volt = nv40_volt_new,
@@ -1153,7 +1153,7 @@ nva3_chipset = {
1153 .mc = g98_mc_new, 1153 .mc = g98_mc_new,
1154 .mmu = nv50_mmu_new, 1154 .mmu = nv50_mmu_new,
1155 .mxm = nv50_mxm_new, 1155 .mxm = nv50_mxm_new,
1156 .pci = nv40_pci_new, 1156 .pci = g94_pci_new,
1157 .pmu = gt215_pmu_new, 1157 .pmu = gt215_pmu_new,
1158 .therm = gt215_therm_new, 1158 .therm = gt215_therm_new,
1159 .timer = nv41_timer_new, 1159 .timer = nv41_timer_new,
@@ -1187,7 +1187,7 @@ nva5_chipset = {
1187 .mc = g98_mc_new, 1187 .mc = g98_mc_new,
1188 .mmu = nv50_mmu_new, 1188 .mmu = nv50_mmu_new,
1189 .mxm = nv50_mxm_new, 1189 .mxm = nv50_mxm_new,
1190 .pci = nv40_pci_new, 1190 .pci = g94_pci_new,
1191 .pmu = gt215_pmu_new, 1191 .pmu = gt215_pmu_new,
1192 .therm = gt215_therm_new, 1192 .therm = gt215_therm_new,
1193 .timer = nv41_timer_new, 1193 .timer = nv41_timer_new,
@@ -1220,7 +1220,7 @@ nva8_chipset = {
1220 .mc = g98_mc_new, 1220 .mc = g98_mc_new,
1221 .mmu = nv50_mmu_new, 1221 .mmu = nv50_mmu_new,
1222 .mxm = nv50_mxm_new, 1222 .mxm = nv50_mxm_new,
1223 .pci = nv40_pci_new, 1223 .pci = g94_pci_new,
1224 .pmu = gt215_pmu_new, 1224 .pmu = gt215_pmu_new,
1225 .therm = gt215_therm_new, 1225 .therm = gt215_therm_new,
1226 .timer = nv41_timer_new, 1226 .timer = nv41_timer_new,
@@ -1253,7 +1253,7 @@ nvaa_chipset = {
1253 .mc = g98_mc_new, 1253 .mc = g98_mc_new,
1254 .mmu = nv50_mmu_new, 1254 .mmu = nv50_mmu_new,
1255 .mxm = nv50_mxm_new, 1255 .mxm = nv50_mxm_new,
1256 .pci = nv40_pci_new, 1256 .pci = g94_pci_new,
1257 .therm = g84_therm_new, 1257 .therm = g84_therm_new,
1258 .timer = nv41_timer_new, 1258 .timer = nv41_timer_new,
1259 .volt = nv40_volt_new, 1259 .volt = nv40_volt_new,
@@ -1285,7 +1285,7 @@ nvac_chipset = {
1285 .mc = g98_mc_new, 1285 .mc = g98_mc_new,
1286 .mmu = nv50_mmu_new, 1286 .mmu = nv50_mmu_new,
1287 .mxm = nv50_mxm_new, 1287 .mxm = nv50_mxm_new,
1288 .pci = nv40_pci_new, 1288 .pci = g94_pci_new,
1289 .therm = g84_therm_new, 1289 .therm = g84_therm_new,
1290 .timer = nv41_timer_new, 1290 .timer = nv41_timer_new,
1291 .volt = nv40_volt_new, 1291 .volt = nv40_volt_new,
@@ -1317,7 +1317,7 @@ nvaf_chipset = {
1317 .mc = g98_mc_new, 1317 .mc = g98_mc_new,
1318 .mmu = nv50_mmu_new, 1318 .mmu = nv50_mmu_new,
1319 .mxm = nv50_mxm_new, 1319 .mxm = nv50_mxm_new,
1320 .pci = nv40_pci_new, 1320 .pci = g94_pci_new,
1321 .pmu = gt215_pmu_new, 1321 .pmu = gt215_pmu_new,
1322 .therm = gt215_therm_new, 1322 .therm = gt215_therm_new,
1323 .timer = nv41_timer_new, 1323 .timer = nv41_timer_new,
@@ -1388,7 +1388,7 @@ nvc1_chipset = {
1388 .mc = gf100_mc_new, 1388 .mc = gf100_mc_new,
1389 .mmu = gf100_mmu_new, 1389 .mmu = gf100_mmu_new,
1390 .mxm = nv50_mxm_new, 1390 .mxm = nv50_mxm_new,
1391 .pci = nv40_pci_new, 1391 .pci = g94_pci_new,
1392 .pmu = gf100_pmu_new, 1392 .pmu = gf100_pmu_new,
1393 .therm = gt215_therm_new, 1393 .therm = gt215_therm_new,
1394 .timer = nv41_timer_new, 1394 .timer = nv41_timer_new,
@@ -1423,7 +1423,7 @@ nvc3_chipset = {
1423 .mc = gf100_mc_new, 1423 .mc = gf100_mc_new,
1424 .mmu = gf100_mmu_new, 1424 .mmu = gf100_mmu_new,
1425 .mxm = nv50_mxm_new, 1425 .mxm = nv50_mxm_new,
1426 .pci = nv40_pci_new, 1426 .pci = g94_pci_new,
1427 .pmu = gf100_pmu_new, 1427 .pmu = gf100_pmu_new,
1428 .therm = gt215_therm_new, 1428 .therm = gt215_therm_new,
1429 .timer = nv41_timer_new, 1429 .timer = nv41_timer_new,
@@ -1566,7 +1566,7 @@ nvcf_chipset = {
1566 .mc = gf100_mc_new, 1566 .mc = gf100_mc_new,
1567 .mmu = gf100_mmu_new, 1567 .mmu = gf100_mmu_new,
1568 .mxm = nv50_mxm_new, 1568 .mxm = nv50_mxm_new,
1569 .pci = nv40_pci_new, 1569 .pci = g94_pci_new,
1570 .pmu = gf100_pmu_new, 1570 .pmu = gf100_pmu_new,
1571 .therm = gt215_therm_new, 1571 .therm = gt215_therm_new,
1572 .timer = nv41_timer_new, 1572 .timer = nv41_timer_new,
@@ -1595,13 +1595,13 @@ nvd7_chipset = {
1595 .fuse = gf100_fuse_new, 1595 .fuse = gf100_fuse_new,
1596 .gpio = gf119_gpio_new, 1596 .gpio = gf119_gpio_new,
1597 .i2c = gf117_i2c_new, 1597 .i2c = gf117_i2c_new,
1598 .ibus = gf100_ibus_new, 1598 .ibus = gf117_ibus_new,
1599 .imem = nv50_instmem_new, 1599 .imem = nv50_instmem_new,
1600 .ltc = gf100_ltc_new, 1600 .ltc = gf100_ltc_new,
1601 .mc = gf100_mc_new, 1601 .mc = gf100_mc_new,
1602 .mmu = gf100_mmu_new, 1602 .mmu = gf100_mmu_new,
1603 .mxm = nv50_mxm_new, 1603 .mxm = nv50_mxm_new,
1604 .pci = nv40_pci_new, 1604 .pci = g94_pci_new,
1605 .therm = gf119_therm_new, 1605 .therm = gf119_therm_new,
1606 .timer = nv41_timer_new, 1606 .timer = nv41_timer_new,
1607 .ce[0] = gf100_ce_new, 1607 .ce[0] = gf100_ce_new,
@@ -1628,13 +1628,13 @@ nvd9_chipset = {
1628 .fuse = gf100_fuse_new, 1628 .fuse = gf100_fuse_new,
1629 .gpio = gf119_gpio_new, 1629 .gpio = gf119_gpio_new,
1630 .i2c = gf119_i2c_new, 1630 .i2c = gf119_i2c_new,
1631 .ibus = gf100_ibus_new, 1631 .ibus = gf117_ibus_new,
1632 .imem = nv50_instmem_new, 1632 .imem = nv50_instmem_new,
1633 .ltc = gf100_ltc_new, 1633 .ltc = gf100_ltc_new,
1634 .mc = gf100_mc_new, 1634 .mc = gf100_mc_new,
1635 .mmu = gf100_mmu_new, 1635 .mmu = gf100_mmu_new,
1636 .mxm = nv50_mxm_new, 1636 .mxm = nv50_mxm_new,
1637 .pci = nv40_pci_new, 1637 .pci = g94_pci_new,
1638 .pmu = gf119_pmu_new, 1638 .pmu = gf119_pmu_new,
1639 .therm = gf119_therm_new, 1639 .therm = gf119_therm_new,
1640 .timer = nv41_timer_new, 1640 .timer = nv41_timer_new,
@@ -1669,11 +1669,11 @@ nve4_chipset = {
1669 .mc = gf100_mc_new, 1669 .mc = gf100_mc_new,
1670 .mmu = gf100_mmu_new, 1670 .mmu = gf100_mmu_new,
1671 .mxm = nv50_mxm_new, 1671 .mxm = nv50_mxm_new,
1672 .pci = nv40_pci_new, 1672 .pci = g94_pci_new,
1673 .pmu = gk104_pmu_new, 1673 .pmu = gk104_pmu_new,
1674 .therm = gf119_therm_new, 1674 .therm = gf119_therm_new,
1675 .timer = nv41_timer_new, 1675 .timer = nv41_timer_new,
1676 .volt = nv40_volt_new, 1676 .volt = gk104_volt_new,
1677 .ce[0] = gk104_ce_new, 1677 .ce[0] = gk104_ce_new,
1678 .ce[1] = gk104_ce_new, 1678 .ce[1] = gk104_ce_new,
1679 .ce[2] = gk104_ce_new, 1679 .ce[2] = gk104_ce_new,
@@ -1706,11 +1706,11 @@ nve6_chipset = {
1706 .mc = gf100_mc_new, 1706 .mc = gf100_mc_new,
1707 .mmu = gf100_mmu_new, 1707 .mmu = gf100_mmu_new,
1708 .mxm = nv50_mxm_new, 1708 .mxm = nv50_mxm_new,
1709 .pci = nv40_pci_new, 1709 .pci = g94_pci_new,
1710 .pmu = gk104_pmu_new, 1710 .pmu = gk104_pmu_new,
1711 .therm = gf119_therm_new, 1711 .therm = gf119_therm_new,
1712 .timer = nv41_timer_new, 1712 .timer = nv41_timer_new,
1713 .volt = nv40_volt_new, 1713 .volt = gk104_volt_new,
1714 .ce[0] = gk104_ce_new, 1714 .ce[0] = gk104_ce_new,
1715 .ce[1] = gk104_ce_new, 1715 .ce[1] = gk104_ce_new,
1716 .ce[2] = gk104_ce_new, 1716 .ce[2] = gk104_ce_new,
@@ -1743,11 +1743,11 @@ nve7_chipset = {
1743 .mc = gf100_mc_new, 1743 .mc = gf100_mc_new,
1744 .mmu = gf100_mmu_new, 1744 .mmu = gf100_mmu_new,
1745 .mxm = nv50_mxm_new, 1745 .mxm = nv50_mxm_new,
1746 .pci = nv40_pci_new, 1746 .pci = g94_pci_new,
1747 .pmu = gf119_pmu_new, 1747 .pmu = gk104_pmu_new,
1748 .therm = gf119_therm_new, 1748 .therm = gf119_therm_new,
1749 .timer = nv41_timer_new, 1749 .timer = nv41_timer_new,
1750 .volt = nv40_volt_new, 1750 .volt = gk104_volt_new,
1751 .ce[0] = gk104_ce_new, 1751 .ce[0] = gk104_ce_new,
1752 .ce[1] = gk104_ce_new, 1752 .ce[1] = gk104_ce_new,
1753 .ce[2] = gk104_ce_new, 1753 .ce[2] = gk104_ce_new,
@@ -1804,11 +1804,11 @@ nvf0_chipset = {
1804 .mc = gf100_mc_new, 1804 .mc = gf100_mc_new,
1805 .mmu = gf100_mmu_new, 1805 .mmu = gf100_mmu_new,
1806 .mxm = nv50_mxm_new, 1806 .mxm = nv50_mxm_new,
1807 .pci = nv40_pci_new, 1807 .pci = g94_pci_new,
1808 .pmu = gk110_pmu_new, 1808 .pmu = gk110_pmu_new,
1809 .therm = gf119_therm_new, 1809 .therm = gf119_therm_new,
1810 .timer = nv41_timer_new, 1810 .timer = nv41_timer_new,
1811 .volt = nv40_volt_new, 1811 .volt = gk104_volt_new,
1812 .ce[0] = gk104_ce_new, 1812 .ce[0] = gk104_ce_new,
1813 .ce[1] = gk104_ce_new, 1813 .ce[1] = gk104_ce_new,
1814 .ce[2] = gk104_ce_new, 1814 .ce[2] = gk104_ce_new,
@@ -1840,11 +1840,11 @@ nvf1_chipset = {
1840 .mc = gf100_mc_new, 1840 .mc = gf100_mc_new,
1841 .mmu = gf100_mmu_new, 1841 .mmu = gf100_mmu_new,
1842 .mxm = nv50_mxm_new, 1842 .mxm = nv50_mxm_new,
1843 .pci = nv40_pci_new, 1843 .pci = g94_pci_new,
1844 .pmu = gk110_pmu_new, 1844 .pmu = gk110_pmu_new,
1845 .therm = gf119_therm_new, 1845 .therm = gf119_therm_new,
1846 .timer = nv41_timer_new, 1846 .timer = nv41_timer_new,
1847 .volt = nv40_volt_new, 1847 .volt = gk104_volt_new,
1848 .ce[0] = gk104_ce_new, 1848 .ce[0] = gk104_ce_new,
1849 .ce[1] = gk104_ce_new, 1849 .ce[1] = gk104_ce_new,
1850 .ce[2] = gk104_ce_new, 1850 .ce[2] = gk104_ce_new,
@@ -1876,11 +1876,11 @@ nv106_chipset = {
1876 .mc = gk20a_mc_new, 1876 .mc = gk20a_mc_new,
1877 .mmu = gf100_mmu_new, 1877 .mmu = gf100_mmu_new,
1878 .mxm = nv50_mxm_new, 1878 .mxm = nv50_mxm_new,
1879 .pci = nv40_pci_new, 1879 .pci = g94_pci_new,
1880 .pmu = gk208_pmu_new, 1880 .pmu = gk208_pmu_new,
1881 .therm = gf119_therm_new, 1881 .therm = gf119_therm_new,
1882 .timer = nv41_timer_new, 1882 .timer = nv41_timer_new,
1883 .volt = nv40_volt_new, 1883 .volt = gk104_volt_new,
1884 .ce[0] = gk104_ce_new, 1884 .ce[0] = gk104_ce_new,
1885 .ce[1] = gk104_ce_new, 1885 .ce[1] = gk104_ce_new,
1886 .ce[2] = gk104_ce_new, 1886 .ce[2] = gk104_ce_new,
@@ -1912,11 +1912,11 @@ nv108_chipset = {
1912 .mc = gk20a_mc_new, 1912 .mc = gk20a_mc_new,
1913 .mmu = gf100_mmu_new, 1913 .mmu = gf100_mmu_new,
1914 .mxm = nv50_mxm_new, 1914 .mxm = nv50_mxm_new,
1915 .pci = nv40_pci_new, 1915 .pci = g94_pci_new,
1916 .pmu = gk208_pmu_new, 1916 .pmu = gk208_pmu_new,
1917 .therm = gf119_therm_new, 1917 .therm = gf119_therm_new,
1918 .timer = nv41_timer_new, 1918 .timer = nv41_timer_new,
1919 .volt = nv40_volt_new, 1919 .volt = gk104_volt_new,
1920 .ce[0] = gk104_ce_new, 1920 .ce[0] = gk104_ce_new,
1921 .ce[1] = gk104_ce_new, 1921 .ce[1] = gk104_ce_new,
1922 .ce[2] = gk104_ce_new, 1922 .ce[2] = gk104_ce_new,
@@ -1948,10 +1948,11 @@ nv117_chipset = {
1948 .mc = gk20a_mc_new, 1948 .mc = gk20a_mc_new,
1949 .mmu = gf100_mmu_new, 1949 .mmu = gf100_mmu_new,
1950 .mxm = nv50_mxm_new, 1950 .mxm = nv50_mxm_new,
1951 .pci = nv40_pci_new, 1951 .pci = g94_pci_new,
1952 .pmu = gm107_pmu_new, 1952 .pmu = gm107_pmu_new,
1953 .therm = gm107_therm_new, 1953 .therm = gm107_therm_new,
1954 .timer = gk20a_timer_new, 1954 .timer = gk20a_timer_new,
1955 .volt = gk104_volt_new,
1955 .ce[0] = gk104_ce_new, 1956 .ce[0] = gk104_ce_new,
1956 .ce[2] = gk104_ce_new, 1957 .ce[2] = gk104_ce_new,
1957 .disp = gm107_disp_new, 1958 .disp = gm107_disp_new,
@@ -1978,9 +1979,10 @@ nv124_chipset = {
1978 .mc = gk20a_mc_new, 1979 .mc = gk20a_mc_new,
1979 .mmu = gf100_mmu_new, 1980 .mmu = gf100_mmu_new,
1980 .mxm = nv50_mxm_new, 1981 .mxm = nv50_mxm_new,
1981 .pci = nv40_pci_new, 1982 .pci = g94_pci_new,
1982 .pmu = gm107_pmu_new, 1983 .pmu = gm107_pmu_new,
1983 .timer = gk20a_timer_new, 1984 .timer = gk20a_timer_new,
1985 .volt = gk104_volt_new,
1984 .ce[0] = gm204_ce_new, 1986 .ce[0] = gm204_ce_new,
1985 .ce[1] = gm204_ce_new, 1987 .ce[1] = gm204_ce_new,
1986 .ce[2] = gm204_ce_new, 1988 .ce[2] = gm204_ce_new,
@@ -2008,9 +2010,10 @@ nv126_chipset = {
2008 .mc = gk20a_mc_new, 2010 .mc = gk20a_mc_new,
2009 .mmu = gf100_mmu_new, 2011 .mmu = gf100_mmu_new,
2010 .mxm = nv50_mxm_new, 2012 .mxm = nv50_mxm_new,
2011 .pci = nv40_pci_new, 2013 .pci = g94_pci_new,
2012 .pmu = gm107_pmu_new, 2014 .pmu = gm107_pmu_new,
2013 .timer = gk20a_timer_new, 2015 .timer = gk20a_timer_new,
2016 .volt = gk104_volt_new,
2014 .ce[0] = gm204_ce_new, 2017 .ce[0] = gm204_ce_new,
2015 .ce[1] = gm204_ce_new, 2018 .ce[1] = gm204_ce_new,
2016 .ce[2] = gm204_ce_new, 2019 .ce[2] = gm204_ce_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index e8eb14e438f4..e3c783d0e2ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -259,6 +259,12 @@ nvkm_device_pci_10de_0df4[] = {
259}; 259};
260 260
261static const struct nvkm_device_pci_vendor 261static const struct nvkm_device_pci_vendor
262nvkm_device_pci_10de_0fcd[] = {
263 { 0x17aa, 0x3801, NULL, { .War00C800_0 = true } }, /* Lenovo Y510P */
264 {}
265};
266
267static const struct nvkm_device_pci_vendor
262nvkm_device_pci_10de_0fd2[] = { 268nvkm_device_pci_10de_0fd2[] = {
263 { 0x1028, 0x0595, "GeForce GT 640M LE" }, 269 { 0x1028, 0x0595, "GeForce GT 640M LE" },
264 { 0x1028, 0x05b2, "GeForce GT 640M LE" }, 270 { 0x1028, 0x05b2, "GeForce GT 640M LE" },
@@ -678,6 +684,7 @@ nvkm_device_pci_10de_1189[] = {
678static const struct nvkm_device_pci_vendor 684static const struct nvkm_device_pci_vendor
679nvkm_device_pci_10de_1199[] = { 685nvkm_device_pci_10de_1199[] = {
680 { 0x1458, 0xd001, "GeForce GTX 760" }, 686 { 0x1458, 0xd001, "GeForce GTX 760" },
687 { 0x1462, 0x1106, "GeForce GTX 780M", { .War00C800_0 = true } }, /* Medion Erazer X7827 */
681 {} 688 {}
682}; 689};
683 690
@@ -1349,7 +1356,7 @@ nvkm_device_pci_10de[] = {
1349 { 0x0fc6, "GeForce GTX 650" }, 1356 { 0x0fc6, "GeForce GTX 650" },
1350 { 0x0fc8, "GeForce GT 740" }, 1357 { 0x0fc8, "GeForce GT 740" },
1351 { 0x0fc9, "GeForce GT 730" }, 1358 { 0x0fc9, "GeForce GT 730" },
1352 { 0x0fcd, "GeForce GT 755M" }, 1359 { 0x0fcd, "GeForce GT 755M", nvkm_device_pci_10de_0fcd },
1353 { 0x0fce, "GeForce GT 640M LE" }, 1360 { 0x0fce, "GeForce GT 640M LE" },
1354 { 0x0fd1, "GeForce GT 650M" }, 1361 { 0x0fd1, "GeForce GT 650M" },
1355 { 0x0fd2, "GeForce GT 640M", nvkm_device_pci_10de_0fd2 }, 1362 { 0x0fd2, "GeForce GT 640M", nvkm_device_pci_10de_0fd2 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index da57c8a60608..7f8a42721eb2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -85,6 +85,9 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
85 unsigned long pgsize_bitmap; 85 unsigned long pgsize_bitmap;
86 int ret; 86 int ret;
87 87
88 if (!tdev->func->iommu_bit)
89 return;
90
88 mutex_init(&tdev->iommu.mutex); 91 mutex_init(&tdev->iommu.mutex);
89 92
90 if (iommu_present(&platform_bus_type)) { 93 if (iommu_present(&platform_bus_type)) {
@@ -114,7 +117,8 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
114 goto free_domain; 117 goto free_domain;
115 118
116 ret = nvkm_mm_init(&tdev->iommu.mm, 0, 119 ret = nvkm_mm_init(&tdev->iommu.mm, 0,
117 (1ULL << 40) >> tdev->iommu.pgshift, 1); 120 (1ULL << tdev->func->iommu_bit) >>
121 tdev->iommu.pgshift, 1);
118 if (ret) 122 if (ret)
119 goto detach_device; 123 goto detach_device;
120 } 124 }
@@ -237,7 +241,8 @@ nvkm_device_tegra_func = {
237}; 241};
238 242
239int 243int
240nvkm_device_tegra_new(struct platform_device *pdev, 244nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
245 struct platform_device *pdev,
241 const char *cfg, const char *dbg, 246 const char *cfg, const char *dbg,
242 bool detect, bool mmio, u64 subdev_mask, 247 bool detect, bool mmio, u64 subdev_mask,
243 struct nvkm_device **pdevice) 248 struct nvkm_device **pdevice)
@@ -248,6 +253,7 @@ nvkm_device_tegra_new(struct platform_device *pdev,
248 if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL))) 253 if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
249 return -ENOMEM; 254 return -ENOMEM;
250 *pdevice = &tdev->device; 255 *pdevice = &tdev->device;
256 tdev->func = func;
251 tdev->pdev = pdev; 257 tdev->pdev = pdev;
252 tdev->irq = -1; 258 tdev->irq = -1;
253 259
@@ -285,7 +291,8 @@ nvkm_device_tegra_new(struct platform_device *pdev,
285} 291}
286#else 292#else
287int 293int
288nvkm_device_tegra_new(struct platform_device *pdev, 294nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
295 struct platform_device *pdev,
289 const char *cfg, const char *dbg, 296 const char *cfg, const char *dbg,
290 bool detect, bool mmio, u64 subdev_mask, 297 bool detect, bool mmio, u64 subdev_mask,
291 struct nvkm_device **pdevice) 298 struct nvkm_device **pdevice)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c
index 62d3fb66d0ec..2be846374d39 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c
@@ -109,7 +109,7 @@ nv04_disp_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
109 return -EINVAL; 109 return -EINVAL;
110} 110}
111 111
112static struct nvkm_object_func 112static const struct nvkm_object_func
113nv04_disp_root = { 113nv04_disp_root = {
114 .mthd = nv04_disp_mthd, 114 .mthd = nv04_disp_mthd,
115 .ntfy = nvkm_disp_ntfy, 115 .ntfy = nvkm_disp_ntfy,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index f1358a564e3e..dda7a7d224c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -882,6 +882,7 @@ static const struct nvkm_enum gf100_mp_warp_error[] = {
882 { 0x0d, "GPR_OUT_OF_BOUNDS" }, 882 { 0x0d, "GPR_OUT_OF_BOUNDS" },
883 { 0x0e, "MEM_OUT_OF_BOUNDS" }, 883 { 0x0e, "MEM_OUT_OF_BOUNDS" },
884 { 0x0f, "UNALIGNED_MEM_ACCESS" }, 884 { 0x0f, "UNALIGNED_MEM_ACCESS" },
885 { 0x10, "INVALID_ADDR_SPACE" },
885 { 0x11, "INVALID_PARAM" }, 886 { 0x11, "INVALID_PARAM" },
886 {} 887 {}
887}; 888};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index d13187409d68..d081ee41fc14 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -98,6 +98,7 @@ gf110_gr = {
98 { -1, -1, FERMI_B, &gf100_fermi }, 98 { -1, -1, FERMI_B, &gf100_fermi },
99 { -1, -1, FERMI_C, &gf100_fermi }, 99 { -1, -1, FERMI_C, &gf100_fermi },
100 { -1, -1, FERMI_COMPUTE_A }, 100 { -1, -1, FERMI_COMPUTE_A },
101 { -1, -1, FERMI_COMPUTE_B },
101 {} 102 {}
102 } 103 }
103}; 104};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 28483d8bf3d2..d8e8af4d3b30 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -135,6 +135,7 @@ gf117_gr = {
135 { -1, -1, FERMI_B, &gf100_fermi }, 135 { -1, -1, FERMI_B, &gf100_fermi },
136 { -1, -1, FERMI_C, &gf100_fermi }, 136 { -1, -1, FERMI_C, &gf100_fermi },
137 { -1, -1, FERMI_COMPUTE_A }, 137 { -1, -1, FERMI_COMPUTE_A },
138 { -1, -1, FERMI_COMPUTE_B },
138 {} 139 {}
139 } 140 }
140}; 141};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 9811a72e0313..01faf9a73774 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -189,6 +189,7 @@ gf119_gr = {
189 { -1, -1, FERMI_B, &gf100_fermi }, 189 { -1, -1, FERMI_B, &gf100_fermi },
190 { -1, -1, FERMI_C, &gf100_fermi }, 190 { -1, -1, FERMI_C, &gf100_fermi },
191 { -1, -1, FERMI_COMPUTE_A }, 191 { -1, -1, FERMI_COMPUTE_A },
192 { -1, -1, FERMI_COMPUTE_B },
192 {} 193 {}
193 } 194 }
194}; 195};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index 0db9be202c42..2721592d3031 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -633,7 +633,7 @@ nvkm_perfmon_dtor(struct nvkm_object *object)
633 return perfmon; 633 return perfmon;
634} 634}
635 635
636static struct nvkm_object_func 636static const struct nvkm_object_func
637nvkm_perfmon = { 637nvkm_perfmon = {
638 .dtor = nvkm_perfmon_dtor, 638 .dtor = nvkm_perfmon_dtor,
639 .mthd = nvkm_perfmon_mthd, 639 .mthd = nvkm_perfmon_mthd,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
index 441ec451b788..c268e5afe852 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
@@ -62,19 +62,6 @@ nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
62} 62}
63 63
64u32 64u32
65nvbios_pmuTp(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
66 struct nvbios_pmuT *info)
67{
68 u32 data = nvbios_pmuTe(bios, ver, hdr, cnt, len);
69 memset(info, 0x00, sizeof(*info));
70 switch (!!data * *ver) {
71 default:
72 break;
73 }
74 return data;
75}
76
77u32
78nvbios_pmuEe(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr) 65nvbios_pmuEe(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr)
79{ 66{
80 u8 cnt, len; 67 u8 cnt, len;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
index f0e1fc74a52e..d0ae7454764e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
@@ -171,6 +171,7 @@ nvbios_rammapSp_from_perf(struct nvkm_bios *bios, u32 data, u8 size, int idx,
171 p->ramcfg_DLLoff = (nvbios_rd08(bios, data + 0x03) & 0x04) >> 2; 171 p->ramcfg_DLLoff = (nvbios_rd08(bios, data + 0x03) & 0x04) >> 2;
172 p->ramcfg_00_03_08 = (nvbios_rd08(bios, data + 0x03) & 0x08) >> 3; 172 p->ramcfg_00_03_08 = (nvbios_rd08(bios, data + 0x03) & 0x08) >> 3;
173 p->ramcfg_RON = (nvbios_rd08(bios, data + 0x03) & 0x10) >> 3; 173 p->ramcfg_RON = (nvbios_rd08(bios, data + 0x03) & 0x10) >> 3;
174 p->ramcfg_FBVDDQ = (nvbios_rd08(bios, data + 0x03) & 0x80) >> 7;
174 p->ramcfg_00_04_02 = (nvbios_rd08(bios, data + 0x04) & 0x02) >> 1; 175 p->ramcfg_00_04_02 = (nvbios_rd08(bios, data + 0x04) & 0x02) >> 1;
175 p->ramcfg_00_04_04 = (nvbios_rd08(bios, data + 0x04) & 0x04) >> 2; 176 p->ramcfg_00_04_04 = (nvbios_rd08(bios, data + 0x04) & 0x04) >> 2;
176 p->ramcfg_00_04_20 = (nvbios_rd08(bios, data + 0x04) & 0x20) >> 5; 177 p->ramcfg_00_04_20 = (nvbios_rd08(bios, data + 0x04) & 0x20) >> 5;
@@ -205,6 +206,7 @@ nvbios_rammapSp(struct nvkm_bios *bios, u32 data,
205 p->ramcfg_DLLoff = (nvbios_rd08(bios, data + 0x02) & 0x40) >> 6; 206 p->ramcfg_DLLoff = (nvbios_rd08(bios, data + 0x02) & 0x40) >> 6;
206 p->ramcfg_10_03_0f = (nvbios_rd08(bios, data + 0x03) & 0x0f) >> 0; 207 p->ramcfg_10_03_0f = (nvbios_rd08(bios, data + 0x03) & 0x0f) >> 0;
207 p->ramcfg_10_04_01 = (nvbios_rd08(bios, data + 0x04) & 0x01) >> 0; 208 p->ramcfg_10_04_01 = (nvbios_rd08(bios, data + 0x04) & 0x01) >> 0;
209 p->ramcfg_FBVDDQ = (nvbios_rd08(bios, data + 0x04) & 0x08) >> 3;
208 p->ramcfg_10_05 = (nvbios_rd08(bios, data + 0x05) & 0xff) >> 0; 210 p->ramcfg_10_05 = (nvbios_rd08(bios, data + 0x05) & 0xff) >> 0;
209 p->ramcfg_10_06 = (nvbios_rd08(bios, data + 0x06) & 0xff) >> 0; 211 p->ramcfg_10_06 = (nvbios_rd08(bios, data + 0x06) & 0xff) >> 0;
210 p->ramcfg_10_07 = (nvbios_rd08(bios, data + 0x07) & 0xff) >> 0; 212 p->ramcfg_10_07 = (nvbios_rd08(bios, data + 0x07) & 0xff) >> 0;
@@ -219,7 +221,7 @@ nvbios_rammapSp(struct nvkm_bios *bios, u32 data,
219 p->ramcfg_11_01_04 = (nvbios_rd08(bios, data + 0x01) & 0x04) >> 2; 221 p->ramcfg_11_01_04 = (nvbios_rd08(bios, data + 0x01) & 0x04) >> 2;
220 p->ramcfg_11_01_08 = (nvbios_rd08(bios, data + 0x01) & 0x08) >> 3; 222 p->ramcfg_11_01_08 = (nvbios_rd08(bios, data + 0x01) & 0x08) >> 3;
221 p->ramcfg_11_01_10 = (nvbios_rd08(bios, data + 0x01) & 0x10) >> 4; 223 p->ramcfg_11_01_10 = (nvbios_rd08(bios, data + 0x01) & 0x10) >> 4;
222 p->ramcfg_11_01_20 = (nvbios_rd08(bios, data + 0x01) & 0x20) >> 5; 224 p->ramcfg_DLLoff = (nvbios_rd08(bios, data + 0x01) & 0x20) >> 5;
223 p->ramcfg_11_01_40 = (nvbios_rd08(bios, data + 0x01) & 0x40) >> 6; 225 p->ramcfg_11_01_40 = (nvbios_rd08(bios, data + 0x01) & 0x40) >> 6;
224 p->ramcfg_11_01_80 = (nvbios_rd08(bios, data + 0x01) & 0x80) >> 7; 226 p->ramcfg_11_01_80 = (nvbios_rd08(bios, data + 0x01) & 0x80) >> 7;
225 p->ramcfg_11_02_03 = (nvbios_rd08(bios, data + 0x02) & 0x03) >> 0; 227 p->ramcfg_11_02_03 = (nvbios_rd08(bios, data + 0x02) & 0x03) >> 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
index 615804c3887b..6e0a33648be9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
@@ -73,15 +73,19 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
73 memset(info, 0x00, sizeof(*info)); 73 memset(info, 0x00, sizeof(*info));
74 switch (!!volt * *ver) { 74 switch (!!volt * *ver) {
75 case 0x12: 75 case 0x12:
76 info->type = NVBIOS_VOLT_GPIO;
76 info->vidmask = nvbios_rd08(bios, volt + 0x04); 77 info->vidmask = nvbios_rd08(bios, volt + 0x04);
77 break; 78 break;
78 case 0x20: 79 case 0x20:
80 info->type = NVBIOS_VOLT_GPIO;
79 info->vidmask = nvbios_rd08(bios, volt + 0x05); 81 info->vidmask = nvbios_rd08(bios, volt + 0x05);
80 break; 82 break;
81 case 0x30: 83 case 0x30:
84 info->type = NVBIOS_VOLT_GPIO;
82 info->vidmask = nvbios_rd08(bios, volt + 0x04); 85 info->vidmask = nvbios_rd08(bios, volt + 0x04);
83 break; 86 break;
84 case 0x40: 87 case 0x40:
88 info->type = NVBIOS_VOLT_GPIO;
85 info->base = nvbios_rd32(bios, volt + 0x04); 89 info->base = nvbios_rd32(bios, volt + 0x04);
86 info->step = nvbios_rd16(bios, volt + 0x08); 90 info->step = nvbios_rd16(bios, volt + 0x08);
87 info->vidmask = nvbios_rd08(bios, volt + 0x0b); 91 info->vidmask = nvbios_rd08(bios, volt + 0x0b);
@@ -90,11 +94,20 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
90 info->max = info->base; 94 info->max = info->base;
91 break; 95 break;
92 case 0x50: 96 case 0x50:
93 info->vidmask = nvbios_rd08(bios, volt + 0x06);
94 info->min = nvbios_rd32(bios, volt + 0x0a); 97 info->min = nvbios_rd32(bios, volt + 0x0a);
95 info->max = nvbios_rd32(bios, volt + 0x0e); 98 info->max = nvbios_rd32(bios, volt + 0x0e);
96 info->base = nvbios_rd32(bios, volt + 0x12) & 0x00ffffff; 99 info->base = nvbios_rd32(bios, volt + 0x12) & 0x00ffffff;
97 info->step = nvbios_rd16(bios, volt + 0x16); 100
101 /* offset 4 seems to be a flag byte */
102 if (nvbios_rd32(bios, volt + 0x4) & 1) {
103 info->type = NVBIOS_VOLT_PWM;
104 info->pwm_freq = nvbios_rd32(bios, volt + 0x5) / 1000;
105 info->pwm_range = nvbios_rd32(bios, volt + 0x16);
106 } else {
107 info->type = NVBIOS_VOLT_GPIO;
108 info->vidmask = nvbios_rd08(bios, volt + 0x06);
109 info->step = nvbios_rd16(bios, volt + 0x16);
110 }
98 break; 111 break;
99 } 112 }
100 return volt; 113 return volt;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
index 79f1cf513b36..2a5668938f2f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
@@ -132,6 +132,38 @@ nvkm_hwsq_wait(struct nvkm_hwsq *hwsq, u8 flag, u8 data)
132} 132}
133 133
134void 134void
135nvkm_hwsq_wait_vblank(struct nvkm_hwsq *hwsq)
136{
137 struct nvkm_subdev *subdev = hwsq->subdev;
138 struct nvkm_device *device = subdev->device;
139 u32 heads, x, y, px = 0;
140 int i, head_sync;
141
142 heads = nvkm_rd32(device, 0x610050);
143 for (i = 0; i < 2; i++) {
144 /* Heuristic: sync to head with biggest resolution */
145 if (heads & (2 << (i << 3))) {
146 x = nvkm_rd32(device, 0x610b40 + (0x540 * i));
147 y = (x & 0xffff0000) >> 16;
148 x &= 0x0000ffff;
149 if ((x * y) > px) {
150 px = (x * y);
151 head_sync = i;
152 }
153 }
154 }
155
156 if (px == 0) {
157 nvkm_debug(subdev, "WAIT VBLANK !NO ACTIVE HEAD\n");
158 return;
159 }
160
161 nvkm_debug(subdev, "WAIT VBLANK HEAD%d\n", head_sync);
162 nvkm_hwsq_wait(hwsq, head_sync ? 0x3 : 0x1, 0x0);
163 nvkm_hwsq_wait(hwsq, head_sync ? 0x3 : 0x1, 0x1);
164}
165
166void
135nvkm_hwsq_nsec(struct nvkm_hwsq *hwsq, u32 nsec) 167nvkm_hwsq_nsec(struct nvkm_hwsq *hwsq, u32 nsec)
136{ 168{
137 u8 shift = 0, usec = nsec / 1000; 169 u8 shift = 0, usec = nsec / 1000;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
index 8117ec5a1468..54ec3b131dfd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
@@ -134,6 +134,12 @@ hwsq_wait(struct hwsq *ram, u8 flag, u8 data)
134} 134}
135 135
136static inline void 136static inline void
137hwsq_wait_vblank(struct hwsq *ram)
138{
139 nvkm_hwsq_wait_vblank(ram->hwsq);
140}
141
142static inline void
137hwsq_nsec(struct hwsq *ram, u32 nsec) 143hwsq_nsec(struct hwsq *ram, u32 nsec)
138{ 144{
139 nvkm_hwsq_nsec(ram->hwsq, nsec); 145 nvkm_hwsq_nsec(ram->hwsq, nsec);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
index 347da9ee20f5..f97e3ec196bb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
@@ -44,5 +44,5 @@ int
44g84_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) 44g84_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
45{ 45{
46 return nv50_clk_new_(&g84_clk, device, index, 46 return nv50_clk_new_(&g84_clk, device, index,
47 (device->chipset == 0xa0), pclk); 47 (device->chipset >= 0x94), pclk);
48} 48}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
index 79b523aa52aa..60ece0a8a2e1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
@@ -63,7 +63,7 @@ ramgddr3_wr_lo[] = {
63 { 5, 2 }, { 7, 4 }, { 8, 5 }, { 9, 6 }, { 10, 7 }, 63 { 5, 2 }, { 7, 4 }, { 8, 5 }, { 9, 6 }, { 10, 7 },
64 { 11, 0 }, { 13 , 1 }, 64 { 11, 0 }, { 13 , 1 },
65 /* the below are mentioned in some, but not all, gddr3 docs */ 65 /* the below are mentioned in some, but not all, gddr3 docs */
66 { 4, 1 }, { 6, 3 }, { 12, 1 }, 66 { 4, 0 }, { 6, 3 }, { 12, 1 },
67 { -1 } 67 { -1 }
68}; 68};
69 69
@@ -87,15 +87,17 @@ nvkm_gddr3_calc(struct nvkm_ram *ram)
87 WR = (ram->next->bios.timing[2] & 0x007f0000) >> 16; 87 WR = (ram->next->bios.timing[2] & 0x007f0000) >> 16;
88 /* XXX: Get these values from the VBIOS instead */ 88 /* XXX: Get these values from the VBIOS instead */
89 DLL = !(ram->mr[1] & 0x1); 89 DLL = !(ram->mr[1] & 0x1);
90 ODT = (ram->mr[1] & 0x004) >> 2 |
91 (ram->mr[1] & 0x040) >> 5 |
92 (ram->mr[1] & 0x200) >> 7;
93 RON = !(ram->mr[1] & 0x300) >> 8; 90 RON = !(ram->mr[1] & 0x300) >> 8;
94 break; 91 break;
95 default: 92 default:
96 return -ENOSYS; 93 return -ENOSYS;
97 } 94 }
98 95
96 if (ram->next->bios.timing_ver == 0x20 ||
97 ram->next->bios.ramcfg_timing == 0xff) {
98 ODT = (ram->mr[1] & 0xc) >> 2;
99 }
100
99 hi = ram->mr[2] & 0x1; 101 hi = ram->mr[2] & 0x1;
100 CL = ramxlat(hi ? ramgddr3_cl_hi : ramgddr3_cl_lo, CL); 102 CL = ramxlat(hi ? ramgddr3_cl_hi : ramgddr3_cl_lo, CL);
101 WR = ramxlat(ramgddr3_wr_lo, WR); 103 WR = ramxlat(ramgddr3_wr_lo, WR);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c
index 24f83b09e6a1..2cc074d3901a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c
@@ -38,11 +38,12 @@ nvkm_gddr5_calc(struct nvkm_ram *ram, bool nuts)
38 int WL, CL, WR, at[2], dt, ds; 38 int WL, CL, WR, at[2], dt, ds;
39 int rq = ram->freq < 1000000; /* XXX */ 39 int rq = ram->freq < 1000000; /* XXX */
40 40
41 xd = !ram->next->bios.ramcfg_DLLoff;
42
41 switch (ram->next->bios.ramcfg_ver) { 43 switch (ram->next->bios.ramcfg_ver) {
42 case 0x11: 44 case 0x11:
43 pd = ram->next->bios.ramcfg_11_01_80; 45 pd = ram->next->bios.ramcfg_11_01_80;
44 lf = ram->next->bios.ramcfg_11_01_40; 46 lf = ram->next->bios.ramcfg_11_01_40;
45 xd = !ram->next->bios.ramcfg_11_01_20;
46 vh = ram->next->bios.ramcfg_11_02_10; 47 vh = ram->next->bios.ramcfg_11_02_10;
47 vr = ram->next->bios.ramcfg_11_02_04; 48 vr = ram->next->bios.ramcfg_11_02_04;
48 vo = ram->next->bios.ramcfg_11_06; 49 vo = ram->next->bios.ramcfg_11_06;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 989355622aac..9df45030ff9f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -673,6 +673,25 @@ gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
673 * DDR3 673 * DDR3
674 ******************************************************************************/ 674 ******************************************************************************/
675 675
676static void
677nvkm_sddr3_dll_reset(struct gk104_ramfuc *fuc)
678{
679 ram_nuke(fuc, mr[0]);
680 ram_mask(fuc, mr[0], 0x100, 0x100);
681 ram_mask(fuc, mr[0], 0x100, 0x000);
682}
683
684static void
685nvkm_sddr3_dll_disable(struct gk104_ramfuc *fuc)
686{
687 u32 mr1_old = ram_rd32(fuc, mr[1]);
688
689 if (!(mr1_old & 0x1)) {
690 ram_mask(fuc, mr[1], 0x1, 0x1);
691 ram_nsec(fuc, 1000);
692 }
693}
694
676static int 695static int
677gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq) 696gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
678{ 697{
@@ -702,6 +721,10 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
702 ram_mask(fuc, 0x10f808, 0x04000000, 0x04000000); 721 ram_mask(fuc, 0x10f808, 0x04000000, 0x04000000);
703 722
704 ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */ 723 ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
724
725 if (next->bios.ramcfg_DLLoff)
726 nvkm_sddr3_dll_disable(fuc);
727
705 ram_wr32(fuc, 0x10f210, 0x00000000); /* REFRESH_AUTO = 0 */ 728 ram_wr32(fuc, 0x10f210, 0x00000000); /* REFRESH_AUTO = 0 */
706 ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */ 729 ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
707 ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000); 730 ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
@@ -879,17 +902,20 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
879 ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */ 902 ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */
880 ram_nsec(fuc, 1000); 903 ram_nsec(fuc, 1000);
881 904
882 ram_nuke(fuc, mr[0]); 905 if (!next->bios.ramcfg_DLLoff) {
883 ram_mask(fuc, mr[0], 0x100, 0x100); 906 ram_mask(fuc, mr[1], 0x1, 0x0);
884 ram_mask(fuc, mr[0], 0x100, 0x000); 907 nvkm_sddr3_dll_reset(fuc);
908 }
885 909
886 ram_mask(fuc, mr[2], 0xfff, ram->base.mr[2]); 910 ram_mask(fuc, mr[2], 0x00000fff, ram->base.mr[2]);
911 ram_mask(fuc, mr[1], 0xffffffff, ram->base.mr[1]);
887 ram_wr32(fuc, mr[0], ram->base.mr[0]); 912 ram_wr32(fuc, mr[0], ram->base.mr[0]);
888 ram_nsec(fuc, 1000); 913 ram_nsec(fuc, 1000);
889 914
890 ram_nuke(fuc, mr[0]); 915 if (!next->bios.ramcfg_DLLoff) {
891 ram_mask(fuc, mr[0], 0x100, 0x100); 916 nvkm_sddr3_dll_reset(fuc);
892 ram_mask(fuc, mr[0], 0x100, 0x000); 917 ram_nsec(fuc, 1000);
918 }
893 919
894 if (vc == 0 && ram_have(fuc, gpio2E)) { 920 if (vc == 0 && ram_have(fuc, gpio2E)) {
895 u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[0]); 921 u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[0]);
@@ -945,6 +971,67 @@ gk104_ram_calc_data(struct gk104_ram *ram, u32 khz, struct nvkm_ram_data *data)
945} 971}
946 972
947static int 973static int
974gk104_calc_pll_output(int fN, int M, int N, int P, int clk)
975{
976 return ((clk * N) + (((u16)(fN + 4096) * clk) >> 13)) / (M * P);
977}
978
979static int
980gk104_pll_calc_hiclk(int target_khz, int crystal,
981 int *N1, int *fN1, int *M1, int *P1,
982 int *N2, int *M2, int *P2)
983{
984 int best_clk = 0, best_err = target_khz, p_ref, n_ref;
985 bool upper = false;
986
987 *M1 = 1;
988 /* M has to be 1, otherwise it gets unstable */
989 *M2 = 1;
990 /* can be 1 or 2, sticking with 1 for simplicity */
991 *P2 = 1;
992
993 for (p_ref = 0x7; p_ref >= 0x5; --p_ref) {
994 for (n_ref = 0x25; n_ref <= 0x2b; ++n_ref) {
995 int cur_N, cur_clk, cur_err;
996
997 cur_clk = gk104_calc_pll_output(0, 1, n_ref, p_ref, crystal);
998 cur_N = target_khz / cur_clk;
999 cur_err = target_khz
1000 - gk104_calc_pll_output(0xf000, 1, cur_N, 1, cur_clk);
1001
1002 /* we found a better combination */
1003 if (cur_err < best_err) {
1004 best_err = cur_err;
1005 best_clk = cur_clk;
1006 *N2 = cur_N;
1007 *N1 = n_ref;
1008 *P1 = p_ref;
1009 upper = false;
1010 }
1011
1012 cur_N += 1;
1013 cur_err = gk104_calc_pll_output(0xf000, 1, cur_N, 1, cur_clk)
1014 - target_khz;
1015 if (cur_err < best_err) {
1016 best_err = cur_err;
1017 best_clk = cur_clk;
1018 *N2 = cur_N;
1019 *N1 = n_ref;
1020 *P1 = p_ref;
1021 upper = true;
1022 }
1023 }
1024 }
1025
1026 /* adjust fN to get closer to the target clock */
1027 *fN1 = (u16)((((best_err / *N2 * *P2) * (*P1 * *M1)) << 13) / crystal);
1028 if (upper)
1029 *fN1 = (u16)(1 - *fN1);
1030
1031 return gk104_calc_pll_output(*fN1, 1, *N1, *P1, crystal);
1032}
1033
1034static int
948gk104_ram_calc_xits(struct gk104_ram *ram, struct nvkm_ram_data *next) 1035gk104_ram_calc_xits(struct gk104_ram *ram, struct nvkm_ram_data *next)
949{ 1036{
950 struct gk104_ramfuc *fuc = &ram->fuc; 1037 struct gk104_ramfuc *fuc = &ram->fuc;
@@ -968,31 +1055,24 @@ gk104_ram_calc_xits(struct gk104_ram *ram, struct nvkm_ram_data *next)
968 * kepler boards, no idea how/why they're chosen. 1055 * kepler boards, no idea how/why they're chosen.
969 */ 1056 */
970 refclk = next->freq; 1057 refclk = next->freq;
971 if (ram->mode == 2)
972 refclk = fuc->mempll.refclk;
973
974 /* calculate refpll coefficients */
975 ret = gt215_pll_calc(subdev, &fuc->refpll, refclk, &ram->N1,
976 &ram->fN1, &ram->M1, &ram->P1);
977 fuc->mempll.refclk = ret;
978 if (ret <= 0) {
979 nvkm_error(subdev, "unable to calc refpll\n");
980 return -EINVAL;
981 }
982
983 /* calculate mempll coefficients, if we're using it */
984 if (ram->mode == 2) { 1058 if (ram->mode == 2) {
985 /* post-divider doesn't work... the reg takes the values but 1059 ret = gk104_pll_calc_hiclk(next->freq, subdev->device->crystal,
986 * appears to completely ignore it. there *is* a bit at 1060 &ram->N1, &ram->fN1, &ram->M1, &ram->P1,
987 * bit 28 that appears to divide the clock by 2 if set. 1061 &ram->N2, &ram->M2, &ram->P2);
988 */ 1062 fuc->mempll.refclk = ret;
989 fuc->mempll.min_p = 1; 1063 if (ret <= 0) {
990 fuc->mempll.max_p = 2; 1064 nvkm_error(subdev, "unable to calc plls\n");
991 1065 return -EINVAL;
992 ret = gt215_pll_calc(subdev, &fuc->mempll, next->freq, 1066 }
993 &ram->N2, NULL, &ram->M2, &ram->P2); 1067 nvkm_debug(subdev, "sucessfully calced PLLs for clock %i kHz"
1068 " (refclock: %i kHz)\n", next->freq, ret);
1069 } else {
1070 /* calculate refpll coefficients */
1071 ret = gt215_pll_calc(subdev, &fuc->refpll, refclk, &ram->N1,
1072 &ram->fN1, &ram->M1, &ram->P1);
1073 fuc->mempll.refclk = ret;
994 if (ret <= 0) { 1074 if (ret <= 0) {
995 nvkm_error(subdev, "unable to calc mempll\n"); 1075 nvkm_error(subdev, "unable to calc refpll\n");
996 return -EINVAL; 1076 return -EINVAL;
997 } 1077 }
998 } 1078 }
@@ -1600,6 +1680,7 @@ gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
1600 break; 1680 break;
1601 case NVKM_RAM_TYPE_DDR3: 1681 case NVKM_RAM_TYPE_DDR3:
1602 ram->fuc.r_mr[0] = ramfuc_reg(0x10f300); 1682 ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
1683 ram->fuc.r_mr[1] = ramfuc_reg(0x10f304);
1603 ram->fuc.r_mr[2] = ramfuc_reg(0x10f320); 1684 ram->fuc.r_mr[2] = ramfuc_reg(0x10f320);
1604 break; 1685 break;
1605 default: 1686 default:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
index 5c08ae8023fa..d15ea886df27 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
@@ -34,9 +34,6 @@
34#include <subdev/clk/gt215.h> 34#include <subdev/clk/gt215.h>
35#include <subdev/gpio.h> 35#include <subdev/gpio.h>
36 36
37/* XXX: Remove when memx gains GPIO support */
38extern int nv50_gpio_location(int line, u32 *reg, u32 *shift);
39
40struct gt215_ramfuc { 37struct gt215_ramfuc {
41 struct ramfuc base; 38 struct ramfuc base;
42 struct ramfuc_reg r_0x001610; 39 struct ramfuc_reg r_0x001610;
@@ -75,7 +72,7 @@ struct gt215_ramfuc {
75 struct ramfuc_reg r_0x111400; 72 struct ramfuc_reg r_0x111400;
76 struct ramfuc_reg r_0x611200; 73 struct ramfuc_reg r_0x611200;
77 struct ramfuc_reg r_mr[4]; 74 struct ramfuc_reg r_mr[4];
78 struct ramfuc_reg r_gpioFBVREF; 75 struct ramfuc_reg r_gpio[4];
79}; 76};
80 77
81struct gt215_ltrain { 78struct gt215_ltrain {
@@ -466,24 +463,27 @@ gt215_ram_lock_pll(struct gt215_ramfuc *fuc, struct gt215_clk_info *mclk)
466} 463}
467 464
468static void 465static void
469gt215_ram_fbvref(struct gt215_ramfuc *fuc, u32 val) 466gt215_ram_gpio(struct gt215_ramfuc *fuc, u8 tag, u32 val)
470{ 467{
471 struct nvkm_gpio *gpio = fuc->base.fb->subdev.device->gpio; 468 struct nvkm_gpio *gpio = fuc->base.fb->subdev.device->gpio;
472 struct dcb_gpio_func func; 469 struct dcb_gpio_func func;
473 u32 reg, sh, gpio_val; 470 u32 reg, sh, gpio_val;
474 int ret; 471 int ret;
475 472
476 if (nvkm_gpio_get(gpio, 0, 0x2e, DCB_GPIO_UNUSED) != val) { 473 if (nvkm_gpio_get(gpio, 0, tag, DCB_GPIO_UNUSED) != val) {
477 ret = nvkm_gpio_find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func); 474 ret = nvkm_gpio_find(gpio, 0, tag, DCB_GPIO_UNUSED, &func);
478 if (ret) 475 if (ret)
479 return; 476 return;
480 477
481 nv50_gpio_location(func.line, &reg, &sh); 478 reg = func.line >> 3;
482 gpio_val = ram_rd32(fuc, gpioFBVREF); 479 sh = (func.line & 0x7) << 2;
480 gpio_val = ram_rd32(fuc, gpio[reg]);
483 if (gpio_val & (8 << sh)) 481 if (gpio_val & (8 << sh))
484 val = !val; 482 val = !val;
483 if (!(func.log[1] & 1))
484 val = !val;
485 485
486 ram_mask(fuc, gpioFBVREF, (0x3 << sh), ((val | 0x2) << sh)); 486 ram_mask(fuc, gpio[reg], (0x3 << sh), ((val | 0x2) << sh));
487 ram_nsec(fuc, 20000); 487 ram_nsec(fuc, 20000);
488 } 488 }
489} 489}
@@ -498,6 +498,7 @@ gt215_ram_calc(struct nvkm_ram *base, u32 freq)
498 struct nvkm_device *device = subdev->device; 498 struct nvkm_device *device = subdev->device;
499 struct nvkm_bios *bios = device->bios; 499 struct nvkm_bios *bios = device->bios;
500 struct gt215_clk_info mclk; 500 struct gt215_clk_info mclk;
501 struct nvkm_gpio *gpio = device->gpio;
501 struct nvkm_ram_data *next; 502 struct nvkm_ram_data *next;
502 u8 ver, hdr, cnt, len, strap; 503 u8 ver, hdr, cnt, len, strap;
503 u32 data; 504 u32 data;
@@ -642,8 +643,8 @@ gt215_ram_calc(struct nvkm_ram *base, u32 freq)
642 break; 643 break;
643 } 644 }
644 645
645 if (fuc->r_gpioFBVREF.addr && next->bios.timing_10_ODT) 646 if (next->bios.timing_10_ODT)
646 gt215_ram_fbvref(fuc, 0); 647 gt215_ram_gpio(fuc, 0x2e, 1);
647 648
648 /* Brace RAM for impact */ 649 /* Brace RAM for impact */
649 ram_wr32(fuc, 0x1002d4, 0x00000001); 650 ram_wr32(fuc, 0x1002d4, 0x00000001);
@@ -656,6 +657,23 @@ gt215_ram_calc(struct nvkm_ram *base, u32 freq)
656 if (device->chipset == 0xa3 && freq <= 500000) 657 if (device->chipset == 0xa3 && freq <= 500000)
657 ram_mask(fuc, 0x100700, 0x00000006, 0x00000006); 658 ram_mask(fuc, 0x100700, 0x00000006, 0x00000006);
658 659
660 /* Alter FBVDD/Q, apparently must be done with PLL disabled, thus
661 * set it to bypass */
662 if (nvkm_gpio_get(gpio, 0, 0x18, DCB_GPIO_UNUSED) ==
663 next->bios.ramcfg_FBVDDQ) {
664 data = ram_rd32(fuc, 0x004000) & 0x9;
665
666 if (data == 0x1)
667 ram_mask(fuc, 0x004000, 0x8, 0x8);
668 if (data & 0x1)
669 ram_mask(fuc, 0x004000, 0x1, 0x0);
670
671 gt215_ram_gpio(fuc, 0x18, !next->bios.ramcfg_FBVDDQ);
672
673 if (data & 0x1)
674 ram_mask(fuc, 0x004000, 0x1, 0x1);
675 }
676
659 /* Fiddle with clocks */ 677 /* Fiddle with clocks */
660 /* There's 4 scenario's 678 /* There's 4 scenario's
661 * pll->pll: first switch to a 324MHz clock, set up new PLL, switch 679 * pll->pll: first switch to a 324MHz clock, set up new PLL, switch
@@ -753,39 +771,43 @@ gt215_ram_calc(struct nvkm_ram *base, u32 freq)
753 unk71c = ram_rd32(fuc, 0x10071c) & ~0x00000100; 771 unk71c = ram_rd32(fuc, 0x10071c) & ~0x00000100;
754 r111100 = ram_rd32(fuc, 0x111100) & ~0x3a800000; 772 r111100 = ram_rd32(fuc, 0x111100) & ~0x3a800000;
755 773
756 if (next->bios.ramcfg_10_02_04) { 774 /* NVA8 seems to skip various bits related to ramcfg_10_02_04 */
757 switch (ram->base.type) { 775 if (device->chipset == 0xa8) {
758 case NVKM_RAM_TYPE_DDR3: 776 r111100 |= 0x08000000;
759 if (device->chipset != 0xa8) 777 if (!next->bios.ramcfg_10_02_04)
760 r111100 |= 0x00000004;
761 /* no break */
762 case NVKM_RAM_TYPE_DDR2:
763 r111100 |= 0x08000000;
764 break;
765 default:
766 break;
767 }
768 } else {
769 switch (ram->base.type) {
770 case NVKM_RAM_TYPE_DDR2:
771 r111100 |= 0x1a800000;
772 unk714 |= 0x00000010; 778 unk714 |= 0x00000010;
773 break; 779 } else {
774 case NVKM_RAM_TYPE_DDR3: 780 if (next->bios.ramcfg_10_02_04) {
775 if (device->chipset == 0xa8) { 781 switch (ram->base.type) {
776 r111100 |= 0x08000000; 782 case NVKM_RAM_TYPE_DDR2:
777 } else { 783 case NVKM_RAM_TYPE_DDR3:
778 r111100 &= ~0x00000004; 784 r111100 &= ~0x00000020;
785 if (next->bios.ramcfg_10_02_10)
786 r111100 |= 0x08000004;
787 else
788 r111100 |= 0x00000024;
789 break;
790 default:
791 break;
792 }
793 } else {
794 switch (ram->base.type) {
795 case NVKM_RAM_TYPE_DDR2:
796 case NVKM_RAM_TYPE_DDR3:
797 r111100 &= ~0x00000024;
779 r111100 |= 0x12800000; 798 r111100 |= 0x12800000;
799
800 if (next->bios.ramcfg_10_02_10)
801 r111100 |= 0x08000000;
802 unk714 |= 0x00000010;
803 break;
804 case NVKM_RAM_TYPE_GDDR3:
805 r111100 |= 0x30000000;
806 unk714 |= 0x00000020;
807 break;
808 default:
809 break;
780 } 810 }
781 unk714 |= 0x00000010;
782 break;
783 case NVKM_RAM_TYPE_GDDR3:
784 r111100 |= 0x30000000;
785 unk714 |= 0x00000020;
786 break;
787 default:
788 break;
789 } 811 }
790 } 812 }
791 813
@@ -809,8 +831,8 @@ gt215_ram_calc(struct nvkm_ram *base, u32 freq)
809 ram_mask(fuc, 0x100718, 0xffffffff, unk718); 831 ram_mask(fuc, 0x100718, 0xffffffff, unk718);
810 ram_mask(fuc, 0x111100, 0xffffffff, r111100); 832 ram_mask(fuc, 0x111100, 0xffffffff, r111100);
811 833
812 if (fuc->r_gpioFBVREF.addr && !next->bios.timing_10_ODT) 834 if (!next->bios.timing_10_ODT)
813 gt215_ram_fbvref(fuc, 1); 835 gt215_ram_gpio(fuc, 0x2e, 0);
814 836
815 /* Reset DLL */ 837 /* Reset DLL */
816 if (!next->bios.ramcfg_DLLoff) 838 if (!next->bios.ramcfg_DLLoff)
@@ -919,10 +941,7 @@ gt215_ram_func = {
919int 941int
920gt215_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram) 942gt215_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
921{ 943{
922 struct nvkm_gpio *gpio = fb->subdev.device->gpio;
923 struct dcb_gpio_func func;
924 struct gt215_ram *ram; 944 struct gt215_ram *ram;
925 u32 reg, shift;
926 int ret, i; 945 int ret, i;
927 946
928 if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL))) 947 if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
@@ -981,12 +1000,10 @@ gt215_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
981 ram->fuc.r_mr[2] = ramfuc_reg(0x1002e0); 1000 ram->fuc.r_mr[2] = ramfuc_reg(0x1002e0);
982 ram->fuc.r_mr[3] = ramfuc_reg(0x1002e4); 1001 ram->fuc.r_mr[3] = ramfuc_reg(0x1002e4);
983 } 1002 }
984 1003 ram->fuc.r_gpio[0] = ramfuc_reg(0x00e104);
985 ret = nvkm_gpio_find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func); 1004 ram->fuc.r_gpio[1] = ramfuc_reg(0x00e108);
986 if (ret == 0) { 1005 ram->fuc.r_gpio[2] = ramfuc_reg(0x00e120);
987 nv50_gpio_location(func.line, &reg, &shift); 1006 ram->fuc.r_gpio[3] = ramfuc_reg(0x00e124);
988 ram->fuc.r_gpioFBVREF = ramfuc_reg(reg);
989 }
990 1007
991 return 0; 1008 return 0;
992} 1009}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
index 9197e0ef5cdb..87bde8ff2d6b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
@@ -33,6 +33,7 @@
33#include <subdev/bios/rammap.h> 33#include <subdev/bios/rammap.h>
34#include <subdev/bios/timing.h> 34#include <subdev/bios/timing.h>
35#include <subdev/clk/pll.h> 35#include <subdev/clk/pll.h>
36#include <subdev/gpio.h>
36 37
37struct nv50_ramseq { 38struct nv50_ramseq {
38 struct hwsq base; 39 struct hwsq base;
@@ -59,6 +60,7 @@ struct nv50_ramseq {
59 struct hwsq_reg r_0x611200; 60 struct hwsq_reg r_0x611200;
60 struct hwsq_reg r_timing[9]; 61 struct hwsq_reg r_timing[9];
61 struct hwsq_reg r_mr[4]; 62 struct hwsq_reg r_mr[4];
63 struct hwsq_reg r_gpio[4];
62}; 64};
63 65
64struct nv50_ram { 66struct nv50_ram {
@@ -144,6 +146,38 @@ nv50_ram_timing_calc(struct nv50_ram *ram, u32 *timing)
144 nvkm_debug(subdev, " 240: %08x\n", timing[8]); 146 nvkm_debug(subdev, " 240: %08x\n", timing[8]);
145 return 0; 147 return 0;
146} 148}
149
150static int
151nv50_ram_timing_read(struct nv50_ram *ram, u32 *timing)
152{
153 unsigned int i;
154 struct nvbios_ramcfg *cfg = &ram->base.target.bios;
155 struct nvkm_subdev *subdev = &ram->base.fb->subdev;
156 struct nvkm_device *device = subdev->device;
157
158 for (i = 0; i <= 8; i++)
159 timing[i] = nvkm_rd32(device, 0x100220 + (i * 4));
160
161 /* Derive the bare minimum for the MR calculation to succeed */
162 cfg->timing_ver = 0x10;
163 T(CL) = (timing[3] & 0xff) + 1;
164
165 switch (ram->base.type) {
166 case NVKM_RAM_TYPE_DDR2:
167 T(CWL) = T(CL) - 1;
168 break;
169 case NVKM_RAM_TYPE_GDDR3:
170 T(CWL) = ((timing[2] & 0xff000000) >> 24) + 1;
171 break;
172 default:
173 return -ENOSYS;
174 break;
175 }
176
177 T(WR) = ((timing[1] >> 24) & 0xff) - 1 - T(CWL);
178
179 return 0;
180}
147#undef T 181#undef T
148 182
149static void 183static void
@@ -154,6 +188,33 @@ nvkm_sddr2_dll_reset(struct nv50_ramseq *hwsq)
154 ram_nsec(hwsq, 24000); 188 ram_nsec(hwsq, 24000);
155} 189}
156 190
191static void
192nv50_ram_gpio(struct nv50_ramseq *hwsq, u8 tag, u32 val)
193{
194 struct nvkm_gpio *gpio = hwsq->base.subdev->device->gpio;
195 struct dcb_gpio_func func;
196 u32 reg, sh, gpio_val;
197 int ret;
198
199 if (nvkm_gpio_get(gpio, 0, tag, DCB_GPIO_UNUSED) != val) {
200 ret = nvkm_gpio_find(gpio, 0, tag, DCB_GPIO_UNUSED, &func);
201 if (ret)
202 return;
203
204 reg = func.line >> 3;
205 sh = (func.line & 0x7) << 2;
206 gpio_val = ram_rd32(hwsq, gpio[reg]);
207
208 if (gpio_val & (8 << sh))
209 val = !val;
210 if (!(func.log[1] & 1))
211 val = !val;
212
213 ram_mask(hwsq, gpio[reg], (0x3 << sh), ((val | 0x2) << sh));
214 ram_nsec(hwsq, 20000);
215 }
216}
217
157static int 218static int
158nv50_ram_calc(struct nvkm_ram *base, u32 freq) 219nv50_ram_calc(struct nvkm_ram *base, u32 freq)
159{ 220{
@@ -213,10 +274,11 @@ nv50_ram_calc(struct nvkm_ram *base, u32 freq)
213 strap, data, ver, hdr); 274 strap, data, ver, hdr);
214 return -EINVAL; 275 return -EINVAL;
215 } 276 }
277 nv50_ram_timing_calc(ram, timing);
278 } else {
279 nv50_ram_timing_read(ram, timing);
216 } 280 }
217 281
218 nv50_ram_timing_calc(ram, timing);
219
220 ret = ram_init(hwsq, subdev); 282 ret = ram_init(hwsq, subdev);
221 if (ret) 283 if (ret)
222 return ret; 284 return ret;
@@ -235,14 +297,18 @@ nv50_ram_calc(struct nvkm_ram *base, u32 freq)
235 break; 297 break;
236 } 298 }
237 299
238 if (ret) 300 if (ret) {
301 nvkm_error(subdev, "Could not calculate MR\n");
239 return ret; 302 return ret;
303 }
304
305 if (subdev->device->chipset <= 0x96 && !next->bios.ramcfg_00_03_02)
306 ram_mask(hwsq, 0x100710, 0x00000200, 0x00000000);
240 307
241 /* Always disable this bit during reclock */ 308 /* Always disable this bit during reclock */
242 ram_mask(hwsq, 0x100200, 0x00000800, 0x00000000); 309 ram_mask(hwsq, 0x100200, 0x00000800, 0x00000000);
243 310
244 ram_wait(hwsq, 0x01, 0x00); /* wait for !vblank */ 311 ram_wait_vblank(hwsq);
245 ram_wait(hwsq, 0x01, 0x01); /* wait for vblank */
246 ram_wr32(hwsq, 0x611200, 0x00003300); 312 ram_wr32(hwsq, 0x611200, 0x00003300);
247 ram_wr32(hwsq, 0x002504, 0x00000001); /* block fifo */ 313 ram_wr32(hwsq, 0x002504, 0x00000001); /* block fifo */
248 ram_nsec(hwsq, 8000); 314 ram_nsec(hwsq, 8000);
@@ -250,6 +316,9 @@ nv50_ram_calc(struct nvkm_ram *base, u32 freq)
250 ram_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */ 316 ram_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */
251 ram_nsec(hwsq, 2000); 317 ram_nsec(hwsq, 2000);
252 318
319 if (next->bios.timing_10_ODT)
320 nv50_ram_gpio(hwsq, 0x2e, 1);
321
253 ram_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge */ 322 ram_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge */
254 ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */ 323 ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */
255 ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */ 324 ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */
@@ -286,8 +355,12 @@ nv50_ram_calc(struct nvkm_ram *base, u32 freq)
286 next->bios.rammap_00_16_40 << 14); 355 next->bios.rammap_00_16_40 << 14);
287 ram_mask(hwsq, 0x00400c, 0x0000ffff, (N1 << 8) | M1); 356 ram_mask(hwsq, 0x00400c, 0x0000ffff, (N1 << 8) | M1);
288 ram_mask(hwsq, 0x004008, 0x91ff0000, r004008); 357 ram_mask(hwsq, 0x004008, 0x91ff0000, r004008);
289 if (subdev->device->chipset >= 0x96) 358
359 /* XXX: GDDR3 only? */
360 if (subdev->device->chipset >= 0x92)
290 ram_wr32(hwsq, 0x100da0, r100da0); 361 ram_wr32(hwsq, 0x100da0, r100da0);
362
363 nv50_ram_gpio(hwsq, 0x18, !next->bios.ramcfg_FBVDDQ);
291 ram_nsec(hwsq, 64000); /*XXX*/ 364 ram_nsec(hwsq, 64000); /*XXX*/
292 ram_nsec(hwsq, 32000); /*XXX*/ 365 ram_nsec(hwsq, 32000); /*XXX*/
293 366
@@ -329,19 +402,33 @@ nv50_ram_calc(struct nvkm_ram *base, u32 freq)
329 ram_mask(hwsq, 0x100200, 0x00001000, !next->bios.ramcfg_00_04_02 << 12); 402 ram_mask(hwsq, 0x100200, 0x00001000, !next->bios.ramcfg_00_04_02 << 12);
330 403
331 /* XXX: A lot of this could be "chipset"/"ram type" specific stuff */ 404 /* XXX: A lot of this could be "chipset"/"ram type" specific stuff */
332 unk710 = ram_rd32(hwsq, 0x100710) & ~0x00000101; 405 unk710 = ram_rd32(hwsq, 0x100710) & ~0x00000100;
333 unk714 = ram_rd32(hwsq, 0x100714) & ~0xf0000020; 406 unk714 = ram_rd32(hwsq, 0x100714) & ~0xf0000020;
334 unk718 = ram_rd32(hwsq, 0x100718) & ~0x00000100; 407 unk718 = ram_rd32(hwsq, 0x100718) & ~0x00000100;
335 unk71c = ram_rd32(hwsq, 0x10071c) & ~0x00000100; 408 unk71c = ram_rd32(hwsq, 0x10071c) & ~0x00000100;
409 if (subdev->device->chipset <= 0x96) {
410 unk710 &= ~0x0000006e;
411 unk714 &= ~0x00000100;
412
413 if (!next->bios.ramcfg_00_03_08)
414 unk710 |= 0x00000060;
415 if (!next->bios.ramcfg_FBVDDQ)
416 unk714 |= 0x00000100;
417 if ( next->bios.ramcfg_00_04_04)
418 unk710 |= 0x0000000e;
419 } else {
420 unk710 &= ~0x00000001;
421
422 if (!next->bios.ramcfg_00_03_08)
423 unk710 |= 0x00000001;
424 }
336 425
337 if ( next->bios.ramcfg_00_03_01) 426 if ( next->bios.ramcfg_00_03_01)
338 unk71c |= 0x00000100; 427 unk71c |= 0x00000100;
339 if ( next->bios.ramcfg_00_03_02) 428 if ( next->bios.ramcfg_00_03_02)
340 unk710 |= 0x00000100; 429 unk710 |= 0x00000100;
341 if (!next->bios.ramcfg_00_03_08) { 430 if (!next->bios.ramcfg_00_03_08)
342 unk710 |= 0x1; 431 unk714 |= 0x00000020;
343 unk714 |= 0x20;
344 }
345 if ( next->bios.ramcfg_00_04_04) 432 if ( next->bios.ramcfg_00_04_04)
346 unk714 |= 0x70000000; 433 unk714 |= 0x70000000;
347 if ( next->bios.ramcfg_00_04_20) 434 if ( next->bios.ramcfg_00_04_20)
@@ -352,6 +439,8 @@ nv50_ram_calc(struct nvkm_ram *base, u32 freq)
352 ram_mask(hwsq, 0x100718, 0xffffffff, unk718); 439 ram_mask(hwsq, 0x100718, 0xffffffff, unk718);
353 ram_mask(hwsq, 0x100710, 0xffffffff, unk710); 440 ram_mask(hwsq, 0x100710, 0xffffffff, unk710);
354 441
442 /* XXX: G94 does not even test these regs in trace. Harmless we do it,
443 * but why is it omitted? */
355 if (next->bios.rammap_00_16_20) { 444 if (next->bios.rammap_00_16_20) {
356 ram_wr32(hwsq, 0x1005a0, next->bios.ramcfg_00_07 << 16 | 445 ram_wr32(hwsq, 0x1005a0, next->bios.ramcfg_00_07 << 16 |
357 next->bios.ramcfg_00_06 << 8 | 446 next->bios.ramcfg_00_06 << 8 |
@@ -364,6 +453,9 @@ nv50_ram_calc(struct nvkm_ram *base, u32 freq)
364 } 453 }
365 ram_mask(hwsq, mr[1], 0xffffffff, ram->base.mr[1]); 454 ram_mask(hwsq, mr[1], 0xffffffff, ram->base.mr[1]);
366 455
456 if (!next->bios.timing_10_ODT)
457 nv50_ram_gpio(hwsq, 0x2e, 0);
458
367 /* Reset DLL */ 459 /* Reset DLL */
368 if (!next->bios.ramcfg_DLLoff) 460 if (!next->bios.ramcfg_DLLoff)
369 nvkm_sddr2_dll_reset(hwsq); 461 nvkm_sddr2_dll_reset(hwsq);
@@ -379,6 +471,8 @@ nv50_ram_calc(struct nvkm_ram *base, u32 freq)
379 ram_mask(hwsq, 0x004008, 0x00004000, 0x00000000); 471 ram_mask(hwsq, 0x004008, 0x00004000, 0x00000000);
380 if (next->bios.ramcfg_00_03_02) 472 if (next->bios.ramcfg_00_03_02)
381 ram_mask(hwsq, 0x10021c, 0x00010000, 0x00010000); 473 ram_mask(hwsq, 0x10021c, 0x00010000, 0x00010000);
474 if (subdev->device->chipset <= 0x96 && next->bios.ramcfg_00_03_02)
475 ram_mask(hwsq, 0x100710, 0x00000200, 0x00000200);
382 476
383 return 0; 477 return 0;
384} 478}
@@ -634,5 +728,10 @@ nv50_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
634 ram->hwsq.r_mr[3] = hwsq_reg(0x1002e4); 728 ram->hwsq.r_mr[3] = hwsq_reg(0x1002e4);
635 } 729 }
636 730
731 ram->hwsq.r_gpio[0] = hwsq_reg(0x00e104);
732 ram->hwsq.r_gpio[1] = hwsq_reg(0x00e108);
733 ram->hwsq.r_gpio[2] = hwsq_reg(0x00e120);
734 ram->hwsq.r_gpio[3] = hwsq_reg(0x00e124);
735
637 return 0; 736 return 0;
638} 737}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h
index 0f1f97ccd5f6..8df7306d5729 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h
@@ -11,5 +11,6 @@
11#define ram_mask(s,r,m,d) hwsq_mask(&(s)->base, &(s)->r_##r, (m), (d)) 11#define ram_mask(s,r,m,d) hwsq_mask(&(s)->base, &(s)->r_##r, (m), (d))
12#define ram_setf(s,f,d) hwsq_setf(&(s)->base, (f), (d)) 12#define ram_setf(s,f,d) hwsq_setf(&(s)->base, (f), (d))
13#define ram_wait(s,f,d) hwsq_wait(&(s)->base, (f), (d)) 13#define ram_wait(s,f,d) hwsq_wait(&(s)->base, (f), (d))
14#define ram_wait_vblank(s) hwsq_wait_vblank(&(s)->base)
14#define ram_nsec(s,n) hwsq_nsec(&(s)->base, (n)) 15#define ram_nsec(s,n) hwsq_nsec(&(s)->base, (n))
15#endif 16#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
index 86bf67456b14..b9f1ffdfc602 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
@@ -76,6 +76,12 @@ nvkm_sddr2_calc(struct nvkm_ram *ram)
76 return -ENOSYS; 76 return -ENOSYS;
77 } 77 }
78 78
79 if (ram->next->bios.timing_ver == 0x20 ||
80 ram->next->bios.ramcfg_timing == 0xff) {
81 ODT = (ram->mr[1] & 0x004) >> 2 |
82 (ram->mr[1] & 0x040) >> 5;
83 }
84
79 CL = ramxlat(ramddr2_cl, CL); 85 CL = ramxlat(ramddr2_cl, CL);
80 WR = ramxlat(ramddr2_wr, WR); 86 WR = ramxlat(ramddr2_wr, WR);
81 if (CL < 0 || WR < 0) 87 if (CL < 0 || WR < 0)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
index b4edc97dc8c5..26900333b1d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
@@ -70,6 +70,8 @@ nvkm_sddr3_calc(struct nvkm_ram *ram)
70{ 70{
71 int CWL, CL, WR, DLL = 0, ODT = 0; 71 int CWL, CL, WR, DLL = 0, ODT = 0;
72 72
73 DLL = !ram->next->bios.ramcfg_DLLoff;
74
73 switch (ram->next->bios.timing_ver) { 75 switch (ram->next->bios.timing_ver) {
74 case 0x10: 76 case 0x10:
75 if (ram->next->bios.timing_hdr < 0x17) { 77 if (ram->next->bios.timing_hdr < 0x17) {
@@ -79,7 +81,6 @@ nvkm_sddr3_calc(struct nvkm_ram *ram)
79 CWL = ram->next->bios.timing_10_CWL; 81 CWL = ram->next->bios.timing_10_CWL;
80 CL = ram->next->bios.timing_10_CL; 82 CL = ram->next->bios.timing_10_CL;
81 WR = ram->next->bios.timing_10_WR; 83 WR = ram->next->bios.timing_10_WR;
82 DLL = !ram->next->bios.ramcfg_DLLoff;
83 ODT = ram->next->bios.timing_10_ODT; 84 ODT = ram->next->bios.timing_10_ODT;
84 break; 85 break;
85 case 0x20: 86 case 0x20:
@@ -87,7 +88,6 @@ nvkm_sddr3_calc(struct nvkm_ram *ram)
87 CL = (ram->next->bios.timing[1] & 0x0000001f) >> 0; 88 CL = (ram->next->bios.timing[1] & 0x0000001f) >> 0;
88 WR = (ram->next->bios.timing[2] & 0x007f0000) >> 16; 89 WR = (ram->next->bios.timing[2] & 0x007f0000) >> 16;
89 /* XXX: Get these values from the VBIOS instead */ 90 /* XXX: Get these values from the VBIOS instead */
90 DLL = !(ram->mr[1] & 0x1);
91 ODT = (ram->mr[1] & 0x004) >> 2 | 91 ODT = (ram->mr[1] & 0x004) >> 2 |
92 (ram->mr[1] & 0x040) >> 5 | 92 (ram->mr[1] & 0x040) >> 5 |
93 (ram->mr[1] & 0x200) >> 7; 93 (ram->mr[1] & 0x200) >> 7;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
index 8996649209ab..73923fd5f7f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
@@ -54,7 +54,7 @@ nv50_gpio_reset(struct nvkm_gpio *gpio, u8 match)
54 } 54 }
55} 55}
56 56
57int 57static int
58nv50_gpio_location(int line, u32 *reg, u32 *shift) 58nv50_gpio_location(int line, u32 *reg, u32 *shift)
59{ 59{
60 const u32 nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; 60 const u32 nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
index a0b12d27284a..de888fa62b3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
@@ -1,3 +1,4 @@
1nvkm-y += nvkm/subdev/ibus/gf100.o 1nvkm-y += nvkm/subdev/ibus/gf100.o
2nvkm-y += nvkm/subdev/ibus/gf117.o
2nvkm-y += nvkm/subdev/ibus/gk104.o 3nvkm-y += nvkm/subdev/ibus/gk104.o
3nvkm-y += nvkm/subdev/ibus/gk20a.o 4nvkm-y += nvkm/subdev/ibus/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
index 37a0496f7ed1..72d6330d243d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
@@ -21,7 +21,7 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <subdev/ibus.h> 24#include "priv.h"
25 25
26static void 26static void
27gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i) 27gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
@@ -56,7 +56,7 @@ gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
56 nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000); 56 nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
57} 57}
58 58
59static void 59void
60gf100_ibus_intr(struct nvkm_subdev *ibus) 60gf100_ibus_intr(struct nvkm_subdev *ibus)
61{ 61{
62 struct nvkm_device *device = ibus->device; 62 struct nvkm_device *device = ibus->device;
@@ -92,8 +92,21 @@ gf100_ibus_intr(struct nvkm_subdev *ibus)
92 } 92 }
93} 93}
94 94
95static int
96gf100_ibus_init(struct nvkm_subdev *ibus)
97{
98 struct nvkm_device *device = ibus->device;
99 nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800);
100 nvkm_wr32(device, 0x12232c, 0x00100064);
101 nvkm_wr32(device, 0x122330, 0x00100064);
102 nvkm_wr32(device, 0x122334, 0x00100064);
103 nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000100);
104 return 0;
105}
106
95static const struct nvkm_subdev_func 107static const struct nvkm_subdev_func
96gf100_ibus = { 108gf100_ibus = {
109 .init = gf100_ibus_init,
97 .intr = gf100_ibus_intr, 110 .intr = gf100_ibus_intr,
98}; 111};
99 112
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c
new file mode 100644
index 000000000000..f69f263c5906
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c
@@ -0,0 +1,51 @@
1/*
2 * Copyright 2015 Samuel Pitosiet
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Samuel Pitoiset
23 */
24#include "priv.h"
25
26static int
27gf117_ibus_init(struct nvkm_subdev *ibus)
28{
29 struct nvkm_device *device = ibus->device;
30 nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800);
31 nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000100);
32 nvkm_mask(device, 0x1223b0, 0x0003ffff, 0x00000fff);
33 return 0;
34}
35
36static const struct nvkm_subdev_func
37gf117_ibus = {
38 .init = gf117_ibus_init,
39 .intr = gf100_ibus_intr,
40};
41
42int
43gf117_ibus_new(struct nvkm_device *device, int index,
44 struct nvkm_subdev **pibus)
45{
46 struct nvkm_subdev *ibus;
47 if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
48 return -ENOMEM;
49 nvkm_subdev_ctor(&gf117_ibus, device, index, 0, ibus);
50 return 0;
51}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h
new file mode 100644
index 000000000000..48e1b6365ce6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h
@@ -0,0 +1,7 @@
1#ifndef __NVKM_IBUS_PRIV_H__
2#define __NVKM_IBUS_PRIV_H__
3
4#include <subdev/ibus.h>
5
6void gf100_ibus_intr(struct nvkm_subdev *);
7#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index cd7feb1b25f6..fc419bb8eab7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -23,35 +23,42 @@
23/* 23/*
24 * GK20A does not have dedicated video memory, and to accurately represent this 24 * GK20A does not have dedicated video memory, and to accurately represent this
25 * fact Nouveau will not create a RAM device for it. Therefore its instmem 25 * fact Nouveau will not create a RAM device for it. Therefore its instmem
26 * implementation must be done directly on top of system memory, while providing 26 * implementation must be done directly on top of system memory, while
27 * coherent read and write operations. 27 * preserving coherency for read and write operations.
28 * 28 *
29 * Instmem can be allocated through two means: 29 * Instmem can be allocated through two means:
30 * 1) If an IOMMU mapping has been probed, the IOMMU API is used to make memory 30 * 1) If an IOMMU unit has been probed, the IOMMU API is used to make memory
31 * pages contiguous to the GPU. This is the preferred way. 31 * pages contiguous to the GPU. This is the preferred way.
32 * 2) If no IOMMU mapping is probed, the DMA API is used to allocate physically 32 * 2) If no IOMMU unit is probed, the DMA API is used to allocate physically
33 * contiguous memory. 33 * contiguous memory.
34 * 34 *
35 * In both cases CPU read and writes are performed using PRAMIN (i.e. using the 35 * In both cases CPU read and writes are performed by creating a write-combined
36 * GPU path) to ensure these operations are coherent for the GPU. This allows us 36 * mapping. The GPU L2 cache must thus be flushed/invalidated when required. To
37 * to use more "relaxed" allocation parameters when using the DMA API, since we 37 * be conservative we do this every time we acquire or release an instobj, but
38 * never need a kernel mapping. 38 * ideally L2 management should be handled at a higher level.
39 *
40 * To improve performance, CPU mappings are not removed upon instobj release.
41 * Instead they are placed into a LRU list to be recycled when the mapped space
42 * goes beyond a certain threshold. At the moment this limit is 1MB.
39 */ 43 */
40#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)
41#include "priv.h" 44#include "priv.h"
42 45
43#include <core/memory.h> 46#include <core/memory.h>
44#include <core/mm.h> 47#include <core/mm.h>
45#include <core/tegra.h> 48#include <core/tegra.h>
46#include <subdev/fb.h> 49#include <subdev/fb.h>
47 50#include <subdev/ltc.h>
48#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
49 51
50struct gk20a_instobj { 52struct gk20a_instobj {
51 struct nvkm_memory memory; 53 struct nvkm_memory memory;
52 struct gk20a_instmem *imem;
53 struct nvkm_mem mem; 54 struct nvkm_mem mem;
55 struct gk20a_instmem *imem;
56
57 /* CPU mapping */
58 u32 *vaddr;
59 struct list_head vaddr_node;
54}; 60};
61#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
55 62
56/* 63/*
57 * Used for objects allocated using the DMA API 64 * Used for objects allocated using the DMA API
@@ -59,10 +66,12 @@ struct gk20a_instobj {
59struct gk20a_instobj_dma { 66struct gk20a_instobj_dma {
60 struct gk20a_instobj base; 67 struct gk20a_instobj base;
61 68
62 void *cpuaddr; 69 u32 *cpuaddr;
63 dma_addr_t handle; 70 dma_addr_t handle;
64 struct nvkm_mm_node r; 71 struct nvkm_mm_node r;
65}; 72};
73#define gk20a_instobj_dma(p) \
74 container_of(gk20a_instobj(p), struct gk20a_instobj_dma, base)
66 75
67/* 76/*
68 * Used for objects flattened using the IOMMU API 77 * Used for objects flattened using the IOMMU API
@@ -70,25 +79,38 @@ struct gk20a_instobj_dma {
70struct gk20a_instobj_iommu { 79struct gk20a_instobj_iommu {
71 struct gk20a_instobj base; 80 struct gk20a_instobj base;
72 81
73 /* array of base.mem->size pages */ 82 /* will point to the higher half of pages */
83 dma_addr_t *dma_addrs;
84 /* array of base.mem->size pages (+ dma_addr_ts) */
74 struct page *pages[]; 85 struct page *pages[];
75}; 86};
87#define gk20a_instobj_iommu(p) \
88 container_of(gk20a_instobj(p), struct gk20a_instobj_iommu, base)
76 89
77struct gk20a_instmem { 90struct gk20a_instmem {
78 struct nvkm_instmem base; 91 struct nvkm_instmem base;
79 unsigned long lock_flags; 92
93 /* protects vaddr_* and gk20a_instobj::vaddr* */
80 spinlock_t lock; 94 spinlock_t lock;
81 u64 addr; 95
96 /* CPU mappings LRU */
97 unsigned int vaddr_use;
98 unsigned int vaddr_max;
99 struct list_head vaddr_lru;
82 100
83 /* Only used if IOMMU if present */ 101 /* Only used if IOMMU if present */
84 struct mutex *mm_mutex; 102 struct mutex *mm_mutex;
85 struct nvkm_mm *mm; 103 struct nvkm_mm *mm;
86 struct iommu_domain *domain; 104 struct iommu_domain *domain;
87 unsigned long iommu_pgshift; 105 unsigned long iommu_pgshift;
106 u16 iommu_bit;
88 107
89 /* Only used by DMA API */ 108 /* Only used by DMA API */
90 struct dma_attrs attrs; 109 struct dma_attrs attrs;
110
111 void __iomem * (*cpu_map)(struct nvkm_memory *);
91}; 112};
113#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)
92 114
93static enum nvkm_memory_target 115static enum nvkm_memory_target
94gk20a_instobj_target(struct nvkm_memory *memory) 116gk20a_instobj_target(struct nvkm_memory *memory)
@@ -100,7 +122,6 @@ static u64
100gk20a_instobj_addr(struct nvkm_memory *memory) 122gk20a_instobj_addr(struct nvkm_memory *memory)
101{ 123{
102 return gk20a_instobj(memory)->mem.offset; 124 return gk20a_instobj(memory)->mem.offset;
103
104} 125}
105 126
106static u64 127static u64
@@ -110,107 +131,217 @@ gk20a_instobj_size(struct nvkm_memory *memory)
110} 131}
111 132
112static void __iomem * 133static void __iomem *
134gk20a_instobj_cpu_map_dma(struct nvkm_memory *memory)
135{
136 struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
137 struct device *dev = node->base.imem->base.subdev.device->dev;
138 int npages = nvkm_memory_size(memory) >> 12;
139 struct page *pages[npages];
140 int i;
141
142 /* phys_to_page does not exist on all platforms... */
143 pages[0] = pfn_to_page(dma_to_phys(dev, node->handle) >> PAGE_SHIFT);
144 for (i = 1; i < npages; i++)
145 pages[i] = pages[0] + i;
146
147 return vmap(pages, npages, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
148}
149
150static void __iomem *
151gk20a_instobj_cpu_map_iommu(struct nvkm_memory *memory)
152{
153 struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
154 int npages = nvkm_memory_size(memory) >> 12;
155
156 return vmap(node->pages, npages, VM_MAP,
157 pgprot_writecombine(PAGE_KERNEL));
158}
159
160/*
161 * Must be called while holding gk20a_instmem_lock
162 */
163static void
164gk20a_instmem_vaddr_gc(struct gk20a_instmem *imem, const u64 size)
165{
166 while (imem->vaddr_use + size > imem->vaddr_max) {
167 struct gk20a_instobj *obj;
168
169 /* no candidate that can be unmapped, abort... */
170 if (list_empty(&imem->vaddr_lru))
171 break;
172
173 obj = list_first_entry(&imem->vaddr_lru, struct gk20a_instobj,
174 vaddr_node);
175 list_del(&obj->vaddr_node);
176 vunmap(obj->vaddr);
177 obj->vaddr = NULL;
178 imem->vaddr_use -= nvkm_memory_size(&obj->memory);
179 nvkm_debug(&imem->base.subdev, "(GC) vaddr used: %x/%x\n",
180 imem->vaddr_use, imem->vaddr_max);
181
182 }
183}
184
185static void __iomem *
113gk20a_instobj_acquire(struct nvkm_memory *memory) 186gk20a_instobj_acquire(struct nvkm_memory *memory)
114{ 187{
115 struct gk20a_instmem *imem = gk20a_instobj(memory)->imem; 188 struct gk20a_instobj *node = gk20a_instobj(memory);
189 struct gk20a_instmem *imem = node->imem;
190 struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
191 const u64 size = nvkm_memory_size(memory);
116 unsigned long flags; 192 unsigned long flags;
193
194 nvkm_ltc_flush(ltc);
195
117 spin_lock_irqsave(&imem->lock, flags); 196 spin_lock_irqsave(&imem->lock, flags);
118 imem->lock_flags = flags; 197
119 return NULL; 198 if (node->vaddr) {
199 /* remove us from the LRU list since we cannot be unmapped */
200 list_del(&node->vaddr_node);
201
202 goto out;
203 }
204
205 /* try to free some address space if we reached the limit */
206 gk20a_instmem_vaddr_gc(imem, size);
207
208 node->vaddr = imem->cpu_map(memory);
209
210 if (!node->vaddr) {
211 nvkm_error(&imem->base.subdev, "cannot map instobj - "
212 "this is not going to end well...\n");
213 goto out;
214 }
215
216 imem->vaddr_use += size;
217 nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n",
218 imem->vaddr_use, imem->vaddr_max);
219
220out:
221 spin_unlock_irqrestore(&imem->lock, flags);
222
223 return node->vaddr;
120} 224}
121 225
122static void 226static void
123gk20a_instobj_release(struct nvkm_memory *memory) 227gk20a_instobj_release(struct nvkm_memory *memory)
124{ 228{
125 struct gk20a_instmem *imem = gk20a_instobj(memory)->imem; 229 struct gk20a_instobj *node = gk20a_instobj(memory);
126 spin_unlock_irqrestore(&imem->lock, imem->lock_flags); 230 struct gk20a_instmem *imem = node->imem;
127} 231 struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
232 unsigned long flags;
128 233
129/* 234 spin_lock_irqsave(&imem->lock, flags);
130 * Use PRAMIN to read/write data and avoid coherency issues. 235
131 * PRAMIN uses the GPU path and ensures data will always be coherent. 236 /* add ourselves to the LRU list so our CPU mapping can be freed */
132 * 237 list_add_tail(&node->vaddr_node, &imem->vaddr_lru);
133 * A dynamic mapping based solution would be desirable in the future, but 238
134 * the issue remains of how to maintain coherency efficiently. On ARM it is 239 spin_unlock_irqrestore(&imem->lock, flags);
135 * not easy (if possible at all?) to create uncached temporary mappings. 240
136 */ 241 wmb();
242 nvkm_ltc_invalidate(ltc);
243}
137 244
138static u32 245static u32
139gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset) 246gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset)
140{ 247{
141 struct gk20a_instobj *node = gk20a_instobj(memory); 248 struct gk20a_instobj *node = gk20a_instobj(memory);
142 struct gk20a_instmem *imem = node->imem; 249
143 struct nvkm_device *device = imem->base.subdev.device; 250 return node->vaddr[offset / 4];
144 u64 base = (node->mem.offset + offset) & 0xffffff00000ULL;
145 u64 addr = (node->mem.offset + offset) & 0x000000fffffULL;
146 u32 data;
147
148 if (unlikely(imem->addr != base)) {
149 nvkm_wr32(device, 0x001700, base >> 16);
150 imem->addr = base;
151 }
152 data = nvkm_rd32(device, 0x700000 + addr);
153 return data;
154} 251}
155 252
156static void 253static void
157gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data) 254gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
158{ 255{
159 struct gk20a_instobj *node = gk20a_instobj(memory); 256 struct gk20a_instobj *node = gk20a_instobj(memory);
160 struct gk20a_instmem *imem = node->imem;
161 struct nvkm_device *device = imem->base.subdev.device;
162 u64 base = (node->mem.offset + offset) & 0xffffff00000ULL;
163 u64 addr = (node->mem.offset + offset) & 0x000000fffffULL;
164 257
165 if (unlikely(imem->addr != base)) { 258 node->vaddr[offset / 4] = data;
166 nvkm_wr32(device, 0x001700, base >> 16);
167 imem->addr = base;
168 }
169 nvkm_wr32(device, 0x700000 + addr, data);
170} 259}
171 260
172static void 261static void
173gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset) 262gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
174{ 263{
175 struct gk20a_instobj *node = gk20a_instobj(memory); 264 struct gk20a_instobj *node = gk20a_instobj(memory);
265
176 nvkm_vm_map_at(vma, offset, &node->mem); 266 nvkm_vm_map_at(vma, offset, &node->mem);
177} 267}
178 268
269/*
270 * Clear the CPU mapping of an instobj if it exists
271 */
179static void 272static void
180gk20a_instobj_dtor_dma(struct gk20a_instobj *_node) 273gk20a_instobj_dtor(struct gk20a_instobj *node)
274{
275 struct gk20a_instmem *imem = node->imem;
276 struct gk20a_instobj *obj;
277 unsigned long flags;
278
279 spin_lock_irqsave(&imem->lock, flags);
280
281 if (!node->vaddr)
282 goto out;
283
284 list_for_each_entry(obj, &imem->vaddr_lru, vaddr_node) {
285 if (obj == node) {
286 list_del(&obj->vaddr_node);
287 break;
288 }
289 }
290 vunmap(node->vaddr);
291 node->vaddr = NULL;
292 imem->vaddr_use -= nvkm_memory_size(&node->memory);
293 nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n",
294 imem->vaddr_use, imem->vaddr_max);
295
296out:
297 spin_unlock_irqrestore(&imem->lock, flags);
298}
299
300static void *
301gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
181{ 302{
182 struct gk20a_instobj_dma *node = (void *)_node; 303 struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
183 struct gk20a_instmem *imem = _node->imem; 304 struct gk20a_instmem *imem = node->base.imem;
184 struct device *dev = imem->base.subdev.device->dev; 305 struct device *dev = imem->base.subdev.device->dev;
185 306
307 gk20a_instobj_dtor(&node->base);
308
186 if (unlikely(!node->cpuaddr)) 309 if (unlikely(!node->cpuaddr))
187 return; 310 goto out;
188 311
189 dma_free_attrs(dev, _node->mem.size << PAGE_SHIFT, node->cpuaddr, 312 dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->cpuaddr,
190 node->handle, &imem->attrs); 313 node->handle, &imem->attrs);
314
315out:
316 return node;
191} 317}
192 318
193static void 319static void *
194gk20a_instobj_dtor_iommu(struct gk20a_instobj *_node) 320gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
195{ 321{
196 struct gk20a_instobj_iommu *node = (void *)_node; 322 struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
197 struct gk20a_instmem *imem = _node->imem; 323 struct gk20a_instmem *imem = node->base.imem;
324 struct device *dev = imem->base.subdev.device->dev;
198 struct nvkm_mm_node *r; 325 struct nvkm_mm_node *r;
199 int i; 326 int i;
200 327
201 if (unlikely(list_empty(&_node->mem.regions))) 328 gk20a_instobj_dtor(&node->base);
202 return;
203 329
204 r = list_first_entry(&_node->mem.regions, struct nvkm_mm_node, 330 if (unlikely(list_empty(&node->base.mem.regions)))
331 goto out;
332
333 r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node,
205 rl_entry); 334 rl_entry);
206 335
207 /* clear bit 34 to unmap pages */ 336 /* clear IOMMU bit to unmap pages */
208 r->offset &= ~BIT(34 - imem->iommu_pgshift); 337 r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);
209 338
210 /* Unmap pages from GPU address space and free them */ 339 /* Unmap pages from GPU address space and free them */
211 for (i = 0; i < _node->mem.size; i++) { 340 for (i = 0; i < node->base.mem.size; i++) {
212 iommu_unmap(imem->domain, 341 iommu_unmap(imem->domain,
213 (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE); 342 (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
343 dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
344 DMA_BIDIRECTIONAL);
214 __free_page(node->pages[i]); 345 __free_page(node->pages[i]);
215 } 346 }
216 347
@@ -218,25 +349,27 @@ gk20a_instobj_dtor_iommu(struct gk20a_instobj *_node)
218 mutex_lock(imem->mm_mutex); 349 mutex_lock(imem->mm_mutex);
219 nvkm_mm_free(imem->mm, &r); 350 nvkm_mm_free(imem->mm, &r);
220 mutex_unlock(imem->mm_mutex); 351 mutex_unlock(imem->mm_mutex);
221}
222
223static void *
224gk20a_instobj_dtor(struct nvkm_memory *memory)
225{
226 struct gk20a_instobj *node = gk20a_instobj(memory);
227 struct gk20a_instmem *imem = node->imem;
228
229 if (imem->domain)
230 gk20a_instobj_dtor_iommu(node);
231 else
232 gk20a_instobj_dtor_dma(node);
233 352
353out:
234 return node; 354 return node;
235} 355}
236 356
237static const struct nvkm_memory_func 357static const struct nvkm_memory_func
238gk20a_instobj_func = { 358gk20a_instobj_func_dma = {
239 .dtor = gk20a_instobj_dtor, 359 .dtor = gk20a_instobj_dtor_dma,
360 .target = gk20a_instobj_target,
361 .addr = gk20a_instobj_addr,
362 .size = gk20a_instobj_size,
363 .acquire = gk20a_instobj_acquire,
364 .release = gk20a_instobj_release,
365 .rd32 = gk20a_instobj_rd32,
366 .wr32 = gk20a_instobj_wr32,
367 .map = gk20a_instobj_map,
368};
369
370static const struct nvkm_memory_func
371gk20a_instobj_func_iommu = {
372 .dtor = gk20a_instobj_dtor_iommu,
240 .target = gk20a_instobj_target, 373 .target = gk20a_instobj_target,
241 .addr = gk20a_instobj_addr, 374 .addr = gk20a_instobj_addr,
242 .size = gk20a_instobj_size, 375 .size = gk20a_instobj_size,
@@ -259,6 +392,8 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
259 return -ENOMEM; 392 return -ENOMEM;
260 *_node = &node->base; 393 *_node = &node->base;
261 394
395 nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);
396
262 node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT, 397 node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
263 &node->handle, GFP_KERNEL, 398 &node->handle, GFP_KERNEL,
264 &imem->attrs); 399 &imem->attrs);
@@ -292,24 +427,40 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
292{ 427{
293 struct gk20a_instobj_iommu *node; 428 struct gk20a_instobj_iommu *node;
294 struct nvkm_subdev *subdev = &imem->base.subdev; 429 struct nvkm_subdev *subdev = &imem->base.subdev;
430 struct device *dev = subdev->device->dev;
295 struct nvkm_mm_node *r; 431 struct nvkm_mm_node *r;
296 int ret; 432 int ret;
297 int i; 433 int i;
298 434
299 if (!(node = kzalloc(sizeof(*node) + 435 /*
300 sizeof( node->pages[0]) * npages, GFP_KERNEL))) 436 * despite their variable size, instmem allocations are small enough
437 * (< 1 page) to be handled by kzalloc
438 */
439 if (!(node = kzalloc(sizeof(*node) + ((sizeof(node->pages[0]) +
440 sizeof(*node->dma_addrs)) * npages), GFP_KERNEL)))
301 return -ENOMEM; 441 return -ENOMEM;
302 *_node = &node->base; 442 *_node = &node->base;
443 node->dma_addrs = (void *)(node->pages + npages);
444
445 nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory);
303 446
304 /* Allocate backing memory */ 447 /* Allocate backing memory */
305 for (i = 0; i < npages; i++) { 448 for (i = 0; i < npages; i++) {
306 struct page *p = alloc_page(GFP_KERNEL); 449 struct page *p = alloc_page(GFP_KERNEL);
450 dma_addr_t dma_adr;
307 451
308 if (p == NULL) { 452 if (p == NULL) {
309 ret = -ENOMEM; 453 ret = -ENOMEM;
310 goto free_pages; 454 goto free_pages;
311 } 455 }
312 node->pages[i] = p; 456 node->pages[i] = p;
457 dma_adr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
458 if (dma_mapping_error(dev, dma_adr)) {
459 nvkm_error(subdev, "DMA mapping error!\n");
460 ret = -ENOMEM;
461 goto free_pages;
462 }
463 node->dma_addrs[i] = dma_adr;
313 } 464 }
314 465
315 mutex_lock(imem->mm_mutex); 466 mutex_lock(imem->mm_mutex);
@@ -318,16 +469,15 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
318 align >> imem->iommu_pgshift, &r); 469 align >> imem->iommu_pgshift, &r);
319 mutex_unlock(imem->mm_mutex); 470 mutex_unlock(imem->mm_mutex);
320 if (ret) { 471 if (ret) {
321 nvkm_error(subdev, "virtual space is full!\n"); 472 nvkm_error(subdev, "IOMMU space is full!\n");
322 goto free_pages; 473 goto free_pages;
323 } 474 }
324 475
325 /* Map into GPU address space */ 476 /* Map into GPU address space */
326 for (i = 0; i < npages; i++) { 477 for (i = 0; i < npages; i++) {
327 struct page *p = node->pages[i];
328 u32 offset = (r->offset + i) << imem->iommu_pgshift; 478 u32 offset = (r->offset + i) << imem->iommu_pgshift;
329 479
330 ret = iommu_map(imem->domain, offset, page_to_phys(p), 480 ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
331 PAGE_SIZE, IOMMU_READ | IOMMU_WRITE); 481 PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
332 if (ret < 0) { 482 if (ret < 0) {
333 nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret); 483 nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);
@@ -340,8 +490,8 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
340 } 490 }
341 } 491 }
342 492
343 /* Bit 34 tells that an address is to be resolved through the IOMMU */ 493 /* IOMMU bit tells that an address is to be resolved through the IOMMU */
344 r->offset |= BIT(34 - imem->iommu_pgshift); 494 r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);
345 495
346 node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift; 496 node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
347 497
@@ -356,8 +506,13 @@ release_area:
356 mutex_unlock(imem->mm_mutex); 506 mutex_unlock(imem->mm_mutex);
357 507
358free_pages: 508free_pages:
359 for (i = 0; i < npages && node->pages[i] != NULL; i++) 509 for (i = 0; i < npages && node->pages[i] != NULL; i++) {
510 dma_addr_t dma_addr = node->dma_addrs[i];
511 if (dma_addr)
512 dma_unmap_page(dev, dma_addr, PAGE_SIZE,
513 DMA_BIDIRECTIONAL);
360 __free_page(node->pages[i]); 514 __free_page(node->pages[i]);
515 }
361 516
362 return ret; 517 return ret;
363} 518}
@@ -367,8 +522,8 @@ gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
367 struct nvkm_memory **pmemory) 522 struct nvkm_memory **pmemory)
368{ 523{
369 struct gk20a_instmem *imem = gk20a_instmem(base); 524 struct gk20a_instmem *imem = gk20a_instmem(base);
370 struct gk20a_instobj *node = NULL;
371 struct nvkm_subdev *subdev = &imem->base.subdev; 525 struct nvkm_subdev *subdev = &imem->base.subdev;
526 struct gk20a_instobj *node = NULL;
372 int ret; 527 int ret;
373 528
374 nvkm_debug(subdev, "%s (%s): size: %x align: %x\n", __func__, 529 nvkm_debug(subdev, "%s (%s): size: %x align: %x\n", __func__,
@@ -388,7 +543,6 @@ gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
388 if (ret) 543 if (ret)
389 return ret; 544 return ret;
390 545
391 nvkm_memory_ctor(&gk20a_instobj_func, &node->memory);
392 node->imem = imem; 546 node->imem = imem;
393 547
394 /* present memory for being mapped using small pages */ 548 /* present memory for being mapped using small pages */
@@ -402,15 +556,25 @@ gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
402 return 0; 556 return 0;
403} 557}
404 558
405static void 559static void *
406gk20a_instmem_fini(struct nvkm_instmem *base) 560gk20a_instmem_dtor(struct nvkm_instmem *base)
407{ 561{
408 gk20a_instmem(base)->addr = ~0ULL; 562 struct gk20a_instmem *imem = gk20a_instmem(base);
563
564 /* perform some sanity checks... */
565 if (!list_empty(&imem->vaddr_lru))
566 nvkm_warn(&base->subdev, "instobj LRU not empty!\n");
567
568 if (imem->vaddr_use != 0)
569 nvkm_warn(&base->subdev, "instobj vmap area not empty! "
570 "0x%x bytes still mapped\n", imem->vaddr_use);
571
572 return imem;
409} 573}
410 574
411static const struct nvkm_instmem_func 575static const struct nvkm_instmem_func
412gk20a_instmem = { 576gk20a_instmem = {
413 .fini = gk20a_instmem_fini, 577 .dtor = gk20a_instmem_dtor,
414 .memory_new = gk20a_instobj_new, 578 .memory_new = gk20a_instobj_new,
415 .persistent = true, 579 .persistent = true,
416 .zero = false, 580 .zero = false,
@@ -429,23 +593,28 @@ gk20a_instmem_new(struct nvkm_device *device, int index,
429 spin_lock_init(&imem->lock); 593 spin_lock_init(&imem->lock);
430 *pimem = &imem->base; 594 *pimem = &imem->base;
431 595
596 /* do not allow more than 1MB of CPU-mapped instmem */
597 imem->vaddr_use = 0;
598 imem->vaddr_max = 0x100000;
599 INIT_LIST_HEAD(&imem->vaddr_lru);
600
432 if (tdev->iommu.domain) { 601 if (tdev->iommu.domain) {
433 imem->domain = tdev->iommu.domain; 602 imem->mm_mutex = &tdev->iommu.mutex;
434 imem->mm = &tdev->iommu.mm; 603 imem->mm = &tdev->iommu.mm;
604 imem->domain = tdev->iommu.domain;
435 imem->iommu_pgshift = tdev->iommu.pgshift; 605 imem->iommu_pgshift = tdev->iommu.pgshift;
436 imem->mm_mutex = &tdev->iommu.mutex; 606 imem->cpu_map = gk20a_instobj_cpu_map_iommu;
607 imem->iommu_bit = tdev->func->iommu_bit;
437 608
438 nvkm_info(&imem->base.subdev, "using IOMMU\n"); 609 nvkm_info(&imem->base.subdev, "using IOMMU\n");
439 } else { 610 } else {
440 init_dma_attrs(&imem->attrs); 611 init_dma_attrs(&imem->attrs);
441 /* 612 /* We will access the memory through our own mapping */
442 * We will access instmem through PRAMIN and thus do not need a
443 * consistent CPU pointer or kernel mapping
444 */
445 dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs); 613 dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
446 dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs); 614 dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
447 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs); 615 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);
448 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem->attrs); 616 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem->attrs);
617 imem->cpu_map = gk20a_instobj_cpu_map_dma;
449 618
450 nvkm_info(&imem->base.subdev, "using DMA API\n"); 619 nvkm_info(&imem->base.subdev, "using DMA API\n");
451 } 620 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
index 930d25b6e63c..85b1464c0194 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
@@ -67,6 +67,20 @@ nvkm_ltc_zbc_depth_get(struct nvkm_ltc *ltc, int index, const u32 depth)
67 return index; 67 return index;
68} 68}
69 69
70void
71nvkm_ltc_invalidate(struct nvkm_ltc *ltc)
72{
73 if (ltc->func->invalidate)
74 ltc->func->invalidate(ltc);
75}
76
77void
78nvkm_ltc_flush(struct nvkm_ltc *ltc)
79{
80 if (ltc->func->flush)
81 ltc->func->flush(ltc);
82}
83
70static void 84static void
71nvkm_ltc_intr(struct nvkm_subdev *subdev) 85nvkm_ltc_intr(struct nvkm_subdev *subdev)
72{ 86{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
index 45ac765b753e..fb0de83da13c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
@@ -122,6 +122,36 @@ gf100_ltc_intr(struct nvkm_ltc *ltc)
122 } 122 }
123} 123}
124 124
125void
126gf100_ltc_invalidate(struct nvkm_ltc *ltc)
127{
128 struct nvkm_device *device = ltc->subdev.device;
129 s64 taken;
130
131 nvkm_wr32(device, 0x70004, 0x00000001);
132 taken = nvkm_wait_msec(device, 2, 0x70004, 0x00000003, 0x00000000);
133 if (taken < 0)
134 nvkm_warn(&ltc->subdev, "LTC invalidate timeout\n");
135
136 if (taken > 0)
137 nvkm_debug(&ltc->subdev, "LTC invalidate took %lld ns\n", taken);
138}
139
140void
141gf100_ltc_flush(struct nvkm_ltc *ltc)
142{
143 struct nvkm_device *device = ltc->subdev.device;
144 s64 taken;
145
146 nvkm_wr32(device, 0x70010, 0x00000001);
147 taken = nvkm_wait_msec(device, 2, 0x70010, 0x00000003, 0x00000000);
148 if (taken < 0)
149 nvkm_warn(&ltc->subdev, "LTC flush timeout\n");
150
151 if (taken > 0)
152 nvkm_debug(&ltc->subdev, "LTC flush took %lld ns\n", taken);
153}
154
125/* TODO: Figure out tag memory details and drop the over-cautious allocation. 155/* TODO: Figure out tag memory details and drop the over-cautious allocation.
126 */ 156 */
127int 157int
@@ -215,6 +245,8 @@ gf100_ltc = {
215 .zbc = 16, 245 .zbc = 16,
216 .zbc_clear_color = gf100_ltc_zbc_clear_color, 246 .zbc_clear_color = gf100_ltc_zbc_clear_color,
217 .zbc_clear_depth = gf100_ltc_zbc_clear_depth, 247 .zbc_clear_depth = gf100_ltc_zbc_clear_depth,
248 .invalidate = gf100_ltc_invalidate,
249 .flush = gf100_ltc_flush,
218}; 250};
219 251
220int 252int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
index 839e6b4c597b..b4f6e0034d58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
@@ -45,6 +45,8 @@ gk104_ltc = {
45 .zbc = 16, 45 .zbc = 16,
46 .zbc_clear_color = gf100_ltc_zbc_clear_color, 46 .zbc_clear_color = gf100_ltc_zbc_clear_color,
47 .zbc_clear_depth = gf100_ltc_zbc_clear_depth, 47 .zbc_clear_depth = gf100_ltc_zbc_clear_depth,
48 .invalidate = gf100_ltc_invalidate,
49 .flush = gf100_ltc_flush,
48}; 50};
49 51
50int 52int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
index 389331bb63ba..3043bbfd7384 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
@@ -138,6 +138,8 @@ gm107_ltc = {
138 .zbc = 16, 138 .zbc = 16,
139 .zbc_clear_color = gm107_ltc_zbc_clear_color, 139 .zbc_clear_color = gm107_ltc_zbc_clear_color,
140 .zbc_clear_depth = gm107_ltc_zbc_clear_depth, 140 .zbc_clear_depth = gm107_ltc_zbc_clear_depth,
141 .invalidate = gf100_ltc_invalidate,
142 .flush = gf100_ltc_flush,
141}; 143};
142 144
143int 145int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
index 4e05037cc99f..4e3755b82769 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
@@ -17,6 +17,9 @@ struct nvkm_ltc_func {
17 int zbc; 17 int zbc;
18 void (*zbc_clear_color)(struct nvkm_ltc *, int, const u32[4]); 18 void (*zbc_clear_color)(struct nvkm_ltc *, int, const u32[4]);
19 void (*zbc_clear_depth)(struct nvkm_ltc *, int, const u32); 19 void (*zbc_clear_depth)(struct nvkm_ltc *, int, const u32);
20
21 void (*invalidate)(struct nvkm_ltc *);
22 void (*flush)(struct nvkm_ltc *);
20}; 23};
21 24
22int gf100_ltc_oneinit(struct nvkm_ltc *); 25int gf100_ltc_oneinit(struct nvkm_ltc *);
@@ -26,4 +29,6 @@ void gf100_ltc_cbc_clear(struct nvkm_ltc *, u32, u32);
26void gf100_ltc_cbc_wait(struct nvkm_ltc *); 29void gf100_ltc_cbc_wait(struct nvkm_ltc *);
27void gf100_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]); 30void gf100_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]);
28void gf100_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32); 31void gf100_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32);
32void gf100_ltc_invalidate(struct nvkm_ltc *);
33void gf100_ltc_flush(struct nvkm_ltc *);
29#endif 34#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
index 99672c3d0bad..4476ef75acd6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
@@ -2,6 +2,8 @@ nvkm-y += nvkm/subdev/pci/agp.o
2nvkm-y += nvkm/subdev/pci/base.o 2nvkm-y += nvkm/subdev/pci/base.o
3nvkm-y += nvkm/subdev/pci/nv04.o 3nvkm-y += nvkm/subdev/pci/nv04.o
4nvkm-y += nvkm/subdev/pci/nv40.o 4nvkm-y += nvkm/subdev/pci/nv40.o
5nvkm-y += nvkm/subdev/pci/nv46.o
5nvkm-y += nvkm/subdev/pci/nv4c.o 6nvkm-y += nvkm/subdev/pci/nv4c.o
6nvkm-y += nvkm/subdev/pci/nv50.o 7nvkm-y += nvkm/subdev/pci/g84.o
8nvkm-y += nvkm/subdev/pci/g94.o
7nvkm-y += nvkm/subdev/pci/gf100.o 9nvkm-y += nvkm/subdev/pci/gf100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index d1c148e51922..d671dcfaff3c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -46,6 +46,14 @@ nvkm_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
46 pci->func->wr32(pci, addr, data); 46 pci->func->wr32(pci, addr, data);
47} 47}
48 48
49u32
50nvkm_pci_mask(struct nvkm_pci *pci, u16 addr, u32 mask, u32 value)
51{
52 u32 data = pci->func->rd32(pci, addr);
53 pci->func->wr32(pci, addr, (data & ~mask) | value);
54 return data;
55}
56
49void 57void
50nvkm_pci_rom_shadow(struct nvkm_pci *pci, bool shadow) 58nvkm_pci_rom_shadow(struct nvkm_pci *pci, bool shadow)
51{ 59{
@@ -111,6 +119,9 @@ nvkm_pci_init(struct nvkm_subdev *subdev)
111 return ret; 119 return ret;
112 } 120 }
113 121
122 if (pci->func->init)
123 pci->func->init(pci);
124
114 ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci); 125 ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
115 if (ret) 126 if (ret)
116 return ret; 127 return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
new file mode 100644
index 000000000000..3faa6bfb895b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
@@ -0,0 +1,64 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "priv.h"
25
26#include <core/pci.h>
27
28void
29g84_pci_init(struct nvkm_pci *pci)
30{
31 /* The following only concerns PCIe cards. */
32 if (!pci_is_pcie(pci->pdev))
33 return;
34
35 /* Tag field is 8-bit long, regardless of EXT_TAG.
36 * However, if EXT_TAG is disabled, only the lower 5 bits of the tag
37 * field should be used, limiting the number of request to 32.
38 *
39 * Apparently, 0x041c stores some limit on the number of requests
40 * possible, so if EXT_TAG is disabled, limit that requests number to
41 * 32
42 *
43 * Fixes fdo#86537
44 */
45 if (nvkm_pci_rd32(pci, 0x007c) & 0x00000020)
46 nvkm_pci_mask(pci, 0x0080, 0x00000100, 0x00000100);
47 else
48 nvkm_pci_mask(pci, 0x041c, 0x00000060, 0x00000000);
49}
50
51static const struct nvkm_pci_func
52g84_pci_func = {
53 .init = g84_pci_init,
54 .rd32 = nv40_pci_rd32,
55 .wr08 = nv40_pci_wr08,
56 .wr32 = nv40_pci_wr32,
57 .msi_rearm = nv46_pci_msi_rearm,
58};
59
60int
61g84_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
62{
63 return nvkm_pci_new_(&g84_pci_func, device, index, ppci);
64}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
new file mode 100644
index 000000000000..cd311ee311cc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
@@ -0,0 +1,39 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "priv.h"
25
26static const struct nvkm_pci_func
27g94_pci_func = {
28 .init = g84_pci_init,
29 .rd32 = nv40_pci_rd32,
30 .wr08 = nv40_pci_wr08,
31 .wr32 = nv40_pci_wr32,
32 .msi_rearm = nv40_pci_msi_rearm,
33};
34
35int
36g94_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
37{
38 return nvkm_pci_new_(&g94_pci_func, device, index, ppci);
39}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
index 86f8226532d3..25e1ae70867f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
@@ -31,6 +31,7 @@ gf100_pci_msi_rearm(struct nvkm_pci *pci)
31 31
32static const struct nvkm_pci_func 32static const struct nvkm_pci_func
33gf100_pci_func = { 33gf100_pci_func = {
34 .init = g84_pci_init,
34 .rd32 = nv40_pci_rd32, 35 .rd32 = nv40_pci_rd32,
35 .wr08 = nv40_pci_wr08, 36 .wr08 = nv40_pci_wr08,
36 .wr32 = nv40_pci_wr32, 37 .wr32 = nv40_pci_wr32,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
index 090a187f165f..6eb417765802 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
@@ -44,7 +44,7 @@ nv40_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
44 nvkm_wr32(device, 0x088000 + addr, data); 44 nvkm_wr32(device, 0x088000 + addr, data);
45} 45}
46 46
47static void 47void
48nv40_pci_msi_rearm(struct nvkm_pci *pci) 48nv40_pci_msi_rearm(struct nvkm_pci *pci)
49{ 49{
50 nvkm_pci_wr08(pci, 0x0068, 0xff); 50 nvkm_pci_wr08(pci, 0x0068, 0xff);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
index 3e167d4a381f..fc617e4c0ab6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
@@ -25,11 +25,11 @@
25 25
26#include <core/pci.h> 26#include <core/pci.h>
27 27
28/* MSI re-arm through the PRI appears to be broken on the original G80, 28/* MSI re-arm through the PRI appears to be broken on NV46/NV50/G84/G86/G92,
29 * so we access it via alternate PCI config space mechanisms. 29 * so we access it via alternate PCI config space mechanisms.
30 */ 30 */
31static void 31void
32nv50_pci_msi_rearm(struct nvkm_pci *pci) 32nv46_pci_msi_rearm(struct nvkm_pci *pci)
33{ 33{
34 struct nvkm_device *device = pci->subdev.device; 34 struct nvkm_device *device = pci->subdev.device;
35 struct pci_dev *pdev = device->func->pci(device)->pdev; 35 struct pci_dev *pdev = device->func->pci(device)->pdev;
@@ -37,15 +37,15 @@ nv50_pci_msi_rearm(struct nvkm_pci *pci)
37} 37}
38 38
39static const struct nvkm_pci_func 39static const struct nvkm_pci_func
40nv50_pci_func = { 40nv46_pci_func = {
41 .rd32 = nv40_pci_rd32, 41 .rd32 = nv40_pci_rd32,
42 .wr08 = nv40_pci_wr08, 42 .wr08 = nv40_pci_wr08,
43 .wr32 = nv40_pci_wr32, 43 .wr32 = nv40_pci_wr32,
44 .msi_rearm = nv50_pci_msi_rearm, 44 .msi_rearm = nv46_pci_msi_rearm,
45}; 45};
46 46
47int 47int
48nv50_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci) 48nv46_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
49{ 49{
50 return nvkm_pci_new_(&nv50_pci_func, device, index, ppci); 50 return nvkm_pci_new_(&nv46_pci_func, device, index, ppci);
51} 51}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
index d22c2c117106..cf46d38d0b0a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
@@ -7,6 +7,7 @@ int nvkm_pci_new_(const struct nvkm_pci_func *, struct nvkm_device *,
7 int index, struct nvkm_pci **); 7 int index, struct nvkm_pci **);
8 8
9struct nvkm_pci_func { 9struct nvkm_pci_func {
10 void (*init)(struct nvkm_pci *);
10 u32 (*rd32)(struct nvkm_pci *, u16 addr); 11 u32 (*rd32)(struct nvkm_pci *, u16 addr);
11 void (*wr08)(struct nvkm_pci *, u16 addr, u8 data); 12 void (*wr08)(struct nvkm_pci *, u16 addr, u8 data);
12 void (*wr32)(struct nvkm_pci *, u16 addr, u32 data); 13 void (*wr32)(struct nvkm_pci *, u16 addr, u32 data);
@@ -16,4 +17,9 @@ struct nvkm_pci_func {
16u32 nv40_pci_rd32(struct nvkm_pci *, u16); 17u32 nv40_pci_rd32(struct nvkm_pci *, u16);
17void nv40_pci_wr08(struct nvkm_pci *, u16, u8); 18void nv40_pci_wr08(struct nvkm_pci *, u16, u8);
18void nv40_pci_wr32(struct nvkm_pci *, u16, u32); 19void nv40_pci_wr32(struct nvkm_pci *, u16, u32);
20void nv40_pci_msi_rearm(struct nvkm_pci *);
21
22void nv46_pci_msi_rearm(struct nvkm_pci *);
23
24void g84_pci_init(struct nvkm_pci *pci);
19#endif 25#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index 27a79c0c3888..d95eb8659d1b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -28,7 +28,7 @@
28void 28void
29nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable) 29nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
30{ 30{
31 if (pmu->func->pgob) 31 if (pmu && pmu->func->pgob)
32 pmu->func->pgob(pmu, enable); 32 pmu->func->pgob(pmu, enable);
33} 33}
34 34
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index e33f5c03b9ac..d942fa7b9f18 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -27,6 +27,7 @@
27#include "fuc/gf119.fuc4.h" 27#include "fuc/gf119.fuc4.h"
28 28
29#include <core/option.h> 29#include <core/option.h>
30#include <subdev/fuse.h>
30#include <subdev/timer.h> 31#include <subdev/timer.h>
31 32
32static void 33static void
@@ -57,6 +58,9 @@ gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
57{ 58{
58 struct nvkm_device *device = pmu->subdev.device; 59 struct nvkm_device *device = pmu->subdev.device;
59 60
61 if (!(nvkm_fuse_read(device->fuse, 0x31c) & 0x00000001))
62 return;
63
60 nvkm_mask(device, 0x000200, 0x00001000, 0x00000000); 64 nvkm_mask(device, 0x000200, 0x00001000, 0x00000000);
61 nvkm_rd32(device, 0x000200); 65 nvkm_rd32(device, 0x000200);
62 nvkm_mask(device, 0x000200, 0x08000000, 0x08000000); 66 nvkm_mask(device, 0x000200, 0x08000000, 0x08000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
index 6b46ff4213a3..b035c6e28be8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
@@ -1,4 +1,5 @@
1nvkm-y += nvkm/subdev/volt/base.o 1nvkm-y += nvkm/subdev/volt/base.o
2nvkm-y += nvkm/subdev/volt/gpio.o 2nvkm-y += nvkm/subdev/volt/gpio.o
3nvkm-y += nvkm/subdev/volt/nv40.o 3nvkm-y += nvkm/subdev/volt/nv40.o
4nvkm-y += nvkm/subdev/volt/gk104.o
4nvkm-y += nvkm/subdev/volt/gk20a.o 5nvkm-y += nvkm/subdev/volt/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
index 4752dbd33923..50b5649ad1a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
@@ -30,7 +30,12 @@
30int 30int
31nvkm_volt_get(struct nvkm_volt *volt) 31nvkm_volt_get(struct nvkm_volt *volt)
32{ 32{
33 int ret = volt->func->vid_get(volt), i; 33 int ret, i;
34
35 if (volt->func->volt_get)
36 return volt->func->volt_get(volt);
37
38 ret = volt->func->vid_get(volt);
34 if (ret >= 0) { 39 if (ret >= 0) {
35 for (i = 0; i < volt->vid_nr; i++) { 40 for (i = 0; i < volt->vid_nr; i++) {
36 if (volt->vid[i].vid == ret) 41 if (volt->vid[i].vid == ret)
@@ -46,6 +51,10 @@ nvkm_volt_set(struct nvkm_volt *volt, u32 uv)
46{ 51{
47 struct nvkm_subdev *subdev = &volt->subdev; 52 struct nvkm_subdev *subdev = &volt->subdev;
48 int i, ret = -EINVAL; 53 int i, ret = -EINVAL;
54
55 if (volt->func->volt_set)
56 return volt->func->volt_set(volt, uv);
57
49 for (i = 0; i < volt->vid_nr; i++) { 58 for (i = 0; i < volt->vid_nr; i++) {
50 if (volt->vid[i].uv == uv) { 59 if (volt->vid[i].uv == uv) {
51 ret = volt->func->vid_set(volt, volt->vid[i].vid); 60 ret = volt->func->vid_set(volt, volt->vid[i].vid);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
new file mode 100644
index 000000000000..b61509e26ec9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
@@ -0,0 +1,119 @@
1/*
2 * Copyright 2015 Martin Peres
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24#include "priv.h"
25
26#include <subdev/volt.h>
27#include <subdev/gpio.h>
28#include <subdev/bios.h>
29#include <subdev/bios/volt.h>
30
31#define gk104_volt(p) container_of((p), struct gk104_volt, base)
32struct gk104_volt {
33 struct nvkm_volt base;
34 struct nvbios_volt bios;
35};
36
37int
38gk104_volt_get(struct nvkm_volt *base)
39{
40 struct nvbios_volt *bios = &gk104_volt(base)->bios;
41 struct nvkm_device *device = base->subdev.device;
42 u32 div, duty;
43
44 div = nvkm_rd32(device, 0x20340);
45 duty = nvkm_rd32(device, 0x20344);
46
47 return bios->base + bios->pwm_range * duty / div;
48}
49
50int
51gk104_volt_set(struct nvkm_volt *base, u32 uv)
52{
53 struct nvbios_volt *bios = &gk104_volt(base)->bios;
54 struct nvkm_device *device = base->subdev.device;
55 u32 div, duty;
56
57 /* the blob uses this crystal frequency, let's use it too. */
58 div = 27648000 / bios->pwm_freq;
59 duty = (uv - bios->base) * div / bios->pwm_range;
60
61 nvkm_wr32(device, 0x20340, div);
62 nvkm_wr32(device, 0x20344, 0x8000000 | duty);
63
64 return 0;
65}
66
67static const struct nvkm_volt_func
68gk104_volt_pwm = {
69 .volt_get = gk104_volt_get,
70 .volt_set = gk104_volt_set,
71}, gk104_volt_gpio = {
72 .vid_get = nvkm_voltgpio_get,
73 .vid_set = nvkm_voltgpio_set,
74};
75
76int
77gk104_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
78{
79 const struct nvkm_volt_func *volt_func = &gk104_volt_gpio;
80 struct dcb_gpio_func gpio;
81 struct nvbios_volt bios;
82 struct gk104_volt *volt;
83 u8 ver, hdr, cnt, len;
84 const char *mode;
85
86 if (!nvbios_volt_parse(device->bios, &ver, &hdr, &cnt, &len, &bios))
87 return 0;
88
89 if (!nvkm_gpio_find(device->gpio, 0, DCB_GPIO_VID_PWM, 0xff, &gpio) &&
90 bios.type == NVBIOS_VOLT_PWM) {
91 volt_func = &gk104_volt_pwm;
92 }
93
94 if (!(volt = kzalloc(sizeof(*volt), GFP_KERNEL)))
95 return -ENOMEM;
96 nvkm_volt_ctor(volt_func, device, index, &volt->base);
97 *pvolt = &volt->base;
98 volt->bios = bios;
99
100 /* now that we have a subdev, we can show an error if we found through
101 * the voltage table that we were supposed to use the PWN mode but we
102 * did not find the right GPIO for it.
103 */
104 if (bios.type == NVBIOS_VOLT_PWM && volt_func != &gk104_volt_pwm) {
105 nvkm_error(&volt->base.subdev,
106 "Type mismatch between the voltage table type and "
107 "the GPIO table. Fallback to GPIO mode.\n");
108 }
109
110 if (volt_func == &gk104_volt_gpio) {
111 nvkm_voltgpio_init(&volt->base);
112 mode = "GPIO";
113 } else
114 mode = "PWM";
115
116 nvkm_debug(&volt->base.subdev, "Using %s mode\n", mode);
117
118 return 0;
119}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
index 394f37c723af..d5140d991161 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
@@ -9,6 +9,8 @@ int nvkm_volt_new_(const struct nvkm_volt_func *, struct nvkm_device *,
9 int index, struct nvkm_volt **); 9 int index, struct nvkm_volt **);
10 10
11struct nvkm_volt_func { 11struct nvkm_volt_func {
12 int (*volt_get)(struct nvkm_volt *);
13 int (*volt_set)(struct nvkm_volt *, u32 uv);
12 int (*vid_get)(struct nvkm_volt *); 14 int (*vid_get)(struct nvkm_volt *);
13 int (*vid_set)(struct nvkm_volt *, u8 vid); 15 int (*vid_set)(struct nvkm_volt *, u8 vid);
14 int (*set_id)(struct nvkm_volt *, u8 id, int condition); 16 int (*set_id)(struct nvkm_volt *, u8 id, int condition);
@@ -17,4 +19,8 @@ struct nvkm_volt_func {
17int nvkm_voltgpio_init(struct nvkm_volt *); 19int nvkm_voltgpio_init(struct nvkm_volt *);
18int nvkm_voltgpio_get(struct nvkm_volt *); 20int nvkm_voltgpio_get(struct nvkm_volt *);
19int nvkm_voltgpio_set(struct nvkm_volt *, u8); 21int nvkm_voltgpio_set(struct nvkm_volt *, u8);
22
23int nvkm_voltpwm_init(struct nvkm_volt *volt);
24int nvkm_voltpwm_get(struct nvkm_volt *volt);
25int nvkm_voltpwm_set(struct nvkm_volt *volt, u32 uv);
20#endif 26#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 9a4ba4f03567..ad09590e8a46 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -412,9 +412,6 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
412 dispc_mgr_go(omap_crtc->channel); 412 dispc_mgr_go(omap_crtc->channel);
413 omap_irq_register(crtc->dev, &omap_crtc->vblank_irq); 413 omap_irq_register(crtc->dev, &omap_crtc->vblank_irq);
414 } 414 }
415
416 crtc->invert_dimensions = !!(crtc->primary->state->rotation &
417 (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270)));
418} 415}
419 416
420static int omap_crtc_atomic_set_property(struct drm_crtc *crtc, 417static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 419c2e49adf5..5c6609cbb6a2 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -96,7 +96,7 @@ static void omap_atomic_complete(struct omap_atomic_state_commit *commit)
96 dispc_runtime_get(); 96 dispc_runtime_get();
97 97
98 drm_atomic_helper_commit_modeset_disables(dev, old_state); 98 drm_atomic_helper_commit_modeset_disables(dev, old_state);
99 drm_atomic_helper_commit_planes(dev, old_state); 99 drm_atomic_helper_commit_planes(dev, old_state, false);
100 drm_atomic_helper_commit_modeset_enables(dev, old_state); 100 drm_atomic_helper_commit_modeset_enables(dev, old_state);
101 101
102 omap_atomic_wait_for_completion(dev, old_state); 102 omap_atomic_wait_for_completion(dev, old_state);
@@ -626,12 +626,12 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
626} 626}
627 627
628static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = { 628static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
629 DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH), 629 DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_AUTH),
630 DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 630 DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
631 DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH), 631 DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_AUTH),
632 DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH), 632 DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, ioctl_gem_cpu_prep, DRM_AUTH),
633 DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH), 633 DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, ioctl_gem_cpu_fini, DRM_AUTH),
634 DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH), 634 DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info, DRM_AUTH),
635}; 635};
636 636
637/* 637/*
@@ -753,7 +753,7 @@ static void dev_lastclose(struct drm_device *dev)
753{ 753{
754 int i; 754 int i;
755 755
756 /* we don't support vga-switcheroo.. so just make sure the fbdev 756 /* we don't support vga_switcheroo.. so just make sure the fbdev
757 * mode is active 757 * mode is active
758 */ 758 */
759 struct omap_drm_private *priv = dev->dev_private; 759 struct omap_drm_private *priv = dev->dev_private;
@@ -839,7 +839,7 @@ static struct drm_driver omap_drm_driver = {
839 .preclose = dev_preclose, 839 .preclose = dev_preclose,
840 .postclose = dev_postclose, 840 .postclose = dev_postclose,
841 .set_busid = drm_platform_set_busid, 841 .set_busid = drm_platform_set_busid,
842 .get_vblank_counter = drm_vblank_count, 842 .get_vblank_counter = drm_vblank_no_hw_counter,
843 .enable_vblank = omap_irq_enable_vblank, 843 .enable_vblank = omap_irq_enable_vblank,
844 .disable_vblank = omap_irq_disable_vblank, 844 .disable_vblank = omap_irq_disable_vblank,
845#ifdef CONFIG_DEBUG_FS 845#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 12081e61d45a..5c367aad8a6e 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -129,8 +129,8 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
129int omap_gem_resume(struct device *dev); 129int omap_gem_resume(struct device *dev);
130#endif 130#endif
131 131
132int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id); 132int omap_irq_enable_vblank(struct drm_device *dev, unsigned int pipe);
133void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id); 133void omap_irq_disable_vblank(struct drm_device *dev, unsigned int pipe);
134void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq); 134void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
135void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq); 135void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
136void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq); 136void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 51b1219af87f..636a1f921569 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -171,7 +171,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
171 uint32_t w = win->src_w; 171 uint32_t w = win->src_w;
172 uint32_t h = win->src_h; 172 uint32_t h = win->src_h;
173 173
174 switch (win->rotation & 0xf) { 174 switch (win->rotation & DRM_ROTATE_MASK) {
175 default: 175 default:
176 dev_err(fb->dev->dev, "invalid rotation: %02x", 176 dev_err(fb->dev->dev, "invalid rotation: %02x",
177 (uint32_t)win->rotation); 177 (uint32_t)win->rotation);
@@ -209,7 +209,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
209 info->rotation_type = OMAP_DSS_ROT_TILER; 209 info->rotation_type = OMAP_DSS_ROT_TILER;
210 info->screen_width = omap_gem_tiled_stride(plane->bo, orient); 210 info->screen_width = omap_gem_tiled_stride(plane->bo, orient);
211 } else { 211 } else {
212 switch (win->rotation & 0xf) { 212 switch (win->rotation & DRM_ROTATE_MASK) {
213 case 0: 213 case 0:
214 case BIT(DRM_ROTATE_0): 214 case BIT(DRM_ROTATE_0):
215 /* OK */ 215 /* OK */
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 0cc71c9d08d5..27c297672076 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -140,15 +140,12 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
140 struct vm_area_struct *vma) 140 struct vm_area_struct *vma)
141{ 141{
142 struct drm_gem_object *obj = buffer->priv; 142 struct drm_gem_object *obj = buffer->priv;
143 struct drm_device *dev = obj->dev;
144 int ret = 0; 143 int ret = 0;
145 144
146 if (WARN_ON(!obj->filp)) 145 if (WARN_ON(!obj->filp))
147 return -EINVAL; 146 return -EINVAL;
148 147
149 mutex_lock(&dev->struct_mutex);
150 ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma); 148 ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
151 mutex_unlock(&dev->struct_mutex);
152 if (ret < 0) 149 if (ret < 0)
153 return ret; 150 return ret;
154 151
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 249c0330d6ce..60e1e8016708 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -134,7 +134,7 @@ int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
134/** 134/**
135 * enable_vblank - enable vblank interrupt events 135 * enable_vblank - enable vblank interrupt events
136 * @dev: DRM device 136 * @dev: DRM device
137 * @crtc: which irq to enable 137 * @pipe: which irq to enable
138 * 138 *
139 * Enable vblank interrupts for @crtc. If the device doesn't have 139 * Enable vblank interrupts for @crtc. If the device doesn't have
140 * a hardware vblank counter, this routine should be a no-op, since 140 * a hardware vblank counter, this routine should be a no-op, since
@@ -144,13 +144,13 @@ int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
144 * Zero on success, appropriate errno if the given @crtc's vblank 144 * Zero on success, appropriate errno if the given @crtc's vblank
145 * interrupt cannot be enabled. 145 * interrupt cannot be enabled.
146 */ 146 */
147int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id) 147int omap_irq_enable_vblank(struct drm_device *dev, unsigned int pipe)
148{ 148{
149 struct omap_drm_private *priv = dev->dev_private; 149 struct omap_drm_private *priv = dev->dev_private;
150 struct drm_crtc *crtc = priv->crtcs[crtc_id]; 150 struct drm_crtc *crtc = priv->crtcs[pipe];
151 unsigned long flags; 151 unsigned long flags;
152 152
153 DBG("dev=%p, crtc=%d", dev, crtc_id); 153 DBG("dev=%p, crtc=%u", dev, pipe);
154 154
155 spin_lock_irqsave(&list_lock, flags); 155 spin_lock_irqsave(&list_lock, flags);
156 priv->vblank_mask |= pipe2vbl(crtc); 156 priv->vblank_mask |= pipe2vbl(crtc);
@@ -163,19 +163,19 @@ int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id)
163/** 163/**
164 * disable_vblank - disable vblank interrupt events 164 * disable_vblank - disable vblank interrupt events
165 * @dev: DRM device 165 * @dev: DRM device
166 * @crtc: which irq to enable 166 * @pipe: which irq to enable
167 * 167 *
168 * Disable vblank interrupts for @crtc. If the device doesn't have 168 * Disable vblank interrupts for @crtc. If the device doesn't have
169 * a hardware vblank counter, this routine should be a no-op, since 169 * a hardware vblank counter, this routine should be a no-op, since
170 * interrupts will have to stay on to keep the count accurate. 170 * interrupts will have to stay on to keep the count accurate.
171 */ 171 */
172void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id) 172void omap_irq_disable_vblank(struct drm_device *dev, unsigned int pipe)
173{ 173{
174 struct omap_drm_private *priv = dev->dev_private; 174 struct omap_drm_private *priv = dev->dev_private;
175 struct drm_crtc *crtc = priv->crtcs[crtc_id]; 175 struct drm_crtc *crtc = priv->crtcs[pipe];
176 unsigned long flags; 176 unsigned long flags;
177 177
178 DBG("dev=%p, crtc=%d", dev, crtc_id); 178 DBG("dev=%p, crtc=%u", dev, pipe);
179 179
180 spin_lock_irqsave(&list_lock, flags); 180 spin_lock_irqsave(&list_lock, flags);
181 priv->vblank_mask &= ~pipe2vbl(crtc); 181 priv->vblank_mask &= ~pipe2vbl(crtc);
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 098904696a5c..3054bda72688 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -60,17 +60,19 @@ to_omap_plane_state(struct drm_plane_state *state)
60} 60}
61 61
62static int omap_plane_prepare_fb(struct drm_plane *plane, 62static int omap_plane_prepare_fb(struct drm_plane *plane,
63 struct drm_framebuffer *fb,
64 const struct drm_plane_state *new_state) 63 const struct drm_plane_state *new_state)
65{ 64{
66 return omap_framebuffer_pin(fb); 65 if (!new_state->fb)
66 return 0;
67
68 return omap_framebuffer_pin(new_state->fb);
67} 69}
68 70
69static void omap_plane_cleanup_fb(struct drm_plane *plane, 71static void omap_plane_cleanup_fb(struct drm_plane *plane,
70 struct drm_framebuffer *fb,
71 const struct drm_plane_state *old_state) 72 const struct drm_plane_state *old_state)
72{ 73{
73 omap_framebuffer_unpin(fb); 74 if (old_state->fb)
75 omap_framebuffer_unpin(old_state->fb);
74} 76}
75 77
76static void omap_plane_atomic_update(struct drm_plane *plane, 78static void omap_plane_atomic_update(struct drm_plane *plane,
@@ -106,7 +108,7 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
106 win.src_x = state->src_x >> 16; 108 win.src_x = state->src_x >> 16;
107 win.src_y = state->src_y >> 16; 109 win.src_y = state->src_y >> 16;
108 110
109 switch (state->rotation & 0xf) { 111 switch (state->rotation & DRM_ROTATE_MASK) {
110 case BIT(DRM_ROTATE_90): 112 case BIT(DRM_ROTATE_90):
111 case BIT(DRM_ROTATE_270): 113 case BIT(DRM_ROTATE_270):
112 win.src_w = state->src_h >> 16; 114 win.src_w = state->src_h >> 16;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 83f6f0b5e9ef..7307b07fe06b 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -196,17 +196,18 @@ static int qxl_pm_restore(struct device *dev)
196 return qxl_drm_resume(drm_dev, false); 196 return qxl_drm_resume(drm_dev, false);
197} 197}
198 198
199static u32 qxl_noop_get_vblank_counter(struct drm_device *dev, int crtc) 199static u32 qxl_noop_get_vblank_counter(struct drm_device *dev,
200 unsigned int pipe)
200{ 201{
201 return 0; 202 return 0;
202} 203}
203 204
204static int qxl_noop_enable_vblank(struct drm_device *dev, int crtc) 205static int qxl_noop_enable_vblank(struct drm_device *dev, unsigned int pipe)
205{ 206{
206 return 0; 207 return 0;
207} 208}
208 209
209static void qxl_noop_disable_vblank(struct drm_device *dev, int crtc) 210static void qxl_noop_disable_vblank(struct drm_device *dev, unsigned int pipe)
210{ 211{
211} 212}
212 213
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index bda5c5f80c24..2ae8577497ca 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -422,21 +422,21 @@ static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
422} 422}
423 423
424const struct drm_ioctl_desc qxl_ioctls[] = { 424const struct drm_ioctl_desc qxl_ioctls[] = {
425 DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED), 425 DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH),
426 426
427 DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED), 427 DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH),
428 428
429 DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl, 429 DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
430 DRM_AUTH|DRM_UNLOCKED), 430 DRM_AUTH),
431 DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl, 431 DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
432 DRM_AUTH|DRM_UNLOCKED), 432 DRM_AUTH),
433 DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl, 433 DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
434 DRM_AUTH|DRM_UNLOCKED), 434 DRM_AUTH),
435 DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl, 435 DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
436 DRM_AUTH|DRM_UNLOCKED), 436 DRM_AUTH),
437 437
438 DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl, 438 DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
439 DRM_AUTH|DRM_UNLOCKED), 439 DRM_AUTH),
440}; 440};
441 441
442int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls); 442int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls);
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index 2c45ac9c1dc3..14fd83b5f497 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -311,7 +311,7 @@ static void r128_cce_init_ring_buffer(struct drm_device *dev,
311 /* The manual (p. 2) says this address is in "VM space". This 311 /* The manual (p. 2) says this address is in "VM space". This
312 * means it's an offset from the start of AGP space. 312 * means it's an offset from the start of AGP space.
313 */ 313 */
314#if __OS_HAS_AGP 314#if IS_ENABLED(CONFIG_AGP)
315 if (!dev_priv->is_pci) 315 if (!dev_priv->is_pci)
316 ring_start = dev_priv->cce_ring->offset - dev->agp->base; 316 ring_start = dev_priv->cce_ring->offset - dev->agp->base;
317 else 317 else
@@ -505,7 +505,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
505 (drm_r128_sarea_t *) ((u8 *) dev_priv->sarea->handle + 505 (drm_r128_sarea_t *) ((u8 *) dev_priv->sarea->handle +
506 init->sarea_priv_offset); 506 init->sarea_priv_offset);
507 507
508#if __OS_HAS_AGP 508#if IS_ENABLED(CONFIG_AGP)
509 if (!dev_priv->is_pci) { 509 if (!dev_priv->is_pci) {
510 drm_legacy_ioremap_wc(dev_priv->cce_ring, dev); 510 drm_legacy_ioremap_wc(dev_priv->cce_ring, dev);
511 drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev); 511 drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev);
@@ -529,7 +529,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
529 (void *)(unsigned long)dev->agp_buffer_map->offset; 529 (void *)(unsigned long)dev->agp_buffer_map->offset;
530 } 530 }
531 531
532#if __OS_HAS_AGP 532#if IS_ENABLED(CONFIG_AGP)
533 if (!dev_priv->is_pci) 533 if (!dev_priv->is_pci)
534 dev_priv->cce_buffers_offset = dev->agp->base; 534 dev_priv->cce_buffers_offset = dev->agp->base;
535 else 535 else
@@ -552,7 +552,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
552 dev_priv->sarea_priv->last_dispatch = 0; 552 dev_priv->sarea_priv->last_dispatch = 0;
553 R128_WRITE(R128_LAST_DISPATCH_REG, dev_priv->sarea_priv->last_dispatch); 553 R128_WRITE(R128_LAST_DISPATCH_REG, dev_priv->sarea_priv->last_dispatch);
554 554
555#if __OS_HAS_AGP 555#if IS_ENABLED(CONFIG_AGP)
556 if (dev_priv->is_pci) { 556 if (dev_priv->is_pci) {
557#endif 557#endif
558 dev_priv->gart_info.table_mask = DMA_BIT_MASK(32); 558 dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
@@ -568,7 +568,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
568 return -ENOMEM; 568 return -ENOMEM;
569 } 569 }
570 R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr); 570 R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr);
571#if __OS_HAS_AGP 571#if IS_ENABLED(CONFIG_AGP)
572 } 572 }
573#endif 573#endif
574 574
@@ -600,7 +600,7 @@ int r128_do_cleanup_cce(struct drm_device *dev)
600 if (dev->dev_private) { 600 if (dev->dev_private) {
601 drm_r128_private_t *dev_priv = dev->dev_private; 601 drm_r128_private_t *dev_priv = dev->dev_private;
602 602
603#if __OS_HAS_AGP 603#if IS_ENABLED(CONFIG_AGP)
604 if (!dev_priv->is_pci) { 604 if (!dev_priv->is_pci) {
605 if (dev_priv->cce_ring != NULL) 605 if (dev_priv->cce_ring != NULL)
606 drm_legacy_ioremapfree(dev_priv->cce_ring, dev); 606 drm_legacy_ioremapfree(dev_priv->cce_ring, dev);
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 723e5d6f10a4..09143b840482 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -154,9 +154,9 @@ extern int r128_wait_ring(drm_r128_private_t *dev_priv, int n);
154extern int r128_do_cce_idle(drm_r128_private_t *dev_priv); 154extern int r128_do_cce_idle(drm_r128_private_t *dev_priv);
155extern int r128_do_cleanup_cce(struct drm_device *dev); 155extern int r128_do_cleanup_cce(struct drm_device *dev);
156 156
157extern int r128_enable_vblank(struct drm_device *dev, int crtc); 157extern int r128_enable_vblank(struct drm_device *dev, unsigned int pipe);
158extern void r128_disable_vblank(struct drm_device *dev, int crtc); 158extern void r128_disable_vblank(struct drm_device *dev, unsigned int pipe);
159extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc); 159extern u32 r128_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
160extern irqreturn_t r128_driver_irq_handler(int irq, void *arg); 160extern irqreturn_t r128_driver_irq_handler(int irq, void *arg);
161extern void r128_driver_irq_preinstall(struct drm_device *dev); 161extern void r128_driver_irq_preinstall(struct drm_device *dev);
162extern int r128_driver_irq_postinstall(struct drm_device *dev); 162extern int r128_driver_irq_postinstall(struct drm_device *dev);
diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
index c2ae496babb7..9730f4918944 100644
--- a/drivers/gpu/drm/r128/r128_irq.c
+++ b/drivers/gpu/drm/r128/r128_irq.c
@@ -34,11 +34,11 @@
34#include <drm/r128_drm.h> 34#include <drm/r128_drm.h>
35#include "r128_drv.h" 35#include "r128_drv.h"
36 36
37u32 r128_get_vblank_counter(struct drm_device *dev, int crtc) 37u32 r128_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
38{ 38{
39 const drm_r128_private_t *dev_priv = dev->dev_private; 39 const drm_r128_private_t *dev_priv = dev->dev_private;
40 40
41 if (crtc != 0) 41 if (pipe != 0)
42 return 0; 42 return 0;
43 43
44 return atomic_read(&dev_priv->vbl_received); 44 return atomic_read(&dev_priv->vbl_received);
@@ -62,12 +62,12 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
62 return IRQ_NONE; 62 return IRQ_NONE;
63} 63}
64 64
65int r128_enable_vblank(struct drm_device *dev, int crtc) 65int r128_enable_vblank(struct drm_device *dev, unsigned int pipe)
66{ 66{
67 drm_r128_private_t *dev_priv = dev->dev_private; 67 drm_r128_private_t *dev_priv = dev->dev_private;
68 68
69 if (crtc != 0) { 69 if (pipe != 0) {
70 DRM_ERROR("%s: bad crtc %d\n", __func__, crtc); 70 DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
71 return -EINVAL; 71 return -EINVAL;
72 } 72 }
73 73
@@ -75,10 +75,10 @@ int r128_enable_vblank(struct drm_device *dev, int crtc)
75 return 0; 75 return 0;
76} 76}
77 77
78void r128_disable_vblank(struct drm_device *dev, int crtc) 78void r128_disable_vblank(struct drm_device *dev, unsigned int pipe)
79{ 79{
80 if (crtc != 0) 80 if (pipe != 0)
81 DRM_ERROR("%s: bad crtc %d\n", __func__, crtc); 81 DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
82 82
83 /* 83 /*
84 * FIXME: implement proper interrupt disable by using the vblank 84 * FIXME: implement proper interrupt disable by using the vblank
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 9cd49c584263..bd73b4069069 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -179,6 +179,7 @@ radeon_dp_aux_transfer_atom(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
179 switch (msg->request & ~DP_AUX_I2C_MOT) { 179 switch (msg->request & ~DP_AUX_I2C_MOT) {
180 case DP_AUX_NATIVE_WRITE: 180 case DP_AUX_NATIVE_WRITE:
181 case DP_AUX_I2C_WRITE: 181 case DP_AUX_I2C_WRITE:
182 case DP_AUX_I2C_WRITE_STATUS_UPDATE:
182 /* The atom implementation only supports writes with a max payload of 183 /* The atom implementation only supports writes with a max payload of
183 * 12 bytes since it uses 4 bits for the total count (header + payload) 184 * 12 bytes since it uses 4 bits for the total count (header + payload)
184 * in the parameter space. The atom interface supports 16 byte 185 * in the parameter space. The atom interface supports 16 byte
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.c b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
index 98d009e154bf..9fec4d09f383 100644
--- a/drivers/gpu/drm/radeon/cayman_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
@@ -32,7 +32,7 @@
32 * evergreen cards need to use the 3D engine to blit data which requires 32 * evergreen cards need to use the 3D engine to blit data which requires
33 * quite a bit of hw state setup. Rather than pull the whole 3D driver 33 * quite a bit of hw state setup. Rather than pull the whole 3D driver
34 * (which normally generates the 3D state) into the DRM, we opt to use 34 * (which normally generates the 3D state) into the DRM, we opt to use
35 * statically generated state tables. The regsiter state and shaders 35 * statically generated state tables. The register state and shaders
36 * were hand generated to support blitting functionality. See the 3D 36 * were hand generated to support blitting functionality. See the 3D
37 * driver or documentation for descriptions of the registers and 37 * driver or documentation for descriptions of the registers and
38 * shader instructions. 38 * shader instructions.
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 0acde1949c18..7f33767d7ed6 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1404,44 +1404,20 @@ void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
1404 * @crtc_id: crtc to cleanup pageflip on 1404 * @crtc_id: crtc to cleanup pageflip on
1405 * @crtc_base: new address of the crtc (GPU MC address) 1405 * @crtc_base: new address of the crtc (GPU MC address)
1406 * 1406 *
1407 * Does the actual pageflip (evergreen+). 1407 * Triggers the actual pageflip by updating the primary
1408 * During vblank we take the crtc lock and wait for the update_pending 1408 * surface base address (evergreen+).
1409 * bit to go high, when it does, we release the lock, and allow the
1410 * double buffered update to take place.
1411 * Returns the current update pending status.
1412 */ 1409 */
1413void evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) 1410void evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
1414{ 1411{
1415 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 1412 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
1416 u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
1417 int i;
1418
1419 /* Lock the graphics update lock */
1420 tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
1421 WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
1422 1413
1423 /* update the scanout addresses */ 1414 /* update the scanout addresses */
1424 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
1425 upper_32_bits(crtc_base));
1426 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
1427 (u32)crtc_base);
1428
1429 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 1415 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
1430 upper_32_bits(crtc_base)); 1416 upper_32_bits(crtc_base));
1431 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, 1417 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
1432 (u32)crtc_base); 1418 (u32)crtc_base);
1433 1419 /* post the write */
1434 /* Wait for update_pending to go high. */ 1420 RREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset);
1435 for (i = 0; i < rdev->usec_timeout; i++) {
1436 if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
1437 break;
1438 udelay(1);
1439 }
1440 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
1441
1442 /* Unlock the lock, so double-buffering can take place inside vblank */
1443 tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
1444 WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
1445} 1421}
1446 1422
1447/** 1423/**
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
index d43383470cdf..1a96ddb3e5ed 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
@@ -32,7 +32,7 @@
32 * evergreen cards need to use the 3D engine to blit data which requires 32 * evergreen cards need to use the 3D engine to blit data which requires
33 * quite a bit of hw state setup. Rather than pull the whole 3D driver 33 * quite a bit of hw state setup. Rather than pull the whole 3D driver
34 * (which normally generates the 3D state) into the DRM, we opt to use 34 * (which normally generates the 3D state) into the DRM, we opt to use
35 * statically generated state tables. The regsiter state and shaders 35 * statically generated state tables. The register state and shaders
36 * were hand generated to support blitting functionality. See the 3D 36 * were hand generated to support blitting functionality. See the 3D
37 * driver or documentation for descriptions of the registers and 37 * driver or documentation for descriptions of the registers and
38 * shader instructions. 38 * shader instructions.
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index c9e0fbbf76a3..46f87d4aaf31 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -34,6 +34,8 @@
34#define MAX(a,b) (((a)>(b))?(a):(b)) 34#define MAX(a,b) (((a)>(b))?(a):(b))
35#define MIN(a,b) (((a)<(b))?(a):(b)) 35#define MIN(a,b) (((a)<(b))?(a):(b))
36 36
37#define REG_SAFE_BM_SIZE ARRAY_SIZE(evergreen_reg_safe_bm)
38
37int r600_dma_cs_next_reloc(struct radeon_cs_parser *p, 39int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
38 struct radeon_bo_list **cs_reloc); 40 struct radeon_bo_list **cs_reloc);
39struct evergreen_cs_track { 41struct evergreen_cs_track {
@@ -84,6 +86,7 @@ struct evergreen_cs_track {
84 u32 htile_surface; 86 u32 htile_surface;
85 struct radeon_bo *htile_bo; 87 struct radeon_bo *htile_bo;
86 unsigned long indirect_draw_buffer_size; 88 unsigned long indirect_draw_buffer_size;
89 const unsigned *reg_safe_bm;
87}; 90};
88 91
89static u32 evergreen_cs_get_aray_mode(u32 tiling_flags) 92static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
@@ -444,7 +447,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
444 * command stream. 447 * command stream.
445 */ 448 */
446 if (!surf.mode) { 449 if (!surf.mode) {
447 volatile u32 *ib = p->ib.ptr; 450 uint32_t *ib = p->ib.ptr;
448 unsigned long tmp, nby, bsize, size, min = 0; 451 unsigned long tmp, nby, bsize, size, min = 0;
449 452
450 /* find the height the ddx wants */ 453 /* find the height the ddx wants */
@@ -1083,41 +1086,18 @@ static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
1083} 1086}
1084 1087
1085/** 1088/**
1086 * evergreen_cs_check_reg() - check if register is authorized or not 1089 * evergreen_cs_handle_reg() - process registers that need special handling.
1087 * @parser: parser structure holding parsing context 1090 * @parser: parser structure holding parsing context
1088 * @reg: register we are testing 1091 * @reg: register we are testing
1089 * @idx: index into the cs buffer 1092 * @idx: index into the cs buffer
1090 *
1091 * This function will test against evergreen_reg_safe_bm and return 0
1092 * if register is safe. If register is not flag as safe this function
1093 * will test it against a list of register needind special handling.
1094 */ 1093 */
1095static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) 1094static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1096{ 1095{
1097 struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track; 1096 struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
1098 struct radeon_bo_list *reloc; 1097 struct radeon_bo_list *reloc;
1099 u32 last_reg; 1098 u32 tmp, *ib;
1100 u32 m, i, tmp, *ib;
1101 int r; 1099 int r;
1102 1100
1103 if (p->rdev->family >= CHIP_CAYMAN)
1104 last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
1105 else
1106 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
1107
1108 i = (reg >> 7);
1109 if (i >= last_reg) {
1110 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1111 return -EINVAL;
1112 }
1113 m = 1 << ((reg >> 2) & 31);
1114 if (p->rdev->family >= CHIP_CAYMAN) {
1115 if (!(cayman_reg_safe_bm[i] & m))
1116 return 0;
1117 } else {
1118 if (!(evergreen_reg_safe_bm[i] & m))
1119 return 0;
1120 }
1121 ib = p->ib.ptr; 1101 ib = p->ib.ptr;
1122 switch (reg) { 1102 switch (reg) {
1123 /* force following reg to 0 in an attempt to disable out buffer 1103 /* force following reg to 0 in an attempt to disable out buffer
@@ -1764,29 +1744,27 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1764 return 0; 1744 return 0;
1765} 1745}
1766 1746
1767static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) 1747/**
1748 * evergreen_is_safe_reg() - check if register is authorized or not
1749 * @parser: parser structure holding parsing context
1750 * @reg: register we are testing
1751 *
1752 * This function will test against reg_safe_bm and return true
1753 * if register is safe or false otherwise.
1754 */
1755static inline bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg)
1768{ 1756{
1769 u32 last_reg, m, i; 1757 struct evergreen_cs_track *track = p->track;
1770 1758 u32 m, i;
1771 if (p->rdev->family >= CHIP_CAYMAN)
1772 last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
1773 else
1774 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
1775 1759
1776 i = (reg >> 7); 1760 i = (reg >> 7);
1777 if (i >= last_reg) { 1761 if (unlikely(i >= REG_SAFE_BM_SIZE)) {
1778 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1779 return false; 1762 return false;
1780 } 1763 }
1781 m = 1 << ((reg >> 2) & 31); 1764 m = 1 << ((reg >> 2) & 31);
1782 if (p->rdev->family >= CHIP_CAYMAN) { 1765 if (!(track->reg_safe_bm[i] & m))
1783 if (!(cayman_reg_safe_bm[i] & m)) 1766 return true;
1784 return true; 1767
1785 } else {
1786 if (!(evergreen_reg_safe_bm[i] & m))
1787 return true;
1788 }
1789 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1790 return false; 1768 return false;
1791} 1769}
1792 1770
@@ -1795,7 +1773,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
1795{ 1773{
1796 struct radeon_bo_list *reloc; 1774 struct radeon_bo_list *reloc;
1797 struct evergreen_cs_track *track; 1775 struct evergreen_cs_track *track;
1798 volatile u32 *ib; 1776 uint32_t *ib;
1799 unsigned idx; 1777 unsigned idx;
1800 unsigned i; 1778 unsigned i;
1801 unsigned start_reg, end_reg, reg; 1779 unsigned start_reg, end_reg, reg;
@@ -2321,9 +2299,10 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
2321 DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); 2299 DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
2322 return -EINVAL; 2300 return -EINVAL;
2323 } 2301 }
2324 for (i = 0; i < pkt->count; i++) { 2302 for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
2325 reg = start_reg + (4 * i); 2303 if (evergreen_is_safe_reg(p, reg))
2326 r = evergreen_cs_check_reg(p, reg, idx+1+i); 2304 continue;
2305 r = evergreen_cs_handle_reg(p, reg, idx);
2327 if (r) 2306 if (r)
2328 return r; 2307 return r;
2329 } 2308 }
@@ -2337,9 +2316,10 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
2337 DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n"); 2316 DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
2338 return -EINVAL; 2317 return -EINVAL;
2339 } 2318 }
2340 for (i = 0; i < pkt->count; i++) { 2319 for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
2341 reg = start_reg + (4 * i); 2320 if (evergreen_is_safe_reg(p, reg))
2342 r = evergreen_cs_check_reg(p, reg, idx+1+i); 2321 continue;
2322 r = evergreen_cs_handle_reg(p, reg, idx);
2343 if (r) 2323 if (r)
2344 return r; 2324 return r;
2345 } 2325 }
@@ -2594,8 +2574,11 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
2594 } else { 2574 } else {
2595 /* SRC is a reg. */ 2575 /* SRC is a reg. */
2596 reg = radeon_get_ib_value(p, idx+1) << 2; 2576 reg = radeon_get_ib_value(p, idx+1) << 2;
2597 if (!evergreen_is_safe_reg(p, reg, idx+1)) 2577 if (!evergreen_is_safe_reg(p, reg)) {
2578 dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
2579 reg, idx + 1);
2598 return -EINVAL; 2580 return -EINVAL;
2581 }
2599 } 2582 }
2600 if (idx_value & 0x2) { 2583 if (idx_value & 0x2) {
2601 u64 offset; 2584 u64 offset;
@@ -2618,8 +2601,11 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
2618 } else { 2601 } else {
2619 /* DST is a reg. */ 2602 /* DST is a reg. */
2620 reg = radeon_get_ib_value(p, idx+3) << 2; 2603 reg = radeon_get_ib_value(p, idx+3) << 2;
2621 if (!evergreen_is_safe_reg(p, reg, idx+3)) 2604 if (!evergreen_is_safe_reg(p, reg)) {
2605 dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
2606 reg, idx + 3);
2622 return -EINVAL; 2607 return -EINVAL;
2608 }
2623 } 2609 }
2624 break; 2610 break;
2625 case PACKET3_NOP: 2611 case PACKET3_NOP:
@@ -2644,11 +2630,15 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
2644 if (track == NULL) 2630 if (track == NULL)
2645 return -ENOMEM; 2631 return -ENOMEM;
2646 evergreen_cs_track_init(track); 2632 evergreen_cs_track_init(track);
2647 if (p->rdev->family >= CHIP_CAYMAN) 2633 if (p->rdev->family >= CHIP_CAYMAN) {
2648 tmp = p->rdev->config.cayman.tile_config; 2634 tmp = p->rdev->config.cayman.tile_config;
2649 else 2635 track->reg_safe_bm = cayman_reg_safe_bm;
2636 } else {
2650 tmp = p->rdev->config.evergreen.tile_config; 2637 tmp = p->rdev->config.evergreen.tile_config;
2651 2638 track->reg_safe_bm = evergreen_reg_safe_bm;
2639 }
2640 BUILD_BUG_ON(ARRAY_SIZE(cayman_reg_safe_bm) != REG_SAFE_BM_SIZE);
2641 BUILD_BUG_ON(ARRAY_SIZE(evergreen_reg_safe_bm) != REG_SAFE_BM_SIZE);
2652 switch (tmp & 0xf) { 2642 switch (tmp & 0xf) {
2653 case 0: 2643 case 0:
2654 track->npipes = 1; 2644 track->npipes = 1;
@@ -2757,7 +2747,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
2757 struct radeon_cs_chunk *ib_chunk = p->chunk_ib; 2747 struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
2758 struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc; 2748 struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc;
2759 u32 header, cmd, count, sub_cmd; 2749 u32 header, cmd, count, sub_cmd;
2760 volatile u32 *ib = p->ib.ptr; 2750 uint32_t *ib = p->ib.ptr;
2761 u32 idx; 2751 u32 idx;
2762 u64 src_offset, dst_offset, dst2_offset; 2752 u64 src_offset, dst_offset, dst2_offset;
2763 int r; 2753 int r;
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index 34c8b2340f33..443cbe59b274 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -32,7 +32,7 @@
32 * R6xx+ cards need to use the 3D engine to blit data which requires 32 * R6xx+ cards need to use the 3D engine to blit data which requires
33 * quite a bit of hw state setup. Rather than pull the whole 3D driver 33 * quite a bit of hw state setup. Rather than pull the whole 3D driver
34 * (which normally generates the 3D state) into the DRM, we opt to use 34 * (which normally generates the 3D state) into the DRM, we opt to use
35 * statically generated state tables. The regsiter state and shaders 35 * statically generated state tables. The register state and shaders
36 * were hand generated to support blitting functionality. See the 3D 36 * were hand generated to support blitting functionality. See the 3D
37 * driver or documentation for descriptions of the registers and 37 * driver or documentation for descriptions of the registers and
38 * shader instructions. 38 * shader instructions.
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 98f9adaccc3d..e231eeafef23 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -1837,7 +1837,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
1837 SET_RING_HEAD(dev_priv, 0); 1837 SET_RING_HEAD(dev_priv, 0);
1838 dev_priv->ring.tail = 0; 1838 dev_priv->ring.tail = 0;
1839 1839
1840#if __OS_HAS_AGP 1840#if IS_ENABLED(CONFIG_AGP)
1841 if (dev_priv->flags & RADEON_IS_AGP) { 1841 if (dev_priv->flags & RADEON_IS_AGP) {
1842 rptr_addr = dev_priv->ring_rptr->offset 1842 rptr_addr = dev_priv->ring_rptr->offset
1843 - dev->agp->base + 1843 - dev->agp->base +
@@ -1863,7 +1863,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
1863 dev_priv->ring.size_l2qw); 1863 dev_priv->ring.size_l2qw);
1864#endif 1864#endif
1865 1865
1866#if __OS_HAS_AGP 1866#if IS_ENABLED(CONFIG_AGP)
1867 if (dev_priv->flags & RADEON_IS_AGP) { 1867 if (dev_priv->flags & RADEON_IS_AGP) {
1868 /* XXX */ 1868 /* XXX */
1869 radeon_write_agp_base(dev_priv, dev->agp->base); 1869 radeon_write_agp_base(dev_priv, dev->agp->base);
@@ -1946,7 +1946,7 @@ int r600_do_cleanup_cp(struct drm_device *dev)
1946 if (dev->irq_enabled) 1946 if (dev->irq_enabled)
1947 drm_irq_uninstall(dev); 1947 drm_irq_uninstall(dev);
1948 1948
1949#if __OS_HAS_AGP 1949#if IS_ENABLED(CONFIG_AGP)
1950 if (dev_priv->flags & RADEON_IS_AGP) { 1950 if (dev_priv->flags & RADEON_IS_AGP) {
1951 if (dev_priv->cp_ring != NULL) { 1951 if (dev_priv->cp_ring != NULL) {
1952 drm_legacy_ioremapfree(dev_priv->cp_ring, dev); 1952 drm_legacy_ioremapfree(dev_priv->cp_ring, dev);
@@ -2089,7 +2089,7 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
2089 } 2089 }
2090 } 2090 }
2091 2091
2092#if __OS_HAS_AGP 2092#if IS_ENABLED(CONFIG_AGP)
2093 /* XXX */ 2093 /* XXX */
2094 if (dev_priv->flags & RADEON_IS_AGP) { 2094 if (dev_priv->flags & RADEON_IS_AGP) {
2095 drm_legacy_ioremap_wc(dev_priv->cp_ring, dev); 2095 drm_legacy_ioremap_wc(dev_priv->cp_ring, dev);
@@ -2148,7 +2148,7 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
2148 * location in the card and on the bus, though we have to 2148 * location in the card and on the bus, though we have to
2149 * align it down. 2149 * align it down.
2150 */ 2150 */
2151#if __OS_HAS_AGP 2151#if IS_ENABLED(CONFIG_AGP)
2152 /* XXX */ 2152 /* XXX */
2153 if (dev_priv->flags & RADEON_IS_AGP) { 2153 if (dev_priv->flags & RADEON_IS_AGP) {
2154 base = dev->agp->base; 2154 base = dev->agp->base;
@@ -2175,7 +2175,7 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
2175 base, dev_priv->gart_vm_start); 2175 base, dev_priv->gart_vm_start);
2176 } 2176 }
2177 2177
2178#if __OS_HAS_AGP 2178#if IS_ENABLED(CONFIG_AGP)
2179 /* XXX */ 2179 /* XXX */
2180 if (dev_priv->flags & RADEON_IS_AGP) 2180 if (dev_priv->flags & RADEON_IS_AGP)
2181 dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset 2181 dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
@@ -2212,7 +2212,7 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
2212 2212
2213 dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; 2213 dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
2214 2214
2215#if __OS_HAS_AGP 2215#if IS_ENABLED(CONFIG_AGP)
2216 if (dev_priv->flags & RADEON_IS_AGP) { 2216 if (dev_priv->flags & RADEON_IS_AGP) {
2217 /* XXX turn off pcie gart */ 2217 /* XXX turn off pcie gart */
2218 } else 2218 } else
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 77e9d07c55b6..59acd0e5c2c6 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -25,7 +25,6 @@
25#include <linux/acpi.h> 25#include <linux/acpi.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/power_supply.h> 27#include <linux/power_supply.h>
28#include <linux/vga_switcheroo.h>
29#include <acpi/video.h> 28#include <acpi/video.h>
30#include <drm/drmP.h> 29#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h> 30#include <drm/drm_crtc_helper.h>
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index a9297b2c3524..fe994aac3b04 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -28,7 +28,7 @@
28#include "radeon.h" 28#include "radeon.h"
29#include <drm/radeon_drm.h> 29#include <drm/radeon_drm.h>
30 30
31#if __OS_HAS_AGP 31#if IS_ENABLED(CONFIG_AGP)
32 32
33struct radeon_agpmode_quirk { 33struct radeon_agpmode_quirk {
34 u32 hostbridge_vendor; 34 u32 hostbridge_vendor;
@@ -123,7 +123,7 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
123 123
124int radeon_agp_init(struct radeon_device *rdev) 124int radeon_agp_init(struct radeon_device *rdev)
125{ 125{
126#if __OS_HAS_AGP 126#if IS_ENABLED(CONFIG_AGP)
127 struct radeon_agpmode_quirk *p = radeon_agpmode_quirk_list; 127 struct radeon_agpmode_quirk *p = radeon_agpmode_quirk_list;
128 struct drm_agp_mode mode; 128 struct drm_agp_mode mode;
129 struct drm_agp_info info; 129 struct drm_agp_info info;
@@ -257,7 +257,7 @@ int radeon_agp_init(struct radeon_device *rdev)
257 257
258void radeon_agp_resume(struct radeon_device *rdev) 258void radeon_agp_resume(struct radeon_device *rdev)
259{ 259{
260#if __OS_HAS_AGP 260#if IS_ENABLED(CONFIG_AGP)
261 int r; 261 int r;
262 if (rdev->flags & RADEON_IS_AGP) { 262 if (rdev->flags & RADEON_IS_AGP) {
263 r = radeon_agp_init(rdev); 263 r = radeon_agp_init(rdev);
@@ -269,7 +269,7 @@ void radeon_agp_resume(struct radeon_device *rdev)
269 269
270void radeon_agp_fini(struct radeon_device *rdev) 270void radeon_agp_fini(struct radeon_device *rdev)
271{ 271{
272#if __OS_HAS_AGP 272#if IS_ENABLED(CONFIG_AGP)
273 if (rdev->ddev->agp && rdev->ddev->agp->acquired) { 273 if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
274 drm_agp_release(rdev->ddev); 274 drm_agp_release(rdev->ddev);
275 } 275 }
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index f2421bc3e901..1d4d4520a0ac 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -31,7 +31,6 @@
31#include <drm/drm_crtc_helper.h> 31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h> 32#include <drm/radeon_drm.h>
33#include <linux/vgaarb.h> 33#include <linux/vgaarb.h>
34#include <linux/vga_switcheroo.h>
35#include "radeon_reg.h" 34#include "radeon_reg.h"
36#include "radeon.h" 35#include "radeon.h"
37#include "radeon_asic.h" 36#include "radeon_asic.h"
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 8bc7d0bbd3c8..c4b4f298a283 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -499,7 +499,7 @@ static int radeon_atpx_get_client_id(struct pci_dev *pdev)
499 return VGA_SWITCHEROO_DIS; 499 return VGA_SWITCHEROO_DIS;
500} 500}
501 501
502static struct vga_switcheroo_handler radeon_atpx_handler = { 502static const struct vga_switcheroo_handler radeon_atpx_handler = {
503 .switchto = radeon_atpx_switchto, 503 .switchto = radeon_atpx_switchto,
504 .power_state = radeon_atpx_power_state, 504 .power_state = radeon_atpx_power_state,
505 .init = radeon_atpx_init, 505 .init = radeon_atpx_init,
@@ -535,7 +535,7 @@ static bool radeon_atpx_detect(void)
535 535
536 if (has_atpx && vga_count == 2) { 536 if (has_atpx && vga_count == 2) {
537 acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer); 537 acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
538 printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", 538 printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
539 acpi_method_name); 539 acpi_method_name);
540 radeon_atpx_priv.atpx_detected = true; 540 radeon_atpx_priv.atpx_detected = true;
541 return true; 541 return true;
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index d27e4ccb848c..21b6732425c5 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -30,7 +30,6 @@
30#include "radeon.h" 30#include "radeon.h"
31#include "atom.h" 31#include "atom.h"
32 32
33#include <linux/vga_switcheroo.h>
34#include <linux/slab.h> 33#include <linux/slab.h>
35#include <linux/acpi.h> 34#include <linux/acpi.h>
36/* 35/*
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index ea134a7d51a5..500287eff55d 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -762,7 +762,7 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
762 ((dev_priv->gart_vm_start - 1) & 0xffff0000) 762 ((dev_priv->gart_vm_start - 1) & 0xffff0000)
763 | (dev_priv->fb_location >> 16)); 763 | (dev_priv->fb_location >> 16));
764 764
765#if __OS_HAS_AGP 765#if IS_ENABLED(CONFIG_AGP)
766 if (dev_priv->flags & RADEON_IS_AGP) { 766 if (dev_priv->flags & RADEON_IS_AGP) {
767 radeon_write_agp_base(dev_priv, dev->agp->base); 767 radeon_write_agp_base(dev_priv, dev->agp->base);
768 768
@@ -791,7 +791,7 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
791 SET_RING_HEAD(dev_priv, cur_read_ptr); 791 SET_RING_HEAD(dev_priv, cur_read_ptr);
792 dev_priv->ring.tail = cur_read_ptr; 792 dev_priv->ring.tail = cur_read_ptr;
793 793
794#if __OS_HAS_AGP 794#if IS_ENABLED(CONFIG_AGP)
795 if (dev_priv->flags & RADEON_IS_AGP) { 795 if (dev_priv->flags & RADEON_IS_AGP) {
796 RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, 796 RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
797 dev_priv->ring_rptr->offset 797 dev_priv->ring_rptr->offset
@@ -1335,7 +1335,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
1335 } 1335 }
1336 } 1336 }
1337 1337
1338#if __OS_HAS_AGP 1338#if IS_ENABLED(CONFIG_AGP)
1339 if (dev_priv->flags & RADEON_IS_AGP) { 1339 if (dev_priv->flags & RADEON_IS_AGP) {
1340 drm_legacy_ioremap_wc(dev_priv->cp_ring, dev); 1340 drm_legacy_ioremap_wc(dev_priv->cp_ring, dev);
1341 drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev); 1341 drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev);
@@ -1394,7 +1394,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
1394 * location in the card and on the bus, though we have to 1394 * location in the card and on the bus, though we have to
1395 * align it down. 1395 * align it down.
1396 */ 1396 */
1397#if __OS_HAS_AGP 1397#if IS_ENABLED(CONFIG_AGP)
1398 if (dev_priv->flags & RADEON_IS_AGP) { 1398 if (dev_priv->flags & RADEON_IS_AGP) {
1399 base = dev->agp->base; 1399 base = dev->agp->base;
1400 /* Check if valid */ 1400 /* Check if valid */
@@ -1424,7 +1424,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
1424 RADEON_READ(RADEON_CONFIG_APER_SIZE); 1424 RADEON_READ(RADEON_CONFIG_APER_SIZE);
1425 } 1425 }
1426 1426
1427#if __OS_HAS_AGP 1427#if IS_ENABLED(CONFIG_AGP)
1428 if (dev_priv->flags & RADEON_IS_AGP) 1428 if (dev_priv->flags & RADEON_IS_AGP)
1429 dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset 1429 dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
1430 - dev->agp->base 1430 - dev->agp->base
@@ -1455,7 +1455,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
1455 1455
1456 dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; 1456 dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
1457 1457
1458#if __OS_HAS_AGP 1458#if IS_ENABLED(CONFIG_AGP)
1459 if (dev_priv->flags & RADEON_IS_AGP) { 1459 if (dev_priv->flags & RADEON_IS_AGP) {
1460 /* Turn off PCI GART */ 1460 /* Turn off PCI GART */
1461 radeon_set_pcigart(dev_priv, 0); 1461 radeon_set_pcigart(dev_priv, 0);
@@ -1566,7 +1566,7 @@ static int radeon_do_cleanup_cp(struct drm_device * dev)
1566 if (dev->irq_enabled) 1566 if (dev->irq_enabled)
1567 drm_irq_uninstall(dev); 1567 drm_irq_uninstall(dev);
1568 1568
1569#if __OS_HAS_AGP 1569#if IS_ENABLED(CONFIG_AGP)
1570 if (dev_priv->flags & RADEON_IS_AGP) { 1570 if (dev_priv->flags & RADEON_IS_AGP) {
1571 if (dev_priv->cp_ring != NULL) { 1571 if (dev_priv->cp_ring != NULL) {
1572 drm_legacy_ioremapfree(dev_priv->cp_ring, dev); 1572 drm_legacy_ioremapfree(dev_priv->cp_ring, dev);
@@ -1625,7 +1625,7 @@ static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_pri
1625 1625
1626 DRM_DEBUG("Starting radeon_do_resume_cp()\n"); 1626 DRM_DEBUG("Starting radeon_do_resume_cp()\n");
1627 1627
1628#if __OS_HAS_AGP 1628#if IS_ENABLED(CONFIG_AGP)
1629 if (dev_priv->flags & RADEON_IS_AGP) { 1629 if (dev_priv->flags & RADEON_IS_AGP) {
1630 /* Turn off PCI GART */ 1630 /* Turn off PCI GART */
1631 radeon_set_pcigart(dev_priv, 0); 1631 radeon_set_pcigart(dev_priv, 0);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index f3f562f6d848..c566993a2ec3 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1197,7 +1197,7 @@ static void radeon_check_arguments(struct radeon_device *rdev)
1197 * radeon_switcheroo_set_state - set switcheroo state 1197 * radeon_switcheroo_set_state - set switcheroo state
1198 * 1198 *
1199 * @pdev: pci dev pointer 1199 * @pdev: pci dev pointer
1200 * @state: vga switcheroo state 1200 * @state: vga_switcheroo state
1201 * 1201 *
1202 * Callback for the switcheroo driver. Suspends or resumes the 1202 * Callback for the switcheroo driver. Suspends or resumes the
1203 * the asics before or after it is powered up using ACPI methods. 1203 * the asics before or after it is powered up using ACPI methods.
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 6743174acdbc..a8d9927ed9eb 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -323,7 +323,8 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
323 */ 323 */
324 if (update_pending && 324 if (update_pending &&
325 (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 0, 325 (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 0,
326 &vpos, &hpos, NULL, NULL)) && 326 &vpos, &hpos, NULL, NULL,
327 &rdev->mode_info.crtcs[crtc_id]->base.hwmode)) &&
327 ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) || 328 ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
328 (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) { 329 (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
329 /* crtc didn't flip in this target vblank interval, 330 /* crtc didn't flip in this target vblank interval,
@@ -1788,8 +1789,10 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
1788 * unknown small number of scanlines wrt. real scanout position. 1789 * unknown small number of scanlines wrt. real scanout position.
1789 * 1790 *
1790 */ 1791 */
1791int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags, 1792int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
1792 int *vpos, int *hpos, ktime_t *stime, ktime_t *etime) 1793 unsigned int flags, int *vpos, int *hpos,
1794 ktime_t *stime, ktime_t *etime,
1795 const struct drm_display_mode *mode)
1793{ 1796{
1794 u32 stat_crtc = 0, vbl = 0, position = 0; 1797 u32 stat_crtc = 0, vbl = 0, position = 0;
1795 int vbl_start, vbl_end, vtotal, ret = 0; 1798 int vbl_start, vbl_end, vtotal, ret = 0;
@@ -1804,42 +1807,42 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
1804 *stime = ktime_get(); 1807 *stime = ktime_get();
1805 1808
1806 if (ASIC_IS_DCE4(rdev)) { 1809 if (ASIC_IS_DCE4(rdev)) {
1807 if (crtc == 0) { 1810 if (pipe == 0) {
1808 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 1811 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
1809 EVERGREEN_CRTC0_REGISTER_OFFSET); 1812 EVERGREEN_CRTC0_REGISTER_OFFSET);
1810 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 1813 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
1811 EVERGREEN_CRTC0_REGISTER_OFFSET); 1814 EVERGREEN_CRTC0_REGISTER_OFFSET);
1812 ret |= DRM_SCANOUTPOS_VALID; 1815 ret |= DRM_SCANOUTPOS_VALID;
1813 } 1816 }
1814 if (crtc == 1) { 1817 if (pipe == 1) {
1815 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 1818 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
1816 EVERGREEN_CRTC1_REGISTER_OFFSET); 1819 EVERGREEN_CRTC1_REGISTER_OFFSET);
1817 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 1820 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
1818 EVERGREEN_CRTC1_REGISTER_OFFSET); 1821 EVERGREEN_CRTC1_REGISTER_OFFSET);
1819 ret |= DRM_SCANOUTPOS_VALID; 1822 ret |= DRM_SCANOUTPOS_VALID;
1820 } 1823 }
1821 if (crtc == 2) { 1824 if (pipe == 2) {
1822 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 1825 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
1823 EVERGREEN_CRTC2_REGISTER_OFFSET); 1826 EVERGREEN_CRTC2_REGISTER_OFFSET);
1824 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 1827 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
1825 EVERGREEN_CRTC2_REGISTER_OFFSET); 1828 EVERGREEN_CRTC2_REGISTER_OFFSET);
1826 ret |= DRM_SCANOUTPOS_VALID; 1829 ret |= DRM_SCANOUTPOS_VALID;
1827 } 1830 }
1828 if (crtc == 3) { 1831 if (pipe == 3) {
1829 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 1832 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
1830 EVERGREEN_CRTC3_REGISTER_OFFSET); 1833 EVERGREEN_CRTC3_REGISTER_OFFSET);
1831 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 1834 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
1832 EVERGREEN_CRTC3_REGISTER_OFFSET); 1835 EVERGREEN_CRTC3_REGISTER_OFFSET);
1833 ret |= DRM_SCANOUTPOS_VALID; 1836 ret |= DRM_SCANOUTPOS_VALID;
1834 } 1837 }
1835 if (crtc == 4) { 1838 if (pipe == 4) {
1836 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 1839 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
1837 EVERGREEN_CRTC4_REGISTER_OFFSET); 1840 EVERGREEN_CRTC4_REGISTER_OFFSET);
1838 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 1841 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
1839 EVERGREEN_CRTC4_REGISTER_OFFSET); 1842 EVERGREEN_CRTC4_REGISTER_OFFSET);
1840 ret |= DRM_SCANOUTPOS_VALID; 1843 ret |= DRM_SCANOUTPOS_VALID;
1841 } 1844 }
1842 if (crtc == 5) { 1845 if (pipe == 5) {
1843 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 1846 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
1844 EVERGREEN_CRTC5_REGISTER_OFFSET); 1847 EVERGREEN_CRTC5_REGISTER_OFFSET);
1845 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 1848 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
@@ -1847,19 +1850,19 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
1847 ret |= DRM_SCANOUTPOS_VALID; 1850 ret |= DRM_SCANOUTPOS_VALID;
1848 } 1851 }
1849 } else if (ASIC_IS_AVIVO(rdev)) { 1852 } else if (ASIC_IS_AVIVO(rdev)) {
1850 if (crtc == 0) { 1853 if (pipe == 0) {
1851 vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END); 1854 vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
1852 position = RREG32(AVIVO_D1CRTC_STATUS_POSITION); 1855 position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
1853 ret |= DRM_SCANOUTPOS_VALID; 1856 ret |= DRM_SCANOUTPOS_VALID;
1854 } 1857 }
1855 if (crtc == 1) { 1858 if (pipe == 1) {
1856 vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END); 1859 vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
1857 position = RREG32(AVIVO_D2CRTC_STATUS_POSITION); 1860 position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
1858 ret |= DRM_SCANOUTPOS_VALID; 1861 ret |= DRM_SCANOUTPOS_VALID;
1859 } 1862 }
1860 } else { 1863 } else {
1861 /* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */ 1864 /* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
1862 if (crtc == 0) { 1865 if (pipe == 0) {
1863 /* Assume vbl_end == 0, get vbl_start from 1866 /* Assume vbl_end == 0, get vbl_start from
1864 * upper 16 bits. 1867 * upper 16 bits.
1865 */ 1868 */
@@ -1873,7 +1876,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
1873 1876
1874 ret |= DRM_SCANOUTPOS_VALID; 1877 ret |= DRM_SCANOUTPOS_VALID;
1875 } 1878 }
1876 if (crtc == 1) { 1879 if (pipe == 1) {
1877 vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) & 1880 vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
1878 RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT; 1881 RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
1879 position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; 1882 position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
@@ -1904,7 +1907,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
1904 } 1907 }
1905 else { 1908 else {
1906 /* No: Fake something reasonable which gives at least ok results. */ 1909 /* No: Fake something reasonable which gives at least ok results. */
1907 vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay; 1910 vbl_start = mode->crtc_vdisplay;
1908 vbl_end = 0; 1911 vbl_end = 0;
1909 } 1912 }
1910 1913
@@ -1920,7 +1923,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
1920 1923
1921 /* Inside "upper part" of vblank area? Apply corrective offset if so: */ 1924 /* Inside "upper part" of vblank area? Apply corrective offset if so: */
1922 if (in_vbl && (*vpos >= vbl_start)) { 1925 if (in_vbl && (*vpos >= vbl_start)) {
1923 vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal; 1926 vtotal = mode->crtc_vtotal;
1924 *vpos = *vpos - vtotal; 1927 *vpos = *vpos - vtotal;
1925 } 1928 }
1926 1929
@@ -1942,8 +1945,8 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
1942 * We only do this if DRM_CALLED_FROM_VBLIRQ. 1945 * We only do this if DRM_CALLED_FROM_VBLIRQ.
1943 */ 1946 */
1944 if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) { 1947 if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
1945 vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay; 1948 vbl_start = mode->crtc_vdisplay;
1946 vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal; 1949 vtotal = mode->crtc_vtotal;
1947 1950
1948 if (vbl_start - *vpos < vtotal / 100) { 1951 if (vbl_start - *vpos < vtotal / 100) {
1949 *vpos -= vtotal; 1952 *vpos -= vtotal;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 5751446677d3..5b6a6f5b3619 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -105,10 +105,10 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
105 struct drm_file *file_priv); 105 struct drm_file *file_priv);
106int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon); 106int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
107int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon); 107int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
108u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc); 108u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
109int radeon_enable_vblank_kms(struct drm_device *dev, int crtc); 109int radeon_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
110void radeon_disable_vblank_kms(struct drm_device *dev, int crtc); 110void radeon_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
111int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, 111int radeon_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
112 int *max_error, 112 int *max_error,
113 struct timeval *vblank_time, 113 struct timeval *vblank_time,
114 unsigned flags); 114 unsigned flags);
@@ -124,10 +124,10 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
124struct dma_buf *radeon_gem_prime_export(struct drm_device *dev, 124struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
125 struct drm_gem_object *gobj, 125 struct drm_gem_object *gobj,
126 int flags); 126 int flags);
127extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, 127extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int crtc,
128 unsigned int flags, 128 unsigned int flags, int *vpos, int *hpos,
129 int *vpos, int *hpos, ktime_t *stime, 129 ktime_t *stime, ktime_t *etime,
130 ktime_t *etime); 130 const struct drm_display_mode *mode);
131extern bool radeon_is_px(struct drm_device *dev); 131extern bool radeon_is_px(struct drm_device *dev);
132extern const struct drm_ioctl_desc radeon_ioctls_kms[]; 132extern const struct drm_ioctl_desc radeon_ioctls_kms[];
133extern int radeon_max_kms_ioctl; 133extern int radeon_max_kms_ioctl;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 46bd3938282c..0caafc7a6e17 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -404,9 +404,9 @@ extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *
404extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv); 404extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
405 405
406extern void radeon_do_release(struct drm_device * dev); 406extern void radeon_do_release(struct drm_device * dev);
407extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc); 407extern u32 radeon_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
408extern int radeon_enable_vblank(struct drm_device *dev, int crtc); 408extern int radeon_enable_vblank(struct drm_device *dev, unsigned int pipe);
409extern void radeon_disable_vblank(struct drm_device *dev, int crtc); 409extern void radeon_disable_vblank(struct drm_device *dev, unsigned int pipe);
410extern irqreturn_t radeon_driver_irq_handler(int irq, void *arg); 410extern irqreturn_t radeon_driver_irq_handler(int irq, void *arg);
411extern void radeon_driver_irq_preinstall(struct drm_device * dev); 411extern void radeon_driver_irq_preinstall(struct drm_device * dev);
412extern int radeon_driver_irq_postinstall(struct drm_device *dev); 412extern int radeon_driver_irq_postinstall(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index 244b19bab2e7..688afb62f7c4 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -62,12 +62,12 @@ static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
62 RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg); 62 RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
63} 63}
64 64
65int radeon_enable_vblank(struct drm_device *dev, int crtc) 65int radeon_enable_vblank(struct drm_device *dev, unsigned int pipe)
66{ 66{
67 drm_radeon_private_t *dev_priv = dev->dev_private; 67 drm_radeon_private_t *dev_priv = dev->dev_private;
68 68
69 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) { 69 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
70 switch (crtc) { 70 switch (pipe) {
71 case 0: 71 case 0:
72 r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 1); 72 r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 1);
73 break; 73 break;
@@ -75,12 +75,12 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc)
75 r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 1); 75 r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 1);
76 break; 76 break;
77 default: 77 default:
78 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", 78 DRM_ERROR("tried to enable vblank on non-existent crtc %u\n",
79 crtc); 79 pipe);
80 return -EINVAL; 80 return -EINVAL;
81 } 81 }
82 } else { 82 } else {
83 switch (crtc) { 83 switch (pipe) {
84 case 0: 84 case 0:
85 radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1); 85 radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1);
86 break; 86 break;
@@ -88,8 +88,8 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc)
88 radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1); 88 radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1);
89 break; 89 break;
90 default: 90 default:
91 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", 91 DRM_ERROR("tried to enable vblank on non-existent crtc %u\n",
92 crtc); 92 pipe);
93 return -EINVAL; 93 return -EINVAL;
94 } 94 }
95 } 95 }
@@ -97,12 +97,12 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc)
97 return 0; 97 return 0;
98} 98}
99 99
100void radeon_disable_vblank(struct drm_device *dev, int crtc) 100void radeon_disable_vblank(struct drm_device *dev, unsigned int pipe)
101{ 101{
102 drm_radeon_private_t *dev_priv = dev->dev_private; 102 drm_radeon_private_t *dev_priv = dev->dev_private;
103 103
104 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) { 104 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
105 switch (crtc) { 105 switch (pipe) {
106 case 0: 106 case 0:
107 r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 0); 107 r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 0);
108 break; 108 break;
@@ -110,12 +110,12 @@ void radeon_disable_vblank(struct drm_device *dev, int crtc)
110 r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 0); 110 r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 0);
111 break; 111 break;
112 default: 112 default:
113 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", 113 DRM_ERROR("tried to enable vblank on non-existent crtc %u\n",
114 crtc); 114 pipe);
115 break; 115 break;
116 } 116 }
117 } else { 117 } else {
118 switch (crtc) { 118 switch (pipe) {
119 case 0: 119 case 0:
120 radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0); 120 radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0);
121 break; 121 break;
@@ -123,8 +123,8 @@ void radeon_disable_vblank(struct drm_device *dev, int crtc)
123 radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0); 123 radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0);
124 break; 124 break;
125 default: 125 default:
126 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", 126 DRM_ERROR("tried to enable vblank on non-existent crtc %u\n",
127 crtc); 127 pipe);
128 break; 128 break;
129 } 129 }
130 } 130 }
@@ -255,7 +255,7 @@ static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
255 return ret; 255 return ret;
256} 256}
257 257
258u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc) 258u32 radeon_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
259{ 259{
260 drm_radeon_private_t *dev_priv = dev->dev_private; 260 drm_radeon_private_t *dev_priv = dev->dev_private;
261 261
@@ -264,18 +264,18 @@ u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc)
264 return -EINVAL; 264 return -EINVAL;
265 } 265 }
266 266
267 if (crtc < 0 || crtc > 1) { 267 if (pipe > 1) {
268 DRM_ERROR("Invalid crtc %d\n", crtc); 268 DRM_ERROR("Invalid crtc %u\n", pipe);
269 return -EINVAL; 269 return -EINVAL;
270 } 270 }
271 271
272 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) { 272 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
273 if (crtc == 0) 273 if (pipe == 0)
274 return RADEON_READ(R500_D1CRTC_FRAME_COUNT); 274 return RADEON_READ(R500_D1CRTC_FRAME_COUNT);
275 else 275 else
276 return RADEON_READ(R500_D2CRTC_FRAME_COUNT); 276 return RADEON_READ(R500_D2CRTC_FRAME_COUNT);
277 } else { 277 } else {
278 if (crtc == 0) 278 if (pipe == 0)
279 return RADEON_READ(RADEON_CRTC_CRNT_FRAME); 279 return RADEON_READ(RADEON_CRTC_CRNT_FRAME);
280 else 280 else
281 return RADEON_READ(RADEON_CRTC2_CRNT_FRAME); 281 return RADEON_READ(RADEON_CRTC2_CRNT_FRAME);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 0e932bf932c1..0ec6fcca16d3 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -181,7 +181,9 @@ static void radeon_set_filp_rights(struct drm_device *dev,
181 struct drm_file *applier, 181 struct drm_file *applier,
182 uint32_t *value) 182 uint32_t *value)
183{ 183{
184 mutex_lock(&dev->struct_mutex); 184 struct radeon_device *rdev = dev->dev_private;
185
186 mutex_lock(&rdev->gem.mutex);
185 if (*value == 1) { 187 if (*value == 1) {
186 /* wants rights */ 188 /* wants rights */
187 if (!*owner) 189 if (!*owner)
@@ -192,7 +194,7 @@ static void radeon_set_filp_rights(struct drm_device *dev,
192 *owner = NULL; 194 *owner = NULL;
193 } 195 }
194 *value = *owner == applier ? 1 : 0; 196 *value = *owner == applier ? 1 : 0;
195 mutex_unlock(&dev->struct_mutex); 197 mutex_unlock(&rdev->gem.mutex);
196} 198}
197 199
198/* 200/*
@@ -602,7 +604,7 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
602 * 604 *
603 * @dev: drm dev pointer 605 * @dev: drm dev pointer
604 * 606 *
605 * Switch vga switcheroo state after last close (all asics). 607 * Switch vga_switcheroo state after last close (all asics).
606 */ 608 */
607void radeon_driver_lastclose_kms(struct drm_device *dev) 609void radeon_driver_lastclose_kms(struct drm_device *dev)
608{ 610{
@@ -727,10 +729,14 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
727 struct drm_file *file_priv) 729 struct drm_file *file_priv)
728{ 730{
729 struct radeon_device *rdev = dev->dev_private; 731 struct radeon_device *rdev = dev->dev_private;
732
733 mutex_lock(&rdev->gem.mutex);
730 if (rdev->hyperz_filp == file_priv) 734 if (rdev->hyperz_filp == file_priv)
731 rdev->hyperz_filp = NULL; 735 rdev->hyperz_filp = NULL;
732 if (rdev->cmask_filp == file_priv) 736 if (rdev->cmask_filp == file_priv)
733 rdev->cmask_filp = NULL; 737 rdev->cmask_filp = NULL;
738 mutex_unlock(&rdev->gem.mutex);
739
734 radeon_uvd_free_handles(rdev, file_priv); 740 radeon_uvd_free_handles(rdev, file_priv);
735 radeon_vce_free_handles(rdev, file_priv); 741 radeon_vce_free_handles(rdev, file_priv);
736} 742}
@@ -844,92 +850,52 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
844 /* Helper routine in DRM core does all the work: */ 850 /* Helper routine in DRM core does all the work: */
845 return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, 851 return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
846 vblank_time, flags, 852 vblank_time, flags,
847 drmcrtc, &drmcrtc->hwmode); 853 &drmcrtc->hwmode);
848}
849
850#define KMS_INVALID_IOCTL(name) \
851static int name(struct drm_device *dev, void *data, struct drm_file \
852 *file_priv) \
853{ \
854 DRM_ERROR("invalid ioctl with kms %s\n", __func__); \
855 return -EINVAL; \
856} 854}
857 855
858/*
859 * All these ioctls are invalid in kms world.
860 */
861KMS_INVALID_IOCTL(radeon_cp_init_kms)
862KMS_INVALID_IOCTL(radeon_cp_start_kms)
863KMS_INVALID_IOCTL(radeon_cp_stop_kms)
864KMS_INVALID_IOCTL(radeon_cp_reset_kms)
865KMS_INVALID_IOCTL(radeon_cp_idle_kms)
866KMS_INVALID_IOCTL(radeon_cp_resume_kms)
867KMS_INVALID_IOCTL(radeon_engine_reset_kms)
868KMS_INVALID_IOCTL(radeon_fullscreen_kms)
869KMS_INVALID_IOCTL(radeon_cp_swap_kms)
870KMS_INVALID_IOCTL(radeon_cp_clear_kms)
871KMS_INVALID_IOCTL(radeon_cp_vertex_kms)
872KMS_INVALID_IOCTL(radeon_cp_indices_kms)
873KMS_INVALID_IOCTL(radeon_cp_texture_kms)
874KMS_INVALID_IOCTL(radeon_cp_stipple_kms)
875KMS_INVALID_IOCTL(radeon_cp_indirect_kms)
876KMS_INVALID_IOCTL(radeon_cp_vertex2_kms)
877KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms)
878KMS_INVALID_IOCTL(radeon_cp_getparam_kms)
879KMS_INVALID_IOCTL(radeon_cp_flip_kms)
880KMS_INVALID_IOCTL(radeon_mem_alloc_kms)
881KMS_INVALID_IOCTL(radeon_mem_free_kms)
882KMS_INVALID_IOCTL(radeon_mem_init_heap_kms)
883KMS_INVALID_IOCTL(radeon_irq_emit_kms)
884KMS_INVALID_IOCTL(radeon_irq_wait_kms)
885KMS_INVALID_IOCTL(radeon_cp_setparam_kms)
886KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
887KMS_INVALID_IOCTL(radeon_surface_free_kms)
888
889
890const struct drm_ioctl_desc radeon_ioctls_kms[] = { 856const struct drm_ioctl_desc radeon_ioctls_kms[] = {
891 DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 857 DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
892 DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 858 DRM_IOCTL_DEF_DRV(RADEON_CP_START, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
893 DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 859 DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
894 DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 860 DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
895 DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH), 861 DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, drm_invalid_op, DRM_AUTH),
896 DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH), 862 DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, drm_invalid_op, DRM_AUTH),
897 DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH), 863 DRM_IOCTL_DEF_DRV(RADEON_RESET, drm_invalid_op, DRM_AUTH),
898 DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH), 864 DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, drm_invalid_op, DRM_AUTH),
899 DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH), 865 DRM_IOCTL_DEF_DRV(RADEON_SWAP, drm_invalid_op, DRM_AUTH),
900 DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH), 866 DRM_IOCTL_DEF_DRV(RADEON_CLEAR, drm_invalid_op, DRM_AUTH),
901 DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH), 867 DRM_IOCTL_DEF_DRV(RADEON_VERTEX, drm_invalid_op, DRM_AUTH),
902 DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH), 868 DRM_IOCTL_DEF_DRV(RADEON_INDICES, drm_invalid_op, DRM_AUTH),
903 DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH), 869 DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, drm_invalid_op, DRM_AUTH),
904 DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH), 870 DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, drm_invalid_op, DRM_AUTH),
905 DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 871 DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
906 DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH), 872 DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, drm_invalid_op, DRM_AUTH),
907 DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH), 873 DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, drm_invalid_op, DRM_AUTH),
908 DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH), 874 DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, drm_invalid_op, DRM_AUTH),
909 DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH), 875 DRM_IOCTL_DEF_DRV(RADEON_FLIP, drm_invalid_op, DRM_AUTH),
910 DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH), 876 DRM_IOCTL_DEF_DRV(RADEON_ALLOC, drm_invalid_op, DRM_AUTH),
911 DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH), 877 DRM_IOCTL_DEF_DRV(RADEON_FREE, drm_invalid_op, DRM_AUTH),
912 DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 878 DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
913 DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH), 879 DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, drm_invalid_op, DRM_AUTH),
914 DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH), 880 DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, drm_invalid_op, DRM_AUTH),
915 DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH), 881 DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, drm_invalid_op, DRM_AUTH),
916 DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH), 882 DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, drm_invalid_op, DRM_AUTH),
917 DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH), 883 DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, drm_invalid_op, DRM_AUTH),
918 /* KMS */ 884 /* KMS */
919 DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 885 DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
920 DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 886 DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
921 DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 887 DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
922 DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 888 DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
923 DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED), 889 DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH),
924 DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED), 890 DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
925 DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 891 DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
926 DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 892 DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
927 DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 893 DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
928 DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 894 DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
929 DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 895 DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
930 DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 896 DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
931 DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 897 DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
932 DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 898 DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
933 DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 899 DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
934}; 900};
935int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms); 901int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 457b026a0972..830e171c3a9e 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -874,10 +874,10 @@ extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
874 int x, int y); 874 int x, int y);
875extern void radeon_cursor_reset(struct drm_crtc *crtc); 875extern void radeon_cursor_reset(struct drm_crtc *crtc);
876 876
877extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, 877extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
878 unsigned int flags, 878 unsigned int flags, int *vpos, int *hpos,
879 int *vpos, int *hpos, ktime_t *stime, 879 ktime_t *stime, ktime_t *etime,
880 ktime_t *etime); 880 const struct drm_display_mode *mode);
881 881
882extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev); 882extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
883extern struct edid * 883extern struct edid *
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 5feee3b4c557..6d80dde23400 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1757,7 +1757,9 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
1757 */ 1757 */
1758 for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { 1758 for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
1759 if (rdev->pm.active_crtcs & (1 << crtc)) { 1759 if (rdev->pm.active_crtcs & (1 << crtc)) {
1760 vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL); 1760 vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0,
1761 &vpos, &hpos, NULL, NULL,
1762 &rdev->mode_info.crtcs[crtc]->base.hwmode);
1761 if ((vbl_status & DRM_SCANOUTPOS_VALID) && 1763 if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
1762 !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK)) 1764 !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK))
1763 in_vbl = false; 1765 in_vbl = false;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 06ac59fe332a..e34307459e50 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -144,7 +144,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
144 man->available_caching = TTM_PL_MASK_CACHING; 144 man->available_caching = TTM_PL_MASK_CACHING;
145 man->default_caching = TTM_PL_FLAG_CACHED; 145 man->default_caching = TTM_PL_FLAG_CACHED;
146 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; 146 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
147#if __OS_HAS_AGP 147#if IS_ENABLED(CONFIG_AGP)
148 if (rdev->flags & RADEON_IS_AGP) { 148 if (rdev->flags & RADEON_IS_AGP) {
149 if (!rdev->ddev->agp) { 149 if (!rdev->ddev->agp) {
150 DRM_ERROR("AGP is not enabled for memory type %u\n", 150 DRM_ERROR("AGP is not enabled for memory type %u\n",
@@ -461,7 +461,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
461 /* system memory */ 461 /* system memory */
462 return 0; 462 return 0;
463 case TTM_PL_TT: 463 case TTM_PL_TT:
464#if __OS_HAS_AGP 464#if IS_ENABLED(CONFIG_AGP)
465 if (rdev->flags & RADEON_IS_AGP) { 465 if (rdev->flags & RADEON_IS_AGP) {
466 /* RADEON_IS_AGP is set only if AGP is active */ 466 /* RADEON_IS_AGP is set only if AGP is active */
467 mem->bus.offset = mem->start << PAGE_SHIFT; 467 mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -680,7 +680,7 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
680 struct radeon_ttm_tt *gtt; 680 struct radeon_ttm_tt *gtt;
681 681
682 rdev = radeon_get_rdev(bdev); 682 rdev = radeon_get_rdev(bdev);
683#if __OS_HAS_AGP 683#if IS_ENABLED(CONFIG_AGP)
684 if (rdev->flags & RADEON_IS_AGP) { 684 if (rdev->flags & RADEON_IS_AGP) {
685 return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge, 685 return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
686 size, page_flags, dummy_read_page); 686 size, page_flags, dummy_read_page);
@@ -736,7 +736,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
736 } 736 }
737 737
738 rdev = radeon_get_rdev(ttm->bdev); 738 rdev = radeon_get_rdev(ttm->bdev);
739#if __OS_HAS_AGP 739#if IS_ENABLED(CONFIG_AGP)
740 if (rdev->flags & RADEON_IS_AGP) { 740 if (rdev->flags & RADEON_IS_AGP) {
741 return ttm_agp_tt_populate(ttm); 741 return ttm_agp_tt_populate(ttm);
742 } 742 }
@@ -787,7 +787,7 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
787 return; 787 return;
788 788
789 rdev = radeon_get_rdev(ttm->bdev); 789 rdev = radeon_get_rdev(ttm->bdev);
790#if __OS_HAS_AGP 790#if IS_ENABLED(CONFIG_AGP)
791 if (rdev->flags & RADEON_IS_AGP) { 791 if (rdev->flags & RADEON_IS_AGP) {
792 ttm_agp_tt_unpopulate(ttm); 792 ttm_agp_tt_unpopulate(ttm);
793 return; 793 return;
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 11485a4a16ae..d4e0a39568f6 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -1,6 +1,6 @@
1config DRM_RCAR_DU 1config DRM_RCAR_DU
2 tristate "DRM Support for R-Car Display Unit" 2 tristate "DRM Support for R-Car Display Unit"
3 depends on DRM && ARM && HAVE_DMA_ATTRS 3 depends on DRM && ARM && HAVE_DMA_ATTRS && OF
4 depends on ARCH_SHMOBILE || COMPILE_TEST 4 depends on ARCH_SHMOBILE || COMPILE_TEST
5 select DRM_KMS_HELPER 5 select DRM_KMS_HELPER
6 select DRM_KMS_CMA_HELPER 6 select DRM_KMS_CMA_HELPER
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 780ca11512ba..40422f6b645e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -84,16 +84,17 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = {
84 .num_lvds = 2, 84 .num_lvds = 2,
85}; 85};
86 86
87/* M2-W (r8a7791) and M2-N (r8a7793) are identical */
87static const struct rcar_du_device_info rcar_du_r8a7791_info = { 88static const struct rcar_du_device_info rcar_du_r8a7791_info = {
88 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 89 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
89 | RCAR_DU_FEATURE_EXT_CTRL_REGS, 90 | RCAR_DU_FEATURE_EXT_CTRL_REGS,
90 .num_crtcs = 2, 91 .num_crtcs = 2,
91 .routes = { 92 .routes = {
92 /* R8A7791 has one RGB output, one LVDS output and one 93 /* R8A779[13] has one RGB output, one LVDS output and one
93 * (currently unsupported) TCON output. 94 * (currently unsupported) TCON output.
94 */ 95 */
95 [RCAR_DU_OUTPUT_DPAD0] = { 96 [RCAR_DU_OUTPUT_DPAD0] = {
96 .possible_crtcs = BIT(1), 97 .possible_crtcs = BIT(1) | BIT(0),
97 .encoder_type = DRM_MODE_ENCODER_NONE, 98 .encoder_type = DRM_MODE_ENCODER_NONE,
98 .port = 0, 99 .port = 0,
99 }, 100 },
@@ -106,19 +107,34 @@ static const struct rcar_du_device_info rcar_du_r8a7791_info = {
106 .num_lvds = 1, 107 .num_lvds = 1,
107}; 108};
108 109
109static const struct platform_device_id rcar_du_id_table[] = { 110static const struct rcar_du_device_info rcar_du_r8a7794_info = {
110 { "rcar-du-r8a7779", (kernel_ulong_t)&rcar_du_r8a7779_info }, 111 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
111 { "rcar-du-r8a7790", (kernel_ulong_t)&rcar_du_r8a7790_info }, 112 | RCAR_DU_FEATURE_EXT_CTRL_REGS,
112 { "rcar-du-r8a7791", (kernel_ulong_t)&rcar_du_r8a7791_info }, 113 .num_crtcs = 2,
113 { } 114 .routes = {
115 /* R8A7794 has two RGB outputs and one (currently unsupported)
116 * TCON output.
117 */
118 [RCAR_DU_OUTPUT_DPAD0] = {
119 .possible_crtcs = BIT(0),
120 .encoder_type = DRM_MODE_ENCODER_NONE,
121 .port = 0,
122 },
123 [RCAR_DU_OUTPUT_DPAD1] = {
124 .possible_crtcs = BIT(1),
125 .encoder_type = DRM_MODE_ENCODER_NONE,
126 .port = 1,
127 },
128 },
129 .num_lvds = 0,
114}; 130};
115 131
116MODULE_DEVICE_TABLE(platform, rcar_du_id_table);
117
118static const struct of_device_id rcar_du_of_table[] = { 132static const struct of_device_id rcar_du_of_table[] = {
119 { .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info }, 133 { .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
120 { .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info }, 134 { .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
121 { .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info }, 135 { .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info },
136 { .compatible = "renesas,du-r8a7793", .data = &rcar_du_r8a7791_info },
137 { .compatible = "renesas,du-r8a7794", .data = &rcar_du_r8a7794_info },
122 { } 138 { }
123}; 139};
124 140
@@ -167,8 +183,7 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
167 init_waitqueue_head(&rcdu->commit.wait); 183 init_waitqueue_head(&rcdu->commit.wait);
168 184
169 rcdu->dev = &pdev->dev; 185 rcdu->dev = &pdev->dev;
170 rcdu->info = np ? of_match_device(rcar_du_of_table, rcdu->dev)->data 186 rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data;
171 : (void *)platform_get_device_id(pdev)->driver_data;
172 rcdu->ddev = dev; 187 rcdu->ddev = dev;
173 dev->dev_private = rcdu; 188 dev->dev_private = rcdu;
174 189
@@ -221,20 +236,20 @@ static void rcar_du_lastclose(struct drm_device *dev)
221 drm_fbdev_cma_restore_mode(rcdu->fbdev); 236 drm_fbdev_cma_restore_mode(rcdu->fbdev);
222} 237}
223 238
224static int rcar_du_enable_vblank(struct drm_device *dev, int crtc) 239static int rcar_du_enable_vblank(struct drm_device *dev, unsigned int pipe)
225{ 240{
226 struct rcar_du_device *rcdu = dev->dev_private; 241 struct rcar_du_device *rcdu = dev->dev_private;
227 242
228 rcar_du_crtc_enable_vblank(&rcdu->crtcs[crtc], true); 243 rcar_du_crtc_enable_vblank(&rcdu->crtcs[pipe], true);
229 244
230 return 0; 245 return 0;
231} 246}
232 247
233static void rcar_du_disable_vblank(struct drm_device *dev, int crtc) 248static void rcar_du_disable_vblank(struct drm_device *dev, unsigned int pipe)
234{ 249{
235 struct rcar_du_device *rcdu = dev->dev_private; 250 struct rcar_du_device *rcdu = dev->dev_private;
236 251
237 rcar_du_crtc_enable_vblank(&rcdu->crtcs[crtc], false); 252 rcar_du_crtc_enable_vblank(&rcdu->crtcs[pipe], false);
238} 253}
239 254
240static const struct file_operations rcar_du_fops = { 255static const struct file_operations rcar_du_fops = {
@@ -259,7 +274,7 @@ static struct drm_driver rcar_du_driver = {
259 .preclose = rcar_du_preclose, 274 .preclose = rcar_du_preclose,
260 .lastclose = rcar_du_lastclose, 275 .lastclose = rcar_du_lastclose,
261 .set_busid = drm_platform_set_busid, 276 .set_busid = drm_platform_set_busid,
262 .get_vblank_counter = drm_vblank_count, 277 .get_vblank_counter = drm_vblank_no_hw_counter,
263 .enable_vblank = rcar_du_enable_vblank, 278 .enable_vblank = rcar_du_enable_vblank,
264 .disable_vblank = rcar_du_disable_vblank, 279 .disable_vblank = rcar_du_disable_vblank,
265 .gem_free_object = drm_gem_cma_free_object, 280 .gem_free_object = drm_gem_cma_free_object,
@@ -340,7 +355,6 @@ static struct platform_driver rcar_du_platform_driver = {
340 .pm = &rcar_du_pm_ops, 355 .pm = &rcar_du_pm_ops,
341 .of_match_table = rcar_du_of_table, 356 .of_match_table = rcar_du_of_table,
342 }, 357 },
343 .id_table = rcar_du_id_table,
344}; 358};
345 359
346module_platform_driver(rcar_du_platform_driver); 360module_platform_driver(rcar_du_platform_driver);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index 7fd39a7d91c8..8e2ffe025153 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -49,9 +49,10 @@ static void rcar_du_group_setup_defr8(struct rcar_du_group *rgrp)
49 u32 defr8 = DEFR8_CODE | DEFR8_DEFE8; 49 u32 defr8 = DEFR8_CODE | DEFR8_DEFE8;
50 50
51 /* The DEFR8 register for the first group also controls RGB output 51 /* The DEFR8 register for the first group also controls RGB output
52 * routing to DPAD0 52 * routing to DPAD0 for DU instances that support it.
53 */ 53 */
54 if (rgrp->index == 0) 54 if (rgrp->dev->info->routes[RCAR_DU_OUTPUT_DPAD0].possible_crtcs > 1 &&
55 rgrp->index == 0)
55 defr8 |= DEFR8_DRGBS_DU(rgrp->dev->dpad0_source); 56 defr8 |= DEFR8_DRGBS_DU(rgrp->dev->dpad0_source);
56 57
57 rcar_du_group_write(rgrp, DEFR8, defr8); 58 rcar_du_group_write(rgrp, DEFR8, defr8);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 56518eb1269a..ca12e8ca5552 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -456,7 +456,7 @@ static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
456 /* Apply the atomic update. */ 456 /* Apply the atomic update. */
457 drm_atomic_helper_commit_modeset_disables(dev, old_state); 457 drm_atomic_helper_commit_modeset_disables(dev, old_state);
458 drm_atomic_helper_commit_modeset_enables(dev, old_state); 458 drm_atomic_helper_commit_modeset_enables(dev, old_state);
459 drm_atomic_helper_commit_planes(dev, old_state); 459 drm_atomic_helper_commit_planes(dev, old_state, false);
460 460
461 drm_atomic_helper_wait_for_vblanks(dev, old_state); 461 drm_atomic_helper_wait_for_vblanks(dev, old_state);
462 462
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index c66986414bb4..ffa583712cd9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -273,29 +273,6 @@ static const struct drm_plane_helper_funcs rcar_du_plane_helper_funcs = {
273 .atomic_update = rcar_du_plane_atomic_update, 273 .atomic_update = rcar_du_plane_atomic_update,
274}; 274};
275 275
276static void rcar_du_plane_reset(struct drm_plane *plane)
277{
278 struct rcar_du_plane_state *state;
279
280 if (plane->state && plane->state->fb)
281 drm_framebuffer_unreference(plane->state->fb);
282
283 kfree(plane->state);
284 plane->state = NULL;
285
286 state = kzalloc(sizeof(*state), GFP_KERNEL);
287 if (state == NULL)
288 return;
289
290 state->hwindex = -1;
291 state->alpha = 255;
292 state->colorkey = RCAR_DU_COLORKEY_NONE;
293 state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
294
295 plane->state = &state->state;
296 plane->state->plane = plane;
297}
298
299static struct drm_plane_state * 276static struct drm_plane_state *
300rcar_du_plane_atomic_duplicate_state(struct drm_plane *plane) 277rcar_du_plane_atomic_duplicate_state(struct drm_plane *plane)
301{ 278{
@@ -322,6 +299,28 @@ static void rcar_du_plane_atomic_destroy_state(struct drm_plane *plane,
322 kfree(to_rcar_plane_state(state)); 299 kfree(to_rcar_plane_state(state));
323} 300}
324 301
302static void rcar_du_plane_reset(struct drm_plane *plane)
303{
304 struct rcar_du_plane_state *state;
305
306 if (plane->state) {
307 rcar_du_plane_atomic_destroy_state(plane, plane->state);
308 plane->state = NULL;
309 }
310
311 state = kzalloc(sizeof(*state), GFP_KERNEL);
312 if (state == NULL)
313 return;
314
315 state->hwindex = -1;
316 state->alpha = 255;
317 state->colorkey = RCAR_DU_COLORKEY_NONE;
318 state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
319
320 plane->state = &state->state;
321 plane->state->plane = plane;
322}
323
325static int rcar_du_plane_atomic_set_property(struct drm_plane *plane, 324static int rcar_du_plane_atomic_set_property(struct drm_plane *plane,
326 struct drm_plane_state *state, 325 struct drm_plane_state *state,
327 struct drm_property *property, 326 struct drm_property *property,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 9a0c2911272a..d26e0cc7dc4b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -19,6 +19,7 @@
19#include <drm/drmP.h> 19#include <drm/drmP.h>
20#include <drm/drm_crtc_helper.h> 20#include <drm/drm_crtc_helper.h>
21#include <drm/drm_fb_helper.h> 21#include <drm/drm_fb_helper.h>
22#include <drm/drm_of.h>
22#include <linux/dma-mapping.h> 23#include <linux/dma-mapping.h>
23#include <linux/pm_runtime.h> 24#include <linux/pm_runtime.h>
24#include <linux/module.h> 25#include <linux/module.h>
@@ -103,7 +104,8 @@ static struct drm_crtc *rockchip_crtc_from_pipe(struct drm_device *drm,
103 return NULL; 104 return NULL;
104} 105}
105 106
106static int rockchip_drm_crtc_enable_vblank(struct drm_device *dev, int pipe) 107static int rockchip_drm_crtc_enable_vblank(struct drm_device *dev,
108 unsigned int pipe)
107{ 109{
108 struct rockchip_drm_private *priv = dev->dev_private; 110 struct rockchip_drm_private *priv = dev->dev_private;
109 struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe); 111 struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe);
@@ -115,7 +117,8 @@ static int rockchip_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
115 return 0; 117 return 0;
116} 118}
117 119
118static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev, int pipe) 120static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev,
121 unsigned int pipe)
119{ 122{
120 struct rockchip_drm_private *priv = dev->dev_private; 123 struct rockchip_drm_private *priv = dev->dev_private;
121 struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe); 124 struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe);
@@ -277,7 +280,7 @@ static struct drm_driver rockchip_drm_driver = {
277 .load = rockchip_drm_load, 280 .load = rockchip_drm_load,
278 .unload = rockchip_drm_unload, 281 .unload = rockchip_drm_unload,
279 .lastclose = rockchip_drm_lastclose, 282 .lastclose = rockchip_drm_lastclose,
280 .get_vblank_counter = drm_vblank_count, 283 .get_vblank_counter = drm_vblank_no_hw_counter,
281 .enable_vblank = rockchip_drm_crtc_enable_vblank, 284 .enable_vblank = rockchip_drm_crtc_enable_vblank,
282 .disable_vblank = rockchip_drm_crtc_disable_vblank, 285 .disable_vblank = rockchip_drm_crtc_disable_vblank,
283 .gem_vm_ops = &rockchip_drm_vm_ops, 286 .gem_vm_ops = &rockchip_drm_vm_ops,
@@ -416,29 +419,6 @@ static int compare_of(struct device *dev, void *data)
416 return dev->of_node == np; 419 return dev->of_node == np;
417} 420}
418 421
419static void rockchip_add_endpoints(struct device *dev,
420 struct component_match **match,
421 struct device_node *port)
422{
423 struct device_node *ep, *remote;
424
425 for_each_child_of_node(port, ep) {
426 remote = of_graph_get_remote_port_parent(ep);
427 if (!remote || !of_device_is_available(remote)) {
428 of_node_put(remote);
429 continue;
430 } else if (!of_device_is_available(remote->parent)) {
431 dev_warn(dev, "parent device of %s is not available\n",
432 remote->full_name);
433 of_node_put(remote);
434 continue;
435 }
436
437 component_match_add(dev, match, compare_of, remote);
438 of_node_put(remote);
439 }
440}
441
442static int rockchip_drm_bind(struct device *dev) 422static int rockchip_drm_bind(struct device *dev)
443{ 423{
444 struct drm_device *drm; 424 struct drm_device *drm;
@@ -481,61 +461,14 @@ static const struct component_master_ops rockchip_drm_ops = {
481 461
482static int rockchip_drm_platform_probe(struct platform_device *pdev) 462static int rockchip_drm_platform_probe(struct platform_device *pdev)
483{ 463{
484 struct device *dev = &pdev->dev; 464 int ret = drm_of_component_probe(&pdev->dev, compare_of,
485 struct component_match *match = NULL; 465 &rockchip_drm_ops);
486 struct device_node *np = dev->of_node;
487 struct device_node *port;
488 int i;
489
490 if (!np)
491 return -ENODEV;
492 /*
493 * Bind the crtc ports first, so that
494 * drm_of_find_possible_crtcs called from encoder .bind callbacks
495 * works as expected.
496 */
497 for (i = 0;; i++) {
498 port = of_parse_phandle(np, "ports", i);
499 if (!port)
500 break;
501
502 if (!of_device_is_available(port->parent)) {
503 of_node_put(port);
504 continue;
505 }
506
507 component_match_add(dev, &match, compare_of, port->parent);
508 of_node_put(port);
509 }
510 466
511 if (i == 0) { 467 /* keep compatibility with old code that was returning -ENODEV */
512 dev_err(dev, "missing 'ports' property\n"); 468 if (ret == -EINVAL)
513 return -ENODEV; 469 return -ENODEV;
514 }
515 470
516 if (!match) { 471 return ret;
517 dev_err(dev, "No available vop found for display-subsystem.\n");
518 return -ENODEV;
519 }
520 /*
521 * For each bound crtc, bind the encoders attached to its
522 * remote endpoint.
523 */
524 for (i = 0;; i++) {
525 port = of_parse_phandle(np, "ports", i);
526 if (!port)
527 break;
528
529 if (!of_device_is_available(port->parent)) {
530 of_node_put(port);
531 continue;
532 }
533
534 rockchip_add_endpoints(dev, &match, port);
535 of_node_put(port);
536 }
537
538 return component_master_add_with_match(dev, &rockchip_drm_ops, match);
539} 472}
540 473
541static int rockchip_drm_platform_remove(struct platform_device *pdev) 474static int rockchip_drm_platform_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index a6d9104f7f15..8caea0a33dd8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -79,12 +79,9 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
79int rockchip_gem_mmap_buf(struct drm_gem_object *obj, 79int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
80 struct vm_area_struct *vma) 80 struct vm_area_struct *vma)
81{ 81{
82 struct drm_device *drm = obj->dev;
83 int ret; 82 int ret;
84 83
85 mutex_lock(&drm->struct_mutex);
86 ret = drm_gem_mmap_obj(obj, obj->size, vma); 84 ret = drm_gem_mmap_obj(obj, obj->size, vma);
87 mutex_unlock(&drm->struct_mutex);
88 if (ret) 85 if (ret)
89 return ret; 86 return ret;
90 87
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 666321de7b99..04e66e3751b4 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -231,7 +231,7 @@ static irqreturn_t shmob_drm_irq(int irq, void *arg)
231 return IRQ_HANDLED; 231 return IRQ_HANDLED;
232} 232}
233 233
234static int shmob_drm_enable_vblank(struct drm_device *dev, int crtc) 234static int shmob_drm_enable_vblank(struct drm_device *dev, unsigned int pipe)
235{ 235{
236 struct shmob_drm_device *sdev = dev->dev_private; 236 struct shmob_drm_device *sdev = dev->dev_private;
237 237
@@ -240,7 +240,7 @@ static int shmob_drm_enable_vblank(struct drm_device *dev, int crtc)
240 return 0; 240 return 0;
241} 241}
242 242
243static void shmob_drm_disable_vblank(struct drm_device *dev, int crtc) 243static void shmob_drm_disable_vblank(struct drm_device *dev, unsigned int pipe)
244{ 244{
245 struct shmob_drm_device *sdev = dev->dev_private; 245 struct shmob_drm_device *sdev = dev->dev_private;
246 246
@@ -269,7 +269,7 @@ static struct drm_driver shmob_drm_driver = {
269 .preclose = shmob_drm_preclose, 269 .preclose = shmob_drm_preclose,
270 .set_busid = drm_platform_set_busid, 270 .set_busid = drm_platform_set_busid,
271 .irq_handler = shmob_drm_irq, 271 .irq_handler = shmob_drm_irq,
272 .get_vblank_counter = drm_vblank_count, 272 .get_vblank_counter = drm_vblank_no_hw_counter,
273 .enable_vblank = shmob_drm_enable_vblank, 273 .enable_vblank = shmob_drm_enable_vblank,
274 .disable_vblank = shmob_drm_disable_vblank, 274 .disable_vblank = shmob_drm_disable_vblank,
275 .gem_free_object = drm_gem_cma_free_object, 275 .gem_free_object = drm_gem_cma_free_object,
diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h
index 16f972b2a76a..328f8a750976 100644
--- a/drivers/gpu/drm/sis/sis_drv.h
+++ b/drivers/gpu/drm/sis/sis_drv.h
@@ -67,6 +67,10 @@ typedef struct drm_sis_private {
67 struct idr object_idr; 67 struct idr object_idr;
68} drm_sis_private_t; 68} drm_sis_private_t;
69 69
70struct sis_file_private {
71 struct list_head obj_list;
72};
73
70extern int sis_idle(struct drm_device *dev); 74extern int sis_idle(struct drm_device *dev);
71extern void sis_reclaim_buffers_locked(struct drm_device *dev, 75extern void sis_reclaim_buffers_locked(struct drm_device *dev,
72 struct drm_file *file_priv); 76 struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 018ffc970e96..493c4a3006ad 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -299,7 +299,7 @@ int sti_crtc_vblank_cb(struct notifier_block *nb,
299 return 0; 299 return 0;
300} 300}
301 301
302int sti_crtc_enable_vblank(struct drm_device *dev, int crtc) 302int sti_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe)
303{ 303{
304 struct sti_private *dev_priv = dev->dev_private; 304 struct sti_private *dev_priv = dev->dev_private;
305 struct sti_compositor *compo = dev_priv->compo; 305 struct sti_compositor *compo = dev_priv->compo;
@@ -307,9 +307,9 @@ int sti_crtc_enable_vblank(struct drm_device *dev, int crtc)
307 307
308 DRM_DEBUG_DRIVER("\n"); 308 DRM_DEBUG_DRIVER("\n");
309 309
310 if (sti_vtg_register_client(crtc == STI_MIXER_MAIN ? 310 if (sti_vtg_register_client(pipe == STI_MIXER_MAIN ?
311 compo->vtg_main : compo->vtg_aux, 311 compo->vtg_main : compo->vtg_aux,
312 vtg_vblank_nb, crtc)) { 312 vtg_vblank_nb, pipe)) {
313 DRM_ERROR("Cannot register VTG notifier\n"); 313 DRM_ERROR("Cannot register VTG notifier\n");
314 return -EINVAL; 314 return -EINVAL;
315 } 315 }
@@ -318,7 +318,7 @@ int sti_crtc_enable_vblank(struct drm_device *dev, int crtc)
318} 318}
319EXPORT_SYMBOL(sti_crtc_enable_vblank); 319EXPORT_SYMBOL(sti_crtc_enable_vblank);
320 320
321void sti_crtc_disable_vblank(struct drm_device *drm_dev, int crtc) 321void sti_crtc_disable_vblank(struct drm_device *drm_dev, unsigned int pipe)
322{ 322{
323 struct sti_private *priv = drm_dev->dev_private; 323 struct sti_private *priv = drm_dev->dev_private;
324 struct sti_compositor *compo = priv->compo; 324 struct sti_compositor *compo = priv->compo;
@@ -326,14 +326,14 @@ void sti_crtc_disable_vblank(struct drm_device *drm_dev, int crtc)
326 326
327 DRM_DEBUG_DRIVER("\n"); 327 DRM_DEBUG_DRIVER("\n");
328 328
329 if (sti_vtg_unregister_client(crtc == STI_MIXER_MAIN ? 329 if (sti_vtg_unregister_client(pipe == STI_MIXER_MAIN ?
330 compo->vtg_main : compo->vtg_aux, vtg_vblank_nb)) 330 compo->vtg_main : compo->vtg_aux, vtg_vblank_nb))
331 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n"); 331 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
332 332
333 /* free the resources of the pending requests */ 333 /* free the resources of the pending requests */
334 if (compo->mixer[crtc]->pending_event) { 334 if (compo->mixer[pipe]->pending_event) {
335 drm_vblank_put(drm_dev, crtc); 335 drm_vblank_put(drm_dev, pipe);
336 compo->mixer[crtc]->pending_event = NULL; 336 compo->mixer[pipe]->pending_event = NULL;
337 } 337 }
338} 338}
339EXPORT_SYMBOL(sti_crtc_disable_vblank); 339EXPORT_SYMBOL(sti_crtc_disable_vblank);
diff --git a/drivers/gpu/drm/sti/sti_crtc.h b/drivers/gpu/drm/sti/sti_crtc.h
index 51963e6ddbe7..3f2d89a3634d 100644
--- a/drivers/gpu/drm/sti/sti_crtc.h
+++ b/drivers/gpu/drm/sti/sti_crtc.h
@@ -13,8 +13,8 @@ struct sti_mixer;
13 13
14int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer, 14int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
15 struct drm_plane *primary, struct drm_plane *cursor); 15 struct drm_plane *primary, struct drm_plane *cursor);
16int sti_crtc_enable_vblank(struct drm_device *dev, int crtc); 16int sti_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe);
17void sti_crtc_disable_vblank(struct drm_device *dev, int crtc); 17void sti_crtc_disable_vblank(struct drm_device *dev, unsigned int pipe);
18int sti_crtc_vblank_cb(struct notifier_block *nb, 18int sti_crtc_vblank_cb(struct notifier_block *nb,
19 unsigned long event, void *data); 19 unsigned long event, void *data);
20bool sti_crtc_is_main(struct drm_crtc *drm_crtc); 20bool sti_crtc_is_main(struct drm_crtc *drm_crtc);
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 6f4af6a8ba1b..f8469967a0bf 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -59,7 +59,7 @@ static void sti_atomic_complete(struct sti_private *private,
59 */ 59 */
60 60
61 drm_atomic_helper_commit_modeset_disables(drm, state); 61 drm_atomic_helper_commit_modeset_disables(drm, state);
62 drm_atomic_helper_commit_planes(drm, state); 62 drm_atomic_helper_commit_planes(drm, state, false);
63 drm_atomic_helper_commit_modeset_enables(drm, state); 63 drm_atomic_helper_commit_modeset_enables(drm, state);
64 64
65 drm_atomic_helper_wait_for_vblanks(drm, state); 65 drm_atomic_helper_wait_for_vblanks(drm, state);
@@ -201,7 +201,7 @@ static struct drm_driver sti_driver = {
201 .dumb_destroy = drm_gem_dumb_destroy, 201 .dumb_destroy = drm_gem_dumb_destroy,
202 .fops = &sti_driver_fops, 202 .fops = &sti_driver_fops,
203 203
204 .get_vblank_counter = drm_vblank_count, 204 .get_vblank_counter = drm_vblank_no_hw_counter,
205 .enable_vblank = sti_crtc_enable_vblank, 205 .enable_vblank = sti_crtc_enable_vblank,
206 .disable_vblank = sti_crtc_disable_vblank, 206 .disable_vblank = sti_crtc_disable_vblank,
207 207
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index ddefb85dc4f7..e9f24a85a103 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -480,14 +480,12 @@ static const struct drm_plane_funcs tegra_primary_plane_funcs = {
480}; 480};
481 481
482static int tegra_plane_prepare_fb(struct drm_plane *plane, 482static int tegra_plane_prepare_fb(struct drm_plane *plane,
483 struct drm_framebuffer *fb,
484 const struct drm_plane_state *new_state) 483 const struct drm_plane_state *new_state)
485{ 484{
486 return 0; 485 return 0;
487} 486}
488 487
489static void tegra_plane_cleanup_fb(struct drm_plane *plane, 488static void tegra_plane_cleanup_fb(struct drm_plane *plane,
490 struct drm_framebuffer *fb,
491 const struct drm_plane_state *old_fb) 489 const struct drm_plane_state *old_fb)
492{ 490{
493} 491}
@@ -1696,6 +1694,7 @@ static int tegra_dc_debugfs_exit(struct tegra_dc *dc)
1696static int tegra_dc_init(struct host1x_client *client) 1694static int tegra_dc_init(struct host1x_client *client)
1697{ 1695{
1698 struct drm_device *drm = dev_get_drvdata(client->parent); 1696 struct drm_device *drm = dev_get_drvdata(client->parent);
1697 unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED;
1699 struct tegra_dc *dc = host1x_client_to_dc(client); 1698 struct tegra_dc *dc = host1x_client_to_dc(client);
1700 struct tegra_drm *tegra = drm->dev_private; 1699 struct tegra_drm *tegra = drm->dev_private;
1701 struct drm_plane *primary = NULL; 1700 struct drm_plane *primary = NULL;
@@ -1703,6 +1702,10 @@ static int tegra_dc_init(struct host1x_client *client)
1703 u32 value; 1702 u32 value;
1704 int err; 1703 int err;
1705 1704
1705 dc->syncpt = host1x_syncpt_request(dc->dev, flags);
1706 if (!dc->syncpt)
1707 dev_warn(dc->dev, "failed to allocate syncpoint\n");
1708
1706 if (tegra->domain) { 1709 if (tegra->domain) {
1707 err = iommu_attach_device(tegra->domain, dc->dev); 1710 err = iommu_attach_device(tegra->domain, dc->dev);
1708 if (err < 0) { 1711 if (err < 0) {
@@ -1849,6 +1852,8 @@ static int tegra_dc_exit(struct host1x_client *client)
1849 dc->domain = NULL; 1852 dc->domain = NULL;
1850 } 1853 }
1851 1854
1855 host1x_syncpt_free(dc->syncpt);
1856
1852 return 0; 1857 return 0;
1853} 1858}
1854 1859
@@ -1961,7 +1966,6 @@ static int tegra_dc_parse_dt(struct tegra_dc *dc)
1961 1966
1962static int tegra_dc_probe(struct platform_device *pdev) 1967static int tegra_dc_probe(struct platform_device *pdev)
1963{ 1968{
1964 unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED;
1965 const struct of_device_id *id; 1969 const struct of_device_id *id;
1966 struct resource *regs; 1970 struct resource *regs;
1967 struct tegra_dc *dc; 1971 struct tegra_dc *dc;
@@ -2036,10 +2040,6 @@ static int tegra_dc_probe(struct platform_device *pdev)
2036 return -ENXIO; 2040 return -ENXIO;
2037 } 2041 }
2038 2042
2039 dc->syncpt = host1x_syncpt_request(&pdev->dev, flags);
2040 if (!dc->syncpt)
2041 dev_warn(&pdev->dev, "failed to allocate syncpoint\n");
2042
2043 INIT_LIST_HEAD(&dc->client.list); 2043 INIT_LIST_HEAD(&dc->client.list);
2044 dc->client.ops = &dc_client_ops; 2044 dc->client.ops = &dc_client_ops;
2045 dc->client.dev = &pdev->dev; 2045 dc->client.dev = &pdev->dev;
@@ -2067,8 +2067,6 @@ static int tegra_dc_remove(struct platform_device *pdev)
2067 struct tegra_dc *dc = platform_get_drvdata(pdev); 2067 struct tegra_dc *dc = platform_get_drvdata(pdev);
2068 int err; 2068 int err;
2069 2069
2070 host1x_syncpt_free(dc->syncpt);
2071
2072 err = host1x_client_unregister(&dc->client); 2070 err = host1x_client_unregister(&dc->client);
2073 if (err < 0) { 2071 if (err < 0) {
2074 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", 2072 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 224a7dc8e4ed..6aecb6647313 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -119,6 +119,7 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
119 */ 119 */
120 if (msg->size < 1) { 120 if (msg->size < 1) {
121 switch (msg->request & ~DP_AUX_I2C_MOT) { 121 switch (msg->request & ~DP_AUX_I2C_MOT) {
122 case DP_AUX_I2C_WRITE_STATUS_UPDATE:
122 case DP_AUX_I2C_WRITE: 123 case DP_AUX_I2C_WRITE:
123 case DP_AUX_I2C_READ: 124 case DP_AUX_I2C_READ:
124 value = DPAUX_DP_AUXCTL_CMD_ADDRESS_ONLY; 125 value = DPAUX_DP_AUXCTL_CMD_ADDRESS_ONLY;
@@ -149,7 +150,7 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
149 150
150 break; 151 break;
151 152
152 case DP_AUX_I2C_STATUS: 153 case DP_AUX_I2C_WRITE_STATUS_UPDATE:
153 if (msg->request & DP_AUX_I2C_MOT) 154 if (msg->request & DP_AUX_I2C_MOT)
154 value |= DPAUX_DP_AUXCTL_CMD_MOT_RQ; 155 value |= DPAUX_DP_AUXCTL_CMD_MOT_RQ;
155 else 156 else
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 6d88cf1fcd1c..159ef515cab1 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -56,7 +56,7 @@ static void tegra_atomic_complete(struct tegra_drm *tegra,
56 */ 56 */
57 57
58 drm_atomic_helper_commit_modeset_disables(drm, state); 58 drm_atomic_helper_commit_modeset_disables(drm, state);
59 drm_atomic_helper_commit_planes(drm, state); 59 drm_atomic_helper_commit_planes(drm, state, false);
60 drm_atomic_helper_commit_modeset_enables(drm, state); 60 drm_atomic_helper_commit_modeset_enables(drm, state);
61 61
62 drm_atomic_helper_wait_for_vblanks(drm, state); 62 drm_atomic_helper_wait_for_vblanks(drm, state);
@@ -778,20 +778,20 @@ static int tegra_gem_get_flags(struct drm_device *drm, void *data,
778 778
779static const struct drm_ioctl_desc tegra_drm_ioctls[] = { 779static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
780#ifdef CONFIG_DRM_TEGRA_STAGING 780#ifdef CONFIG_DRM_TEGRA_STAGING
781 DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED), 781 DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, 0),
782 DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED), 782 DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, 0),
783 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED), 783 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, 0),
784 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED), 784 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, 0),
785 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, DRM_UNLOCKED), 785 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, 0),
786 DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, DRM_UNLOCKED), 786 DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, 0),
787 DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED), 787 DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, 0),
788 DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED), 788 DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, 0),
789 DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED), 789 DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, 0),
790 DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, DRM_UNLOCKED), 790 DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, 0),
791 DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling, DRM_UNLOCKED), 791 DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling, 0),
792 DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling, DRM_UNLOCKED), 792 DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling, 0),
793 DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags, DRM_UNLOCKED), 793 DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags, 0),
794 DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags, DRM_UNLOCKED), 794 DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags, 0),
795#endif 795#endif
796}; 796};
797 797
@@ -822,7 +822,8 @@ static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm,
822 return NULL; 822 return NULL;
823} 823}
824 824
825static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, int pipe) 825static u32 tegra_drm_get_vblank_counter(struct drm_device *drm,
826 unsigned int pipe)
826{ 827{
827 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe); 828 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
828 struct tegra_dc *dc = to_tegra_dc(crtc); 829 struct tegra_dc *dc = to_tegra_dc(crtc);
@@ -833,7 +834,7 @@ static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, int pipe)
833 return tegra_dc_get_vblank_counter(dc); 834 return tegra_dc_get_vblank_counter(dc);
834} 835}
835 836
836static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe) 837static int tegra_drm_enable_vblank(struct drm_device *drm, unsigned int pipe)
837{ 838{
838 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe); 839 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
839 struct tegra_dc *dc = to_tegra_dc(crtc); 840 struct tegra_dc *dc = to_tegra_dc(crtc);
@@ -846,7 +847,7 @@ static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
846 return 0; 847 return 0;
847} 848}
848 849
849static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe) 850static void tegra_drm_disable_vblank(struct drm_device *drm, unsigned int pipe)
850{ 851{
851 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe); 852 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
852 struct tegra_dc *dc = to_tegra_dc(crtc); 853 struct tegra_dc *dc = to_tegra_dc(crtc);
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 07c844b746b4..1004075fd088 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -341,7 +341,6 @@ fini:
341 341
342static void tegra_fbdev_exit(struct tegra_fbdev *fbdev) 342static void tegra_fbdev_exit(struct tegra_fbdev *fbdev)
343{ 343{
344
345 drm_fb_helper_unregister_fbi(&fbdev->base); 344 drm_fb_helper_unregister_fbi(&fbdev->base);
346 drm_fb_helper_release_fbi(&fbdev->base); 345 drm_fb_helper_release_fbi(&fbdev->base);
347 346
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 0f283a3b932c..876cad58b1f9 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -425,13 +425,13 @@ static void enable_vblank(struct drm_device *dev, bool enable)
425 tilcdc_clear(dev, reg, mask); 425 tilcdc_clear(dev, reg, mask);
426} 426}
427 427
428static int tilcdc_enable_vblank(struct drm_device *dev, int crtc) 428static int tilcdc_enable_vblank(struct drm_device *dev, unsigned int pipe)
429{ 429{
430 enable_vblank(dev, true); 430 enable_vblank(dev, true);
431 return 0; 431 return 0;
432} 432}
433 433
434static void tilcdc_disable_vblank(struct drm_device *dev, int crtc) 434static void tilcdc_disable_vblank(struct drm_device *dev, unsigned int pipe)
435{ 435{
436 enable_vblank(dev, false); 436 enable_vblank(dev, false);
437} 437}
@@ -563,7 +563,7 @@ static struct drm_driver tilcdc_driver = {
563 .irq_preinstall = tilcdc_irq_preinstall, 563 .irq_preinstall = tilcdc_irq_preinstall,
564 .irq_postinstall = tilcdc_irq_postinstall, 564 .irq_postinstall = tilcdc_irq_postinstall,
565 .irq_uninstall = tilcdc_irq_uninstall, 565 .irq_uninstall = tilcdc_irq_uninstall,
566 .get_vblank_counter = drm_vblank_count, 566 .get_vblank_counter = drm_vblank_no_hw_counter,
567 .enable_vblank = tilcdc_enable_vblank, 567 .enable_vblank = tilcdc_enable_vblank,
568 .disable_vblank = tilcdc_disable_vblank, 568 .disable_vblank = tilcdc_disable_vblank,
569 .gem_free_object = drm_gem_cma_free_object, 569 .gem_free_object = drm_gem_cma_free_object,
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
new file mode 100644
index 000000000000..e502802d74b6
--- /dev/null
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -0,0 +1,13 @@
1config DRM_VC4
2 tristate "Broadcom VC4 Graphics"
3 depends on ARCH_BCM2835 || COMPILE_TEST
4 depends on DRM
5 select DRM_KMS_HELPER
6 select DRM_KMS_CMA_HELPER
7 help
8 Choose this option if you have a system that has a Broadcom
9 VC4 GPU, such as the Raspberry Pi or other BCM2708/BCM2835.
10
11 This driver requires that "avoid_warnings=2" be present in
12 the config.txt for the firmware, to keep it from smashing
13 our display setup.
diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile
new file mode 100644
index 000000000000..32b4f9cd8f52
--- /dev/null
+++ b/drivers/gpu/drm/vc4/Makefile
@@ -0,0 +1,17 @@
1ccflags-y := -Iinclude/drm
2
3# Please keep these build lists sorted!
4
5# core driver code
6vc4-y := \
7 vc4_bo.o \
8 vc4_crtc.o \
9 vc4_drv.o \
10 vc4_kms.o \
11 vc4_hdmi.o \
12 vc4_hvs.o \
13 vc4_plane.o
14
15vc4-$(CONFIG_DEBUG_FS) += vc4_debugfs.o
16
17obj-$(CONFIG_DRM_VC4) += vc4.o
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
new file mode 100644
index 000000000000..ab9f5108ae1a
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -0,0 +1,52 @@
1/*
2 * Copyright © 2015 Broadcom
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9/* DOC: VC4 GEM BO management support.
10 *
11 * The VC4 GPU architecture (both scanout and rendering) has direct
12 * access to system memory with no MMU in between. To support it, we
13 * use the GEM CMA helper functions to allocate contiguous ranges of
14 * physical memory for our BOs.
15 */
16
17#include "vc4_drv.h"
18
19struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size)
20{
21 struct drm_gem_cma_object *cma_obj;
22
23 cma_obj = drm_gem_cma_create(dev, size);
24 if (IS_ERR(cma_obj))
25 return NULL;
26 else
27 return to_vc4_bo(&cma_obj->base);
28}
29
30int vc4_dumb_create(struct drm_file *file_priv,
31 struct drm_device *dev,
32 struct drm_mode_create_dumb *args)
33{
34 int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
35 struct vc4_bo *bo = NULL;
36 int ret;
37
38 if (args->pitch < min_pitch)
39 args->pitch = min_pitch;
40
41 if (args->size < args->pitch * args->height)
42 args->size = args->pitch * args->height;
43
44 bo = vc4_bo_create(dev, roundup(args->size, PAGE_SIZE));
45 if (!bo)
46 return -ENOMEM;
47
48 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
49 drm_gem_object_unreference_unlocked(&bo->base.base);
50
51 return ret;
52}
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
new file mode 100644
index 000000000000..7a9f4768591e
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -0,0 +1,672 @@
1/*
2 * Copyright (C) 2015 Broadcom
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9/**
10 * DOC: VC4 CRTC module
11 *
12 * In VC4, the Pixel Valve is what most closely corresponds to the
13 * DRM's concept of a CRTC. The PV generates video timings from the
14 * output's clock plus its configuration. It pulls scaled pixels from
15 * the HVS at that timing, and feeds it to the encoder.
16 *
17 * However, the DRM CRTC also collects the configuration of all the
18 * DRM planes attached to it. As a result, this file also manages
19 * setup of the VC4 HVS's display elements on the CRTC.
20 *
21 * The 2835 has 3 different pixel valves. pv0 in the audio power
22 * domain feeds DSI0 or DPI, while pv1 feeds DS1 or SMI. pv2 in the
23 * image domain can feed either HDMI or the SDTV controller. The
24 * pixel valve chooses from the CPRMAN clocks (HSM for HDMI, VEC for
25 * SDTV, etc.) according to which output type is chosen in the mux.
26 *
27 * For power management, the pixel valve's registers are all clocked
28 * by the AXI clock, while the timings and FIFOs make use of the
29 * output-specific clock. Since the encoders also directly consume
30 * the CPRMAN clocks, and know what timings they need, they are the
31 * ones that set the clock.
32 */
33
34#include "drm_atomic.h"
35#include "drm_atomic_helper.h"
36#include "drm_crtc_helper.h"
37#include "linux/clk.h"
38#include "linux/component.h"
39#include "linux/of_device.h"
40#include "vc4_drv.h"
41#include "vc4_regs.h"
42
43struct vc4_crtc {
44 struct drm_crtc base;
45 const struct vc4_crtc_data *data;
46 void __iomem *regs;
47
48 /* Which HVS channel we're using for our CRTC. */
49 int channel;
50
51 /* Pointer to the actual hardware display list memory for the
52 * crtc.
53 */
54 u32 __iomem *dlist;
55
56 u32 dlist_size; /* in dwords */
57
58 struct drm_pending_vblank_event *event;
59};
60
61static inline struct vc4_crtc *
62to_vc4_crtc(struct drm_crtc *crtc)
63{
64 return (struct vc4_crtc *)crtc;
65}
66
67struct vc4_crtc_data {
68 /* Which channel of the HVS this pixelvalve sources from. */
69 int hvs_channel;
70
71 enum vc4_encoder_type encoder0_type;
72 enum vc4_encoder_type encoder1_type;
73};
74
75#define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset))
76#define CRTC_READ(offset) readl(vc4_crtc->regs + (offset))
77
78#define CRTC_REG(reg) { reg, #reg }
79static const struct {
80 u32 reg;
81 const char *name;
82} crtc_regs[] = {
83 CRTC_REG(PV_CONTROL),
84 CRTC_REG(PV_V_CONTROL),
85 CRTC_REG(PV_VSYNCD),
86 CRTC_REG(PV_HORZA),
87 CRTC_REG(PV_HORZB),
88 CRTC_REG(PV_VERTA),
89 CRTC_REG(PV_VERTB),
90 CRTC_REG(PV_VERTA_EVEN),
91 CRTC_REG(PV_VERTB_EVEN),
92 CRTC_REG(PV_INTEN),
93 CRTC_REG(PV_INTSTAT),
94 CRTC_REG(PV_STAT),
95 CRTC_REG(PV_HACT_ACT),
96};
97
98static void vc4_crtc_dump_regs(struct vc4_crtc *vc4_crtc)
99{
100 int i;
101
102 for (i = 0; i < ARRAY_SIZE(crtc_regs); i++) {
103 DRM_INFO("0x%04x (%s): 0x%08x\n",
104 crtc_regs[i].reg, crtc_regs[i].name,
105 CRTC_READ(crtc_regs[i].reg));
106 }
107}
108
109#ifdef CONFIG_DEBUG_FS
110int vc4_crtc_debugfs_regs(struct seq_file *m, void *unused)
111{
112 struct drm_info_node *node = (struct drm_info_node *)m->private;
113 struct drm_device *dev = node->minor->dev;
114 int crtc_index = (uintptr_t)node->info_ent->data;
115 struct drm_crtc *crtc;
116 struct vc4_crtc *vc4_crtc;
117 int i;
118
119 i = 0;
120 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
121 if (i == crtc_index)
122 break;
123 i++;
124 }
125 if (!crtc)
126 return 0;
127 vc4_crtc = to_vc4_crtc(crtc);
128
129 for (i = 0; i < ARRAY_SIZE(crtc_regs); i++) {
130 seq_printf(m, "%s (0x%04x): 0x%08x\n",
131 crtc_regs[i].name, crtc_regs[i].reg,
132 CRTC_READ(crtc_regs[i].reg));
133 }
134
135 return 0;
136}
137#endif
138
139static void vc4_crtc_destroy(struct drm_crtc *crtc)
140{
141 drm_crtc_cleanup(crtc);
142}
143
144static u32 vc4_get_fifo_full_level(u32 format)
145{
146 static const u32 fifo_len_bytes = 64;
147 static const u32 hvs_latency_pix = 6;
148
149 switch (format) {
150 case PV_CONTROL_FORMAT_DSIV_16:
151 case PV_CONTROL_FORMAT_DSIC_16:
152 return fifo_len_bytes - 2 * hvs_latency_pix;
153 case PV_CONTROL_FORMAT_DSIV_18:
154 return fifo_len_bytes - 14;
155 case PV_CONTROL_FORMAT_24:
156 case PV_CONTROL_FORMAT_DSIV_24:
157 default:
158 return fifo_len_bytes - 3 * hvs_latency_pix;
159 }
160}
161
162/*
163 * Returns the clock select bit for the connector attached to the
164 * CRTC.
165 */
166static int vc4_get_clock_select(struct drm_crtc *crtc)
167{
168 struct drm_connector *connector;
169
170 drm_for_each_connector(connector, crtc->dev) {
171 if (connector && connector->state->crtc == crtc) {
172 struct drm_encoder *encoder = connector->encoder;
173 struct vc4_encoder *vc4_encoder =
174 to_vc4_encoder(encoder);
175
176 return vc4_encoder->clock_select;
177 }
178 }
179
180 return -1;
181}
182
183static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
184{
185 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
186 struct drm_crtc_state *state = crtc->state;
187 struct drm_display_mode *mode = &state->adjusted_mode;
188 bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
189 u32 vactive = (mode->vdisplay >> (interlace ? 1 : 0));
190 u32 format = PV_CONTROL_FORMAT_24;
191 bool debug_dump_regs = false;
192 int clock_select = vc4_get_clock_select(crtc);
193
194 if (debug_dump_regs) {
195 DRM_INFO("CRTC %d regs before:\n", drm_crtc_index(crtc));
196 vc4_crtc_dump_regs(vc4_crtc);
197 }
198
199 /* Reset the PV fifo. */
200 CRTC_WRITE(PV_CONTROL, 0);
201 CRTC_WRITE(PV_CONTROL, PV_CONTROL_FIFO_CLR | PV_CONTROL_EN);
202 CRTC_WRITE(PV_CONTROL, 0);
203
204 CRTC_WRITE(PV_HORZA,
205 VC4_SET_FIELD(mode->htotal - mode->hsync_end,
206 PV_HORZA_HBP) |
207 VC4_SET_FIELD(mode->hsync_end - mode->hsync_start,
208 PV_HORZA_HSYNC));
209 CRTC_WRITE(PV_HORZB,
210 VC4_SET_FIELD(mode->hsync_start - mode->hdisplay,
211 PV_HORZB_HFP) |
212 VC4_SET_FIELD(mode->hdisplay, PV_HORZB_HACTIVE));
213
214 if (interlace) {
215 CRTC_WRITE(PV_VERTA_EVEN,
216 VC4_SET_FIELD(mode->vtotal - mode->vsync_end - 1,
217 PV_VERTA_VBP) |
218 VC4_SET_FIELD(mode->vsync_end - mode->vsync_start,
219 PV_VERTA_VSYNC));
220 CRTC_WRITE(PV_VERTB_EVEN,
221 VC4_SET_FIELD(mode->vsync_start - mode->vdisplay,
222 PV_VERTB_VFP) |
223 VC4_SET_FIELD(vactive, PV_VERTB_VACTIVE));
224 }
225
226 CRTC_WRITE(PV_HACT_ACT, mode->hdisplay);
227
228 CRTC_WRITE(PV_V_CONTROL,
229 PV_VCONTROL_CONTINUOUS |
230 (interlace ? PV_VCONTROL_INTERLACE : 0));
231
232 CRTC_WRITE(PV_CONTROL,
233 VC4_SET_FIELD(format, PV_CONTROL_FORMAT) |
234 VC4_SET_FIELD(vc4_get_fifo_full_level(format),
235 PV_CONTROL_FIFO_LEVEL) |
236 PV_CONTROL_CLR_AT_START |
237 PV_CONTROL_TRIGGER_UNDERFLOW |
238 PV_CONTROL_WAIT_HSTART |
239 VC4_SET_FIELD(clock_select, PV_CONTROL_CLK_SELECT) |
240 PV_CONTROL_FIFO_CLR |
241 PV_CONTROL_EN);
242
243 if (debug_dump_regs) {
244 DRM_INFO("CRTC %d regs after:\n", drm_crtc_index(crtc));
245 vc4_crtc_dump_regs(vc4_crtc);
246 }
247}
248
249static void require_hvs_enabled(struct drm_device *dev)
250{
251 struct vc4_dev *vc4 = to_vc4_dev(dev);
252
253 WARN_ON_ONCE((HVS_READ(SCALER_DISPCTRL) & SCALER_DISPCTRL_ENABLE) !=
254 SCALER_DISPCTRL_ENABLE);
255}
256
257static void vc4_crtc_disable(struct drm_crtc *crtc)
258{
259 struct drm_device *dev = crtc->dev;
260 struct vc4_dev *vc4 = to_vc4_dev(dev);
261 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
262 u32 chan = vc4_crtc->channel;
263 int ret;
264 require_hvs_enabled(dev);
265
266 CRTC_WRITE(PV_V_CONTROL,
267 CRTC_READ(PV_V_CONTROL) & ~PV_VCONTROL_VIDEN);
268 ret = wait_for(!(CRTC_READ(PV_V_CONTROL) & PV_VCONTROL_VIDEN), 1);
269 WARN_ONCE(ret, "Timeout waiting for !PV_VCONTROL_VIDEN\n");
270
271 if (HVS_READ(SCALER_DISPCTRLX(chan)) &
272 SCALER_DISPCTRLX_ENABLE) {
273 HVS_WRITE(SCALER_DISPCTRLX(chan),
274 SCALER_DISPCTRLX_RESET);
275
276 /* While the docs say that reset is self-clearing, it
277 * seems it doesn't actually.
278 */
279 HVS_WRITE(SCALER_DISPCTRLX(chan), 0);
280 }
281
282 /* Once we leave, the scaler should be disabled and its fifo empty. */
283
284 WARN_ON_ONCE(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_RESET);
285
286 WARN_ON_ONCE(VC4_GET_FIELD(HVS_READ(SCALER_DISPSTATX(chan)),
287 SCALER_DISPSTATX_MODE) !=
288 SCALER_DISPSTATX_MODE_DISABLED);
289
290 WARN_ON_ONCE((HVS_READ(SCALER_DISPSTATX(chan)) &
291 (SCALER_DISPSTATX_FULL | SCALER_DISPSTATX_EMPTY)) !=
292 SCALER_DISPSTATX_EMPTY);
293}
294
295static void vc4_crtc_enable(struct drm_crtc *crtc)
296{
297 struct drm_device *dev = crtc->dev;
298 struct vc4_dev *vc4 = to_vc4_dev(dev);
299 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
300 struct drm_crtc_state *state = crtc->state;
301 struct drm_display_mode *mode = &state->adjusted_mode;
302
303 require_hvs_enabled(dev);
304
305 /* Turn on the scaler, which will wait for vstart to start
306 * compositing.
307 */
308 HVS_WRITE(SCALER_DISPCTRLX(vc4_crtc->channel),
309 VC4_SET_FIELD(mode->hdisplay, SCALER_DISPCTRLX_WIDTH) |
310 VC4_SET_FIELD(mode->vdisplay, SCALER_DISPCTRLX_HEIGHT) |
311 SCALER_DISPCTRLX_ENABLE);
312
313 /* Turn on the pixel valve, which will emit the vstart signal. */
314 CRTC_WRITE(PV_V_CONTROL,
315 CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
316}
317
318static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
319 struct drm_crtc_state *state)
320{
321 struct drm_device *dev = crtc->dev;
322 struct vc4_dev *vc4 = to_vc4_dev(dev);
323 struct drm_plane *plane;
324 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
325 u32 dlist_count = 0;
326
327 /* The pixelvalve can only feed one encoder (and encoders are
328 * 1:1 with connectors.)
329 */
330 if (drm_atomic_connectors_for_crtc(state->state, crtc) > 1)
331 return -EINVAL;
332
333 drm_atomic_crtc_state_for_each_plane(plane, state) {
334 struct drm_plane_state *plane_state =
335 state->state->plane_states[drm_plane_index(plane)];
336
337 /* plane might not have changed, in which case take
338 * current state:
339 */
340 if (!plane_state)
341 plane_state = plane->state;
342
343 dlist_count += vc4_plane_dlist_size(plane_state);
344 }
345
346 dlist_count++; /* Account for SCALER_CTL0_END. */
347
348 if (!vc4_crtc->dlist || dlist_count > vc4_crtc->dlist_size) {
349 vc4_crtc->dlist = ((u32 __iomem *)vc4->hvs->dlist +
350 HVS_BOOTLOADER_DLIST_END);
351 vc4_crtc->dlist_size = ((SCALER_DLIST_SIZE >> 2) -
352 HVS_BOOTLOADER_DLIST_END);
353
354 if (dlist_count > vc4_crtc->dlist_size) {
355 DRM_DEBUG_KMS("dlist too large for CRTC (%d > %d).\n",
356 dlist_count, vc4_crtc->dlist_size);
357 return -EINVAL;
358 }
359 }
360
361 return 0;
362}
363
364static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
365 struct drm_crtc_state *old_state)
366{
367 struct drm_device *dev = crtc->dev;
368 struct vc4_dev *vc4 = to_vc4_dev(dev);
369 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
370 struct drm_plane *plane;
371 bool debug_dump_regs = false;
372 u32 __iomem *dlist_next = vc4_crtc->dlist;
373
374 if (debug_dump_regs) {
375 DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc));
376 vc4_hvs_dump_state(dev);
377 }
378
379 /* Copy all the active planes' dlist contents to the hardware dlist.
380 *
381 * XXX: If the new display list was large enough that it
382 * overlapped a currently-read display list, we need to do
383 * something like disable scanout before putting in the new
384 * list. For now, we're safe because we only have the two
385 * planes.
386 */
387 drm_atomic_crtc_for_each_plane(plane, crtc) {
388 dlist_next += vc4_plane_write_dlist(plane, dlist_next);
389 }
390
391 if (dlist_next == vc4_crtc->dlist) {
392 /* If no planes were enabled, use the SCALER_CTL0_END
393 * at the start of the display list memory (in the
394 * bootloader section). We'll rewrite that
395 * SCALER_CTL0_END, just in case, though.
396 */
397 writel(SCALER_CTL0_END, vc4->hvs->dlist);
398 HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), 0);
399 } else {
400 writel(SCALER_CTL0_END, dlist_next);
401 dlist_next++;
402
403 HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
404 (u32 *)vc4_crtc->dlist - (u32 *)vc4->hvs->dlist);
405
406 /* Make the next display list start after ours. */
407 vc4_crtc->dlist_size -= (dlist_next - vc4_crtc->dlist);
408 vc4_crtc->dlist = dlist_next;
409 }
410
411 if (debug_dump_regs) {
412 DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
413 vc4_hvs_dump_state(dev);
414 }
415
416 if (crtc->state->event) {
417 unsigned long flags;
418
419 crtc->state->event->pipe = drm_crtc_index(crtc);
420
421 WARN_ON(drm_crtc_vblank_get(crtc) != 0);
422
423 spin_lock_irqsave(&dev->event_lock, flags);
424 vc4_crtc->event = crtc->state->event;
425 spin_unlock_irqrestore(&dev->event_lock, flags);
426 crtc->state->event = NULL;
427 }
428}
429
430int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id)
431{
432 struct vc4_dev *vc4 = to_vc4_dev(dev);
433 struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];
434
435 CRTC_WRITE(PV_INTEN, PV_INT_VFP_START);
436
437 return 0;
438}
439
440void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id)
441{
442 struct vc4_dev *vc4 = to_vc4_dev(dev);
443 struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];
444
445 CRTC_WRITE(PV_INTEN, 0);
446}
447
448static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
449{
450 struct drm_crtc *crtc = &vc4_crtc->base;
451 struct drm_device *dev = crtc->dev;
452 unsigned long flags;
453
454 spin_lock_irqsave(&dev->event_lock, flags);
455 if (vc4_crtc->event) {
456 drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
457 vc4_crtc->event = NULL;
458 }
459 spin_unlock_irqrestore(&dev->event_lock, flags);
460}
461
462static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
463{
464 struct vc4_crtc *vc4_crtc = data;
465 u32 stat = CRTC_READ(PV_INTSTAT);
466 irqreturn_t ret = IRQ_NONE;
467
468 if (stat & PV_INT_VFP_START) {
469 CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
470 drm_crtc_handle_vblank(&vc4_crtc->base);
471 vc4_crtc_handle_page_flip(vc4_crtc);
472 ret = IRQ_HANDLED;
473 }
474
475 return ret;
476}
477
478static const struct drm_crtc_funcs vc4_crtc_funcs = {
479 .set_config = drm_atomic_helper_set_config,
480 .destroy = vc4_crtc_destroy,
481 .page_flip = drm_atomic_helper_page_flip,
482 .set_property = NULL,
483 .cursor_set = NULL, /* handled by drm_mode_cursor_universal */
484 .cursor_move = NULL, /* handled by drm_mode_cursor_universal */
485 .reset = drm_atomic_helper_crtc_reset,
486 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
487 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
488};
489
490static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
491 .mode_set_nofb = vc4_crtc_mode_set_nofb,
492 .disable = vc4_crtc_disable,
493 .enable = vc4_crtc_enable,
494 .atomic_check = vc4_crtc_atomic_check,
495 .atomic_flush = vc4_crtc_atomic_flush,
496};
497
498/* Frees the page flip event when the DRM device is closed with the
499 * event still outstanding.
500 */
501void vc4_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
502{
503 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
504 struct drm_device *dev = crtc->dev;
505 unsigned long flags;
506
507 spin_lock_irqsave(&dev->event_lock, flags);
508
509 if (vc4_crtc->event && vc4_crtc->event->base.file_priv == file) {
510 vc4_crtc->event->base.destroy(&vc4_crtc->event->base);
511 drm_crtc_vblank_put(crtc);
512 vc4_crtc->event = NULL;
513 }
514
515 spin_unlock_irqrestore(&dev->event_lock, flags);
516}
517
518static const struct vc4_crtc_data pv0_data = {
519 .hvs_channel = 0,
520 .encoder0_type = VC4_ENCODER_TYPE_DSI0,
521 .encoder1_type = VC4_ENCODER_TYPE_DPI,
522};
523
524static const struct vc4_crtc_data pv1_data = {
525 .hvs_channel = 2,
526 .encoder0_type = VC4_ENCODER_TYPE_DSI1,
527 .encoder1_type = VC4_ENCODER_TYPE_SMI,
528};
529
530static const struct vc4_crtc_data pv2_data = {
531 .hvs_channel = 1,
532 .encoder0_type = VC4_ENCODER_TYPE_VEC,
533 .encoder1_type = VC4_ENCODER_TYPE_HDMI,
534};
535
536static const struct of_device_id vc4_crtc_dt_match[] = {
537 { .compatible = "brcm,bcm2835-pixelvalve0", .data = &pv0_data },
538 { .compatible = "brcm,bcm2835-pixelvalve1", .data = &pv1_data },
539 { .compatible = "brcm,bcm2835-pixelvalve2", .data = &pv2_data },
540 {}
541};
542
543static void vc4_set_crtc_possible_masks(struct drm_device *drm,
544 struct drm_crtc *crtc)
545{
546 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
547 struct drm_encoder *encoder;
548
549 drm_for_each_encoder(encoder, drm) {
550 struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
551
552 if (vc4_encoder->type == vc4_crtc->data->encoder0_type) {
553 vc4_encoder->clock_select = 0;
554 encoder->possible_crtcs |= drm_crtc_mask(crtc);
555 } else if (vc4_encoder->type == vc4_crtc->data->encoder1_type) {
556 vc4_encoder->clock_select = 1;
557 encoder->possible_crtcs |= drm_crtc_mask(crtc);
558 }
559 }
560}
561
562static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
563{
564 struct platform_device *pdev = to_platform_device(dev);
565 struct drm_device *drm = dev_get_drvdata(master);
566 struct vc4_dev *vc4 = to_vc4_dev(drm);
567 struct vc4_crtc *vc4_crtc;
568 struct drm_crtc *crtc;
569 struct drm_plane *primary_plane, *cursor_plane;
570 const struct of_device_id *match;
571 int ret;
572
573 vc4_crtc = devm_kzalloc(dev, sizeof(*vc4_crtc), GFP_KERNEL);
574 if (!vc4_crtc)
575 return -ENOMEM;
576 crtc = &vc4_crtc->base;
577
578 match = of_match_device(vc4_crtc_dt_match, dev);
579 if (!match)
580 return -ENODEV;
581 vc4_crtc->data = match->data;
582
583 vc4_crtc->regs = vc4_ioremap_regs(pdev, 0);
584 if (IS_ERR(vc4_crtc->regs))
585 return PTR_ERR(vc4_crtc->regs);
586
587 /* For now, we create just the primary and the legacy cursor
588 * planes. We should be able to stack more planes on easily,
589 * but to do that we would need to compute the bandwidth
590 * requirement of the plane configuration, and reject ones
591 * that will take too much.
592 */
593 primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY);
594 if (!primary_plane) {
595 dev_err(dev, "failed to construct primary plane\n");
596 ret = PTR_ERR(primary_plane);
597 goto err;
598 }
599
600 cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
601 if (!cursor_plane) {
602 dev_err(dev, "failed to construct cursor plane\n");
603 ret = PTR_ERR(cursor_plane);
604 goto err_primary;
605 }
606
607 drm_crtc_init_with_planes(drm, crtc, primary_plane, cursor_plane,
608 &vc4_crtc_funcs);
609 drm_crtc_helper_add(crtc, &vc4_crtc_helper_funcs);
610 primary_plane->crtc = crtc;
611 cursor_plane->crtc = crtc;
612 vc4->crtc[drm_crtc_index(crtc)] = vc4_crtc;
613 vc4_crtc->channel = vc4_crtc->data->hvs_channel;
614
615 CRTC_WRITE(PV_INTEN, 0);
616 CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
617 ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
618 vc4_crtc_irq_handler, 0, "vc4 crtc", vc4_crtc);
619 if (ret)
620 goto err_cursor;
621
622 vc4_set_crtc_possible_masks(drm, crtc);
623
624 platform_set_drvdata(pdev, vc4_crtc);
625
626 return 0;
627
628err_cursor:
629 cursor_plane->funcs->destroy(cursor_plane);
630err_primary:
631 primary_plane->funcs->destroy(primary_plane);
632err:
633 return ret;
634}
635
636static void vc4_crtc_unbind(struct device *dev, struct device *master,
637 void *data)
638{
639 struct platform_device *pdev = to_platform_device(dev);
640 struct vc4_crtc *vc4_crtc = dev_get_drvdata(dev);
641
642 vc4_crtc_destroy(&vc4_crtc->base);
643
644 CRTC_WRITE(PV_INTEN, 0);
645
646 platform_set_drvdata(pdev, NULL);
647}
648
649static const struct component_ops vc4_crtc_ops = {
650 .bind = vc4_crtc_bind,
651 .unbind = vc4_crtc_unbind,
652};
653
654static int vc4_crtc_dev_probe(struct platform_device *pdev)
655{
656 return component_add(&pdev->dev, &vc4_crtc_ops);
657}
658
659static int vc4_crtc_dev_remove(struct platform_device *pdev)
660{
661 component_del(&pdev->dev, &vc4_crtc_ops);
662 return 0;
663}
664
665struct platform_driver vc4_crtc_driver = {
666 .probe = vc4_crtc_dev_probe,
667 .remove = vc4_crtc_dev_remove,
668 .driver = {
669 .name = "vc4_crtc",
670 .of_match_table = vc4_crtc_dt_match,
671 },
672};
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
new file mode 100644
index 000000000000..4297b0a5b74e
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -0,0 +1,39 @@
1/*
2 * Copyright © 2014 Broadcom
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/seq_file.h>
10#include <linux/circ_buf.h>
11#include <linux/ctype.h>
12#include <linux/debugfs.h>
13#include <drm/drmP.h>
14
15#include "vc4_drv.h"
16#include "vc4_regs.h"
17
18static const struct drm_info_list vc4_debugfs_list[] = {
19 {"hdmi_regs", vc4_hdmi_debugfs_regs, 0},
20 {"hvs_regs", vc4_hvs_debugfs_regs, 0},
21 {"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0},
22 {"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1},
23 {"crtc2_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)2},
24};
25
26#define VC4_DEBUGFS_ENTRIES ARRAY_SIZE(vc4_debugfs_list)
27
28int
29vc4_debugfs_init(struct drm_minor *minor)
30{
31 return drm_debugfs_create_files(vc4_debugfs_list, VC4_DEBUGFS_ENTRIES,
32 minor->debugfs_root, minor);
33}
34
35void
36vc4_debugfs_cleanup(struct drm_minor *minor)
37{
38 drm_debugfs_remove_files(vc4_debugfs_list, VC4_DEBUGFS_ENTRIES, minor);
39}
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
new file mode 100644
index 000000000000..6e730605edcc
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -0,0 +1,298 @@
1/*
2 * Copyright (C) 2014-2015 Broadcom
3 * Copyright (C) 2013 Red Hat
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/clk.h>
11#include <linux/component.h>
12#include <linux/device.h>
13#include <linux/io.h>
14#include <linux/module.h>
15#include <linux/of_platform.h>
16#include <linux/platform_device.h>
17#include "drm_fb_cma_helper.h"
18
19#include "vc4_drv.h"
20#include "vc4_regs.h"
21
22#define DRIVER_NAME "vc4"
23#define DRIVER_DESC "Broadcom VC4 graphics"
24#define DRIVER_DATE "20140616"
25#define DRIVER_MAJOR 0
26#define DRIVER_MINOR 0
27#define DRIVER_PATCHLEVEL 0
28
29/* Helper function for mapping the regs on a platform device. */
30void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index)
31{
32 struct resource *res;
33 void __iomem *map;
34
35 res = platform_get_resource(dev, IORESOURCE_MEM, index);
36 map = devm_ioremap_resource(&dev->dev, res);
37 if (IS_ERR(map)) {
38 DRM_ERROR("Failed to map registers: %ld\n", PTR_ERR(map));
39 return map;
40 }
41
42 return map;
43}
44
45static void vc4_drm_preclose(struct drm_device *dev, struct drm_file *file)
46{
47 struct drm_crtc *crtc;
48
49 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
50 vc4_cancel_page_flip(crtc, file);
51}
52
53static void vc4_lastclose(struct drm_device *dev)
54{
55 struct vc4_dev *vc4 = to_vc4_dev(dev);
56
57 if (vc4->fbdev)
58 drm_fbdev_cma_restore_mode(vc4->fbdev);
59}
60
61static const struct file_operations vc4_drm_fops = {
62 .owner = THIS_MODULE,
63 .open = drm_open,
64 .release = drm_release,
65 .unlocked_ioctl = drm_ioctl,
66 .mmap = drm_gem_cma_mmap,
67 .poll = drm_poll,
68 .read = drm_read,
69#ifdef CONFIG_COMPAT
70 .compat_ioctl = drm_compat_ioctl,
71#endif
72 .llseek = noop_llseek,
73};
74
75static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
76};
77
78static struct drm_driver vc4_drm_driver = {
79 .driver_features = (DRIVER_MODESET |
80 DRIVER_ATOMIC |
81 DRIVER_GEM |
82 DRIVER_PRIME),
83 .lastclose = vc4_lastclose,
84 .preclose = vc4_drm_preclose,
85
86 .enable_vblank = vc4_enable_vblank,
87 .disable_vblank = vc4_disable_vblank,
88 .get_vblank_counter = drm_vblank_count,
89
90#if defined(CONFIG_DEBUG_FS)
91 .debugfs_init = vc4_debugfs_init,
92 .debugfs_cleanup = vc4_debugfs_cleanup,
93#endif
94
95 .gem_free_object = drm_gem_cma_free_object,
96 .gem_vm_ops = &drm_gem_cma_vm_ops,
97
98 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
99 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
100 .gem_prime_import = drm_gem_prime_import,
101 .gem_prime_export = drm_gem_prime_export,
102 .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
103 .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
104 .gem_prime_vmap = drm_gem_cma_prime_vmap,
105 .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
106 .gem_prime_mmap = drm_gem_cma_prime_mmap,
107
108 .dumb_create = vc4_dumb_create,
109 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
110 .dumb_destroy = drm_gem_dumb_destroy,
111
112 .ioctls = vc4_drm_ioctls,
113 .num_ioctls = ARRAY_SIZE(vc4_drm_ioctls),
114 .fops = &vc4_drm_fops,
115
116 .name = DRIVER_NAME,
117 .desc = DRIVER_DESC,
118 .date = DRIVER_DATE,
119 .major = DRIVER_MAJOR,
120 .minor = DRIVER_MINOR,
121 .patchlevel = DRIVER_PATCHLEVEL,
122};
123
124static int compare_dev(struct device *dev, void *data)
125{
126 return dev == data;
127}
128
/* Builds the component match list for the DRM master device.
 *
 * For each of our sub-device platform drivers, walk the platform bus and
 * add every device that matches that driver as a component dependency.
 * platform_bus_type.match is (ab)used as the bus_find_device() match
 * function, with the driver passed through the data pointer.
 *
 * bus_find_device() returns a referenced device and takes the previous
 * cursor; put_device() drops the reference from the prior iteration
 * (put_device(NULL) on the first pass is a no-op), and the final
 * put_device() after the loop drops the last one.
 */
static void vc4_match_add_drivers(struct device *dev,
				  struct component_match **match,
				  struct platform_driver *const *drivers,
				  int count)
{
	int i;

	for (i = 0; i < count; i++) {
		struct device_driver *drv = &drivers[i]->driver;
		struct device *p = NULL, *d;

		while ((d = bus_find_device(&platform_bus_type, p, drv,
					    (void *)platform_bus_type.match))) {
			put_device(p);
			component_match_add(dev, match, compare_dev, d);
			p = d;
		}
		put_device(p);
	}
}
149
150static int vc4_drm_bind(struct device *dev)
151{
152 struct platform_device *pdev = to_platform_device(dev);
153 struct drm_device *drm;
154 struct drm_connector *connector;
155 struct vc4_dev *vc4;
156 int ret = 0;
157
158 dev->coherent_dma_mask = DMA_BIT_MASK(32);
159
160 vc4 = devm_kzalloc(dev, sizeof(*vc4), GFP_KERNEL);
161 if (!vc4)
162 return -ENOMEM;
163
164 drm = drm_dev_alloc(&vc4_drm_driver, dev);
165 if (!drm)
166 return -ENOMEM;
167 platform_set_drvdata(pdev, drm);
168 vc4->dev = drm;
169 drm->dev_private = vc4;
170
171 drm_dev_set_unique(drm, dev_name(dev));
172
173 drm_mode_config_init(drm);
174 if (ret)
175 goto unref;
176
177 ret = component_bind_all(dev, drm);
178 if (ret)
179 goto unref;
180
181 ret = drm_dev_register(drm, 0);
182 if (ret < 0)
183 goto unbind_all;
184
185 /* Connector registration has to occur after DRM device
186 * registration, because it creates sysfs entries based on the
187 * DRM device.
188 */
189 list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
190 ret = drm_connector_register(connector);
191 if (ret)
192 goto unregister;
193 }
194
195 vc4_kms_load(drm);
196
197 return 0;
198
199unregister:
200 drm_dev_unregister(drm);
201unbind_all:
202 component_unbind_all(dev, drm);
203unref:
204 drm_dev_unref(drm);
205 return ret;
206}
207
/* Master unbind callback: tears down fbdev emulation and the KMS state,
 * then unregisters and releases the DRM device.
 */
static void vc4_drm_unbind(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *drm = platform_get_drvdata(pdev);
	struct vc4_dev *vc4 = to_vc4_dev(drm);

	/* fbdev may be NULL if drm_fbdev_cma_init() failed at load. */
	if (vc4->fbdev)
		drm_fbdev_cma_fini(vc4->fbdev);

	drm_mode_config_cleanup(drm);

	/* Unregisters the device and drops the final reference. */
	drm_put_dev(drm);
}
221
/* component-framework callbacks for the master (vc4-drm) device. */
static const struct component_master_ops vc4_drm_ops = {
	.bind = vc4_drm_bind,
	.unbind = vc4_drm_unbind,
};

/* Sub-device drivers whose bound devices become components of the
 * master; also the registration order in vc4_drm_register().
 */
static struct platform_driver *const component_drivers[] = {
	&vc4_hdmi_driver,
	&vc4_crtc_driver,
	&vc4_hvs_driver,
};
232
233static int vc4_platform_drm_probe(struct platform_device *pdev)
234{
235 struct component_match *match = NULL;
236 struct device *dev = &pdev->dev;
237
238 vc4_match_add_drivers(dev, &match,
239 component_drivers, ARRAY_SIZE(component_drivers));
240
241 return component_master_add_with_match(dev, &vc4_drm_ops, match);
242}
243
244static int vc4_platform_drm_remove(struct platform_device *pdev)
245{
246 component_master_del(&pdev->dev, &vc4_drm_ops);
247
248 return 0;
249}
250
/* OF match for the top-level node that ties the display pipeline
 * together.
 */
static const struct of_device_id vc4_of_match[] = {
	{ .compatible = "brcm,bcm2835-vc4", },
	{},
};
MODULE_DEVICE_TABLE(of, vc4_of_match);

static struct platform_driver vc4_platform_driver = {
	.probe = vc4_platform_drm_probe,
	.remove = vc4_platform_drm_remove,
	.driver = {
		.name = "vc4-drm",
		.owner = THIS_MODULE,
		.of_match_table = vc4_of_match,
	},
};
266
267static int __init vc4_drm_register(void)
268{
269 int i, ret;
270
271 for (i = 0; i < ARRAY_SIZE(component_drivers); i++) {
272 ret = platform_driver_register(component_drivers[i]);
273 if (ret) {
274 while (--i >= 0)
275 platform_driver_unregister(component_drivers[i]);
276 return ret;
277 }
278 }
279 return platform_driver_register(&vc4_platform_driver);
280}
281
282static void __exit vc4_drm_unregister(void)
283{
284 int i;
285
286 for (i = ARRAY_SIZE(component_drivers) - 1; i >= 0; i--)
287 platform_driver_unregister(component_drivers[i]);
288
289 platform_driver_unregister(&vc4_platform_driver);
290}
291
292module_init(vc4_drm_register);
293module_exit(vc4_drm_unregister);
294
295MODULE_ALIAS("platform:vc4-drm");
296MODULE_DESCRIPTION("Broadcom VC4 DRM Driver");
297MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
298MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
new file mode 100644
index 000000000000..fd8319fa682e
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -0,0 +1,145 @@
1/*
2 * Copyright (C) 2015 Broadcom
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include "drmP.h"
10#include "drm_gem_cma_helper.h"
11
/* Per-device driver state, hung off drm_device::dev_private.  The
 * hdmi/hvs/crtc pointers are filled in by the respective component
 * bind callbacks.
 */
struct vc4_dev {
	struct drm_device *dev;

	struct vc4_hdmi *hdmi;
	struct vc4_hvs *hvs;
	struct vc4_crtc *crtc[3];	/* up to three CRTCs (pixel valves) */

	/* fbdev emulation state; NULL if fbdev init failed. */
	struct drm_fbdev_cma *fbdev;
};

static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
	return (struct vc4_dev *)dev->dev_private;
}

/* GEM BO wrapper; currently adds nothing beyond the CMA object. */
struct vc4_bo {
	struct drm_gem_cma_object base;
};

static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
	/* Valid because drm_gem_object is the first member of the CMA
	 * object, which is the first member of vc4_bo.
	 */
	return (struct vc4_bo *)bo;
}
37
/* State for the single, global HVS (compositor) block. */
struct vc4_hvs {
	struct platform_device *pdev;
	void __iomem *regs;
	/* Display list memory; lives inside the HVS register window
	 * (regs + SCALER_DLIST_START, see vc4_hvs_bind()).
	 */
	void __iomem *dlist;
};

struct vc4_plane {
	struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
	return (struct vc4_plane *)plane;
}

/* Which physical output a vc4_encoder drives. */
enum vc4_encoder_type {
	VC4_ENCODER_TYPE_HDMI,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
};

struct vc4_encoder {
	struct drm_encoder base;
	enum vc4_encoder_type type;
	/* NOTE(review): presumably the pixel-clock mux selection for
	 * this output -- the consumers are outside this file; confirm.
	 */
	u32 clock_select;
};

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_encoder, base);
}
74
/* HVS register accessors; both expect a 'struct vc4_dev *vc4' to be in
 * scope at the call site.
 */
#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)

/**
 * _wait_for - magic (register) wait macro
 *
 * Does the right thing for modeset paths when run under kdgb or similar atomic
 * contexts. Note that it's important that we check the condition again after
 * having timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 */
#define _wait_for(COND, MS, W) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			if (!(COND))					\
				ret__ = -ETIMEDOUT;			\
			break;						\
		}							\
		if (W && drm_can_sleep()) {				\
			msleep(W);					\
		} else {						\
			cpu_relax();					\
		}							\
	}								\
	ret__;								\
})

/* Poll COND for up to MS milliseconds, sleeping 1ms between checks
 * whenever sleeping is allowed.
 */
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
105
106/* vc4_bo.c */
107void vc4_free_object(struct drm_gem_object *gem_obj);
108struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size);
109int vc4_dumb_create(struct drm_file *file_priv,
110 struct drm_device *dev,
111 struct drm_mode_create_dumb *args);
112struct dma_buf *vc4_prime_export(struct drm_device *dev,
113 struct drm_gem_object *obj, int flags);
114
115/* vc4_crtc.c */
116extern struct platform_driver vc4_crtc_driver;
117int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id);
118void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id);
119void vc4_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);
120int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
121
122/* vc4_debugfs.c */
123int vc4_debugfs_init(struct drm_minor *minor);
124void vc4_debugfs_cleanup(struct drm_minor *minor);
125
126/* vc4_drv.c */
127void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
128
129/* vc4_hdmi.c */
130extern struct platform_driver vc4_hdmi_driver;
131int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);
132
133/* vc4_hvs.c */
134extern struct platform_driver vc4_hvs_driver;
135void vc4_hvs_dump_state(struct drm_device *dev);
136int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused);
137
138/* vc4_kms.c */
139int vc4_kms_load(struct drm_device *dev);
140
141/* vc4_plane.c */
142struct drm_plane *vc4_plane_init(struct drm_device *dev,
143 enum drm_plane_type type);
144u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
145u32 vc4_plane_dlist_size(struct drm_plane_state *state);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
new file mode 100644
index 000000000000..da9a36d6e1d1
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -0,0 +1,590 @@
1/*
2 * Copyright (C) 2015 Broadcom
3 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
4 * Copyright (C) 2013 Red Hat
5 * Author: Rob Clark <robdclark@gmail.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20/**
21 * DOC: VC4 Falcon HDMI module
22 *
23 * The HDMI core has a state machine and a PHY. Most of the unit
24 * operates off of the HSM clock from CPRMAN. It also internally uses
25 * the PLLH_PIX clock for the PHY.
26 */
27
28#include "drm_atomic_helper.h"
29#include "drm_crtc_helper.h"
30#include "drm_edid.h"
31#include "linux/clk.h"
32#include "linux/component.h"
33#include "linux/i2c.h"
34#include "linux/of_gpio.h"
35#include "linux/of_platform.h"
36#include "vc4_drv.h"
37#include "vc4_regs.h"
38
/* General HDMI hardware state, allocated at component bind time. */
struct vc4_hdmi {
	struct platform_device *pdev;

	struct drm_encoder *encoder;
	struct drm_connector *connector;

	struct i2c_adapter *ddc;	/* DDC bus used for EDID reads */
	void __iomem *hdmicore_regs;
	void __iomem *hd_regs;
	/* Hotplug-detect GPIO from the "hpd-gpios" DT property; 0 means
	 * "not provided", in which case the core's HPD register is used
	 * (see vc4_hdmi_connector_detect()).
	 */
	int hpd_gpio;

	struct clk *pixel_clock;
	struct clk *hsm_clock;		/* HDMI state machine clock */
};
54
/* Register accessors; all expect 'struct vc4_dev *vc4' in scope. */
#define HDMI_READ(offset) readl(vc4->hdmi->hdmicore_regs + offset)
#define HDMI_WRITE(offset, val) writel(val, vc4->hdmi->hdmicore_regs + offset)
#define HD_READ(offset) readl(vc4->hdmi->hd_regs + offset)
#define HD_WRITE(offset, val) writel(val, vc4->hdmi->hd_regs + offset)

/* VC4 HDMI encoder KMS struct */
struct vc4_hdmi_encoder {
	struct vc4_encoder base;
	/* True when the sink's EDID advertises HDMI (vs plain DVI);
	 * set in get_modes() and used to pick the scheduler mode in
	 * vc4_hdmi_encoder_enable().
	 */
	bool hdmi_monitor;
};

static inline struct vc4_hdmi_encoder *
to_vc4_hdmi_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_hdmi_encoder, base.base);
}

/* VC4 HDMI connector KMS struct */
struct vc4_hdmi_connector {
	struct drm_connector base;

	/* Since the connector is attached to just the one encoder,
	 * this is the reference to it so we can do the best_encoder()
	 * hook.
	 */
	struct drm_encoder *encoder;
};

static inline struct vc4_hdmi_connector *
to_vc4_hdmi_connector(struct drm_connector *connector)
{
	return container_of(connector, struct vc4_hdmi_connector, base);
}
88
/* Pairs a register offset with its stringified name, for the debug
 * dump helpers below.
 */
#define HDMI_REG(reg) { reg, #reg }
static const struct {
	u32 reg;
	const char *name;
} hdmi_regs[] = {
	HDMI_REG(VC4_HDMI_CORE_REV),
	HDMI_REG(VC4_HDMI_SW_RESET_CONTROL),
	HDMI_REG(VC4_HDMI_HOTPLUG_INT),
	HDMI_REG(VC4_HDMI_HOTPLUG),
	HDMI_REG(VC4_HDMI_HORZA),
	HDMI_REG(VC4_HDMI_HORZB),
	HDMI_REG(VC4_HDMI_FIFO_CTL),
	HDMI_REG(VC4_HDMI_SCHEDULER_CONTROL),
	HDMI_REG(VC4_HDMI_VERTA0),
	HDMI_REG(VC4_HDMI_VERTA1),
	HDMI_REG(VC4_HDMI_VERTB0),
	HDMI_REG(VC4_HDMI_VERTB1),
	HDMI_REG(VC4_HDMI_TX_PHY_RESET_CTL),
};

/* Registers in the separate "HD" block, accessed via HD_READ/HD_WRITE. */
static const struct {
	u32 reg;
	const char *name;
} hd_regs[] = {
	HDMI_REG(VC4_HD_M_CTL),
	HDMI_REG(VC4_HD_MAI_CTL),
	HDMI_REG(VC4_HD_VID_CTL),
	HDMI_REG(VC4_HD_CSC_CTL),
	HDMI_REG(VC4_HD_FRAME_COUNT),
};
119
#ifdef CONFIG_DEBUG_FS
/* debugfs hook: print the current contents of the HDMI core and HD
 * register banks, one "name (offset): value" line each.
 */
int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(hdmi_regs); i++)
		seq_printf(m, "%s (0x%04x): 0x%08x\n",
			   hdmi_regs[i].name, hdmi_regs[i].reg,
			   HDMI_READ(hdmi_regs[i].reg));

	for (i = 0; i < ARRAY_SIZE(hd_regs); i++)
		seq_printf(m, "%s (0x%04x): 0x%08x\n",
			   hd_regs[i].name, hd_regs[i].reg,
			   HD_READ(hd_regs[i].reg));

	return 0;
}
#endif /* CONFIG_DEBUG_FS */
143
144static void vc4_hdmi_dump_regs(struct drm_device *dev)
145{
146 struct vc4_dev *vc4 = to_vc4_dev(dev);
147 int i;
148
149 for (i = 0; i < ARRAY_SIZE(hdmi_regs); i++) {
150 DRM_INFO("0x%04x (%s): 0x%08x\n",
151 hdmi_regs[i].reg, hdmi_regs[i].name,
152 HDMI_READ(hdmi_regs[i].reg));
153 }
154 for (i = 0; i < ARRAY_SIZE(hd_regs); i++) {
155 DRM_INFO("0x%04x (%s): 0x%08x\n",
156 hd_regs[i].reg, hd_regs[i].name,
157 HD_READ(hd_regs[i].reg));
158 }
159}
160
161static enum drm_connector_status
162vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
163{
164 struct drm_device *dev = connector->dev;
165 struct vc4_dev *vc4 = to_vc4_dev(dev);
166
167 if (vc4->hdmi->hpd_gpio) {
168 if (gpio_get_value(vc4->hdmi->hpd_gpio))
169 return connector_status_connected;
170 else
171 return connector_status_disconnected;
172 }
173
174 if (HDMI_READ(VC4_HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED)
175 return connector_status_connected;
176 else
177 return connector_status_disconnected;
178}
179
/* Deregister the connector from sysfs before releasing its KMS state. */
static void vc4_hdmi_connector_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}
185
186static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
187{
188 struct vc4_hdmi_connector *vc4_connector =
189 to_vc4_hdmi_connector(connector);
190 struct drm_encoder *encoder = vc4_connector->encoder;
191 struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
192 struct drm_device *dev = connector->dev;
193 struct vc4_dev *vc4 = to_vc4_dev(dev);
194 int ret = 0;
195 struct edid *edid;
196
197 edid = drm_get_edid(connector, vc4->hdmi->ddc);
198 if (!edid)
199 return -ENODEV;
200
201 vc4_encoder->hdmi_monitor = drm_detect_hdmi_monitor(edid);
202 drm_mode_connector_update_edid_property(connector, edid);
203 ret = drm_add_edid_modes(connector, edid);
204
205 return ret;
206}
207
208static struct drm_encoder *
209vc4_hdmi_connector_best_encoder(struct drm_connector *connector)
210{
211 struct vc4_hdmi_connector *hdmi_connector =
212 to_vc4_hdmi_connector(connector);
213 return hdmi_connector->encoder;
214}
215
/* Connector callbacks: atomic helpers throughout, plus our own detect
 * and destroy.
 */
static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = vc4_hdmi_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = vc4_hdmi_connector_destroy,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = {
	.get_modes = vc4_hdmi_connector_get_modes,
	.best_encoder = vc4_hdmi_connector_best_encoder,
};
230
231static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
232 struct drm_encoder *encoder)
233{
234 struct drm_connector *connector = NULL;
235 struct vc4_hdmi_connector *hdmi_connector;
236 int ret = 0;
237
238 hdmi_connector = devm_kzalloc(dev->dev, sizeof(*hdmi_connector),
239 GFP_KERNEL);
240 if (!hdmi_connector) {
241 ret = -ENOMEM;
242 goto fail;
243 }
244 connector = &hdmi_connector->base;
245
246 hdmi_connector->encoder = encoder;
247
248 drm_connector_init(dev, connector, &vc4_hdmi_connector_funcs,
249 DRM_MODE_CONNECTOR_HDMIA);
250 drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);
251
252 connector->polled = (DRM_CONNECTOR_POLL_CONNECT |
253 DRM_CONNECTOR_POLL_DISCONNECT);
254
255 connector->interlace_allowed = 0;
256 connector->doublescan_allowed = 0;
257
258 drm_mode_connector_attach_encoder(connector, encoder);
259
260 return connector;
261
262 fail:
263 if (connector)
264 vc4_hdmi_connector_destroy(connector);
265
266 return ERR_PTR(ret);
267}
268
/* The encoder has no driver-private state to free beyond KMS cleanup. */
static void vc4_hdmi_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
}

static const struct drm_encoder_funcs vc4_hdmi_encoder_funcs = {
	.destroy = vc4_hdmi_encoder_destroy,
};
277
/* Programs the HDMI core's video timing from the adjusted mode: pixel
 * clock rate, horizontal/vertical porches and sync widths, sync
 * polarities, and pixel ordering.
 */
static void vc4_hdmi_encoder_mode_set(struct drm_encoder *encoder,
				      struct drm_display_mode *unadjusted_mode,
				      struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	/* Flip to true locally to log register state around the modeset. */
	bool debug_dump_regs = false;
	bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
	bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC;
	/* Active line count is per-field, so halve it for interlace. */
	u32 vactive = (mode->vdisplay >>
		       ((mode->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0));
	/* Vertical sync width, front porch, and active lines. */
	u32 verta = (VC4_SET_FIELD(mode->vsync_end - mode->vsync_start,
				   VC4_HDMI_VERTA_VSP) |
		     VC4_SET_FIELD(mode->vsync_start - mode->vdisplay,
				   VC4_HDMI_VERTA_VFP) |
		     VC4_SET_FIELD(vactive, VC4_HDMI_VERTA_VAL));
	/* Vertical back porch; the odd-field sync offset is left at 0. */
	u32 vertb = (VC4_SET_FIELD(0, VC4_HDMI_VERTB_VSPO) |
		     VC4_SET_FIELD(mode->vtotal - mode->vsync_end,
				   VC4_HDMI_VERTB_VBP));

	if (debug_dump_regs) {
		DRM_INFO("HDMI regs before:\n");
		vc4_hdmi_dump_regs(dev);
	}

	/* Disable video output while reprogramming the timings. */
	HD_WRITE(VC4_HD_VID_CTL, 0);

	/* mode->clock is in kHz; the clock framework wants Hz. */
	clk_set_rate(vc4->hdmi->pixel_clock, mode->clock * 1000);

	HDMI_WRITE(VC4_HDMI_SCHEDULER_CONTROL,
		   HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) |
		   VC4_HDMI_SCHEDULER_CONTROL_MANUAL_FORMAT |
		   VC4_HDMI_SCHEDULER_CONTROL_IGNORE_VSYNC_PREDICTS);

	HDMI_WRITE(VC4_HDMI_HORZA,
		   (vsync_pos ? VC4_HDMI_HORZA_VPOS : 0) |
		   (hsync_pos ? VC4_HDMI_HORZA_HPOS : 0) |
		   VC4_SET_FIELD(mode->hdisplay, VC4_HDMI_HORZA_HAP));

	/* Horizontal back porch, sync width, and front porch. */
	HDMI_WRITE(VC4_HDMI_HORZB,
		   VC4_SET_FIELD(mode->htotal - mode->hsync_end,
				 VC4_HDMI_HORZB_HBP) |
		   VC4_SET_FIELD(mode->hsync_end - mode->hsync_start,
				 VC4_HDMI_HORZB_HSP) |
		   VC4_SET_FIELD(mode->hsync_start - mode->hdisplay,
				 VC4_HDMI_HORZB_HFP));

	HDMI_WRITE(VC4_HDMI_VERTA0, verta);
	HDMI_WRITE(VC4_HDMI_VERTA1, verta);

	HDMI_WRITE(VC4_HDMI_VERTB0, vertb);
	HDMI_WRITE(VC4_HDMI_VERTB1, vertb);

	HD_WRITE(VC4_HD_VID_CTL,
		 (vsync_pos ? 0 : VC4_HD_VID_CTL_VSYNC_LOW) |
		 (hsync_pos ? 0 : VC4_HD_VID_CTL_HSYNC_LOW));

	/* The RGB order applies even when CSC is disabled. */
	HD_WRITE(VC4_HD_CSC_CTL, VC4_SET_FIELD(VC4_HD_CSC_CTL_ORDER_BGR,
					       VC4_HD_CSC_CTL_ORDER));

	HDMI_WRITE(VC4_HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N);

	if (debug_dump_regs) {
		DRM_INFO("HDMI regs after:\n");
		vc4_hdmi_dump_regs(dev);
	}
}
346
/* Disables the output: puts the TX PHY into reset and clears the video
 * enable bit.
 */
static void vc4_hdmi_encoder_disable(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Asserts the four reset bits in the upper half of
	 * TX_PHY_RESET_CTL (presumably one per TMDS lane -- confirm
	 * against the register documentation).
	 */
	HDMI_WRITE(VC4_HDMI_TX_PHY_RESET_CTL, 0xf << 16);
	HD_WRITE(VC4_HD_VID_CTL,
		 HD_READ(VC4_HD_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE);
}
356
/* Enables the output: releases the PHY from reset, turns video on, and
 * switches the scheduler between HDMI and DVI mode based on the sink's
 * EDID (hdmi_monitor, set in get_modes()).
 */
static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
{
	struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	/* Release the TX PHY from reset. */
	HDMI_WRITE(VC4_HDMI_TX_PHY_RESET_CTL, 0);

	HD_WRITE(VC4_HD_VID_CTL,
		 HD_READ(VC4_HD_VID_CTL) |
		 VC4_HD_VID_CTL_ENABLE |
		 VC4_HD_VID_CTL_UNDERFLOW_ENABLE |
		 VC4_HD_VID_CTL_FRAME_COUNTER_RESET);

	if (vc4_encoder->hdmi_monitor) {
		/* HDMI sink: switch the scheduler to HDMI mode and wait
		 * (up to 1ms) for it to take effect.
		 */
		HDMI_WRITE(VC4_HDMI_SCHEDULER_CONTROL,
			   HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) |
			   VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI);

		ret = wait_for(HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) &
			       VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE, 1);
		WARN_ONCE(ret, "Timeout waiting for "
			  "VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE\n");
	} else {
		/* DVI sink: no data-island packets, scheduler in DVI mode. */
		HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG,
			   HDMI_READ(VC4_HDMI_RAM_PACKET_CONFIG) &
			   ~(VC4_HDMI_RAM_PACKET_ENABLE));
		HDMI_WRITE(VC4_HDMI_SCHEDULER_CONTROL,
			   HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) &
			   ~VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI);

		ret = wait_for(!(HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) &
				 VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE), 1);
		WARN_ONCE(ret, "Timeout waiting for "
			  "!VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE\n");
	}

	if (vc4_encoder->hdmi_monitor) {
		u32 drift;

		WARN_ON(!(HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) &
			  VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE));
		HDMI_WRITE(VC4_HDMI_SCHEDULER_CONTROL,
			   HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) |
			   VC4_HDMI_SCHEDULER_CONTROL_VERT_ALWAYS_KEEPOUT);

		/* XXX: Set HDMI_RAM_PACKET_CONFIG (1 << 16) and set
		 * up the infoframe.
		 */

		/* Recenter the FIFO by pulsing the RECENTER bit twice
		 * with a 1ms gap.  NOTE(review): udelay(1000) in a
		 * sleepable context -- usleep_range() would be kinder;
		 * and confirm whether the double pulse is required by
		 * the hardware.
		 */
		drift = HDMI_READ(VC4_HDMI_FIFO_CTL);
		drift &= VC4_HDMI_FIFO_VALID_WRITE_MASK;

		HDMI_WRITE(VC4_HDMI_FIFO_CTL,
			   drift & ~VC4_HDMI_FIFO_CTL_RECENTER);
		HDMI_WRITE(VC4_HDMI_FIFO_CTL,
			   drift | VC4_HDMI_FIFO_CTL_RECENTER);
		udelay(1000);
		HDMI_WRITE(VC4_HDMI_FIFO_CTL,
			   drift & ~VC4_HDMI_FIFO_CTL_RECENTER);
		HDMI_WRITE(VC4_HDMI_FIFO_CTL,
			   drift | VC4_HDMI_FIFO_CTL_RECENTER);

		ret = wait_for(HDMI_READ(VC4_HDMI_FIFO_CTL) &
			       VC4_HDMI_FIFO_CTL_RECENTER_DONE, 1);
		WARN_ONCE(ret, "Timeout waiting for "
			  "VC4_HDMI_FIFO_CTL_RECENTER_DONE");
	}
}
427
/* Modeset helper hooks for the HDMI encoder. */
static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = {
	.mode_set = vc4_hdmi_encoder_mode_set,
	.disable = vc4_hdmi_encoder_disable,
	.enable = vc4_hdmi_encoder_enable,
};
433
434static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
435{
436 struct platform_device *pdev = to_platform_device(dev);
437 struct drm_device *drm = dev_get_drvdata(master);
438 struct vc4_dev *vc4 = drm->dev_private;
439 struct vc4_hdmi *hdmi;
440 struct vc4_hdmi_encoder *vc4_hdmi_encoder;
441 struct device_node *ddc_node;
442 u32 value;
443 int ret;
444
445 hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
446 if (!hdmi)
447 return -ENOMEM;
448
449 vc4_hdmi_encoder = devm_kzalloc(dev, sizeof(*vc4_hdmi_encoder),
450 GFP_KERNEL);
451 if (!vc4_hdmi_encoder)
452 return -ENOMEM;
453 vc4_hdmi_encoder->base.type = VC4_ENCODER_TYPE_HDMI;
454 hdmi->encoder = &vc4_hdmi_encoder->base.base;
455
456 hdmi->pdev = pdev;
457 hdmi->hdmicore_regs = vc4_ioremap_regs(pdev, 0);
458 if (IS_ERR(hdmi->hdmicore_regs))
459 return PTR_ERR(hdmi->hdmicore_regs);
460
461 hdmi->hd_regs = vc4_ioremap_regs(pdev, 1);
462 if (IS_ERR(hdmi->hd_regs))
463 return PTR_ERR(hdmi->hd_regs);
464
465 ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
466 if (!ddc_node) {
467 DRM_ERROR("Failed to find ddc node in device tree\n");
468 return -ENODEV;
469 }
470
471 hdmi->pixel_clock = devm_clk_get(dev, "pixel");
472 if (IS_ERR(hdmi->pixel_clock)) {
473 DRM_ERROR("Failed to get pixel clock\n");
474 return PTR_ERR(hdmi->pixel_clock);
475 }
476 hdmi->hsm_clock = devm_clk_get(dev, "hdmi");
477 if (IS_ERR(hdmi->hsm_clock)) {
478 DRM_ERROR("Failed to get HDMI state machine clock\n");
479 return PTR_ERR(hdmi->hsm_clock);
480 }
481
482 hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
483 if (!hdmi->ddc) {
484 DRM_DEBUG("Failed to get ddc i2c adapter by node\n");
485 return -EPROBE_DEFER;
486 }
487
488 /* Enable the clocks at startup. We can't quite recover from
489 * turning off the pixel clock during disable/enables yet, so
490 * it's always running.
491 */
492 ret = clk_prepare_enable(hdmi->pixel_clock);
493 if (ret) {
494 DRM_ERROR("Failed to turn on pixel clock: %d\n", ret);
495 goto err_put_i2c;
496 }
497
498 ret = clk_prepare_enable(hdmi->hsm_clock);
499 if (ret) {
500 DRM_ERROR("Failed to turn on HDMI state machine clock: %d\n",
501 ret);
502 goto err_unprepare_pix;
503 }
504
505 /* Only use the GPIO HPD pin if present in the DT, otherwise
506 * we'll use the HDMI core's register.
507 */
508 if (of_find_property(dev->of_node, "hpd-gpios", &value)) {
509 hdmi->hpd_gpio = of_get_named_gpio(dev->of_node, "hpd-gpios", 0);
510 if (hdmi->hpd_gpio < 0) {
511 ret = hdmi->hpd_gpio;
512 goto err_unprepare_hsm;
513 }
514 }
515
516 vc4->hdmi = hdmi;
517
518 /* HDMI core must be enabled. */
519 WARN_ON_ONCE((HD_READ(VC4_HD_M_CTL) & VC4_HD_M_ENABLE) == 0);
520
521 drm_encoder_init(drm, hdmi->encoder, &vc4_hdmi_encoder_funcs,
522 DRM_MODE_ENCODER_TMDS);
523 drm_encoder_helper_add(hdmi->encoder, &vc4_hdmi_encoder_helper_funcs);
524
525 hdmi->connector = vc4_hdmi_connector_init(drm, hdmi->encoder);
526 if (IS_ERR(hdmi->connector)) {
527 ret = PTR_ERR(hdmi->connector);
528 goto err_destroy_encoder;
529 }
530
531 return 0;
532
533err_destroy_encoder:
534 vc4_hdmi_encoder_destroy(hdmi->encoder);
535err_unprepare_hsm:
536 clk_disable_unprepare(hdmi->hsm_clock);
537err_unprepare_pix:
538 clk_disable_unprepare(hdmi->pixel_clock);
539err_put_i2c:
540 put_device(&vc4->hdmi->ddc->dev);
541
542 return ret;
543}
544
/* Component unbind: tear down connector and encoder, release the
 * clocks and the DDC adapter reference taken in bind (register
 * mappings and allocations are devm-managed).
 */
static void vc4_hdmi_unbind(struct device *dev, struct device *master,
			    void *data)
{
	struct drm_device *drm = dev_get_drvdata(master);
	struct vc4_dev *vc4 = drm->dev_private;
	struct vc4_hdmi *hdmi = vc4->hdmi;

	vc4_hdmi_connector_destroy(hdmi->connector);
	vc4_hdmi_encoder_destroy(hdmi->encoder);

	clk_disable_unprepare(hdmi->pixel_clock);
	clk_disable_unprepare(hdmi->hsm_clock);
	put_device(&hdmi->ddc->dev);

	vc4->hdmi = NULL;
}
561
/* component-framework glue: the HDMI device is one component of the
 * vc4-drm master, which drives bind/unbind.
 */
static const struct component_ops vc4_hdmi_ops = {
	.bind = vc4_hdmi_bind,
	.unbind = vc4_hdmi_unbind,
};

static int vc4_hdmi_dev_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &vc4_hdmi_ops);
}

static int vc4_hdmi_dev_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &vc4_hdmi_ops);
	return 0;
}

static const struct of_device_id vc4_hdmi_dt_match[] = {
	{ .compatible = "brcm,bcm2835-hdmi" },
	{}
};

struct platform_driver vc4_hdmi_driver = {
	.probe = vc4_hdmi_dev_probe,
	.remove = vc4_hdmi_dev_remove,
	.driver = {
		.name = "vc4_hdmi",
		.of_match_table = vc4_hdmi_dt_match,
	},
};
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
new file mode 100644
index 000000000000..ab1673f672a4
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -0,0 +1,163 @@
1/*
2 * Copyright (C) 2015 Broadcom
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9/**
10 * DOC: VC4 HVS module.
11 *
12 * The HVS is the piece of hardware that does translation, scaling,
13 * colorspace conversion, and compositing of pixels stored in
14 * framebuffers into a FIFO of pixels going out to the Pixel Valve
15 * (CRTC). It operates at the system clock rate (the system audio
16 * clock gate, specifically), which is much higher than the pixel
17 * clock rate.
18 *
19 * There is a single global HVS, with multiple output FIFOs that can
20 * be consumed by the PVs. This file just manages the resources for
21 * the HVS, while the vc4_crtc.c code actually drives HVS setup for
22 * each CRTC.
23 */
24
25#include "linux/component.h"
26#include "vc4_drv.h"
27#include "vc4_regs.h"
28
/* Pairs a register offset with its stringified name, for the debug
 * dump helpers below.
 */
#define HVS_REG(reg) { reg, #reg }
static const struct {
	u32 reg;
	const char *name;
} hvs_regs[] = {
	HVS_REG(SCALER_DISPCTRL),
	HVS_REG(SCALER_DISPSTAT),
	HVS_REG(SCALER_DISPID),
	HVS_REG(SCALER_DISPECTRL),
	HVS_REG(SCALER_DISPPROF),
	HVS_REG(SCALER_DISPDITHER),
	HVS_REG(SCALER_DISPEOLN),
	HVS_REG(SCALER_DISPLIST0),
	HVS_REG(SCALER_DISPLIST1),
	HVS_REG(SCALER_DISPLIST2),
	HVS_REG(SCALER_DISPLSTAT),
	HVS_REG(SCALER_DISPLACT0),
	HVS_REG(SCALER_DISPLACT1),
	HVS_REG(SCALER_DISPLACT2),
	HVS_REG(SCALER_DISPCTRL0),
	HVS_REG(SCALER_DISPBKGND0),
	HVS_REG(SCALER_DISPSTAT0),
	HVS_REG(SCALER_DISPBASE0),
	HVS_REG(SCALER_DISPCTRL1),
	HVS_REG(SCALER_DISPBKGND1),
	HVS_REG(SCALER_DISPSTAT1),
	HVS_REG(SCALER_DISPBASE1),
	HVS_REG(SCALER_DISPCTRL2),
	HVS_REG(SCALER_DISPBKGND2),
	HVS_REG(SCALER_DISPSTAT2),
	HVS_REG(SCALER_DISPBASE2),
	HVS_REG(SCALER_DISPALPHA2),
};
62
/* Dumps the HVS registers and the start of display-list memory to the
 * kernel log, for bring-up debug.
 */
void vc4_hvs_dump_state(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(hvs_regs); i++) {
		DRM_INFO("0x%04x (%s): 0x%08x\n",
			 hvs_regs[i].reg, hvs_regs[i].name,
			 HVS_READ(hvs_regs[i].reg));
	}

	/* Print the first 64 dlist words, four per line; 'i' counts
	 * 32-bit words so the displayed offset is i * 4 bytes.  Entries
	 * below HVS_BOOTLOADER_DLIST_END are tagged "B" (presumably
	 * bootloader-owned), the rest "D".
	 */
	DRM_INFO("HVS ctx:\n");
	for (i = 0; i < 64; i += 4) {
		DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n",
			 i * 4, i < HVS_BOOTLOADER_DLIST_END ? "B" : "D",
			 ((uint32_t *)vc4->hvs->dlist)[i + 0],
			 ((uint32_t *)vc4->hvs->dlist)[i + 1],
			 ((uint32_t *)vc4->hvs->dlist)[i + 2],
			 ((uint32_t *)vc4->hvs->dlist)[i + 3]);
	}
}
84
#ifdef CONFIG_DEBUG_FS
/* debugfs hook: print all named HVS registers and their values. */
int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(hvs_regs); i++)
		seq_printf(m, "%s (0x%04x): 0x%08x\n",
			   hvs_regs[i].name, hvs_regs[i].reg,
			   HVS_READ(hvs_regs[i].reg));

	return 0;
}
#endif
102
103static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
104{
105 struct platform_device *pdev = to_platform_device(dev);
106 struct drm_device *drm = dev_get_drvdata(master);
107 struct vc4_dev *vc4 = drm->dev_private;
108 struct vc4_hvs *hvs = NULL;
109
110 hvs = devm_kzalloc(&pdev->dev, sizeof(*hvs), GFP_KERNEL);
111 if (!hvs)
112 return -ENOMEM;
113
114 hvs->pdev = pdev;
115
116 hvs->regs = vc4_ioremap_regs(pdev, 0);
117 if (IS_ERR(hvs->regs))
118 return PTR_ERR(hvs->regs);
119
120 hvs->dlist = hvs->regs + SCALER_DLIST_START;
121
122 vc4->hvs = hvs;
123 return 0;
124}
125
126static void vc4_hvs_unbind(struct device *dev, struct device *master,
127 void *data)
128{
129 struct drm_device *drm = dev_get_drvdata(master);
130 struct vc4_dev *vc4 = drm->dev_private;
131
132 vc4->hvs = NULL;
133}
134
/* component-framework glue: the HVS is one component of the vc4-drm
 * master, which drives bind/unbind.
 */
static const struct component_ops vc4_hvs_ops = {
	.bind = vc4_hvs_bind,
	.unbind = vc4_hvs_unbind,
};

static int vc4_hvs_dev_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &vc4_hvs_ops);
}

static int vc4_hvs_dev_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &vc4_hvs_ops);
	return 0;
}

static const struct of_device_id vc4_hvs_dt_match[] = {
	{ .compatible = "brcm,bcm2835-hvs" },
	{}
};

struct platform_driver vc4_hvs_driver = {
	.probe = vc4_hvs_dev_probe,
	.remove = vc4_hvs_dev_remove,
	.driver = {
		.name = "vc4_hvs",
		.of_match_table = vc4_hvs_dt_match,
	},
};
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
new file mode 100644
index 000000000000..2e5597d10cc6
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -0,0 +1,67 @@
1/*
2 * Copyright (C) 2015 Broadcom
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9/**
10 * DOC: VC4 KMS
11 *
12 * This is the general code for implementing KMS mode setting that
13 * doesn't clearly associate with any of the other objects (plane,
14 * crtc, HDMI encoder).
15 */
16
17#include "drm_crtc.h"
18#include "drm_atomic_helper.h"
19#include "drm_crtc_helper.h"
20#include "drm_plane_helper.h"
21#include "drm_fb_cma_helper.h"
22#include "vc4_drv.h"
23
24static void vc4_output_poll_changed(struct drm_device *dev)
25{
26 struct vc4_dev *vc4 = to_vc4_dev(dev);
27
28 if (vc4->fbdev)
29 drm_fbdev_cma_hotplug_event(vc4->fbdev);
30}
31
32static const struct drm_mode_config_funcs vc4_mode_funcs = {
33 .output_poll_changed = vc4_output_poll_changed,
34 .atomic_check = drm_atomic_helper_check,
35 .atomic_commit = drm_atomic_helper_commit,
36 .fb_create = drm_fb_cma_create,
37};
38
39int vc4_kms_load(struct drm_device *dev)
40{
41 struct vc4_dev *vc4 = to_vc4_dev(dev);
42 int ret;
43
44 ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
45 if (ret < 0) {
46 dev_err(dev->dev, "failed to initialize vblank\n");
47 return ret;
48 }
49
50 dev->mode_config.max_width = 2048;
51 dev->mode_config.max_height = 2048;
52 dev->mode_config.funcs = &vc4_mode_funcs;
53 dev->mode_config.preferred_depth = 24;
54 dev->vblank_disable_allowed = true;
55
56 drm_mode_config_reset(dev);
57
58 vc4->fbdev = drm_fbdev_cma_init(dev, 32,
59 dev->mode_config.num_crtc,
60 dev->mode_config.num_connector);
61 if (IS_ERR(vc4->fbdev))
62 vc4->fbdev = NULL;
63
64 drm_kms_helper_poll_init(dev);
65
66 return 0;
67}
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
new file mode 100644
index 000000000000..cdd8b10c0147
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -0,0 +1,320 @@
1/*
2 * Copyright (C) 2015 Broadcom
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9/**
10 * DOC: VC4 plane module
11 *
12 * Each DRM plane is a layer of pixels being scanned out by the HVS.
13 *
14 * At atomic modeset check time, we compute the HVS display element
15 * state that would be necessary for displaying the plane (giving us a
16 * chance to figure out if a plane configuration is invalid), then at
17 * atomic flush time the CRTC will ask us to write our element state
18 * into the region of the HVS that it has allocated for us.
19 */
20
21#include "vc4_drv.h"
22#include "vc4_regs.h"
23#include "drm_atomic_helper.h"
24#include "drm_fb_cma_helper.h"
25#include "drm_plane_helper.h"
26
27struct vc4_plane_state {
28 struct drm_plane_state base;
29 u32 *dlist;
30 u32 dlist_size; /* Number of dwords in allocated for the display list */
31 u32 dlist_count; /* Number of used dwords in the display list. */
32};
33
static inline struct vc4_plane_state *
to_vc4_plane_state(struct drm_plane_state *state)
{
	/* Downcast is safe: base is the first member of struct vc4_plane_state. */
	return (struct vc4_plane_state *)state;
}
39
40static const struct hvs_format {
41 u32 drm; /* DRM_FORMAT_* */
42 u32 hvs; /* HVS_FORMAT_* */
43 u32 pixel_order;
44 bool has_alpha;
45} hvs_formats[] = {
46 {
47 .drm = DRM_FORMAT_XRGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
48 .pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = false,
49 },
50 {
51 .drm = DRM_FORMAT_ARGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
52 .pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true,
53 },
54};
55
56static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
57{
58 unsigned i;
59
60 for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
61 if (hvs_formats[i].drm == drm_format)
62 return &hvs_formats[i];
63 }
64
65 return NULL;
66}
67
68static bool plane_enabled(struct drm_plane_state *state)
69{
70 return state->fb && state->crtc;
71}
72
73struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
74{
75 struct vc4_plane_state *vc4_state;
76
77 if (WARN_ON(!plane->state))
78 return NULL;
79
80 vc4_state = kmemdup(plane->state, sizeof(*vc4_state), GFP_KERNEL);
81 if (!vc4_state)
82 return NULL;
83
84 __drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base);
85
86 if (vc4_state->dlist) {
87 vc4_state->dlist = kmemdup(vc4_state->dlist,
88 vc4_state->dlist_count * 4,
89 GFP_KERNEL);
90 if (!vc4_state->dlist) {
91 kfree(vc4_state);
92 return NULL;
93 }
94 vc4_state->dlist_size = vc4_state->dlist_count;
95 }
96
97 return &vc4_state->base;
98}
99
100void vc4_plane_destroy_state(struct drm_plane *plane,
101 struct drm_plane_state *state)
102{
103 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
104
105 kfree(vc4_state->dlist);
106 __drm_atomic_helper_plane_destroy_state(plane, &vc4_state->base);
107 kfree(state);
108}
109
110/* Called during init to allocate the plane's atomic state. */
111void vc4_plane_reset(struct drm_plane *plane)
112{
113 struct vc4_plane_state *vc4_state;
114
115 WARN_ON(plane->state);
116
117 vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
118 if (!vc4_state)
119 return;
120
121 plane->state = &vc4_state->base;
122 vc4_state->base.plane = plane;
123}
124
125static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
126{
127 if (vc4_state->dlist_count == vc4_state->dlist_size) {
128 u32 new_size = max(4u, vc4_state->dlist_count * 2);
129 u32 *new_dlist = kmalloc(new_size * 4, GFP_KERNEL);
130
131 if (!new_dlist)
132 return;
133 memcpy(new_dlist, vc4_state->dlist, vc4_state->dlist_count * 4);
134
135 kfree(vc4_state->dlist);
136 vc4_state->dlist = new_dlist;
137 vc4_state->dlist_size = new_size;
138 }
139
140 vc4_state->dlist[vc4_state->dlist_count++] = val;
141}
142
143/* Writes out a full display list for an active plane to the plane's
144 * private dlist state.
145 */
146static int vc4_plane_mode_set(struct drm_plane *plane,
147 struct drm_plane_state *state)
148{
149 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
150 struct drm_framebuffer *fb = state->fb;
151 struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
152 u32 ctl0_offset = vc4_state->dlist_count;
153 const struct hvs_format *format = vc4_get_hvs_format(fb->pixel_format);
154 uint32_t offset = fb->offsets[0];
155 int crtc_x = state->crtc_x;
156 int crtc_y = state->crtc_y;
157 int crtc_w = state->crtc_w;
158 int crtc_h = state->crtc_h;
159
160 if (crtc_x < 0) {
161 offset += drm_format_plane_cpp(fb->pixel_format, 0) * -crtc_x;
162 crtc_w += crtc_x;
163 crtc_x = 0;
164 }
165
166 if (crtc_y < 0) {
167 offset += fb->pitches[0] * -crtc_y;
168 crtc_h += crtc_y;
169 crtc_y = 0;
170 }
171
172 vc4_dlist_write(vc4_state,
173 SCALER_CTL0_VALID |
174 (format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
175 (format->hvs << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
176 SCALER_CTL0_UNITY);
177
178 /* Position Word 0: Image Positions and Alpha Value */
179 vc4_dlist_write(vc4_state,
180 VC4_SET_FIELD(0xff, SCALER_POS0_FIXED_ALPHA) |
181 VC4_SET_FIELD(crtc_x, SCALER_POS0_START_X) |
182 VC4_SET_FIELD(crtc_y, SCALER_POS0_START_Y));
183
184 /* Position Word 1: Scaled Image Dimensions.
185 * Skipped due to SCALER_CTL0_UNITY scaling.
186 */
187
188 /* Position Word 2: Source Image Size, Alpha Mode */
189 vc4_dlist_write(vc4_state,
190 VC4_SET_FIELD(format->has_alpha ?
191 SCALER_POS2_ALPHA_MODE_PIPELINE :
192 SCALER_POS2_ALPHA_MODE_FIXED,
193 SCALER_POS2_ALPHA_MODE) |
194 VC4_SET_FIELD(crtc_w, SCALER_POS2_WIDTH) |
195 VC4_SET_FIELD(crtc_h, SCALER_POS2_HEIGHT));
196
197 /* Position Word 3: Context. Written by the HVS. */
198 vc4_dlist_write(vc4_state, 0xc0c0c0c0);
199
200 /* Pointer Word 0: RGB / Y Pointer */
201 vc4_dlist_write(vc4_state, bo->paddr + offset);
202
203 /* Pointer Context Word 0: Written by the HVS */
204 vc4_dlist_write(vc4_state, 0xc0c0c0c0);
205
206 /* Pitch word 0: Pointer 0 Pitch */
207 vc4_dlist_write(vc4_state,
208 VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH));
209
210 vc4_state->dlist[ctl0_offset] |=
211 VC4_SET_FIELD(vc4_state->dlist_count, SCALER_CTL0_SIZE);
212
213 return 0;
214}
215
216/* If a modeset involves changing the setup of a plane, the atomic
217 * infrastructure will call this to validate a proposed plane setup.
218 * However, if a plane isn't getting updated, this (and the
219 * corresponding vc4_plane_atomic_update) won't get called. Thus, we
220 * compute the dlist here and have all active plane dlists get updated
221 * in the CRTC's flush.
222 */
223static int vc4_plane_atomic_check(struct drm_plane *plane,
224 struct drm_plane_state *state)
225{
226 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
227
228 vc4_state->dlist_count = 0;
229
230 if (plane_enabled(state))
231 return vc4_plane_mode_set(plane, state);
232 else
233 return 0;
234}
235
static void vc4_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	/* Intentionally empty.  We don't know where in the CRTC's dlist
	 * this plane lands, so the CRTC uploads our dlist through
	 * vc4_plane_write_dlist() during its atomic_flush.
	 */
}
245
246u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
247{
248 struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
249 int i;
250
251 /* Can't memcpy_toio() because it needs to be 32-bit writes. */
252 for (i = 0; i < vc4_state->dlist_count; i++)
253 writel(vc4_state->dlist[i], &dlist[i]);
254
255 return vc4_state->dlist_count;
256}
257
258u32 vc4_plane_dlist_size(struct drm_plane_state *state)
259{
260 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
261
262 return vc4_state->dlist_count;
263}
264
265static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
266 .prepare_fb = NULL,
267 .cleanup_fb = NULL,
268 .atomic_check = vc4_plane_atomic_check,
269 .atomic_update = vc4_plane_atomic_update,
270};
271
static void vc4_plane_destroy(struct drm_plane *plane)
{
	/* Disable through the atomic helpers before tearing the plane down. */
	drm_plane_helper_disable(plane);
	drm_plane_cleanup(plane);
}
277
278static const struct drm_plane_funcs vc4_plane_funcs = {
279 .update_plane = drm_atomic_helper_update_plane,
280 .disable_plane = drm_atomic_helper_disable_plane,
281 .destroy = vc4_plane_destroy,
282 .set_property = NULL,
283 .reset = vc4_plane_reset,
284 .atomic_duplicate_state = vc4_plane_duplicate_state,
285 .atomic_destroy_state = vc4_plane_destroy_state,
286};
287
288struct drm_plane *vc4_plane_init(struct drm_device *dev,
289 enum drm_plane_type type)
290{
291 struct drm_plane *plane = NULL;
292 struct vc4_plane *vc4_plane;
293 u32 formats[ARRAY_SIZE(hvs_formats)];
294 int ret = 0;
295 unsigned i;
296
297 vc4_plane = devm_kzalloc(dev->dev, sizeof(*vc4_plane),
298 GFP_KERNEL);
299 if (!vc4_plane) {
300 ret = -ENOMEM;
301 goto fail;
302 }
303
304 for (i = 0; i < ARRAY_SIZE(hvs_formats); i++)
305 formats[i] = hvs_formats[i].drm;
306 plane = &vc4_plane->base;
307 ret = drm_universal_plane_init(dev, plane, 0xff,
308 &vc4_plane_funcs,
309 formats, ARRAY_SIZE(formats),
310 type);
311
312 drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
313
314 return plane;
315fail:
316 if (plane)
317 vc4_plane_destroy(plane);
318
319 return ERR_PTR(ret);
320}
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
new file mode 100644
index 000000000000..9e4e904c668e
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -0,0 +1,570 @@
1/*
2 * Copyright © 2014-2015 Broadcom
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef VC4_REGS_H
10#define VC4_REGS_H
11
12#include <linux/bitops.h>
13
14#define VC4_MASK(high, low) ((u32)GENMASK(high, low))
15/* Using the GNU statement expression extension */
16#define VC4_SET_FIELD(value, field) \
17 ({ \
18 uint32_t fieldval = (value) << field##_SHIFT; \
19 WARN_ON((fieldval & ~field##_MASK) != 0); \
20 fieldval & field##_MASK; \
21 })
22
23#define VC4_GET_FIELD(word, field) (((word) & field##_MASK) >> \
24 field##_SHIFT)
25
26#define V3D_IDENT0 0x00000
27# define V3D_EXPECTED_IDENT0 \
28 ((2 << 24) | \
29 ('V' << 0) | \
30 ('3' << 8) | \
31 ('D' << 16))
32
33#define V3D_IDENT1 0x00004
34/* Multiples of 1kb */
35# define V3D_IDENT1_VPM_SIZE_MASK VC4_MASK(31, 28)
36# define V3D_IDENT1_VPM_SIZE_SHIFT 28
37# define V3D_IDENT1_NSEM_MASK VC4_MASK(23, 16)
38# define V3D_IDENT1_NSEM_SHIFT 16
39# define V3D_IDENT1_TUPS_MASK VC4_MASK(15, 12)
40# define V3D_IDENT1_TUPS_SHIFT 12
41# define V3D_IDENT1_QUPS_MASK VC4_MASK(11, 8)
42# define V3D_IDENT1_QUPS_SHIFT 8
43# define V3D_IDENT1_NSLC_MASK VC4_MASK(7, 4)
44# define V3D_IDENT1_NSLC_SHIFT 4
45# define V3D_IDENT1_REV_MASK VC4_MASK(3, 0)
46# define V3D_IDENT1_REV_SHIFT 0
47
48#define V3D_IDENT2 0x00008
49#define V3D_SCRATCH 0x00010
50#define V3D_L2CACTL 0x00020
51# define V3D_L2CACTL_L2CCLR BIT(2)
52# define V3D_L2CACTL_L2CDIS BIT(1)
53# define V3D_L2CACTL_L2CENA BIT(0)
54
55#define V3D_SLCACTL 0x00024
56# define V3D_SLCACTL_T1CC_MASK VC4_MASK(27, 24)
57# define V3D_SLCACTL_T1CC_SHIFT 24
58# define V3D_SLCACTL_T0CC_MASK VC4_MASK(19, 16)
59# define V3D_SLCACTL_T0CC_SHIFT 16
60# define V3D_SLCACTL_UCC_MASK VC4_MASK(11, 8)
61# define V3D_SLCACTL_UCC_SHIFT 8
62# define V3D_SLCACTL_ICC_MASK VC4_MASK(3, 0)
63# define V3D_SLCACTL_ICC_SHIFT 0
64
65#define V3D_INTCTL 0x00030
66#define V3D_INTENA 0x00034
67#define V3D_INTDIS 0x00038
68# define V3D_INT_SPILLUSE BIT(3)
69# define V3D_INT_OUTOMEM BIT(2)
70# define V3D_INT_FLDONE BIT(1)
71# define V3D_INT_FRDONE BIT(0)
72
#define V3D_CT0CS 0x00100
#define V3D_CT1CS 0x00104
/* Parenthesize the argument so e.g. V3D_CTNCS(i + 1) expands correctly,
 * matching the sibling V3D_CTNEA()/V3D_CTNCA() macros.
 */
#define V3D_CTNCS(n) (V3D_CT0CS + 4 * (n))
76# define V3D_CTRSTA BIT(15)
77# define V3D_CTSEMA BIT(12)
78# define V3D_CTRTSD BIT(8)
79# define V3D_CTRUN BIT(5)
80# define V3D_CTSUBS BIT(4)
81# define V3D_CTERR BIT(3)
82# define V3D_CTMODE BIT(0)
83
84#define V3D_CT0EA 0x00108
85#define V3D_CT1EA 0x0010c
86#define V3D_CTNEA(n) (V3D_CT0EA + 4 * (n))
87#define V3D_CT0CA 0x00110
88#define V3D_CT1CA 0x00114
89#define V3D_CTNCA(n) (V3D_CT0CA + 4 * (n))
90#define V3D_CT00RA0 0x00118
91#define V3D_CT01RA0 0x0011c
92#define V3D_CTNRA0(n) (V3D_CT00RA0 + 4 * (n))
93#define V3D_CT0LC 0x00120
94#define V3D_CT1LC 0x00124
95#define V3D_CTNLC(n) (V3D_CT0LC + 4 * (n))
96#define V3D_CT0PC 0x00128
97#define V3D_CT1PC 0x0012c
98#define V3D_CTNPC(n) (V3D_CT0PC + 4 * (n))
99
100#define V3D_PCS 0x00130
101# define V3D_BMOOM BIT(8)
102# define V3D_RMBUSY BIT(3)
103# define V3D_RMACTIVE BIT(2)
104# define V3D_BMBUSY BIT(1)
105# define V3D_BMACTIVE BIT(0)
106
107#define V3D_BFC 0x00134
108#define V3D_RFC 0x00138
109#define V3D_BPCA 0x00300
110#define V3D_BPCS 0x00304
111#define V3D_BPOA 0x00308
112#define V3D_BPOS 0x0030c
113#define V3D_BXCF 0x00310
114#define V3D_SQRSV0 0x00410
115#define V3D_SQRSV1 0x00414
116#define V3D_SQCNTL 0x00418
117#define V3D_SRQPC 0x00430
118#define V3D_SRQUA 0x00434
119#define V3D_SRQUL 0x00438
120#define V3D_SRQCS 0x0043c
121#define V3D_VPACNTL 0x00500
122#define V3D_VPMBASE 0x00504
123#define V3D_PCTRC 0x00670
124#define V3D_PCTRE 0x00674
125#define V3D_PCTR0 0x00680
126#define V3D_PCTRS0 0x00684
127#define V3D_PCTR1 0x00688
128#define V3D_PCTRS1 0x0068c
129#define V3D_PCTR2 0x00690
130#define V3D_PCTRS2 0x00694
131#define V3D_PCTR3 0x00698
132#define V3D_PCTRS3 0x0069c
133#define V3D_PCTR4 0x006a0
134#define V3D_PCTRS4 0x006a4
135#define V3D_PCTR5 0x006a8
136#define V3D_PCTRS5 0x006ac
137#define V3D_PCTR6 0x006b0
138#define V3D_PCTRS6 0x006b4
139#define V3D_PCTR7 0x006b8
140#define V3D_PCTRS7 0x006bc
141#define V3D_PCTR8 0x006c0
142#define V3D_PCTRS8 0x006c4
143#define V3D_PCTR9 0x006c8
144#define V3D_PCTRS9 0x006cc
145#define V3D_PCTR10 0x006d0
146#define V3D_PCTRS10 0x006d4
147#define V3D_PCTR11 0x006d8
148#define V3D_PCTRS11 0x006dc
149#define V3D_PCTR12 0x006e0
150#define V3D_PCTRS12 0x006e4
151#define V3D_PCTR13 0x006e8
152#define V3D_PCTRS13 0x006ec
153#define V3D_PCTR14 0x006f0
154#define V3D_PCTRS14 0x006f4
155#define V3D_PCTR15 0x006f8
156#define V3D_PCTRS15 0x006fc
157#define V3D_BGE 0x00f00
158#define V3D_FDBGO 0x00f04
159#define V3D_FDBGB 0x00f08
160#define V3D_FDBGR 0x00f0c
161#define V3D_FDBGS 0x00f10
162#define V3D_ERRSTAT 0x00f20
163
164#define PV_CONTROL 0x00
165# define PV_CONTROL_FORMAT_MASK VC4_MASK(23, 21)
166# define PV_CONTROL_FORMAT_SHIFT 21
167# define PV_CONTROL_FORMAT_24 0
168# define PV_CONTROL_FORMAT_DSIV_16 1
169# define PV_CONTROL_FORMAT_DSIC_16 2
170# define PV_CONTROL_FORMAT_DSIV_18 3
171# define PV_CONTROL_FORMAT_DSIV_24 4
172
173# define PV_CONTROL_FIFO_LEVEL_MASK VC4_MASK(20, 15)
174# define PV_CONTROL_FIFO_LEVEL_SHIFT 15
175# define PV_CONTROL_CLR_AT_START BIT(14)
176# define PV_CONTROL_TRIGGER_UNDERFLOW BIT(13)
177# define PV_CONTROL_WAIT_HSTART BIT(12)
178# define PV_CONTROL_CLK_SELECT_DSI_VEC 0
179# define PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI 1
180# define PV_CONTROL_CLK_SELECT_MASK VC4_MASK(3, 2)
181# define PV_CONTROL_CLK_SELECT_SHIFT 2
182# define PV_CONTROL_FIFO_CLR BIT(1)
183# define PV_CONTROL_EN BIT(0)
184
185#define PV_V_CONTROL 0x04
186# define PV_VCONTROL_INTERLACE BIT(4)
187# define PV_VCONTROL_CONTINUOUS BIT(1)
188# define PV_VCONTROL_VIDEN BIT(0)
189
190#define PV_VSYNCD 0x08
191
192#define PV_HORZA 0x0c
193# define PV_HORZA_HBP_MASK VC4_MASK(31, 16)
194# define PV_HORZA_HBP_SHIFT 16
195# define PV_HORZA_HSYNC_MASK VC4_MASK(15, 0)
196# define PV_HORZA_HSYNC_SHIFT 0
197
198#define PV_HORZB 0x10
199# define PV_HORZB_HFP_MASK VC4_MASK(31, 16)
200# define PV_HORZB_HFP_SHIFT 16
201# define PV_HORZB_HACTIVE_MASK VC4_MASK(15, 0)
202# define PV_HORZB_HACTIVE_SHIFT 0
203
204#define PV_VERTA 0x14
205# define PV_VERTA_VBP_MASK VC4_MASK(31, 16)
206# define PV_VERTA_VBP_SHIFT 16
207# define PV_VERTA_VSYNC_MASK VC4_MASK(15, 0)
208# define PV_VERTA_VSYNC_SHIFT 0
209
210#define PV_VERTB 0x18
211# define PV_VERTB_VFP_MASK VC4_MASK(31, 16)
212# define PV_VERTB_VFP_SHIFT 16
213# define PV_VERTB_VACTIVE_MASK VC4_MASK(15, 0)
214# define PV_VERTB_VACTIVE_SHIFT 0
215
216#define PV_VERTA_EVEN 0x1c
217#define PV_VERTB_EVEN 0x20
218
219#define PV_INTEN 0x24
220#define PV_INTSTAT 0x28
221# define PV_INT_VID_IDLE BIT(9)
222# define PV_INT_VFP_END BIT(8)
223# define PV_INT_VFP_START BIT(7)
224# define PV_INT_VACT_START BIT(6)
225# define PV_INT_VBP_START BIT(5)
226# define PV_INT_VSYNC_START BIT(4)
227# define PV_INT_HFP_START BIT(3)
228# define PV_INT_HACT_START BIT(2)
229# define PV_INT_HBP_START BIT(1)
230# define PV_INT_HSYNC_START BIT(0)
231
232#define PV_STAT 0x2c
233
234#define PV_HACT_ACT 0x30
235
236#define SCALER_DISPCTRL 0x00000000
237/* Global register for clock gating the HVS */
238# define SCALER_DISPCTRL_ENABLE BIT(31)
239# define SCALER_DISPCTRL_DSP2EISLUR BIT(15)
240# define SCALER_DISPCTRL_DSP1EISLUR BIT(14)
241/* Enables Display 0 short line and underrun contribution to
242 * SCALER_DISPSTAT_IRQDISP0. Note that short frame contributions are
243 * always enabled.
244 */
245# define SCALER_DISPCTRL_DSP0EISLUR BIT(13)
246# define SCALER_DISPCTRL_DSP2EIEOLN BIT(12)
247# define SCALER_DISPCTRL_DSP2EIEOF BIT(11)
248# define SCALER_DISPCTRL_DSP1EIEOLN BIT(10)
249# define SCALER_DISPCTRL_DSP1EIEOF BIT(9)
250/* Enables Display 0 end-of-line-N contribution to
251 * SCALER_DISPSTAT_IRQDISP0
252 */
253# define SCALER_DISPCTRL_DSP0EIEOLN BIT(8)
254/* Enables Display 0 EOF contribution to SCALER_DISPSTAT_IRQDISP0 */
255# define SCALER_DISPCTRL_DSP0EIEOF BIT(7)
256
257# define SCALER_DISPCTRL_SLVRDEIRQ BIT(6)
258# define SCALER_DISPCTRL_SLVWREIRQ BIT(5)
259# define SCALER_DISPCTRL_DMAEIRQ BIT(4)
260# define SCALER_DISPCTRL_DISP2EIRQ BIT(3)
261# define SCALER_DISPCTRL_DISP1EIRQ BIT(2)
262/* Enables interrupt generation on the enabled EOF/EOLN/EISLUR
263 * bits and short frames..
264 */
265# define SCALER_DISPCTRL_DISP0EIRQ BIT(1)
266/* Enables interrupt generation on scaler profiler interrupt. */
267# define SCALER_DISPCTRL_SCLEIRQ BIT(0)
268
269#define SCALER_DISPSTAT 0x00000004
270# define SCALER_DISPSTAT_COBLOW2 BIT(29)
271# define SCALER_DISPSTAT_EOLN2 BIT(28)
272# define SCALER_DISPSTAT_ESFRAME2 BIT(27)
273# define SCALER_DISPSTAT_ESLINE2 BIT(26)
274# define SCALER_DISPSTAT_EUFLOW2 BIT(25)
275# define SCALER_DISPSTAT_EOF2 BIT(24)
276
277# define SCALER_DISPSTAT_COBLOW1 BIT(21)
278# define SCALER_DISPSTAT_EOLN1 BIT(20)
279# define SCALER_DISPSTAT_ESFRAME1 BIT(19)
280# define SCALER_DISPSTAT_ESLINE1 BIT(18)
281# define SCALER_DISPSTAT_EUFLOW1 BIT(17)
282# define SCALER_DISPSTAT_EOF1 BIT(16)
283
284# define SCALER_DISPSTAT_RESP_MASK VC4_MASK(15, 14)
285# define SCALER_DISPSTAT_RESP_SHIFT 14
286# define SCALER_DISPSTAT_RESP_OKAY 0
287# define SCALER_DISPSTAT_RESP_EXOKAY 1
288# define SCALER_DISPSTAT_RESP_SLVERR 2
289# define SCALER_DISPSTAT_RESP_DECERR 3
290
291# define SCALER_DISPSTAT_COBLOW0 BIT(13)
292/* Set when the DISPEOLN line is done compositing. */
293# define SCALER_DISPSTAT_EOLN0 BIT(12)
294/* Set when VSTART is seen but there are still pixels in the current
295 * output line.
296 */
297# define SCALER_DISPSTAT_ESFRAME0 BIT(11)
298/* Set when HSTART is seen but there are still pixels in the current
299 * output line.
300 */
301# define SCALER_DISPSTAT_ESLINE0 BIT(10)
/* Set when the downstream tries to read from the display FIFO
 * while it's empty.
 */
305# define SCALER_DISPSTAT_EUFLOW0 BIT(9)
306/* Set when the display mode changes from RUN to EOF */
307# define SCALER_DISPSTAT_EOF0 BIT(8)
308
309/* Set on AXI invalid DMA ID error. */
310# define SCALER_DISPSTAT_DMA_ERROR BIT(7)
311/* Set on AXI slave read decode error */
312# define SCALER_DISPSTAT_IRQSLVRD BIT(6)
313/* Set on AXI slave write decode error */
314# define SCALER_DISPSTAT_IRQSLVWR BIT(5)
315/* Set when SCALER_DISPSTAT_DMA_ERROR is set, or
316 * SCALER_DISPSTAT_RESP_ERROR is not SCALER_DISPSTAT_RESP_OKAY.
317 */
318# define SCALER_DISPSTAT_IRQDMA BIT(4)
319# define SCALER_DISPSTAT_IRQDISP2 BIT(3)
320# define SCALER_DISPSTAT_IRQDISP1 BIT(2)
321/* Set when any of the EOF/EOLN/ESFRAME/ESLINE bits are set and their
322 * corresponding interrupt bit is enabled in DISPCTRL.
323 */
324# define SCALER_DISPSTAT_IRQDISP0 BIT(1)
325/* On read, the profiler interrupt. On write, clear *all* interrupt bits. */
326# define SCALER_DISPSTAT_IRQSCL BIT(0)
327
328#define SCALER_DISPID 0x00000008
329#define SCALER_DISPECTRL 0x0000000c
330#define SCALER_DISPPROF 0x00000010
331#define SCALER_DISPDITHER 0x00000014
332#define SCALER_DISPEOLN 0x00000018
333#define SCALER_DISPLIST0 0x00000020
334#define SCALER_DISPLIST1 0x00000024
335#define SCALER_DISPLIST2 0x00000028
336#define SCALER_DISPLSTAT 0x0000002c
337#define SCALER_DISPLISTX(x) (SCALER_DISPLIST0 + \
338 (x) * (SCALER_DISPLIST1 - \
339 SCALER_DISPLIST0))
340
341#define SCALER_DISPLACT0 0x00000030
342#define SCALER_DISPLACT1 0x00000034
343#define SCALER_DISPLACT2 0x00000038
344#define SCALER_DISPCTRL0 0x00000040
345# define SCALER_DISPCTRLX_ENABLE BIT(31)
346# define SCALER_DISPCTRLX_RESET BIT(30)
347# define SCALER_DISPCTRLX_WIDTH_MASK VC4_MASK(23, 12)
348# define SCALER_DISPCTRLX_WIDTH_SHIFT 12
349# define SCALER_DISPCTRLX_HEIGHT_MASK VC4_MASK(11, 0)
350# define SCALER_DISPCTRLX_HEIGHT_SHIFT 0
351
352#define SCALER_DISPBKGND0 0x00000044
353#define SCALER_DISPSTAT0 0x00000048
354#define SCALER_DISPBASE0 0x0000004c
355# define SCALER_DISPSTATX_MODE_MASK VC4_MASK(31, 30)
356# define SCALER_DISPSTATX_MODE_SHIFT 30
357# define SCALER_DISPSTATX_MODE_DISABLED 0
358# define SCALER_DISPSTATX_MODE_INIT 1
359# define SCALER_DISPSTATX_MODE_RUN 2
360# define SCALER_DISPSTATX_MODE_EOF 3
361# define SCALER_DISPSTATX_FULL BIT(29)
362# define SCALER_DISPSTATX_EMPTY BIT(28)
363#define SCALER_DISPCTRL1 0x00000050
364#define SCALER_DISPBKGND1 0x00000054
365#define SCALER_DISPSTAT1 0x00000058
366#define SCALER_DISPSTATX(x) (SCALER_DISPSTAT0 + \
367 (x) * (SCALER_DISPSTAT1 - \
368 SCALER_DISPSTAT0))
369#define SCALER_DISPBASE1 0x0000005c
370#define SCALER_DISPCTRL2 0x00000060
371#define SCALER_DISPCTRLX(x) (SCALER_DISPCTRL0 + \
372 (x) * (SCALER_DISPCTRL1 - \
373 SCALER_DISPCTRL0))
374#define SCALER_DISPBKGND2 0x00000064
375#define SCALER_DISPSTAT2 0x00000068
376#define SCALER_DISPBASE2 0x0000006c
377#define SCALER_DISPALPHA2 0x00000070
378#define SCALER_GAMADDR 0x00000078
379#define SCALER_GAMDATA 0x000000e0
380#define SCALER_DLIST_START 0x00002000
381#define SCALER_DLIST_SIZE 0x00004000
382
383#define VC4_HDMI_CORE_REV 0x000
384
385#define VC4_HDMI_SW_RESET_CONTROL 0x004
386# define VC4_HDMI_SW_RESET_FORMAT_DETECT BIT(1)
387# define VC4_HDMI_SW_RESET_HDMI BIT(0)
388
389#define VC4_HDMI_HOTPLUG_INT 0x008
390
391#define VC4_HDMI_HOTPLUG 0x00c
392# define VC4_HDMI_HOTPLUG_CONNECTED BIT(0)
393
394#define VC4_HDMI_RAM_PACKET_CONFIG 0x0a0
395# define VC4_HDMI_RAM_PACKET_ENABLE BIT(16)
396
397#define VC4_HDMI_HORZA 0x0c4
398# define VC4_HDMI_HORZA_VPOS BIT(14)
399# define VC4_HDMI_HORZA_HPOS BIT(13)
400/* Horizontal active pixels (hdisplay). */
401# define VC4_HDMI_HORZA_HAP_MASK VC4_MASK(12, 0)
402# define VC4_HDMI_HORZA_HAP_SHIFT 0
403
404#define VC4_HDMI_HORZB 0x0c8
/* Horizontal back porch (htotal - hsync_end). */
406# define VC4_HDMI_HORZB_HBP_MASK VC4_MASK(29, 20)
407# define VC4_HDMI_HORZB_HBP_SHIFT 20
408/* Horizontal sync pulse (hsync_end - hsync_start). */
409# define VC4_HDMI_HORZB_HSP_MASK VC4_MASK(19, 10)
410# define VC4_HDMI_HORZB_HSP_SHIFT 10
411/* Horizontal front porch (hsync_start - hdisplay). */
412# define VC4_HDMI_HORZB_HFP_MASK VC4_MASK(9, 0)
413# define VC4_HDMI_HORZB_HFP_SHIFT 0
414
415#define VC4_HDMI_FIFO_CTL 0x05c
416# define VC4_HDMI_FIFO_CTL_RECENTER_DONE BIT(14)
417# define VC4_HDMI_FIFO_CTL_USE_EMPTY BIT(13)
418# define VC4_HDMI_FIFO_CTL_ON_VB BIT(7)
419# define VC4_HDMI_FIFO_CTL_RECENTER BIT(6)
420# define VC4_HDMI_FIFO_CTL_FIFO_RESET BIT(5)
421# define VC4_HDMI_FIFO_CTL_USE_PLL_LOCK BIT(4)
422# define VC4_HDMI_FIFO_CTL_INV_CLK_XFR BIT(3)
423# define VC4_HDMI_FIFO_CTL_CAPTURE_PTR BIT(2)
424# define VC4_HDMI_FIFO_CTL_USE_FULL BIT(1)
425# define VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N BIT(0)
426# define VC4_HDMI_FIFO_VALID_WRITE_MASK 0xefff
427
428#define VC4_HDMI_SCHEDULER_CONTROL 0x0c0
429# define VC4_HDMI_SCHEDULER_CONTROL_MANUAL_FORMAT BIT(15)
430# define VC4_HDMI_SCHEDULER_CONTROL_IGNORE_VSYNC_PREDICTS BIT(5)
431# define VC4_HDMI_SCHEDULER_CONTROL_VERT_ALWAYS_KEEPOUT BIT(3)
432# define VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE BIT(1)
433# define VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI BIT(0)
434
435#define VC4_HDMI_VERTA0 0x0cc
436#define VC4_HDMI_VERTA1 0x0d4
437/* Vertical sync pulse (vsync_end - vsync_start). */
438# define VC4_HDMI_VERTA_VSP_MASK VC4_MASK(24, 20)
439# define VC4_HDMI_VERTA_VSP_SHIFT 20
440/* Vertical front porch (vsync_start - vdisplay). */
441# define VC4_HDMI_VERTA_VFP_MASK VC4_MASK(19, 13)
442# define VC4_HDMI_VERTA_VFP_SHIFT 13
443/* Vertical active lines (vdisplay). */
444# define VC4_HDMI_VERTA_VAL_MASK VC4_MASK(12, 0)
445# define VC4_HDMI_VERTA_VAL_SHIFT 0
446
447#define VC4_HDMI_VERTB0 0x0d0
448#define VC4_HDMI_VERTB1 0x0d8
449/* Vertical sync pulse offset (for interlaced) */
450# define VC4_HDMI_VERTB_VSPO_MASK VC4_MASK(21, 9)
451# define VC4_HDMI_VERTB_VSPO_SHIFT 9
/* Vertical back porch (vtotal - vsync_end). */
453# define VC4_HDMI_VERTB_VBP_MASK VC4_MASK(8, 0)
454# define VC4_HDMI_VERTB_VBP_SHIFT 0
455
456#define VC4_HDMI_TX_PHY_RESET_CTL 0x2c0
457
458#define VC4_HD_M_CTL 0x00c
459# define VC4_HD_M_SW_RST BIT(2)
460# define VC4_HD_M_ENABLE BIT(0)
461
462#define VC4_HD_MAI_CTL 0x014
463
464#define VC4_HD_VID_CTL 0x038
465# define VC4_HD_VID_CTL_ENABLE BIT(31)
466# define VC4_HD_VID_CTL_UNDERFLOW_ENABLE BIT(30)
467# define VC4_HD_VID_CTL_FRAME_COUNTER_RESET BIT(29)
468# define VC4_HD_VID_CTL_VSYNC_LOW BIT(28)
469# define VC4_HD_VID_CTL_HSYNC_LOW BIT(27)
470
471#define VC4_HD_CSC_CTL 0x040
472# define VC4_HD_CSC_CTL_ORDER_MASK VC4_MASK(7, 5)
473# define VC4_HD_CSC_CTL_ORDER_SHIFT 5
474# define VC4_HD_CSC_CTL_ORDER_RGB 0
475# define VC4_HD_CSC_CTL_ORDER_BGR 1
476# define VC4_HD_CSC_CTL_ORDER_BRG 2
477# define VC4_HD_CSC_CTL_ORDER_GRB 3
478# define VC4_HD_CSC_CTL_ORDER_GBR 4
479# define VC4_HD_CSC_CTL_ORDER_RBG 5
480# define VC4_HD_CSC_CTL_PADMSB BIT(4)
481# define VC4_HD_CSC_CTL_MODE_MASK VC4_MASK(3, 2)
482# define VC4_HD_CSC_CTL_MODE_SHIFT 2
483# define VC4_HD_CSC_CTL_MODE_RGB_TO_SD_YPRPB 0
484# define VC4_HD_CSC_CTL_MODE_RGB_TO_HD_YPRPB 1
485# define VC4_HD_CSC_CTL_MODE_CUSTOM 2
486# define VC4_HD_CSC_CTL_RGB2YCC BIT(1)
487# define VC4_HD_CSC_CTL_ENABLE BIT(0)
488
489#define VC4_HD_FRAME_COUNT 0x068
490
491/* HVS display list information. */
492#define HVS_BOOTLOADER_DLIST_END 32
493
494enum hvs_pixel_format {
495 /* 8bpp */
496 HVS_PIXEL_FORMAT_RGB332 = 0,
497 /* 16bpp */
498 HVS_PIXEL_FORMAT_RGBA4444 = 1,
499 HVS_PIXEL_FORMAT_RGB555 = 2,
500 HVS_PIXEL_FORMAT_RGBA5551 = 3,
501 HVS_PIXEL_FORMAT_RGB565 = 4,
502 /* 24bpp */
503 HVS_PIXEL_FORMAT_RGB888 = 5,
504 HVS_PIXEL_FORMAT_RGBA6666 = 6,
505 /* 32bpp */
506 HVS_PIXEL_FORMAT_RGBA8888 = 7
507};
508
/* Note: the LSB is the rightmost character shown. Only valid for
 * HVS_PIXEL_FORMAT_RGBA8888, not RGB888.
 */
512#define HVS_PIXEL_ORDER_RGBA 0
513#define HVS_PIXEL_ORDER_BGRA 1
514#define HVS_PIXEL_ORDER_ARGB 2
515#define HVS_PIXEL_ORDER_ABGR 3
516
517#define HVS_PIXEL_ORDER_XBRG 0
518#define HVS_PIXEL_ORDER_XRBG 1
519#define HVS_PIXEL_ORDER_XRGB 2
520#define HVS_PIXEL_ORDER_XBGR 3
521
522#define HVS_PIXEL_ORDER_XYCBCR 0
523#define HVS_PIXEL_ORDER_XYCRCB 1
524#define HVS_PIXEL_ORDER_YXCBCR 2
525#define HVS_PIXEL_ORDER_YXCRCB 3
526
527#define SCALER_CTL0_END BIT(31)
528#define SCALER_CTL0_VALID BIT(30)
529
530#define SCALER_CTL0_SIZE_MASK VC4_MASK(29, 24)
531#define SCALER_CTL0_SIZE_SHIFT 24
532
533#define SCALER_CTL0_HFLIP BIT(16)
534#define SCALER_CTL0_VFLIP BIT(15)
535
536#define SCALER_CTL0_ORDER_MASK VC4_MASK(14, 13)
537#define SCALER_CTL0_ORDER_SHIFT 13
538
539/* Set to indicate no scaling. */
540#define SCALER_CTL0_UNITY BIT(4)
541
542#define SCALER_CTL0_PIXEL_FORMAT_MASK VC4_MASK(3, 0)
543#define SCALER_CTL0_PIXEL_FORMAT_SHIFT 0
544
545#define SCALER_POS0_FIXED_ALPHA_MASK VC4_MASK(31, 24)
546#define SCALER_POS0_FIXED_ALPHA_SHIFT 24
547
548#define SCALER_POS0_START_Y_MASK VC4_MASK(23, 12)
549#define SCALER_POS0_START_Y_SHIFT 12
550
551#define SCALER_POS0_START_X_MASK VC4_MASK(11, 0)
552#define SCALER_POS0_START_X_SHIFT 0
553
554#define SCALER_POS2_ALPHA_MODE_MASK VC4_MASK(31, 30)
555#define SCALER_POS2_ALPHA_MODE_SHIFT 30
556#define SCALER_POS2_ALPHA_MODE_PIPELINE 0
557#define SCALER_POS2_ALPHA_MODE_FIXED 1
558#define SCALER_POS2_ALPHA_MODE_FIXED_NONZERO 2
559#define SCALER_POS2_ALPHA_MODE_FIXED_OVER_0x07 3
560
561#define SCALER_POS2_HEIGHT_MASK VC4_MASK(27, 16)
562#define SCALER_POS2_HEIGHT_SHIFT 16
563
564#define SCALER_POS2_WIDTH_MASK VC4_MASK(11, 0)
565#define SCALER_POS2_WIDTH_SHIFT 0
566
567#define SCALER_SRC_PITCH_MASK VC4_MASK(15, 0)
568#define SCALER_SRC_PITCH_SHIFT 0
569
570#endif /* VC4_REGS_H */
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 860062ef8814..c503a840fd88 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -235,66 +235,13 @@ unlock:
235 return ret; 235 return ret;
236} 236}
237 237
238int vgem_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
239{
240 struct drm_file *priv = filp->private_data;
241 struct drm_device *dev = priv->minor->dev;
242 struct drm_vma_offset_node *node;
243 struct drm_gem_object *obj;
244 struct drm_vgem_gem_object *vgem_obj;
245 int ret = 0;
246
247 mutex_lock(&dev->struct_mutex);
248
249 node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
250 vma->vm_pgoff,
251 vma_pages(vma));
252 if (!node) {
253 ret = -EINVAL;
254 goto out_unlock;
255 } else if (!drm_vma_node_is_allowed(node, filp)) {
256 ret = -EACCES;
257 goto out_unlock;
258 }
259
260 obj = container_of(node, struct drm_gem_object, vma_node);
261
262 vgem_obj = to_vgem_bo(obj);
263
264 if (obj->dma_buf && vgem_obj->use_dma_buf) {
265 ret = dma_buf_mmap(obj->dma_buf, vma, 0);
266 goto out_unlock;
267 }
268
269 if (!obj->dev->driver->gem_vm_ops) {
270 ret = -EINVAL;
271 goto out_unlock;
272 }
273
274 vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
275 vma->vm_ops = obj->dev->driver->gem_vm_ops;
276 vma->vm_private_data = vgem_obj;
277 vma->vm_page_prot =
278 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
279
280 mutex_unlock(&dev->struct_mutex);
281 drm_gem_vm_open(vma);
282 return ret;
283
284out_unlock:
285 mutex_unlock(&dev->struct_mutex);
286
287 return ret;
288}
289
290
291static struct drm_ioctl_desc vgem_ioctls[] = { 238static struct drm_ioctl_desc vgem_ioctls[] = {
292}; 239};
293 240
294static const struct file_operations vgem_driver_fops = { 241static const struct file_operations vgem_driver_fops = {
295 .owner = THIS_MODULE, 242 .owner = THIS_MODULE,
296 .open = drm_open, 243 .open = drm_open,
297 .mmap = vgem_drm_gem_mmap, 244 .mmap = drm_gem_mmap,
298 .poll = drm_poll, 245 .poll = drm_poll,
299 .read = drm_read, 246 .read = drm_read,
300 .unlocked_ioctl = drm_ioctl, 247 .unlocked_ioctl = drm_ioctl,
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index ef8c500b4a00..286a785fab4f 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -102,6 +102,10 @@ typedef struct drm_via_private {
102 uint32_t dma_diff; 102 uint32_t dma_diff;
103} drm_via_private_t; 103} drm_via_private_t;
104 104
105struct via_file_private {
106 struct list_head obj_list;
107};
108
105enum via_family { 109enum via_family {
106 VIA_OTHER = 0, /* Baseline */ 110 VIA_OTHER = 0, /* Baseline */
107 VIA_PRO_GROUP_A, /* Another video engine and DMA commands */ 111 VIA_PRO_GROUP_A, /* Another video engine and DMA commands */
@@ -136,9 +140,9 @@ extern int via_init_context(struct drm_device *dev, int context);
136extern int via_final_context(struct drm_device *dev, int context); 140extern int via_final_context(struct drm_device *dev, int context);
137 141
138extern int via_do_cleanup_map(struct drm_device *dev); 142extern int via_do_cleanup_map(struct drm_device *dev);
139extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc); 143extern u32 via_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
140extern int via_enable_vblank(struct drm_device *dev, int crtc); 144extern int via_enable_vblank(struct drm_device *dev, unsigned int pipe);
141extern void via_disable_vblank(struct drm_device *dev, int crtc); 145extern void via_disable_vblank(struct drm_device *dev, unsigned int pipe);
142 146
143extern irqreturn_t via_driver_irq_handler(int irq, void *arg); 147extern irqreturn_t via_driver_irq_handler(int irq, void *arg);
144extern void via_driver_irq_preinstall(struct drm_device *dev); 148extern void via_driver_irq_preinstall(struct drm_device *dev);
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index 1319433816d3..ea8172c747a2 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -95,10 +95,11 @@ static unsigned time_diff(struct timeval *now, struct timeval *then)
95 1000000 - (then->tv_usec - now->tv_usec); 95 1000000 - (then->tv_usec - now->tv_usec);
96} 96}
97 97
98u32 via_get_vblank_counter(struct drm_device *dev, int crtc) 98u32 via_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
99{ 99{
100 drm_via_private_t *dev_priv = dev->dev_private; 100 drm_via_private_t *dev_priv = dev->dev_private;
101 if (crtc != 0) 101
102 if (pipe != 0)
102 return 0; 103 return 0;
103 104
104 return atomic_read(&dev_priv->vbl_received); 105 return atomic_read(&dev_priv->vbl_received);
@@ -170,13 +171,13 @@ static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t *dev_priv)
170 } 171 }
171} 172}
172 173
173int via_enable_vblank(struct drm_device *dev, int crtc) 174int via_enable_vblank(struct drm_device *dev, unsigned int pipe)
174{ 175{
175 drm_via_private_t *dev_priv = dev->dev_private; 176 drm_via_private_t *dev_priv = dev->dev_private;
176 u32 status; 177 u32 status;
177 178
178 if (crtc != 0) { 179 if (pipe != 0) {
179 DRM_ERROR("%s: bad crtc %d\n", __func__, crtc); 180 DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
180 return -EINVAL; 181 return -EINVAL;
181 } 182 }
182 183
@@ -189,7 +190,7 @@ int via_enable_vblank(struct drm_device *dev, int crtc)
189 return 0; 190 return 0;
190} 191}
191 192
192void via_disable_vblank(struct drm_device *dev, int crtc) 193void via_disable_vblank(struct drm_device *dev, unsigned int pipe)
193{ 194{
194 drm_via_private_t *dev_priv = dev->dev_private; 195 drm_via_private_t *dev_priv = dev->dev_private;
195 u32 status; 196 u32 status;
@@ -200,8 +201,8 @@ void via_disable_vblank(struct drm_device *dev, int crtc)
200 VIA_WRITE8(0x83d4, 0x11); 201 VIA_WRITE8(0x83d4, 0x11);
201 VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30); 202 VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
202 203
203 if (crtc != 0) 204 if (pipe != 0)
204 DRM_ERROR("%s: bad crtc %d\n", __func__, crtc); 205 DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
205} 206}
206 207
207static int 208static int
diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile
index 2ee1602d77d4..3fb8eac1084f 100644
--- a/drivers/gpu/drm/virtio/Makefile
+++ b/drivers/gpu/drm/virtio/Makefile
@@ -6,6 +6,7 @@ ccflags-y := -Iinclude/drm
6 6
7virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_drm_bus.o virtgpu_gem.o \ 7virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_drm_bus.o virtgpu_gem.o \
8 virtgpu_fb.o virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o \ 8 virtgpu_fb.o virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o \
9 virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o 9 virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \
10 virtgpu_ioctl.o virtgpu_prime.o
10 11
11obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio-gpu.o 12obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio-gpu.o
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 4e160efc9402..f545913a56c7 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -90,6 +90,14 @@ static int virtio_gpu_crtc_cursor_set(struct drm_crtc *crtc,
90 cpu_to_le32(64), 90 cpu_to_le32(64),
91 cpu_to_le32(64), 91 cpu_to_le32(64),
92 0, 0, &fence); 92 0, 0, &fence);
93 ret = virtio_gpu_object_reserve(qobj, false);
94 if (!ret) {
95 reservation_object_add_excl_fence(qobj->tbo.resv,
96 &fence->f);
97 fence_put(&fence->f);
98 virtio_gpu_object_unreserve(qobj);
99 virtio_gpu_object_wait(qobj, false);
100 }
93 101
94 output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR); 102 output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
95 output->cursor.resource_id = cpu_to_le32(qobj->hw_res_handle); 103 output->cursor.resource_id = cpu_to_le32(qobj->hw_res_handle);
@@ -117,6 +125,51 @@ static int virtio_gpu_crtc_cursor_move(struct drm_crtc *crtc,
117 return 0; 125 return 0;
118} 126}
119 127
128static int virtio_gpu_page_flip(struct drm_crtc *crtc,
129 struct drm_framebuffer *fb,
130 struct drm_pending_vblank_event *event,
131 uint32_t flags)
132{
133 struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
134 struct virtio_gpu_output *output =
135 container_of(crtc, struct virtio_gpu_output, crtc);
136 struct drm_plane *plane = crtc->primary;
137 struct virtio_gpu_framebuffer *vgfb;
138 struct virtio_gpu_object *bo;
139 unsigned long irqflags;
140 uint32_t handle;
141
142 plane->fb = fb;
143 vgfb = to_virtio_gpu_framebuffer(plane->fb);
144 bo = gem_to_virtio_gpu_obj(vgfb->obj);
145 handle = bo->hw_res_handle;
146
147 DRM_DEBUG("handle 0x%x%s, crtc %dx%d\n", handle,
148 bo->dumb ? ", dumb" : "",
149 crtc->mode.hdisplay, crtc->mode.vdisplay);
150 if (bo->dumb) {
151 virtio_gpu_cmd_transfer_to_host_2d
152 (vgdev, handle, 0,
153 cpu_to_le32(crtc->mode.hdisplay),
154 cpu_to_le32(crtc->mode.vdisplay),
155 0, 0, NULL);
156 }
157 virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
158 crtc->mode.hdisplay,
159 crtc->mode.vdisplay, 0, 0);
160 virtio_gpu_cmd_resource_flush(vgdev, handle, 0, 0,
161 crtc->mode.hdisplay,
162 crtc->mode.vdisplay);
163
164 if (event) {
165 spin_lock_irqsave(&crtc->dev->event_lock, irqflags);
166 drm_send_vblank_event(crtc->dev, -1, event);
167 spin_unlock_irqrestore(&crtc->dev->event_lock, irqflags);
168 }
169
170 return 0;
171}
172
120static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = { 173static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
121 .cursor_set2 = virtio_gpu_crtc_cursor_set, 174 .cursor_set2 = virtio_gpu_crtc_cursor_set,
122 .cursor_move = virtio_gpu_crtc_cursor_move, 175 .cursor_move = virtio_gpu_crtc_cursor_move,
@@ -124,9 +177,7 @@ static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
124 .set_config = drm_atomic_helper_set_config, 177 .set_config = drm_atomic_helper_set_config,
125 .destroy = drm_crtc_cleanup, 178 .destroy = drm_crtc_cleanup,
126 179
127#if 0 /* not (yet) working without vblank support according to docs */ 180 .page_flip = virtio_gpu_page_flip,
128 .page_flip = drm_atomic_helper_page_flip,
129#endif
130 .reset = drm_atomic_helper_crtc_reset, 181 .reset = drm_atomic_helper_crtc_reset,
131 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 182 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
132 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 183 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 7d9610aaeff9..b40ed6061f05 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -73,6 +73,14 @@ static struct virtio_device_id id_table[] = {
73}; 73};
74 74
75static unsigned int features[] = { 75static unsigned int features[] = {
76#ifdef __LITTLE_ENDIAN
77 /*
78 * Gallium command stream send by virgl is native endian.
79 * Because of that we only support little endian guests on
80 * little endian hosts.
81 */
82 VIRTIO_GPU_F_VIRGL,
83#endif
76}; 84};
77static struct virtio_driver virtio_gpu_driver = { 85static struct virtio_driver virtio_gpu_driver = {
78 .feature_table = features, 86 .feature_table = features,
@@ -110,10 +118,12 @@ static const struct file_operations virtio_gpu_driver_fops = {
110 118
111 119
112static struct drm_driver driver = { 120static struct drm_driver driver = {
113 .driver_features = DRIVER_MODESET | DRIVER_GEM, 121 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER,
114 .set_busid = drm_virtio_set_busid, 122 .set_busid = drm_virtio_set_busid,
115 .load = virtio_gpu_driver_load, 123 .load = virtio_gpu_driver_load,
116 .unload = virtio_gpu_driver_unload, 124 .unload = virtio_gpu_driver_unload,
125 .open = virtio_gpu_driver_open,
126 .postclose = virtio_gpu_driver_postclose,
117 127
118 .dumb_create = virtio_gpu_mode_dumb_create, 128 .dumb_create = virtio_gpu_mode_dumb_create,
119 .dumb_map_offset = virtio_gpu_mode_dumb_mmap, 129 .dumb_map_offset = virtio_gpu_mode_dumb_mmap,
@@ -123,10 +133,26 @@ static struct drm_driver driver = {
123 .debugfs_init = virtio_gpu_debugfs_init, 133 .debugfs_init = virtio_gpu_debugfs_init,
124 .debugfs_cleanup = virtio_gpu_debugfs_takedown, 134 .debugfs_cleanup = virtio_gpu_debugfs_takedown,
125#endif 135#endif
136 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
137 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
138 .gem_prime_export = drm_gem_prime_export,
139 .gem_prime_import = drm_gem_prime_import,
140 .gem_prime_pin = virtgpu_gem_prime_pin,
141 .gem_prime_unpin = virtgpu_gem_prime_unpin,
142 .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
143 .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
144 .gem_prime_vmap = virtgpu_gem_prime_vmap,
145 .gem_prime_vunmap = virtgpu_gem_prime_vunmap,
146 .gem_prime_mmap = virtgpu_gem_prime_mmap,
126 147
127 .gem_free_object = virtio_gpu_gem_free_object, 148 .gem_free_object = virtio_gpu_gem_free_object,
149 .gem_open_object = virtio_gpu_gem_object_open,
150 .gem_close_object = virtio_gpu_gem_object_close,
128 .fops = &virtio_gpu_driver_fops, 151 .fops = &virtio_gpu_driver_fops,
129 152
153 .ioctls = virtio_gpu_ioctls,
154 .num_ioctls = DRM_VIRTIO_NUM_IOCTLS,
155
130 .name = DRIVER_NAME, 156 .name = DRIVER_NAME,
131 .desc = DRIVER_DESC, 157 .desc = DRIVER_DESC,
132 .date = DRIVER_DATE, 158 .date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 6d4db2dba90b..79f0abe69b64 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -146,6 +146,21 @@ struct virtio_gpu_queue {
146 struct work_struct dequeue_work; 146 struct work_struct dequeue_work;
147}; 147};
148 148
149struct virtio_gpu_drv_capset {
150 uint32_t id;
151 uint32_t max_version;
152 uint32_t max_size;
153};
154
155struct virtio_gpu_drv_cap_cache {
156 struct list_head head;
157 void *caps_cache;
158 uint32_t id;
159 uint32_t version;
160 uint32_t size;
161 atomic_t is_valid;
162};
163
149struct virtio_gpu_device { 164struct virtio_gpu_device {
150 struct device *dev; 165 struct device *dev;
151 struct drm_device *ddev; 166 struct drm_device *ddev;
@@ -179,7 +194,13 @@ struct virtio_gpu_device {
179 struct idr ctx_id_idr; 194 struct idr ctx_id_idr;
180 spinlock_t ctx_id_idr_lock; 195 spinlock_t ctx_id_idr_lock;
181 196
197 bool has_virgl_3d;
198
182 struct work_struct config_changed_work; 199 struct work_struct config_changed_work;
200
201 struct virtio_gpu_drv_capset *capsets;
202 uint32_t num_capsets;
203 struct list_head cap_cache;
183}; 204};
184 205
185struct virtio_gpu_fpriv { 206struct virtio_gpu_fpriv {
@@ -193,6 +214,8 @@ extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
193/* virtio_kms.c */ 214/* virtio_kms.c */
194int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags); 215int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags);
195int virtio_gpu_driver_unload(struct drm_device *dev); 216int virtio_gpu_driver_unload(struct drm_device *dev);
217int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
218void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file);
196 219
197/* virtio_gem.c */ 220/* virtio_gem.c */
198void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj); 221void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj);
@@ -203,6 +226,10 @@ int virtio_gpu_gem_create(struct drm_file *file,
203 uint64_t size, 226 uint64_t size,
204 struct drm_gem_object **obj_p, 227 struct drm_gem_object **obj_p,
205 uint32_t *handle_p); 228 uint32_t *handle_p);
229int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
230 struct drm_file *file);
231void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
232 struct drm_file *file);
206struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev, 233struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
207 size_t size, bool kernel, 234 size_t size, bool kernel,
208 bool pinned); 235 bool pinned);
@@ -260,10 +287,43 @@ void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
260int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev); 287int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev);
261void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev, 288void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
262 uint32_t resource_id); 289 uint32_t resource_id);
290int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx);
291int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
292 int idx, int version,
293 struct virtio_gpu_drv_cap_cache **cache_p);
294void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
295 uint32_t nlen, const char *name);
296void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
297 uint32_t id);
298void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
299 uint32_t ctx_id,
300 uint32_t resource_id);
301void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
302 uint32_t ctx_id,
303 uint32_t resource_id);
304void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
305 void *data, uint32_t data_size,
306 uint32_t ctx_id, struct virtio_gpu_fence **fence);
307void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
308 uint32_t resource_id, uint32_t ctx_id,
309 uint64_t offset, uint32_t level,
310 struct virtio_gpu_box *box,
311 struct virtio_gpu_fence **fence);
312void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
313 uint32_t resource_id, uint32_t ctx_id,
314 uint64_t offset, uint32_t level,
315 struct virtio_gpu_box *box,
316 struct virtio_gpu_fence **fence);
317void
318virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
319 struct virtio_gpu_resource_create_3d *rc_3d,
320 struct virtio_gpu_fence **fence);
263void virtio_gpu_ctrl_ack(struct virtqueue *vq); 321void virtio_gpu_ctrl_ack(struct virtqueue *vq);
264void virtio_gpu_cursor_ack(struct virtqueue *vq); 322void virtio_gpu_cursor_ack(struct virtqueue *vq);
323void virtio_gpu_fence_ack(struct virtqueue *vq);
265void virtio_gpu_dequeue_ctrl_func(struct work_struct *work); 324void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
266void virtio_gpu_dequeue_cursor_func(struct work_struct *work); 325void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
326void virtio_gpu_dequeue_fence_func(struct work_struct *work);
267 327
268/* virtio_gpu_display.c */ 328/* virtio_gpu_display.c */
269int virtio_gpu_framebuffer_init(struct drm_device *dev, 329int virtio_gpu_framebuffer_init(struct drm_device *dev,
@@ -299,6 +359,18 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
299void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo); 359void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo);
300int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait); 360int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
301 361
362/* virtgpu_prime.c */
363int virtgpu_gem_prime_pin(struct drm_gem_object *obj);
364void virtgpu_gem_prime_unpin(struct drm_gem_object *obj);
365struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
366struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
367 struct drm_device *dev, struct dma_buf_attachment *attach,
368 struct sg_table *sgt);
369void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
370void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
371int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
372 struct vm_area_struct *vma);
373
302static inline struct virtio_gpu_object* 374static inline struct virtio_gpu_object*
303virtio_gpu_object_ref(struct virtio_gpu_object *bo) 375virtio_gpu_object_ref(struct virtio_gpu_object *bo)
304{ 376{
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 67097c9ce9c1..cf4418709e76 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -81,7 +81,7 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
81 struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv; 81 struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
82 unsigned long irq_flags; 82 unsigned long irq_flags;
83 83
84 *fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_KERNEL); 84 *fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_ATOMIC);
85 if ((*fence) == NULL) 85 if ((*fence) == NULL)
86 return -ENOMEM; 86 return -ENOMEM;
87 87
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index cfa0d27150bd..1feb7cee3f0d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -138,3 +138,44 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
138 drm_gem_object_unreference_unlocked(gobj); 138 drm_gem_object_unreference_unlocked(gobj);
139 return 0; 139 return 0;
140} 140}
141
142int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
143 struct drm_file *file)
144{
145 struct virtio_gpu_device *vgdev = obj->dev->dev_private;
146 struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
147 struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
148 int r;
149
150 if (!vgdev->has_virgl_3d)
151 return 0;
152
153 r = virtio_gpu_object_reserve(qobj, false);
154 if (r)
155 return r;
156
157 virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
158 qobj->hw_res_handle);
159 virtio_gpu_object_unreserve(qobj);
160 return 0;
161}
162
163void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
164 struct drm_file *file)
165{
166 struct virtio_gpu_device *vgdev = obj->dev->dev_private;
167 struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
168 struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
169 int r;
170
171 if (!vgdev->has_virgl_3d)
172 return;
173
174 r = virtio_gpu_object_reserve(qobj, false);
175 if (r)
176 return;
177
178 virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
179 qobj->hw_res_handle);
180 virtio_gpu_object_unreserve(qobj);
181}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
new file mode 100644
index 000000000000..b4de18e65db8
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -0,0 +1,573 @@
1/*
2 * Copyright (C) 2015 Red Hat, Inc.
3 * All Rights Reserved.
4 *
5 * Authors:
6 * Dave Airlie
7 * Alon Levy
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice shall be included in
17 * all copies or substantial portions of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 */
27
28#include <drm/drmP.h>
29#include "virtgpu_drv.h"
30#include <drm/virtgpu_drm.h>
31#include "ttm/ttm_execbuf_util.h"
32
33static void convert_to_hw_box(struct virtio_gpu_box *dst,
34 const struct drm_virtgpu_3d_box *src)
35{
36 dst->x = cpu_to_le32(src->x);
37 dst->y = cpu_to_le32(src->y);
38 dst->z = cpu_to_le32(src->z);
39 dst->w = cpu_to_le32(src->w);
40 dst->h = cpu_to_le32(src->h);
41 dst->d = cpu_to_le32(src->d);
42}
43
44static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
45 struct drm_file *file_priv)
46{
47 struct virtio_gpu_device *vgdev = dev->dev_private;
48 struct drm_virtgpu_map *virtio_gpu_map = data;
49
50 return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
51 virtio_gpu_map->handle,
52 &virtio_gpu_map->offset);
53}
54
55static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
56 struct list_head *head)
57{
58 struct ttm_validate_buffer *buf;
59 struct ttm_buffer_object *bo;
60 struct virtio_gpu_object *qobj;
61 int ret;
62
63 ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
64 if (ret != 0)
65 return ret;
66
67 list_for_each_entry(buf, head, head) {
68 bo = buf->bo;
69 qobj = container_of(bo, struct virtio_gpu_object, tbo);
70 ret = ttm_bo_validate(bo, &qobj->placement, false, false);
71 if (ret) {
72 ttm_eu_backoff_reservation(ticket, head);
73 return ret;
74 }
75 }
76 return 0;
77}
78
79static void virtio_gpu_unref_list(struct list_head *head)
80{
81 struct ttm_validate_buffer *buf;
82 struct ttm_buffer_object *bo;
83 struct virtio_gpu_object *qobj;
84 list_for_each_entry(buf, head, head) {
85 bo = buf->bo;
86 qobj = container_of(bo, struct virtio_gpu_object, tbo);
87
88 drm_gem_object_unreference_unlocked(&qobj->gem_base);
89 }
90}
91
92static int virtio_gpu_execbuffer(struct drm_device *dev,
93 struct drm_virtgpu_execbuffer *exbuf,
94 struct drm_file *drm_file)
95{
96 struct virtio_gpu_device *vgdev = dev->dev_private;
97 struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
98 struct drm_gem_object *gobj;
99 struct virtio_gpu_fence *fence;
100 struct virtio_gpu_object *qobj;
101 int ret;
102 uint32_t *bo_handles = NULL;
103 void __user *user_bo_handles = NULL;
104 struct list_head validate_list;
105 struct ttm_validate_buffer *buflist = NULL;
106 int i;
107 struct ww_acquire_ctx ticket;
108 void *buf;
109
110 if (vgdev->has_virgl_3d == false)
111 return -ENOSYS;
112
113 INIT_LIST_HEAD(&validate_list);
114 if (exbuf->num_bo_handles) {
115
116 bo_handles = drm_malloc_ab(exbuf->num_bo_handles,
117 sizeof(uint32_t));
118 buflist = drm_calloc_large(exbuf->num_bo_handles,
119 sizeof(struct ttm_validate_buffer));
120 if (!bo_handles || !buflist) {
121 drm_free_large(bo_handles);
122 drm_free_large(buflist);
123 return -ENOMEM;
124 }
125
126 user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
127 if (copy_from_user(bo_handles, user_bo_handles,
128 exbuf->num_bo_handles * sizeof(uint32_t))) {
129 ret = -EFAULT;
130 drm_free_large(bo_handles);
131 drm_free_large(buflist);
132 return ret;
133 }
134
135 for (i = 0; i < exbuf->num_bo_handles; i++) {
136 gobj = drm_gem_object_lookup(dev,
137 drm_file, bo_handles[i]);
138 if (!gobj) {
139 drm_free_large(bo_handles);
140 drm_free_large(buflist);
141 return -ENOENT;
142 }
143
144 qobj = gem_to_virtio_gpu_obj(gobj);
145 buflist[i].bo = &qobj->tbo;
146
147 list_add(&buflist[i].head, &validate_list);
148 }
149 drm_free_large(bo_handles);
150 }
151
152 ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
153 if (ret)
154 goto out_free;
155
156 buf = kmalloc(exbuf->size, GFP_KERNEL);
157 if (!buf) {
158 ret = -ENOMEM;
159 goto out_unresv;
160 }
161 if (copy_from_user(buf, (void __user *)(uintptr_t)exbuf->command,
162 exbuf->size)) {
163 kfree(buf);
164 ret = -EFAULT;
165 goto out_unresv;
166 }
167 virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
168 vfpriv->ctx_id, &fence);
169
170 ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
171
172 /* fence the command bo */
173 virtio_gpu_unref_list(&validate_list);
174 drm_free_large(buflist);
175 fence_put(&fence->f);
176 return 0;
177
178out_unresv:
179 ttm_eu_backoff_reservation(&ticket, &validate_list);
180out_free:
181 virtio_gpu_unref_list(&validate_list);
182 drm_free_large(buflist);
183 return ret;
184}
185
186/*
187 * Usage of execbuffer:
188 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
189 * However, the command as passed from user space must *not* contain the initial
190 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
191 */
192static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
193 struct drm_file *file_priv)
194{
195 struct drm_virtgpu_execbuffer *execbuffer = data;
196 return virtio_gpu_execbuffer(dev, execbuffer, file_priv);
197}
198
199
200static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
201 struct drm_file *file_priv)
202{
203 struct virtio_gpu_device *vgdev = dev->dev_private;
204 struct drm_virtgpu_getparam *param = data;
205 int value;
206
207 switch (param->param) {
208 case VIRTGPU_PARAM_3D_FEATURES:
209 value = vgdev->has_virgl_3d == true ? 1 : 0;
210 break;
211 default:
212 return -EINVAL;
213 }
214 if (copy_to_user((void __user *)(unsigned long)param->value,
215 &value, sizeof(int))) {
216 return -EFAULT;
217 }
218 return 0;
219}
220
221static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
222 struct drm_file *file_priv)
223{
224 struct virtio_gpu_device *vgdev = dev->dev_private;
225 struct drm_virtgpu_resource_create *rc = data;
226 int ret;
227 uint32_t res_id;
228 struct virtio_gpu_object *qobj;
229 struct drm_gem_object *obj;
230 uint32_t handle = 0;
231 uint32_t size;
232 struct list_head validate_list;
233 struct ttm_validate_buffer mainbuf;
234 struct virtio_gpu_fence *fence = NULL;
235 struct ww_acquire_ctx ticket;
236 struct virtio_gpu_resource_create_3d rc_3d;
237
238 if (vgdev->has_virgl_3d == false) {
239 if (rc->depth > 1)
240 return -EINVAL;
241 if (rc->nr_samples > 1)
242 return -EINVAL;
243 if (rc->last_level > 1)
244 return -EINVAL;
245 if (rc->target != 2)
246 return -EINVAL;
247 if (rc->array_size > 1)
248 return -EINVAL;
249 }
250
251 INIT_LIST_HEAD(&validate_list);
252 memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
253
254 virtio_gpu_resource_id_get(vgdev, &res_id);
255
256 size = rc->size;
257
258 /* allocate a single page size object */
259 if (size == 0)
260 size = PAGE_SIZE;
261
262 qobj = virtio_gpu_alloc_object(dev, size, false, false);
263 if (IS_ERR(qobj)) {
264 ret = PTR_ERR(qobj);
265 goto fail_id;
266 }
267 obj = &qobj->gem_base;
268
269 if (!vgdev->has_virgl_3d) {
270 virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
271 rc->width, rc->height);
272
273 ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
274 } else {
275 /* use a gem reference since unref list undoes them */
276 drm_gem_object_reference(&qobj->gem_base);
277 mainbuf.bo = &qobj->tbo;
278 list_add(&mainbuf.head, &validate_list);
279
280 ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
281 if (ret) {
282 DRM_DEBUG("failed to validate\n");
283 goto fail_unref;
284 }
285
286 rc_3d.resource_id = cpu_to_le32(res_id);
287 rc_3d.target = cpu_to_le32(rc->target);
288 rc_3d.format = cpu_to_le32(rc->format);
289 rc_3d.bind = cpu_to_le32(rc->bind);
290 rc_3d.width = cpu_to_le32(rc->width);
291 rc_3d.height = cpu_to_le32(rc->height);
292 rc_3d.depth = cpu_to_le32(rc->depth);
293 rc_3d.array_size = cpu_to_le32(rc->array_size);
294 rc_3d.last_level = cpu_to_le32(rc->last_level);
295 rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
296 rc_3d.flags = cpu_to_le32(rc->flags);
297
298 virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
299 ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
300 if (ret) {
301 ttm_eu_backoff_reservation(&ticket, &validate_list);
302 goto fail_unref;
303 }
304 ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
305 }
306
307 qobj->hw_res_handle = res_id;
308
309 ret = drm_gem_handle_create(file_priv, obj, &handle);
310 if (ret) {
311
312 drm_gem_object_release(obj);
313 if (vgdev->has_virgl_3d) {
314 virtio_gpu_unref_list(&validate_list);
315 fence_put(&fence->f);
316 }
317 return ret;
318 }
319 drm_gem_object_unreference_unlocked(obj);
320
321 rc->res_handle = res_id; /* similiar to a VM address */
322 rc->bo_handle = handle;
323
324 if (vgdev->has_virgl_3d) {
325 virtio_gpu_unref_list(&validate_list);
326 fence_put(&fence->f);
327 }
328 return 0;
329fail_unref:
330 if (vgdev->has_virgl_3d) {
331 virtio_gpu_unref_list(&validate_list);
332 fence_put(&fence->f);
333 }
334//fail_obj:
335// drm_gem_object_handle_unreference_unlocked(obj);
336fail_id:
337 virtio_gpu_resource_id_put(vgdev, res_id);
338 return ret;
339}
340
341static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
342 struct drm_file *file_priv)
343{
344 struct drm_virtgpu_resource_info *ri = data;
345 struct drm_gem_object *gobj = NULL;
346 struct virtio_gpu_object *qobj = NULL;
347
348 gobj = drm_gem_object_lookup(dev, file_priv, ri->bo_handle);
349 if (gobj == NULL)
350 return -ENOENT;
351
352 qobj = gem_to_virtio_gpu_obj(gobj);
353
354 ri->size = qobj->gem_base.size;
355 ri->res_handle = qobj->hw_res_handle;
356 drm_gem_object_unreference_unlocked(gobj);
357 return 0;
358}
359
360static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
361 void *data,
362 struct drm_file *file)
363{
364 struct virtio_gpu_device *vgdev = dev->dev_private;
365 struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
366 struct drm_virtgpu_3d_transfer_from_host *args = data;
367 struct drm_gem_object *gobj = NULL;
368 struct virtio_gpu_object *qobj = NULL;
369 struct virtio_gpu_fence *fence;
370 int ret;
371 u32 offset = args->offset;
372 struct virtio_gpu_box box;
373
374 if (vgdev->has_virgl_3d == false)
375 return -ENOSYS;
376
377 gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
378 if (gobj == NULL)
379 return -ENOENT;
380
381 qobj = gem_to_virtio_gpu_obj(gobj);
382
383 ret = virtio_gpu_object_reserve(qobj, false);
384 if (ret)
385 goto out;
386
387 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
388 true, false);
389 if (unlikely(ret))
390 goto out_unres;
391
392 convert_to_hw_box(&box, &args->box);
393 virtio_gpu_cmd_transfer_from_host_3d
394 (vgdev, qobj->hw_res_handle,
395 vfpriv->ctx_id, offset, args->level,
396 &box, &fence);
397 reservation_object_add_excl_fence(qobj->tbo.resv,
398 &fence->f);
399
400 fence_put(&fence->f);
401out_unres:
402 virtio_gpu_object_unreserve(qobj);
403out:
404 drm_gem_object_unreference_unlocked(gobj);
405 return ret;
406}
407
408static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
409 struct drm_file *file)
410{
411 struct virtio_gpu_device *vgdev = dev->dev_private;
412 struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
413 struct drm_virtgpu_3d_transfer_to_host *args = data;
414 struct drm_gem_object *gobj = NULL;
415 struct virtio_gpu_object *qobj = NULL;
416 struct virtio_gpu_fence *fence;
417 struct virtio_gpu_box box;
418 int ret;
419 u32 offset = args->offset;
420
421 gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
422 if (gobj == NULL)
423 return -ENOENT;
424
425 qobj = gem_to_virtio_gpu_obj(gobj);
426
427 ret = virtio_gpu_object_reserve(qobj, false);
428 if (ret)
429 goto out;
430
431 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
432 true, false);
433 if (unlikely(ret))
434 goto out_unres;
435
436 convert_to_hw_box(&box, &args->box);
437 if (!vgdev->has_virgl_3d) {
438 virtio_gpu_cmd_transfer_to_host_2d
439 (vgdev, qobj->hw_res_handle, offset,
440 box.w, box.h, box.x, box.y, NULL);
441 } else {
442 virtio_gpu_cmd_transfer_to_host_3d
443 (vgdev, qobj->hw_res_handle,
444 vfpriv ? vfpriv->ctx_id : 0, offset,
445 args->level, &box, &fence);
446 reservation_object_add_excl_fence(qobj->tbo.resv,
447 &fence->f);
448 fence_put(&fence->f);
449 }
450
451out_unres:
452 virtio_gpu_object_unreserve(qobj);
453out:
454 drm_gem_object_unreference_unlocked(gobj);
455 return ret;
456}
457
458static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
459 struct drm_file *file)
460{
461 struct drm_virtgpu_3d_wait *args = data;
462 struct drm_gem_object *gobj = NULL;
463 struct virtio_gpu_object *qobj = NULL;
464 int ret;
465 bool nowait = false;
466
467 gobj = drm_gem_object_lookup(dev, file, args->handle);
468 if (gobj == NULL)
469 return -ENOENT;
470
471 qobj = gem_to_virtio_gpu_obj(gobj);
472
473 if (args->flags & VIRTGPU_WAIT_NOWAIT)
474 nowait = true;
475 ret = virtio_gpu_object_wait(qobj, nowait);
476
477 drm_gem_object_unreference_unlocked(gobj);
478 return ret;
479}
480
481static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
482 void *data, struct drm_file *file)
483{
484 struct virtio_gpu_device *vgdev = dev->dev_private;
485 struct drm_virtgpu_get_caps *args = data;
486 int size;
487 int i;
488 int found_valid = -1;
489 int ret;
490 struct virtio_gpu_drv_cap_cache *cache_ent;
491 void *ptr;
492 if (vgdev->num_capsets == 0)
493 return -ENOSYS;
494
495 spin_lock(&vgdev->display_info_lock);
496 for (i = 0; i < vgdev->num_capsets; i++) {
497 if (vgdev->capsets[i].id == args->cap_set_id) {
498 if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
499 found_valid = i;
500 break;
501 }
502 }
503 }
504
505 if (found_valid == -1) {
506 spin_unlock(&vgdev->display_info_lock);
507 return -EINVAL;
508 }
509
510 size = vgdev->capsets[found_valid].max_size;
511 if (args->size > size) {
512 spin_unlock(&vgdev->display_info_lock);
513 return -EINVAL;
514 }
515
516 list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
517 if (cache_ent->id == args->cap_set_id &&
518 cache_ent->version == args->cap_set_ver) {
519 ptr = cache_ent->caps_cache;
520 spin_unlock(&vgdev->display_info_lock);
521 goto copy_exit;
522 }
523 }
524 spin_unlock(&vgdev->display_info_lock);
525
526 /* not in cache - need to talk to hw */
527 virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
528 &cache_ent);
529
530 ret = wait_event_timeout(vgdev->resp_wq,
531 atomic_read(&cache_ent->is_valid), 5 * HZ);
532
533 ptr = cache_ent->caps_cache;
534
535copy_exit:
536 if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
537 return -EFAULT;
538
539 return 0;
540}
541
542struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
543 DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
544 DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
545
546 DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
547 DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
548
549 DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
550 DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
551
552 DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
553 virtio_gpu_resource_create_ioctl,
554 DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
555
556 DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
557 DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
558
559 /* make transfer async to the main ring? - no sure, can we
560 thread these in the underlying GL */
561 DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
562 virtio_gpu_transfer_from_host_ioctl,
563 DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
564 DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
565 virtio_gpu_transfer_to_host_ioctl,
566 DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
567
568 DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
569 DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
570
571 DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
572 DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
573};
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 782766c00d70..06496a128162 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -52,6 +52,41 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
52 events_clear, &events_clear); 52 events_clear, &events_clear);
53} 53}
54 54
55static void virtio_gpu_ctx_id_get(struct virtio_gpu_device *vgdev,
56 uint32_t *resid)
57{
58 int handle;
59
60 idr_preload(GFP_KERNEL);
61 spin_lock(&vgdev->ctx_id_idr_lock);
62 handle = idr_alloc(&vgdev->ctx_id_idr, NULL, 1, 0, 0);
63 spin_unlock(&vgdev->ctx_id_idr_lock);
64 idr_preload_end();
65 *resid = handle;
66}
67
68static void virtio_gpu_ctx_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
69{
70 spin_lock(&vgdev->ctx_id_idr_lock);
71 idr_remove(&vgdev->ctx_id_idr, id);
72 spin_unlock(&vgdev->ctx_id_idr_lock);
73}
74
75static void virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
76 uint32_t nlen, const char *name,
77 uint32_t *ctx_id)
78{
79 virtio_gpu_ctx_id_get(vgdev, ctx_id);
80 virtio_gpu_cmd_context_create(vgdev, *ctx_id, nlen, name);
81}
82
83static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
84 uint32_t ctx_id)
85{
86 virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
87 virtio_gpu_ctx_id_put(vgdev, ctx_id);
88}
89
55static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq, 90static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
56 void (*work_func)(struct work_struct *work)) 91 void (*work_func)(struct work_struct *work))
57{ 92{
@@ -60,6 +95,36 @@ static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
60 INIT_WORK(&vgvq->dequeue_work, work_func); 95 INIT_WORK(&vgvq->dequeue_work, work_func);
61} 96}
62 97
98static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
99 int num_capsets)
100{
101 int i, ret;
102
103 vgdev->capsets = kcalloc(num_capsets,
104 sizeof(struct virtio_gpu_drv_capset),
105 GFP_KERNEL);
106 if (!vgdev->capsets) {
107 DRM_ERROR("failed to allocate cap sets\n");
108 return;
109 }
110 for (i = 0; i < num_capsets; i++) {
111 virtio_gpu_cmd_get_capset_info(vgdev, i);
112 ret = wait_event_timeout(vgdev->resp_wq,
113 vgdev->capsets[i].id > 0, 5 * HZ);
114 if (ret == 0) {
115 DRM_ERROR("timed out waiting for cap set %d\n", i);
116 kfree(vgdev->capsets);
117 vgdev->capsets = NULL;
118 return;
119 }
120 DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
121 i, vgdev->capsets[i].id,
122 vgdev->capsets[i].max_version,
123 vgdev->capsets[i].max_size);
124 }
125 vgdev->num_capsets = num_capsets;
126}
127
63int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags) 128int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
64{ 129{
65 static vq_callback_t *callbacks[] = { 130 static vq_callback_t *callbacks[] = {
@@ -70,7 +135,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
70 struct virtio_gpu_device *vgdev; 135 struct virtio_gpu_device *vgdev;
71 /* this will expand later */ 136 /* this will expand later */
72 struct virtqueue *vqs[2]; 137 struct virtqueue *vqs[2];
73 u32 num_scanouts; 138 u32 num_scanouts, num_capsets;
74 int ret; 139 int ret;
75 140
76 if (!virtio_has_feature(dev->virtdev, VIRTIO_F_VERSION_1)) 141 if (!virtio_has_feature(dev->virtdev, VIRTIO_F_VERSION_1))
@@ -96,9 +161,15 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
96 161
97 spin_lock_init(&vgdev->fence_drv.lock); 162 spin_lock_init(&vgdev->fence_drv.lock);
98 INIT_LIST_HEAD(&vgdev->fence_drv.fences); 163 INIT_LIST_HEAD(&vgdev->fence_drv.fences);
164 INIT_LIST_HEAD(&vgdev->cap_cache);
99 INIT_WORK(&vgdev->config_changed_work, 165 INIT_WORK(&vgdev->config_changed_work,
100 virtio_gpu_config_changed_work_func); 166 virtio_gpu_config_changed_work_func);
101 167
168 if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
169 vgdev->has_virgl_3d = true;
170 DRM_INFO("virgl 3d acceleration %s\n",
171 vgdev->has_virgl_3d ? "enabled" : "not available");
172
102 ret = vgdev->vdev->config->find_vqs(vgdev->vdev, 2, vqs, 173 ret = vgdev->vdev->config->find_vqs(vgdev->vdev, 2, vqs,
103 callbacks, names); 174 callbacks, names);
104 if (ret) { 175 if (ret) {
@@ -129,6 +200,11 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
129 ret = -EINVAL; 200 ret = -EINVAL;
130 goto err_scanouts; 201 goto err_scanouts;
131 } 202 }
203 DRM_INFO("number of scanouts: %d\n", num_scanouts);
204
205 virtio_cread(vgdev->vdev, struct virtio_gpu_config,
206 num_capsets, &num_capsets);
207 DRM_INFO("number of cap sets: %d\n", num_capsets);
132 208
133 ret = virtio_gpu_modeset_init(vgdev); 209 ret = virtio_gpu_modeset_init(vgdev);
134 if (ret) 210 if (ret)
@@ -137,6 +213,8 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
137 virtio_device_ready(vgdev->vdev); 213 virtio_device_ready(vgdev->vdev);
138 vgdev->vqs_ready = true; 214 vgdev->vqs_ready = true;
139 215
216 if (num_capsets)
217 virtio_gpu_get_capsets(vgdev, num_capsets);
140 virtio_gpu_cmd_get_display_info(vgdev); 218 virtio_gpu_cmd_get_display_info(vgdev);
141 wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending, 219 wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
142 5 * HZ); 220 5 * HZ);
@@ -157,6 +235,16 @@ err_vqs:
157 return ret; 235 return ret;
158} 236}
159 237
238static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev)
239{
240 struct virtio_gpu_drv_cap_cache *cache_ent, *tmp;
241
242 list_for_each_entry_safe(cache_ent, tmp, &vgdev->cap_cache, head) {
243 kfree(cache_ent->caps_cache);
244 kfree(cache_ent);
245 }
246}
247
160int virtio_gpu_driver_unload(struct drm_device *dev) 248int virtio_gpu_driver_unload(struct drm_device *dev)
161{ 249{
162 struct virtio_gpu_device *vgdev = dev->dev_private; 250 struct virtio_gpu_device *vgdev = dev->dev_private;
@@ -170,6 +258,49 @@ int virtio_gpu_driver_unload(struct drm_device *dev)
170 virtio_gpu_modeset_fini(vgdev); 258 virtio_gpu_modeset_fini(vgdev);
171 virtio_gpu_ttm_fini(vgdev); 259 virtio_gpu_ttm_fini(vgdev);
172 virtio_gpu_free_vbufs(vgdev); 260 virtio_gpu_free_vbufs(vgdev);
261 virtio_gpu_cleanup_cap_cache(vgdev);
262 kfree(vgdev->capsets);
173 kfree(vgdev); 263 kfree(vgdev);
174 return 0; 264 return 0;
175} 265}
266
267int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
268{
269 struct virtio_gpu_device *vgdev = dev->dev_private;
270 struct virtio_gpu_fpriv *vfpriv;
271 uint32_t id;
272 char dbgname[64], tmpname[TASK_COMM_LEN];
273
274 /* can't create contexts without 3d renderer */
275 if (!vgdev->has_virgl_3d)
276 return 0;
277
278 get_task_comm(tmpname, current);
279 snprintf(dbgname, sizeof(dbgname), "%s", tmpname);
280 dbgname[63] = 0;
281 /* allocate a virt GPU context for this opener */
282 vfpriv = kzalloc(sizeof(*vfpriv), GFP_KERNEL);
283 if (!vfpriv)
284 return -ENOMEM;
285
286 virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname, &id);
287
288 vfpriv->ctx_id = id;
289 file->driver_priv = vfpriv;
290 return 0;
291}
292
293void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
294{
295 struct virtio_gpu_device *vgdev = dev->dev_private;
296 struct virtio_gpu_fpriv *vfpriv;
297
298 if (!vgdev->has_virgl_3d)
299 return;
300
301 vfpriv = file->driver_priv;
302
303 virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id);
304 kfree(vfpriv);
305 file->driver_priv = NULL;
306}
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 2c624c784c1d..f300eba95bb1 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -82,24 +82,19 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
82 size = roundup(size, PAGE_SIZE); 82 size = roundup(size, PAGE_SIZE);
83 ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size); 83 ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
84 if (ret != 0) 84 if (ret != 0)
85 goto err_gem_init; 85 return ret;
86 bo->dumb = false; 86 bo->dumb = false;
87 virtio_gpu_init_ttm_placement(bo, pinned); 87 virtio_gpu_init_ttm_placement(bo, pinned);
88 88
89 ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, size, type, 89 ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, size, type,
90 &bo->placement, 0, !kernel, NULL, acc_size, 90 &bo->placement, 0, !kernel, NULL, acc_size,
91 NULL, NULL, &virtio_gpu_ttm_bo_destroy); 91 NULL, NULL, &virtio_gpu_ttm_bo_destroy);
92 /* ttm_bo_init failure will call the destroy */
92 if (ret != 0) 93 if (ret != 0)
93 goto err_ttm_init; 94 return ret;
94 95
95 *bo_ptr = bo; 96 *bo_ptr = bo;
96 return 0; 97 return 0;
97
98err_ttm_init:
99 drm_gem_object_release(&bo->gem_base);
100err_gem_init:
101 kfree(bo);
102 return ret;
103} 98}
104 99
105int virtio_gpu_object_kmap(struct virtio_gpu_object *bo, void **ptr) 100int virtio_gpu_object_kmap(struct virtio_gpu_object *bo, void **ptr)
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
new file mode 100644
index 000000000000..385e0eb9826a
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -0,0 +1,71 @@
1/*
2 * Copyright 2014 Canonical
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Andreas Pokorny
23 */
24
25#include "virtgpu_drv.h"
26
27/* Empty Implementations as there should not be any other driver for a virtual
28 * device that might share buffers with virtgpu */
29
30int virtgpu_gem_prime_pin(struct drm_gem_object *obj)
31{
32 WARN_ONCE(1, "not implemented");
33 return -ENODEV;
34}
35
36void virtgpu_gem_prime_unpin(struct drm_gem_object *obj)
37{
38 WARN_ONCE(1, "not implemented");
39}
40
41
42struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
43{
44 WARN_ONCE(1, "not implemented");
45 return ERR_PTR(-ENODEV);
46}
47
48struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
49 struct drm_device *dev, struct dma_buf_attachment *attach,
50 struct sg_table *table)
51{
52 WARN_ONCE(1, "not implemented");
53 return ERR_PTR(-ENODEV);
54}
55
56void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj)
57{
58 WARN_ONCE(1, "not implemented");
59 return ERR_PTR(-ENODEV);
60}
61
62void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
63{
64 WARN_ONCE(1, "not implemented");
65}
66
67int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
68 struct vm_area_struct *area)
69{
70 return -ENODEV;
71}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index b092d7b9a292..9fd924cd2b7f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -32,6 +32,7 @@
32#include <ttm/ttm_module.h> 32#include <ttm/ttm_module.h>
33#include <drm/drmP.h> 33#include <drm/drmP.h>
34#include <drm/drm.h> 34#include <drm/drm.h>
35#include <drm/virtgpu_drm.h>
35#include "virtgpu_drv.h" 36#include "virtgpu_drv.h"
36 37
37#include <linux/delay.h> 38#include <linux/delay.h>
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 1698669f4185..5a0f8a745b9d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -293,8 +293,8 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
293 wake_up(&vgdev->cursorq.ack_queue); 293 wake_up(&vgdev->cursorq.ack_queue);
294} 294}
295 295
296static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev, 296static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
297 struct virtio_gpu_vbuffer *vbuf) 297 struct virtio_gpu_vbuffer *vbuf)
298{ 298{
299 struct virtqueue *vq = vgdev->ctrlq.vq; 299 struct virtqueue *vq = vgdev->ctrlq.vq;
300 struct scatterlist *sgs[3], vcmd, vout, vresp; 300 struct scatterlist *sgs[3], vcmd, vout, vresp;
@@ -320,7 +320,6 @@ static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
320 incnt++; 320 incnt++;
321 } 321 }
322 322
323 spin_lock(&vgdev->ctrlq.qlock);
324retry: 323retry:
325 ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC); 324 ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
326 if (ret == -ENOSPC) { 325 if (ret == -ENOSPC) {
@@ -331,13 +330,55 @@ retry:
331 } else { 330 } else {
332 virtqueue_kick(vq); 331 virtqueue_kick(vq);
333 } 332 }
334 spin_unlock(&vgdev->ctrlq.qlock);
335 333
336 if (!ret) 334 if (!ret)
337 ret = vq->num_free; 335 ret = vq->num_free;
338 return ret; 336 return ret;
339} 337}
340 338
339static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
340 struct virtio_gpu_vbuffer *vbuf)
341{
342 int rc;
343
344 spin_lock(&vgdev->ctrlq.qlock);
345 rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
346 spin_unlock(&vgdev->ctrlq.qlock);
347 return rc;
348}
349
350static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
351 struct virtio_gpu_vbuffer *vbuf,
352 struct virtio_gpu_ctrl_hdr *hdr,
353 struct virtio_gpu_fence **fence)
354{
355 struct virtqueue *vq = vgdev->ctrlq.vq;
356 int rc;
357
358again:
359 spin_lock(&vgdev->ctrlq.qlock);
360
361 /*
362 * Make sure we have enouth space in the virtqueue. If not
363 * wait here until we have.
364 *
365 * Without that virtio_gpu_queue_ctrl_buffer_nolock might have
366 * to wait for free space, which can result in fence ids being
367 * submitted out-of-order.
368 */
369 if (vq->num_free < 3) {
370 spin_unlock(&vgdev->ctrlq.qlock);
371 wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
372 goto again;
373 }
374
375 if (fence)
376 virtio_gpu_fence_emit(vgdev, hdr, fence);
377 rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
378 spin_unlock(&vgdev->ctrlq.qlock);
379 return rc;
380}
381
341static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev, 382static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
342 struct virtio_gpu_vbuffer *vbuf) 383 struct virtio_gpu_vbuffer *vbuf)
343{ 384{
@@ -490,9 +531,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
490 cmd_p->r.x = x; 531 cmd_p->r.x = x;
491 cmd_p->r.y = y; 532 cmd_p->r.y = y;
492 533
493 if (fence) 534 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
494 virtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence);
495 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
496} 535}
497 536
498static void 537static void
@@ -515,9 +554,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
515 vbuf->data_buf = ents; 554 vbuf->data_buf = ents;
516 vbuf->data_size = sizeof(*ents) * nents; 555 vbuf->data_size = sizeof(*ents) * nents;
517 556
518 if (fence) 557 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
519 virtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence);
520 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
521} 558}
522 559
523static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev, 560static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
@@ -549,6 +586,47 @@ static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
549 drm_kms_helper_hotplug_event(vgdev->ddev); 586 drm_kms_helper_hotplug_event(vgdev->ddev);
550} 587}
551 588
589static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
590 struct virtio_gpu_vbuffer *vbuf)
591{
592 struct virtio_gpu_get_capset_info *cmd =
593 (struct virtio_gpu_get_capset_info *)vbuf->buf;
594 struct virtio_gpu_resp_capset_info *resp =
595 (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
596 int i = le32_to_cpu(cmd->capset_index);
597
598 spin_lock(&vgdev->display_info_lock);
599 vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
600 vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
601 vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
602 spin_unlock(&vgdev->display_info_lock);
603 wake_up(&vgdev->resp_wq);
604}
605
606static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
607 struct virtio_gpu_vbuffer *vbuf)
608{
609 struct virtio_gpu_get_capset *cmd =
610 (struct virtio_gpu_get_capset *)vbuf->buf;
611 struct virtio_gpu_resp_capset *resp =
612 (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
613 struct virtio_gpu_drv_cap_cache *cache_ent;
614
615 spin_lock(&vgdev->display_info_lock);
616 list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
617 if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
618 cache_ent->id == le32_to_cpu(cmd->capset_id)) {
619 memcpy(cache_ent->caps_cache, resp->capset_data,
620 cache_ent->size);
621 atomic_set(&cache_ent->is_valid, 1);
622 break;
623 }
624 }
625 spin_unlock(&vgdev->display_info_lock);
626 wake_up(&vgdev->resp_wq);
627}
628
629
552int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev) 630int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
553{ 631{
554 struct virtio_gpu_ctrl_hdr *cmd_p; 632 struct virtio_gpu_ctrl_hdr *cmd_p;
@@ -572,6 +650,230 @@ int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
572 return 0; 650 return 0;
573} 651}
574 652
653int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
654{
655 struct virtio_gpu_get_capset_info *cmd_p;
656 struct virtio_gpu_vbuffer *vbuf;
657 void *resp_buf;
658
659 resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
660 GFP_KERNEL);
661 if (!resp_buf)
662 return -ENOMEM;
663
664 cmd_p = virtio_gpu_alloc_cmd_resp
665 (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
666 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
667 resp_buf);
668 memset(cmd_p, 0, sizeof(*cmd_p));
669
670 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
671 cmd_p->capset_index = cpu_to_le32(idx);
672 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
673 return 0;
674}
675
676int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
677 int idx, int version,
678 struct virtio_gpu_drv_cap_cache **cache_p)
679{
680 struct virtio_gpu_get_capset *cmd_p;
681 struct virtio_gpu_vbuffer *vbuf;
682 int max_size = vgdev->capsets[idx].max_size;
683 struct virtio_gpu_drv_cap_cache *cache_ent;
684 void *resp_buf;
685
686 if (idx > vgdev->num_capsets)
687 return -EINVAL;
688
689 if (version > vgdev->capsets[idx].max_version)
690 return -EINVAL;
691
692 cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
693 if (!cache_ent)
694 return -ENOMEM;
695
696 cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
697 if (!cache_ent->caps_cache) {
698 kfree(cache_ent);
699 return -ENOMEM;
700 }
701
702 resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
703 GFP_KERNEL);
704 if (!resp_buf) {
705 kfree(cache_ent->caps_cache);
706 kfree(cache_ent);
707 return -ENOMEM;
708 }
709
710 cache_ent->version = version;
711 cache_ent->id = vgdev->capsets[idx].id;
712 atomic_set(&cache_ent->is_valid, 0);
713 cache_ent->size = max_size;
714 spin_lock(&vgdev->display_info_lock);
715 list_add_tail(&cache_ent->head, &vgdev->cap_cache);
716 spin_unlock(&vgdev->display_info_lock);
717
718 cmd_p = virtio_gpu_alloc_cmd_resp
719 (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
720 sizeof(struct virtio_gpu_resp_capset) + max_size,
721 resp_buf);
722 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
723 cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
724 cmd_p->capset_version = cpu_to_le32(version);
725 *cache_p = cache_ent;
726 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
727
728 return 0;
729}
730
731void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
732 uint32_t nlen, const char *name)
733{
734 struct virtio_gpu_ctx_create *cmd_p;
735 struct virtio_gpu_vbuffer *vbuf;
736
737 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
738 memset(cmd_p, 0, sizeof(*cmd_p));
739
740 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
741 cmd_p->hdr.ctx_id = cpu_to_le32(id);
742 cmd_p->nlen = cpu_to_le32(nlen);
743 strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name)-1);
744 cmd_p->debug_name[sizeof(cmd_p->debug_name)-1] = 0;
745 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
746}
747
748void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
749 uint32_t id)
750{
751 struct virtio_gpu_ctx_destroy *cmd_p;
752 struct virtio_gpu_vbuffer *vbuf;
753
754 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
755 memset(cmd_p, 0, sizeof(*cmd_p));
756
757 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
758 cmd_p->hdr.ctx_id = cpu_to_le32(id);
759 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
760}
761
762void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
763 uint32_t ctx_id,
764 uint32_t resource_id)
765{
766 struct virtio_gpu_ctx_resource *cmd_p;
767 struct virtio_gpu_vbuffer *vbuf;
768
769 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
770 memset(cmd_p, 0, sizeof(*cmd_p));
771
772 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
773 cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
774 cmd_p->resource_id = cpu_to_le32(resource_id);
775 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
776
777}
778
779void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
780 uint32_t ctx_id,
781 uint32_t resource_id)
782{
783 struct virtio_gpu_ctx_resource *cmd_p;
784 struct virtio_gpu_vbuffer *vbuf;
785
786 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
787 memset(cmd_p, 0, sizeof(*cmd_p));
788
789 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
790 cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
791 cmd_p->resource_id = cpu_to_le32(resource_id);
792 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
793}
794
795void
796virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
797 struct virtio_gpu_resource_create_3d *rc_3d,
798 struct virtio_gpu_fence **fence)
799{
800 struct virtio_gpu_resource_create_3d *cmd_p;
801 struct virtio_gpu_vbuffer *vbuf;
802
803 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
804 memset(cmd_p, 0, sizeof(*cmd_p));
805
806 *cmd_p = *rc_3d;
807 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
808 cmd_p->hdr.flags = 0;
809
810 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
811}
812
813void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
814 uint32_t resource_id, uint32_t ctx_id,
815 uint64_t offset, uint32_t level,
816 struct virtio_gpu_box *box,
817 struct virtio_gpu_fence **fence)
818{
819 struct virtio_gpu_transfer_host_3d *cmd_p;
820 struct virtio_gpu_vbuffer *vbuf;
821
822 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
823 memset(cmd_p, 0, sizeof(*cmd_p));
824
825 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
826 cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
827 cmd_p->resource_id = cpu_to_le32(resource_id);
828 cmd_p->box = *box;
829 cmd_p->offset = cpu_to_le64(offset);
830 cmd_p->level = cpu_to_le32(level);
831
832 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
833}
834
835void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
836 uint32_t resource_id, uint32_t ctx_id,
837 uint64_t offset, uint32_t level,
838 struct virtio_gpu_box *box,
839 struct virtio_gpu_fence **fence)
840{
841 struct virtio_gpu_transfer_host_3d *cmd_p;
842 struct virtio_gpu_vbuffer *vbuf;
843
844 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
845 memset(cmd_p, 0, sizeof(*cmd_p));
846
847 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
848 cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
849 cmd_p->resource_id = cpu_to_le32(resource_id);
850 cmd_p->box = *box;
851 cmd_p->offset = cpu_to_le64(offset);
852 cmd_p->level = cpu_to_le32(level);
853
854 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
855}
856
857void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
858 void *data, uint32_t data_size,
859 uint32_t ctx_id, struct virtio_gpu_fence **fence)
860{
861 struct virtio_gpu_cmd_submit *cmd_p;
862 struct virtio_gpu_vbuffer *vbuf;
863
864 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
865 memset(cmd_p, 0, sizeof(*cmd_p));
866
867 vbuf->data_buf = data;
868 vbuf->data_size = data_size;
869
870 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
871 cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
872 cmd_p->size = cpu_to_le32(data_size);
873
874 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
875}
876
575int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev, 877int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
576 struct virtio_gpu_object *obj, 878 struct virtio_gpu_object *obj,
577 uint32_t resource_id, 879 uint32_t resource_id,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 2c7a25c71af2..a09cf8529b9f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -146,73 +146,73 @@
146 146
147static const struct drm_ioctl_desc vmw_ioctls[] = { 147static const struct drm_ioctl_desc vmw_ioctls[] = {
148 VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, 148 VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
149 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), 149 DRM_AUTH | DRM_RENDER_ALLOW),
150 VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, 150 VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
151 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), 151 DRM_AUTH | DRM_RENDER_ALLOW),
152 VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, 152 VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
153 DRM_UNLOCKED | DRM_RENDER_ALLOW), 153 DRM_RENDER_ALLOW),
154 VMW_IOCTL_DEF(VMW_CURSOR_BYPASS, 154 VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
155 vmw_kms_cursor_bypass_ioctl, 155 vmw_kms_cursor_bypass_ioctl,
156 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), 156 DRM_MASTER | DRM_CONTROL_ALLOW),
157 157
158 VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl, 158 VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
159 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), 159 DRM_MASTER | DRM_CONTROL_ALLOW),
160 VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, 160 VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
161 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), 161 DRM_MASTER | DRM_CONTROL_ALLOW),
162 VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl, 162 VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
163 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), 163 DRM_MASTER | DRM_CONTROL_ALLOW),
164 164
165 VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, 165 VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
166 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), 166 DRM_AUTH | DRM_RENDER_ALLOW),
167 VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, 167 VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
168 DRM_UNLOCKED | DRM_RENDER_ALLOW), 168 DRM_RENDER_ALLOW),
169 VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl, 169 VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
170 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), 170 DRM_AUTH | DRM_RENDER_ALLOW),
171 VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, 171 VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
172 DRM_UNLOCKED | DRM_RENDER_ALLOW), 172 DRM_RENDER_ALLOW),
173 VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, 173 VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
174 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), 174 DRM_AUTH | DRM_RENDER_ALLOW),
175 VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH | DRM_UNLOCKED | 175 VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
176 DRM_RENDER_ALLOW), 176 DRM_RENDER_ALLOW),
177 VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, 177 VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
178 DRM_UNLOCKED | DRM_RENDER_ALLOW), 178 DRM_RENDER_ALLOW),
179 VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, 179 VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
180 vmw_fence_obj_signaled_ioctl, 180 vmw_fence_obj_signaled_ioctl,
181 DRM_UNLOCKED | DRM_RENDER_ALLOW), 181 DRM_RENDER_ALLOW),
182 VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl, 182 VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
183 DRM_UNLOCKED | DRM_RENDER_ALLOW), 183 DRM_RENDER_ALLOW),
184 VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl, 184 VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
185 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), 185 DRM_AUTH | DRM_RENDER_ALLOW),
186 VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl, 186 VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
187 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), 187 DRM_AUTH | DRM_RENDER_ALLOW),
188 188
189 /* these allow direct access to the framebuffers mark as master only */ 189 /* these allow direct access to the framebuffers mark as master only */
190 VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl, 190 VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
191 DRM_MASTER | DRM_AUTH | DRM_UNLOCKED), 191 DRM_MASTER | DRM_AUTH),
192 VMW_IOCTL_DEF(VMW_PRESENT_READBACK, 192 VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
193 vmw_present_readback_ioctl, 193 vmw_present_readback_ioctl,
194 DRM_MASTER | DRM_AUTH | DRM_UNLOCKED), 194 DRM_MASTER | DRM_AUTH),
195 VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, 195 VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
196 vmw_kms_update_layout_ioctl, 196 vmw_kms_update_layout_ioctl,
197 DRM_MASTER | DRM_UNLOCKED), 197 DRM_MASTER),
198 VMW_IOCTL_DEF(VMW_CREATE_SHADER, 198 VMW_IOCTL_DEF(VMW_CREATE_SHADER,
199 vmw_shader_define_ioctl, 199 vmw_shader_define_ioctl,
200 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), 200 DRM_AUTH | DRM_RENDER_ALLOW),
201 VMW_IOCTL_DEF(VMW_UNREF_SHADER, 201 VMW_IOCTL_DEF(VMW_UNREF_SHADER,
202 vmw_shader_destroy_ioctl, 202 vmw_shader_destroy_ioctl,
203 DRM_UNLOCKED | DRM_RENDER_ALLOW), 203 DRM_RENDER_ALLOW),
204 VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE, 204 VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
205 vmw_gb_surface_define_ioctl, 205 vmw_gb_surface_define_ioctl,
206 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), 206 DRM_AUTH | DRM_RENDER_ALLOW),
207 VMW_IOCTL_DEF(VMW_GB_SURFACE_REF, 207 VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
208 vmw_gb_surface_reference_ioctl, 208 vmw_gb_surface_reference_ioctl,
209 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), 209 DRM_AUTH | DRM_RENDER_ALLOW),
210 VMW_IOCTL_DEF(VMW_SYNCCPU, 210 VMW_IOCTL_DEF(VMW_SYNCCPU,
211 vmw_user_dmabuf_synccpu_ioctl, 211 vmw_user_dmabuf_synccpu_ioctl,
212 DRM_UNLOCKED | DRM_RENDER_ALLOW), 212 DRM_RENDER_ALLOW),
213 VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT, 213 VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
214 vmw_extended_context_define_ioctl, 214 vmw_extended_context_define_ioctl,
215 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), 215 DRM_AUTH | DRM_RENDER_ALLOW),
216}; 216};
217 217
218static struct pci_device_id vmw_pci_id_list[] = { 218static struct pci_device_id vmw_pci_id_list[] = {
@@ -643,7 +643,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
643 init_waitqueue_head(&dev_priv->fence_queue); 643 init_waitqueue_head(&dev_priv->fence_queue);
644 init_waitqueue_head(&dev_priv->fifo_queue); 644 init_waitqueue_head(&dev_priv->fifo_queue);
645 dev_priv->fence_queue_waiters = 0; 645 dev_priv->fence_queue_waiters = 0;
646 atomic_set(&dev_priv->fifo_queue_waiters, 0); 646 dev_priv->fifo_queue_waiters = 0;
647 647
648 dev_priv->used_memory_size = 0; 648 dev_priv->used_memory_size = 0;
649 649
@@ -752,8 +752,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
752 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 752 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
753 dev_priv->active_master = &dev_priv->fbdev_master; 753 dev_priv->active_master = &dev_priv->fbdev_master;
754 754
755 dev_priv->mmio_virt = ioremap_cache(dev_priv->mmio_start, 755 dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
756 dev_priv->mmio_size); 756 dev_priv->mmio_size, MEMREMAP_WB);
757 757
758 if (unlikely(dev_priv->mmio_virt == NULL)) { 758 if (unlikely(dev_priv->mmio_virt == NULL)) {
759 ret = -ENOMEM; 759 ret = -ENOMEM;
@@ -907,7 +907,7 @@ out_no_irq:
907out_no_device: 907out_no_device:
908 ttm_object_device_release(&dev_priv->tdev); 908 ttm_object_device_release(&dev_priv->tdev);
909out_err4: 909out_err4:
910 iounmap(dev_priv->mmio_virt); 910 memunmap(dev_priv->mmio_virt);
911out_err3: 911out_err3:
912 vmw_ttm_global_release(dev_priv); 912 vmw_ttm_global_release(dev_priv);
913out_err0: 913out_err0:
@@ -958,7 +958,7 @@ static int vmw_driver_unload(struct drm_device *dev)
958 pci_release_regions(dev->pdev); 958 pci_release_regions(dev->pdev);
959 959
960 ttm_object_device_release(&dev_priv->tdev); 960 ttm_object_device_release(&dev_priv->tdev);
961 iounmap(dev_priv->mmio_virt); 961 memunmap(dev_priv->mmio_virt);
962 if (dev_priv->ctx.staged_bindings) 962 if (dev_priv->ctx.staged_bindings)
963 vmw_binding_state_free(dev_priv->ctx.staged_bindings); 963 vmw_binding_state_free(dev_priv->ctx.staged_bindings);
964 vmw_ttm_global_release(dev_priv); 964 vmw_ttm_global_release(dev_priv);
@@ -1062,14 +1062,6 @@ static struct vmw_master *vmw_master_check(struct drm_device *dev,
1062 mutex_unlock(&dev->master_mutex); 1062 mutex_unlock(&dev->master_mutex);
1063 1063
1064 /* 1064 /*
1065 * Taking the drm_global_mutex after the TTM lock might deadlock
1066 */
1067 if (!(flags & DRM_UNLOCKED)) {
1068 DRM_ERROR("Refusing locked ioctl access.\n");
1069 return ERR_PTR(-EDEADLK);
1070 }
1071
1072 /*
1073 * Take the TTM lock. Possibly sleep waiting for the authenticating 1065 * Take the TTM lock. Possibly sleep waiting for the authenticating
1074 * master to become master again, or for a SIGTERM if the 1066 * master to become master again, or for a SIGTERM if the
1075 * authenticating master exits. 1067 * authenticating master exits.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index f19fd39b43e1..a8ae9dfb83b7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -375,7 +375,7 @@ struct vmw_private {
375 uint32_t stdu_max_height; 375 uint32_t stdu_max_height;
376 uint32_t initial_width; 376 uint32_t initial_width;
377 uint32_t initial_height; 377 uint32_t initial_height;
378 u32 __iomem *mmio_virt; 378 u32 *mmio_virt;
379 uint32_t capabilities; 379 uint32_t capabilities;
380 uint32_t max_gmr_ids; 380 uint32_t max_gmr_ids;
381 uint32_t max_gmr_pages; 381 uint32_t max_gmr_pages;
@@ -440,13 +440,12 @@ struct vmw_private {
440 spinlock_t waiter_lock; 440 spinlock_t waiter_lock;
441 int fence_queue_waiters; /* Protected by waiter_lock */ 441 int fence_queue_waiters; /* Protected by waiter_lock */
442 int goal_queue_waiters; /* Protected by waiter_lock */ 442 int goal_queue_waiters; /* Protected by waiter_lock */
443 int cmdbuf_waiters; /* Protected by irq_lock */ 443 int cmdbuf_waiters; /* Protected by waiter_lock */
444 int error_waiters; /* Protected by irq_lock */ 444 int error_waiters; /* Protected by waiter_lock */
445 atomic_t fifo_queue_waiters; 445 int fifo_queue_waiters; /* Protected by waiter_lock */
446 uint32_t last_read_seqno; 446 uint32_t last_read_seqno;
447 spinlock_t irq_lock;
448 struct vmw_fence_manager *fman; 447 struct vmw_fence_manager *fman;
449 uint32_t irq_mask; 448 uint32_t irq_mask; /* Updates protected by waiter_lock */
450 449
451 /* 450 /*
452 * Device state 451 * Device state
@@ -914,9 +913,9 @@ void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
914bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, 913bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
915 uint32_t pitch, 914 uint32_t pitch,
916 uint32_t height); 915 uint32_t height);
917u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc); 916u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
918int vmw_enable_vblank(struct drm_device *dev, int crtc); 917int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
919void vmw_disable_vblank(struct drm_device *dev, int crtc); 918void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
920int vmw_kms_present(struct vmw_private *dev_priv, 919int vmw_kms_present(struct vmw_private *dev_priv,
921 struct drm_file *file_priv, 920 struct drm_file *file_priv,
922 struct vmw_framebuffer *vfb, 921 struct vmw_framebuffer *vfb,
@@ -1206,4 +1205,30 @@ static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
1206{ 1205{
1207 atomic_dec(&dev_priv->num_fifo_resources); 1206 atomic_dec(&dev_priv->num_fifo_resources);
1208} 1207}
1208
1209/**
1210 * vmw_mmio_read - Perform a MMIO read from volatile memory
1211 *
1212 * @addr: The address to read from
1213 *
1214 * This function is intended to be equivalent to ioread32() on
1215 * memremap'd memory, but without byteswapping.
1216 */
1217static inline u32 vmw_mmio_read(u32 *addr)
1218{
1219 return READ_ONCE(*addr);
1220}
1221
1222/**
1223 * vmw_mmio_write - Perform a MMIO write to volatile memory
1224 *
1225 * @addr: The address to write to
1226 *
1227 * This function is intended to be equivalent to iowrite32 on
1228 * memremap'd memory, but without byteswapping.
1229 */
1230static inline void vmw_mmio_write(u32 value, u32 *addr)
1231{
1232 WRITE_ONCE(*addr, value);
1233}
1209#endif 1234#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 567ddede51d1..8e689b439890 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -142,8 +142,8 @@ static bool vmw_fence_enable_signaling(struct fence *f)
142 struct vmw_fence_manager *fman = fman_from_fence(fence); 142 struct vmw_fence_manager *fman = fman_from_fence(fence);
143 struct vmw_private *dev_priv = fman->dev_priv; 143 struct vmw_private *dev_priv = fman->dev_priv;
144 144
145 u32 __iomem *fifo_mem = dev_priv->mmio_virt; 145 u32 *fifo_mem = dev_priv->mmio_virt;
146 u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); 146 u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
147 if (seqno - fence->base.seqno < VMW_FENCE_WRAP) 147 if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
148 return false; 148 return false;
149 149
@@ -386,14 +386,14 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
386 u32 passed_seqno) 386 u32 passed_seqno)
387{ 387{
388 u32 goal_seqno; 388 u32 goal_seqno;
389 u32 __iomem *fifo_mem; 389 u32 *fifo_mem;
390 struct vmw_fence_obj *fence; 390 struct vmw_fence_obj *fence;
391 391
392 if (likely(!fman->seqno_valid)) 392 if (likely(!fman->seqno_valid))
393 return false; 393 return false;
394 394
395 fifo_mem = fman->dev_priv->mmio_virt; 395 fifo_mem = fman->dev_priv->mmio_virt;
396 goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL); 396 goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
397 if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP)) 397 if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
398 return false; 398 return false;
399 399
@@ -401,8 +401,8 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
401 list_for_each_entry(fence, &fman->fence_list, head) { 401 list_for_each_entry(fence, &fman->fence_list, head) {
402 if (!list_empty(&fence->seq_passed_actions)) { 402 if (!list_empty(&fence->seq_passed_actions)) {
403 fman->seqno_valid = true; 403 fman->seqno_valid = true;
404 iowrite32(fence->base.seqno, 404 vmw_mmio_write(fence->base.seqno,
405 fifo_mem + SVGA_FIFO_FENCE_GOAL); 405 fifo_mem + SVGA_FIFO_FENCE_GOAL);
406 break; 406 break;
407 } 407 }
408 } 408 }
@@ -430,18 +430,18 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
430{ 430{
431 struct vmw_fence_manager *fman = fman_from_fence(fence); 431 struct vmw_fence_manager *fman = fman_from_fence(fence);
432 u32 goal_seqno; 432 u32 goal_seqno;
433 u32 __iomem *fifo_mem; 433 u32 *fifo_mem;
434 434
435 if (fence_is_signaled_locked(&fence->base)) 435 if (fence_is_signaled_locked(&fence->base))
436 return false; 436 return false;
437 437
438 fifo_mem = fman->dev_priv->mmio_virt; 438 fifo_mem = fman->dev_priv->mmio_virt;
439 goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL); 439 goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
440 if (likely(fman->seqno_valid && 440 if (likely(fman->seqno_valid &&
441 goal_seqno - fence->base.seqno < VMW_FENCE_WRAP)) 441 goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
442 return false; 442 return false;
443 443
444 iowrite32(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL); 444 vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
445 fman->seqno_valid = true; 445 fman->seqno_valid = true;
446 446
447 return true; 447 return true;
@@ -453,9 +453,9 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)
453 struct list_head action_list; 453 struct list_head action_list;
454 bool needs_rerun; 454 bool needs_rerun;
455 uint32_t seqno, new_seqno; 455 uint32_t seqno, new_seqno;
456 u32 __iomem *fifo_mem = fman->dev_priv->mmio_virt; 456 u32 *fifo_mem = fman->dev_priv->mmio_virt;
457 457
458 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); 458 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
459rerun: 459rerun:
460 list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) { 460 list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
461 if (seqno - fence->base.seqno < VMW_FENCE_WRAP) { 461 if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
@@ -477,7 +477,7 @@ rerun:
477 477
478 needs_rerun = vmw_fence_goal_new_locked(fman, seqno); 478 needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
479 if (unlikely(needs_rerun)) { 479 if (unlikely(needs_rerun)) {
480 new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); 480 new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
481 if (new_seqno != seqno) { 481 if (new_seqno != seqno) {
482 seqno = new_seqno; 482 seqno = new_seqno;
483 goto rerun; 483 goto rerun;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 80c40c31d4f8..a8baf5f5e765 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -36,7 +36,7 @@ struct vmw_temp_set_context {
36 36
37bool vmw_fifo_have_3d(struct vmw_private *dev_priv) 37bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
38{ 38{
39 u32 __iomem *fifo_mem = dev_priv->mmio_virt; 39 u32 *fifo_mem = dev_priv->mmio_virt;
40 uint32_t fifo_min, hwversion; 40 uint32_t fifo_min, hwversion;
41 const struct vmw_fifo_state *fifo = &dev_priv->fifo; 41 const struct vmw_fifo_state *fifo = &dev_priv->fifo;
42 42
@@ -60,15 +60,15 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
60 if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) 60 if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
61 return false; 61 return false;
62 62
63 fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN); 63 fifo_min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
64 if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int)) 64 if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
65 return false; 65 return false;
66 66
67 hwversion = ioread32(fifo_mem + 67 hwversion = vmw_mmio_read(fifo_mem +
68 ((fifo->capabilities & 68 ((fifo->capabilities &
69 SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ? 69 SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
70 SVGA_FIFO_3D_HWVERSION_REVISED : 70 SVGA_FIFO_3D_HWVERSION_REVISED :
71 SVGA_FIFO_3D_HWVERSION)); 71 SVGA_FIFO_3D_HWVERSION));
72 72
73 if (hwversion == 0) 73 if (hwversion == 0)
74 return false; 74 return false;
@@ -85,13 +85,13 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
85 85
86bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv) 86bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
87{ 87{
88 u32 __iomem *fifo_mem = dev_priv->mmio_virt; 88 u32 *fifo_mem = dev_priv->mmio_virt;
89 uint32_t caps; 89 uint32_t caps;
90 90
91 if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) 91 if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
92 return false; 92 return false;
93 93
94 caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES); 94 caps = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
95 if (caps & SVGA_FIFO_CAP_PITCHLOCK) 95 if (caps & SVGA_FIFO_CAP_PITCHLOCK)
96 return true; 96 return true;
97 97
@@ -100,7 +100,7 @@ bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
100 100
101int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) 101int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
102{ 102{
103 u32 __iomem *fifo_mem = dev_priv->mmio_virt; 103 u32 *fifo_mem = dev_priv->mmio_virt;
104 uint32_t max; 104 uint32_t max;
105 uint32_t min; 105 uint32_t min;
106 106
@@ -137,19 +137,19 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
137 if (min < PAGE_SIZE) 137 if (min < PAGE_SIZE)
138 min = PAGE_SIZE; 138 min = PAGE_SIZE;
139 139
140 iowrite32(min, fifo_mem + SVGA_FIFO_MIN); 140 vmw_mmio_write(min, fifo_mem + SVGA_FIFO_MIN);
141 iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX); 141 vmw_mmio_write(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
142 wmb(); 142 wmb();
143 iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD); 143 vmw_mmio_write(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
144 iowrite32(min, fifo_mem + SVGA_FIFO_STOP); 144 vmw_mmio_write(min, fifo_mem + SVGA_FIFO_STOP);
145 iowrite32(0, fifo_mem + SVGA_FIFO_BUSY); 145 vmw_mmio_write(0, fifo_mem + SVGA_FIFO_BUSY);
146 mb(); 146 mb();
147 147
148 vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); 148 vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
149 149
150 max = ioread32(fifo_mem + SVGA_FIFO_MAX); 150 max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
151 min = ioread32(fifo_mem + SVGA_FIFO_MIN); 151 min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
152 fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES); 152 fifo->capabilities = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
153 153
154 DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n", 154 DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
155 (unsigned int) max, 155 (unsigned int) max,
@@ -157,7 +157,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
157 (unsigned int) fifo->capabilities); 157 (unsigned int) fifo->capabilities);
158 158
159 atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno); 159 atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
160 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE); 160 vmw_mmio_write(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
161 vmw_marker_queue_init(&fifo->marker_queue); 161 vmw_marker_queue_init(&fifo->marker_queue);
162 162
163 return 0; 163 return 0;
@@ -165,31 +165,23 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
165 165
166void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) 166void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
167{ 167{
168 u32 __iomem *fifo_mem = dev_priv->mmio_virt; 168 u32 *fifo_mem = dev_priv->mmio_virt;
169 static DEFINE_SPINLOCK(ping_lock);
170 unsigned long irq_flags;
171 169
172 /* 170 preempt_disable();
173 * The ping_lock is needed because we don't have an atomic 171 if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
174 * test-and-set of the SVGA_FIFO_BUSY register.
175 */
176 spin_lock_irqsave(&ping_lock, irq_flags);
177 if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
178 iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
179 vmw_write(dev_priv, SVGA_REG_SYNC, reason); 172 vmw_write(dev_priv, SVGA_REG_SYNC, reason);
180 } 173 preempt_enable();
181 spin_unlock_irqrestore(&ping_lock, irq_flags);
182} 174}
183 175
184void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) 176void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
185{ 177{
186 u32 __iomem *fifo_mem = dev_priv->mmio_virt; 178 u32 *fifo_mem = dev_priv->mmio_virt;
187 179
188 vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); 180 vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
189 while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) 181 while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
190 ; 182 ;
191 183
192 dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); 184 dev_priv->last_read_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
193 185
194 vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 186 vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
195 dev_priv->config_done_state); 187 dev_priv->config_done_state);
@@ -213,11 +205,11 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
213 205
214static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes) 206static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
215{ 207{
216 u32 __iomem *fifo_mem = dev_priv->mmio_virt; 208 u32 *fifo_mem = dev_priv->mmio_virt;
217 uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX); 209 uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
218 uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); 210 uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
219 uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN); 211 uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
220 uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP); 212 uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
221 213
222 return ((max - next_cmd) + (stop - min) <= bytes); 214 return ((max - next_cmd) + (stop - min) <= bytes);
223} 215}
@@ -260,7 +252,6 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
260 unsigned long timeout) 252 unsigned long timeout)
261{ 253{
262 long ret = 1L; 254 long ret = 1L;
263 unsigned long irq_flags;
264 255
265 if (likely(!vmw_fifo_is_full(dev_priv, bytes))) 256 if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
266 return 0; 257 return 0;
@@ -270,16 +261,8 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
270 return vmw_fifo_wait_noirq(dev_priv, bytes, 261 return vmw_fifo_wait_noirq(dev_priv, bytes,
271 interruptible, timeout); 262 interruptible, timeout);
272 263
273 spin_lock(&dev_priv->waiter_lock); 264 vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
274 if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) { 265 &dev_priv->fifo_queue_waiters);
275 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
276 outl(SVGA_IRQFLAG_FIFO_PROGRESS,
277 dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
278 dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
279 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
280 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
281 }
282 spin_unlock(&dev_priv->waiter_lock);
283 266
284 if (interruptible) 267 if (interruptible)
285 ret = wait_event_interruptible_timeout 268 ret = wait_event_interruptible_timeout
@@ -295,14 +278,8 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
295 else if (likely(ret > 0)) 278 else if (likely(ret > 0))
296 ret = 0; 279 ret = 0;
297 280
298 spin_lock(&dev_priv->waiter_lock); 281 vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
299 if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) { 282 &dev_priv->fifo_queue_waiters);
300 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
301 dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
302 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
303 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
304 }
305 spin_unlock(&dev_priv->waiter_lock);
306 283
307 return ret; 284 return ret;
308} 285}
@@ -321,7 +298,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
321 uint32_t bytes) 298 uint32_t bytes)
322{ 299{
323 struct vmw_fifo_state *fifo_state = &dev_priv->fifo; 300 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
324 u32 __iomem *fifo_mem = dev_priv->mmio_virt; 301 u32 *fifo_mem = dev_priv->mmio_virt;
325 uint32_t max; 302 uint32_t max;
326 uint32_t min; 303 uint32_t min;
327 uint32_t next_cmd; 304 uint32_t next_cmd;
@@ -329,9 +306,9 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
329 int ret; 306 int ret;
330 307
331 mutex_lock(&fifo_state->fifo_mutex); 308 mutex_lock(&fifo_state->fifo_mutex);
332 max = ioread32(fifo_mem + SVGA_FIFO_MAX); 309 max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
333 min = ioread32(fifo_mem + SVGA_FIFO_MIN); 310 min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
334 next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); 311 next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
335 312
336 if (unlikely(bytes >= (max - min))) 313 if (unlikely(bytes >= (max - min)))
337 goto out_err; 314 goto out_err;
@@ -342,7 +319,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
342 fifo_state->reserved_size = bytes; 319 fifo_state->reserved_size = bytes;
343 320
344 while (1) { 321 while (1) {
345 uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP); 322 uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
346 bool need_bounce = false; 323 bool need_bounce = false;
347 bool reserve_in_place = false; 324 bool reserve_in_place = false;
348 325
@@ -376,8 +353,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
376 fifo_state->using_bounce_buffer = false; 353 fifo_state->using_bounce_buffer = false;
377 354
378 if (reserveable) 355 if (reserveable)
379 iowrite32(bytes, fifo_mem + 356 vmw_mmio_write(bytes, fifo_mem +
380 SVGA_FIFO_RESERVED); 357 SVGA_FIFO_RESERVED);
381 return (void __force *) (fifo_mem + 358 return (void __force *) (fifo_mem +
382 (next_cmd >> 2)); 359 (next_cmd >> 2));
383 } else { 360 } else {
@@ -427,7 +404,7 @@ void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
427} 404}
428 405
429static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state, 406static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
430 u32 __iomem *fifo_mem, 407 u32 *fifo_mem,
431 uint32_t next_cmd, 408 uint32_t next_cmd,
432 uint32_t max, uint32_t min, uint32_t bytes) 409 uint32_t max, uint32_t min, uint32_t bytes)
433{ 410{
@@ -439,17 +416,16 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
439 if (bytes < chunk_size) 416 if (bytes < chunk_size)
440 chunk_size = bytes; 417 chunk_size = bytes;
441 418
442 iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED); 419 vmw_mmio_write(bytes, fifo_mem + SVGA_FIFO_RESERVED);
443 mb(); 420 mb();
444 memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size); 421 memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
445 rest = bytes - chunk_size; 422 rest = bytes - chunk_size;
446 if (rest) 423 if (rest)
447 memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), 424 memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
448 rest);
449} 425}
450 426
451static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state, 427static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
452 u32 __iomem *fifo_mem, 428 u32 *fifo_mem,
453 uint32_t next_cmd, 429 uint32_t next_cmd,
454 uint32_t max, uint32_t min, uint32_t bytes) 430 uint32_t max, uint32_t min, uint32_t bytes)
455{ 431{
@@ -457,12 +433,12 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
457 fifo_state->dynamic_buffer : fifo_state->static_buffer; 433 fifo_state->dynamic_buffer : fifo_state->static_buffer;
458 434
459 while (bytes > 0) { 435 while (bytes > 0) {
460 iowrite32(*buffer++, fifo_mem + (next_cmd >> 2)); 436 vmw_mmio_write(*buffer++, fifo_mem + (next_cmd >> 2));
461 next_cmd += sizeof(uint32_t); 437 next_cmd += sizeof(uint32_t);
462 if (unlikely(next_cmd == max)) 438 if (unlikely(next_cmd == max))
463 next_cmd = min; 439 next_cmd = min;
464 mb(); 440 mb();
465 iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD); 441 vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
466 mb(); 442 mb();
467 bytes -= sizeof(uint32_t); 443 bytes -= sizeof(uint32_t);
468 } 444 }
@@ -471,10 +447,10 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
471static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) 447static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
472{ 448{
473 struct vmw_fifo_state *fifo_state = &dev_priv->fifo; 449 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
474 u32 __iomem *fifo_mem = dev_priv->mmio_virt; 450 u32 *fifo_mem = dev_priv->mmio_virt;
475 uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); 451 uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
476 uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX); 452 uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
477 uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN); 453 uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
478 bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; 454 bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
479 455
480 if (fifo_state->dx) 456 if (fifo_state->dx)
@@ -507,11 +483,11 @@ static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
507 if (next_cmd >= max) 483 if (next_cmd >= max)
508 next_cmd -= max - min; 484 next_cmd -= max - min;
509 mb(); 485 mb();
510 iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD); 486 vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
511 } 487 }
512 488
513 if (reserveable) 489 if (reserveable)
514 iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED); 490 vmw_mmio_write(0, fifo_mem + SVGA_FIFO_RESERVED);
515 mb(); 491 mb();
516 up_write(&fifo_state->rwsem); 492 up_write(&fifo_state->rwsem);
517 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); 493 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 0a970afed93b..b8c6a03c8c54 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -64,7 +64,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
64 break; 64 break;
65 case DRM_VMW_PARAM_FIFO_HW_VERSION: 65 case DRM_VMW_PARAM_FIFO_HW_VERSION:
66 { 66 {
67 u32 __iomem *fifo_mem = dev_priv->mmio_virt; 67 u32 *fifo_mem = dev_priv->mmio_virt;
68 const struct vmw_fifo_state *fifo = &dev_priv->fifo; 68 const struct vmw_fifo_state *fifo = &dev_priv->fifo;
69 69
70 if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) { 70 if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
@@ -73,11 +73,11 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
73 } 73 }
74 74
75 param->value = 75 param->value =
76 ioread32(fifo_mem + 76 vmw_mmio_read(fifo_mem +
77 ((fifo->capabilities & 77 ((fifo->capabilities &
78 SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ? 78 SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
79 SVGA_FIFO_3D_HWVERSION_REVISED : 79 SVGA_FIFO_3D_HWVERSION_REVISED :
80 SVGA_FIFO_3D_HWVERSION)); 80 SVGA_FIFO_3D_HWVERSION));
81 break; 81 break;
82 } 82 }
83 case DRM_VMW_PARAM_MAX_SURF_MEMORY: 83 case DRM_VMW_PARAM_MAX_SURF_MEMORY:
@@ -122,6 +122,22 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
122 return 0; 122 return 0;
123} 123}
124 124
125static u32 vmw_mask_multisample(unsigned int cap, u32 fmt_value)
126{
127 /* If the header is updated, update the format test as well! */
128 BUILD_BUG_ON(SVGA3D_DEVCAP_DXFMT_BC5_UNORM + 1 != SVGA3D_DEVCAP_MAX);
129
130 if (cap >= SVGA3D_DEVCAP_DXFMT_X8R8G8B8 &&
131 cap <= SVGA3D_DEVCAP_DXFMT_BC5_UNORM)
132 fmt_value &= ~(SVGADX_DXFMT_MULTISAMPLE_2 |
133 SVGADX_DXFMT_MULTISAMPLE_4 |
134 SVGADX_DXFMT_MULTISAMPLE_8);
135 else if (cap == SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES)
136 return 0;
137
138 return fmt_value;
139}
140
125static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce, 141static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
126 size_t size) 142 size_t size)
127{ 143{
@@ -147,7 +163,8 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
147 for (i = 0; i < max_size; ++i) { 163 for (i = 0; i < max_size; ++i) {
148 vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); 164 vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
149 compat_cap->pairs[i][0] = i; 165 compat_cap->pairs[i][0] = i;
150 compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP); 166 compat_cap->pairs[i][1] = vmw_mask_multisample
167 (i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
151 } 168 }
152 spin_unlock(&dev_priv->cap_lock); 169 spin_unlock(&dev_priv->cap_lock);
153 170
@@ -162,7 +179,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
162 (struct drm_vmw_get_3d_cap_arg *) data; 179 (struct drm_vmw_get_3d_cap_arg *) data;
163 struct vmw_private *dev_priv = vmw_priv(dev); 180 struct vmw_private *dev_priv = vmw_priv(dev);
164 uint32_t size; 181 uint32_t size;
165 u32 __iomem *fifo_mem; 182 u32 *fifo_mem;
166 void __user *buffer = (void __user *)((unsigned long)(arg->buffer)); 183 void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
167 void *bounce; 184 void *bounce;
168 int ret; 185 int ret;
@@ -202,7 +219,8 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
202 spin_lock(&dev_priv->cap_lock); 219 spin_lock(&dev_priv->cap_lock);
203 for (i = 0; i < num; ++i) { 220 for (i = 0; i < num; ++i) {
204 vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); 221 vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
205 *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); 222 *bounce32++ = vmw_mask_multisample
223 (i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
206 } 224 }
207 spin_unlock(&dev_priv->cap_lock); 225 spin_unlock(&dev_priv->cap_lock);
208 } else if (gb_objects) { 226 } else if (gb_objects) {
@@ -211,7 +229,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
211 goto out_err; 229 goto out_err;
212 } else { 230 } else {
213 fifo_mem = dev_priv->mmio_virt; 231 fifo_mem = dev_priv->mmio_virt;
214 memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); 232 memcpy(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
215 } 233 }
216 234
217 ret = copy_to_user(buffer, bounce, size); 235 ret = copy_to_user(buffer, bounce, size);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 9498a5e33c12..0c7e1723292c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -36,15 +36,13 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
36 struct vmw_private *dev_priv = vmw_priv(dev); 36 struct vmw_private *dev_priv = vmw_priv(dev);
37 uint32_t status, masked_status; 37 uint32_t status, masked_status;
38 38
39 spin_lock(&dev_priv->irq_lock);
40 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); 39 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
41 masked_status = status & dev_priv->irq_mask; 40 masked_status = status & READ_ONCE(dev_priv->irq_mask);
42 spin_unlock(&dev_priv->irq_lock);
43 41
44 if (likely(status)) 42 if (likely(status))
45 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); 43 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
46 44
47 if (!masked_status) 45 if (!status)
48 return IRQ_NONE; 46 return IRQ_NONE;
49 47
50 if (masked_status & (SVGA_IRQFLAG_ANY_FENCE | 48 if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
@@ -72,8 +70,8 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
72void vmw_update_seqno(struct vmw_private *dev_priv, 70void vmw_update_seqno(struct vmw_private *dev_priv,
73 struct vmw_fifo_state *fifo_state) 71 struct vmw_fifo_state *fifo_state)
74{ 72{
75 u32 __iomem *fifo_mem = dev_priv->mmio_virt; 73 u32 *fifo_mem = dev_priv->mmio_virt;
76 uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); 74 uint32_t seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
77 75
78 if (dev_priv->last_read_seqno != seqno) { 76 if (dev_priv->last_read_seqno != seqno) {
79 dev_priv->last_read_seqno = seqno; 77 dev_priv->last_read_seqno = seqno;
@@ -178,8 +176,9 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
178 } 176 }
179 finish_wait(&dev_priv->fence_queue, &__wait); 177 finish_wait(&dev_priv->fence_queue, &__wait);
180 if (ret == 0 && fifo_idle) { 178 if (ret == 0 && fifo_idle) {
181 u32 __iomem *fifo_mem = dev_priv->mmio_virt; 179 u32 *fifo_mem = dev_priv->mmio_virt;
182 iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE); 180
181 vmw_mmio_write(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
183 } 182 }
184 wake_up_all(&dev_priv->fence_queue); 183 wake_up_all(&dev_priv->fence_queue);
185out_err: 184out_err:
@@ -189,65 +188,51 @@ out_err:
189 return ret; 188 return ret;
190} 189}
191 190
192void vmw_seqno_waiter_add(struct vmw_private *dev_priv) 191void vmw_generic_waiter_add(struct vmw_private *dev_priv,
192 u32 flag, int *waiter_count)
193{ 193{
194 spin_lock(&dev_priv->waiter_lock); 194 spin_lock_bh(&dev_priv->waiter_lock);
195 if (dev_priv->fence_queue_waiters++ == 0) { 195 if ((*waiter_count)++ == 0) {
196 unsigned long irq_flags; 196 outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
197 197 dev_priv->irq_mask |= flag;
198 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
199 outl(SVGA_IRQFLAG_ANY_FENCE,
200 dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
201 dev_priv->irq_mask |= SVGA_IRQFLAG_ANY_FENCE;
202 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); 198 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
203 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
204 } 199 }
205 spin_unlock(&dev_priv->waiter_lock); 200 spin_unlock_bh(&dev_priv->waiter_lock);
206} 201}
207 202
208void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) 203void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
204 u32 flag, int *waiter_count)
209{ 205{
210 spin_lock(&dev_priv->waiter_lock); 206 spin_lock_bh(&dev_priv->waiter_lock);
211 if (--dev_priv->fence_queue_waiters == 0) { 207 if (--(*waiter_count) == 0) {
212 unsigned long irq_flags; 208 dev_priv->irq_mask &= ~flag;
213
214 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
215 dev_priv->irq_mask &= ~SVGA_IRQFLAG_ANY_FENCE;
216 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); 209 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
217 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
218 } 210 }
219 spin_unlock(&dev_priv->waiter_lock); 211 spin_unlock_bh(&dev_priv->waiter_lock);
220} 212}
221 213
214void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
215{
216 vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
217 &dev_priv->fence_queue_waiters);
218}
219
220void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
221{
222 vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
223 &dev_priv->fence_queue_waiters);
224}
222 225
223void vmw_goal_waiter_add(struct vmw_private *dev_priv) 226void vmw_goal_waiter_add(struct vmw_private *dev_priv)
224{ 227{
225 spin_lock(&dev_priv->waiter_lock); 228 vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
226 if (dev_priv->goal_queue_waiters++ == 0) { 229 &dev_priv->goal_queue_waiters);
227 unsigned long irq_flags;
228
229 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
230 outl(SVGA_IRQFLAG_FENCE_GOAL,
231 dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
232 dev_priv->irq_mask |= SVGA_IRQFLAG_FENCE_GOAL;
233 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
234 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
235 }
236 spin_unlock(&dev_priv->waiter_lock);
237} 230}
238 231
239void vmw_goal_waiter_remove(struct vmw_private *dev_priv) 232void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
240{ 233{
241 spin_lock(&dev_priv->waiter_lock); 234 vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
242 if (--dev_priv->goal_queue_waiters == 0) { 235 &dev_priv->goal_queue_waiters);
243 unsigned long irq_flags;
244
245 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
246 dev_priv->irq_mask &= ~SVGA_IRQFLAG_FENCE_GOAL;
247 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
248 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
249 }
250 spin_unlock(&dev_priv->waiter_lock);
251} 236}
252 237
253int vmw_wait_seqno(struct vmw_private *dev_priv, 238int vmw_wait_seqno(struct vmw_private *dev_priv,
@@ -304,7 +289,6 @@ void vmw_irq_preinstall(struct drm_device *dev)
304 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) 289 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
305 return; 290 return;
306 291
307 spin_lock_init(&dev_priv->irq_lock);
308 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); 292 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
309 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); 293 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
310} 294}
@@ -327,30 +311,3 @@ void vmw_irq_uninstall(struct drm_device *dev)
327 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); 311 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
328 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); 312 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
329} 313}
330
331void vmw_generic_waiter_add(struct vmw_private *dev_priv,
332 u32 flag, int *waiter_count)
333{
334 unsigned long irq_flags;
335
336 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
337 if ((*waiter_count)++ == 0) {
338 outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
339 dev_priv->irq_mask |= flag;
340 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
341 }
342 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
343}
344
345void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
346 u32 flag, int *waiter_count)
347{
348 unsigned long irq_flags;
349
350 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
351 if (--(*waiter_count) == 0) {
352 dev_priv->irq_mask &= ~flag;
353 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
354 }
355 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
356}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 15a6c01cd016..9fcd7f82995c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -78,7 +78,7 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
78 cmd->cursor.hotspotX = hotspotX; 78 cmd->cursor.hotspotX = hotspotX;
79 cmd->cursor.hotspotY = hotspotY; 79 cmd->cursor.hotspotY = hotspotY;
80 80
81 vmw_fifo_commit(dev_priv, cmd_size); 81 vmw_fifo_commit_flush(dev_priv, cmd_size);
82 82
83 return 0; 83 return 0;
84} 84}
@@ -123,14 +123,14 @@ err_unreserve:
123void vmw_cursor_update_position(struct vmw_private *dev_priv, 123void vmw_cursor_update_position(struct vmw_private *dev_priv,
124 bool show, int x, int y) 124 bool show, int x, int y)
125{ 125{
126 u32 __iomem *fifo_mem = dev_priv->mmio_virt; 126 u32 *fifo_mem = dev_priv->mmio_virt;
127 uint32_t count; 127 uint32_t count;
128 128
129 iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON); 129 vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
130 iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X); 130 vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
131 iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y); 131 vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
132 count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT); 132 count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
133 iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT); 133 vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
134} 134}
135 135
136int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, 136int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
@@ -1155,7 +1155,8 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
1155 if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) 1155 if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
1156 vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch); 1156 vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
1157 else if (vmw_fifo_have_pitchlock(vmw_priv)) 1157 else if (vmw_fifo_have_pitchlock(vmw_priv))
1158 iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK); 1158 vmw_mmio_write(pitch, vmw_priv->mmio_virt +
1159 SVGA_FIFO_PITCHLOCK);
1159 vmw_write(vmw_priv, SVGA_REG_WIDTH, width); 1160 vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
1160 vmw_write(vmw_priv, SVGA_REG_HEIGHT, height); 1161 vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
1161 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp); 1162 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
@@ -1181,8 +1182,8 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
1181 vmw_priv->vga_pitchlock = 1182 vmw_priv->vga_pitchlock =
1182 vmw_read(vmw_priv, SVGA_REG_PITCHLOCK); 1183 vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
1183 else if (vmw_fifo_have_pitchlock(vmw_priv)) 1184 else if (vmw_fifo_have_pitchlock(vmw_priv))
1184 vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt + 1185 vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
1185 SVGA_FIFO_PITCHLOCK); 1186 SVGA_FIFO_PITCHLOCK);
1186 1187
1187 if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) 1188 if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
1188 return 0; 1189 return 0;
@@ -1230,8 +1231,8 @@ int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
1230 vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, 1231 vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
1231 vmw_priv->vga_pitchlock); 1232 vmw_priv->vga_pitchlock);
1232 else if (vmw_fifo_have_pitchlock(vmw_priv)) 1233 else if (vmw_fifo_have_pitchlock(vmw_priv))
1233 iowrite32(vmw_priv->vga_pitchlock, 1234 vmw_mmio_write(vmw_priv->vga_pitchlock,
1234 vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK); 1235 vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
1235 1236
1236 if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) 1237 if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
1237 return 0; 1238 return 0;
@@ -1263,7 +1264,7 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
1263/** 1264/**
1264 * Function called by DRM code called with vbl_lock held. 1265 * Function called by DRM code called with vbl_lock held.
1265 */ 1266 */
1266u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc) 1267u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
1267{ 1268{
1268 return 0; 1269 return 0;
1269} 1270}
@@ -1271,7 +1272,7 @@ u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
1271/** 1272/**
1272 * Function called by DRM code called with vbl_lock held. 1273 * Function called by DRM code called with vbl_lock held.
1273 */ 1274 */
1274int vmw_enable_vblank(struct drm_device *dev, int crtc) 1275int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
1275{ 1276{
1276 return -ENOSYS; 1277 return -ENOSYS;
1277} 1278}
@@ -1279,7 +1280,7 @@ int vmw_enable_vblank(struct drm_device *dev, int crtc)
1279/** 1280/**
1280 * Function called by DRM code called with vbl_lock held. 1281 * Function called by DRM code called with vbl_lock held.
1281 */ 1282 */
1282void vmw_disable_vblank(struct drm_device *dev, int crtc) 1283void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
1283{ 1284{
1284} 1285}
1285 1286
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index c22e2df1b336..b1fc1c02792d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -717,6 +717,8 @@ static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
717 &event->event.tv_usec, 717 &event->event.tv_usec,
718 true); 718 true);
719 vmw_fence_obj_unreference(&fence); 719 vmw_fence_obj_unreference(&fence);
720 } else {
721 vmw_fifo_flush(dev_priv, false);
720 } 722 }
721 723
722 return ret; 724 return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 03f63c749c02..7d620e82e000 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1291,6 +1291,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
1291 uint32_t size; 1291 uint32_t size;
1292 uint32_t backup_handle; 1292 uint32_t backup_handle;
1293 1293
1294 if (req->multisample_count != 0)
1295 return -EINVAL;
1294 1296
1295 if (unlikely(vmw_user_surface_size == 0)) 1297 if (unlikely(vmw_user_surface_size == 0))
1296 vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + 1298 vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index 791de9351eeb..cc3f1825c735 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -298,7 +298,7 @@ static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
298 host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i)); 298 host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i));
299 if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(owner)) 299 if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(owner))
300 host1x_debug_output(o, "%d: locked by channel %d\n", 300 host1x_debug_output(o, "%d: locked by channel %d\n",
301 i, HOST1X_SYNC_MLOCK_OWNER_CHID_F(owner)); 301 i, HOST1X_SYNC_MLOCK_OWNER_CHID_V(owner));
302 else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(owner)) 302 else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(owner))
303 host1x_debug_output(o, "%d: locked by cpu\n", i); 303 host1x_debug_output(o, "%d: locked by cpu\n", i);
304 else 304 else
diff --git a/drivers/gpu/host1x/hw/hw_host1x01_sync.h b/drivers/gpu/host1x/hw/hw_host1x01_sync.h
index ac704e579977..31238c285d46 100644
--- a/drivers/gpu/host1x/hw/hw_host1x01_sync.h
+++ b/drivers/gpu/host1x/hw/hw_host1x01_sync.h
@@ -131,12 +131,12 @@ static inline u32 host1x_sync_mlock_owner_r(unsigned int id)
131} 131}
132#define HOST1X_SYNC_MLOCK_OWNER(id) \ 132#define HOST1X_SYNC_MLOCK_OWNER(id) \
133 host1x_sync_mlock_owner_r(id) 133 host1x_sync_mlock_owner_r(id)
134static inline u32 host1x_sync_mlock_owner_chid_f(u32 v) 134static inline u32 host1x_sync_mlock_owner_chid_v(u32 v)
135{ 135{
136 return (v & 0xf) << 8; 136 return (v >> 8) & 0xf;
137} 137}
138#define HOST1X_SYNC_MLOCK_OWNER_CHID_F(v) \ 138#define HOST1X_SYNC_MLOCK_OWNER_CHID_V(v) \
139 host1x_sync_mlock_owner_chid_f(v) 139 host1x_sync_mlock_owner_chid_v(v)
140static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r) 140static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r)
141{ 141{
142 return (r >> 1) & 0x1; 142 return (r >> 1) & 0x1;
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_sync.h b/drivers/gpu/host1x/hw/hw_host1x02_sync.h
index 4495401525e8..540c7b65995f 100644
--- a/drivers/gpu/host1x/hw/hw_host1x02_sync.h
+++ b/drivers/gpu/host1x/hw/hw_host1x02_sync.h
@@ -131,12 +131,12 @@ static inline u32 host1x_sync_mlock_owner_r(unsigned int id)
131} 131}
132#define HOST1X_SYNC_MLOCK_OWNER(id) \ 132#define HOST1X_SYNC_MLOCK_OWNER(id) \
133 host1x_sync_mlock_owner_r(id) 133 host1x_sync_mlock_owner_r(id)
134static inline u32 host1x_sync_mlock_owner_chid_f(u32 v) 134static inline u32 host1x_sync_mlock_owner_chid_v(u32 v)
135{ 135{
136 return (v & 0xf) << 8; 136 return (v >> 8) & 0xf;
137} 137}
138#define HOST1X_SYNC_MLOCK_OWNER_CHID_F(v) \ 138#define HOST1X_SYNC_MLOCK_OWNER_CHID_V(v) \
139 host1x_sync_mlock_owner_chid_f(v) 139 host1x_sync_mlock_owner_chid_v(v)
140static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r) 140static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r)
141{ 141{
142 return (r >> 1) & 0x1; 142 return (r >> 1) & 0x1;
diff --git a/drivers/gpu/host1x/hw/hw_host1x04_sync.h b/drivers/gpu/host1x/hw/hw_host1x04_sync.h
index ef2275b5407a..3d6c8ec65934 100644
--- a/drivers/gpu/host1x/hw/hw_host1x04_sync.h
+++ b/drivers/gpu/host1x/hw/hw_host1x04_sync.h
@@ -131,12 +131,12 @@ static inline u32 host1x_sync_mlock_owner_r(unsigned int id)
131} 131}
132#define HOST1X_SYNC_MLOCK_OWNER(id) \ 132#define HOST1X_SYNC_MLOCK_OWNER(id) \
133 host1x_sync_mlock_owner_r(id) 133 host1x_sync_mlock_owner_r(id)
134static inline u32 host1x_sync_mlock_owner_chid_f(u32 v) 134static inline u32 host1x_sync_mlock_owner_chid_v(u32 v)
135{ 135{
136 return (v & 0xf) << 8; 136 return (v >> 8) & 0xf;
137} 137}
138#define HOST1X_SYNC_MLOCK_OWNER_CHID_F(v) \ 138#define HOST1X_SYNC_MLOCK_OWNER_CHID_V(v) \
139 host1x_sync_mlock_owner_chid_f(v) 139 host1x_sync_mlock_owner_chid_v(v)
140static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r) 140static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r)
141{ 141{
142 return (r >> 1) & 0x1; 142 return (r >> 1) & 0x1;
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index e5a38d202a21..ba47b30d28fa 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -57,10 +57,15 @@ EXPORT_SYMBOL_GPL(ipu_srm_dp_sync_update);
57enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc) 57enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
58{ 58{
59 switch (drm_fourcc) { 59 switch (drm_fourcc) {
60 case DRM_FORMAT_ARGB1555:
61 case DRM_FORMAT_ABGR1555:
62 case DRM_FORMAT_RGBA5551:
63 case DRM_FORMAT_BGRA5551:
60 case DRM_FORMAT_RGB565: 64 case DRM_FORMAT_RGB565:
61 case DRM_FORMAT_BGR565: 65 case DRM_FORMAT_BGR565:
62 case DRM_FORMAT_RGB888: 66 case DRM_FORMAT_RGB888:
63 case DRM_FORMAT_BGR888: 67 case DRM_FORMAT_BGR888:
68 case DRM_FORMAT_ARGB4444:
64 case DRM_FORMAT_XRGB8888: 69 case DRM_FORMAT_XRGB8888:
65 case DRM_FORMAT_XBGR8888: 70 case DRM_FORMAT_XBGR8888:
66 case DRM_FORMAT_RGBX8888: 71 case DRM_FORMAT_RGBX8888:
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index 3bf05bc4ab67..63eb16bf2cf0 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -452,7 +452,7 @@ void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
452} 452}
453EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar); 453EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar);
454 454
455static const struct ipu_rgb def_rgb_32 = { 455static const struct ipu_rgb def_xrgb_32 = {
456 .red = { .offset = 16, .length = 8, }, 456 .red = { .offset = 16, .length = 8, },
457 .green = { .offset = 8, .length = 8, }, 457 .green = { .offset = 8, .length = 8, },
458 .blue = { .offset = 0, .length = 8, }, 458 .blue = { .offset = 0, .length = 8, },
@@ -460,7 +460,7 @@ static const struct ipu_rgb def_rgb_32 = {
460 .bits_per_pixel = 32, 460 .bits_per_pixel = 32,
461}; 461};
462 462
463static const struct ipu_rgb def_bgr_32 = { 463static const struct ipu_rgb def_xbgr_32 = {
464 .red = { .offset = 0, .length = 8, }, 464 .red = { .offset = 0, .length = 8, },
465 .green = { .offset = 8, .length = 8, }, 465 .green = { .offset = 8, .length = 8, },
466 .blue = { .offset = 16, .length = 8, }, 466 .blue = { .offset = 16, .length = 8, },
@@ -468,6 +468,22 @@ static const struct ipu_rgb def_bgr_32 = {
468 .bits_per_pixel = 32, 468 .bits_per_pixel = 32,
469}; 469};
470 470
471static const struct ipu_rgb def_rgbx_32 = {
472 .red = { .offset = 24, .length = 8, },
473 .green = { .offset = 16, .length = 8, },
474 .blue = { .offset = 8, .length = 8, },
475 .transp = { .offset = 0, .length = 8, },
476 .bits_per_pixel = 32,
477};
478
479static const struct ipu_rgb def_bgrx_32 = {
480 .red = { .offset = 8, .length = 8, },
481 .green = { .offset = 16, .length = 8, },
482 .blue = { .offset = 24, .length = 8, },
483 .transp = { .offset = 0, .length = 8, },
484 .bits_per_pixel = 32,
485};
486
471static const struct ipu_rgb def_rgb_24 = { 487static const struct ipu_rgb def_rgb_24 = {
472 .red = { .offset = 16, .length = 8, }, 488 .red = { .offset = 16, .length = 8, },
473 .green = { .offset = 8, .length = 8, }, 489 .green = { .offset = 8, .length = 8, },
@@ -500,6 +516,46 @@ static const struct ipu_rgb def_bgr_16 = {
500 .bits_per_pixel = 16, 516 .bits_per_pixel = 16,
501}; 517};
502 518
519static const struct ipu_rgb def_argb_16 = {
520 .red = { .offset = 10, .length = 5, },
521 .green = { .offset = 5, .length = 5, },
522 .blue = { .offset = 0, .length = 5, },
523 .transp = { .offset = 15, .length = 1, },
524 .bits_per_pixel = 16,
525};
526
527static const struct ipu_rgb def_argb_16_4444 = {
528 .red = { .offset = 8, .length = 4, },
529 .green = { .offset = 4, .length = 4, },
530 .blue = { .offset = 0, .length = 4, },
531 .transp = { .offset = 12, .length = 4, },
532 .bits_per_pixel = 16,
533};
534
535static const struct ipu_rgb def_abgr_16 = {
536 .red = { .offset = 0, .length = 5, },
537 .green = { .offset = 5, .length = 5, },
538 .blue = { .offset = 10, .length = 5, },
539 .transp = { .offset = 15, .length = 1, },
540 .bits_per_pixel = 16,
541};
542
543static const struct ipu_rgb def_rgba_16 = {
544 .red = { .offset = 11, .length = 5, },
545 .green = { .offset = 6, .length = 5, },
546 .blue = { .offset = 1, .length = 5, },
547 .transp = { .offset = 0, .length = 1, },
548 .bits_per_pixel = 16,
549};
550
551static const struct ipu_rgb def_bgra_16 = {
552 .red = { .offset = 1, .length = 5, },
553 .green = { .offset = 6, .length = 5, },
554 .blue = { .offset = 11, .length = 5, },
555 .transp = { .offset = 0, .length = 1, },
556 .bits_per_pixel = 16,
557};
558
503#define Y_OFFSET(pix, x, y) ((x) + pix->width * (y)) 559#define Y_OFFSET(pix, x, y) ((x) + pix->width * (y))
504#define U_OFFSET(pix, x, y) ((pix->width * pix->height) + \ 560#define U_OFFSET(pix, x, y) ((pix->width * pix->height) + \
505 (pix->width * (y) / 4) + (x) / 2) 561 (pix->width * (y) / 4) + (x) / 2)
@@ -563,11 +619,19 @@ int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc)
563 break; 619 break;
564 case DRM_FORMAT_ABGR8888: 620 case DRM_FORMAT_ABGR8888:
565 case DRM_FORMAT_XBGR8888: 621 case DRM_FORMAT_XBGR8888:
566 ipu_cpmem_set_format_rgb(ch, &def_bgr_32); 622 ipu_cpmem_set_format_rgb(ch, &def_xbgr_32);
567 break; 623 break;
568 case DRM_FORMAT_ARGB8888: 624 case DRM_FORMAT_ARGB8888:
569 case DRM_FORMAT_XRGB8888: 625 case DRM_FORMAT_XRGB8888:
570 ipu_cpmem_set_format_rgb(ch, &def_rgb_32); 626 ipu_cpmem_set_format_rgb(ch, &def_xrgb_32);
627 break;
628 case DRM_FORMAT_RGBA8888:
629 case DRM_FORMAT_RGBX8888:
630 ipu_cpmem_set_format_rgb(ch, &def_rgbx_32);
631 break;
632 case DRM_FORMAT_BGRA8888:
633 case DRM_FORMAT_BGRX8888:
634 ipu_cpmem_set_format_rgb(ch, &def_bgrx_32);
571 break; 635 break;
572 case DRM_FORMAT_BGR888: 636 case DRM_FORMAT_BGR888:
573 ipu_cpmem_set_format_rgb(ch, &def_bgr_24); 637 ipu_cpmem_set_format_rgb(ch, &def_bgr_24);
@@ -581,6 +645,21 @@ int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc)
581 case DRM_FORMAT_BGR565: 645 case DRM_FORMAT_BGR565:
582 ipu_cpmem_set_format_rgb(ch, &def_bgr_16); 646 ipu_cpmem_set_format_rgb(ch, &def_bgr_16);
583 break; 647 break;
648 case DRM_FORMAT_ARGB1555:
649 ipu_cpmem_set_format_rgb(ch, &def_argb_16);
650 break;
651 case DRM_FORMAT_ABGR1555:
652 ipu_cpmem_set_format_rgb(ch, &def_abgr_16);
653 break;
654 case DRM_FORMAT_RGBA5551:
655 ipu_cpmem_set_format_rgb(ch, &def_rgba_16);
656 break;
657 case DRM_FORMAT_BGRA5551:
658 ipu_cpmem_set_format_rgb(ch, &def_bgra_16);
659 break;
660 case DRM_FORMAT_ARGB4444:
661 ipu_cpmem_set_format_rgb(ch, &def_argb_16_4444);
662 break;
584 default: 663 default:
585 return -EINVAL; 664 return -EINVAL;
586 } 665 }
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index 752cdd2da89a..06631ac61b04 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -202,7 +202,7 @@ static int ipu_csi_set_testgen_mclk(struct ipu_csi *csi, u32 pixel_clk,
202 u32 ipu_clk) 202 u32 ipu_clk)
203{ 203{
204 u32 temp; 204 u32 temp;
205 u32 div_ratio; 205 int div_ratio;
206 206
207 div_ratio = (ipu_clk / pixel_clk) - 1; 207 div_ratio = (ipu_clk / pixel_clk) - 1;
208 208
@@ -271,6 +271,7 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
271 case MEDIA_BUS_FMT_SGBRG8_1X8: 271 case MEDIA_BUS_FMT_SGBRG8_1X8:
272 case MEDIA_BUS_FMT_SGRBG8_1X8: 272 case MEDIA_BUS_FMT_SGRBG8_1X8:
273 case MEDIA_BUS_FMT_SRGGB8_1X8: 273 case MEDIA_BUS_FMT_SRGGB8_1X8:
274 case MEDIA_BUS_FMT_Y8_1X8:
274 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; 275 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
275 cfg->mipi_dt = MIPI_DT_RAW8; 276 cfg->mipi_dt = MIPI_DT_RAW8;
276 cfg->data_width = IPU_CSI_DATA_WIDTH_8; 277 cfg->data_width = IPU_CSI_DATA_WIDTH_8;
@@ -538,7 +539,7 @@ void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active,
538 539
539 temp = ipu_csi_read(csi, CSI_TST_CTRL); 540 temp = ipu_csi_read(csi, CSI_TST_CTRL);
540 541
541 if (active == false) { 542 if (!active) {
542 temp &= ~CSI_TEST_GEN_MODE_EN; 543 temp &= ~CSI_TEST_GEN_MODE_EN;
543 ipu_csi_write(csi, temp, CSI_TST_CTRL); 544 ipu_csi_write(csi, temp, CSI_TST_CTRL);
544 } else { 545 } else {
diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c
index 9ef2e1f54ca4..d3ad5347342c 100644
--- a/drivers/gpu/ipu-v3/ipu-dc.c
+++ b/drivers/gpu/ipu-v3/ipu-dc.c
@@ -183,12 +183,19 @@ int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
183 } 183 }
184 184
185 if (interlaced) { 185 if (interlaced) {
186 dc_link_event(dc, DC_EVT_NL, 0, 3); 186 int addr;
187 dc_link_event(dc, DC_EVT_EOL, 0, 2); 187
188 dc_link_event(dc, DC_EVT_NEW_DATA, 0, 1); 188 if (dc->di)
189 addr = 1;
190 else
191 addr = 0;
192
193 dc_link_event(dc, DC_EVT_NL, addr, 3);
194 dc_link_event(dc, DC_EVT_EOL, addr, 2);
195 dc_link_event(dc, DC_EVT_NEW_DATA, addr, 1);
189 196
190 /* Init template microcode */ 197 /* Init template microcode */
191 dc_write_tmpl(dc, 0, WROD(0), 0, map, SYNC_WAVE, 0, 8, 1); 198 dc_write_tmpl(dc, addr, WROD(0), 0, map, SYNC_WAVE, 0, 6, 1);
192 } else { 199 } else {
193 if (dc->di) { 200 if (dc->di) {
194 dc_link_event(dc, DC_EVT_NL, 2, 3); 201 dc_link_event(dc, DC_EVT_NL, 2, 3);
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index 2970c6bb668c..359268e3a166 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -71,6 +71,10 @@ enum di_sync_wave {
71 DI_SYNC_HSYNC = 3, 71 DI_SYNC_HSYNC = 3,
72 DI_SYNC_VSYNC = 4, 72 DI_SYNC_VSYNC = 4,
73 DI_SYNC_DE = 6, 73 DI_SYNC_DE = 6,
74
75 DI_SYNC_CNT1 = 2, /* counter >= 2 only */
76 DI_SYNC_CNT4 = 5, /* counter >= 5 only */
77 DI_SYNC_CNT5 = 6, /* counter >= 6 only */
74}; 78};
75 79
76#define SYNC_WAVE 0 80#define SYNC_WAVE 0
@@ -211,66 +215,59 @@ static void ipu_di_sync_config_interlaced(struct ipu_di *di,
211 sig->mode.hback_porch + sig->mode.hfront_porch; 215 sig->mode.hback_porch + sig->mode.hfront_porch;
212 u32 v_total = sig->mode.vactive + sig->mode.vsync_len + 216 u32 v_total = sig->mode.vactive + sig->mode.vsync_len +
213 sig->mode.vback_porch + sig->mode.vfront_porch; 217 sig->mode.vback_porch + sig->mode.vfront_porch;
214 u32 reg;
215 struct di_sync_config cfg[] = { 218 struct di_sync_config cfg[] = {
216 { 219 {
217 .run_count = h_total / 2 - 1, 220 /* 1: internal VSYNC for each frame */
218 .run_src = DI_SYNC_CLK, 221 .run_count = v_total * 2 - 1,
222 .run_src = 3, /* == counter 7 */
219 }, { 223 }, {
220 .run_count = h_total - 11, 224 /* PIN2: HSYNC waveform */
225 .run_count = h_total - 1,
221 .run_src = DI_SYNC_CLK, 226 .run_src = DI_SYNC_CLK,
222 .cnt_down = 4, 227 .cnt_polarity_gen_en = 1,
228 .cnt_polarity_trigger_src = DI_SYNC_CLK,
229 .cnt_down = sig->mode.hsync_len * 2,
223 }, { 230 }, {
224 .run_count = v_total * 2 - 1, 231 /* PIN3: VSYNC waveform */
225 .run_src = DI_SYNC_INT_HSYNC, 232 .run_count = v_total - 1,
226 .offset_count = 1, 233 .run_src = 4, /* == counter 7 */
227 .offset_src = DI_SYNC_INT_HSYNC, 234 .cnt_polarity_gen_en = 1,
228 .cnt_down = 4, 235 .cnt_polarity_trigger_src = 4, /* == counter 7 */
236 .cnt_down = sig->mode.vsync_len * 2,
237 .cnt_clr_src = DI_SYNC_CNT1,
229 }, { 238 }, {
230 .run_count = v_total / 2 - 1, 239 /* 4: Field */
240 .run_count = v_total / 2,
231 .run_src = DI_SYNC_HSYNC, 241 .run_src = DI_SYNC_HSYNC,
232 .offset_count = sig->mode.vback_porch, 242 .offset_count = h_total / 2,
233 .offset_src = DI_SYNC_HSYNC, 243 .offset_src = DI_SYNC_CLK,
234 .repeat_count = 2, 244 .repeat_count = 2,
235 .cnt_clr_src = DI_SYNC_VSYNC, 245 .cnt_clr_src = DI_SYNC_CNT1,
236 }, {
237 .run_src = DI_SYNC_HSYNC,
238 .repeat_count = sig->mode.vactive / 2,
239 .cnt_clr_src = 4,
240 }, {
241 .run_count = v_total - 1,
242 .run_src = DI_SYNC_HSYNC,
243 }, { 246 }, {
244 .run_count = v_total / 2 - 1, 247 /* 5: Active lines */
245 .run_src = DI_SYNC_HSYNC, 248 .run_src = DI_SYNC_HSYNC,
246 .offset_count = 9, 249 .offset_count = (sig->mode.vsync_len +
250 sig->mode.vback_porch) / 2,
247 .offset_src = DI_SYNC_HSYNC, 251 .offset_src = DI_SYNC_HSYNC,
248 .repeat_count = 2, 252 .repeat_count = sig->mode.vactive / 2,
249 .cnt_clr_src = DI_SYNC_VSYNC, 253 .cnt_clr_src = DI_SYNC_CNT4,
250 }, { 254 }, {
255 /* 6: Active pixel, referenced by DC */
251 .run_src = DI_SYNC_CLK, 256 .run_src = DI_SYNC_CLK,
252 .offset_count = sig->mode.hback_porch, 257 .offset_count = sig->mode.hsync_len +
258 sig->mode.hback_porch,
253 .offset_src = DI_SYNC_CLK, 259 .offset_src = DI_SYNC_CLK,
254 .repeat_count = sig->mode.hactive, 260 .repeat_count = sig->mode.hactive,
255 .cnt_clr_src = 5, 261 .cnt_clr_src = DI_SYNC_CNT5,
256 }, { 262 }, {
257 .run_count = v_total - 1, 263 /* 7: Half line HSYNC */
258 .run_src = DI_SYNC_INT_HSYNC, 264 .run_count = h_total / 2 - 1,
259 .offset_count = v_total / 2, 265 .run_src = DI_SYNC_CLK,
260 .offset_src = DI_SYNC_INT_HSYNC,
261 .cnt_clr_src = DI_SYNC_HSYNC,
262 .cnt_down = 4,
263 } 266 }
264 }; 267 };
265 268
266 ipu_di_sync_config(di, cfg, 0, ARRAY_SIZE(cfg)); 269 ipu_di_sync_config(di, cfg, 0, ARRAY_SIZE(cfg));
267 270
268 /* set gentime select and tag sel */
269 reg = ipu_di_read(di, DI_SW_GEN1(9));
270 reg &= 0x1FFFFFFF;
271 reg |= (3 - 1) << 29 | 0x00008000;
272 ipu_di_write(di, reg, DI_SW_GEN1(9));
273
274 ipu_di_write(di, v_total / 2 - 1, DI_SCR_CONF); 271 ipu_di_write(di, v_total / 2 - 1, DI_SCR_CONF);
275} 272}
276 273
@@ -543,6 +540,29 @@ int ipu_di_adjust_videomode(struct ipu_di *di, struct videomode *mode)
543} 540}
544EXPORT_SYMBOL_GPL(ipu_di_adjust_videomode); 541EXPORT_SYMBOL_GPL(ipu_di_adjust_videomode);
545 542
543static u32 ipu_di_gen_polarity(int pin)
544{
545 switch (pin) {
546 case 1:
547 return DI_GEN_POLARITY_1;
548 case 2:
549 return DI_GEN_POLARITY_2;
550 case 3:
551 return DI_GEN_POLARITY_3;
552 case 4:
553 return DI_GEN_POLARITY_4;
554 case 5:
555 return DI_GEN_POLARITY_5;
556 case 6:
557 return DI_GEN_POLARITY_6;
558 case 7:
559 return DI_GEN_POLARITY_7;
560 case 8:
561 return DI_GEN_POLARITY_8;
562 }
563 return 0;
564}
565
546int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig) 566int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
547{ 567{
548 u32 reg; 568 u32 reg;
@@ -582,15 +602,8 @@ int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
582 602
583 /* set y_sel = 1 */ 603 /* set y_sel = 1 */
584 di_gen |= 0x10000000; 604 di_gen |= 0x10000000;
585 di_gen |= DI_GEN_POLARITY_5;
586 di_gen |= DI_GEN_POLARITY_8;
587
588 vsync_cnt = 7;
589 605
590 if (sig->mode.flags & DISPLAY_FLAGS_HSYNC_HIGH) 606 vsync_cnt = 3;
591 di_gen |= DI_GEN_POLARITY_3;
592 if (sig->mode.flags & DISPLAY_FLAGS_VSYNC_HIGH)
593 di_gen |= DI_GEN_POLARITY_2;
594 } else { 607 } else {
595 ipu_di_sync_config_noninterlaced(di, sig, div); 608 ipu_di_sync_config_noninterlaced(di, sig, div);
596 609
@@ -602,25 +615,13 @@ int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
602 */ 615 */
603 if (!(sig->hsync_pin == 2 && sig->vsync_pin == 3)) 616 if (!(sig->hsync_pin == 2 && sig->vsync_pin == 3))
604 vsync_cnt = 6; 617 vsync_cnt = 6;
605
606 if (sig->mode.flags & DISPLAY_FLAGS_HSYNC_HIGH) {
607 if (sig->hsync_pin == 2)
608 di_gen |= DI_GEN_POLARITY_2;
609 else if (sig->hsync_pin == 4)
610 di_gen |= DI_GEN_POLARITY_4;
611 else if (sig->hsync_pin == 7)
612 di_gen |= DI_GEN_POLARITY_7;
613 }
614 if (sig->mode.flags & DISPLAY_FLAGS_VSYNC_HIGH) {
615 if (sig->vsync_pin == 3)
616 di_gen |= DI_GEN_POLARITY_3;
617 else if (sig->vsync_pin == 6)
618 di_gen |= DI_GEN_POLARITY_6;
619 else if (sig->vsync_pin == 8)
620 di_gen |= DI_GEN_POLARITY_8;
621 }
622 } 618 }
623 619
620 if (sig->mode.flags & DISPLAY_FLAGS_HSYNC_HIGH)
621 di_gen |= ipu_di_gen_polarity(sig->hsync_pin);
622 if (sig->mode.flags & DISPLAY_FLAGS_VSYNC_HIGH)
623 di_gen |= ipu_di_gen_polarity(sig->vsync_pin);
624
624 if (sig->clk_pol) 625 if (sig->clk_pol)
625 di_gen |= DI_GEN_POLARITY_DISP_CLK; 626 di_gen |= DI_GEN_POLARITY_DISP_CLK;
626 627
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 21060668fd25..41edd5a3f100 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -1,53 +1,135 @@
1/* 1/*
2 * vga_switcheroo.c - Support for laptop with dual GPU using one set of outputs
3 *
2 * Copyright (c) 2010 Red Hat Inc. 4 * Copyright (c) 2010 Red Hat Inc.
3 * Author : Dave Airlie <airlied@redhat.com> 5 * Author : Dave Airlie <airlied@redhat.com>
4 * 6 *
7 * Copyright (c) 2015 Lukas Wunner <lukas@wunner.de>
5 * 8 *
6 * Licensed under GPLv2 9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
7 * 15 *
8 * vga_switcheroo.c - Support for laptop with dual GPU using one set of outputs 16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
9 * 19 *
10 * Switcher interface - methods require for ATPX and DCM 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
11 * - switchto - this throws the output MUX switch 21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 * - discrete_set_power - sets the power state for the discrete card 22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
25 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS
27 * IN THE SOFTWARE.
13 * 28 *
14 * GPU driver interface
15 * - set_gpu_state - this should do the equiv of s/r for the card
16 * - this should *not* set the discrete power state
17 * - switch_check - check if the device is in a position to switch now
18 */ 29 */
19 30
20#define pr_fmt(fmt) "vga_switcheroo: " fmt 31#define pr_fmt(fmt) "vga_switcheroo: " fmt
21 32
22#include <linux/module.h> 33#include <linux/console.h>
23#include <linux/seq_file.h>
24#include <linux/uaccess.h>
25#include <linux/fs.h>
26#include <linux/debugfs.h> 34#include <linux/debugfs.h>
27#include <linux/fb.h> 35#include <linux/fb.h>
28 36#include <linux/fs.h>
37#include <linux/module.h>
29#include <linux/pci.h> 38#include <linux/pci.h>
30#include <linux/console.h>
31#include <linux/vga_switcheroo.h>
32#include <linux/pm_runtime.h> 39#include <linux/pm_runtime.h>
33 40#include <linux/seq_file.h>
41#include <linux/uaccess.h>
34#include <linux/vgaarb.h> 42#include <linux/vgaarb.h>
43#include <linux/vga_switcheroo.h>
44
45/**
46 * DOC: Overview
47 *
48 * vga_switcheroo is the Linux subsystem for laptop hybrid graphics.
49 * These come in two flavors:
50 *
51 * * muxed: Dual GPUs with a multiplexer chip to switch outputs between GPUs.
52 * * muxless: Dual GPUs but only one of them is connected to outputs.
53 * The other one is merely used to offload rendering, its results
54 * are copied over PCIe into the framebuffer. On Linux this is
55 * supported with DRI PRIME.
56 *
57 * Hybrid graphics started to appear in the late Naughties and were initially
58 * all muxed. Newer laptops moved to a muxless architecture for cost reasons.
59 * A notable exception is the MacBook Pro which continues to use a mux.
60 * Muxes come with varying capabilities: Some switch only the panel, others
61 * can also switch external displays. Some switch all display pins at once
62 * while others can switch just the DDC lines. (To allow EDID probing
63 * for the inactive GPU.) Also, muxes are often used to cut power to the
64 * discrete GPU while it is not used.
65 *
66 * DRM drivers register GPUs with vga_switcheroo, these are heretoforth called
67 * clients. The mux is called the handler. Muxless machines also register a
68 * handler to control the power state of the discrete GPU, its ->switchto
69 * callback is a no-op for obvious reasons. The discrete GPU is often equipped
70 * with an HDA controller for the HDMI/DP audio signal, this will also
71 * register as a client so that vga_switcheroo can take care of the correct
72 * suspend/resume order when changing the discrete GPU's power state. In total
73 * there can thus be up to three clients: Two vga clients (GPUs) and one audio
74 * client (on the discrete GPU). The code is mostly prepared to support
75 * machines with more than two GPUs should they become available.
76 * The GPU to which the outputs are currently switched is called the
77 * active client in vga_switcheroo parlance. The GPU not in use is the
78 * inactive client.
79 */
35 80
81/**
82 * struct vga_switcheroo_client - registered client
83 * @pdev: client pci device
84 * @fb_info: framebuffer to which console is remapped on switching
85 * @pwr_state: current power state
86 * @ops: client callbacks
87 * @id: client identifier. Determining the id requires the handler,
88 * so gpus are initially assigned VGA_SWITCHEROO_UNKNOWN_ID
89 * and later given their true id in vga_switcheroo_enable()
90 * @active: whether the outputs are currently switched to this client
91 * @driver_power_control: whether power state is controlled by the driver's
92 * runtime pm. If true, writing ON and OFF to the vga_switcheroo debugfs
93 * interface is a no-op so as not to interfere with runtime pm
94 * @list: client list
95 *
96 * Registered client. A client can be either a GPU or an audio device on a GPU.
97 * For audio clients, the @fb_info, @active and @driver_power_control members
98 * are bogus.
99 */
36struct vga_switcheroo_client { 100struct vga_switcheroo_client {
37 struct pci_dev *pdev; 101 struct pci_dev *pdev;
38 struct fb_info *fb_info; 102 struct fb_info *fb_info;
39 int pwr_state; 103 enum vga_switcheroo_state pwr_state;
40 const struct vga_switcheroo_client_ops *ops; 104 const struct vga_switcheroo_client_ops *ops;
41 int id; 105 enum vga_switcheroo_client_id id;
42 bool active; 106 bool active;
43 bool driver_power_control; 107 bool driver_power_control;
44 struct list_head list; 108 struct list_head list;
45}; 109};
46 110
111/*
112 * protects access to struct vgasr_priv
113 */
47static DEFINE_MUTEX(vgasr_mutex); 114static DEFINE_MUTEX(vgasr_mutex);
48 115
116/**
117 * struct vgasr_priv - vga_switcheroo private data
118 * @active: whether vga_switcheroo is enabled.
119 * Prerequisite is the registration of two GPUs and a handler
120 * @delayed_switch_active: whether a delayed switch is pending
121 * @delayed_client_id: client to which a delayed switch is pending
122 * @debugfs_root: directory for vga_switcheroo debugfs interface
123 * @switch_file: file for vga_switcheroo debugfs interface
124 * @registered_clients: number of registered GPUs
125 * (counting only vga clients, not audio clients)
126 * @clients: list of registered clients
127 * @handler: registered handler
128 *
129 * vga_switcheroo private data. Currently only one vga_switcheroo instance
130 * per system is supported.
131 */
49struct vgasr_priv { 132struct vgasr_priv {
50
51 bool active; 133 bool active;
52 bool delayed_switch_active; 134 bool delayed_switch_active;
53 enum vga_switcheroo_client_id delayed_client_id; 135 enum vga_switcheroo_client_id delayed_client_id;
@@ -58,12 +140,13 @@ struct vgasr_priv {
58 int registered_clients; 140 int registered_clients;
59 struct list_head clients; 141 struct list_head clients;
60 142
61 struct vga_switcheroo_handler *handler; 143 const struct vga_switcheroo_handler *handler;
62}; 144};
63 145
64#define ID_BIT_AUDIO 0x100 146#define ID_BIT_AUDIO 0x100
65#define client_is_audio(c) ((c)->id & ID_BIT_AUDIO) 147#define client_is_audio(c) ((c)->id & ID_BIT_AUDIO)
66#define client_is_vga(c) ((c)->id == -1 || !client_is_audio(c)) 148#define client_is_vga(c) ((c)->id == VGA_SWITCHEROO_UNKNOWN_ID || \
149 !client_is_audio(c))
67#define client_id(c) ((c)->id & ~ID_BIT_AUDIO) 150#define client_id(c) ((c)->id & ~ID_BIT_AUDIO)
68 151
69static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv); 152static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv);
@@ -91,7 +174,7 @@ static void vga_switcheroo_enable(void)
91 vgasr_priv.handler->init(); 174 vgasr_priv.handler->init();
92 175
93 list_for_each_entry(client, &vgasr_priv.clients, list) { 176 list_for_each_entry(client, &vgasr_priv.clients, list) {
94 if (client->id != -1) 177 if (client->id != VGA_SWITCHEROO_UNKNOWN_ID)
95 continue; 178 continue;
96 ret = vgasr_priv.handler->get_client_id(client->pdev); 179 ret = vgasr_priv.handler->get_client_id(client->pdev);
97 if (ret < 0) 180 if (ret < 0)
@@ -103,7 +186,16 @@ static void vga_switcheroo_enable(void)
103 vgasr_priv.active = true; 186 vgasr_priv.active = true;
104} 187}
105 188
106int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) 189/**
190 * vga_switcheroo_register_handler() - register handler
191 * @handler: handler callbacks
192 *
193 * Register handler. Enable vga_switcheroo if two vga clients have already
194 * registered.
195 *
196 * Return: 0 on success, -EINVAL if a handler was already registered.
197 */
198int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler)
107{ 199{
108 mutex_lock(&vgasr_mutex); 200 mutex_lock(&vgasr_mutex);
109 if (vgasr_priv.handler) { 201 if (vgasr_priv.handler) {
@@ -121,6 +213,11 @@ int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
121} 213}
122EXPORT_SYMBOL(vga_switcheroo_register_handler); 214EXPORT_SYMBOL(vga_switcheroo_register_handler);
123 215
216/**
217 * vga_switcheroo_unregister_handler() - unregister handler
218 *
219 * Unregister handler. Disable vga_switcheroo.
220 */
124void vga_switcheroo_unregister_handler(void) 221void vga_switcheroo_unregister_handler(void)
125{ 222{
126 mutex_lock(&vgasr_mutex); 223 mutex_lock(&vgasr_mutex);
@@ -136,7 +233,8 @@ EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
136 233
137static int register_client(struct pci_dev *pdev, 234static int register_client(struct pci_dev *pdev,
138 const struct vga_switcheroo_client_ops *ops, 235 const struct vga_switcheroo_client_ops *ops,
139 int id, bool active, bool driver_power_control) 236 enum vga_switcheroo_client_id id, bool active,
237 bool driver_power_control)
140{ 238{
141 struct vga_switcheroo_client *client; 239 struct vga_switcheroo_client *client;
142 240
@@ -164,21 +262,45 @@ static int register_client(struct pci_dev *pdev,
164 return 0; 262 return 0;
165} 263}
166 264
265/**
266 * vga_switcheroo_register_client - register vga client
267 * @pdev: client pci device
268 * @ops: client callbacks
269 * @driver_power_control: whether power state is controlled by the driver's
270 * runtime pm
271 *
272 * Register vga client (GPU). Enable vga_switcheroo if another GPU and a
273 * handler have already registered. The power state of the client is assumed
274 * to be ON.
275 *
276 * Return: 0 on success, -ENOMEM on memory allocation error.
277 */
167int vga_switcheroo_register_client(struct pci_dev *pdev, 278int vga_switcheroo_register_client(struct pci_dev *pdev,
168 const struct vga_switcheroo_client_ops *ops, 279 const struct vga_switcheroo_client_ops *ops,
169 bool driver_power_control) 280 bool driver_power_control)
170{ 281{
171 return register_client(pdev, ops, -1, 282 return register_client(pdev, ops, VGA_SWITCHEROO_UNKNOWN_ID,
172 pdev == vga_default_device(), 283 pdev == vga_default_device(),
173 driver_power_control); 284 driver_power_control);
174} 285}
175EXPORT_SYMBOL(vga_switcheroo_register_client); 286EXPORT_SYMBOL(vga_switcheroo_register_client);
176 287
288/**
289 * vga_switcheroo_register_audio_client - register audio client
290 * @pdev: client pci device
291 * @ops: client callbacks
292 * @id: client identifier
293 *
294 * Register audio client (audio device on a GPU). The power state of the
295 * client is assumed to be ON.
296 *
297 * Return: 0 on success, -ENOMEM on memory allocation error.
298 */
177int vga_switcheroo_register_audio_client(struct pci_dev *pdev, 299int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
178 const struct vga_switcheroo_client_ops *ops, 300 const struct vga_switcheroo_client_ops *ops,
179 int id, bool active) 301 enum vga_switcheroo_client_id id)
180{ 302{
181 return register_client(pdev, ops, id | ID_BIT_AUDIO, active, false); 303 return register_client(pdev, ops, id | ID_BIT_AUDIO, false, false);
182} 304}
183EXPORT_SYMBOL(vga_switcheroo_register_audio_client); 305EXPORT_SYMBOL(vga_switcheroo_register_audio_client);
184 306
@@ -194,7 +316,8 @@ find_client_from_pci(struct list_head *head, struct pci_dev *pdev)
194} 316}
195 317
196static struct vga_switcheroo_client * 318static struct vga_switcheroo_client *
197find_client_from_id(struct list_head *head, int client_id) 319find_client_from_id(struct list_head *head,
320 enum vga_switcheroo_client_id client_id)
198{ 321{
199 struct vga_switcheroo_client *client; 322 struct vga_switcheroo_client *client;
200 323
@@ -210,24 +333,42 @@ find_active_client(struct list_head *head)
210 struct vga_switcheroo_client *client; 333 struct vga_switcheroo_client *client;
211 334
212 list_for_each_entry(client, head, list) 335 list_for_each_entry(client, head, list)
213 if (client->active && client_is_vga(client)) 336 if (client->active)
214 return client; 337 return client;
215 return NULL; 338 return NULL;
216} 339}
217 340
218int vga_switcheroo_get_client_state(struct pci_dev *pdev) 341/**
342 * vga_switcheroo_get_client_state() - obtain power state of a given client
343 * @pdev: client pci device
344 *
345 * Obtain power state of a given client as seen from vga_switcheroo.
346 * The function is only called from hda_intel.c.
347 *
348 * Return: Power state.
349 */
350enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *pdev)
219{ 351{
220 struct vga_switcheroo_client *client; 352 struct vga_switcheroo_client *client;
353 enum vga_switcheroo_state ret;
221 354
355 mutex_lock(&vgasr_mutex);
222 client = find_client_from_pci(&vgasr_priv.clients, pdev); 356 client = find_client_from_pci(&vgasr_priv.clients, pdev);
223 if (!client) 357 if (!client)
224 return VGA_SWITCHEROO_NOT_FOUND; 358 ret = VGA_SWITCHEROO_NOT_FOUND;
225 if (!vgasr_priv.active) 359 else
226 return VGA_SWITCHEROO_INIT; 360 ret = client->pwr_state;
227 return client->pwr_state; 361 mutex_unlock(&vgasr_mutex);
362 return ret;
228} 363}
229EXPORT_SYMBOL(vga_switcheroo_get_client_state); 364EXPORT_SYMBOL(vga_switcheroo_get_client_state);
230 365
366/**
367 * vga_switcheroo_unregister_client() - unregister client
368 * @pdev: client pci device
369 *
370 * Unregister client. Disable vga_switcheroo if this is a vga client (GPU).
371 */
231void vga_switcheroo_unregister_client(struct pci_dev *pdev) 372void vga_switcheroo_unregister_client(struct pci_dev *pdev)
232{ 373{
233 struct vga_switcheroo_client *client; 374 struct vga_switcheroo_client *client;
@@ -249,6 +390,14 @@ void vga_switcheroo_unregister_client(struct pci_dev *pdev)
249} 390}
250EXPORT_SYMBOL(vga_switcheroo_unregister_client); 391EXPORT_SYMBOL(vga_switcheroo_unregister_client);
251 392
393/**
394 * vga_switcheroo_client_fb_set() - set framebuffer of a given client
395 * @pdev: client pci device
396 * @info: framebuffer
397 *
398 * Set framebuffer of a given client. The console will be remapped to this
399 * on switching.
400 */
252void vga_switcheroo_client_fb_set(struct pci_dev *pdev, 401void vga_switcheroo_client_fb_set(struct pci_dev *pdev,
253 struct fb_info *info) 402 struct fb_info *info)
254{ 403{
@@ -262,6 +411,42 @@ void vga_switcheroo_client_fb_set(struct pci_dev *pdev,
262} 411}
263EXPORT_SYMBOL(vga_switcheroo_client_fb_set); 412EXPORT_SYMBOL(vga_switcheroo_client_fb_set);
264 413
414/**
415 * DOC: Manual switching and manual power control
416 *
417 * In this mode of use, the file /sys/kernel/debug/vgaswitcheroo/switch
418 * can be read to retrieve the current vga_switcheroo state and commands
419 * can be written to it to change the state. The file appears as soon as
420 * two GPU drivers and one handler have registered with vga_switcheroo.
421 * The following commands are understood:
422 *
423 * * OFF: Power off the device not in use.
424 * * ON: Power on the device not in use.
425 * * IGD: Switch to the integrated graphics device.
426 * Power on the integrated GPU if necessary, power off the discrete GPU.
427 * Prerequisite is that no user space processes (e.g. Xorg, alsactl)
428 * have opened device files of the GPUs or the audio client. If the
429 * switch fails, the user may invoke lsof(8) or fuser(1) on /dev/dri/
430 * and /dev/snd/controlC1 to identify processes blocking the switch.
431 * * DIS: Switch to the discrete graphics device.
432 * * DIGD: Delayed switch to the integrated graphics device.
433 * This will perform the switch once the last user space process has
434 * closed the device files of the GPUs and the audio client.
435 * * DDIS: Delayed switch to the discrete graphics device.
436 * * MIGD: Mux-only switch to the integrated graphics device.
437 * Does not remap console or change the power state of either gpu.
438 * If the integrated GPU is currently off, the screen will turn black.
439 * If it is on, the screen will show whatever happens to be in VRAM.
440 * Either way, the user has to blindly enter the command to switch back.
441 * * MDIS: Mux-only switch to the discrete graphics device.
442 *
443 * For GPUs whose power state is controlled by the driver's runtime pm,
444 * the ON and OFF commands are a no-op (see next section).
445 *
446 * For muxless machines, the IGD/DIS, DIGD/DDIS and MIGD/MDIS commands
447 * should not be used.
448 */
449
265static int vga_switcheroo_show(struct seq_file *m, void *v) 450static int vga_switcheroo_show(struct seq_file *m, void *v)
266{ 451{
267 struct vga_switcheroo_client *client; 452 struct vga_switcheroo_client *client;
@@ -312,7 +497,8 @@ static int vga_switchoff(struct vga_switcheroo_client *client)
312 return 0; 497 return 0;
313} 498}
314 499
315static void set_audio_state(int id, int state) 500static void set_audio_state(enum vga_switcheroo_client_id id,
501 enum vga_switcheroo_state state)
316{ 502{
317 struct vga_switcheroo_client *client; 503 struct vga_switcheroo_client *client;
318 504
@@ -399,7 +585,7 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
399 int ret; 585 int ret;
400 bool delay = false, can_switch; 586 bool delay = false, can_switch;
401 bool just_mux = false; 587 bool just_mux = false;
402 int client_id = -1; 588 enum vga_switcheroo_client_id client_id = VGA_SWITCHEROO_UNKNOWN_ID;
403 struct vga_switcheroo_client *client = NULL; 589 struct vga_switcheroo_client *client = NULL;
404 590
405 if (cnt > 63) 591 if (cnt > 63)
@@ -468,7 +654,7 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
468 client_id = VGA_SWITCHEROO_DIS; 654 client_id = VGA_SWITCHEROO_DIS;
469 } 655 }
470 656
471 if (client_id == -1) 657 if (client_id == VGA_SWITCHEROO_UNKNOWN_ID)
472 goto out; 658 goto out;
473 client = find_client_from_id(&vgasr_priv.clients, client_id); 659 client = find_client_from_id(&vgasr_priv.clients, client_id);
474 if (!client) 660 if (!client)
@@ -559,6 +745,16 @@ fail:
559 return -1; 745 return -1;
560} 746}
561 747
748/**
749 * vga_switcheroo_process_delayed_switch() - helper for delayed switching
750 *
751 * Process a delayed switch if one is pending. DRM drivers should call this
752 * from their ->lastclose callback.
753 *
754 * Return: 0 on success. -EINVAL if no delayed switch is pending, if the client
755 * has unregistered in the meantime or if there are other clients blocking the
756 * switch. If the actual switch fails, an error is reported and 0 is returned.
757 */
562int vga_switcheroo_process_delayed_switch(void) 758int vga_switcheroo_process_delayed_switch(void)
563{ 759{
564 struct vga_switcheroo_client *client; 760 struct vga_switcheroo_client *client;
@@ -589,6 +785,39 @@ err:
589} 785}
590EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch); 786EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
591 787
788/**
789 * DOC: Driver power control
790 *
791 * In this mode of use, the discrete GPU automatically powers up and down at
792 * the discretion of the driver's runtime pm. On muxed machines, the user may
793 * still influence the muxer state by way of the debugfs interface, however
794 * the ON and OFF commands become a no-op for the discrete GPU.
795 *
796 * This mode is the default on Nvidia HybridPower/Optimus and ATI PowerXpress.
797 * Specifying nouveau.runpm=0, radeon.runpm=0 or amdgpu.runpm=0 on the kernel
798 * command line disables it.
799 *
800 * When the driver decides to power up or down, it notifies vga_switcheroo
801 * thereof so that it can (a) power the audio device on the GPU up or down,
802 * and (b) update its internal power state representation for the device.
803 * This is achieved by vga_switcheroo_set_dynamic_switch().
804 *
805 * After the GPU has been suspended, the handler needs to be called to cut
806 * power to the GPU. Likewise it needs to reinstate power before the GPU
807 * can resume. This is achieved by vga_switcheroo_init_domain_pm_ops(),
808 * which augments the GPU's suspend/resume functions by the requisite
809 * calls to the handler.
810 *
811 * When the audio device resumes, the GPU needs to be woken. This is achieved
812 * by vga_switcheroo_init_domain_pm_optimus_hdmi_audio(), which augments the
813 * audio device's resume function.
814 *
815 * On muxed machines, if the mux is initially switched to the discrete GPU,
816 * the user ends up with a black screen when the GPU powers down after boot.
817 * As a workaround, the mux is forced to the integrated GPU on runtime suspend,
818 * cf. https://bugs.freedesktop.org/show_bug.cgi?id=75917
819 */
820
592static void vga_switcheroo_power_switch(struct pci_dev *pdev, 821static void vga_switcheroo_power_switch(struct pci_dev *pdev,
593 enum vga_switcheroo_state state) 822 enum vga_switcheroo_state state)
594{ 823{
@@ -607,22 +836,32 @@ static void vga_switcheroo_power_switch(struct pci_dev *pdev,
607 vgasr_priv.handler->power_state(client->id, state); 836 vgasr_priv.handler->power_state(client->id, state);
608} 837}
609 838
610/* force a PCI device to a certain state - mainly to turn off audio clients */ 839/**
611 840 * vga_switcheroo_set_dynamic_switch() - helper for driver power control
841 * @pdev: client pci device
842 * @dynamic: new power state
843 *
844 * Helper for GPUs whose power state is controlled by the driver's runtime pm.
845 * When the driver decides to power up or down, it notifies vga_switcheroo
846 * thereof using this helper so that it can (a) power the audio device on
847 * the GPU up or down, and (b) update its internal power state representation
848 * for the device.
849 */
612void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, 850void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev,
613 enum vga_switcheroo_state dynamic) 851 enum vga_switcheroo_state dynamic)
614{ 852{
615 struct vga_switcheroo_client *client; 853 struct vga_switcheroo_client *client;
616 854
855 mutex_lock(&vgasr_mutex);
617 client = find_client_from_pci(&vgasr_priv.clients, pdev); 856 client = find_client_from_pci(&vgasr_priv.clients, pdev);
618 if (!client) 857 if (!client || !client->driver_power_control) {
619 return; 858 mutex_unlock(&vgasr_mutex);
620
621 if (!client->driver_power_control)
622 return; 859 return;
860 }
623 861
624 client->pwr_state = dynamic; 862 client->pwr_state = dynamic;
625 set_audio_state(client->id, dynamic); 863 set_audio_state(client->id, dynamic);
864 mutex_unlock(&vgasr_mutex);
626} 865}
627EXPORT_SYMBOL(vga_switcheroo_set_dynamic_switch); 866EXPORT_SYMBOL(vga_switcheroo_set_dynamic_switch);
628 867
@@ -635,9 +874,11 @@ static int vga_switcheroo_runtime_suspend(struct device *dev)
635 ret = dev->bus->pm->runtime_suspend(dev); 874 ret = dev->bus->pm->runtime_suspend(dev);
636 if (ret) 875 if (ret)
637 return ret; 876 return ret;
877 mutex_lock(&vgasr_mutex);
638 if (vgasr_priv.handler->switchto) 878 if (vgasr_priv.handler->switchto)
639 vgasr_priv.handler->switchto(VGA_SWITCHEROO_IGD); 879 vgasr_priv.handler->switchto(VGA_SWITCHEROO_IGD);
640 vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF); 880 vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF);
881 mutex_unlock(&vgasr_mutex);
641 return 0; 882 return 0;
642} 883}
643 884
@@ -646,7 +887,9 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
646 struct pci_dev *pdev = to_pci_dev(dev); 887 struct pci_dev *pdev = to_pci_dev(dev);
647 int ret; 888 int ret;
648 889
890 mutex_lock(&vgasr_mutex);
649 vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_ON); 891 vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_ON);
892 mutex_unlock(&vgasr_mutex);
650 ret = dev->bus->pm->runtime_resume(dev); 893 ret = dev->bus->pm->runtime_resume(dev);
651 if (ret) 894 if (ret)
652 return ret; 895 return ret;
@@ -654,8 +897,18 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
654 return 0; 897 return 0;
655} 898}
656 899
657/* this version is for the case where the power switch is separate 900/**
658 to the device being powered down. */ 901 * vga_switcheroo_init_domain_pm_ops() - helper for driver power control
902 * @dev: vga client device
903 * @domain: power domain
904 *
905 * Helper for GPUs whose power state is controlled by the driver's runtime pm.
906 * After the GPU has been suspended, the handler needs to be called to cut
907 * power to the GPU. Likewise it needs to reinstate power before the GPU
908 * can resume. To this end, this helper augments the suspend/resume functions
909 * by the requisite calls to the handler. It needs only be called on platforms
910 * where the power switch is separate to the device being powered down.
911 */
659int vga_switcheroo_init_domain_pm_ops(struct device *dev, 912int vga_switcheroo_init_domain_pm_ops(struct device *dev,
660 struct dev_pm_domain *domain) 913 struct dev_pm_domain *domain)
661{ 914{
@@ -682,33 +935,50 @@ EXPORT_SYMBOL(vga_switcheroo_fini_domain_pm_ops);
682static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev) 935static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
683{ 936{
684 struct pci_dev *pdev = to_pci_dev(dev); 937 struct pci_dev *pdev = to_pci_dev(dev);
938 struct vga_switcheroo_client *client;
939 struct device *video_dev = NULL;
685 int ret; 940 int ret;
686 struct vga_switcheroo_client *client, *found = NULL;
687 941
688 /* we need to check if we have to switch back on the video 942 /* we need to check if we have to switch back on the video
689 device so the audio device can come back */ 943 device so the audio device can come back */
944 mutex_lock(&vgasr_mutex);
690 list_for_each_entry(client, &vgasr_priv.clients, list) { 945 list_for_each_entry(client, &vgasr_priv.clients, list) {
691 if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) && 946 if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) &&
692 client_is_vga(client)) { 947 client_is_vga(client)) {
693 found = client; 948 video_dev = &client->pdev->dev;
694 ret = pm_runtime_get_sync(&client->pdev->dev);
695 if (ret) {
696 if (ret != 1)
697 return ret;
698 }
699 break; 949 break;
700 } 950 }
701 } 951 }
952 mutex_unlock(&vgasr_mutex);
953
954 if (video_dev) {
955 ret = pm_runtime_get_sync(video_dev);
956 if (ret && ret != 1)
957 return ret;
958 }
702 ret = dev->bus->pm->runtime_resume(dev); 959 ret = dev->bus->pm->runtime_resume(dev);
703 960
704 /* put the reference for the gpu */ 961 /* put the reference for the gpu */
705 if (found) { 962 if (video_dev) {
706 pm_runtime_mark_last_busy(&found->pdev->dev); 963 pm_runtime_mark_last_busy(video_dev);
707 pm_runtime_put_autosuspend(&found->pdev->dev); 964 pm_runtime_put_autosuspend(video_dev);
708 } 965 }
709 return ret; 966 return ret;
710} 967}
711 968
969/**
970 * vga_switcheroo_init_domain_pm_optimus_hdmi_audio() - helper for driver
971 * power control
972 * @dev: audio client device
973 * @domain: power domain
974 *
975 * Helper for GPUs whose power state is controlled by the driver's runtime pm.
976 * When the audio device resumes, the GPU needs to be woken. This helper
977 * augments the audio device's resume function to do that.
978 *
979 * Return: 0 on success, -EINVAL if no power management operations are
980 * defined for this device.
981 */
712int 982int
713vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, 983vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev,
714 struct dev_pm_domain *domain) 984 struct dev_pm_domain *domain)
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index a0b433456107..3166e4bc4eb6 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -531,7 +531,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
531 return false; 531 return false;
532 532
533 /* Allocate structure */ 533 /* Allocate structure */
534 vgadev = kmalloc(sizeof(struct vga_device), GFP_KERNEL); 534 vgadev = kzalloc(sizeof(struct vga_device), GFP_KERNEL);
535 if (vgadev == NULL) { 535 if (vgadev == NULL) {
536 pr_err("failed to allocate pci device\n"); 536 pr_err("failed to allocate pci device\n");
537 /* 537 /*
@@ -541,8 +541,6 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
541 return false; 541 return false;
542 } 542 }
543 543
544 memset(vgadev, 0, sizeof(*vgadev));
545
546 /* Take lock & check for duplicates */ 544 /* Take lock & check for duplicates */
547 spin_lock_irqsave(&vga_lock, flags); 545 spin_lock_irqsave(&vga_lock, flags);
548 if (vgadev_find(pdev) != NULL) { 546 if (vgadev_find(pdev) != NULL) {
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 0dec3f59917a..976efeb3f2ba 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -346,7 +346,7 @@ gmux_active_client(struct apple_gmux_data *gmux_data)
346 return VGA_SWITCHEROO_DIS; 346 return VGA_SWITCHEROO_DIS;
347} 347}
348 348
349static struct vga_switcheroo_handler gmux_handler = { 349static const struct vga_switcheroo_handler gmux_handler = {
350 .switchto = gmux_switchto, 350 .switchto = gmux_switchto,
351 .power_state = gmux_set_power_state, 351 .power_state = gmux_set_power_state,
352 .get_client_id = gmux_get_client_id, 352 .get_client_id = gmux_get_client_id,
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index f56cdcecc1c9..0b921ae06cd8 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -107,6 +107,9 @@ struct dma_buf_attachment;
107 * ATOMIC: used in the atomic code. 107 * ATOMIC: used in the atomic code.
108 * This is the category used by the DRM_DEBUG_ATOMIC() macro. 108 * This is the category used by the DRM_DEBUG_ATOMIC() macro.
109 * 109 *
110 * VBL: used for verbose debug message in the vblank code
111 * This is the category used by the DRM_DEBUG_VBL() macro.
112 *
110 * Enabling verbose debug messages is done through the drm.debug parameter, 113 * Enabling verbose debug messages is done through the drm.debug parameter,
111 * each category being enabled by a bit. 114 * each category being enabled by a bit.
112 * 115 *
@@ -114,7 +117,7 @@ struct dma_buf_attachment;
114 * drm.debug=0x2 will enable DRIVER messages 117 * drm.debug=0x2 will enable DRIVER messages
115 * drm.debug=0x3 will enable CORE and DRIVER messages 118 * drm.debug=0x3 will enable CORE and DRIVER messages
116 * ... 119 * ...
117 * drm.debug=0xf will enable all messages 120 * drm.debug=0x3f will enable all messages
118 * 121 *
119 * An interesting feature is that it's possible to enable verbose logging at 122 * An interesting feature is that it's possible to enable verbose logging at
120 * run-time by echoing the debug value in its sysfs node: 123 * run-time by echoing the debug value in its sysfs node:
@@ -125,6 +128,7 @@ struct dma_buf_attachment;
125#define DRM_UT_KMS 0x04 128#define DRM_UT_KMS 0x04
126#define DRM_UT_PRIME 0x08 129#define DRM_UT_PRIME 0x08
127#define DRM_UT_ATOMIC 0x10 130#define DRM_UT_ATOMIC 0x10
131#define DRM_UT_VBL 0x20
128 132
129extern __printf(2, 3) 133extern __printf(2, 3)
130void drm_ut_debug_printk(const char *function_name, 134void drm_ut_debug_printk(const char *function_name,
@@ -217,6 +221,11 @@ void drm_err(const char *format, ...);
217 if (unlikely(drm_debug & DRM_UT_ATOMIC)) \ 221 if (unlikely(drm_debug & DRM_UT_ATOMIC)) \
218 drm_ut_debug_printk(__func__, fmt, ##args); \ 222 drm_ut_debug_printk(__func__, fmt, ##args); \
219 } while (0) 223 } while (0)
224#define DRM_DEBUG_VBL(fmt, args...) \
225 do { \
226 if (unlikely(drm_debug & DRM_UT_VBL)) \
227 drm_ut_debug_printk(__func__, fmt, ##args); \
228 } while (0)
220 229
221/*@}*/ 230/*@}*/
222 231
@@ -412,7 +421,7 @@ struct drm_driver {
412 /** 421 /**
413 * get_vblank_counter - get raw hardware vblank counter 422 * get_vblank_counter - get raw hardware vblank counter
414 * @dev: DRM device 423 * @dev: DRM device
415 * @crtc: counter to fetch 424 * @pipe: counter to fetch
416 * 425 *
417 * Driver callback for fetching a raw hardware vblank counter for @crtc. 426 * Driver callback for fetching a raw hardware vblank counter for @crtc.
418 * If a device doesn't have a hardware counter, the driver can simply 427 * If a device doesn't have a hardware counter, the driver can simply
@@ -426,12 +435,12 @@ struct drm_driver {
426 * RETURNS 435 * RETURNS
427 * Raw vblank counter value. 436 * Raw vblank counter value.
428 */ 437 */
429 u32 (*get_vblank_counter) (struct drm_device *dev, int crtc); 438 u32 (*get_vblank_counter) (struct drm_device *dev, unsigned int pipe);
430 439
431 /** 440 /**
432 * enable_vblank - enable vblank interrupt events 441 * enable_vblank - enable vblank interrupt events
433 * @dev: DRM device 442 * @dev: DRM device
434 * @crtc: which irq to enable 443 * @pipe: which irq to enable
435 * 444 *
436 * Enable vblank interrupts for @crtc. If the device doesn't have 445 * Enable vblank interrupts for @crtc. If the device doesn't have
437 * a hardware vblank counter, this routine should be a no-op, since 446 * a hardware vblank counter, this routine should be a no-op, since
@@ -441,18 +450,18 @@ struct drm_driver {
441 * Zero on success, appropriate errno if the given @crtc's vblank 450 * Zero on success, appropriate errno if the given @crtc's vblank
442 * interrupt cannot be enabled. 451 * interrupt cannot be enabled.
443 */ 452 */
444 int (*enable_vblank) (struct drm_device *dev, int crtc); 453 int (*enable_vblank) (struct drm_device *dev, unsigned int pipe);
445 454
446 /** 455 /**
447 * disable_vblank - disable vblank interrupt events 456 * disable_vblank - disable vblank interrupt events
448 * @dev: DRM device 457 * @dev: DRM device
449 * @crtc: which irq to enable 458 * @pipe: which irq to enable
450 * 459 *
451 * Disable vblank interrupts for @crtc. If the device doesn't have 460 * Disable vblank interrupts for @crtc. If the device doesn't have
452 * a hardware vblank counter, this routine should be a no-op, since 461 * a hardware vblank counter, this routine should be a no-op, since
453 * interrupts will have to stay on to keep the count accurate. 462 * interrupts will have to stay on to keep the count accurate.
454 */ 463 */
455 void (*disable_vblank) (struct drm_device *dev, int crtc); 464 void (*disable_vblank) (struct drm_device *dev, unsigned int pipe);
456 465
457 /** 466 /**
458 * Called by \c drm_device_is_agp. Typically used to determine if a 467 * Called by \c drm_device_is_agp. Typically used to determine if a
@@ -474,7 +483,7 @@ struct drm_driver {
474 * optional accurate ktime_get timestamp of when position was measured. 483 * optional accurate ktime_get timestamp of when position was measured.
475 * 484 *
476 * \param dev DRM device. 485 * \param dev DRM device.
477 * \param crtc Id of the crtc to query. 486 * \param pipe Id of the crtc to query.
478 * \param flags Flags from the caller (DRM_CALLED_FROM_VBLIRQ or 0). 487 * \param flags Flags from the caller (DRM_CALLED_FROM_VBLIRQ or 0).
479 * \param *vpos Target location for current vertical scanout position. 488 * \param *vpos Target location for current vertical scanout position.
480 * \param *hpos Target location for current horizontal scanout position. 489 * \param *hpos Target location for current horizontal scanout position.
@@ -482,6 +491,7 @@ struct drm_driver {
482 * scanout position query. Can be NULL to skip timestamp. 491 * scanout position query. Can be NULL to skip timestamp.
483 * \param *etime Target location for timestamp taken immediately after 492 * \param *etime Target location for timestamp taken immediately after
484 * scanout position query. Can be NULL to skip timestamp. 493 * scanout position query. Can be NULL to skip timestamp.
494 * \param mode Current display timings.
485 * 495 *
486 * Returns vpos as a positive number while in active scanout area. 496 * Returns vpos as a positive number while in active scanout area.
487 * Returns vpos as a negative number inside vblank, counting the number 497 * Returns vpos as a negative number inside vblank, counting the number
@@ -497,10 +507,10 @@ struct drm_driver {
497 * but unknown small number of scanlines wrt. real scanout position. 507 * but unknown small number of scanlines wrt. real scanout position.
498 * 508 *
499 */ 509 */
500 int (*get_scanout_position) (struct drm_device *dev, int crtc, 510 int (*get_scanout_position) (struct drm_device *dev, unsigned int pipe,
501 unsigned int flags, 511 unsigned int flags, int *vpos, int *hpos,
502 int *vpos, int *hpos, ktime_t *stime, 512 ktime_t *stime, ktime_t *etime,
503 ktime_t *etime); 513 const struct drm_display_mode *mode);
504 514
505 /** 515 /**
506 * Called by \c drm_get_last_vbltimestamp. Should return a precise 516 * Called by \c drm_get_last_vbltimestamp. Should return a precise
@@ -516,7 +526,7 @@ struct drm_driver {
516 * to the OpenML OML_sync_control extension specification. 526 * to the OpenML OML_sync_control extension specification.
517 * 527 *
518 * \param dev dev DRM device handle. 528 * \param dev dev DRM device handle.
519 * \param crtc crtc for which timestamp should be returned. 529 * \param pipe crtc for which timestamp should be returned.
520 * \param *max_error Maximum allowable timestamp error in nanoseconds. 530 * \param *max_error Maximum allowable timestamp error in nanoseconds.
521 * Implementation should strive to provide timestamp 531 * Implementation should strive to provide timestamp
522 * with an error of at most *max_error nanoseconds. 532 * with an error of at most *max_error nanoseconds.
@@ -532,7 +542,7 @@ struct drm_driver {
532 * negative number on failure. A positive status code on success, 542 * negative number on failure. A positive status code on success,
533 * which describes how the vblank_time timestamp was computed. 543 * which describes how the vblank_time timestamp was computed.
534 */ 544 */
535 int (*get_vblank_timestamp) (struct drm_device *dev, int crtc, 545 int (*get_vblank_timestamp) (struct drm_device *dev, unsigned int pipe,
536 int *max_error, 546 int *max_error,
537 struct timeval *vblank_time, 547 struct timeval *vblank_time,
538 unsigned flags); 548 unsigned flags);
@@ -701,6 +711,8 @@ struct drm_vblank_crtc {
701 u32 last_wait; /* Last vblank seqno waited per CRTC */ 711 u32 last_wait; /* Last vblank seqno waited per CRTC */
702 unsigned int inmodeset; /* Display driver is setting mode */ 712 unsigned int inmodeset; /* Display driver is setting mode */
703 unsigned int pipe; /* crtc index */ 713 unsigned int pipe; /* crtc index */
714 int framedur_ns; /* frame/field duration in ns */
715 int linedur_ns; /* line duration in ns */
704 bool enabled; /* so we don't call enable more than 716 bool enabled; /* so we don't call enable more than
705 once per disable */ 717 once per disable */
706}; 718};
@@ -905,6 +917,8 @@ extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
905/* Misc. IOCTL support (drm_ioctl.c) */ 917/* Misc. IOCTL support (drm_ioctl.c) */
906int drm_noop(struct drm_device *dev, void *data, 918int drm_noop(struct drm_device *dev, void *data,
907 struct drm_file *file_priv); 919 struct drm_file *file_priv);
920int drm_invalid_op(struct drm_device *dev, void *data,
921 struct drm_file *file_priv);
908 922
909/* Cache management (drm_cache.c) */ 923/* Cache management (drm_cache.c) */
910void drm_clflush_pages(struct page *pages[], unsigned long num_pages); 924void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
@@ -923,10 +937,12 @@ extern int drm_irq_uninstall(struct drm_device *dev);
923extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs); 937extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
924extern int drm_wait_vblank(struct drm_device *dev, void *data, 938extern int drm_wait_vblank(struct drm_device *dev, void *data,
925 struct drm_file *filp); 939 struct drm_file *filp);
926extern u32 drm_vblank_count(struct drm_device *dev, int pipe); 940extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe);
927extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc); 941extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
928extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, 942extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
929 struct timeval *vblanktime); 943 struct timeval *vblanktime);
944extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
945 struct timeval *vblanktime);
930extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe, 946extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
931 struct drm_pending_vblank_event *e); 947 struct drm_pending_vblank_event *e);
932extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc, 948extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
@@ -945,12 +961,12 @@ extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
945extern void drm_crtc_vblank_reset(struct drm_crtc *crtc); 961extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
946extern void drm_crtc_vblank_on(struct drm_crtc *crtc); 962extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
947extern void drm_vblank_cleanup(struct drm_device *dev); 963extern void drm_vblank_cleanup(struct drm_device *dev);
964extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
948 965
949extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, 966extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
950 unsigned int pipe, int *max_error, 967 unsigned int pipe, int *max_error,
951 struct timeval *vblank_time, 968 struct timeval *vblank_time,
952 unsigned flags, 969 unsigned flags,
953 const struct drm_crtc *refcrtc,
954 const struct drm_display_mode *mode); 970 const struct drm_display_mode *mode);
955extern void drm_calc_timestamping_constants(struct drm_crtc *crtc, 971extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
956 const struct drm_display_mode *mode); 972 const struct drm_display_mode *mode);
diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h
index 055dc058d147..193ef19dfc5c 100644
--- a/include/drm/drm_agpsupport.h
+++ b/include/drm/drm_agpsupport.h
@@ -12,9 +12,6 @@
12struct drm_device; 12struct drm_device;
13struct drm_file; 13struct drm_file;
14 14
15#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && \
16 defined(MODULE)))
17
18struct drm_agp_head { 15struct drm_agp_head {
19 struct agp_kern_info agp_info; 16 struct agp_kern_info agp_info;
20 struct list_head memory; 17 struct list_head memory;
@@ -28,7 +25,7 @@ struct drm_agp_head {
28 unsigned long page_mask; 25 unsigned long page_mask;
29}; 26};
30 27
31#if __OS_HAS_AGP 28#if IS_ENABLED(CONFIG_AGP)
32 29
33void drm_free_agp(struct agp_memory * handle, int pages); 30void drm_free_agp(struct agp_memory * handle, int pages);
34int drm_bind_agp(struct agp_memory * handle, unsigned int start); 31int drm_bind_agp(struct agp_memory * handle, unsigned int start);
@@ -66,7 +63,7 @@ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
66int drm_agp_bind_ioctl(struct drm_device *dev, void *data, 63int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
67 struct drm_file *file_priv); 64 struct drm_file *file_priv);
68 65
69#else /* __OS_HAS_AGP */ 66#else /* CONFIG_AGP */
70 67
71static inline void drm_free_agp(struct agp_memory * handle, int pages) 68static inline void drm_free_agp(struct agp_memory * handle, int pages)
72{ 69{
@@ -105,95 +102,47 @@ static inline int drm_agp_acquire(struct drm_device *dev)
105 return -ENODEV; 102 return -ENODEV;
106} 103}
107 104
108static inline int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
109 struct drm_file *file_priv)
110{
111 return -ENODEV;
112}
113
114static inline int drm_agp_release(struct drm_device *dev) 105static inline int drm_agp_release(struct drm_device *dev)
115{ 106{
116 return -ENODEV; 107 return -ENODEV;
117} 108}
118 109
119static inline int drm_agp_release_ioctl(struct drm_device *dev, void *data,
120 struct drm_file *file_priv)
121{
122 return -ENODEV;
123}
124
125static inline int drm_agp_enable(struct drm_device *dev, 110static inline int drm_agp_enable(struct drm_device *dev,
126 struct drm_agp_mode mode) 111 struct drm_agp_mode mode)
127{ 112{
128 return -ENODEV; 113 return -ENODEV;
129} 114}
130 115
131static inline int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
132 struct drm_file *file_priv)
133{
134 return -ENODEV;
135}
136
137static inline int drm_agp_info(struct drm_device *dev, 116static inline int drm_agp_info(struct drm_device *dev,
138 struct drm_agp_info *info) 117 struct drm_agp_info *info)
139{ 118{
140 return -ENODEV; 119 return -ENODEV;
141} 120}
142 121
143static inline int drm_agp_info_ioctl(struct drm_device *dev, void *data,
144 struct drm_file *file_priv)
145{
146 return -ENODEV;
147}
148
149static inline int drm_agp_alloc(struct drm_device *dev, 122static inline int drm_agp_alloc(struct drm_device *dev,
150 struct drm_agp_buffer *request) 123 struct drm_agp_buffer *request)
151{ 124{
152 return -ENODEV; 125 return -ENODEV;
153} 126}
154 127
155static inline int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
156 struct drm_file *file_priv)
157{
158 return -ENODEV;
159}
160
161static inline int drm_agp_free(struct drm_device *dev, 128static inline int drm_agp_free(struct drm_device *dev,
162 struct drm_agp_buffer *request) 129 struct drm_agp_buffer *request)
163{ 130{
164 return -ENODEV; 131 return -ENODEV;
165} 132}
166 133
167static inline int drm_agp_free_ioctl(struct drm_device *dev, void *data,
168 struct drm_file *file_priv)
169{
170 return -ENODEV;
171}
172
173static inline int drm_agp_unbind(struct drm_device *dev, 134static inline int drm_agp_unbind(struct drm_device *dev,
174 struct drm_agp_binding *request) 135 struct drm_agp_binding *request)
175{ 136{
176 return -ENODEV; 137 return -ENODEV;
177} 138}
178 139
179static inline int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
180 struct drm_file *file_priv)
181{
182 return -ENODEV;
183}
184
185static inline int drm_agp_bind(struct drm_device *dev, 140static inline int drm_agp_bind(struct drm_device *dev,
186 struct drm_agp_binding *request) 141 struct drm_agp_binding *request)
187{ 142{
188 return -ENODEV; 143 return -ENODEV;
189} 144}
190 145
191static inline int drm_agp_bind_ioctl(struct drm_device *dev, void *data, 146#endif /* CONFIG_AGP */
192 struct drm_file *file_priv)
193{
194 return -ENODEV;
195}
196
197#endif /* __OS_HAS_AGP */
198 147
199#endif /* _DRM_AGPSUPPORT_H_ */ 148#endif /* _DRM_AGPSUPPORT_H_ */
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 11266d147a29..8cba54a2a0a0 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -30,6 +30,8 @@
30 30
31#include <drm/drm_crtc.h> 31#include <drm/drm_crtc.h>
32 32
33struct drm_atomic_state;
34
33int drm_atomic_helper_check_modeset(struct drm_device *dev, 35int drm_atomic_helper_check_modeset(struct drm_device *dev,
34 struct drm_atomic_state *state); 36 struct drm_atomic_state *state);
35int drm_atomic_helper_check_planes(struct drm_device *dev, 37int drm_atomic_helper_check_planes(struct drm_device *dev,
@@ -55,7 +57,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
55int drm_atomic_helper_prepare_planes(struct drm_device *dev, 57int drm_atomic_helper_prepare_planes(struct drm_device *dev,
56 struct drm_atomic_state *state); 58 struct drm_atomic_state *state);
57void drm_atomic_helper_commit_planes(struct drm_device *dev, 59void drm_atomic_helper_commit_planes(struct drm_device *dev,
58 struct drm_atomic_state *state); 60 struct drm_atomic_state *state,
61 bool active_only);
59void drm_atomic_helper_cleanup_planes(struct drm_device *dev, 62void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
60 struct drm_atomic_state *old_state); 63 struct drm_atomic_state *old_state);
61void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state); 64void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state);
@@ -72,7 +75,11 @@ int drm_atomic_helper_update_plane(struct drm_plane *plane,
72 uint32_t src_x, uint32_t src_y, 75 uint32_t src_x, uint32_t src_y,
73 uint32_t src_w, uint32_t src_h); 76 uint32_t src_w, uint32_t src_h);
74int drm_atomic_helper_disable_plane(struct drm_plane *plane); 77int drm_atomic_helper_disable_plane(struct drm_plane *plane);
78int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
79 struct drm_plane_state *plane_state);
75int drm_atomic_helper_set_config(struct drm_mode_set *set); 80int drm_atomic_helper_set_config(struct drm_mode_set *set);
81int __drm_atomic_helper_set_config(struct drm_mode_set *set,
82 struct drm_atomic_state *state);
76 83
77int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc, 84int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
78 struct drm_property *property, 85 struct drm_property *property,
@@ -117,6 +124,9 @@ __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
117 struct drm_connector_state *state); 124 struct drm_connector_state *state);
118struct drm_connector_state * 125struct drm_connector_state *
119drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector); 126drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
127struct drm_atomic_state *
128drm_atomic_helper_duplicate_state(struct drm_device *dev,
129 struct drm_modeset_acquire_ctx *ctx);
120void 130void
121__drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, 131__drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
122 struct drm_connector_state *state); 132 struct drm_connector_state *state);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index faaeff7db684..3f0c6909dda1 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -86,10 +86,12 @@ static inline uint64_t I642U64(int64_t val)
86} 86}
87 87
88/* rotation property bits */ 88/* rotation property bits */
89#define DRM_ROTATE_MASK 0x0f
89#define DRM_ROTATE_0 0 90#define DRM_ROTATE_0 0
90#define DRM_ROTATE_90 1 91#define DRM_ROTATE_90 1
91#define DRM_ROTATE_180 2 92#define DRM_ROTATE_180 2
92#define DRM_ROTATE_270 3 93#define DRM_ROTATE_270 3
94#define DRM_REFLECT_MASK (~DRM_ROTATE_MASK)
93#define DRM_REFLECT_X 4 95#define DRM_REFLECT_X 4
94#define DRM_REFLECT_Y 5 96#define DRM_REFLECT_Y 5
95 97
@@ -210,8 +212,6 @@ struct drm_framebuffer {
210 int flags; 212 int flags;
211 uint32_t pixel_format; /* fourcc format */ 213 uint32_t pixel_format; /* fourcc format */
212 struct list_head filp_head; 214 struct list_head filp_head;
213 /* if you are using the helper */
214 void *helper_private;
215}; 215};
216 216
217struct drm_property_blob { 217struct drm_property_blob {
@@ -407,17 +407,11 @@ struct drm_crtc_funcs {
407 * @enabled: is this CRTC enabled? 407 * @enabled: is this CRTC enabled?
408 * @mode: current mode timings 408 * @mode: current mode timings
409 * @hwmode: mode timings as programmed to hw regs 409 * @hwmode: mode timings as programmed to hw regs
410 * @invert_dimensions: for purposes of error checking crtc vs fb sizes,
411 * invert the width/height of the crtc. This is used if the driver
412 * is performing 90 or 270 degree rotated scanout
413 * @x: x position on screen 410 * @x: x position on screen
414 * @y: y position on screen 411 * @y: y position on screen
415 * @funcs: CRTC control functions 412 * @funcs: CRTC control functions
416 * @gamma_size: size of gamma ramp 413 * @gamma_size: size of gamma ramp
417 * @gamma_store: gamma ramp values 414 * @gamma_store: gamma ramp values
418 * @framedur_ns: precise frame timing
419 * @linedur_ns: precise line timing
420 * @pixeldur_ns: precise pixel timing
421 * @helper_private: mid-layer private data 415 * @helper_private: mid-layer private data
422 * @properties: property tracking for this CRTC 416 * @properties: property tracking for this CRTC
423 * @state: current atomic state for this CRTC 417 * @state: current atomic state for this CRTC
@@ -461,8 +455,6 @@ struct drm_crtc {
461 */ 455 */
462 struct drm_display_mode hwmode; 456 struct drm_display_mode hwmode;
463 457
464 bool invert_dimensions;
465
466 int x, y; 458 int x, y;
467 const struct drm_crtc_funcs *funcs; 459 const struct drm_crtc_funcs *funcs;
468 460
@@ -470,9 +462,6 @@ struct drm_crtc {
470 uint32_t gamma_size; 462 uint32_t gamma_size;
471 uint16_t *gamma_store; 463 uint16_t *gamma_store;
472 464
473 /* Constants needed for precise vblank and swap timestamping. */
474 int framedur_ns, linedur_ns, pixeldur_ns;
475
476 /* if you are using the helper */ 465 /* if you are using the helper */
477 const void *helper_private; 466 const void *helper_private;
478 467
@@ -913,7 +902,6 @@ struct drm_bridge_funcs {
913 * @next: the next bridge in the encoder chain 902 * @next: the next bridge in the encoder chain
914 * @of_node: device node pointer to the bridge 903 * @of_node: device node pointer to the bridge
915 * @list: to keep track of all added bridges 904 * @list: to keep track of all added bridges
916 * @base: base mode object
917 * @funcs: control functions 905 * @funcs: control functions
918 * @driver_private: pointer to the bridge driver's internal context 906 * @driver_private: pointer to the bridge driver's internal context
919 */ 907 */
@@ -1390,7 +1378,7 @@ extern int drm_property_add_enum(struct drm_property *property, int index,
1390extern int drm_mode_create_dvi_i_properties(struct drm_device *dev); 1378extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
1391extern int drm_mode_create_tv_properties(struct drm_device *dev, 1379extern int drm_mode_create_tv_properties(struct drm_device *dev,
1392 unsigned int num_modes, 1380 unsigned int num_modes,
1393 char *modes[]); 1381 const char * const modes[]);
1394extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); 1382extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
1395extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev); 1383extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
1396extern int drm_mode_create_dirty_info_property(struct drm_device *dev); 1384extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 0212d139a480..bb9d0deca07c 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -46,7 +46,7 @@
46 46
47#define DP_AUX_I2C_WRITE 0x0 47#define DP_AUX_I2C_WRITE 0x0
48#define DP_AUX_I2C_READ 0x1 48#define DP_AUX_I2C_READ 0x1
49#define DP_AUX_I2C_STATUS 0x2 49#define DP_AUX_I2C_WRITE_STATUS_UPDATE 0x2
50#define DP_AUX_I2C_MOT 0x4 50#define DP_AUX_I2C_MOT 0x4
51#define DP_AUX_NATIVE_WRITE 0x8 51#define DP_AUX_NATIVE_WRITE 0x8
52#define DP_AUX_NATIVE_READ 0x9 52#define DP_AUX_NATIVE_READ 0x9
@@ -638,6 +638,13 @@ drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
638 (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP); 638 (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
639} 639}
640 640
641static inline bool
642drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
643{
644 return dpcd[DP_DPCD_REV] >= 0x12 &&
645 dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED;
646}
647
641/* 648/*
642 * DisplayPort AUX channel 649 * DisplayPort AUX channel
643 */ 650 */
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 53c53c459b15..2af97691e878 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -326,9 +326,8 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid);
326int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads); 326int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
327int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb); 327int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb);
328int drm_av_sync_delay(struct drm_connector *connector, 328int drm_av_sync_delay(struct drm_connector *connector,
329 struct drm_display_mode *mode); 329 const struct drm_display_mode *mode);
330struct drm_connector *drm_select_eld(struct drm_encoder *encoder, 330struct drm_connector *drm_select_eld(struct drm_encoder *encoder);
331 struct drm_display_mode *mode);
332int drm_load_edid_firmware(struct drm_connector *connector); 331int drm_load_edid_firmware(struct drm_connector *connector);
333 332
334int 333int
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index dbab4622b58f..87b090c4b730 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -104,6 +104,20 @@ struct drm_fb_helper_connector {
104 struct drm_connector *connector; 104 struct drm_connector *connector;
105}; 105};
106 106
107/**
108 * struct drm_fb_helper - helper to emulate fbdev on top of kms
109 * @fb: Scanout framebuffer object
110 * @dev: DRM device
111 * @crtc_count: number of possible CRTCs
112 * @crtc_info: per-CRTC helper state (mode, x/y offset, etc)
113 * @connector_count: number of connected connectors
114 * @connector_info_alloc_count: size of connector_info
115 * @funcs: driver callbacks for fb helper
116 * @fbdev: emulated fbdev device info struct
117 * @pseudo_palette: fake palette of 16 colors
118 * @kernel_fb_list: list_head in kernel_fb_helper_list
119 * @delayed_hotplug: was there a hotplug while kms master active?
120 */
107struct drm_fb_helper { 121struct drm_fb_helper {
108 struct drm_framebuffer *fb; 122 struct drm_framebuffer *fb;
109 struct drm_device *dev; 123 struct drm_device *dev;
@@ -120,6 +134,17 @@ struct drm_fb_helper {
120 /* we got a hotplug but fbdev wasn't running the console 134 /* we got a hotplug but fbdev wasn't running the console
121 delay until next set_par */ 135 delay until next set_par */
122 bool delayed_hotplug; 136 bool delayed_hotplug;
137
138 /**
139 * @atomic:
140 *
141 * Use atomic updates for restore_fbdev_mode(), etc. This defaults to
142 * true if driver has DRIVER_ATOMIC feature flag, but drivers can
143 * override it to true after drm_fb_helper_init() if they support atomic
144 * modeset but do not yet advertise DRIVER_ATOMIC (note that fb-helper
145 * does not require ASYNC commits).
146 */
147 bool atomic;
123}; 148};
124 149
125#ifdef CONFIG_DRM_FBDEV_EMULATION 150#ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -136,7 +161,7 @@ int drm_fb_helper_set_par(struct fb_info *info);
136int drm_fb_helper_check_var(struct fb_var_screeninfo *var, 161int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
137 struct fb_info *info); 162 struct fb_info *info);
138 163
139bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper); 164int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
140 165
141struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper); 166struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper);
142void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper); 167void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper);
@@ -226,10 +251,10 @@ static inline int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
226 return 0; 251 return 0;
227} 252}
228 253
229static inline bool 254static inline int
230drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) 255drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
231{ 256{
232 return true; 257 return 0;
233} 258}
234 259
235static inline struct fb_info * 260static inline struct fb_info *
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 7a592d7e398b..15e7f007380f 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -142,8 +142,11 @@ drm_gem_object_reference(struct drm_gem_object *obj)
142static inline void 142static inline void
143drm_gem_object_unreference(struct drm_gem_object *obj) 143drm_gem_object_unreference(struct drm_gem_object *obj)
144{ 144{
145 if (obj != NULL) 145 if (obj != NULL) {
146 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
147
146 kref_put(&obj->refcount, drm_gem_object_free); 148 kref_put(&obj->refcount, drm_gem_object_free);
149 }
147} 150}
148 151
149static inline void 152static inline void
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index 5dd18bfdf601..94938d89347c 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -43,19 +43,19 @@ struct drm_modeset_acquire_ctx {
43 43
44 struct ww_acquire_ctx ww_ctx; 44 struct ww_acquire_ctx ww_ctx;
45 45
46 /** 46 /*
47 * Contended lock: if a lock is contended you should only call 47 * Contended lock: if a lock is contended you should only call
48 * drm_modeset_backoff() which drops locks and slow-locks the 48 * drm_modeset_backoff() which drops locks and slow-locks the
49 * contended lock. 49 * contended lock.
50 */ 50 */
51 struct drm_modeset_lock *contended; 51 struct drm_modeset_lock *contended;
52 52
53 /** 53 /*
54 * list of held locks (drm_modeset_lock) 54 * list of held locks (drm_modeset_lock)
55 */ 55 */
56 struct list_head locked; 56 struct list_head locked;
57 57
58 /** 58 /*
59 * Trylock mode, use only for panic handlers! 59 * Trylock mode, use only for panic handlers!
60 */ 60 */
61 bool trylock_only; 61 bool trylock_only;
@@ -70,12 +70,12 @@ struct drm_modeset_acquire_ctx {
70 * Used for locking CRTCs and other modeset resources. 70 * Used for locking CRTCs and other modeset resources.
71 */ 71 */
72struct drm_modeset_lock { 72struct drm_modeset_lock {
73 /** 73 /*
74 * modeset lock 74 * modeset lock
75 */ 75 */
76 struct ww_mutex mutex; 76 struct ww_mutex mutex;
77 77
78 /** 78 /*
79 * Resources that are locked as part of an atomic update are added 79 * Resources that are locked as part of an atomic update are added
80 * to a list (so we know what to unlock at the end). 80 * to a list (so we know what to unlock at the end).
81 */ 81 */
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index 2441f7112074..8544665ee4f4 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -1,18 +1,31 @@
1#ifndef __DRM_OF_H__ 1#ifndef __DRM_OF_H__
2#define __DRM_OF_H__ 2#define __DRM_OF_H__
3 3
4struct component_master_ops;
5struct device;
4struct drm_device; 6struct drm_device;
5struct device_node; 7struct device_node;
6 8
7#ifdef CONFIG_OF 9#ifdef CONFIG_OF
8extern uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, 10extern uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
9 struct device_node *port); 11 struct device_node *port);
12extern int drm_of_component_probe(struct device *dev,
13 int (*compare_of)(struct device *, void *),
14 const struct component_master_ops *m_ops);
10#else 15#else
11static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, 16static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
12 struct device_node *port) 17 struct device_node *port)
13{ 18{
14 return 0; 19 return 0;
15} 20}
21
22static inline int
23drm_of_component_probe(struct device *dev,
24 int (*compare_of)(struct device *, void *),
25 const struct component_master_ops *m_ops)
26{
27 return -EINVAL;
28}
16#endif 29#endif
17 30
18#endif /* __DRM_OF_H__ */ 31#endif /* __DRM_OF_H__ */
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index dda401bf910e..5a7f9d4efb1d 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -58,10 +58,8 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
58 */ 58 */
59struct drm_plane_helper_funcs { 59struct drm_plane_helper_funcs {
60 int (*prepare_fb)(struct drm_plane *plane, 60 int (*prepare_fb)(struct drm_plane *plane,
61 struct drm_framebuffer *fb,
62 const struct drm_plane_state *new_state); 61 const struct drm_plane_state *new_state);
63 void (*cleanup_fb)(struct drm_plane *plane, 62 void (*cleanup_fb)(struct drm_plane *plane,
64 struct drm_framebuffer *fb,
65 const struct drm_plane_state *old_state); 63 const struct drm_plane_state *old_state);
66 64
67 int (*atomic_check)(struct drm_plane *plane, 65 int (*atomic_check)(struct drm_plane *plane,
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index 8cd402c73a5f..2f63dd5e05eb 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -54,9 +54,6 @@ void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
54 unsigned long page_offset, unsigned long size); 54 unsigned long page_offset, unsigned long size);
55void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr); 55void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr);
56 56
57struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
58 unsigned long start,
59 unsigned long pages);
60struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr, 57struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
61 unsigned long start, 58 unsigned long start,
62 unsigned long pages); 59 unsigned long pages);
@@ -71,25 +68,25 @@ bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
71 struct file *filp); 68 struct file *filp);
72 69
73/** 70/**
74 * drm_vma_offset_exact_lookup() - Look up node by exact address 71 * drm_vma_offset_exact_lookup_locked() - Look up node by exact address
75 * @mgr: Manager object 72 * @mgr: Manager object
76 * @start: Start address (page-based, not byte-based) 73 * @start: Start address (page-based, not byte-based)
77 * @pages: Size of object (page-based) 74 * @pages: Size of object (page-based)
78 * 75 *
79 * Same as drm_vma_offset_lookup() but does not allow any offset into the node. 76 * Same as drm_vma_offset_lookup_locked() but does not allow any offset into the node.
80 * It only returns the exact object with the given start address. 77 * It only returns the exact object with the given start address.
81 * 78 *
82 * RETURNS: 79 * RETURNS:
83 * Node at exact start address @start. 80 * Node at exact start address @start.
84 */ 81 */
85static inline struct drm_vma_offset_node * 82static inline struct drm_vma_offset_node *
86drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr, 83drm_vma_offset_exact_lookup_locked(struct drm_vma_offset_manager *mgr,
87 unsigned long start, 84 unsigned long start,
88 unsigned long pages) 85 unsigned long pages)
89{ 86{
90 struct drm_vma_offset_node *node; 87 struct drm_vma_offset_node *node;
91 88
92 node = drm_vma_offset_lookup(mgr, start, pages); 89 node = drm_vma_offset_lookup_locked(mgr, start, pages);
93 return (node && node->vm_node.start == start) ? node : NULL; 90 return (node && node->vm_node.start == start) ? node : NULL;
94} 91}
95 92
@@ -97,7 +94,7 @@ drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
97 * drm_vma_offset_lock_lookup() - Lock lookup for extended private use 94 * drm_vma_offset_lock_lookup() - Lock lookup for extended private use
98 * @mgr: Manager object 95 * @mgr: Manager object
99 * 96 *
100 * Lock VMA manager for extended lookups. Only *_locked() VMA function calls 97 * Lock VMA manager for extended lookups. Only locked VMA function calls
101 * are allowed while holding this lock. All other contexts are blocked from VMA 98 * are allowed while holding this lock. All other contexts are blocked from VMA
102 * until the lock is released via drm_vma_offset_unlock_lookup(). 99 * until the lock is released via drm_vma_offset_unlock_lookup().
103 * 100 *
@@ -108,13 +105,6 @@ drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
108 * not call any other VMA helpers while holding this lock. 105 * not call any other VMA helpers while holding this lock.
109 * 106 *
110 * Note: You're in atomic-context while holding this lock! 107 * Note: You're in atomic-context while holding this lock!
111 *
112 * Example:
113 * drm_vma_offset_lock_lookup(mgr);
114 * node = drm_vma_offset_lookup_locked(mgr);
115 * if (node)
116 * kref_get_unless_zero(container_of(node, sth, entr));
117 * drm_vma_offset_unlock_lookup(mgr);
118 */ 108 */
119static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr) 109static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
120{ 110{
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index 89dc7d6bc1cc..30d89e0da2c6 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -30,38 +30,49 @@
30 */ 30 */
31#define MAX_PORTS 5 31#define MAX_PORTS 5
32 32
33struct i915_audio_component { 33/**
34 struct device *dev; 34 * struct i915_audio_component_ops - callbacks defined in gfx driver
35 * @owner: the module owner
36 * @get_power: get the POWER_DOMAIN_AUDIO power well
37 * @put_power: put the POWER_DOMAIN_AUDIO power well
38 * @codec_wake_override: Enable/Disable generating the codec wake signal
39 * @get_cdclk_freq: get the Core Display Clock in KHz
40 * @sync_audio_rate: set n/cts based on the sample rate
41 */
42struct i915_audio_component_ops {
43 struct module *owner;
44 void (*get_power)(struct device *);
45 void (*put_power)(struct device *);
46 void (*codec_wake_override)(struct device *, bool enable);
47 int (*get_cdclk_freq)(struct device *);
48 int (*sync_audio_rate)(struct device *, int port, int rate);
49};
50
51struct i915_audio_component_audio_ops {
52 void *audio_ptr;
35 /** 53 /**
36 * @aud_sample_rate: the array of audio sample rate per port 54 * Call from i915 driver, notifying the HDA driver that
55 * pin sense and/or ELD information has changed.
56 * @audio_ptr: HDA driver object
57 * @port: Which port has changed (PORTA / PORTB / PORTC etc)
37 */ 58 */
59 void (*pin_eld_notify)(void *audio_ptr, int port);
60};
61
62/**
63 * struct i915_audio_component - used for audio video interaction
64 * @dev: the device from gfx driver
65 * @aud_sample_rate: the array of audio sample rate per port
66 * @ops: callback for audio driver calling
67 * @audio_ops: Call from i915 driver
68 */
69struct i915_audio_component {
70 struct device *dev;
38 int aud_sample_rate[MAX_PORTS]; 71 int aud_sample_rate[MAX_PORTS];
39 72
40 const struct i915_audio_component_ops { 73 const struct i915_audio_component_ops *ops;
41 struct module *owner;
42 void (*get_power)(struct device *);
43 void (*put_power)(struct device *);
44 void (*codec_wake_override)(struct device *, bool enable);
45 int (*get_cdclk_freq)(struct device *);
46 /**
47 * @sync_audio_rate: set n/cts based on the sample rate
48 *
49 * Called from audio driver. After audio driver sets the
50 * sample rate, it will call this function to set n/cts
51 */
52 int (*sync_audio_rate)(struct device *, int port, int rate);
53 } *ops;
54 74
55 const struct i915_audio_component_audio_ops { 75 const struct i915_audio_component_audio_ops *audio_ops;
56 void *audio_ptr;
57 /**
58 * Call from i915 driver, notifying the HDA driver that
59 * pin sense and/or ELD information has changed.
60 * @audio_ptr: HDA driver object
61 * @port: Which port has changed (PORTA / PORTB / PORTC etc)
62 */
63 void (*pin_eld_notify)(void *audio_ptr, int port);
64 } *audio_ops;
65}; 76};
66 77
67#endif /* _I915_COMPONENT_H_ */ 78#endif /* _I915_COMPONENT_H_ */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 41a3b11f7796..3d003805aac3 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -156,7 +156,7 @@ struct fb_cursor_user {
156#define FB_EVENT_GET_REQ 0x0D 156#define FB_EVENT_GET_REQ 0x0D
157/* Unbind from the console if possible */ 157/* Unbind from the console if possible */
158#define FB_EVENT_FB_UNBIND 0x0E 158#define FB_EVENT_FB_UNBIND 0x0E
159/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga switcheroo */ 159/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga_switcheroo */
160#define FB_EVENT_REMAP_ALL_CONSOLE 0x0F 160#define FB_EVENT_REMAP_ALL_CONSOLE 0x0F
161/* A hardware display blank early change occured */ 161/* A hardware display blank early change occured */
162#define FB_EARLY_EVENT_BLANK 0x10 162#define FB_EARLY_EVENT_BLANK 0x10
diff --git a/include/linux/fence.h b/include/linux/fence.h
index 39efee130d2b..bb522011383b 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -280,6 +280,22 @@ fence_is_signaled(struct fence *fence)
280} 280}
281 281
282/** 282/**
283 * fence_is_later - return if f1 is chronologically later than f2
284 * @f1: [in] the first fence from the same context
285 * @f2: [in] the second fence from the same context
286 *
287 * Returns true if f1 is chronologically later than f2. Both fences must be
288 * from the same context, since a seqno is not re-used across contexts.
289 */
290static inline bool fence_is_later(struct fence *f1, struct fence *f2)
291{
292 if (WARN_ON(f1->context != f2->context))
293 return false;
294
295 return f1->seqno - f2->seqno < INT_MAX;
296}
297
298/**
283 * fence_later - return the chronologically later fence 299 * fence_later - return the chronologically later fence
284 * @f1: [in] the first fence from the same context 300 * @f1: [in] the first fence from the same context
285 * @f2: [in] the second fence from the same context 301 * @f2: [in] the second fence from the same context
@@ -298,14 +314,15 @@ static inline struct fence *fence_later(struct fence *f1, struct fence *f2)
298 * set if enable_signaling wasn't called, and enabling that here is 314 * set if enable_signaling wasn't called, and enabling that here is
299 * overkill. 315 * overkill.
300 */ 316 */
301 if (f2->seqno - f1->seqno <= INT_MAX) 317 if (fence_is_later(f1, f2))
302 return fence_is_signaled(f2) ? NULL : f2;
303 else
304 return fence_is_signaled(f1) ? NULL : f1; 318 return fence_is_signaled(f1) ? NULL : f1;
319 else
320 return fence_is_signaled(f2) ? NULL : f2;
305} 321}
306 322
307signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout); 323signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);
308 324signed long fence_wait_any_timeout(struct fence **fences, uint32_t count,
325 bool intr, signed long timeout);
309 326
310/** 327/**
311 * fence_wait - sleep until the fence gets signaled 328 * fence_wait - sleep until the fence gets signaled
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index b483abd34493..69e1d4a1f1b3 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -1,10 +1,31 @@
1/* 1/*
2 * vga_switcheroo.h - Support for laptop with dual GPU using one set of outputs
3 *
2 * Copyright (c) 2010 Red Hat Inc. 4 * Copyright (c) 2010 Red Hat Inc.
3 * Author : Dave Airlie <airlied@redhat.com> 5 * Author : Dave Airlie <airlied@redhat.com>
4 * 6 *
5 * Licensed under GPLv2 7 * Copyright (c) 2015 Lukas Wunner <lukas@wunner.de>
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
25 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS
27 * IN THE SOFTWARE.
6 * 28 *
7 * vga_switcheroo.h - Support for laptop with dual GPU using one set of outputs
8 */ 29 */
9 30
10#ifndef _LINUX_VGA_SWITCHEROO_H_ 31#ifndef _LINUX_VGA_SWITCHEROO_H_
@@ -14,28 +35,85 @@
14 35
15struct pci_dev; 36struct pci_dev;
16 37
38/**
39 * enum vga_switcheroo_state - client power state
40 * @VGA_SWITCHEROO_OFF: off
41 * @VGA_SWITCHEROO_ON: on
42 * @VGA_SWITCHEROO_NOT_FOUND: client has not registered with vga_switcheroo.
43 * Only used in vga_switcheroo_get_client_state() which in turn is only
44 * called from hda_intel.c
45 *
46 * Client power state.
47 */
17enum vga_switcheroo_state { 48enum vga_switcheroo_state {
18 VGA_SWITCHEROO_OFF, 49 VGA_SWITCHEROO_OFF,
19 VGA_SWITCHEROO_ON, 50 VGA_SWITCHEROO_ON,
20 /* below are referred only from vga_switcheroo_get_client_state() */ 51 /* below are referred only from vga_switcheroo_get_client_state() */
21 VGA_SWITCHEROO_INIT,
22 VGA_SWITCHEROO_NOT_FOUND, 52 VGA_SWITCHEROO_NOT_FOUND,
23}; 53};
24 54
55/**
56 * enum vga_switcheroo_client_id - client identifier
57 * @VGA_SWITCHEROO_UNKNOWN_ID: initial identifier assigned to vga clients.
58 * Determining the id requires the handler, so GPUs are given their
59 * true id in a delayed fashion in vga_switcheroo_enable()
60 * @VGA_SWITCHEROO_IGD: integrated graphics device
61 * @VGA_SWITCHEROO_DIS: discrete graphics device
62 * @VGA_SWITCHEROO_MAX_CLIENTS: currently no more than two GPUs are supported
63 *
64 * Client identifier. Audio clients use the same identifier & 0x100.
65 */
25enum vga_switcheroo_client_id { 66enum vga_switcheroo_client_id {
67 VGA_SWITCHEROO_UNKNOWN_ID = -1,
26 VGA_SWITCHEROO_IGD, 68 VGA_SWITCHEROO_IGD,
27 VGA_SWITCHEROO_DIS, 69 VGA_SWITCHEROO_DIS,
28 VGA_SWITCHEROO_MAX_CLIENTS, 70 VGA_SWITCHEROO_MAX_CLIENTS,
29}; 71};
30 72
73/**
74 * struct vga_switcheroo_handler - handler callbacks
75 * @init: initialize handler.
76 * Optional. This gets called when vga_switcheroo is enabled, i.e. when
77 * two vga clients have registered. It allows the handler to perform
78 * some delayed initialization that depends on the existence of the
79 * vga clients. Currently only the radeon and amdgpu drivers use this.
80 * The return value is ignored
81 * @switchto: switch outputs to given client.
82 * Mandatory. For muxless machines this should be a no-op. Returning 0
83 * denotes success, anything else failure (in which case the switch is
84 * aborted)
85 * @power_state: cut or reinstate power of given client.
86 * Optional. The return value is ignored
87 * @get_client_id: determine if given pci device is integrated or discrete GPU.
88 * Mandatory
89 *
90 * Handler callbacks. The multiplexer itself. The @switchto and @get_client_id
91 * methods are mandatory, all others may be set to NULL.
92 */
31struct vga_switcheroo_handler { 93struct vga_switcheroo_handler {
94 int (*init)(void);
32 int (*switchto)(enum vga_switcheroo_client_id id); 95 int (*switchto)(enum vga_switcheroo_client_id id);
33 int (*power_state)(enum vga_switcheroo_client_id id, 96 int (*power_state)(enum vga_switcheroo_client_id id,
34 enum vga_switcheroo_state state); 97 enum vga_switcheroo_state state);
35 int (*init)(void); 98 enum vga_switcheroo_client_id (*get_client_id)(struct pci_dev *pdev);
36 int (*get_client_id)(struct pci_dev *pdev);
37}; 99};
38 100
101/**
102 * struct vga_switcheroo_client_ops - client callbacks
103 * @set_gpu_state: do the equivalent of suspend/resume for the card.
104 * Mandatory. This should not cut power to the discrete GPU,
105 * which is the job of the handler
106 * @reprobe: poll outputs.
107 * Optional. This gets called after waking the GPU and switching
108 * the outputs to it
109 * @can_switch: check if the device is in a position to switch now.
110 * Mandatory. The client should return false if a user space process
111 * has one of its device files open
112 *
113 * Client callbacks. A client can be either a GPU or an audio device on a GPU.
114 * The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be
115 * set to NULL. For audio clients, the @reprobe member is bogus.
116 */
39struct vga_switcheroo_client_ops { 117struct vga_switcheroo_client_ops {
40 void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state); 118 void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state);
41 void (*reprobe)(struct pci_dev *dev); 119 void (*reprobe)(struct pci_dev *dev);
@@ -49,17 +127,17 @@ int vga_switcheroo_register_client(struct pci_dev *dev,
49 bool driver_power_control); 127 bool driver_power_control);
50int vga_switcheroo_register_audio_client(struct pci_dev *pdev, 128int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
51 const struct vga_switcheroo_client_ops *ops, 129 const struct vga_switcheroo_client_ops *ops,
52 int id, bool active); 130 enum vga_switcheroo_client_id id);
53 131
54void vga_switcheroo_client_fb_set(struct pci_dev *dev, 132void vga_switcheroo_client_fb_set(struct pci_dev *dev,
55 struct fb_info *info); 133 struct fb_info *info);
56 134
57int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler); 135int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler);
58void vga_switcheroo_unregister_handler(void); 136void vga_switcheroo_unregister_handler(void);
59 137
60int vga_switcheroo_process_delayed_switch(void); 138int vga_switcheroo_process_delayed_switch(void);
61 139
62int vga_switcheroo_get_client_state(struct pci_dev *dev); 140enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev);
63 141
64void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic); 142void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
65 143
@@ -72,13 +150,13 @@ static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
72static inline int vga_switcheroo_register_client(struct pci_dev *dev, 150static inline int vga_switcheroo_register_client(struct pci_dev *dev,
73 const struct vga_switcheroo_client_ops *ops, bool driver_power_control) { return 0; } 151 const struct vga_switcheroo_client_ops *ops, bool driver_power_control) { return 0; }
74static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {} 152static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {}
75static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; } 153static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler) { return 0; }
76static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev, 154static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
77 const struct vga_switcheroo_client_ops *ops, 155 const struct vga_switcheroo_client_ops *ops,
78 int id, bool active) { return 0; } 156 enum vga_switcheroo_client_id id) { return 0; }
79static inline void vga_switcheroo_unregister_handler(void) {} 157static inline void vga_switcheroo_unregister_handler(void) {}
80static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } 158static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
81static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } 159static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
82 160
83static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {} 161static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
84 162
diff --git a/include/uapi/drm/Kbuild b/include/uapi/drm/Kbuild
index 2d9a25daab05..38d437096c35 100644
--- a/include/uapi/drm/Kbuild
+++ b/include/uapi/drm/Kbuild
@@ -17,3 +17,4 @@ header-y += tegra_drm.h
17header-y += via_drm.h 17header-y += via_drm.h
18header-y += vmwgfx_drm.h 18header-y += vmwgfx_drm.h
19header-y += msm_drm.h 19header-y += msm_drm.h
20header-y += virtgpu_drm.h
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index fbdd11851725..e52933a73580 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -640,6 +640,6 @@ struct drm_amdgpu_info_hw_ip {
640#define AMDGPU_FAMILY_CI 120 /* Bonaire, Hawaii */ 640#define AMDGPU_FAMILY_CI 120 /* Bonaire, Hawaii */
641#define AMDGPU_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */ 641#define AMDGPU_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */
642#define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */ 642#define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */
643#define AMDGPU_FAMILY_CZ 135 /* Carrizo */ 643#define AMDGPU_FAMILY_CZ 135 /* Carrizo, Stoney */
644 644
645#endif 645#endif
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 359107ab629e..6c11ca401de8 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -105,8 +105,16 @@
105 105
106struct drm_mode_modeinfo { 106struct drm_mode_modeinfo {
107 __u32 clock; 107 __u32 clock;
108 __u16 hdisplay, hsync_start, hsync_end, htotal, hskew; 108 __u16 hdisplay;
109 __u16 vdisplay, vsync_start, vsync_end, vtotal, vscan; 109 __u16 hsync_start;
110 __u16 hsync_end;
111 __u16 htotal;
112 __u16 hskew;
113 __u16 vdisplay;
114 __u16 vsync_start;
115 __u16 vsync_end;
116 __u16 vtotal;
117 __u16 vscan;
110 118
111 __u32 vrefresh; 119 __u32 vrefresh;
112 120
@@ -124,8 +132,10 @@ struct drm_mode_card_res {
124 __u32 count_crtcs; 132 __u32 count_crtcs;
125 __u32 count_connectors; 133 __u32 count_connectors;
126 __u32 count_encoders; 134 __u32 count_encoders;
127 __u32 min_width, max_width; 135 __u32 min_width;
128 __u32 min_height, max_height; 136 __u32 max_width;
137 __u32 min_height;
138 __u32 max_height;
129}; 139};
130 140
131struct drm_mode_crtc { 141struct drm_mode_crtc {
@@ -135,7 +145,8 @@ struct drm_mode_crtc {
135 __u32 crtc_id; /**< Id */ 145 __u32 crtc_id; /**< Id */
136 __u32 fb_id; /**< Id of framebuffer */ 146 __u32 fb_id; /**< Id of framebuffer */
137 147
138 __u32 x, y; /**< Position on the frameuffer */ 148 __u32 x; /**< x Position on the framebuffer */
149 __u32 y; /**< y Position on the framebuffer */
139 150
140 __u32 gamma_size; 151 __u32 gamma_size;
141 __u32 mode_valid; 152 __u32 mode_valid;
@@ -153,12 +164,16 @@ struct drm_mode_set_plane {
153 __u32 flags; /* see above flags */ 164 __u32 flags; /* see above flags */
154 165
155 /* Signed dest location allows it to be partially off screen */ 166 /* Signed dest location allows it to be partially off screen */
156 __s32 crtc_x, crtc_y; 167 __s32 crtc_x;
157 __u32 crtc_w, crtc_h; 168 __s32 crtc_y;
169 __u32 crtc_w;
170 __u32 crtc_h;
158 171
159 /* Source values are 16.16 fixed point */ 172 /* Source values are 16.16 fixed point */
160 __u32 src_x, src_y; 173 __u32 src_x;
161 __u32 src_h, src_w; 174 __u32 src_y;
175 __u32 src_h;
176 __u32 src_w;
162}; 177};
163 178
164struct drm_mode_get_plane { 179struct drm_mode_get_plane {
@@ -244,7 +259,8 @@ struct drm_mode_get_connector {
244 __u32 connector_type_id; 259 __u32 connector_type_id;
245 260
246 __u32 connection; 261 __u32 connection;
247 __u32 mm_width, mm_height; /**< HxW in millimeters */ 262 __u32 mm_width; /**< width in millimeters */
263 __u32 mm_height; /**< height in millimeters */
248 __u32 subpixel; 264 __u32 subpixel;
249 265
250 __u32 pad; 266 __u32 pad;
@@ -327,7 +343,8 @@ struct drm_mode_get_blob {
327 343
328struct drm_mode_fb_cmd { 344struct drm_mode_fb_cmd {
329 __u32 fb_id; 345 __u32 fb_id;
330 __u32 width, height; 346 __u32 width;
347 __u32 height;
331 __u32 pitch; 348 __u32 pitch;
332 __u32 bpp; 349 __u32 bpp;
333 __u32 depth; 350 __u32 depth;
@@ -340,7 +357,8 @@ struct drm_mode_fb_cmd {
340 357
341struct drm_mode_fb_cmd2 { 358struct drm_mode_fb_cmd2 {
342 __u32 fb_id; 359 __u32 fb_id;
343 __u32 width, height; 360 __u32 width;
361 __u32 height;
344 __u32 pixel_format; /* fourcc code from drm_fourcc.h */ 362 __u32 pixel_format; /* fourcc code from drm_fourcc.h */
345 __u32 flags; /* see above flags */ 363 __u32 flags; /* see above flags */
346 364
diff --git a/include/uapi/drm/i810_drm.h b/include/uapi/drm/i810_drm.h
index 7a10bb6f2c0f..34736efd5824 100644
--- a/include/uapi/drm/i810_drm.h
+++ b/include/uapi/drm/i810_drm.h
@@ -1,6 +1,8 @@
1#ifndef _I810_DRM_H_ 1#ifndef _I810_DRM_H_
2#define _I810_DRM_H_ 2#define _I810_DRM_H_
3 3
4#include <drm/drm.h>
5
4/* WARNING: These defines must be the same as what the Xserver uses. 6/* WARNING: These defines must be the same as what the Xserver uses.
5 * if you change them, you must change the defines in the Xserver. 7 * if you change them, you must change the defines in the Xserver.
6 */ 8 */
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index fd5aa47bd689..484a9fb20479 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -690,7 +690,8 @@ struct drm_i915_gem_exec_object2 {
690#define EXEC_OBJECT_NEEDS_FENCE (1<<0) 690#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
691#define EXEC_OBJECT_NEEDS_GTT (1<<1) 691#define EXEC_OBJECT_NEEDS_GTT (1<<1)
692#define EXEC_OBJECT_WRITE (1<<2) 692#define EXEC_OBJECT_WRITE (1<<2)
693#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1) 693#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
694#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_SUPPORTS_48B_ADDRESS<<1)
694 __u64 flags; 695 __u64 flags;
695 696
696 __u64 rsvd1; 697 __u64 rsvd1;
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index 5507eead5863..fd594cc73cc0 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -27,14 +27,6 @@
27 27
28#define DRM_NOUVEAU_EVENT_NVIF 0x80000000 28#define DRM_NOUVEAU_EVENT_NVIF 0x80000000
29 29
30/* reserved object handles when using deprecated object APIs - these
31 * are here so that libdrm can allow interoperability with the new
32 * object APIs
33 */
34#define NOUVEAU_ABI16_CLIENT 0xffffffff
35#define NOUVEAU_ABI16_DEVICE 0xdddddddd
36#define NOUVEAU_ABI16_CHAN(n) (0xcccc0000 | (n))
37
38#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0) 30#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
39#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1) 31#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
40#define NOUVEAU_GEM_DOMAIN_GART (1 << 2) 32#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
diff --git a/include/uapi/drm/r128_drm.h b/include/uapi/drm/r128_drm.h
index 8d8878b55f55..76b0aa3e8210 100644
--- a/include/uapi/drm/r128_drm.h
+++ b/include/uapi/drm/r128_drm.h
@@ -33,6 +33,8 @@
33#ifndef __R128_DRM_H__ 33#ifndef __R128_DRM_H__
34#define __R128_DRM_H__ 34#define __R128_DRM_H__
35 35
36#include <drm/drm.h>
37
36/* WARNING: If you change any of these defines, make sure to change the 38/* WARNING: If you change any of these defines, make sure to change the
37 * defines in the X server file (r128_sarea.h) 39 * defines in the X server file (r128_sarea.h)
38 */ 40 */
diff --git a/include/uapi/drm/savage_drm.h b/include/uapi/drm/savage_drm.h
index 818d49be2e6e..9dc9dc1a7753 100644
--- a/include/uapi/drm/savage_drm.h
+++ b/include/uapi/drm/savage_drm.h
@@ -26,6 +26,8 @@
26#ifndef __SAVAGE_DRM_H__ 26#ifndef __SAVAGE_DRM_H__
27#define __SAVAGE_DRM_H__ 27#define __SAVAGE_DRM_H__
28 28
29#include <drm/drm.h>
30
29#ifndef __SAVAGE_SAREA_DEFINES__ 31#ifndef __SAVAGE_SAREA_DEFINES__
30#define __SAVAGE_SAREA_DEFINES__ 32#define __SAVAGE_SAREA_DEFINES__
31 33
diff --git a/include/uapi/drm/sis_drm.h b/include/uapi/drm/sis_drm.h
index df3763222d73..374858cdcdaa 100644
--- a/include/uapi/drm/sis_drm.h
+++ b/include/uapi/drm/sis_drm.h
@@ -64,8 +64,4 @@ typedef struct {
64 unsigned long offset, size; 64 unsigned long offset, size;
65} drm_sis_fb_t; 65} drm_sis_fb_t;
66 66
67struct sis_file_private {
68 struct list_head obj_list;
69};
70
71#endif /* __SIS_DRM_H__ */ 67#endif /* __SIS_DRM_H__ */
diff --git a/include/uapi/drm/via_drm.h b/include/uapi/drm/via_drm.h
index 8b0533ccbd5a..45bc80c3714b 100644
--- a/include/uapi/drm/via_drm.h
+++ b/include/uapi/drm/via_drm.h
@@ -274,8 +274,4 @@ typedef struct drm_via_dmablit {
274 drm_via_blitsync_t sync; 274 drm_via_blitsync_t sync;
275} drm_via_dmablit_t; 275} drm_via_dmablit_t;
276 276
277struct via_file_private {
278 struct list_head obj_list;
279};
280
281#endif /* _VIA_DRM_H_ */ 277#endif /* _VIA_DRM_H_ */
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
new file mode 100644
index 000000000000..fc9e2d6e5e2f
--- /dev/null
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -0,0 +1,167 @@
1/*
2 * Copyright 2013 Red Hat
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24#ifndef VIRTGPU_DRM_H
25#define VIRTGPU_DRM_H
26
27#include <stddef.h>
28#include "drm/drm.h"
29
30/* Please note that modifications to all structs defined here are
31 * subject to backwards-compatibility constraints.
32 *
33 * Do not use pointers, use uint64_t instead for 32 bit / 64 bit user/kernel
34 * compatibility Keep fields aligned to their size
35 */
36
37#define DRM_VIRTGPU_MAP 0x01
38#define DRM_VIRTGPU_EXECBUFFER 0x02
39#define DRM_VIRTGPU_GETPARAM 0x03
40#define DRM_VIRTGPU_RESOURCE_CREATE 0x04
41#define DRM_VIRTGPU_RESOURCE_INFO 0x05
42#define DRM_VIRTGPU_TRANSFER_FROM_HOST 0x06
43#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
44#define DRM_VIRTGPU_WAIT 0x08
45#define DRM_VIRTGPU_GET_CAPS 0x09
46
47struct drm_virtgpu_map {
48 uint64_t offset; /* use for mmap system call */
49 uint32_t handle;
50 uint32_t pad;
51};
52
53struct drm_virtgpu_execbuffer {
54 uint32_t flags; /* for future use */
55 uint32_t size;
56 uint64_t command; /* void* */
57 uint64_t bo_handles;
58 uint32_t num_bo_handles;
59 uint32_t pad;
60};
61
62#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
63
64struct drm_virtgpu_getparam {
65 uint64_t param;
66 uint64_t value;
67};
68
69/* NO_BO flags? NO resource flag? */
70/* resource flag for y_0_top */
71struct drm_virtgpu_resource_create {
72 uint32_t target;
73 uint32_t format;
74 uint32_t bind;
75 uint32_t width;
76 uint32_t height;
77 uint32_t depth;
78 uint32_t array_size;
79 uint32_t last_level;
80 uint32_t nr_samples;
81 uint32_t flags;
82 uint32_t bo_handle; /* if this is set - recreate a new resource attached to this bo ? */
83 uint32_t res_handle; /* returned by kernel */
84 uint32_t size; /* validate transfer in the host */
85 uint32_t stride; /* validate transfer in the host */
86};
87
88struct drm_virtgpu_resource_info {
89 uint32_t bo_handle;
90 uint32_t res_handle;
91 uint32_t size;
92 uint32_t stride;
93};
94
95struct drm_virtgpu_3d_box {
96 uint32_t x;
97 uint32_t y;
98 uint32_t z;
99 uint32_t w;
100 uint32_t h;
101 uint32_t d;
102};
103
104struct drm_virtgpu_3d_transfer_to_host {
105 uint32_t bo_handle;
106 struct drm_virtgpu_3d_box box;
107 uint32_t level;
108 uint32_t offset;
109};
110
111struct drm_virtgpu_3d_transfer_from_host {
112 uint32_t bo_handle;
113 struct drm_virtgpu_3d_box box;
114 uint32_t level;
115 uint32_t offset;
116};
117
118#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
119struct drm_virtgpu_3d_wait {
120 uint32_t handle; /* 0 is an invalid handle */
121 uint32_t flags;
122};
123
124struct drm_virtgpu_get_caps {
125 uint32_t cap_set_id;
126 uint32_t cap_set_ver;
127 uint64_t addr;
128 uint32_t size;
129 uint32_t pad;
130};
131
132#define DRM_IOCTL_VIRTGPU_MAP \
133 DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
134
135#define DRM_IOCTL_VIRTGPU_EXECBUFFER \
136 DRM_IOW(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
137 struct drm_virtgpu_execbuffer)
138
139#define DRM_IOCTL_VIRTGPU_GETPARAM \
140 DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GETPARAM,\
141 struct drm_virtgpu_getparam)
142
143#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE \
144 DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE, \
145 struct drm_virtgpu_resource_create)
146
147#define DRM_IOCTL_VIRTGPU_RESOURCE_INFO \
148 DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_INFO, \
149 struct drm_virtgpu_resource_info)
150
151#define DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST \
152 DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_FROM_HOST, \
153 struct drm_virtgpu_3d_transfer_from_host)
154
155#define DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST \
156 DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_TO_HOST, \
157 struct drm_virtgpu_3d_transfer_to_host)
158
159#define DRM_IOCTL_VIRTGPU_WAIT \
160 DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_WAIT, \
161 struct drm_virtgpu_3d_wait)
162
163#define DRM_IOCTL_VIRTGPU_GET_CAPS \
164 DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
165 struct drm_virtgpu_get_caps)
166
167#endif
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index 478be5270e26..7a63faa9065c 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -40,6 +40,8 @@
40 40
41#include <linux/types.h> 41#include <linux/types.h>
42 42
43#define VIRTIO_GPU_F_VIRGL 0
44
43enum virtio_gpu_ctrl_type { 45enum virtio_gpu_ctrl_type {
44 VIRTIO_GPU_UNDEFINED = 0, 46 VIRTIO_GPU_UNDEFINED = 0,
45 47
@@ -52,6 +54,18 @@ enum virtio_gpu_ctrl_type {
52 VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D, 54 VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D,
53 VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING, 55 VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING,
54 VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING, 56 VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING,
57 VIRTIO_GPU_CMD_GET_CAPSET_INFO,
58 VIRTIO_GPU_CMD_GET_CAPSET,
59
60 /* 3d commands */
61 VIRTIO_GPU_CMD_CTX_CREATE = 0x0200,
62 VIRTIO_GPU_CMD_CTX_DESTROY,
63 VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE,
64 VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE,
65 VIRTIO_GPU_CMD_RESOURCE_CREATE_3D,
66 VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D,
67 VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D,
68 VIRTIO_GPU_CMD_SUBMIT_3D,
55 69
56 /* cursor commands */ 70 /* cursor commands */
57 VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300, 71 VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
@@ -60,6 +74,8 @@ enum virtio_gpu_ctrl_type {
60 /* success responses */ 74 /* success responses */
61 VIRTIO_GPU_RESP_OK_NODATA = 0x1100, 75 VIRTIO_GPU_RESP_OK_NODATA = 0x1100,
62 VIRTIO_GPU_RESP_OK_DISPLAY_INFO, 76 VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
77 VIRTIO_GPU_RESP_OK_CAPSET_INFO,
78 VIRTIO_GPU_RESP_OK_CAPSET,
63 79
64 /* error responses */ 80 /* error responses */
65 VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200, 81 VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
@@ -180,13 +196,107 @@ struct virtio_gpu_resp_display_info {
180 } pmodes[VIRTIO_GPU_MAX_SCANOUTS]; 196 } pmodes[VIRTIO_GPU_MAX_SCANOUTS];
181}; 197};
182 198
199/* data passed in the control vq, 3d related */
200
201struct virtio_gpu_box {
202 __le32 x, y, z;
203 __le32 w, h, d;
204};
205
206/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D, VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D */
207struct virtio_gpu_transfer_host_3d {
208 struct virtio_gpu_ctrl_hdr hdr;
209 struct virtio_gpu_box box;
210 __le64 offset;
211 __le32 resource_id;
212 __le32 level;
213 __le32 stride;
214 __le32 layer_stride;
215};
216
217/* VIRTIO_GPU_CMD_RESOURCE_CREATE_3D */
218#define VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP (1 << 0)
219struct virtio_gpu_resource_create_3d {
220 struct virtio_gpu_ctrl_hdr hdr;
221 __le32 resource_id;
222 __le32 target;
223 __le32 format;
224 __le32 bind;
225 __le32 width;
226 __le32 height;
227 __le32 depth;
228 __le32 array_size;
229 __le32 last_level;
230 __le32 nr_samples;
231 __le32 flags;
232 __le32 padding;
233};
234
235/* VIRTIO_GPU_CMD_CTX_CREATE */
236struct virtio_gpu_ctx_create {
237 struct virtio_gpu_ctrl_hdr hdr;
238 __le32 nlen;
239 __le32 padding;
240 char debug_name[64];
241};
242
243/* VIRTIO_GPU_CMD_CTX_DESTROY */
244struct virtio_gpu_ctx_destroy {
245 struct virtio_gpu_ctrl_hdr hdr;
246};
247
248/* VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE, VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE */
249struct virtio_gpu_ctx_resource {
250 struct virtio_gpu_ctrl_hdr hdr;
251 __le32 resource_id;
252 __le32 padding;
253};
254
255/* VIRTIO_GPU_CMD_SUBMIT_3D */
256struct virtio_gpu_cmd_submit {
257 struct virtio_gpu_ctrl_hdr hdr;
258 __le32 size;
259 __le32 padding;
260};
261
262#define VIRTIO_GPU_CAPSET_VIRGL 1
263
264/* VIRTIO_GPU_CMD_GET_CAPSET_INFO */
265struct virtio_gpu_get_capset_info {
266 struct virtio_gpu_ctrl_hdr hdr;
267 __le32 capset_index;
268 __le32 padding;
269};
270
271/* VIRTIO_GPU_RESP_OK_CAPSET_INFO */
272struct virtio_gpu_resp_capset_info {
273 struct virtio_gpu_ctrl_hdr hdr;
274 __le32 capset_id;
275 __le32 capset_max_version;
276 __le32 capset_max_size;
277 __le32 padding;
278};
279
280/* VIRTIO_GPU_CMD_GET_CAPSET */
281struct virtio_gpu_get_capset {
282 struct virtio_gpu_ctrl_hdr hdr;
283 __le32 capset_id;
284 __le32 capset_version;
285};
286
287/* VIRTIO_GPU_RESP_OK_CAPSET */
288struct virtio_gpu_resp_capset {
289 struct virtio_gpu_ctrl_hdr hdr;
290 uint8_t capset_data[];
291};
292
183#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0) 293#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
184 294
185struct virtio_gpu_config { 295struct virtio_gpu_config {
186 __u32 events_read; 296 __u32 events_read;
187 __u32 events_clear; 297 __u32 events_clear;
188 __u32 num_scanouts; 298 __u32 num_scanouts;
189 __u32 reserved; 299 __u32 num_capsets;
190}; 300};
191 301
192/* simple formats for fbcon/X use */ 302/* simple formats for fbcon/X use */
diff --git a/include/video/exynos5433_decon.h b/include/video/exynos5433_decon.h
index 3696575b02f2..c1c1ca18abc0 100644
--- a/include/video/exynos5433_decon.h
+++ b/include/video/exynos5433_decon.h
@@ -82,6 +82,8 @@
82 82
83/* VIDCON0 */ 83/* VIDCON0 */
84#define VIDCON0_SWRESET (1 << 28) 84#define VIDCON0_SWRESET (1 << 28)
85#define VIDCON0_CLKVALUP (1 << 14)
86#define VIDCON0_VLCKFREE (1 << 5)
85#define VIDCON0_STOP_STATUS (1 << 2) 87#define VIDCON0_STOP_STATUS (1 << 2)
86#define VIDCON0_ENVID (1 << 1) 88#define VIDCON0_ENVID (1 << 1)
87#define VIDCON0_ENVID_F (1 << 0) 89#define VIDCON0_ENVID_F (1 << 0)
@@ -137,6 +139,13 @@
137/* DECON_UPDATE */ 139/* DECON_UPDATE */
138#define STANDALONE_UPDATE_F (1 << 0) 140#define STANDALONE_UPDATE_F (1 << 0)
139 141
142/* DECON_VIDCON1 */
143#define VIDCON1_VCLK_MASK (0x3 << 9)
144#define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9)
145#define VIDCON1_VCLK_HOLD (0x0 << 9)
146#define VIDCON1_VCLK_RUN (0x1 << 9)
147
148
140/* DECON_VIDTCON00 */ 149/* DECON_VIDTCON00 */
141#define VIDTCON00_VBPD_F(x) (((x) & 0xfff) << 16) 150#define VIDTCON00_VBPD_F(x) (((x) & 0xfff) << 16)
142#define VIDTCON00_VFPD_F(x) ((x) & 0xfff) 151#define VIDTCON00_VFPD_F(x) ((x) & 0xfff)
@@ -159,7 +168,27 @@
159#define TRIGCON_TRIGEN_PER_F (1 << 31) 168#define TRIGCON_TRIGEN_PER_F (1 << 31)
160#define TRIGCON_TRIGEN_F (1 << 30) 169#define TRIGCON_TRIGEN_F (1 << 30)
161#define TRIGCON_TE_AUTO_MASK (1 << 29) 170#define TRIGCON_TE_AUTO_MASK (1 << 29)
171#define TRIGCON_WB_SWTRIGCMD (1 << 28)
172#define TRIGCON_SWTRIGCMD_W4BUF (1 << 26)
173#define TRIGCON_TRIGMODE_W4BUF (1 << 25)
174#define TRIGCON_SWTRIGCMD_W3BUF (1 << 21)
175#define TRIGCON_TRIGMODE_W3BUF (1 << 20)
176#define TRIGCON_SWTRIGCMD_W2BUF (1 << 16)
177#define TRIGCON_TRIGMODE_W2BUF (1 << 15)
178#define TRIGCON_SWTRIGCMD_W1BUF (1 << 11)
179#define TRIGCON_TRIGMODE_W1BUF (1 << 10)
180#define TRIGCON_SWTRIGCMD_W0BUF (1 << 6)
181#define TRIGCON_TRIGMODE_W0BUF (1 << 5)
182#define TRIGCON_HWTRIGMASK_I80_RGB (1 << 4)
183#define TRIGCON_HWTRIGEN_I80_RGB (1 << 3)
184#define TRIGCON_HWTRIG_INV_I80_RGB (1 << 2)
162#define TRIGCON_SWTRIGCMD (1 << 1) 185#define TRIGCON_SWTRIGCMD (1 << 1)
163#define TRIGCON_SWTRIGEN (1 << 0) 186#define TRIGCON_SWTRIGEN (1 << 0)
164 187
188/* DECON_CRCCTRL */
189#define CRCCTRL_CRCCLKEN (0x1 << 2)
190#define CRCCTRL_CRCSTART_F (0x1 << 1)
191#define CRCCTRL_CRCEN (0x1 << 0)
192#define CRCCTRL_MASK (0x7)
193
165#endif /* EXYNOS_REGS_DECON_H */ 194#endif /* EXYNOS_REGS_DECON_H */
diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
index 314105cd5061..7b635d68cfe1 100644
--- a/sound/pci/hda/hda_controller.h
+++ b/sound/pci/hda/hda_controller.h
@@ -153,7 +153,7 @@ struct azx {
153 unsigned int snoop:1; 153 unsigned int snoop:1;
154 unsigned int align_buffer_size:1; 154 unsigned int align_buffer_size:1;
155 unsigned int region_requested:1; 155 unsigned int region_requested:1;
156 unsigned int disabled:1; /* disabled by VGA-switcher */ 156 unsigned int disabled:1; /* disabled by vga_switcheroo */
157 157
158#ifdef CONFIG_SND_HDA_DSP_LOADER 158#ifdef CONFIG_SND_HDA_DSP_LOADER
159 struct azx_dev saved_azx_dev; 159 struct azx_dev saved_azx_dev;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 4d2cbe2ca141..8a7fbdcb4072 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -338,7 +338,7 @@ enum {
338 AZX_DCAPS_4K_BDLE_BOUNDARY | AZX_DCAPS_SNOOP_OFF) 338 AZX_DCAPS_4K_BDLE_BOUNDARY | AZX_DCAPS_SNOOP_OFF)
339 339
340/* 340/*
341 * VGA-switcher support 341 * vga_switcheroo support
342 */ 342 */
343#ifdef SUPPORT_VGA_SWITCHEROO 343#ifdef SUPPORT_VGA_SWITCHEROO
344#define use_vga_switcheroo(chip) ((chip)->use_vga_switcheroo) 344#define use_vga_switcheroo(chip) ((chip)->use_vga_switcheroo)
@@ -1077,12 +1077,12 @@ static void azx_vs_set_state(struct pci_dev *pci,
1077 } 1077 }
1078 } 1078 }
1079 } else { 1079 } else {
1080 dev_info(chip->card->dev, "%s via VGA-switcheroo\n", 1080 dev_info(chip->card->dev, "%s via vga_switcheroo\n",
1081 disabled ? "Disabling" : "Enabling"); 1081 disabled ? "Disabling" : "Enabling");
1082 if (disabled) { 1082 if (disabled) {
1083 pm_runtime_put_sync_suspend(card->dev); 1083 pm_runtime_put_sync_suspend(card->dev);
1084 azx_suspend(card->dev); 1084 azx_suspend(card->dev);
1085 /* when we get suspended by vga switcheroo we end up in D3cold, 1085 /* when we get suspended by vga_switcheroo we end up in D3cold,
1086 * however we have no ACPI handle, so pci/acpi can't put us there, 1086 * however we have no ACPI handle, so pci/acpi can't put us there,
1087 * put ourselves there */ 1087 * put ourselves there */
1088 pci->current_state = PCI_D3cold; 1088 pci->current_state = PCI_D3cold;
@@ -1122,7 +1122,7 @@ static void init_vga_switcheroo(struct azx *chip)
1122 struct pci_dev *p = get_bound_vga(chip->pci); 1122 struct pci_dev *p = get_bound_vga(chip->pci);
1123 if (p) { 1123 if (p) {
1124 dev_info(chip->card->dev, 1124 dev_info(chip->card->dev,
1125 "Handle VGA-switcheroo audio client\n"); 1125 "Handle vga_switcheroo audio client\n");
1126 hda->use_vga_switcheroo = 1; 1126 hda->use_vga_switcheroo = 1;
1127 pci_dev_put(p); 1127 pci_dev_put(p);
1128 } 1128 }
@@ -1144,8 +1144,7 @@ static int register_vga_switcheroo(struct azx *chip)
1144 * is there any machine with two switchable HDMI audio controllers? 1144 * is there any machine with two switchable HDMI audio controllers?
1145 */ 1145 */
1146 err = vga_switcheroo_register_audio_client(chip->pci, &azx_vs_ops, 1146 err = vga_switcheroo_register_audio_client(chip->pci, &azx_vs_ops,
1147 VGA_SWITCHEROO_DIS, 1147 VGA_SWITCHEROO_DIS);
1148 hda->probe_continued);
1149 if (err < 0) 1148 if (err < 0)
1150 return err; 1149 return err;
1151 hda->vga_switcheroo_registered = 1; 1150 hda->vga_switcheroo_registered = 1;
@@ -1234,7 +1233,7 @@ static int azx_dev_free(struct snd_device *device)
1234 1233
1235#ifdef SUPPORT_VGA_SWITCHEROO 1234#ifdef SUPPORT_VGA_SWITCHEROO
1236/* 1235/*
1237 * Check of disabled HDMI controller by vga-switcheroo 1236 * Check of disabled HDMI controller by vga_switcheroo
1238 */ 1237 */
1239static struct pci_dev *get_bound_vga(struct pci_dev *pci) 1238static struct pci_dev *get_bound_vga(struct pci_dev *pci)
1240{ 1239{
@@ -1919,7 +1918,7 @@ static int azx_probe(struct pci_dev *pci,
1919 1918
1920 err = register_vga_switcheroo(chip); 1919 err = register_vga_switcheroo(chip);
1921 if (err < 0) { 1920 if (err < 0) {
1922 dev_err(card->dev, "Error registering VGA-switcheroo client\n"); 1921 dev_err(card->dev, "Error registering vga_switcheroo client\n");
1923 goto out_free; 1922 goto out_free;
1924 } 1923 }
1925 1924
diff --git a/sound/pci/hda/hda_intel.h b/sound/pci/hda/hda_intel.h
index 354f0bbed833..ff0c4d617bc1 100644
--- a/sound/pci/hda/hda_intel.h
+++ b/sound/pci/hda/hda_intel.h
@@ -35,7 +35,7 @@ struct hda_intel {
35 unsigned int irq_pending_warned:1; 35 unsigned int irq_pending_warned:1;
36 unsigned int probe_continued:1; 36 unsigned int probe_continued:1;
37 37
38 /* VGA-switcheroo setup */ 38 /* vga_switcheroo setup */
39 unsigned int use_vga_switcheroo:1; 39 unsigned int use_vga_switcheroo:1;
40 unsigned int vga_switcheroo_registered:1; 40 unsigned int vga_switcheroo_registered:1;
41 unsigned int init_failed:1; /* delayed init failed */ 41 unsigned int init_failed:1; /* delayed init failed */