author    Linus Torvalds <torvalds@linux-foundation.org>  2013-09-05 13:17:26 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-09-05 13:17:26 -0400
commit    a09e9a7a4b907f2dfa9bdb2b98a1828ab4b340b2 (patch)
tree      c7a2df4e887573648eeaf8f7939889046990d3f6
parent    9ab073bc45b8b523cc39658925bb44bef35ca657 (diff)
parent    86a7e1224a68511d3a1ae0b7e11581b9d37723ae (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm tree changes from Dave Airlie:
 "This is the main drm pull request, I have some overlap with sound and
  arm-soc, the sound patch is acked and may conflict based on -next
  reports but should be a trivial fixup, which I'll leave to you!

  Highlights:

   - new drivers: MSM driver from Rob Clark

   - non-drm: switcheroo and hdmi audio driver support for secondary GPU
     poweroff, so drivers can use runtime PM to poweroff the GPUs.  This
     can save 5 or 6W on some optimus laptops.

   - drm core: combined GEM and TTM VMA manager, per-filp mmap permission
     tracking, initial rendernode support (via a runtime enable for now,
     until we get api stable), remove old proc support, lots of cleanups
     of legacy code, hdmi vendor infoframes and 4k modes, lots of
     gem/prime locking and race fixes, async pageflip scaffolding,
     drm bridge objects

   - i915: Haswell PC8+ support and eLLC support, HDMI 4K support,
     initial per-process VMA pieces, watermark reworks, convert to
     generic hdmi infoframes, encoder reworking, fastboot support

   - radeon: CIK PM support, remove 3d blit code in favour of DMA
     engines, Berlin GPU support, HDMI audio fixes

   - nouveau: secondary GPU power down support for optimus laptops,
     lots of fixes, use MSI, VP3 engine support

   - exynos: runtime pm support for g2d, DT support, remove non-DT

   - tda998x i2c driver: lots of fixes for sync issues

   - gma500: lots of cleanups

   - rcar: add LVDS support, fbdev emulation

   - tegra: just minor fixes"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (684 commits)
  drm/exynos: Fix build error with exynos_drm_connector.c
  drm/exynos: Remove non-DT support in exynos_drm_fimd
  drm/exynos: Remove non-DT support in exynos_hdmi
  drm/exynos: Remove non-DT support in exynos_drm_g2d
  drm/exynos: Remove non-DT support in exynos_hdmiphy
  drm/exynos: Remove non-DT support in exynos_ddc
  drm/exynos: Make Exynos DRM drivers depend on OF
  drm/exynos: Consider fallback option to allocation fail
  drm/exynos: fimd: move platform data parsing to separate function
  drm/exynos: fimd: get signal polarities from device tree
  drm/exynos: fimd: replace struct fb_videomode with videomode
  drm/exynos: check a pixel format to a particular window layer
  drm/exynos: fix fimd pixel format setting
  drm/exynos: Add NULL pointer check
  drm/exynos: Remove redundant error messages
  drm/exynos: Add missing of.h header include
  drm/exynos: Remove redundant NULL check in exynos_drm_buf
  drm/exynos: add device tree support for rotator
  drm/exynos: Add missing includes
  drm/exynos: add runtime pm interfaces to g2d driver
  ...
-rw-r--r--Documentation/DocBook/drm.tmpl138
-rw-r--r--Documentation/devicetree/bindings/gpu/samsung-rotator.txt27
-rw-r--r--drivers/gpu/drm/Kconfig15
-rw-r--r--drivers/gpu/drm/Makefile5
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c5
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h3
-rw-r--r--drivers/gpu/drm/ast/ast_main.c9
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c5
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c5
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h3
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c9
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c5
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c51
-rw-r--r--drivers/gpu/drm/drm_bufs.c236
-rw-r--r--drivers/gpu/drm/drm_context.c81
-rw-r--r--drivers/gpu/drm/drm_crtc.c173
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c89
-rw-r--r--drivers/gpu/drm/drm_dma.c17
-rw-r--r--drivers/gpu/drm/drm_drv.c106
-rw-r--r--drivers/gpu/drm/drm_edid.c306
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c5
-rw-r--r--drivers/gpu/drm/drm_flip_work.c124
-rw-r--r--drivers/gpu/drm/drm_fops.c98
-rw-r--r--drivers/gpu/drm/drm_gem.c440
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c26
-rw-r--r--drivers/gpu/drm/drm_info.c6
-rw-r--r--drivers/gpu/drm/drm_ioctl.c62
-rw-r--r--drivers/gpu/drm/drm_memory.c2
-rw-r--r--drivers/gpu/drm/drm_mm.c229
-rw-r--r--drivers/gpu/drm/drm_modes.c58
-rw-r--r--drivers/gpu/drm/drm_pci.c35
-rw-r--r--drivers/gpu/drm/drm_platform.c16
-rw-r--r--drivers/gpu/drm/drm_prime.c190
-rw-r--r--drivers/gpu/drm/drm_proc.c209
-rw-r--r--drivers/gpu/drm/drm_scatter.c29
-rw-r--r--drivers/gpu/drm/drm_stub.c73
-rw-r--r--drivers/gpu/drm/drm_usb.c9
-rw-r--r--drivers/gpu/drm/drm_vm.c3
-rw-r--r--drivers/gpu/drm/drm_vma_manager.c436
-rw-r--r--drivers/gpu/drm/exynos/Kconfig6
-rw-r--r--drivers/gpu/drm/exynos/exynos_ddc.c13
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c38
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.c37
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c20
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c263
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c60
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c53
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c117
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c87
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmiphy.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c9
-rw-r--r--drivers/gpu/drm/gma500/Makefile1
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c3
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.h12
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c57
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c920
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c154
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c89
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c71
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c31
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.h2
-rw-r--r--drivers/gpu/drm/gma500/gem.c39
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c776
-rw-r--r--drivers/gpu/drm/gma500/gma_display.h103
-rw-r--r--drivers/gpu/drm/gma500/gtt.c38
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c15
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.h16
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c65
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c63
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c43
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c48
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c3
-rw-r--r--drivers/gpu/drm/gma500/psb_device.h (renamed from drivers/gpu/drm/gma500/psb_intel_display.h)13
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c21
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h7
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c944
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h44
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c75
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c53
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c485
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c3
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c3
-rw-r--r--drivers/gpu/drm/i810/i810_drv.h2
-rw-r--r--drivers/gpu/drm/i915/Makefile2
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c986
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c141
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c322
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h602
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c759
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c34
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c73
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c41
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c93
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c191
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c313
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c208
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c19
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c1019
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c1523
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h150
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c71
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h45
-rw-r--r--drivers/gpu/drm/i915/intel_acpi.c14
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c38
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c78
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1501
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c518
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h148
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c61
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c23
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c286
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c40
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c24
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c3
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c1155
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c79
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h16
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c94
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c59
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c31
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c595
-rw-r--r--drivers/gpu/drm/mga/mga_drv.c3
-rw-r--r--drivers/gpu/drm/mga/mga_drv.h2
-rw-r--r--drivers/gpu/drm/mga/mga_state.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c9
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c5
-rw-r--r--drivers/gpu/drm/msm/Kconfig34
-rw-r--r--drivers/gpu/drm/msm/Makefile30
-rw-r--r--drivers/gpu/drm/msm/NOTES69
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h1438
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h2193
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c502
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.h30
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h432
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c370
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h141
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h254
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h502
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h114
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h48
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c272
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h131
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h508
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c167
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c367
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_i2c.c281
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c141
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c214
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h50
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4.xml.h1061
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_crtc.c685
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c305
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_format.c56
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_irq.c203
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_kms.c365
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_kms.h194
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_plane.c243
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c776
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h213
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c202
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c258
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c597
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h99
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c412
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c463
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h124
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c61
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.h43
-rw-r--r--drivers/gpu/drm/nouveau/core/core/printk.c19
-rw-r--r--drivers/gpu/drm/nouveau/core/core/ramht.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c52
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dport.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c34
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv40.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c52
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/vp/nv98.c47
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/math.h16
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/printk.h13
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/i2c.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/mc.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/timer.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/os.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/therm.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/base.c28
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/base.c19
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fan.c20
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/temp.c21
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/base.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c35
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c49
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c42
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c27
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c29
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c284
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c29
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioc32.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioctl.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c14
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c4
-rw-r--r--drivers/gpu/drm/omapdrm/Makefile3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c12
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h7
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c74
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c52
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_helpers.c169
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c51
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h8
-rw-r--r--drivers/gpu/drm/qxl/qxl_dumb.c7
-rw-r--r--drivers/gpu/drm/qxl/qxl_gem.c26
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.h2
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c4
-rw-r--r--drivers/gpu/drm/r128/r128_cce.c2
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c3
-rw-r--r--drivers/gpu/drm/r128/r128_drv.h2
-rw-r--r--drivers/gpu/drm/r128/r128_state.c2
-rw-r--r--drivers/gpu/drm/radeon/Makefile24
-rw-r--r--drivers/gpu/drm/radeon/atombios.h615
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c8
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c11
-rw-r--r--drivers/gpu/drm/radeon/atombios_i2c.c16
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/cayman_blit_shaders.c54
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c5243
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.h332
-rw-r--r--drivers/gpu/drm/radeon/ci_smc.c262
-rw-r--r--drivers/gpu/drm/radeon/cik.c3127
-rw-r--r--drivers/gpu/drm/radeon/cik_reg.h3
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c785
-rw-r--r--drivers/gpu/drm/radeon/cikd.h594
-rw-r--r--drivers/gpu/drm/radeon/clearstate_cayman.h2
-rw-r--r--drivers/gpu/drm/radeon/clearstate_ci.h944
-rw-r--r--drivers/gpu/drm/radeon/clearstate_evergreen.h2
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c3
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c278
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c536
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c729
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_shaders.c54
-rw-r--r--drivers/gpu/drm/radeon/evergreen_dma.c190
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c100
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h11
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c2645
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.h199
-rw-r--r--drivers/gpu/drm/radeon/kv_smc.c207
-rw-r--r--drivers/gpu/drm/radeon/ni.c373
-rw-r--r--drivers/gpu/drm/radeon/ni_dma.c338
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c14
-rw-r--r--drivers/gpu/drm/radeon/ppsmc.h57
-rw-r--r--drivers/gpu/drm/radeon/pptable.h682
-rw-r--r--drivers/gpu/drm/radeon/r100.c4
-rw-r--r--drivers/gpu/drm/radeon/r600.c811
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c60
-rw-r--r--drivers/gpu/drm/radeon/r600_blit.c31
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c785
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.h1
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c6
-rw-r--r--drivers/gpu/drm/radeon/r600_dma.c497
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c300
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.h6
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c113
-rw-r--r--drivers/gpu/drm/radeon/r600d.h36
-rw-r--r--drivers/gpu/drm/radeon/radeon.h275
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c1262
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h119
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c186
-rw-r--r--drivers/gpu/drm/radeon/radeon_blit_common.h44
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c67
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c50
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h5
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c88
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ucode.h17
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c58
-rw-r--r--drivers/gpu/drm/radeon/rs400.c9
-rw-r--r--drivers/gpu/drm/radeon/rv6xx_dpm.c5
-rw-r--r--drivers/gpu/drm/radeon/rv770.c204
-rw-r--r--drivers/gpu/drm/radeon/rv770_dma.c101
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c15
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h16
-rw-r--r--drivers/gpu/drm/radeon/si.c844
-rw-r--r--drivers/gpu/drm/radeon/si_dma.c235
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c80
-rw-r--r--drivers/gpu/drm/radeon/sid.h71
-rw-r--r--drivers/gpu/drm/radeon/smu7.h170
-rw-r--r--drivers/gpu/drm/radeon/smu7_discrete.h486
-rw-r--r--drivers/gpu/drm/radeon/smu7_fusion.h300
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c22
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.h3
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c7
-rw-r--r--drivers/gpu/drm/radeon/uvd_v1_0.c436
-rw-r--r--drivers/gpu/drm/radeon/uvd_v2_2.c165
-rw-r--r--drivers/gpu/drm/radeon/uvd_v3_1.c55
-rw-r--r--drivers/gpu/drm/radeon/uvd_v4_2.c68
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig7
-rw-r--r--drivers/gpu/drm/rcar-du/Makefile10
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c258
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.h13
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c176
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h63
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c202
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.h49
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.c187
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.h50
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c165
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.h29
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c (renamed from drivers/gpu/drm/rcar-du/rcar_du_lvds.c)101
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h (renamed from drivers/gpu/drm/rcar-du/rcar_du_lvds.h)17
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c196
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h46
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c170
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.h26
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_regs.h94
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vgacon.c (renamed from drivers/gpu/drm/rcar-du/rcar_du_vga.c)65
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vgacon.h (renamed from drivers/gpu/drm/rcar-du/rcar_du_vga.h)15
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds_regs.h69
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c2
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c3
-rw-r--r--drivers/gpu/drm/savage/savage_drv.h2
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c3
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c3
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c3
-rw-r--r--drivers/gpu/drm/sis/sis_drv.h2
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c8
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c43
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c3
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave.c27
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c102
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_manager.c41
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c231
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c3
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h2
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c63
-rw-r--r--drivers/gpu/drm/udl/udl_main.c4
-rw-r--r--drivers/gpu/drm/via/via_dma.c2
-rw-r--r--drivers/gpu/drm/via/via_drv.c3
-rw-r--r--drivers/gpu/drm/via/via_drv.h2
-rw-r--r--drivers/gpu/drm/via/via_mm.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c4
-rw-r--r--drivers/gpu/host1x/dev.c2
-rw-r--r--drivers/gpu/host1x/dev.h2
-rw-r--r--drivers/gpu/host1x/drm/dc.c2
-rw-r--r--drivers/gpu/host1x/drm/drm.c7
-rw-r--r--drivers/gpu/host1x/drm/gem.c16
-rw-r--r--drivers/gpu/host1x/drm/gem.h3
-rw-r--r--drivers/gpu/host1x/drm/hdmi.c27
-rw-r--r--drivers/gpu/host1x/drm/rgb.c14
-rw-r--r--drivers/gpu/host1x/job.c15
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c147
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c5
-rw-r--r--drivers/staging/imx-drm/ipuv3-crtc.c3
-rw-r--r--drivers/video/hdmi.c141
-rw-r--r--include/drm/drmP.h254
-rw-r--r--include/drm/drm_agpsupport.h194
-rw-r--r--include/drm/drm_crtc.h85
-rw-r--r--include/drm/drm_dp_helper.h31
-rw-r--r--include/drm/drm_edid.h5
-rw-r--r--include/drm/drm_fb_cma_helper.h1
-rw-r--r--include/drm/drm_flip_work.h76
-rw-r--r--include/drm/drm_gem_cma_helper.h8
-rw-r--r--include/drm/drm_mm.h142
-rw-r--r--include/drm/drm_pciids.h48
-rw-r--r--include/drm/drm_vma_manager.h257
-rw-r--r--include/drm/exynos_drm.h3
-rw-r--r--include/drm/i2c/tda998x.h30
-rw-r--r--include/drm/ttm/ttm_bo_api.h15
-rw-r--r--include/drm/ttm/ttm_bo_driver.h10
-rw-r--r--include/linux/hdmi.h53
-rw-r--r--include/linux/platform_data/rcar-du.h34
-rw-r--r--include/linux/vga_switcheroo.h13
-rw-r--r--include/uapi/drm/Kbuild1
-rw-r--r--include/uapi/drm/drm.h3
-rw-r--r--include/uapi/drm/drm_mode.h16
-rw-r--r--include/uapi/drm/i915_drm.h49
-rw-r--r--include/uapi/drm/msm_drm.h207
-rw-r--r--include/uapi/drm/radeon_drm.h2
-rw-r--r--sound/pci/hda/hda_intel.c36
435 files changed, 49887 insertions, 17151 deletions
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 7d1278e7a434..ed1d6d289022 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -156,13 +156,6 @@
156 </para></listitem> 156 </para></listitem>
157 </varlistentry> 157 </varlistentry>
158 <varlistentry> 158 <varlistentry>
159 <term>DRIVER_USE_MTRR</term>
160 <listitem><para>
161 Driver uses MTRR interface for mapping memory, the DRM core will
162 manage MTRR resources. Deprecated.
163 </para></listitem>
164 </varlistentry>
165 <varlistentry>
166 <term>DRIVER_PCI_DMA</term> 159 <term>DRIVER_PCI_DMA</term>
167 <listitem><para> 160 <listitem><para>
168 Driver is capable of PCI DMA, mapping of PCI DMA buffers to 161 Driver is capable of PCI DMA, mapping of PCI DMA buffers to
@@ -195,28 +188,6 @@
195 </para></listitem> 188 </para></listitem>
196 </varlistentry> 189 </varlistentry>
197 <varlistentry> 190 <varlistentry>
198 <term>DRIVER_IRQ_VBL</term>
199 <listitem><para>Unused. Deprecated.</para></listitem>
200 </varlistentry>
201 <varlistentry>
202 <term>DRIVER_DMA_QUEUE</term>
203 <listitem><para>
204 Should be set if the driver queues DMA requests and completes them
205 asynchronously. Deprecated.
206 </para></listitem>
207 </varlistentry>
208 <varlistentry>
209 <term>DRIVER_FB_DMA</term>
210 <listitem><para>
211 Driver supports DMA to/from the framebuffer, mapping of frambuffer
212 DMA buffers to userspace will be supported. Deprecated.
213 </para></listitem>
214 </varlistentry>
215 <varlistentry>
216 <term>DRIVER_IRQ_VBL2</term>
217 <listitem><para>Unused. Deprecated.</para></listitem>
218 </varlistentry>
219 <varlistentry>
220 <term>DRIVER_GEM</term> 191 <term>DRIVER_GEM</term>
221 <listitem><para> 192 <listitem><para>
222 Driver use the GEM memory manager. 193 Driver use the GEM memory manager.
@@ -234,6 +205,12 @@
234 Driver implements DRM PRIME buffer sharing. 205 Driver implements DRM PRIME buffer sharing.
235 </para></listitem> 206 </para></listitem>
236 </varlistentry> 207 </varlistentry>
208 <varlistentry>
209 <term>DRIVER_RENDER</term>
210 <listitem><para>
211 Driver supports dedicated render nodes.
212 </para></listitem>
213 </varlistentry>
237 </variablelist> 214 </variablelist>
238 </sect3> 215 </sect3>
239 <sect3> 216 <sect3>
@@ -2212,6 +2189,18 @@ void intel_crt_init(struct drm_device *dev)
2212!Iinclude/drm/drm_rect.h 2189!Iinclude/drm/drm_rect.h
2213!Edrivers/gpu/drm/drm_rect.c 2190!Edrivers/gpu/drm/drm_rect.c
2214 </sect2> 2191 </sect2>
2192 <sect2>
2193 <title>Flip-work Helper Reference</title>
2194!Pinclude/drm/drm_flip_work.h flip utils
2195!Iinclude/drm/drm_flip_work.h
2196!Edrivers/gpu/drm/drm_flip_work.c
2197 </sect2>
2198 <sect2>
2199 <title>VMA Offset Manager</title>
2200!Pdrivers/gpu/drm/drm_vma_manager.c vma offset manager
2201!Edrivers/gpu/drm/drm_vma_manager.c
2202!Iinclude/drm/drm_vma_manager.h
2203 </sect2>
2215 </sect1> 2204 </sect1>
2216 2205
2217 <!-- Internals: kms properties --> 2206 <!-- Internals: kms properties -->
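The flip-work helper referenced in the hunk above is new in this series. A minimal usage sketch, assuming the drm_flip_work API added here; the foo_crtc structure and unref_fb_worker callback are illustrative, not taken from any specific driver:

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <linux/workqueue.h>

struct foo_crtc {
	struct drm_crtc base;
	struct drm_flip_work unref_fb_work;
};

/* Runs from a workqueue, so sleeping cleanup work is allowed here. */
static void unref_fb_worker(struct drm_flip_work *work, void *val)
{
	drm_framebuffer_unreference(val);
}

static int foo_crtc_init_flip_work(struct foo_crtc *foo_crtc)
{
	/* fifo sized for up to 16 pending framebuffers before a commit */
	return drm_flip_work_init(&foo_crtc->unref_fb_work, 16,
				  "unref fb", unref_fb_worker);
}

static void foo_crtc_flip_done(struct foo_crtc *foo_crtc,
			       struct drm_framebuffer *old_fb)
{
	/* safe from irq/vblank context: just push the stale fb on the fifo */
	drm_flip_work_queue(&foo_crtc->unref_fb_work, old_fb);
	/* kick the worker so queued items are released on system_wq */
	drm_flip_work_commit(&foo_crtc->unref_fb_work, system_wq);
}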
@@ -2422,18 +2411,18 @@ void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
2422 </abstract> 2411 </abstract>
2423 <para> 2412 <para>
2424 The <methodname>firstopen</methodname> method is called by the DRM core 2413 The <methodname>firstopen</methodname> method is called by the DRM core
2425 when an application opens a device that has no other opened file handle. 2414 for legacy UMS (User Mode Setting) drivers only when an application
2426 Similarly the <methodname>lastclose</methodname> method is called when 2415 opens a device that has no other opened file handle. UMS drivers can
2427 the last application holding a file handle opened on the device closes 2416 implement it to acquire device resources. KMS drivers can't use the
2428 it. Both methods are mostly used for UMS (User Mode Setting) drivers to 2417 method and must acquire resources in the <methodname>load</methodname>
2429 acquire and release device resources which should be done in the 2418 method instead.
2430 <methodname>load</methodname> and <methodname>unload</methodname>
2431 methods for KMS drivers.
2432 </para> 2419 </para>
2433 <para> 2420 <para>
2434 Note that the <methodname>lastclose</methodname> method is also called 2421 Similarly the <methodname>lastclose</methodname> method is called when
2435 at module unload time or, for hot-pluggable devices, when the device is 2422 the last application holding a file handle opened on the device closes
2436 unplugged. The <methodname>firstopen</methodname> and 2423 it, for both UMS and KMS drivers. Additionally, the method is also
2424 called at module unload time or, for hot-pluggable devices, when the
2425 device is unplugged. The <methodname>firstopen</methodname> and
2437 <methodname>lastclose</methodname> calls can thus be unbalanced. 2426 <methodname>lastclose</methodname> calls can thus be unbalanced.
2438 </para> 2427 </para>
2439 <para> 2428 <para>
@@ -2462,7 +2451,12 @@ void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
2462 <para> 2451 <para>
2463 The <methodname>lastclose</methodname> method should restore CRTC and 2452 The <methodname>lastclose</methodname> method should restore CRTC and
2464 plane properties to default value, so that a subsequent open of the 2453 plane properties to default value, so that a subsequent open of the
2465 device will not inherit state from the previous user. 2454 device will not inherit state from the previous user. It can also be
2455 used to execute delayed power switching state changes, e.g. in
2456 conjunction with the vga-switcheroo infrastructure. Beyond that KMS
2457 drivers should not do any further cleanup. Only legacy UMS drivers might
2458 need to clean up device state so that the vga console or an independent
2459 fbdev driver could take over.
2466 </para> 2460 </para>
2467 </sect2> 2461 </sect2>
2468 <sect2> 2462 <sect2>
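To make the delayed power-switching case mentioned above concrete, a KMS driver's lastclose might look roughly like the sketch below. The foo_* names are hypothetical; vga_switcheroo_process_delayed_switch() is the hook this series wires up for drivers such as nouveau.

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <linux/vga_switcheroo.h>

struct foo_device {
	struct drm_fb_helper fbdev_helper;
};

static void foo_lastclose(struct drm_device *dev)
{
	struct foo_device *foo = dev->dev_private;

	/* hand the display back to the fbdev console rather than leaving
	 * whatever state the last client configured */
	drm_modeset_lock_all(dev);
	drm_fb_helper_restore_fbdev_mode(&foo->fbdev_helper);
	drm_modeset_unlock_all(dev);

	/* complete a GPU power switch that vga_switcheroo deferred while
	 * a client still held the device open */
	vga_switcheroo_process_delayed_switch();
}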
@@ -2498,7 +2492,6 @@ void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
2498 <programlisting> 2492 <programlisting>
2499 .poll = drm_poll, 2493 .poll = drm_poll,
2500 .read = drm_read, 2494 .read = drm_read,
2501 .fasync = drm_fasync,
2502 .llseek = no_llseek, 2495 .llseek = no_llseek,
2503 </programlisting> 2496 </programlisting>
2504 </para> 2497 </para>
@@ -2657,6 +2650,69 @@ int (*resume) (struct drm_device *);</synopsis>
2657 info, since man pages should cover the rest. 2650 info, since man pages should cover the rest.
2658 </para> 2651 </para>
2659 2652
2653 <!-- External: render nodes -->
2654
2655 <sect1>
2656 <title>Render nodes</title>
2657 <para>
2658 DRM core provides multiple character-devices for user-space to use.
2659 Depending on which device is opened, user-space can perform a different
2660 set of operations (mainly ioctls). The primary node is always created
2661 and called <term>card&lt;num&gt;</term>. Additionally, a currently
2662 unused control node, called <term>controlD&lt;num&gt;</term> is also
2663 created. The primary node provides all legacy operations and
2664 historically was the only interface used by userspace. With KMS, the
2665 control node was introduced. However, the planned KMS control interface
2666 has never been written and so the control node stays unused to date.
2667 </para>
2668 <para>
2669 With the increased use of offscreen renderers and GPGPU applications,
2670 clients no longer require running compositors or graphics servers to
2671 make use of a GPU. But the DRM API required unprivileged clients to
2672 authenticate to a DRM-Master prior to getting GPU access. To avoid this
2673 step and to grant clients GPU access without authenticating, render
2674 nodes were introduced. Render nodes solely serve render clients, that
2675 is, no modesetting or privileged ioctls can be issued on render nodes.
2676 Only non-global rendering commands are allowed. If a driver supports
2677 render nodes, it must advertise it via the <term>DRIVER_RENDER</term>
2678 DRM driver capability. If not supported, the primary node must be used
2679 for render clients together with the legacy drmAuth authentication
2680 procedure.
2681 </para>
2682 <para>
2683 If a driver advertises render node support, DRM core will create a
2684 separate render node called <term>renderD&lt;num&gt;</term>. There will
2685 be one render node per device. No ioctls except PRIME-related ioctls
2686 will be allowed on this node. Especially <term>GEM_OPEN</term> will be
2687 explicitly prohibited. Render nodes are designed to avoid the
2688 buffer-leaks, which occur if clients guess the flink names or mmap
2689 offsets on the legacy interface. Additionally to this basic interface,
2690 drivers must mark their driver-dependent render-only ioctls as
2691 <term>DRM_RENDER_ALLOW</term> so render clients can use them. Driver
2692 authors must be careful not to allow any privileged ioctls on render
2693 nodes.
2694 </para>
2695 <para>
2696 With render nodes, user-space can now control access to the render node
2697 via basic file-system access-modes. A running graphics server which
2698 authenticates clients on the privileged primary/legacy node is no longer
2699 required. Instead, a client can open the render node and is immediately
2700 granted GPU access. Communication between clients (or servers) is done
2701 via PRIME. FLINK from render node to legacy node is not supported. New
2702 clients must not use the insecure FLINK interface.
2703 </para>
2704 <para>
2705 Besides dropping all modeset/global ioctls, render nodes also drop the
2706 DRM-Master concept. There is no reason to associate render clients with
2707 a DRM-Master as they are independent of any graphics server. Besides,
2708 they must work without any running master, anyway.
2709 Drivers must be able to run without a master object if they support
2710 render nodes. If, on the other hand, a driver requires shared state
2711 between clients which is visible to user-space and accessible beyond
2712 open-file boundaries, they cannot support render nodes.
2713 </para>
2714 </sect1>
2715
2660 <!-- External: vblank handling --> 2716 <!-- External: vblank handling -->
2661 2717
2662 <sect1> 2718 <sect1>
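To tie the DRIVER_RENDER / DRM_RENDER_ALLOW description above to code, a driver opting into render nodes might look roughly like this sketch. The foo_* names and the sample ioctl are hypothetical; only the two flags come from this series.

#include <drm/drmP.h>

/* Hypothetical uapi for the sketch (not part of this series). */
struct drm_foo_gem_submit {
	__u32 handle;
	__u32 flags;
};
#define DRM_FOO_GEM_SUBMIT	0x00
#define DRM_IOCTL_FOO_GEM_SUBMIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_FOO_GEM_SUBMIT, struct drm_foo_gem_submit)

static int foo_ioctl_gem_submit(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	/* render-only work: no modesetting, no global device state */
	return 0;
}

static struct drm_ioctl_desc foo_ioctls[] = {
	/* DRM_RENDER_ALLOW: callable on renderD<num> without drmAuth */
	DRM_IOCTL_DEF_DRV(FOO_GEM_SUBMIT, foo_ioctl_gem_submit,
			  DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
};

static struct drm_driver foo_driver = {
	/* DRIVER_RENDER asks the core to create the renderD<num> node */
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_RENDER,
	.ioctls = foo_ioctls,
	.num_ioctls = ARRAY_SIZE(foo_ioctls),
};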
diff --git a/Documentation/devicetree/bindings/gpu/samsung-rotator.txt b/Documentation/devicetree/bindings/gpu/samsung-rotator.txt
new file mode 100644
index 000000000000..82cd1ed0be93
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/samsung-rotator.txt
@@ -0,0 +1,27 @@
1* Samsung Image Rotator
2
3Required properties:
4 - compatible : value should be one of the following:
5 (a) "samsung,exynos4210-rotator" for Rotator IP in Exynos4210
6 (b) "samsung,exynos4212-rotator" for Rotator IP in Exynos4212/4412
7 (c) "samsung,exynos5250-rotator" for Rotator IP in Exynos5250
8
9 - reg : Physical base address of the IP registers and length of memory
10 mapped region.
11
12 - interrupts : Interrupt specifier for rotator interrupt, according to format
13 specific to interrupt parent.
14
15 - clocks : Clock specifier for rotator clock, according to generic clock
16 bindings. (See Documentation/devicetree/bindings/clock/exynos*.txt)
17
18 - clock-names : Names of clocks. For exynos rotator, it should be "rotator".
19
20Example:
21 rotator@12810000 {
22 compatible = "samsung,exynos4210-rotator";
23 reg = <0x12810000 0x1000>;
24 interrupts = <0 83 0>;
25 clocks = <&clock 278>;
26 clock-names = "rotator";
27 };
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index a7c54c843291..955555d6ec88 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -6,7 +6,7 @@
6# 6#
7menuconfig DRM 7menuconfig DRM
8 tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" 8 tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
9 depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU 9 depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU && HAS_DMA
10 select HDMI 10 select HDMI
11 select I2C 11 select I2C
12 select I2C_ALGOBIT 12 select I2C_ALGOBIT
@@ -168,6 +168,17 @@ config DRM_I915_KMS
168 the driver to bind to PCI devices, which precludes loading things 168 the driver to bind to PCI devices, which precludes loading things
169 like intelfb. 169 like intelfb.
170 170
171config DRM_I915_PRELIMINARY_HW_SUPPORT
172 bool "Enable preliminary support for prerelease Intel hardware by default"
173 depends on DRM_I915
174 help
175 Choose this option if you have prerelease Intel hardware and want the
176 i915 driver to support it by default. You can enable such support at
177 runtime with the module option i915.preliminary_hw_support=1; this
178 option changes the default for that module option.
179
180 If in doubt, say "N".
181
171config DRM_MGA 182config DRM_MGA
172 tristate "Matrox g200/g400" 183 tristate "Matrox g200/g400"
173 depends on DRM && PCI 184 depends on DRM && PCI
@@ -223,3 +234,5 @@ source "drivers/gpu/drm/omapdrm/Kconfig"
223source "drivers/gpu/drm/tilcdc/Kconfig" 234source "drivers/gpu/drm/tilcdc/Kconfig"
224 235
225source "drivers/gpu/drm/qxl/Kconfig" 236source "drivers/gpu/drm/qxl/Kconfig"
237
238source "drivers/gpu/drm/msm/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 801bcafa3028..f089adfe70ee 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -7,13 +7,13 @@ ccflags-y := -Iinclude/drm
7drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \ 7drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
8 drm_context.o drm_dma.o \ 8 drm_context.o drm_dma.o \
9 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ 9 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
10 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ 10 drm_lock.o drm_memory.o drm_stub.o drm_vm.o \
11 drm_agpsupport.o drm_scatter.o drm_pci.o \ 11 drm_agpsupport.o drm_scatter.o drm_pci.o \
12 drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \ 12 drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
13 drm_crtc.o drm_modes.o drm_edid.o \ 13 drm_crtc.o drm_modes.o drm_edid.o \
14 drm_info.o drm_debugfs.o drm_encoder_slave.o \ 14 drm_info.o drm_debugfs.o drm_encoder_slave.o \
15 drm_trace_points.o drm_global.o drm_prime.o \ 15 drm_trace_points.o drm_global.o drm_prime.o \
16 drm_rect.o 16 drm_rect.o drm_vma_manager.o drm_flip_work.o
17 17
18drm-$(CONFIG_COMPAT) += drm_ioc32.o 18drm-$(CONFIG_COMPAT) += drm_ioc32.o
19drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o 19drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -54,4 +54,5 @@ obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
54obj-$(CONFIG_DRM_OMAP) += omapdrm/ 54obj-$(CONFIG_DRM_OMAP) += omapdrm/
55obj-$(CONFIG_DRM_TILCDC) += tilcdc/ 55obj-$(CONFIG_DRM_TILCDC) += tilcdc/
56obj-$(CONFIG_DRM_QXL) += qxl/ 56obj-$(CONFIG_DRM_QXL) += qxl/
57obj-$(CONFIG_DRM_MSM) += msm/
57obj-y += i2c/ 58obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index df0d0a08097a..32e270dc714e 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -190,7 +190,6 @@ static const struct file_operations ast_fops = {
190 .unlocked_ioctl = drm_ioctl, 190 .unlocked_ioctl = drm_ioctl,
191 .mmap = ast_mmap, 191 .mmap = ast_mmap,
192 .poll = drm_poll, 192 .poll = drm_poll,
193 .fasync = drm_fasync,
194#ifdef CONFIG_COMPAT 193#ifdef CONFIG_COMPAT
195 .compat_ioctl = drm_compat_ioctl, 194 .compat_ioctl = drm_compat_ioctl,
196#endif 195#endif
@@ -198,7 +197,7 @@ static const struct file_operations ast_fops = {
198}; 197};
199 198
200static struct drm_driver driver = { 199static struct drm_driver driver = {
201 .driver_features = DRIVER_USE_MTRR | DRIVER_MODESET | DRIVER_GEM, 200 .driver_features = DRIVER_MODESET | DRIVER_GEM,
202 .dev_priv_size = 0, 201 .dev_priv_size = 0,
203 202
204 .load = ast_driver_load, 203 .load = ast_driver_load,
@@ -216,7 +215,7 @@ static struct drm_driver driver = {
216 .gem_free_object = ast_gem_free_object, 215 .gem_free_object = ast_gem_free_object,
217 .dumb_create = ast_dumb_create, 216 .dumb_create = ast_dumb_create,
218 .dumb_map_offset = ast_dumb_mmap_offset, 217 .dumb_map_offset = ast_dumb_mmap_offset,
219 .dumb_destroy = ast_dumb_destroy, 218 .dumb_destroy = drm_gem_dumb_destroy,
220 219
221}; 220};
222 221
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 622d4ae7eb9e..796dbb212a41 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -322,9 +322,6 @@ ast_bo(struct ttm_buffer_object *bo)
322extern int ast_dumb_create(struct drm_file *file, 322extern int ast_dumb_create(struct drm_file *file,
323 struct drm_device *dev, 323 struct drm_device *dev,
324 struct drm_mode_create_dumb *args); 324 struct drm_mode_create_dumb *args);
325extern int ast_dumb_destroy(struct drm_file *file,
326 struct drm_device *dev,
327 uint32_t handle);
328 325
329extern int ast_gem_init_object(struct drm_gem_object *obj); 326extern int ast_gem_init_object(struct drm_gem_object *obj);
330extern void ast_gem_free_object(struct drm_gem_object *obj); 327extern void ast_gem_free_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f60fd7bd1183..7f6152d374ca 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -449,13 +449,6 @@ int ast_dumb_create(struct drm_file *file,
449 return 0; 449 return 0;
450} 450}
451 451
452int ast_dumb_destroy(struct drm_file *file,
453 struct drm_device *dev,
454 uint32_t handle)
455{
456 return drm_gem_handle_delete(file, handle);
457}
458
459int ast_gem_init_object(struct drm_gem_object *obj) 452int ast_gem_init_object(struct drm_gem_object *obj)
460{ 453{
461 BUG(); 454 BUG();
@@ -487,7 +480,7 @@ void ast_gem_free_object(struct drm_gem_object *obj)
487 480
488static inline u64 ast_bo_mmap_offset(struct ast_bo *bo) 481static inline u64 ast_bo_mmap_offset(struct ast_bo *bo)
489{ 482{
490 return bo->bo.addr_space_offset; 483 return drm_vma_node_offset_addr(&bo->bo.vma_node);
491} 484}
492int 485int
493ast_dumb_mmap_offset(struct drm_file *file, 486ast_dumb_mmap_offset(struct drm_file *file,
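The addr_space_offset to drm_vma_node_offset_addr() conversion above comes from the new unified VMA offset manager. For a plain GEM driver (no TTM), a dumb_map_offset implementation built on the same helpers might look roughly like this sketch; the foo_ prefix is hypothetical:

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

static int foo_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
				uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	/* allocate (or reuse) a fake mmap offset in the vma offset manager */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out_unref;

	/* this is the offset userspace passes to mmap() on the DRM node */
	*offset = drm_vma_node_offset_addr(&obj->vma_node);

out_unref:
	drm_gem_object_unreference(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}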
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 6e8887fe6c1b..32aecb34dbce 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -148,7 +148,9 @@ ast_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
148 148
149static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) 149static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
150{ 150{
151 return 0; 151 struct ast_bo *astbo = ast_bo(bo);
152
153 return drm_vma_node_verify_access(&astbo->gem.vma_node, filp);
152} 154}
153 155
154static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev, 156static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -321,7 +323,6 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
321 return ret; 323 return ret;
322 } 324 }
323 325
324 astbo->gem.driver_private = NULL;
325 astbo->bo.bdev = &ast->ttm.bdev; 326 astbo->bo.bdev = &ast->ttm.bdev;
326 astbo->bo.bdev->dev_mapping = dev->dev_mapping; 327 astbo->bo.bdev->dev_mapping = dev->dev_mapping;
327 328
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 8ecb601152ef..138364d91782 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -85,10 +85,9 @@ static const struct file_operations cirrus_driver_fops = {
85#ifdef CONFIG_COMPAT 85#ifdef CONFIG_COMPAT
86 .compat_ioctl = drm_compat_ioctl, 86 .compat_ioctl = drm_compat_ioctl,
87#endif 87#endif
88 .fasync = drm_fasync,
89}; 88};
90static struct drm_driver driver = { 89static struct drm_driver driver = {
91 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_USE_MTRR, 90 .driver_features = DRIVER_MODESET | DRIVER_GEM,
92 .load = cirrus_driver_load, 91 .load = cirrus_driver_load,
93 .unload = cirrus_driver_unload, 92 .unload = cirrus_driver_unload,
94 .fops = &cirrus_driver_fops, 93 .fops = &cirrus_driver_fops,
@@ -102,7 +101,7 @@ static struct drm_driver driver = {
102 .gem_free_object = cirrus_gem_free_object, 101 .gem_free_object = cirrus_gem_free_object,
103 .dumb_create = cirrus_dumb_create, 102 .dumb_create = cirrus_dumb_create,
104 .dumb_map_offset = cirrus_dumb_mmap_offset, 103 .dumb_map_offset = cirrus_dumb_mmap_offset,
105 .dumb_destroy = cirrus_dumb_destroy, 104 .dumb_destroy = drm_gem_dumb_destroy,
106}; 105};
107 106
108static struct pci_driver cirrus_pci_driver = { 107static struct pci_driver cirrus_pci_driver = {
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index bae55609e6c3..9b0bb9184afd 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -203,9 +203,6 @@ int cirrus_gem_create(struct drm_device *dev,
203int cirrus_dumb_create(struct drm_file *file, 203int cirrus_dumb_create(struct drm_file *file,
204 struct drm_device *dev, 204 struct drm_device *dev,
205 struct drm_mode_create_dumb *args); 205 struct drm_mode_create_dumb *args);
206int cirrus_dumb_destroy(struct drm_file *file,
207 struct drm_device *dev,
208 uint32_t handle);
209 206
210int cirrus_framebuffer_init(struct drm_device *dev, 207int cirrus_framebuffer_init(struct drm_device *dev,
211 struct cirrus_framebuffer *gfb, 208 struct cirrus_framebuffer *gfb,
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 35cbae827771..f130a533a512 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -255,13 +255,6 @@ int cirrus_dumb_create(struct drm_file *file,
255 return 0; 255 return 0;
256} 256}
257 257
258int cirrus_dumb_destroy(struct drm_file *file,
259 struct drm_device *dev,
260 uint32_t handle)
261{
262 return drm_gem_handle_delete(file, handle);
263}
264
265int cirrus_gem_init_object(struct drm_gem_object *obj) 258int cirrus_gem_init_object(struct drm_gem_object *obj)
266{ 259{
267 BUG(); 260 BUG();
@@ -294,7 +287,7 @@ void cirrus_gem_free_object(struct drm_gem_object *obj)
294 287
295static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo) 288static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
296{ 289{
297 return bo->bo.addr_space_offset; 290 return drm_vma_node_offset_addr(&bo->bo.vma_node);
298} 291}
299 292
300int 293int
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 69fd8f1ac8df..75becdeac07d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -148,7 +148,9 @@ cirrus_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
148 148
149static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) 149static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
150{ 150{
151 return 0; 151 struct cirrus_bo *cirrusbo = cirrus_bo(bo);
152
153 return drm_vma_node_verify_access(&cirrusbo->gem.vma_node, filp);
152} 154}
153 155
154static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev, 156static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -326,7 +328,6 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
326 return ret; 328 return ret;
327 } 329 }
328 330
329 cirrusbo->gem.driver_private = NULL;
330 cirrusbo->bo.bdev = &cirrus->ttm.bdev; 331 cirrusbo->bo.bdev = &cirrus->ttm.bdev;
331 cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping; 332 cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping;
332 333
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 3d8fed179797..e301d653d97e 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -424,6 +424,57 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
424} 424}
425 425
426/** 426/**
427 * drm_agp_clear - Clear AGP resource list
428 * @dev: DRM device
429 *
430 * Iterate over all AGP resources and remove them. But keep the AGP head
431 * intact so it can still be used. It is safe to call this if AGP is disabled or
432 * was already removed.
433 *
434 * If DRIVER_MODESET is active, nothing is done to protect the modesetting
435 * resources from getting destroyed. Drivers are responsible of cleaning them up
436 * during device shutdown.
437 */
438void drm_agp_clear(struct drm_device *dev)
439{
440 struct drm_agp_mem *entry, *tempe;
441
442 if (!drm_core_has_AGP(dev) || !dev->agp)
443 return;
444 if (drm_core_check_feature(dev, DRIVER_MODESET))
445 return;
446
447 list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
448 if (entry->bound)
449 drm_unbind_agp(entry->memory);
450 drm_free_agp(entry->memory, entry->pages);
451 kfree(entry);
452 }
453 INIT_LIST_HEAD(&dev->agp->memory);
454
455 if (dev->agp->acquired)
456 drm_agp_release(dev);
457
458 dev->agp->acquired = 0;
459 dev->agp->enabled = 0;
460}
461
462/**
463 * drm_agp_destroy - Destroy AGP head
464 * @dev: DRM device
465 *
466 * Destroy resources that were previously allocated via drm_agp_initp. Caller
467 * must ensure to clean up all AGP resources before calling this. See
468 * drm_agp_clear().
469 *
470 * Call this to destroy AGP heads allocated via drm_agp_init().
471 */
472void drm_agp_destroy(struct drm_agp_head *agp)
473{
474 kfree(agp);
475}
476
477/**
427 * Binds a collection of pages into AGP memory at the given offset, returning 478 * Binds a collection of pages into AGP memory at the given offset, returning
428 * the AGP memory structure containing them. 479 * the AGP memory structure containing them.
429 * 480 *
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 5a4dbb410b71..471e051d295e 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -207,12 +207,10 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
207 return 0; 207 return 0;
208 } 208 }
209 209
210 if (drm_core_has_MTRR(dev)) { 210 if (map->type == _DRM_FRAME_BUFFER ||
211 if (map->type == _DRM_FRAME_BUFFER || 211 (map->flags & _DRM_WRITE_COMBINING)) {
212 (map->flags & _DRM_WRITE_COMBINING)) { 212 map->mtrr =
213 map->mtrr = 213 arch_phys_wc_add(map->offset, map->size);
214 arch_phys_wc_add(map->offset, map->size);
215 }
216 } 214 }
217 if (map->type == _DRM_REGISTERS) { 215 if (map->type == _DRM_REGISTERS) {
218 if (map->flags & _DRM_WRITE_COMBINING) 216 if (map->flags & _DRM_WRITE_COMBINING)
@@ -243,7 +241,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
243 } 241 }
244 map->handle = vmalloc_user(map->size); 242 map->handle = vmalloc_user(map->size);
245 DRM_DEBUG("%lu %d %p\n", 243 DRM_DEBUG("%lu %d %p\n",
246 map->size, drm_order(map->size), map->handle); 244 map->size, order_base_2(map->size), map->handle);
247 if (!map->handle) { 245 if (!map->handle) {
248 kfree(map); 246 kfree(map);
249 return -ENOMEM; 247 return -ENOMEM;
@@ -464,8 +462,7 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
464 iounmap(map->handle); 462 iounmap(map->handle);
465 /* FALLTHROUGH */ 463 /* FALLTHROUGH */
466 case _DRM_FRAME_BUFFER: 464 case _DRM_FRAME_BUFFER:
467 if (drm_core_has_MTRR(dev)) 465 arch_phys_wc_del(map->mtrr);
468 arch_phys_wc_del(map->mtrr);
469 break; 466 break;
470 case _DRM_SHM: 467 case _DRM_SHM:
471 vfree(map->handle); 468 vfree(map->handle);
@@ -630,7 +627,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
630 return -EINVAL; 627 return -EINVAL;
631 628
632 count = request->count; 629 count = request->count;
633 order = drm_order(request->size); 630 order = order_base_2(request->size);
634 size = 1 << order; 631 size = 1 << order;
635 632
636 alignment = (request->flags & _DRM_PAGE_ALIGN) 633 alignment = (request->flags & _DRM_PAGE_ALIGN)
@@ -800,7 +797,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
800 return -EPERM; 797 return -EPERM;
801 798
802 count = request->count; 799 count = request->count;
803 order = drm_order(request->size); 800 order = order_base_2(request->size);
804 size = 1 << order; 801 size = 1 << order;
805 802
806 DRM_DEBUG("count=%d, size=%d (%d), order=%d\n", 803 DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
@@ -1002,7 +999,7 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
1002 return -EPERM; 999 return -EPERM;
1003 1000
1004 count = request->count; 1001 count = request->count;
1005 order = drm_order(request->size); 1002 order = order_base_2(request->size);
1006 size = 1 << order; 1003 size = 1 << order;
1007 1004
1008 alignment = (request->flags & _DRM_PAGE_ALIGN) 1005 alignment = (request->flags & _DRM_PAGE_ALIGN)
@@ -1130,161 +1127,6 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
1130 return 0; 1127 return 0;
1131} 1128}
1132 1129
1133static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
1134{
1135 struct drm_device_dma *dma = dev->dma;
1136 struct drm_buf_entry *entry;
1137 struct drm_buf *buf;
1138 unsigned long offset;
1139 unsigned long agp_offset;
1140 int count;
1141 int order;
1142 int size;
1143 int alignment;
1144 int page_order;
1145 int total;
1146 int byte_count;
1147 int i;
1148 struct drm_buf **temp_buflist;
1149
1150 if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
1151 return -EINVAL;
1152
1153 if (!dma)
1154 return -EINVAL;
1155
1156 if (!capable(CAP_SYS_ADMIN))
1157 return -EPERM;
1158
1159 count = request->count;
1160 order = drm_order(request->size);
1161 size = 1 << order;
1162
1163 alignment = (request->flags & _DRM_PAGE_ALIGN)
1164 ? PAGE_ALIGN(size) : size;
1165 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1166 total = PAGE_SIZE << page_order;
1167
1168 byte_count = 0;
1169 agp_offset = request->agp_start;
1170
1171 DRM_DEBUG("count: %d\n", count);
1172 DRM_DEBUG("order: %d\n", order);
1173 DRM_DEBUG("size: %d\n", size);
1174 DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1175 DRM_DEBUG("alignment: %d\n", alignment);
1176 DRM_DEBUG("page_order: %d\n", page_order);
1177 DRM_DEBUG("total: %d\n", total);
1178
1179 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1180 return -EINVAL;
1181
1182 spin_lock(&dev->count_lock);
1183 if (dev->buf_use) {
1184 spin_unlock(&dev->count_lock);
1185 return -EBUSY;
1186 }
1187 atomic_inc(&dev->buf_alloc);
1188 spin_unlock(&dev->count_lock);
1189
1190 mutex_lock(&dev->struct_mutex);
1191 entry = &dma->bufs[order];
1192 if (entry->buf_count) {
1193 mutex_unlock(&dev->struct_mutex);
1194 atomic_dec(&dev->buf_alloc);
1195 return -ENOMEM; /* May only call once for each order */
1196 }
1197
1198 if (count < 0 || count > 4096) {
1199 mutex_unlock(&dev->struct_mutex);
1200 atomic_dec(&dev->buf_alloc);
1201 return -EINVAL;
1202 }
1203
1204 entry->buflist = kzalloc(count * sizeof(*entry->buflist),
1205 GFP_KERNEL);
1206 if (!entry->buflist) {
1207 mutex_unlock(&dev->struct_mutex);
1208 atomic_dec(&dev->buf_alloc);
1209 return -ENOMEM;
1210 }
1211
1212 entry->buf_size = size;
1213 entry->page_order = page_order;
1214
1215 offset = 0;
1216
1217 while (entry->buf_count < count) {
1218 buf = &entry->buflist[entry->buf_count];
1219 buf->idx = dma->buf_count + entry->buf_count;
1220 buf->total = alignment;
1221 buf->order = order;
1222 buf->used = 0;
1223
1224 buf->offset = (dma->byte_count + offset);
1225 buf->bus_address = agp_offset + offset;
1226 buf->address = (void *)(agp_offset + offset);
1227 buf->next = NULL;
1228 buf->waiting = 0;
1229 buf->pending = 0;
1230 buf->file_priv = NULL;
1231
1232 buf->dev_priv_size = dev->driver->dev_priv_size;
1233 buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
1234 if (!buf->dev_private) {
1235 /* Set count correctly so we free the proper amount. */
1236 entry->buf_count = count;
1237 drm_cleanup_buf_error(dev, entry);
1238 mutex_unlock(&dev->struct_mutex);
1239 atomic_dec(&dev->buf_alloc);
1240 return -ENOMEM;
1241 }
1242
1243 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1244
1245 offset += alignment;
1246 entry->buf_count++;
1247 byte_count += PAGE_SIZE << page_order;
1248 }
1249
1250 DRM_DEBUG("byte_count: %d\n", byte_count);
1251
1252 temp_buflist = krealloc(dma->buflist,
1253 (dma->buf_count + entry->buf_count) *
1254 sizeof(*dma->buflist), GFP_KERNEL);
1255 if (!temp_buflist) {
1256 /* Free the entry because it isn't valid */
1257 drm_cleanup_buf_error(dev, entry);
1258 mutex_unlock(&dev->struct_mutex);
1259 atomic_dec(&dev->buf_alloc);
1260 return -ENOMEM;
1261 }
1262 dma->buflist = temp_buflist;
1263
1264 for (i = 0; i < entry->buf_count; i++) {
1265 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1266 }
1267
1268 dma->buf_count += entry->buf_count;
1269 dma->seg_count += entry->seg_count;
1270 dma->page_count += byte_count >> PAGE_SHIFT;
1271 dma->byte_count += byte_count;
1272
1273 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1274 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1275
1276 mutex_unlock(&dev->struct_mutex);
1277
1278 request->count = entry->buf_count;
1279 request->size = size;
1280
1281 dma->flags = _DRM_DMA_USE_FB;
1282
1283 atomic_dec(&dev->buf_alloc);
1284 return 0;
1285}
1286
1287
1288/** 1130/**
1289 * Add buffers for DMA transfers (ioctl). 1131 * Add buffers for DMA transfers (ioctl).
1290 * 1132 *
@@ -1305,6 +1147,9 @@ int drm_addbufs(struct drm_device *dev, void *data,
1305 struct drm_buf_desc *request = data; 1147 struct drm_buf_desc *request = data;
1306 int ret; 1148 int ret;
1307 1149
1150 if (drm_core_check_feature(dev, DRIVER_MODESET))
1151 return -EINVAL;
1152
1308 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1153 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1309 return -EINVAL; 1154 return -EINVAL;
1310 1155
@@ -1316,7 +1161,7 @@ int drm_addbufs(struct drm_device *dev, void *data,
1316 if (request->flags & _DRM_SG_BUFFER) 1161 if (request->flags & _DRM_SG_BUFFER)
1317 ret = drm_addbufs_sg(dev, request); 1162 ret = drm_addbufs_sg(dev, request);
1318 else if (request->flags & _DRM_FB_BUFFER) 1163 else if (request->flags & _DRM_FB_BUFFER)
1319 ret = drm_addbufs_fb(dev, request); 1164 ret = -EINVAL;
1320 else 1165 else
1321 ret = drm_addbufs_pci(dev, request); 1166 ret = drm_addbufs_pci(dev, request);
1322 1167
@@ -1348,6 +1193,9 @@ int drm_infobufs(struct drm_device *dev, void *data,
1348 int i; 1193 int i;
1349 int count; 1194 int count;
1350 1195
1196 if (drm_core_check_feature(dev, DRIVER_MODESET))
1197 return -EINVAL;
1198
1351 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1199 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1352 return -EINVAL; 1200 return -EINVAL;
1353 1201
@@ -1427,6 +1275,9 @@ int drm_markbufs(struct drm_device *dev, void *data,
1427 int order; 1275 int order;
1428 struct drm_buf_entry *entry; 1276 struct drm_buf_entry *entry;
1429 1277
1278 if (drm_core_check_feature(dev, DRIVER_MODESET))
1279 return -EINVAL;
1280
1430 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1281 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1431 return -EINVAL; 1282 return -EINVAL;
1432 1283
@@ -1435,7 +1286,7 @@ int drm_markbufs(struct drm_device *dev, void *data,
1435 1286
1436 DRM_DEBUG("%d, %d, %d\n", 1287 DRM_DEBUG("%d, %d, %d\n",
1437 request->size, request->low_mark, request->high_mark); 1288 request->size, request->low_mark, request->high_mark);
1438 order = drm_order(request->size); 1289 order = order_base_2(request->size);
1439 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) 1290 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1440 return -EINVAL; 1291 return -EINVAL;
1441 entry = &dma->bufs[order]; 1292 entry = &dma->bufs[order];
@@ -1472,6 +1323,9 @@ int drm_freebufs(struct drm_device *dev, void *data,
1472 int idx; 1323 int idx;
1473 struct drm_buf *buf; 1324 struct drm_buf *buf;
1474 1325
1326 if (drm_core_check_feature(dev, DRIVER_MODESET))
1327 return -EINVAL;
1328
1475 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1329 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1476 return -EINVAL; 1330 return -EINVAL;
1477 1331
@@ -1524,6 +1378,9 @@ int drm_mapbufs(struct drm_device *dev, void *data,
1524 struct drm_buf_map *request = data; 1378 struct drm_buf_map *request = data;
1525 int i; 1379 int i;
1526 1380
1381 if (drm_core_check_feature(dev, DRIVER_MODESET))
1382 return -EINVAL;
1383
1527 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1384 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1528 return -EINVAL; 1385 return -EINVAL;
1529 1386
@@ -1541,9 +1398,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
1541 if (request->count >= dma->buf_count) { 1398 if (request->count >= dma->buf_count) {
1542 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) 1399 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
1543 || (drm_core_check_feature(dev, DRIVER_SG) 1400 || (drm_core_check_feature(dev, DRIVER_SG)
1544 && (dma->flags & _DRM_DMA_USE_SG)) 1401 && (dma->flags & _DRM_DMA_USE_SG))) {
1545 || (drm_core_check_feature(dev, DRIVER_FB_DMA)
1546 && (dma->flags & _DRM_DMA_USE_FB))) {
1547 struct drm_local_map *map = dev->agp_buffer_map; 1402 struct drm_local_map *map = dev->agp_buffer_map;
1548 unsigned long token = dev->agp_buffer_token; 1403 unsigned long token = dev->agp_buffer_token;
1549 1404
@@ -1600,25 +1455,28 @@ int drm_mapbufs(struct drm_device *dev, void *data,
1600 return retcode; 1455 return retcode;
1601} 1456}
1602 1457
1603/** 1458int drm_dma_ioctl(struct drm_device *dev, void *data,
1604 * Compute size order. Returns the exponent of the smaller power of two which 1459 struct drm_file *file_priv)
1605 * is greater or equal to given number.
1606 *
1607 * \param size size.
1608 * \return order.
1609 *
1610 * \todo Can be made faster.
1611 */
1612int drm_order(unsigned long size)
1613{ 1460{
1614 int order; 1461 if (drm_core_check_feature(dev, DRIVER_MODESET))
1615 unsigned long tmp; 1462 return -EINVAL;
1616 1463
1617 for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ; 1464 if (dev->driver->dma_ioctl)
1465 return dev->driver->dma_ioctl(dev, data, file_priv);
1466 else
1467 return -EINVAL;
1468}
1618 1469
1619 if (size & (size - 1)) 1470struct drm_local_map *drm_getsarea(struct drm_device *dev)
1620 ++order; 1471{
1472 struct drm_map_list *entry;
1621 1473
1622 return order; 1474 list_for_each_entry(entry, &dev->maplist, head) {
1475 if (entry->map && entry->map->type == _DRM_SHM &&
1476 (entry->map->flags & _DRM_CONTAINS_LOCK)) {
1477 return entry->map;
1478 }
1479 }
1480 return NULL;
1623} 1481}
1624EXPORT_SYMBOL(drm_order); 1482EXPORT_SYMBOL(drm_getsarea);
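With drm_order() removed and the DMA ioctl no longer patched into the dispatch table at runtime, a legacy (UMS) driver now reaches its hardware through the common drm_dma_ioctl() entry point added above, which forwards to dev->driver->dma_ioctl. A minimal sketch of the driver side, assuming a hypothetical driver named mydrv (none of these names come from the patch):

	/* Sketch only: hooking the forwarded DMA ioctl in a legacy driver.
	 * drm_dma_ioctl() rejects KMS (DRIVER_MODESET) devices and then calls
	 * this hook; the driver no longer overrides the ioctl table entry. */
	static int mydrv_dma_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv)
	{
		struct drm_dma *d = data;	/* DRM_IOCTL_DMA argument */

		/* ... validate d->send_count / d->request_count and queue buffers ... */
		return 0;
	}

	static struct drm_driver mydrv_driver = {
		.driver_features = DRIVER_HAVE_DMA,
		.dma_ioctl	 = mydrv_dma_ioctl,
		/* ... fops, name, ioctl table, etc. ... */
	};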
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 725968d38976..b4fb86d89850 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -42,10 +42,6 @@
42 42
43#include <drm/drmP.h> 43#include <drm/drmP.h>
44 44
45/******************************************************************/
46/** \name Context bitmap support */
47/*@{*/
48
49/** 45/**
50 * Free a handle from the context bitmap. 46 * Free a handle from the context bitmap.
51 * 47 *
@@ -56,13 +52,48 @@
56 * in drm_device::ctx_idr, while holding the drm_device::struct_mutex 52 * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
57 * lock. 53 * lock.
58 */ 54 */
59void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle) 55static void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
60{ 56{
57 if (drm_core_check_feature(dev, DRIVER_MODESET))
58 return;
59
61 mutex_lock(&dev->struct_mutex); 60 mutex_lock(&dev->struct_mutex);
62 idr_remove(&dev->ctx_idr, ctx_handle); 61 idr_remove(&dev->ctx_idr, ctx_handle);
63 mutex_unlock(&dev->struct_mutex); 62 mutex_unlock(&dev->struct_mutex);
64} 63}
65 64
65/******************************************************************/
66/** \name Context bitmap support */
67/*@{*/
68
69void drm_legacy_ctxbitmap_release(struct drm_device *dev,
70 struct drm_file *file_priv)
71{
72 if (drm_core_check_feature(dev, DRIVER_MODESET))
73 return;
74
75 mutex_lock(&dev->ctxlist_mutex);
76 if (!list_empty(&dev->ctxlist)) {
77 struct drm_ctx_list *pos, *n;
78
79 list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
80 if (pos->tag == file_priv &&
81 pos->handle != DRM_KERNEL_CONTEXT) {
82 if (dev->driver->context_dtor)
83 dev->driver->context_dtor(dev,
84 pos->handle);
85
86 drm_ctxbitmap_free(dev, pos->handle);
87
88 list_del(&pos->head);
89 kfree(pos);
90 --dev->ctx_count;
91 }
92 }
93 }
94 mutex_unlock(&dev->ctxlist_mutex);
95}
96
66/** 97/**
67 * Context bitmap allocation. 98 * Context bitmap allocation.
68 * 99 *
@@ -90,10 +121,12 @@ static int drm_ctxbitmap_next(struct drm_device * dev)
90 * 121 *
91 * Initialise the drm_device::ctx_idr 122 * Initialise the drm_device::ctx_idr
92 */ 123 */
93int drm_ctxbitmap_init(struct drm_device * dev) 124void drm_legacy_ctxbitmap_init(struct drm_device * dev)
94{ 125{
126 if (drm_core_check_feature(dev, DRIVER_MODESET))
127 return;
128
95 idr_init(&dev->ctx_idr); 129 idr_init(&dev->ctx_idr);
96 return 0;
97} 130}
98 131
99/** 132/**
@@ -104,7 +137,7 @@ int drm_ctxbitmap_init(struct drm_device * dev)
104 * Free all idr members using drm_ctx_sarea_free helper function 137 * Free all idr members using drm_ctx_sarea_free helper function
105 * while holding the drm_device::struct_mutex lock. 138 * while holding the drm_device::struct_mutex lock.
106 */ 139 */
107void drm_ctxbitmap_cleanup(struct drm_device * dev) 140void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
108{ 141{
109 mutex_lock(&dev->struct_mutex); 142 mutex_lock(&dev->struct_mutex);
110 idr_destroy(&dev->ctx_idr); 143 idr_destroy(&dev->ctx_idr);
@@ -136,6 +169,9 @@ int drm_getsareactx(struct drm_device *dev, void *data,
136 struct drm_local_map *map; 169 struct drm_local_map *map;
137 struct drm_map_list *_entry; 170 struct drm_map_list *_entry;
138 171
172 if (drm_core_check_feature(dev, DRIVER_MODESET))
173 return -EINVAL;
174
139 mutex_lock(&dev->struct_mutex); 175 mutex_lock(&dev->struct_mutex);
140 176
141 map = idr_find(&dev->ctx_idr, request->ctx_id); 177 map = idr_find(&dev->ctx_idr, request->ctx_id);
@@ -180,6 +216,9 @@ int drm_setsareactx(struct drm_device *dev, void *data,
180 struct drm_local_map *map = NULL; 216 struct drm_local_map *map = NULL;
181 struct drm_map_list *r_list = NULL; 217 struct drm_map_list *r_list = NULL;
182 218
219 if (drm_core_check_feature(dev, DRIVER_MODESET))
220 return -EINVAL;
221
183 mutex_lock(&dev->struct_mutex); 222 mutex_lock(&dev->struct_mutex);
184 list_for_each_entry(r_list, &dev->maplist, head) { 223 list_for_each_entry(r_list, &dev->maplist, head) {
185 if (r_list->map 224 if (r_list->map
@@ -251,7 +290,6 @@ static int drm_context_switch_complete(struct drm_device *dev,
251 struct drm_file *file_priv, int new) 290 struct drm_file *file_priv, int new)
252{ 291{
253 dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ 292 dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
254 dev->last_switch = jiffies;
255 293
256 if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) { 294 if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
257 DRM_ERROR("Lock isn't held after context switch\n"); 295 DRM_ERROR("Lock isn't held after context switch\n");
@@ -261,7 +299,6 @@ static int drm_context_switch_complete(struct drm_device *dev,
261 when the kernel holds the lock, release 299 when the kernel holds the lock, release
262 that lock here. */ 300 that lock here. */
263 clear_bit(0, &dev->context_flag); 301 clear_bit(0, &dev->context_flag);
264 wake_up(&dev->context_wait);
265 302
266 return 0; 303 return 0;
267} 304}
@@ -282,6 +319,9 @@ int drm_resctx(struct drm_device *dev, void *data,
282 struct drm_ctx ctx; 319 struct drm_ctx ctx;
283 int i; 320 int i;
284 321
322 if (drm_core_check_feature(dev, DRIVER_MODESET))
323 return -EINVAL;
324
285 if (res->count >= DRM_RESERVED_CONTEXTS) { 325 if (res->count >= DRM_RESERVED_CONTEXTS) {
286 memset(&ctx, 0, sizeof(ctx)); 326 memset(&ctx, 0, sizeof(ctx));
287 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { 327 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
@@ -312,6 +352,9 @@ int drm_addctx(struct drm_device *dev, void *data,
312 struct drm_ctx_list *ctx_entry; 352 struct drm_ctx_list *ctx_entry;
313 struct drm_ctx *ctx = data; 353 struct drm_ctx *ctx = data;
314 354
355 if (drm_core_check_feature(dev, DRIVER_MODESET))
356 return -EINVAL;
357
315 ctx->handle = drm_ctxbitmap_next(dev); 358 ctx->handle = drm_ctxbitmap_next(dev);
316 if (ctx->handle == DRM_KERNEL_CONTEXT) { 359 if (ctx->handle == DRM_KERNEL_CONTEXT) {
317 /* Skip kernel's context and get a new one. */ 360 /* Skip kernel's context and get a new one. */
@@ -342,12 +385,6 @@ int drm_addctx(struct drm_device *dev, void *data,
342 return 0; 385 return 0;
343} 386}
344 387
345int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
346{
347 /* This does nothing */
348 return 0;
349}
350
351/** 388/**
352 * Get context. 389 * Get context.
353 * 390 *
@@ -361,6 +398,9 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
361{ 398{
362 struct drm_ctx *ctx = data; 399 struct drm_ctx *ctx = data;
363 400
401 if (drm_core_check_feature(dev, DRIVER_MODESET))
402 return -EINVAL;
403
364 /* This is 0, because we don't handle any context flags */ 404 /* This is 0, because we don't handle any context flags */
365 ctx->flags = 0; 405 ctx->flags = 0;
366 406
@@ -383,6 +423,9 @@ int drm_switchctx(struct drm_device *dev, void *data,
383{ 423{
384 struct drm_ctx *ctx = data; 424 struct drm_ctx *ctx = data;
385 425
426 if (drm_core_check_feature(dev, DRIVER_MODESET))
427 return -EINVAL;
428
386 DRM_DEBUG("%d\n", ctx->handle); 429 DRM_DEBUG("%d\n", ctx->handle);
387 return drm_context_switch(dev, dev->last_context, ctx->handle); 430 return drm_context_switch(dev, dev->last_context, ctx->handle);
388} 431}
@@ -403,6 +446,9 @@ int drm_newctx(struct drm_device *dev, void *data,
403{ 446{
404 struct drm_ctx *ctx = data; 447 struct drm_ctx *ctx = data;
405 448
449 if (drm_core_check_feature(dev, DRIVER_MODESET))
450 return -EINVAL;
451
406 DRM_DEBUG("%d\n", ctx->handle); 452 DRM_DEBUG("%d\n", ctx->handle);
407 drm_context_switch_complete(dev, file_priv, ctx->handle); 453 drm_context_switch_complete(dev, file_priv, ctx->handle);
408 454
@@ -425,6 +471,9 @@ int drm_rmctx(struct drm_device *dev, void *data,
425{ 471{
426 struct drm_ctx *ctx = data; 472 struct drm_ctx *ctx = data;
427 473
474 if (drm_core_check_feature(dev, DRIVER_MODESET))
475 return -EINVAL;
476
428 DRM_DEBUG("%d\n", ctx->handle); 477 DRM_DEBUG("%d\n", ctx->handle);
429 if (ctx->handle != DRM_KERNEL_CONTEXT) { 478 if (ctx->handle != DRM_KERNEL_CONTEXT) {
430 if (dev->driver->context_dtor) 479 if (dev->driver->context_dtor)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index fc83bb9eb514..bff2fa941f60 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -125,13 +125,6 @@ static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
125 { DRM_MODE_SCALE_ASPECT, "Full aspect" }, 125 { DRM_MODE_SCALE_ASPECT, "Full aspect" },
126}; 126};
127 127
128static const struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
129{
130 { DRM_MODE_DITHERING_OFF, "Off" },
131 { DRM_MODE_DITHERING_ON, "On" },
132 { DRM_MODE_DITHERING_AUTO, "Automatic" },
133};
134
135/* 128/*
136 * Non-global properties, but "required" for certain connectors. 129 * Non-global properties, but "required" for certain connectors.
137 */ 130 */
@@ -186,29 +179,29 @@ static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
186struct drm_conn_prop_enum_list { 179struct drm_conn_prop_enum_list {
187 int type; 180 int type;
188 const char *name; 181 const char *name;
189 int count; 182 struct ida ida;
190}; 183};
191 184
192/* 185/*
193 * Connector and encoder types. 186 * Connector and encoder types.
194 */ 187 */
195static struct drm_conn_prop_enum_list drm_connector_enum_list[] = 188static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
196{ { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 }, 189{ { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
197 { DRM_MODE_CONNECTOR_VGA, "VGA", 0 }, 190 { DRM_MODE_CONNECTOR_VGA, "VGA" },
198 { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 }, 191 { DRM_MODE_CONNECTOR_DVII, "DVI-I" },
199 { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 }, 192 { DRM_MODE_CONNECTOR_DVID, "DVI-D" },
200 { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 }, 193 { DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
201 { DRM_MODE_CONNECTOR_Composite, "Composite", 0 }, 194 { DRM_MODE_CONNECTOR_Composite, "Composite" },
202 { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 }, 195 { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" },
203 { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 }, 196 { DRM_MODE_CONNECTOR_LVDS, "LVDS" },
204 { DRM_MODE_CONNECTOR_Component, "Component", 0 }, 197 { DRM_MODE_CONNECTOR_Component, "Component" },
205 { DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 }, 198 { DRM_MODE_CONNECTOR_9PinDIN, "DIN" },
206 { DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 }, 199 { DRM_MODE_CONNECTOR_DisplayPort, "DP" },
207 { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 }, 200 { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
208 { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 }, 201 { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
209 { DRM_MODE_CONNECTOR_TV, "TV", 0 }, 202 { DRM_MODE_CONNECTOR_TV, "TV" },
210 { DRM_MODE_CONNECTOR_eDP, "eDP", 0 }, 203 { DRM_MODE_CONNECTOR_eDP, "eDP" },
211 { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0}, 204 { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
212}; 205};
213 206
214static const struct drm_prop_enum_list drm_encoder_enum_list[] = 207static const struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -220,6 +213,22 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
220 { DRM_MODE_ENCODER_VIRTUAL, "Virtual" }, 213 { DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
221}; 214};
222 215
216void drm_connector_ida_init(void)
217{
218 int i;
219
220 for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
221 ida_init(&drm_connector_enum_list[i].ida);
222}
223
224void drm_connector_ida_destroy(void)
225{
226 int i;
227
228 for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
229 ida_destroy(&drm_connector_enum_list[i].ida);
230}
231
223const char *drm_get_encoder_name(const struct drm_encoder *encoder) 232const char *drm_get_encoder_name(const struct drm_encoder *encoder)
224{ 233{
225 static char buf[32]; 234 static char buf[32];
@@ -677,20 +686,19 @@ void drm_mode_probed_add(struct drm_connector *connector,
677} 686}
678EXPORT_SYMBOL(drm_mode_probed_add); 687EXPORT_SYMBOL(drm_mode_probed_add);
679 688
680/** 689/*
681 * drm_mode_remove - remove and free a mode 690 * drm_mode_remove - remove and free a mode
682 * @connector: connector list to modify 691 * @connector: connector list to modify
683 * @mode: mode to remove 692 * @mode: mode to remove
684 * 693 *
685 * Remove @mode from @connector's mode list, then free it. 694 * Remove @mode from @connector's mode list, then free it.
686 */ 695 */
687void drm_mode_remove(struct drm_connector *connector, 696static void drm_mode_remove(struct drm_connector *connector,
688 struct drm_display_mode *mode) 697 struct drm_display_mode *mode)
689{ 698{
690 list_del(&mode->head); 699 list_del(&mode->head);
691 drm_mode_destroy(connector->dev, mode); 700 drm_mode_destroy(connector->dev, mode);
692} 701}
693EXPORT_SYMBOL(drm_mode_remove);
694 702
695/** 703/**
696 * drm_connector_init - Init a preallocated connector 704 * drm_connector_init - Init a preallocated connector
@@ -711,6 +719,8 @@ int drm_connector_init(struct drm_device *dev,
711 int connector_type) 719 int connector_type)
712{ 720{
713 int ret; 721 int ret;
722 struct ida *connector_ida =
723 &drm_connector_enum_list[connector_type].ida;
714 724
715 drm_modeset_lock_all(dev); 725 drm_modeset_lock_all(dev);
716 726
@@ -723,7 +733,12 @@ int drm_connector_init(struct drm_device *dev,
723 connector->funcs = funcs; 733 connector->funcs = funcs;
724 connector->connector_type = connector_type; 734 connector->connector_type = connector_type;
725 connector->connector_type_id = 735 connector->connector_type_id =
726 ++drm_connector_enum_list[connector_type].count; /* TODO */ 736 ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
737 if (connector->connector_type_id < 0) {
738 ret = connector->connector_type_id;
739 drm_mode_object_put(dev, &connector->base);
740 goto out;
741 }
727 INIT_LIST_HEAD(&connector->probed_modes); 742 INIT_LIST_HEAD(&connector->probed_modes);
728 INIT_LIST_HEAD(&connector->modes); 743 INIT_LIST_HEAD(&connector->modes);
729 connector->edid_blob_ptr = NULL; 744 connector->edid_blob_ptr = NULL;
@@ -764,6 +779,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
764 list_for_each_entry_safe(mode, t, &connector->modes, head) 779 list_for_each_entry_safe(mode, t, &connector->modes, head)
765 drm_mode_remove(connector, mode); 780 drm_mode_remove(connector, mode);
766 781
782 ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
783 connector->connector_type_id);
784
767 drm_mode_object_put(dev, &connector->base); 785 drm_mode_object_put(dev, &connector->base);
768 list_del(&connector->head); 786 list_del(&connector->head);
769 dev->mode_config.num_connector--; 787 dev->mode_config.num_connector--;
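The connector_type_id that used to come from an ever-growing per-type counter is now handed out by a per-type ida in drm_connector_init() and released again in drm_connector_cleanup(), so names like "HDMI-A-1" can be reused across unplug/replug cycles instead of incrementing forever. A minimal sketch of the pairing from a driver's point of view (the my_* names are hypothetical):

	/* Sketch only: the type id behind the "<type>-<id>" connector name is
	 * ida-backed now; init allocates it, cleanup returns it. */
	static int my_register_connector(struct drm_device *dev,
					 struct drm_connector *connector,
					 const struct drm_connector_funcs *funcs)
	{
		/* ida_simple_get() inside picks the lowest free id for HDMI-A */
		return drm_connector_init(dev, connector, funcs,
					  DRM_MODE_CONNECTOR_HDMIA);
	}

	static void my_unregister_connector(struct drm_connector *connector)
	{
		/* ida_remove() inside frees the id for the next hotplug */
		drm_connector_cleanup(connector);
	}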
@@ -781,6 +799,41 @@ void drm_connector_unplug_all(struct drm_device *dev)
781} 799}
782EXPORT_SYMBOL(drm_connector_unplug_all); 800EXPORT_SYMBOL(drm_connector_unplug_all);
783 801
802int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
803 const struct drm_bridge_funcs *funcs)
804{
805 int ret;
806
807 drm_modeset_lock_all(dev);
808
809 ret = drm_mode_object_get(dev, &bridge->base, DRM_MODE_OBJECT_BRIDGE);
810 if (ret)
811 goto out;
812
813 bridge->dev = dev;
814 bridge->funcs = funcs;
815
816 list_add_tail(&bridge->head, &dev->mode_config.bridge_list);
817 dev->mode_config.num_bridge++;
818
819 out:
820 drm_modeset_unlock_all(dev);
821 return ret;
822}
823EXPORT_SYMBOL(drm_bridge_init);
824
825void drm_bridge_cleanup(struct drm_bridge *bridge)
826{
827 struct drm_device *dev = bridge->dev;
828
829 drm_modeset_lock_all(dev);
830 drm_mode_object_put(dev, &bridge->base);
831 list_del(&bridge->head);
832 dev->mode_config.num_bridge--;
833 drm_modeset_unlock_all(dev);
834}
835EXPORT_SYMBOL(drm_bridge_cleanup);
836
784int drm_encoder_init(struct drm_device *dev, 837int drm_encoder_init(struct drm_device *dev,
785 struct drm_encoder *encoder, 838 struct drm_encoder *encoder,
786 const struct drm_encoder_funcs *funcs, 839 const struct drm_encoder_funcs *funcs,
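drm_bridge is a new mode object that a driver chains onto an encoder: allocate it, supply drm_bridge_funcs, register it with drm_bridge_init() and point encoder->bridge at it so the CRTC helpers (drm_crtc_helper.c below) can call the hooks around the encoder. A minimal sketch under those assumptions; every my_* name is hypothetical, and the no-op stubs matter because the helpers invoke disable/post_disable/pre_enable/enable without NULL checks:

	/* Sketch only: allocating and registering a bridge for an encoder. */
	static void my_bridge_nop(struct drm_bridge *bridge)
	{
		/* nothing to do for this illustrative bridge */
	}

	static void my_bridge_destroy(struct drm_bridge *bridge)
	{
		drm_bridge_cleanup(bridge);
		kfree(bridge);
	}

	static const struct drm_bridge_funcs my_bridge_funcs = {
		.pre_enable	= my_bridge_nop,	/* before encoder commit / dpms on */
		.enable		= my_bridge_nop,	/* after encoder commit / dpms on */
		.disable	= my_bridge_nop,	/* before encoder prepare / dpms off */
		.post_disable	= my_bridge_nop,	/* after encoder prepare / dpms off */
		.destroy	= my_bridge_destroy,	/* from drm_mode_config_cleanup() */
	};

	static int my_attach_bridge(struct drm_device *dev, struct drm_encoder *encoder)
	{
		struct drm_bridge *bridge;
		int ret;

		bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
		if (!bridge)
			return -ENOMEM;

		ret = drm_bridge_init(dev, bridge, &my_bridge_funcs);
		if (ret) {
			kfree(bridge);
			return ret;
		}

		encoder->bridge = bridge;
		return 0;
	}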
@@ -1135,30 +1188,6 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev)
1135EXPORT_SYMBOL(drm_mode_create_scaling_mode_property); 1188EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
1136 1189
1137/** 1190/**
1138 * drm_mode_create_dithering_property - create dithering property
1139 * @dev: DRM device
1140 *
1141 * Called by a driver the first time it's needed, must be attached to desired
1142 * connectors.
1143 */
1144int drm_mode_create_dithering_property(struct drm_device *dev)
1145{
1146 struct drm_property *dithering_mode;
1147
1148 if (dev->mode_config.dithering_mode_property)
1149 return 0;
1150
1151 dithering_mode =
1152 drm_property_create_enum(dev, 0, "dithering",
1153 drm_dithering_mode_enum_list,
1154 ARRAY_SIZE(drm_dithering_mode_enum_list));
1155 dev->mode_config.dithering_mode_property = dithering_mode;
1156
1157 return 0;
1158}
1159EXPORT_SYMBOL(drm_mode_create_dithering_property);
1160
1161/**
1162 * drm_mode_create_dirty_property - create dirty property 1191 * drm_mode_create_dirty_property - create dirty property
1163 * @dev: DRM device 1192 * @dev: DRM device
1164 * 1193 *
@@ -1190,6 +1219,7 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr
1190 total_objects += dev->mode_config.num_crtc; 1219 total_objects += dev->mode_config.num_crtc;
1191 total_objects += dev->mode_config.num_connector; 1220 total_objects += dev->mode_config.num_connector;
1192 total_objects += dev->mode_config.num_encoder; 1221 total_objects += dev->mode_config.num_encoder;
1222 total_objects += dev->mode_config.num_bridge;
1193 1223
1194 group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL); 1224 group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
1195 if (!group->id_list) 1225 if (!group->id_list)
@@ -1198,6 +1228,7 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr
1198 group->num_crtcs = 0; 1228 group->num_crtcs = 0;
1199 group->num_connectors = 0; 1229 group->num_connectors = 0;
1200 group->num_encoders = 0; 1230 group->num_encoders = 0;
1231 group->num_bridges = 0;
1201 return 0; 1232 return 0;
1202} 1233}
1203 1234
@@ -1207,6 +1238,7 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
1207 struct drm_crtc *crtc; 1238 struct drm_crtc *crtc;
1208 struct drm_encoder *encoder; 1239 struct drm_encoder *encoder;
1209 struct drm_connector *connector; 1240 struct drm_connector *connector;
1241 struct drm_bridge *bridge;
1210 int ret; 1242 int ret;
1211 1243
1212 if ((ret = drm_mode_group_init(dev, group))) 1244 if ((ret = drm_mode_group_init(dev, group)))
@@ -1223,6 +1255,11 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
1223 group->id_list[group->num_crtcs + group->num_encoders + 1255 group->id_list[group->num_crtcs + group->num_encoders +
1224 group->num_connectors++] = connector->base.id; 1256 group->num_connectors++] = connector->base.id;
1225 1257
1258 list_for_each_entry(bridge, &dev->mode_config.bridge_list, head)
1259 group->id_list[group->num_crtcs + group->num_encoders +
1260 group->num_connectors + group->num_bridges++] =
1261 bridge->base.id;
1262
1226 return 0; 1263 return 0;
1227} 1264}
1228EXPORT_SYMBOL(drm_mode_group_init_legacy_group); 1265EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
@@ -2604,10 +2641,22 @@ int drm_mode_getfb(struct drm_device *dev,
2604 r->depth = fb->depth; 2641 r->depth = fb->depth;
2605 r->bpp = fb->bits_per_pixel; 2642 r->bpp = fb->bits_per_pixel;
2606 r->pitch = fb->pitches[0]; 2643 r->pitch = fb->pitches[0];
2607 if (fb->funcs->create_handle) 2644 if (fb->funcs->create_handle) {
2608 ret = fb->funcs->create_handle(fb, file_priv, &r->handle); 2645 if (file_priv->is_master || capable(CAP_SYS_ADMIN)) {
2609 else 2646 ret = fb->funcs->create_handle(fb, file_priv,
2647 &r->handle);
2648 } else {
2649 /* GET_FB() is an unprivileged ioctl so we must not
2650 * return a buffer-handle to non-master processes! For
2651 * backwards-compatibility reasons, we cannot make
2652 * GET_FB() privileged, so just return an invalid handle
2653 * for non-masters. */
2654 r->handle = 0;
2655 ret = 0;
2656 }
2657 } else {
2610 ret = -ENODEV; 2658 ret = -ENODEV;
2659 }
2611 2660
2612 drm_framebuffer_unreference(fb); 2661 drm_framebuffer_unreference(fb);
2613 2662
@@ -3514,6 +3563,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
3514 page_flip->reserved != 0) 3563 page_flip->reserved != 0)
3515 return -EINVAL; 3564 return -EINVAL;
3516 3565
3566 if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
3567 return -EINVAL;
3568
3517 obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC); 3569 obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
3518 if (!obj) 3570 if (!obj)
3519 return -EINVAL; 3571 return -EINVAL;
@@ -3587,7 +3639,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
3587 } 3639 }
3588 3640
3589 old_fb = crtc->fb; 3641 old_fb = crtc->fb;
3590 ret = crtc->funcs->page_flip(crtc, fb, e); 3642 ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
3591 if (ret) { 3643 if (ret) {
3592 if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { 3644 if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
3593 spin_lock_irqsave(&dev->event_lock, flags); 3645 spin_lock_irqsave(&dev->event_lock, flags);
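Page flips gain a flags word here: the ioctl rejects DRM_MODE_PAGE_FLIP_ASYNC unless the driver has set mode_config.async_page_flip, and the flags are passed through to the crtc's page_flip hook. A minimal sketch of a driver opting in, assuming the hook signature simply grows a uint32_t flags parameter as the call above suggests (my_* names are hypothetical):

	/* Sketch only: accepting the new flags argument and advertising
	 * async flip support. */
	static int my_crtc_page_flip(struct drm_crtc *crtc,
				     struct drm_framebuffer *fb,
				     struct drm_pending_vblank_event *event,
				     uint32_t flags)
	{
		if (flags & DRM_MODE_PAGE_FLIP_ASYNC) {
			/* ... flip immediately, don't wait for vblank ... */
		} else {
			/* ... queue the flip for the next vblank ... */
		}
		/* ... arm 'event' so userspace gets its completion ... */
		return 0;
	}

	static const struct drm_crtc_funcs my_crtc_funcs = {
		/* ... set_config, destroy, ... */
		.page_flip = my_crtc_page_flip,
	};

	static void my_mode_config_init(struct drm_device *dev)
	{
		drm_mode_config_init(dev);
		/* without this, DRM_MODE_PAGE_FLIP_ASYNC is rejected with -EINVAL */
		dev->mode_config.async_page_flip = true;
	}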
@@ -3905,6 +3957,7 @@ void drm_mode_config_init(struct drm_device *dev)
3905 INIT_LIST_HEAD(&dev->mode_config.fb_list); 3957 INIT_LIST_HEAD(&dev->mode_config.fb_list);
3906 INIT_LIST_HEAD(&dev->mode_config.crtc_list); 3958 INIT_LIST_HEAD(&dev->mode_config.crtc_list);
3907 INIT_LIST_HEAD(&dev->mode_config.connector_list); 3959 INIT_LIST_HEAD(&dev->mode_config.connector_list);
3960 INIT_LIST_HEAD(&dev->mode_config.bridge_list);
3908 INIT_LIST_HEAD(&dev->mode_config.encoder_list); 3961 INIT_LIST_HEAD(&dev->mode_config.encoder_list);
3909 INIT_LIST_HEAD(&dev->mode_config.property_list); 3962 INIT_LIST_HEAD(&dev->mode_config.property_list);
3910 INIT_LIST_HEAD(&dev->mode_config.property_blob_list); 3963 INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
@@ -3941,6 +3994,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
3941 struct drm_connector *connector, *ot; 3994 struct drm_connector *connector, *ot;
3942 struct drm_crtc *crtc, *ct; 3995 struct drm_crtc *crtc, *ct;
3943 struct drm_encoder *encoder, *enct; 3996 struct drm_encoder *encoder, *enct;
3997 struct drm_bridge *bridge, *brt;
3944 struct drm_framebuffer *fb, *fbt; 3998 struct drm_framebuffer *fb, *fbt;
3945 struct drm_property *property, *pt; 3999 struct drm_property *property, *pt;
3946 struct drm_property_blob *blob, *bt; 4000 struct drm_property_blob *blob, *bt;
@@ -3951,6 +4005,11 @@ void drm_mode_config_cleanup(struct drm_device *dev)
3951 encoder->funcs->destroy(encoder); 4005 encoder->funcs->destroy(encoder);
3952 } 4006 }
3953 4007
4008 list_for_each_entry_safe(bridge, brt,
4009 &dev->mode_config.bridge_list, head) {
4010 bridge->funcs->destroy(bridge);
4011 }
4012
3954 list_for_each_entry_safe(connector, ot, 4013 list_for_each_entry_safe(connector, ot,
3955 &dev->mode_config.connector_list, head) { 4014 &dev->mode_config.connector_list, head) {
3956 connector->funcs->destroy(connector); 4015 connector->funcs->destroy(connector);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 6a647493ca7f..c722c3b5404d 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -257,10 +257,16 @@ drm_encoder_disable(struct drm_encoder *encoder)
257{ 257{
258 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 258 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
259 259
260 if (encoder->bridge)
261 encoder->bridge->funcs->disable(encoder->bridge);
262
260 if (encoder_funcs->disable) 263 if (encoder_funcs->disable)
261 (*encoder_funcs->disable)(encoder); 264 (*encoder_funcs->disable)(encoder);
262 else 265 else
263 (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); 266 (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
267
268 if (encoder->bridge)
269 encoder->bridge->funcs->post_disable(encoder->bridge);
264} 270}
265 271
266/** 272/**
@@ -424,6 +430,16 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
424 430
425 if (encoder->crtc != crtc) 431 if (encoder->crtc != crtc)
426 continue; 432 continue;
433
434 if (encoder->bridge && encoder->bridge->funcs->mode_fixup) {
435 ret = encoder->bridge->funcs->mode_fixup(
436 encoder->bridge, mode, adjusted_mode);
437 if (!ret) {
438 DRM_DEBUG_KMS("Bridge fixup failed\n");
439 goto done;
440 }
441 }
442
427 encoder_funcs = encoder->helper_private; 443 encoder_funcs = encoder->helper_private;
428 if (!(ret = encoder_funcs->mode_fixup(encoder, mode, 444 if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
429 adjusted_mode))) { 445 adjusted_mode))) {
@@ -443,9 +459,16 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
443 459
444 if (encoder->crtc != crtc) 460 if (encoder->crtc != crtc)
445 continue; 461 continue;
462
463 if (encoder->bridge)
464 encoder->bridge->funcs->disable(encoder->bridge);
465
446 encoder_funcs = encoder->helper_private; 466 encoder_funcs = encoder->helper_private;
447 /* Disable the encoders as the first thing we do. */ 467 /* Disable the encoders as the first thing we do. */
448 encoder_funcs->prepare(encoder); 468 encoder_funcs->prepare(encoder);
469
470 if (encoder->bridge)
471 encoder->bridge->funcs->post_disable(encoder->bridge);
449 } 472 }
450 473
451 drm_crtc_prepare_encoders(dev); 474 drm_crtc_prepare_encoders(dev);
@@ -469,6 +492,10 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
469 mode->base.id, mode->name); 492 mode->base.id, mode->name);
470 encoder_funcs = encoder->helper_private; 493 encoder_funcs = encoder->helper_private;
471 encoder_funcs->mode_set(encoder, mode, adjusted_mode); 494 encoder_funcs->mode_set(encoder, mode, adjusted_mode);
495
496 if (encoder->bridge && encoder->bridge->funcs->mode_set)
497 encoder->bridge->funcs->mode_set(encoder->bridge, mode,
498 adjusted_mode);
472 } 499 }
473 500
474 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 501 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -479,9 +506,14 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
479 if (encoder->crtc != crtc) 506 if (encoder->crtc != crtc)
480 continue; 507 continue;
481 508
509 if (encoder->bridge)
510 encoder->bridge->funcs->pre_enable(encoder->bridge);
511
482 encoder_funcs = encoder->helper_private; 512 encoder_funcs = encoder->helper_private;
483 encoder_funcs->commit(encoder); 513 encoder_funcs->commit(encoder);
484 514
515 if (encoder->bridge)
516 encoder->bridge->funcs->enable(encoder->bridge);
485 } 517 }
486 518
487 /* Store real post-adjustment hardware mode. */ 519 /* Store real post-adjustment hardware mode. */
@@ -830,6 +862,31 @@ static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
830 return dpms; 862 return dpms;
831} 863}
832 864
865/* Helper which handles bridge ordering around encoder dpms */
866static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode)
867{
868 struct drm_bridge *bridge = encoder->bridge;
869 struct drm_encoder_helper_funcs *encoder_funcs;
870
871 if (bridge) {
872 if (mode == DRM_MODE_DPMS_ON)
873 bridge->funcs->pre_enable(bridge);
874 else
875 bridge->funcs->disable(bridge);
876 }
877
878 encoder_funcs = encoder->helper_private;
879 if (encoder_funcs->dpms)
880 encoder_funcs->dpms(encoder, mode);
881
882 if (bridge) {
883 if (mode == DRM_MODE_DPMS_ON)
884 bridge->funcs->enable(bridge);
885 else
886 bridge->funcs->post_disable(bridge);
887 }
888}
889
833static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc) 890static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
834{ 891{
835 int dpms = DRM_MODE_DPMS_OFF; 892 int dpms = DRM_MODE_DPMS_OFF;
@@ -857,7 +914,7 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
857{ 914{
858 struct drm_encoder *encoder = connector->encoder; 915 struct drm_encoder *encoder = connector->encoder;
859 struct drm_crtc *crtc = encoder ? encoder->crtc : NULL; 916 struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
860 int old_dpms; 917 int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF;
861 918
862 if (mode == connector->dpms) 919 if (mode == connector->dpms)
863 return; 920 return;
@@ -865,6 +922,9 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
865 old_dpms = connector->dpms; 922 old_dpms = connector->dpms;
866 connector->dpms = mode; 923 connector->dpms = mode;
867 924
925 if (encoder)
926 encoder_dpms = drm_helper_choose_encoder_dpms(encoder);
927
868 /* from off to on, do crtc then encoder */ 928 /* from off to on, do crtc then encoder */
869 if (mode < old_dpms) { 929 if (mode < old_dpms) {
870 if (crtc) { 930 if (crtc) {
@@ -873,22 +933,14 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
873 (*crtc_funcs->dpms) (crtc, 933 (*crtc_funcs->dpms) (crtc,
874 drm_helper_choose_crtc_dpms(crtc)); 934 drm_helper_choose_crtc_dpms(crtc));
875 } 935 }
876 if (encoder) { 936 if (encoder)
877 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 937 drm_helper_encoder_dpms(encoder, encoder_dpms);
878 if (encoder_funcs->dpms)
879 (*encoder_funcs->dpms) (encoder,
880 drm_helper_choose_encoder_dpms(encoder));
881 }
882 } 938 }
883 939
884 /* from on to off, do encoder then crtc */ 940 /* from on to off, do encoder then crtc */
885 if (mode > old_dpms) { 941 if (mode > old_dpms) {
886 if (encoder) { 942 if (encoder)
887 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 943 drm_helper_encoder_dpms(encoder, encoder_dpms);
888 if (encoder_funcs->dpms)
889 (*encoder_funcs->dpms) (encoder,
890 drm_helper_choose_encoder_dpms(encoder));
891 }
892 if (crtc) { 944 if (crtc) {
893 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 945 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
894 if (crtc_funcs->dpms) 946 if (crtc_funcs->dpms)
@@ -924,9 +976,8 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
924{ 976{
925 struct drm_crtc *crtc; 977 struct drm_crtc *crtc;
926 struct drm_encoder *encoder; 978 struct drm_encoder *encoder;
927 struct drm_encoder_helper_funcs *encoder_funcs;
928 struct drm_crtc_helper_funcs *crtc_funcs; 979 struct drm_crtc_helper_funcs *crtc_funcs;
929 int ret; 980 int ret, encoder_dpms;
930 981
931 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 982 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
932 983
@@ -946,10 +997,10 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
946 if(encoder->crtc != crtc) 997 if(encoder->crtc != crtc)
947 continue; 998 continue;
948 999
949 encoder_funcs = encoder->helper_private; 1000 encoder_dpms = drm_helper_choose_encoder_dpms(
950 if (encoder_funcs->dpms) 1001 encoder);
951 (*encoder_funcs->dpms) (encoder, 1002
952 drm_helper_choose_encoder_dpms(encoder)); 1003 drm_helper_encoder_dpms(encoder, encoder_dpms);
953 } 1004 }
954 1005
955 crtc_funcs = crtc->helper_private; 1006 crtc_funcs = crtc->helper_private;
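Taken together, the drm_crtc_helper.c hunks above give a bridge a fixed place in the power and modeset sequence relative to its encoder. The summary below only restates the ordering enforced by drm_crtc_helper_set_mode() and drm_helper_encoder_dpms() above:

	/*
	 * power up / enable:
	 *	bridge->funcs->pre_enable(bridge);
	 *	encoder commit / dpms(DRM_MODE_DPMS_ON);
	 *	bridge->funcs->enable(bridge);
	 *
	 * power down / disable:
	 *	bridge->funcs->disable(bridge);
	 *	encoder prepare / dpms(DRM_MODE_DPMS_OFF);
	 *	bridge->funcs->post_disable(bridge);
	 *
	 * mode programming:
	 *	bridge->funcs->mode_fixup() is asked before the encoder's own
	 *	mode_fixup(), and bridge->funcs->mode_set() runs after the
	 *	encoder's mode_set().
	 */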
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index 495b5fd2787c..8a140a953754 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -44,10 +44,18 @@
44 * 44 *
45 * Allocate and initialize a drm_device_dma structure. 45 * Allocate and initialize a drm_device_dma structure.
46 */ 46 */
47int drm_dma_setup(struct drm_device *dev) 47int drm_legacy_dma_setup(struct drm_device *dev)
48{ 48{
49 int i; 49 int i;
50 50
51 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
52 drm_core_check_feature(dev, DRIVER_MODESET)) {
53 return 0;
54 }
55
56 dev->buf_use = 0;
57 atomic_set(&dev->buf_alloc, 0);
58
51 dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL); 59 dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
52 if (!dev->dma) 60 if (!dev->dma)
53 return -ENOMEM; 61 return -ENOMEM;
@@ -66,11 +74,16 @@ int drm_dma_setup(struct drm_device *dev)
66 * Free all pages associated with DMA buffers, the buffers and pages lists, and 74 * Free all pages associated with DMA buffers, the buffers and pages lists, and
67 * finally the drm_device::dma structure itself. 75 * finally the drm_device::dma structure itself.
68 */ 76 */
69void drm_dma_takedown(struct drm_device *dev) 77void drm_legacy_dma_takedown(struct drm_device *dev)
70{ 78{
71 struct drm_device_dma *dma = dev->dma; 79 struct drm_device_dma *dma = dev->dma;
72 int i, j; 80 int i, j;
73 81
82 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
83 drm_core_check_feature(dev, DRIVER_MODESET)) {
84 return;
85 }
86
74 if (!dma) 87 if (!dma)
75 return; 88 return;
76 89
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 99fcd7c32ea2..e572dd20bdee 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -68,7 +68,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
68 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED), 68 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
69 DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED), 69 DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
70 DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED), 70 DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
71 DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED), 71 DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
72 DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER), 72 DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
73 73
74 DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 74 DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -87,7 +87,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
87 87
88 DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY), 88 DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
89 DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 89 DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
90 DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 90 DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
91 DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH), 91 DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
92 DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 92 DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
93 DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 93 DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -106,8 +106,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
106 DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH), 106 DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
107 DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH), 107 DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
108 DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH), 108 DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
109 /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */ 109 DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH),
110 DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
111 110
112 DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 111 DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
113 112
@@ -122,7 +121,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
122 DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 121 DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
123#endif 122#endif
124 123
125 DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 124 DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
126 DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 125 DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
127 126
128 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED), 127 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
@@ -131,14 +130,14 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
131 130
132 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 131 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
133 132
134 DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED), 133 DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
135 DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED), 134 DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
136 DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), 135 DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
137 136
138 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 137 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
139 138
140 DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED), 139 DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
141 DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED), 140 DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
142 141
143 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 142 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
144 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 143 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
@@ -172,6 +171,31 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
172#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) 171#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
173 172
174/** 173/**
174 * drm_legacy_dev_reinit
175 *
 176 * Reinitializes a legacy/ums drm device in its lastclose function.
177 */
178static void drm_legacy_dev_reinit(struct drm_device *dev)
179{
180 int i;
181
182 if (drm_core_check_feature(dev, DRIVER_MODESET))
183 return;
184
185 atomic_set(&dev->ioctl_count, 0);
186 atomic_set(&dev->vma_count, 0);
187
188 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
189 atomic_set(&dev->counts[i], 0);
190
191 dev->sigdata.lock = NULL;
192
193 dev->context_flag = 0;
194 dev->last_context = 0;
195 dev->if_version = 0;
196}
197
198/**
175 * Take down the DRM device. 199 * Take down the DRM device.
176 * 200 *
177 * \param dev DRM device structure. 201 * \param dev DRM device structure.
@@ -195,32 +219,9 @@ int drm_lastclose(struct drm_device * dev)
195 219
196 mutex_lock(&dev->struct_mutex); 220 mutex_lock(&dev->struct_mutex);
197 221
198 /* Clear AGP information */ 222 drm_agp_clear(dev);
199 if (drm_core_has_AGP(dev) && dev->agp &&
200 !drm_core_check_feature(dev, DRIVER_MODESET)) {
201 struct drm_agp_mem *entry, *tempe;
202
203 /* Remove AGP resources, but leave dev->agp
204 intact until drv_cleanup is called. */
205 list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
206 if (entry->bound)
207 drm_unbind_agp(entry->memory);
208 drm_free_agp(entry->memory, entry->pages);
209 kfree(entry);
210 }
211 INIT_LIST_HEAD(&dev->agp->memory);
212 223
213 if (dev->agp->acquired) 224 drm_legacy_sg_cleanup(dev);
214 drm_agp_release(dev);
215
216 dev->agp->acquired = 0;
217 dev->agp->enabled = 0;
218 }
219 if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
220 !drm_core_check_feature(dev, DRIVER_MODESET)) {
221 drm_sg_cleanup(dev->sg);
222 dev->sg = NULL;
223 }
224 225
225 /* Clear vma list (only built for debugging) */ 226 /* Clear vma list (only built for debugging) */
226 list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) { 227 list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
@@ -228,13 +229,13 @@ int drm_lastclose(struct drm_device * dev)
228 kfree(vma); 229 kfree(vma);
229 } 230 }
230 231
231 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && 232 drm_legacy_dma_takedown(dev);
232 !drm_core_check_feature(dev, DRIVER_MODESET))
233 drm_dma_takedown(dev);
234 233
235 dev->dev_mapping = NULL; 234 dev->dev_mapping = NULL;
236 mutex_unlock(&dev->struct_mutex); 235 mutex_unlock(&dev->struct_mutex);
237 236
237 drm_legacy_dev_reinit(dev);
238
238 DRM_DEBUG("lastclose completed\n"); 239 DRM_DEBUG("lastclose completed\n");
239 return 0; 240 return 0;
240} 241}
@@ -251,6 +252,7 @@ static int __init drm_core_init(void)
251 int ret = -ENOMEM; 252 int ret = -ENOMEM;
252 253
253 drm_global_init(); 254 drm_global_init();
255 drm_connector_ida_init();
254 idr_init(&drm_minors_idr); 256 idr_init(&drm_minors_idr);
255 257
256 if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops)) 258 if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
@@ -263,13 +265,6 @@ static int __init drm_core_init(void)
263 goto err_p2; 265 goto err_p2;
264 } 266 }
265 267
266 drm_proc_root = proc_mkdir("dri", NULL);
267 if (!drm_proc_root) {
268 DRM_ERROR("Cannot create /proc/dri\n");
269 ret = -1;
270 goto err_p3;
271 }
272
273 drm_debugfs_root = debugfs_create_dir("dri", NULL); 268 drm_debugfs_root = debugfs_create_dir("dri", NULL);
274 if (!drm_debugfs_root) { 269 if (!drm_debugfs_root) {
275 DRM_ERROR("Cannot create /sys/kernel/debug/dri\n"); 270 DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
@@ -292,12 +287,12 @@ err_p1:
292 287
293static void __exit drm_core_exit(void) 288static void __exit drm_core_exit(void)
294{ 289{
295 remove_proc_entry("dri", NULL);
296 debugfs_remove(drm_debugfs_root); 290 debugfs_remove(drm_debugfs_root);
297 drm_sysfs_destroy(); 291 drm_sysfs_destroy();
298 292
299 unregister_chrdev(DRM_MAJOR, "drm"); 293 unregister_chrdev(DRM_MAJOR, "drm");
300 294
295 drm_connector_ida_destroy();
301 idr_destroy(&drm_minors_idr); 296 idr_destroy(&drm_minors_idr);
302} 297}
303 298
@@ -420,17 +415,15 @@ long drm_ioctl(struct file *filp,
420 415
421 /* Do not trust userspace, use our own definition */ 416 /* Do not trust userspace, use our own definition */
422 func = ioctl->func; 417 func = ioctl->func;
423 /* is there a local override? */
424 if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
425 func = dev->driver->dma_ioctl;
426 418
427 if (!func) { 419 if (!func) {
428 DRM_DEBUG("no function\n"); 420 DRM_DEBUG("no function\n");
429 retcode = -EINVAL; 421 retcode = -EINVAL;
430 } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) || 422 } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
431 ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) || 423 ((ioctl->flags & DRM_AUTH) && !drm_is_render_client(file_priv) && !file_priv->authenticated) ||
432 ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) || 424 ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) ||
433 (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL))) { 425 (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL)) ||
426 (!(ioctl->flags & DRM_RENDER_ALLOW) && drm_is_render_client(file_priv))) {
434 retcode = -EACCES; 427 retcode = -EACCES;
435 } else { 428 } else {
436 if (cmd & (IOC_IN | IOC_OUT)) { 429 if (cmd & (IOC_IN | IOC_OUT)) {
@@ -485,19 +478,4 @@ long drm_ioctl(struct file *filp,
485 DRM_DEBUG("ret = %d\n", retcode); 478 DRM_DEBUG("ret = %d\n", retcode);
486 return retcode; 479 return retcode;
487} 480}
488
489EXPORT_SYMBOL(drm_ioctl); 481EXPORT_SYMBOL(drm_ioctl);
490
491struct drm_local_map *drm_getsarea(struct drm_device *dev)
492{
493 struct drm_map_list *entry;
494
495 list_for_each_entry(entry, &dev->maplist, head) {
496 if (entry->map && entry->map->type == _DRM_SHM &&
497 (entry->map->flags & _DRM_CONTAINS_LOCK)) {
498 return entry->map;
499 }
500 }
501 return NULL;
502}
503EXPORT_SYMBOL(drm_getsarea);
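The drm_ioctl() hunk above folds render nodes into the permission check: a render-node client skips the legacy DRM_AUTH handshake, but can only reach ioctls explicitly marked DRM_RENDER_ALLOW. Restated as a standalone predicate purely for readability (a sketch that mirrors the hunk, not a separate kernel helper):

	/* Sketch only: returns true when drm_ioctl() must fail with -EACCES. */
	static bool drm_ioctl_denied(const struct drm_ioctl_desc *ioctl,
				     struct drm_file *file_priv)
	{
		if ((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN))
			return true;
		/* render clients are never "authenticated"; let them through here... */
		if ((ioctl->flags & DRM_AUTH) && !drm_is_render_client(file_priv) &&
		    !file_priv->authenticated)
			return true;
		if ((ioctl->flags & DRM_MASTER) && !file_priv->is_master)
			return true;
		if (!(ioctl->flags & DRM_CONTROL_ALLOW) &&
		    file_priv->minor->type == DRM_MINOR_CONTROL)
			return true;
		/* ...but only to ioctls that opted in with DRM_RENDER_ALLOW */
		if (!(ioctl->flags & DRM_RENDER_ALLOW) && drm_is_render_client(file_priv))
			return true;
		return false;
	}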
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 95d6f4b6967c..1688ff500513 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -125,6 +125,9 @@ static struct edid_quirk {
125 125
126 /* ViewSonic VA2026w */ 126 /* ViewSonic VA2026w */
127 { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING }, 127 { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
128
129 /* Medion MD 30217 PG */
130 { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
128}; 131};
129 132
130/* 133/*
@@ -931,6 +934,36 @@ static const struct drm_display_mode edid_cea_modes[] = {
931 .vrefresh = 100, }, 934 .vrefresh = 100, },
932}; 935};
933 936
937/*
938 * HDMI 1.4 4k modes.
939 */
940static const struct drm_display_mode edid_4k_modes[] = {
941 /* 1 - 3840x2160@30Hz */
942 { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
943 3840, 4016, 4104, 4400, 0,
944 2160, 2168, 2178, 2250, 0,
945 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
946 .vrefresh = 30, },
947 /* 2 - 3840x2160@25Hz */
948 { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
949 3840, 4896, 4984, 5280, 0,
950 2160, 2168, 2178, 2250, 0,
951 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
952 .vrefresh = 25, },
953 /* 3 - 3840x2160@24Hz */
954 { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
955 3840, 5116, 5204, 5500, 0,
956 2160, 2168, 2178, 2250, 0,
957 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
958 .vrefresh = 24, },
959 /* 4 - 4096x2160@24Hz (SMPTE) */
960 { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000,
961 4096, 5116, 5204, 5500, 0,
962 2160, 2168, 2178, 2250, 0,
963 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
964 .vrefresh = 24, },
965};
966
934/*** DDC fetch and block validation ***/ 967/*** DDC fetch and block validation ***/
935 968
936static const u8 edid_header[] = { 969static const u8 edid_header[] = {
@@ -2287,7 +2320,6 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
2287 return closure.modes; 2320 return closure.modes;
2288} 2321}
2289 2322
2290#define HDMI_IDENTIFIER 0x000C03
2291#define AUDIO_BLOCK 0x01 2323#define AUDIO_BLOCK 0x01
2292#define VIDEO_BLOCK 0x02 2324#define VIDEO_BLOCK 0x02
2293#define VENDOR_BLOCK 0x03 2325#define VENDOR_BLOCK 0x03
@@ -2298,10 +2330,10 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
2298#define EDID_CEA_YCRCB422 (1 << 4) 2330#define EDID_CEA_YCRCB422 (1 << 4)
2299#define EDID_CEA_VCDB_QS (1 << 6) 2331#define EDID_CEA_VCDB_QS (1 << 6)
2300 2332
2301/** 2333/*
2302 * Search EDID for CEA extension block. 2334 * Search EDID for CEA extension block.
2303 */ 2335 */
2304u8 *drm_find_cea_extension(struct edid *edid) 2336static u8 *drm_find_cea_extension(struct edid *edid)
2305{ 2337{
2306 u8 *edid_ext = NULL; 2338 u8 *edid_ext = NULL;
2307 int i; 2339 int i;
@@ -2322,7 +2354,6 @@ u8 *drm_find_cea_extension(struct edid *edid)
2322 2354
2323 return edid_ext; 2355 return edid_ext;
2324} 2356}
2325EXPORT_SYMBOL(drm_find_cea_extension);
2326 2357
2327/* 2358/*
2328 * Calculate the alternate clock for the CEA mode 2359 * Calculate the alternate clock for the CEA mode
@@ -2380,6 +2411,54 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
2380} 2411}
2381EXPORT_SYMBOL(drm_match_cea_mode); 2412EXPORT_SYMBOL(drm_match_cea_mode);
2382 2413
2414/*
2415 * Calculate the alternate clock for HDMI modes (those from the HDMI vendor
2416 * specific block).
2417 *
2418 * It's almost like cea_mode_alternate_clock(); we just need to add an
2419 * exception for the VIC 4 mode (4096x2160@24Hz): no alternate clock for this
2420 * one.
2421 */
2422static unsigned int
2423hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
2424{
2425	if (hdmi_mode->hdisplay == 4096 && hdmi_mode->vdisplay == 2160)
2426 return hdmi_mode->clock;
2427
2428 return cea_mode_alternate_clock(hdmi_mode);
2429}
2430
2431/*
2432 * drm_match_hdmi_mode - look for an HDMI mode matching a given mode
2433 * @to_match: display mode
2434 *
2435 * An HDMI mode is one defined in the HDMI vendor specific block.
2436 *
2437 * Returns the HDMI Video ID (VIC) of the mode or 0 if it isn't one.
2438 */
2439static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
2440{
2441 u8 mode;
2442
2443 if (!to_match->clock)
2444 return 0;
2445
2446 for (mode = 0; mode < ARRAY_SIZE(edid_4k_modes); mode++) {
2447 const struct drm_display_mode *hdmi_mode = &edid_4k_modes[mode];
2448 unsigned int clock1, clock2;
2449
2450 /* Make sure to also match alternate clocks */
2451 clock1 = hdmi_mode->clock;
2452 clock2 = hdmi_mode_alternate_clock(hdmi_mode);
2453
2454 if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
2455 KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
2456 drm_mode_equal_no_clocks(to_match, hdmi_mode))
2457 return mode + 1;
2458 }
2459 return 0;
2460}
2461
2383static int 2462static int
2384add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid) 2463add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
2385{ 2464{
@@ -2397,18 +2476,26 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
2397 * with the alternate clock for certain CEA modes. 2476 * with the alternate clock for certain CEA modes.
2398 */ 2477 */
2399 list_for_each_entry(mode, &connector->probed_modes, head) { 2478 list_for_each_entry(mode, &connector->probed_modes, head) {
2400 const struct drm_display_mode *cea_mode; 2479 const struct drm_display_mode *cea_mode = NULL;
2401 struct drm_display_mode *newmode; 2480 struct drm_display_mode *newmode;
2402 u8 cea_mode_idx = drm_match_cea_mode(mode) - 1; 2481 u8 mode_idx = drm_match_cea_mode(mode) - 1;
2403 unsigned int clock1, clock2; 2482 unsigned int clock1, clock2;
2404 2483
2405 if (cea_mode_idx >= ARRAY_SIZE(edid_cea_modes)) 2484 if (mode_idx < ARRAY_SIZE(edid_cea_modes)) {
2406 continue; 2485 cea_mode = &edid_cea_modes[mode_idx];
2486 clock2 = cea_mode_alternate_clock(cea_mode);
2487 } else {
2488 mode_idx = drm_match_hdmi_mode(mode) - 1;
2489 if (mode_idx < ARRAY_SIZE(edid_4k_modes)) {
2490 cea_mode = &edid_4k_modes[mode_idx];
2491 clock2 = hdmi_mode_alternate_clock(cea_mode);
2492 }
2493 }
2407 2494
2408 cea_mode = &edid_cea_modes[cea_mode_idx]; 2495 if (!cea_mode)
2496 continue;
2409 2497
2410 clock1 = cea_mode->clock; 2498 clock1 = cea_mode->clock;
2411 clock2 = cea_mode_alternate_clock(cea_mode);
2412 2499
2413 if (clock1 == clock2) 2500 if (clock1 == clock2)
2414 continue; 2501 continue;
@@ -2442,10 +2529,11 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
2442} 2529}
2443 2530
2444static int 2531static int
2445do_cea_modes (struct drm_connector *connector, u8 *db, u8 len) 2532do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
2446{ 2533{
2447 struct drm_device *dev = connector->dev; 2534 struct drm_device *dev = connector->dev;
2448 u8 * mode, cea_mode; 2535 const u8 *mode;
2536 u8 cea_mode;
2449 int modes = 0; 2537 int modes = 0;
2450 2538
2451 for (mode = db; mode < db + len; mode++) { 2539 for (mode = db; mode < db + len; mode++) {
@@ -2465,6 +2553,68 @@ do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
2465 return modes; 2553 return modes;
2466} 2554}
2467 2555
2556/*
2557 * do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
2558 * @connector: connector corresponding to the HDMI sink
2559 * @db: start of the CEA vendor specific block
2560 * @len: length of the CEA block payload, ie. one can access up to db[len]
2561 *
2562 * Parses the HDMI VSDB looking for modes to add to @connector.
2563 */
2564static int
2565do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
2566{
2567 struct drm_device *dev = connector->dev;
2568 int modes = 0, offset = 0, i;
2569 u8 vic_len;
2570
2571 if (len < 8)
2572 goto out;
2573
2574 /* no HDMI_Video_Present */
2575 if (!(db[8] & (1 << 5)))
2576 goto out;
2577
2578 /* Latency_Fields_Present */
2579 if (db[8] & (1 << 7))
2580 offset += 2;
2581
2582 /* I_Latency_Fields_Present */
2583 if (db[8] & (1 << 6))
2584 offset += 2;
2585
2586 /* the declared length is not long enough for the first 2 bytes
2587 * of additional video format capabilities */
2588 offset += 2;
2589 if (len < (8 + offset))
2590 goto out;
2591
2592 vic_len = db[8 + offset] >> 5;
2593
2594 for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
2595 struct drm_display_mode *newmode;
2596 u8 vic;
2597
2598 vic = db[9 + offset + i];
2599
2600 vic--; /* VICs start at 1 */
2601 if (vic >= ARRAY_SIZE(edid_4k_modes)) {
2602 DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
2603 continue;
2604 }
2605
2606 newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
2607 if (!newmode)
2608 continue;
2609
2610 drm_mode_probed_add(connector, newmode);
2611 modes++;
2612 }
2613
2614out:
2615 return modes;
2616}
2617
2468static int 2618static int
2469cea_db_payload_len(const u8 *db) 2619cea_db_payload_len(const u8 *db)
2470{ 2620{
@@ -2496,14 +2646,30 @@ cea_db_offsets(const u8 *cea, int *start, int *end)
2496 return 0; 2646 return 0;
2497} 2647}
2498 2648
2649static bool cea_db_is_hdmi_vsdb(const u8 *db)
2650{
2651 int hdmi_id;
2652
2653 if (cea_db_tag(db) != VENDOR_BLOCK)
2654 return false;
2655
2656 if (cea_db_payload_len(db) < 5)
2657 return false;
2658
2659 hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
2660
2661 return hdmi_id == HDMI_IEEE_OUI;
2662}
2663
2499#define for_each_cea_db(cea, i, start, end) \ 2664#define for_each_cea_db(cea, i, start, end) \
2500 for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1) 2665 for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
2501 2666
2502static int 2667static int
2503add_cea_modes(struct drm_connector *connector, struct edid *edid) 2668add_cea_modes(struct drm_connector *connector, struct edid *edid)
2504{ 2669{
2505 u8 * cea = drm_find_cea_extension(edid); 2670 const u8 *cea = drm_find_cea_extension(edid);
2506 u8 * db, dbl; 2671 const u8 *db;
2672 u8 dbl;
2507 int modes = 0; 2673 int modes = 0;
2508 2674
2509 if (cea && cea_revision(cea) >= 3) { 2675 if (cea && cea_revision(cea) >= 3) {
@@ -2517,7 +2683,9 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
2517 dbl = cea_db_payload_len(db); 2683 dbl = cea_db_payload_len(db);
2518 2684
2519 if (cea_db_tag(db) == VIDEO_BLOCK) 2685 if (cea_db_tag(db) == VIDEO_BLOCK)
2520 modes += do_cea_modes (connector, db+1, dbl); 2686 modes += do_cea_modes(connector, db + 1, dbl);
2687 else if (cea_db_is_hdmi_vsdb(db))
2688 modes += do_hdmi_vsdb_modes(connector, db, dbl);
2521 } 2689 }
2522 } 2690 }
2523 2691
@@ -2570,21 +2738,6 @@ monitor_name(struct detailed_timing *t, void *data)
2570 *(u8 **)data = t->data.other_data.data.str.str; 2738 *(u8 **)data = t->data.other_data.data.str.str;
2571} 2739}
2572 2740
2573static bool cea_db_is_hdmi_vsdb(const u8 *db)
2574{
2575 int hdmi_id;
2576
2577 if (cea_db_tag(db) != VENDOR_BLOCK)
2578 return false;
2579
2580 if (cea_db_payload_len(db) < 5)
2581 return false;
2582
2583 hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
2584
2585 return hdmi_id == HDMI_IDENTIFIER;
2586}
2587
2588/** 2741/**
2589 * drm_edid_to_eld - build ELD from EDID 2742 * drm_edid_to_eld - build ELD from EDID
2590 * @connector: connector corresponding to the HDMI/DP sink 2743 * @connector: connector corresponding to the HDMI/DP sink
@@ -2732,6 +2885,58 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
2732EXPORT_SYMBOL(drm_edid_to_sad); 2885EXPORT_SYMBOL(drm_edid_to_sad);
2733 2886
2734/** 2887/**
2888 * drm_edid_to_speaker_allocation - extracts Speaker Allocation Data Blocks from EDID
2889 * @edid: EDID to parse
2890 * @sadb: pointer to the speaker block
2891 *
2892 * Looks for CEA EDID block and extracts the Speaker Allocation Data Block from it.
2893 * Note: the returned pointer needs to be kfreed
2894 *
2895 * Returns the number of found Speaker Allocation Blocks or a negative number on error.
2896 */
2897int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
2898{
2899 int count = 0;
2900 int i, start, end, dbl;
2901 const u8 *cea;
2902
2903 cea = drm_find_cea_extension(edid);
2904 if (!cea) {
2905 DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
2906 return -ENOENT;
2907 }
2908
2909 if (cea_revision(cea) < 3) {
2910 DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
2911 return -ENOTSUPP;
2912 }
2913
2914 if (cea_db_offsets(cea, &start, &end)) {
2915 DRM_DEBUG_KMS("SAD: invalid data block offsets\n");
2916 return -EPROTO;
2917 }
2918
2919 for_each_cea_db(cea, i, start, end) {
2920 const u8 *db = &cea[i];
2921
2922 if (cea_db_tag(db) == SPEAKER_BLOCK) {
2923 dbl = cea_db_payload_len(db);
2924
2925 /* Speaker Allocation Data Block */
2926 if (dbl == 3) {
2927 *sadb = kmemdup(&db[1], dbl, GFP_KERNEL);
2928 if (*sadb)
2929 count = dbl;
2930 break;
2931 }
2932 }
2933 }
2934
2935 return count;
2936}
2937EXPORT_SYMBOL(drm_edid_to_speaker_allocation);
2938
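A brief caller sketch (illustration only, not part of the patch): an audio path would read the single allocation byte and free the buffer when done. The example_log_speakers() name is hypothetical.

static void example_log_speakers(struct edid *edid)
{
	u8 *sadb = NULL;
	int count;

	count = drm_edid_to_speaker_allocation(edid, &sadb);
	if (count > 0) {
		/* sadb[0] carries the CEA-861 speaker allocation bitmask */
		DRM_DEBUG_KMS("speaker allocation: 0x%02x\n", sadb[0]);
		kfree(sadb);
	}
}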
2939/**
2735 * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond 2940 * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond
2736 * @connector: connector associated with the HDMI/DP sink 2941 * @connector: connector associated with the HDMI/DP sink
2737 * @mode: the display mode 2942 * @mode: the display mode
@@ -3102,9 +3307,10 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
3102 if (err < 0) 3307 if (err < 0)
3103 return err; 3308 return err;
3104 3309
3310 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
3311 frame->pixel_repeat = 1;
3312
3105 frame->video_code = drm_match_cea_mode(mode); 3313 frame->video_code = drm_match_cea_mode(mode);
3106 if (!frame->video_code)
3107 return 0;
3108 3314
3109 frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE; 3315 frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
3110 frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE; 3316 frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
@@ -3112,3 +3318,39 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
3112 return 0; 3318 return 0;
3113} 3319}
3114EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode); 3320EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
3321
3322/**
3323 * drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
3324 * data from a DRM display mode
3325 * @frame: HDMI vendor infoframe
3326 * @mode: DRM display mode
3327 *
3328 * Note that HDMI vendor infoframes only need to be sent when using a 4k or
3329 * stereoscopic 3D mode. So when any other mode is given as input, this
3330 * function will return -EINVAL, an error that can be safely ignored.
3331 *
3332 * Returns 0 on success or a negative error code on failure.
3333 */
3334int
3335drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
3336 const struct drm_display_mode *mode)
3337{
3338 int err;
3339 u8 vic;
3340
3341 if (!frame || !mode)
3342 return -EINVAL;
3343
3344 vic = drm_match_hdmi_mode(mode);
3345 if (!vic)
3346 return -EINVAL;
3347
3348 err = hdmi_vendor_infoframe_init(frame);
3349 if (err < 0)
3350 return err;
3351
3352 frame->vic = vic;
3353
3354 return 0;
3355}
3356EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
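As a rough usage sketch (not part of the patch), a driver would typically build the vendor infoframe for the current mode and pack it for its hardware; hdmi_vendor_infoframe_pack() is assumed to come from <linux/hdmi.h>, and example_send_vendor_infoframe()/example_write_infoframe() are hypothetical names.

static void example_send_vendor_infoframe(const struct drm_display_mode *mode)
{
	struct hdmi_vendor_infoframe frame;
	u8 buffer[32];
	ssize_t len;

	/* -EINVAL simply means this mode needs no vendor infoframe */
	if (drm_hdmi_vendor_infoframe_from_display_mode(&frame, mode) < 0)
		return;

	len = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (len > 0)
		example_write_infoframe(buffer, len); /* hypothetical hw hook */
}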
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index c385cc5e730e..61b5a47ad239 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -181,11 +181,11 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
181EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj); 181EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
182 182
183#ifdef CONFIG_DEBUG_FS 183#ifdef CONFIG_DEBUG_FS
184/** 184/*
185 * drm_fb_cma_describe() - Helper to dump information about a single 185 * drm_fb_cma_describe() - Helper to dump information about a single
186 * CMA framebuffer object 186 * CMA framebuffer object
187 */ 187 */
188void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m) 188static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
189{ 189{
190 struct drm_fb_cma *fb_cma = to_fb_cma(fb); 190 struct drm_fb_cma *fb_cma = to_fb_cma(fb);
191 int i, n = drm_format_num_planes(fb->pixel_format); 191 int i, n = drm_format_num_planes(fb->pixel_format);
@@ -199,7 +199,6 @@ void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
199 drm_gem_cma_describe(fb_cma->obj[i], m); 199 drm_gem_cma_describe(fb_cma->obj[i], m);
200 } 200 }
201} 201}
202EXPORT_SYMBOL_GPL(drm_fb_cma_describe);
203 202
204/** 203/**
205 * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects 204 * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
new file mode 100644
index 000000000000..e788882d9021
--- /dev/null
+++ b/drivers/gpu/drm/drm_flip_work.c
@@ -0,0 +1,124 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#include "drmP.h"
25#include "drm_flip_work.h"
26
27/**
28 * drm_flip_work_queue - queue work
29 * @work: the flip-work
30 * @val: the value to queue
31 *
32 * Queues work that will later be run (passed back to drm_flip_func_t
33 * func) on a work queue after drm_flip_work_commit() is called.
34 */
35void drm_flip_work_queue(struct drm_flip_work *work, void *val)
36{
37 if (kfifo_put(&work->fifo, (const void **)&val)) {
38 atomic_inc(&work->pending);
39 } else {
40 DRM_ERROR("%s fifo full!\n", work->name);
41 work->func(work, val);
42 }
43}
44EXPORT_SYMBOL(drm_flip_work_queue);
45
46/**
47 * drm_flip_work_commit - commit queued work
48 * @work: the flip-work
49 * @wq: the work-queue to run the queued work on
50 *
51 * Trigger work previously queued by drm_flip_work_queue() to run
52 * on a workqueue. The typical usage would be to queue work (via
53 * drm_flip_work_queue()) at any point (from vblank irq and/or
54 * prior), and then from vblank irq commit the queued work.
55 */
56void drm_flip_work_commit(struct drm_flip_work *work,
57 struct workqueue_struct *wq)
58{
59 uint32_t pending = atomic_read(&work->pending);
60 atomic_add(pending, &work->count);
61 atomic_sub(pending, &work->pending);
62 queue_work(wq, &work->worker);
63}
64EXPORT_SYMBOL(drm_flip_work_commit);
65
66static void flip_worker(struct work_struct *w)
67{
68 struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
69 uint32_t count = atomic_read(&work->count);
70 void *val = NULL;
71
72 atomic_sub(count, &work->count);
73
74 while (count--)
75 if (!WARN_ON(!kfifo_get(&work->fifo, &val)))
76 work->func(work, val);
77}
78
79/**
80 * drm_flip_work_init - initialize flip-work
81 * @work: the flip-work to initialize
82 * @size: the max queue depth
83 * @name: debug name
84 * @func: the callback work function
85 *
86 * Initializes/allocates resources for the flip-work
87 *
88 * RETURNS:
89 * Zero on success, error code on failure.
90 */
91int drm_flip_work_init(struct drm_flip_work *work, int size,
92 const char *name, drm_flip_func_t func)
93{
94 int ret;
95
96 work->name = name;
97 atomic_set(&work->count, 0);
98 atomic_set(&work->pending, 0);
99 work->func = func;
100
101 ret = kfifo_alloc(&work->fifo, size, GFP_KERNEL);
102 if (ret) {
103 DRM_ERROR("could not allocate %s fifo\n", name);
104 return ret;
105 }
106
107 INIT_WORK(&work->worker, flip_worker);
108
109 return 0;
110}
111EXPORT_SYMBOL(drm_flip_work_init);
112
113/**
114 * drm_flip_work_cleanup - cleans up flip-work
115 * @work: the flip-work to cleanup
116 *
117 * Destroy resources allocated for the flip-work
118 */
119void drm_flip_work_cleanup(struct drm_flip_work *work)
120{
121 WARN_ON(!kfifo_is_empty(&work->fifo));
122 kfifo_free(&work->fifo);
123}
124EXPORT_SYMBOL(drm_flip_work_cleanup);
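To illustrate the usage pattern the comments above describe (queue from the pageflip path, commit from the vblank interrupt), here is a hypothetical driver-side sketch; the example_* names, the workqueue and the use of drm_framebuffer_unreference() are assumptions, not part of this patch.

static struct workqueue_struct *example_wq; /* allocated at driver init */
static struct drm_flip_work example_unref_work;

static void example_unref_worker(struct drm_flip_work *work, void *val)
{
	drm_framebuffer_unreference(val); /* runs in process context */
}

static int example_init(void)
{
	example_wq = alloc_workqueue("example", 0, 0);
	if (!example_wq)
		return -ENOMEM;

	return drm_flip_work_init(&example_unref_work, 16,
				  "fb unref", example_unref_worker);
}

static void example_queue_old_fb(struct drm_framebuffer *old_fb)
{
	drm_flip_work_queue(&example_unref_work, old_fb); /* e.g. from the flip ioctl */
}

static void example_vblank_irq(void)
{
	drm_flip_work_commit(&example_unref_work, example_wq); /* from vblank irq */
}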
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 3a24385e0368..4be8e09a32ef 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -48,59 +48,21 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
48 48
49static int drm_setup(struct drm_device * dev) 49static int drm_setup(struct drm_device * dev)
50{ 50{
51 int i;
52 int ret; 51 int ret;
53 52
54 if (dev->driver->firstopen) { 53 if (dev->driver->firstopen &&
54 !drm_core_check_feature(dev, DRIVER_MODESET)) {
55 ret = dev->driver->firstopen(dev); 55 ret = dev->driver->firstopen(dev);
56 if (ret != 0) 56 if (ret != 0)
57 return ret; 57 return ret;
58 } 58 }
59 59
60 atomic_set(&dev->ioctl_count, 0); 60 ret = drm_legacy_dma_setup(dev);
61 atomic_set(&dev->vma_count, 0); 61 if (ret < 0)
62 62 return ret;
63 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
64 !drm_core_check_feature(dev, DRIVER_MODESET)) {
65 dev->buf_use = 0;
66 atomic_set(&dev->buf_alloc, 0);
67
68 i = drm_dma_setup(dev);
69 if (i < 0)
70 return i;
71 }
72
73 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
74 atomic_set(&dev->counts[i], 0);
75
76 dev->sigdata.lock = NULL;
77
78 dev->context_flag = 0;
79 dev->interrupt_flag = 0;
80 dev->dma_flag = 0;
81 dev->last_context = 0;
82 dev->last_switch = 0;
83 dev->last_checked = 0;
84 init_waitqueue_head(&dev->context_wait);
85 dev->if_version = 0;
86
87 dev->ctx_start = 0;
88 dev->lck_start = 0;
89 63
90 dev->buf_async = NULL;
91 init_waitqueue_head(&dev->buf_readers);
92 init_waitqueue_head(&dev->buf_writers);
93 64
94 DRM_DEBUG("\n"); 65 DRM_DEBUG("\n");
95
96 /*
97 * The kernel's context could be created here, but is now created
98 * in drm_dma_enqueue. This is more resource-efficient for
99 * hardware that does not do DMA, but may mean that
100 * drm_select_queue fails between the time the interrupt is
101 * initialized and the time the queues are initialized.
102 */
103
104 return 0; 66 return 0;
105} 67}
106 68
@@ -257,7 +219,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
257 return -EBUSY; /* No exclusive opens */ 219 return -EBUSY; /* No exclusive opens */
258 if (!drm_cpu_valid()) 220 if (!drm_cpu_valid())
259 return -EINVAL; 221 return -EINVAL;
260 if (dev->switch_power_state != DRM_SWITCH_POWER_ON) 222 if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
261 return -EINVAL; 223 return -EINVAL;
262 224
263 DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id); 225 DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
@@ -300,10 +262,10 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
300 goto out_prime_destroy; 262 goto out_prime_destroy;
301 } 263 }
302 264
303 265 /* if there is no current master make this fd it, but do not create
304 /* if there is no current master make this fd it */ 266 * any master object for render clients */
305 mutex_lock(&dev->struct_mutex); 267 mutex_lock(&dev->struct_mutex);
306 if (!priv->minor->master) { 268 if (!priv->minor->master && !drm_is_render_client(priv)) {
307 /* create a new master */ 269 /* create a new master */
308 priv->minor->master = drm_master_create(priv->minor); 270 priv->minor->master = drm_master_create(priv->minor);
309 if (!priv->minor->master) { 271 if (!priv->minor->master) {
@@ -341,12 +303,11 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
341 goto out_close; 303 goto out_close;
342 } 304 }
343 } 305 }
344 mutex_unlock(&dev->struct_mutex); 306 } else if (!drm_is_render_client(priv)) {
345 } else {
346 /* get a reference to the master */ 307 /* get a reference to the master */
347 priv->master = drm_master_get(priv->minor->master); 308 priv->master = drm_master_get(priv->minor->master);
348 mutex_unlock(&dev->struct_mutex);
349 } 309 }
310 mutex_unlock(&dev->struct_mutex);
350 311
351 mutex_lock(&dev->struct_mutex); 312 mutex_lock(&dev->struct_mutex);
352 list_add(&priv->lhead, &dev->filelist); 313 list_add(&priv->lhead, &dev->filelist);
@@ -388,18 +349,6 @@ out_put_pid:
388 return ret; 349 return ret;
389} 350}
390 351
391/** No-op. */
392int drm_fasync(int fd, struct file *filp, int on)
393{
394 struct drm_file *priv = filp->private_data;
395 struct drm_device *dev = priv->minor->dev;
396
397 DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
398 (long)old_encode_dev(priv->minor->device));
399 return fasync_helper(fd, filp, on, &dev->buf_async);
400}
401EXPORT_SYMBOL(drm_fasync);
402
403static void drm_master_release(struct drm_device *dev, struct file *filp) 352static void drm_master_release(struct drm_device *dev, struct file *filp)
404{ 353{
405 struct drm_file *file_priv = filp->private_data; 354 struct drm_file *file_priv = filp->private_data;
@@ -490,26 +439,7 @@ int drm_release(struct inode *inode, struct file *filp)
490 if (dev->driver->driver_features & DRIVER_GEM) 439 if (dev->driver->driver_features & DRIVER_GEM)
491 drm_gem_release(dev, file_priv); 440 drm_gem_release(dev, file_priv);
492 441
493 mutex_lock(&dev->ctxlist_mutex); 442 drm_legacy_ctxbitmap_release(dev, file_priv);
494 if (!list_empty(&dev->ctxlist)) {
495 struct drm_ctx_list *pos, *n;
496
497 list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
498 if (pos->tag == file_priv &&
499 pos->handle != DRM_KERNEL_CONTEXT) {
500 if (dev->driver->context_dtor)
501 dev->driver->context_dtor(dev,
502 pos->handle);
503
504 drm_ctxbitmap_free(dev, pos->handle);
505
506 list_del(&pos->head);
507 kfree(pos);
508 --dev->ctx_count;
509 }
510 }
511 }
512 mutex_unlock(&dev->ctxlist_mutex);
513 443
514 mutex_lock(&dev->struct_mutex); 444 mutex_lock(&dev->struct_mutex);
515 445
@@ -547,7 +477,8 @@ int drm_release(struct inode *inode, struct file *filp)
547 iput(container_of(dev->dev_mapping, struct inode, i_data)); 477 iput(container_of(dev->dev_mapping, struct inode, i_data));
548 478
549 /* drop the reference held my the file priv */ 479 /* drop the reference held my the file priv */
550 drm_master_put(&file_priv->master); 480 if (file_priv->master)
481 drm_master_put(&file_priv->master);
551 file_priv->is_master = 0; 482 file_priv->is_master = 0;
552 list_del(&file_priv->lhead); 483 list_del(&file_priv->lhead);
553 mutex_unlock(&dev->struct_mutex); 484 mutex_unlock(&dev->struct_mutex);
@@ -555,6 +486,7 @@ int drm_release(struct inode *inode, struct file *filp)
555 if (dev->driver->postclose) 486 if (dev->driver->postclose)
556 dev->driver->postclose(dev, file_priv); 487 dev->driver->postclose(dev, file_priv);
557 488
489
558 if (drm_core_check_feature(dev, DRIVER_PRIME)) 490 if (drm_core_check_feature(dev, DRIVER_PRIME))
559 drm_prime_destroy_file_private(&file_priv->prime); 491 drm_prime_destroy_file_private(&file_priv->prime);
560 492
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 603f256152ef..49293bdc972a 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -37,6 +37,7 @@
37#include <linux/shmem_fs.h> 37#include <linux/shmem_fs.h>
38#include <linux/dma-buf.h> 38#include <linux/dma-buf.h>
39#include <drm/drmP.h> 39#include <drm/drmP.h>
40#include <drm/drm_vma_manager.h>
40 41
41/** @file drm_gem.c 42/** @file drm_gem.c
42 * 43 *
@@ -92,7 +93,7 @@ drm_gem_init(struct drm_device *dev)
92{ 93{
93 struct drm_gem_mm *mm; 94 struct drm_gem_mm *mm;
94 95
95 spin_lock_init(&dev->object_name_lock); 96 mutex_init(&dev->object_name_lock);
96 idr_init(&dev->object_name_idr); 97 idr_init(&dev->object_name_idr);
97 98
98 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL); 99 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
@@ -102,14 +103,9 @@ drm_gem_init(struct drm_device *dev)
102 } 103 }
103 104
104 dev->mm_private = mm; 105 dev->mm_private = mm;
105 106 drm_vma_offset_manager_init(&mm->vma_manager,
106 if (drm_ht_create(&mm->offset_hash, 12)) { 107 DRM_FILE_PAGE_OFFSET_START,
107 kfree(mm); 108 DRM_FILE_PAGE_OFFSET_SIZE);
108 return -ENOMEM;
109 }
110
111 drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
112 DRM_FILE_PAGE_OFFSET_SIZE);
113 109
114 return 0; 110 return 0;
115} 111}
@@ -119,8 +115,7 @@ drm_gem_destroy(struct drm_device *dev)
119{ 115{
120 struct drm_gem_mm *mm = dev->mm_private; 116 struct drm_gem_mm *mm = dev->mm_private;
121 117
122 drm_mm_takedown(&mm->offset_manager); 118 drm_vma_offset_manager_destroy(&mm->vma_manager);
123 drm_ht_remove(&mm->offset_hash);
124 kfree(mm); 119 kfree(mm);
125 dev->mm_private = NULL; 120 dev->mm_private = NULL;
126} 121}
@@ -132,16 +127,14 @@ drm_gem_destroy(struct drm_device *dev)
132int drm_gem_object_init(struct drm_device *dev, 127int drm_gem_object_init(struct drm_device *dev,
133 struct drm_gem_object *obj, size_t size) 128 struct drm_gem_object *obj, size_t size)
134{ 129{
135 BUG_ON((size & (PAGE_SIZE - 1)) != 0); 130 struct file *filp;
136 131
137 obj->dev = dev; 132 filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
138 obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); 133 if (IS_ERR(filp))
139 if (IS_ERR(obj->filp)) 134 return PTR_ERR(filp);
140 return PTR_ERR(obj->filp);
141 135
142 kref_init(&obj->refcount); 136 drm_gem_private_object_init(dev, obj, size);
143 atomic_set(&obj->handle_count, 0); 137 obj->filp = filp;
144 obj->size = size;
145 138
146 return 0; 139 return 0;
147} 140}
@@ -152,8 +145,8 @@ EXPORT_SYMBOL(drm_gem_object_init);
152 * no GEM provided backing store. Instead the caller is responsible for 145 * no GEM provided backing store. Instead the caller is responsible for
153 * backing the object and handling it. 146 * backing the object and handling it.
154 */ 147 */
155int drm_gem_private_object_init(struct drm_device *dev, 148void drm_gem_private_object_init(struct drm_device *dev,
156 struct drm_gem_object *obj, size_t size) 149 struct drm_gem_object *obj, size_t size)
157{ 150{
158 BUG_ON((size & (PAGE_SIZE - 1)) != 0); 151 BUG_ON((size & (PAGE_SIZE - 1)) != 0);
159 152
@@ -161,10 +154,9 @@ int drm_gem_private_object_init(struct drm_device *dev,
161 obj->filp = NULL; 154 obj->filp = NULL;
162 155
163 kref_init(&obj->refcount); 156 kref_init(&obj->refcount);
164 atomic_set(&obj->handle_count, 0); 157 obj->handle_count = 0;
165 obj->size = size; 158 obj->size = size;
166 159 drm_vma_node_reset(&obj->vma_node);
167 return 0;
168} 160}
169EXPORT_SYMBOL(drm_gem_private_object_init); 161EXPORT_SYMBOL(drm_gem_private_object_init);
170 162
@@ -200,16 +192,79 @@ EXPORT_SYMBOL(drm_gem_object_alloc);
200static void 192static void
201drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) 193drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
202{ 194{
203 if (obj->import_attach) { 195 /*
204 drm_prime_remove_buf_handle(&filp->prime, 196 * Note: obj->dma_buf can't disappear as long as we still hold a
205 obj->import_attach->dmabuf); 197 * handle reference in obj->handle_count.
198 */
199 mutex_lock(&filp->prime.lock);
200 if (obj->dma_buf) {
201 drm_prime_remove_buf_handle_locked(&filp->prime,
202 obj->dma_buf);
206 } 203 }
207 if (obj->export_dma_buf) { 204 mutex_unlock(&filp->prime.lock);
208 drm_prime_remove_buf_handle(&filp->prime, 205}
209 obj->export_dma_buf); 206
207static void drm_gem_object_ref_bug(struct kref *list_kref)
208{
209 BUG();
210}
211
212/**
213 * Called after the last handle to the object has been closed
214 *
215 * Removes any name for the object. Note that this must be
216 * called before drm_gem_object_free or we'll be touching
217 * freed memory
218 */
219static void drm_gem_object_handle_free(struct drm_gem_object *obj)
220{
221 struct drm_device *dev = obj->dev;
222
223 /* Remove any name for this object */
224 if (obj->name) {
225 idr_remove(&dev->object_name_idr, obj->name);
226 obj->name = 0;
227 /*
228 * The object name held a reference to this object, drop
229 * that now.
230 *
231 * This cannot be the last reference, since the handle holds one too.
232 */
233 kref_put(&obj->refcount, drm_gem_object_ref_bug);
210 } 234 }
211} 235}
212 236
237static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
238{
239 /* Unbreak the reference cycle if we have an exported dma_buf. */
240 if (obj->dma_buf) {
241 dma_buf_put(obj->dma_buf);
242 obj->dma_buf = NULL;
243 }
244}
245
246static void
247drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
248{
249 if (WARN_ON(obj->handle_count == 0))
250 return;
251
252 /*
253 * Must bump handle count first as this may be the last
254 * ref, in which case the object would disappear before we
255 * checked for a name
256 */
257
258 mutex_lock(&obj->dev->object_name_lock);
259 if (--obj->handle_count == 0) {
260 drm_gem_object_handle_free(obj);
261 drm_gem_object_exported_dma_buf_free(obj);
262 }
263 mutex_unlock(&obj->dev->object_name_lock);
264
265 drm_gem_object_unreference_unlocked(obj);
266}
267
213/** 268/**
214 * Removes the mapping from handle to filp for this object. 269 * Removes the mapping from handle to filp for this object.
215 */ 270 */
@@ -242,7 +297,9 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
242 idr_remove(&filp->object_idr, handle); 297 idr_remove(&filp->object_idr, handle);
243 spin_unlock(&filp->table_lock); 298 spin_unlock(&filp->table_lock);
244 299
245 drm_gem_remove_prime_handles(obj, filp); 300 if (drm_core_check_feature(dev, DRIVER_PRIME))
301 drm_gem_remove_prime_handles(obj, filp);
302 drm_vma_node_revoke(&obj->vma_node, filp->filp);
246 303
247 if (dev->driver->gem_close_object) 304 if (dev->driver->gem_close_object)
248 dev->driver->gem_close_object(obj, filp); 305 dev->driver->gem_close_object(obj, filp);
@@ -253,18 +310,36 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
253EXPORT_SYMBOL(drm_gem_handle_delete); 310EXPORT_SYMBOL(drm_gem_handle_delete);
254 311
255/** 312/**
256 * Create a handle for this object. This adds a handle reference 313 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
257 * to the object, which includes a regular reference count. Callers 314 *
258 * will likely want to dereference the object afterwards. 315 * This implements the ->dumb_destroy kms driver callback for drivers which use
316 * gem to manage their backing storage.
317 */
318int drm_gem_dumb_destroy(struct drm_file *file,
319 struct drm_device *dev,
320 uint32_t handle)
321{
322 return drm_gem_handle_delete(file, handle);
323}
324EXPORT_SYMBOL(drm_gem_dumb_destroy);
325
326/**
327 * drm_gem_handle_create_tail - internal functions to create a handle
328 *
329 * This expects the dev->object_name_lock to be held already and will drop it
330 * before returning. Used to avoid races in establishing new handles when
331 * importing an object from either a flink name or a dma-buf.
259 */ 332 */
260int 333int
261drm_gem_handle_create(struct drm_file *file_priv, 334drm_gem_handle_create_tail(struct drm_file *file_priv,
262 struct drm_gem_object *obj, 335 struct drm_gem_object *obj,
263 u32 *handlep) 336 u32 *handlep)
264{ 337{
265 struct drm_device *dev = obj->dev; 338 struct drm_device *dev = obj->dev;
266 int ret; 339 int ret;
267 340
341 WARN_ON(!mutex_is_locked(&dev->object_name_lock));
342
268 /* 343 /*
269 * Get the user-visible handle using idr. Preload and perform 344 * Get the user-visible handle using idr. Preload and perform
270 * allocation under our spinlock. 345 * allocation under our spinlock.
@@ -273,14 +348,22 @@ drm_gem_handle_create(struct drm_file *file_priv,
273 spin_lock(&file_priv->table_lock); 348 spin_lock(&file_priv->table_lock);
274 349
275 ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); 350 ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
276 351 drm_gem_object_reference(obj);
352 obj->handle_count++;
277 spin_unlock(&file_priv->table_lock); 353 spin_unlock(&file_priv->table_lock);
278 idr_preload_end(); 354 idr_preload_end();
279 if (ret < 0) 355 mutex_unlock(&dev->object_name_lock);
356 if (ret < 0) {
357 drm_gem_object_handle_unreference_unlocked(obj);
280 return ret; 358 return ret;
359 }
281 *handlep = ret; 360 *handlep = ret;
282 361
283 drm_gem_object_handle_reference(obj); 362 ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
363 if (ret) {
364 drm_gem_handle_delete(file_priv, *handlep);
365 return ret;
366 }
284 367
285 if (dev->driver->gem_open_object) { 368 if (dev->driver->gem_open_object) {
286 ret = dev->driver->gem_open_object(obj, file_priv); 369 ret = dev->driver->gem_open_object(obj, file_priv);
@@ -292,6 +375,21 @@ drm_gem_handle_create(struct drm_file *file_priv,
292 375
293 return 0; 376 return 0;
294} 377}
378
379/**
380 * Create a handle for this object. This adds a handle reference
381 * to the object, which includes a regular reference count. Callers
382 * will likely want to dereference the object afterwards.
383 */
384int
385drm_gem_handle_create(struct drm_file *file_priv,
386 struct drm_gem_object *obj,
387 u32 *handlep)
388{
389 mutex_lock(&obj->dev->object_name_lock);
390
391 return drm_gem_handle_create_tail(file_priv, obj, handlep);
392}
295EXPORT_SYMBOL(drm_gem_handle_create); 393EXPORT_SYMBOL(drm_gem_handle_create);
296 394
297 395
@@ -306,81 +404,155 @@ drm_gem_free_mmap_offset(struct drm_gem_object *obj)
306{ 404{
307 struct drm_device *dev = obj->dev; 405 struct drm_device *dev = obj->dev;
308 struct drm_gem_mm *mm = dev->mm_private; 406 struct drm_gem_mm *mm = dev->mm_private;
309 struct drm_map_list *list = &obj->map_list;
310 407
311 drm_ht_remove_item(&mm->offset_hash, &list->hash); 408 drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
312 drm_mm_put_block(list->file_offset_node);
313 kfree(list->map);
314 list->map = NULL;
315} 409}
316EXPORT_SYMBOL(drm_gem_free_mmap_offset); 410EXPORT_SYMBOL(drm_gem_free_mmap_offset);
317 411
318/** 412/**
319 * drm_gem_create_mmap_offset - create a fake mmap offset for an object 413 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
320 * @obj: obj in question 414 * @obj: obj in question
415 * @size: the virtual size
321 * 416 *
322 * GEM memory mapping works by handing back to userspace a fake mmap offset 417 * GEM memory mapping works by handing back to userspace a fake mmap offset
323 * it can use in a subsequent mmap(2) call. The DRM core code then looks 418 * it can use in a subsequent mmap(2) call. The DRM core code then looks
324 * up the object based on the offset and sets up the various memory mapping 419 * up the object based on the offset and sets up the various memory mapping
325 * structures. 420 * structures.
326 * 421 *
327 * This routine allocates and attaches a fake offset for @obj. 422 * This routine allocates and attaches a fake offset for @obj, in cases where
423 * the virtual size differs from the physical size (ie. obj->size). Otherwise
424 * just use drm_gem_create_mmap_offset().
328 */ 425 */
329int 426int
330drm_gem_create_mmap_offset(struct drm_gem_object *obj) 427drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
331{ 428{
332 struct drm_device *dev = obj->dev; 429 struct drm_device *dev = obj->dev;
333 struct drm_gem_mm *mm = dev->mm_private; 430 struct drm_gem_mm *mm = dev->mm_private;
334 struct drm_map_list *list;
335 struct drm_local_map *map;
336 int ret;
337 431
338 /* Set the object up for mmap'ing */ 432 return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
339 list = &obj->map_list; 433 size / PAGE_SIZE);
340 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); 434}
341 if (!list->map) 435EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
342 return -ENOMEM;
343
344 map = list->map;
345 map->type = _DRM_GEM;
346 map->size = obj->size;
347 map->handle = obj;
348 436
349 /* Get a DRM GEM mmap offset allocated... */ 437/**
350 list->file_offset_node = drm_mm_search_free(&mm->offset_manager, 438 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
351 obj->size / PAGE_SIZE, 0, false); 439 * @obj: obj in question
440 *
441 * GEM memory mapping works by handing back to userspace a fake mmap offset
442 * it can use in a subsequent mmap(2) call. The DRM core code then looks
443 * up the object based on the offset and sets up the various memory mapping
444 * structures.
445 *
446 * This routine allocates and attaches a fake offset for @obj.
447 */
448int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
449{
450 return drm_gem_create_mmap_offset_size(obj, obj->size);
451}
452EXPORT_SYMBOL(drm_gem_create_mmap_offset);
352 453
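A sketch of how a driver's ->dumb_map_offset hook might combine this helper with drm_vma_node_offset_addr() (used the same way by the CMA helper further down); hypothetical code, not part of the patch.

static int example_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
				   u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file, handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (!ret)
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}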
353 if (!list->file_offset_node) { 454/**
354 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); 455 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
355 ret = -ENOSPC; 456 * from shmem
356 goto out_free_list; 457 * @obj: obj in question
458 * @gfpmask: gfp mask of requested pages
459 */
460struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
461{
462 struct inode *inode;
463 struct address_space *mapping;
464 struct page *p, **pages;
465 int i, npages;
466
467 /* This is the shared memory object that backs the GEM resource */
468 inode = file_inode(obj->filp);
469 mapping = inode->i_mapping;
470
471 /* We already BUG_ON() for non-page-aligned sizes in
472 * drm_gem_object_init(), so we should never hit this unless
473 * driver author is doing something really wrong:
474 */
475 WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
476
477 npages = obj->size >> PAGE_SHIFT;
478
479 pages = drm_malloc_ab(npages, sizeof(struct page *));
480 if (pages == NULL)
481 return ERR_PTR(-ENOMEM);
482
483 gfpmask |= mapping_gfp_mask(mapping);
484
485 for (i = 0; i < npages; i++) {
486 p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
487 if (IS_ERR(p))
488 goto fail;
489 pages[i] = p;
490
491 /* There is a hypothetical issue w/ drivers that require
492 * buffer memory in the low 4GB.. if the pages are un-
493 * pinned, and swapped out, they can end up swapped back
494 * in above 4GB. If pages are already in memory, then
495 * shmem_read_mapping_page_gfp will ignore the gfpmask,
496 * even if the already in-memory page disobeys the mask.
497 *
498 * It is only a theoretical issue today, because none of
499 * the devices with this limitation can be populated with
500 * enough memory to trigger the issue. But this BUG_ON()
501 * is here as a reminder in case the problem with
502 * shmem_read_mapping_page_gfp() isn't solved by the time
503 * it does become a real issue.
504 *
505 * See this thread: http://lkml.org/lkml/2011/7/11/238
506 */
507 BUG_ON((gfpmask & __GFP_DMA32) &&
508 (page_to_pfn(p) >= 0x00100000UL));
357 } 509 }
358 510
359 list->file_offset_node = drm_mm_get_block(list->file_offset_node, 511 return pages;
360 obj->size / PAGE_SIZE, 0);
361 if (!list->file_offset_node) {
362 ret = -ENOMEM;
363 goto out_free_list;
364 }
365 512
366 list->hash.key = list->file_offset_node->start; 513fail:
367 ret = drm_ht_insert_item(&mm->offset_hash, &list->hash); 514 while (i--)
368 if (ret) { 515 page_cache_release(pages[i]);
369 DRM_ERROR("failed to add to map hash\n");
370 goto out_free_mm;
371 }
372 516
373 return 0; 517 drm_free_large(pages);
518 return ERR_CAST(p);
519}
520EXPORT_SYMBOL(drm_gem_get_pages);
374 521
375out_free_mm: 522/**
376 drm_mm_put_block(list->file_offset_node); 523 * drm_gem_put_pages - helper to free backing pages for a GEM object
377out_free_list: 524 * @obj: obj in question
378 kfree(list->map); 525 * @pages: pages to free
379 list->map = NULL; 526 * @dirty: if true, pages will be marked as dirty
527 * @accessed: if true, the pages will be marked as accessed
528 */
529void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
530 bool dirty, bool accessed)
531{
532 int i, npages;
380 533
381 return ret; 534 /* We already BUG_ON() for non-page-aligned sizes in
535 * drm_gem_object_init(), so we should never hit this unless
536 * driver author is doing something really wrong:
537 */
538 WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
539
540 npages = obj->size >> PAGE_SHIFT;
541
542 for (i = 0; i < npages; i++) {
543 if (dirty)
544 set_page_dirty(pages[i]);
545
546 if (accessed)
547 mark_page_accessed(pages[i]);
548
549 /* Undo the reference we took when populating the table */
550 page_cache_release(pages[i]);
551 }
552
553 drm_free_large(pages);
382} 554}
383EXPORT_SYMBOL(drm_gem_create_mmap_offset); 555EXPORT_SYMBOL(drm_gem_put_pages);
384 556
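A hypothetical driver sketch (illustration only) of the pin/use/release pattern these two helpers support:

static int example_pin_and_release(struct drm_gem_object *obj)
{
	struct page **pages;

	pages = drm_gem_get_pages(obj, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* ... build an sg table or device page table from pages[] ... */

	/* drop the pages again, marking them dirty but not accessed */
	drm_gem_put_pages(obj, pages, true, false);

	return 0;
}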
385/** Returns a reference to the object named by the handle. */ 557/** Returns a reference to the object named by the handle. */
386struct drm_gem_object * 558struct drm_gem_object *
@@ -445,8 +617,14 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
445 if (obj == NULL) 617 if (obj == NULL)
446 return -ENOENT; 618 return -ENOENT;
447 619
620 mutex_lock(&dev->object_name_lock);
448 idr_preload(GFP_KERNEL); 621 idr_preload(GFP_KERNEL);
449 spin_lock(&dev->object_name_lock); 622 /* prevent races with concurrent gem_close. */
623 if (obj->handle_count == 0) {
624 ret = -ENOENT;
625 goto err;
626 }
627
450 if (!obj->name) { 628 if (!obj->name) {
451 ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT); 629 ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
452 if (ret < 0) 630 if (ret < 0)
@@ -462,8 +640,8 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
462 ret = 0; 640 ret = 0;
463 641
464err: 642err:
465 spin_unlock(&dev->object_name_lock);
466 idr_preload_end(); 643 idr_preload_end();
644 mutex_unlock(&dev->object_name_lock);
467 drm_gem_object_unreference_unlocked(obj); 645 drm_gem_object_unreference_unlocked(obj);
468 return ret; 646 return ret;
469} 647}
@@ -486,15 +664,17 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
486 if (!(dev->driver->driver_features & DRIVER_GEM)) 664 if (!(dev->driver->driver_features & DRIVER_GEM))
487 return -ENODEV; 665 return -ENODEV;
488 666
489 spin_lock(&dev->object_name_lock); 667 mutex_lock(&dev->object_name_lock);
490 obj = idr_find(&dev->object_name_idr, (int) args->name); 668 obj = idr_find(&dev->object_name_idr, (int) args->name);
491 if (obj) 669 if (obj) {
492 drm_gem_object_reference(obj); 670 drm_gem_object_reference(obj);
493 spin_unlock(&dev->object_name_lock); 671 } else {
494 if (!obj) 672 mutex_unlock(&dev->object_name_lock);
495 return -ENOENT; 673 return -ENOENT;
674 }
496 675
497 ret = drm_gem_handle_create(file_priv, obj, &handle); 676 /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
677 ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
498 drm_gem_object_unreference_unlocked(obj); 678 drm_gem_object_unreference_unlocked(obj);
499 if (ret) 679 if (ret)
500 return ret; 680 return ret;
@@ -527,7 +707,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
527 struct drm_gem_object *obj = ptr; 707 struct drm_gem_object *obj = ptr;
528 struct drm_device *dev = obj->dev; 708 struct drm_device *dev = obj->dev;
529 709
530 drm_gem_remove_prime_handles(obj, file_priv); 710 if (drm_core_check_feature(dev, DRIVER_PRIME))
711 drm_gem_remove_prime_handles(obj, file_priv);
712 drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
531 713
532 if (dev->driver->gem_close_object) 714 if (dev->driver->gem_close_object)
533 dev->driver->gem_close_object(obj, file_priv); 715 dev->driver->gem_close_object(obj, file_priv);
@@ -553,6 +735,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
553void 735void
554drm_gem_object_release(struct drm_gem_object *obj) 736drm_gem_object_release(struct drm_gem_object *obj)
555{ 737{
738 WARN_ON(obj->dma_buf);
739
556 if (obj->filp) 740 if (obj->filp)
557 fput(obj->filp); 741 fput(obj->filp);
558} 742}
@@ -577,41 +761,6 @@ drm_gem_object_free(struct kref *kref)
577} 761}
578EXPORT_SYMBOL(drm_gem_object_free); 762EXPORT_SYMBOL(drm_gem_object_free);
579 763
580static void drm_gem_object_ref_bug(struct kref *list_kref)
581{
582 BUG();
583}
584
585/**
586 * Called after the last handle to the object has been closed
587 *
588 * Removes any name for the object. Note that this must be
589 * called before drm_gem_object_free or we'll be touching
590 * freed memory
591 */
592void drm_gem_object_handle_free(struct drm_gem_object *obj)
593{
594 struct drm_device *dev = obj->dev;
595
596 /* Remove any name for this object */
597 spin_lock(&dev->object_name_lock);
598 if (obj->name) {
599 idr_remove(&dev->object_name_idr, obj->name);
600 obj->name = 0;
601 spin_unlock(&dev->object_name_lock);
602 /*
603 * The object name held a reference to this object, drop
604 * that now.
605 *
606 * This cannot be the last reference, since the handle holds one too.
607 */
608 kref_put(&obj->refcount, drm_gem_object_ref_bug);
609 } else
610 spin_unlock(&dev->object_name_lock);
611
612}
613EXPORT_SYMBOL(drm_gem_object_handle_free);
614
615void drm_gem_vm_open(struct vm_area_struct *vma) 764void drm_gem_vm_open(struct vm_area_struct *vma)
616{ 765{
617 struct drm_gem_object *obj = vma->vm_private_data; 766 struct drm_gem_object *obj = vma->vm_private_data;
@@ -653,6 +802,10 @@ EXPORT_SYMBOL(drm_gem_vm_close);
653 * the GEM object is not looked up based on its fake offset. To implement the 802 * the GEM object is not looked up based on its fake offset. To implement the
654 * DRM mmap operation, drivers should use the drm_gem_mmap() function. 803 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
655 * 804 *
805 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
806 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
807 * callers must verify access restrictions before calling this helper.
808 *
656 * NOTE: This function has to be protected with dev->struct_mutex 809 * NOTE: This function has to be protected with dev->struct_mutex
657 * 810 *
658 * Return 0 or success or -EINVAL if the object size is smaller than the VMA 811 * Return 0 or success or -EINVAL if the object size is smaller than the VMA
@@ -701,14 +854,17 @@ EXPORT_SYMBOL(drm_gem_mmap_obj);
701 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will 854 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
702 * contain the fake offset we created when the GTT map ioctl was called on 855 * contain the fake offset we created when the GTT map ioctl was called on
703 * the object) and map it with a call to drm_gem_mmap_obj(). 856 * the object) and map it with a call to drm_gem_mmap_obj().
857 *
858 * If the caller is not granted access to the buffer object, the mmap will fail
859 * with EACCES. Please see the vma manager for more information.
704 */ 860 */
705int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) 861int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
706{ 862{
707 struct drm_file *priv = filp->private_data; 863 struct drm_file *priv = filp->private_data;
708 struct drm_device *dev = priv->minor->dev; 864 struct drm_device *dev = priv->minor->dev;
709 struct drm_gem_mm *mm = dev->mm_private; 865 struct drm_gem_mm *mm = dev->mm_private;
710 struct drm_local_map *map = NULL; 866 struct drm_gem_object *obj;
711 struct drm_hash_item *hash; 867 struct drm_vma_offset_node *node;
712 int ret = 0; 868 int ret = 0;
713 869
714 if (drm_device_is_unplugged(dev)) 870 if (drm_device_is_unplugged(dev))
@@ -716,21 +872,19 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
716 872
717 mutex_lock(&dev->struct_mutex); 873 mutex_lock(&dev->struct_mutex);
718 874
719 if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) { 875 node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
876 vma_pages(vma));
877 if (!node) {
720 mutex_unlock(&dev->struct_mutex); 878 mutex_unlock(&dev->struct_mutex);
721 return drm_mmap(filp, vma); 879 return drm_mmap(filp, vma);
880 } else if (!drm_vma_node_is_allowed(node, filp)) {
881 mutex_unlock(&dev->struct_mutex);
882 return -EACCES;
722 } 883 }
723 884
724 map = drm_hash_entry(hash, struct drm_map_list, hash)->map; 885 obj = container_of(node, struct drm_gem_object, vma_node);
725 if (!map || 886 ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
726 ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
727 ret = -EPERM;
728 goto out_unlock;
729 }
730
731 ret = drm_gem_mmap_obj(map->handle, map->size, vma);
732 887
733out_unlock:
734 mutex_unlock(&dev->struct_mutex); 888 mutex_unlock(&dev->struct_mutex);
735 889
736 return ret; 890 return ret;
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 61c1d17f870c..6b51bf90df0e 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -27,11 +27,7 @@
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include <drm/drm.h> 28#include <drm/drm.h>
29#include <drm/drm_gem_cma_helper.h> 29#include <drm/drm_gem_cma_helper.h>
30 30#include <drm/drm_vma_manager.h>
31static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
32{
33 return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
34}
35 31
36/* 32/*
37 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory 33 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
@@ -172,8 +168,7 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
172{ 168{
173 struct drm_gem_cma_object *cma_obj; 169 struct drm_gem_cma_object *cma_obj;
174 170
175 if (gem_obj->map_list.map) 171 drm_gem_free_mmap_offset(gem_obj);
176 drm_gem_free_mmap_offset(gem_obj);
177 172
178 cma_obj = to_drm_gem_cma_obj(gem_obj); 173 cma_obj = to_drm_gem_cma_obj(gem_obj);
179 174
@@ -237,7 +232,7 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
237 return -EINVAL; 232 return -EINVAL;
238 } 233 }
239 234
240 *offset = get_gem_mmap_offset(gem_obj); 235 *offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
241 236
242 drm_gem_object_unreference(gem_obj); 237 drm_gem_object_unreference(gem_obj);
243 238
@@ -286,27 +281,16 @@ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
286} 281}
287EXPORT_SYMBOL_GPL(drm_gem_cma_mmap); 282EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
288 283
289/*
290 * drm_gem_cma_dumb_destroy - (struct drm_driver)->dumb_destroy callback function
291 */
292int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
293 struct drm_device *drm, unsigned int handle)
294{
295 return drm_gem_handle_delete(file_priv, handle);
296}
297EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy);
298
299#ifdef CONFIG_DEBUG_FS 284#ifdef CONFIG_DEBUG_FS
300void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m) 285void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
301{ 286{
302 struct drm_gem_object *obj = &cma_obj->base; 287 struct drm_gem_object *obj = &cma_obj->base;
303 struct drm_device *dev = obj->dev; 288 struct drm_device *dev = obj->dev;
304 uint64_t off = 0; 289 uint64_t off;
305 290
306 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 291 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
307 292
308 if (obj->map_list.map) 293 off = drm_vma_node_start(&obj->vma_node);
309 off = (uint64_t)obj->map_list.hash.key;
310 294
311 seq_printf(m, "%2d (%2d) %08llx %08Zx %p %d", 295 seq_printf(m, "%2d (%2d) %08llx %08Zx %p %d",
312 obj->name, obj->refcount.refcount.counter, 296 obj->name, obj->refcount.refcount.counter,
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index d4b20ceda3fb..53298320080b 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -207,7 +207,7 @@ static int drm_gem_one_name_info(int id, void *ptr, void *data)
207 207
208 seq_printf(m, "%6d %8zd %7d %8d\n", 208 seq_printf(m, "%6d %8zd %7d %8d\n",
209 obj->name, obj->size, 209 obj->name, obj->size,
210 atomic_read(&obj->handle_count), 210 obj->handle_count,
211 atomic_read(&obj->refcount.refcount)); 211 atomic_read(&obj->refcount.refcount));
212 return 0; 212 return 0;
213} 213}
@@ -218,7 +218,11 @@ int drm_gem_name_info(struct seq_file *m, void *data)
218 struct drm_device *dev = node->minor->dev; 218 struct drm_device *dev = node->minor->dev;
219 219
220 seq_printf(m, " name size handles refcount\n"); 220 seq_printf(m, " name size handles refcount\n");
221
222 mutex_lock(&dev->object_name_lock);
221 idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m); 223 idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
224 mutex_unlock(&dev->object_name_lock);
225
222 return 0; 226 return 0;
223} 227}
224 228
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index ffd7a7ba70d4..07247e2855a2 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -217,29 +217,30 @@ int drm_getclient(struct drm_device *dev, void *data,
217 struct drm_file *file_priv) 217 struct drm_file *file_priv)
218{ 218{
219 struct drm_client *client = data; 219 struct drm_client *client = data;
220 struct drm_file *pt;
221 int idx;
222 int i;
223 220
224 idx = client->idx; 221 /*
225 i = 0; 222 * Hollowed-out getclient ioctl to keep some dead old drm tests/tools
226 223 * not breaking completely. Userspace tools stop enumerating one they
227 mutex_lock(&dev->struct_mutex); 224 * get -EINVAL, hence this is the return value we need to hand back for
228 list_for_each_entry(pt, &dev->filelist, lhead) { 225 * no clients tracked.
229 if (i++ >= idx) { 226 *
230 client->auth = pt->authenticated; 227 * Unfortunately some clients (*cough* libva *cough*) use this in a fun
231 client->pid = pid_vnr(pt->pid); 228 * attempt to figure out whether they're authenticated or not. Since
232 client->uid = from_kuid_munged(current_user_ns(), pt->uid); 229 * that's the only thing they care about, give it to the directly
233 client->magic = pt->magic; 230 * instead of walking one giant list.
234 client->iocs = pt->ioctl_count; 231 */
235 mutex_unlock(&dev->struct_mutex); 232 if (client->idx == 0) {
236 233 client->auth = file_priv->authenticated;
237 return 0; 234 client->pid = pid_vnr(file_priv->pid);
238 } 235 client->uid = from_kuid_munged(current_user_ns(),
236 file_priv->uid);
237 client->magic = 0;
238 client->iocs = 0;
239
240 return 0;
241 } else {
242 return -EINVAL;
239 } 243 }
240 mutex_unlock(&dev->struct_mutex);
241
242 return -EINVAL;
243} 244}
244 245
245/** 246/**
@@ -256,21 +257,10 @@ int drm_getstats(struct drm_device *dev, void *data,
256 struct drm_file *file_priv) 257 struct drm_file *file_priv)
257{ 258{
258 struct drm_stats *stats = data; 259 struct drm_stats *stats = data;
259 int i;
260 260
261 /* Clear stats to prevent userspace from eating its stack garbage. */
261 memset(stats, 0, sizeof(*stats)); 262 memset(stats, 0, sizeof(*stats));
262 263
263 for (i = 0; i < dev->counters; i++) {
264 if (dev->types[i] == _DRM_STAT_LOCK)
265 stats->data[i].value =
266 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
267 else
268 stats->data[i].value = atomic_read(&dev->counts[i]);
269 stats->data[i].type = dev->types[i];
270 }
271
272 stats->count = dev->counters;
273
274 return 0; 264 return 0;
275} 265}
276 266
@@ -303,6 +293,9 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
303 case DRM_CAP_TIMESTAMP_MONOTONIC: 293 case DRM_CAP_TIMESTAMP_MONOTONIC:
304 req->value = drm_timestamp_monotonic; 294 req->value = drm_timestamp_monotonic;
305 break; 295 break;
296 case DRM_CAP_ASYNC_PAGE_FLIP:
297 req->value = dev->mode_config.async_page_flip;
298 break;
306 default: 299 default:
307 return -EINVAL; 300 return -EINVAL;
308 } 301 }
@@ -352,9 +345,6 @@ int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_pri
352 retcode = -EINVAL; 345 retcode = -EINVAL;
353 goto done; 346 goto done;
354 } 347 }
355
356 if (dev->driver->set_version)
357 dev->driver->set_version(dev, sv);
358 } 348 }
359 349
360done: 350done:
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 126d50ea181f..64e44fad8ae8 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -86,7 +86,6 @@ void drm_free_agp(DRM_AGP_MEM * handle, int pages)
86{ 86{
87 agp_free_memory(handle); 87 agp_free_memory(handle);
88} 88}
89EXPORT_SYMBOL(drm_free_agp);
90 89
91/** Wrapper around agp_bind_memory() */ 90/** Wrapper around agp_bind_memory() */
92int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) 91int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
@@ -99,7 +98,6 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
99{ 98{
100 return agp_unbind_memory(handle); 99 return agp_unbind_memory(handle);
101} 100}
102EXPORT_SYMBOL(drm_unbind_agp);
103 101
104#else /* __OS_HAS_AGP */ 102#else /* __OS_HAS_AGP */
105static inline void *agp_remap(unsigned long offset, unsigned long size, 103static inline void *agp_remap(unsigned long offset, unsigned long size,
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 543b9b3171d3..af93cc55259f 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -49,58 +49,18 @@
49 49
50#define MM_UNUSED_TARGET 4 50#define MM_UNUSED_TARGET 4
51 51
52static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic) 52static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
53{ 53 unsigned long size,
54 struct drm_mm_node *child; 54 unsigned alignment,
55 55 unsigned long color,
56 if (atomic) 56 enum drm_mm_search_flags flags);
57 child = kzalloc(sizeof(*child), GFP_ATOMIC); 57static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
58 else 58 unsigned long size,
59 child = kzalloc(sizeof(*child), GFP_KERNEL); 59 unsigned alignment,
60 60 unsigned long color,
61 if (unlikely(child == NULL)) { 61 unsigned long start,
62 spin_lock(&mm->unused_lock); 62 unsigned long end,
63 if (list_empty(&mm->unused_nodes)) 63 enum drm_mm_search_flags flags);
64 child = NULL;
65 else {
66 child =
67 list_entry(mm->unused_nodes.next,
68 struct drm_mm_node, node_list);
69 list_del(&child->node_list);
70 --mm->num_unused;
71 }
72 spin_unlock(&mm->unused_lock);
73 }
74 return child;
75}
76
77/* drm_mm_pre_get() - pre allocate drm_mm_node structure
78 * drm_mm: memory manager struct we are pre-allocating for
79 *
80 * Returns 0 on success or -ENOMEM if allocation fails.
81 */
82int drm_mm_pre_get(struct drm_mm *mm)
83{
84 struct drm_mm_node *node;
85
86 spin_lock(&mm->unused_lock);
87 while (mm->num_unused < MM_UNUSED_TARGET) {
88 spin_unlock(&mm->unused_lock);
89 node = kzalloc(sizeof(*node), GFP_KERNEL);
90 spin_lock(&mm->unused_lock);
91
92 if (unlikely(node == NULL)) {
93 int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
94 spin_unlock(&mm->unused_lock);
95 return ret;
96 }
97 ++mm->num_unused;
98 list_add_tail(&node->node_list, &mm->unused_nodes);
99 }
100 spin_unlock(&mm->unused_lock);
101 return 0;
102}
103EXPORT_SYMBOL(drm_mm_pre_get);
104 64
105static void drm_mm_insert_helper(struct drm_mm_node *hole_node, 65static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
106 struct drm_mm_node *node, 66 struct drm_mm_node *node,
@@ -147,33 +107,27 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
147 } 107 }
148} 108}
149 109
150struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm, 110int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
151 unsigned long start,
152 unsigned long size,
153 bool atomic)
154{ 111{
155 struct drm_mm_node *hole, *node; 112 struct drm_mm_node *hole;
156 unsigned long end = start + size; 113 unsigned long end = node->start + node->size;
157 unsigned long hole_start; 114 unsigned long hole_start;
158 unsigned long hole_end; 115 unsigned long hole_end;
159 116
117 BUG_ON(node == NULL);
118
119 /* Find the relevant hole to add our node to */
160 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { 120 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
161 if (hole_start > start || hole_end < end) 121 if (hole_start > node->start || hole_end < end)
162 continue; 122 continue;
163 123
164 node = drm_mm_kmalloc(mm, atomic);
165 if (unlikely(node == NULL))
166 return NULL;
167
168 node->start = start;
169 node->size = size;
170 node->mm = mm; 124 node->mm = mm;
171 node->allocated = 1; 125 node->allocated = 1;
172 126
173 INIT_LIST_HEAD(&node->hole_stack); 127 INIT_LIST_HEAD(&node->hole_stack);
174 list_add(&node->node_list, &hole->node_list); 128 list_add(&node->node_list, &hole->node_list);
175 129
176 if (start == hole_start) { 130 if (node->start == hole_start) {
177 hole->hole_follows = 0; 131 hole->hole_follows = 0;
178 list_del_init(&hole->hole_stack); 132 list_del_init(&hole->hole_stack);
179 } 133 }
@@ -184,31 +138,14 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
184 node->hole_follows = 1; 138 node->hole_follows = 1;
185 } 139 }
186 140
187 return node; 141 return 0;
188 } 142 }
189 143
190 WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size); 144 WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
191 return NULL; 145 node->start, node->size);
192} 146 return -ENOSPC;
193EXPORT_SYMBOL(drm_mm_create_block);
194
195struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
196 unsigned long size,
197 unsigned alignment,
198 unsigned long color,
199 int atomic)
200{
201 struct drm_mm_node *node;
202
203 node = drm_mm_kmalloc(hole_node->mm, atomic);
204 if (unlikely(node == NULL))
205 return NULL;
206
207 drm_mm_insert_helper(hole_node, node, size, alignment, color);
208
209 return node;
210} 147}
211EXPORT_SYMBOL(drm_mm_get_block_generic); 148EXPORT_SYMBOL(drm_mm_reserve_node);
212 149
213/** 150/**
214 * Search for free space and insert a preallocated memory node. Returns 151 * Search for free space and insert a preallocated memory node. Returns
@@ -217,12 +154,13 @@ EXPORT_SYMBOL(drm_mm_get_block_generic);
217 */ 154 */
218int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, 155int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
219 unsigned long size, unsigned alignment, 156 unsigned long size, unsigned alignment,
220 unsigned long color) 157 unsigned long color,
158 enum drm_mm_search_flags flags)
221{ 159{
222 struct drm_mm_node *hole_node; 160 struct drm_mm_node *hole_node;
223 161
224 hole_node = drm_mm_search_free_generic(mm, size, alignment, 162 hole_node = drm_mm_search_free_generic(mm, size, alignment,
225 color, 0); 163 color, flags);
226 if (!hole_node) 164 if (!hole_node)
227 return -ENOSPC; 165 return -ENOSPC;
228 166
@@ -231,13 +169,6 @@ int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
231} 169}
232EXPORT_SYMBOL(drm_mm_insert_node_generic); 170EXPORT_SYMBOL(drm_mm_insert_node_generic);
233 171
234int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
235 unsigned long size, unsigned alignment)
236{
237 return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
238}
239EXPORT_SYMBOL(drm_mm_insert_node);
240
241static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, 172static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
242 struct drm_mm_node *node, 173 struct drm_mm_node *node,
243 unsigned long size, unsigned alignment, 174 unsigned long size, unsigned alignment,
@@ -290,27 +221,6 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
290 } 221 }
291} 222}
292 223
293struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
294 unsigned long size,
295 unsigned alignment,
296 unsigned long color,
297 unsigned long start,
298 unsigned long end,
299 int atomic)
300{
301 struct drm_mm_node *node;
302
303 node = drm_mm_kmalloc(hole_node->mm, atomic);
304 if (unlikely(node == NULL))
305 return NULL;
306
307 drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
308 start, end);
309
310 return node;
311}
312EXPORT_SYMBOL(drm_mm_get_block_range_generic);
313
314/** 224/**
315 * Search for free space and insert a preallocated memory node. Returns 225 * Search for free space and insert a preallocated memory node. Returns
316 * -ENOSPC if no suitable free area is available. This is for range 226 * -ENOSPC if no suitable free area is available. This is for range
@@ -318,13 +228,14 @@ EXPORT_SYMBOL(drm_mm_get_block_range_generic);
318 */ 228 */
319int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, 229int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
320 unsigned long size, unsigned alignment, unsigned long color, 230 unsigned long size, unsigned alignment, unsigned long color,
321 unsigned long start, unsigned long end) 231 unsigned long start, unsigned long end,
232 enum drm_mm_search_flags flags)
322{ 233{
323 struct drm_mm_node *hole_node; 234 struct drm_mm_node *hole_node;
324 235
325 hole_node = drm_mm_search_free_in_range_generic(mm, 236 hole_node = drm_mm_search_free_in_range_generic(mm,
326 size, alignment, color, 237 size, alignment, color,
327 start, end, 0); 238 start, end, flags);
328 if (!hole_node) 239 if (!hole_node)
329 return -ENOSPC; 240 return -ENOSPC;
330 241
@@ -335,14 +246,6 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *n
335} 246}
336EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic); 247EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
337 248
338int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
339 unsigned long size, unsigned alignment,
340 unsigned long start, unsigned long end)
341{
342 return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
343}
344EXPORT_SYMBOL(drm_mm_insert_node_in_range);
345
346/** 249/**
347 * Remove a memory node from the allocator. 250 * Remove a memory node from the allocator.
348 */ 251 */
@@ -351,6 +254,9 @@ void drm_mm_remove_node(struct drm_mm_node *node)
351 struct drm_mm *mm = node->mm; 254 struct drm_mm *mm = node->mm;
352 struct drm_mm_node *prev_node; 255 struct drm_mm_node *prev_node;
353 256
257 if (WARN_ON(!node->allocated))
258 return;
259
354 BUG_ON(node->scanned_block || node->scanned_prev_free 260 BUG_ON(node->scanned_block || node->scanned_prev_free
355 || node->scanned_next_free); 261 || node->scanned_next_free);
356 262
@@ -377,28 +283,6 @@ void drm_mm_remove_node(struct drm_mm_node *node)
377} 283}
378EXPORT_SYMBOL(drm_mm_remove_node); 284EXPORT_SYMBOL(drm_mm_remove_node);
379 285
380/*
381 * Remove a memory node from the allocator and free the allocated struct
382 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
383 * drm_mm_get_block functions.
384 */
385void drm_mm_put_block(struct drm_mm_node *node)
386{
387
388 struct drm_mm *mm = node->mm;
389
390 drm_mm_remove_node(node);
391
392 spin_lock(&mm->unused_lock);
393 if (mm->num_unused < MM_UNUSED_TARGET) {
394 list_add(&node->node_list, &mm->unused_nodes);
395 ++mm->num_unused;
396 } else
397 kfree(node);
398 spin_unlock(&mm->unused_lock);
399}
400EXPORT_SYMBOL(drm_mm_put_block);
401
402static int check_free_hole(unsigned long start, unsigned long end, 286static int check_free_hole(unsigned long start, unsigned long end,
403 unsigned long size, unsigned alignment) 287 unsigned long size, unsigned alignment)
404{ 288{
@@ -414,11 +298,11 @@ static int check_free_hole(unsigned long start, unsigned long end,
414 return end >= start + size; 298 return end >= start + size;
415} 299}
416 300
417struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, 301static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
418 unsigned long size, 302 unsigned long size,
419 unsigned alignment, 303 unsigned alignment,
420 unsigned long color, 304 unsigned long color,
421 bool best_match) 305 enum drm_mm_search_flags flags)
422{ 306{
423 struct drm_mm_node *entry; 307 struct drm_mm_node *entry;
424 struct drm_mm_node *best; 308 struct drm_mm_node *best;
@@ -441,7 +325,7 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
441 if (!check_free_hole(adj_start, adj_end, size, alignment)) 325 if (!check_free_hole(adj_start, adj_end, size, alignment))
442 continue; 326 continue;
443 327
444 if (!best_match) 328 if (!(flags & DRM_MM_SEARCH_BEST))
445 return entry; 329 return entry;
446 330
447 if (entry->size < best_size) { 331 if (entry->size < best_size) {
@@ -452,15 +336,14 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
452 336
453 return best; 337 return best;
454} 338}
455EXPORT_SYMBOL(drm_mm_search_free_generic);
456 339
457struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, 340static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
458 unsigned long size, 341 unsigned long size,
459 unsigned alignment, 342 unsigned alignment,
460 unsigned long color, 343 unsigned long color,
461 unsigned long start, 344 unsigned long start,
462 unsigned long end, 345 unsigned long end,
463 bool best_match) 346 enum drm_mm_search_flags flags)
464{ 347{
465 struct drm_mm_node *entry; 348 struct drm_mm_node *entry;
466 struct drm_mm_node *best; 349 struct drm_mm_node *best;
@@ -488,7 +371,7 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
488 if (!check_free_hole(adj_start, adj_end, size, alignment)) 371 if (!check_free_hole(adj_start, adj_end, size, alignment))
489 continue; 372 continue;
490 373
491 if (!best_match) 374 if (!(flags & DRM_MM_SEARCH_BEST))
492 return entry; 375 return entry;
493 376
494 if (entry->size < best_size) { 377 if (entry->size < best_size) {
@@ -499,7 +382,6 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
499 382
500 return best; 383 return best;
501} 384}
502EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
503 385
504/** 386/**
505 * Moves an allocation. To be used with embedded struct drm_mm_node. 387 * Moves an allocation. To be used with embedded struct drm_mm_node.
@@ -634,8 +516,8 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
634 * corrupted. 516 * corrupted.
635 * 517 *
636 * When the scan list is empty, the selected memory nodes can be freed. An 518 * When the scan list is empty, the selected memory nodes can be freed. An
637 * immediately following drm_mm_search_free with best_match = 0 will then return 519 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
638 * the just freed block (because it's at the top of the free_stack list). 520 * return the just freed block (because it's at the top of the free_stack list).
639 * 521 *
640 * Returns one if this block should be evicted, zero otherwise. Will always 522 * Returns one if this block should be evicted, zero otherwise. Will always
641 * return zero when no hole has been found. 523 * return zero when no hole has been found.
@@ -672,10 +554,7 @@ EXPORT_SYMBOL(drm_mm_clean);
672void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) 554void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
673{ 555{
674 INIT_LIST_HEAD(&mm->hole_stack); 556 INIT_LIST_HEAD(&mm->hole_stack);
675 INIT_LIST_HEAD(&mm->unused_nodes);
676 mm->num_unused = 0;
677 mm->scanned_blocks = 0; 557 mm->scanned_blocks = 0;
678 spin_lock_init(&mm->unused_lock);
679 558
680 /* Clever trick to avoid a special case in the free hole tracking. */ 559 /* Clever trick to avoid a special case in the free hole tracking. */
681 INIT_LIST_HEAD(&mm->head_node.node_list); 560 INIT_LIST_HEAD(&mm->head_node.node_list);
@@ -695,22 +574,8 @@ EXPORT_SYMBOL(drm_mm_init);
695 574
696void drm_mm_takedown(struct drm_mm * mm) 575void drm_mm_takedown(struct drm_mm * mm)
697{ 576{
698 struct drm_mm_node *entry, *next; 577 WARN(!list_empty(&mm->head_node.node_list),
699 578 "Memory manager not clean during takedown.\n");
700 if (WARN(!list_empty(&mm->head_node.node_list),
701 "Memory manager not clean. Delaying takedown\n")) {
702 return;
703 }
704
705 spin_lock(&mm->unused_lock);
706 list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
707 list_del(&entry->node_list);
708 kfree(entry);
709 --mm->num_unused;
710 }
711 spin_unlock(&mm->unused_lock);
712
713 BUG_ON(mm->num_unused != 0);
714} 579}
715EXPORT_SYMBOL(drm_mm_takedown); 580EXPORT_SYMBOL(drm_mm_takedown);
716 581
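
Taken together, the drm_mm.c changes above drop the old get_block/put_block allocation style (where drm_mm allocated nodes itself, with drm_mm_pre_get() covering atomic contexts) in favour of caller-provided nodes plus explicit search flags. A hedged driver-side sketch of the resulting interface, with a hypothetical mm instance and made-up sizes:

#include <drm/drm_mm.h>

/* Sketch only: "mm" is a drm_mm the driver already set up with drm_mm_init();
 * the node structs are embedded in driver-private objects. */
static int example_mm_usage(struct drm_mm *mm, struct drm_mm_node *stolen,
                            struct drm_mm_node *obj)
{
        int ret;

        /* Claim a range that is already known to be in use (what
         * drm_mm_create_block() used to do): fill in start/size first. */
        stolen->start = 0;
        stolen->size = 256;
        ret = drm_mm_reserve_node(mm, stolen);
        if (ret)
                return ret;     /* -ENOSPC: no hole covers that range */

        /* Let drm_mm pick a location, preferring the tightest fit
         * (the DRM_MM_SEARCH_BEST flag replaces the old best_match bool). */
        return drm_mm_insert_node_generic(mm, obj, 16, 0, 0,
                                          DRM_MM_SEARCH_BEST);
}
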
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index a6729bfe6860..fc2adb62b757 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -596,27 +596,6 @@ void drm_mode_set_name(struct drm_display_mode *mode)
596EXPORT_SYMBOL(drm_mode_set_name); 596EXPORT_SYMBOL(drm_mode_set_name);
597 597
598/** 598/**
599 * drm_mode_list_concat - move modes from one list to another
600 * @head: source list
601 * @new: dst list
602 *
603 * LOCKING:
604 * Caller must ensure both lists are locked.
605 *
606 * Move all the modes from @head to @new.
607 */
608void drm_mode_list_concat(struct list_head *head, struct list_head *new)
609{
610
611 struct list_head *entry, *tmp;
612
613 list_for_each_safe(entry, tmp, head) {
614 list_move_tail(entry, new);
615 }
616}
617EXPORT_SYMBOL(drm_mode_list_concat);
618
619/**
620 * drm_mode_width - get the width of a mode 599 * drm_mode_width - get the width of a mode
621 * @mode: mode 600 * @mode: mode
622 * 601 *
@@ -923,43 +902,6 @@ void drm_mode_validate_size(struct drm_device *dev,
923EXPORT_SYMBOL(drm_mode_validate_size); 902EXPORT_SYMBOL(drm_mode_validate_size);
924 903
925/** 904/**
926 * drm_mode_validate_clocks - validate modes against clock limits
927 * @dev: DRM device
928 * @mode_list: list of modes to check
929 * @min: minimum clock rate array
930 * @max: maximum clock rate array
931 * @n_ranges: number of clock ranges (size of arrays)
932 *
933 * LOCKING:
934 * Caller must hold a lock protecting @mode_list.
935 *
936 * Some code may need to check a mode list against the clock limits of the
937 * device in question. This function walks the mode list, testing to make
938 * sure each mode falls within a given range (defined by @min and @max
939 * arrays) and sets @mode->status as needed.
940 */
941void drm_mode_validate_clocks(struct drm_device *dev,
942 struct list_head *mode_list,
943 int *min, int *max, int n_ranges)
944{
945 struct drm_display_mode *mode;
946 int i;
947
948 list_for_each_entry(mode, mode_list, head) {
949 bool good = false;
950 for (i = 0; i < n_ranges; i++) {
951 if (mode->clock >= min[i] && mode->clock <= max[i]) {
952 good = true;
953 break;
954 }
955 }
956 if (!good)
957 mode->status = MODE_CLOCK_RANGE;
958 }
959}
960EXPORT_SYMBOL(drm_mode_validate_clocks);
961
962/**
963 * drm_mode_prune_invalid - remove invalid modes from mode list 905 * drm_mode_prune_invalid - remove invalid modes from mode list
964 * @dev: DRM device 906 * @dev: DRM device
965 * @mode_list: list of modes to check 907 * @mode_list: list of modes to check
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 80c0b2b29801..1f96cee6eee8 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -52,10 +52,8 @@
52drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align) 52drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
53{ 53{
54 drm_dma_handle_t *dmah; 54 drm_dma_handle_t *dmah;
55#if 1
56 unsigned long addr; 55 unsigned long addr;
57 size_t sz; 56 size_t sz;
58#endif
59 57
60 /* pci_alloc_consistent only guarantees alignment to the smallest 58 /* pci_alloc_consistent only guarantees alignment to the smallest
61 * PAGE_SIZE order which is greater than or equal to the requested size. 59 * PAGE_SIZE order which is greater than or equal to the requested size.
@@ -97,10 +95,8 @@ EXPORT_SYMBOL(drm_pci_alloc);
97 */ 95 */
98void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah) 96void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
99{ 97{
100#if 1
101 unsigned long addr; 98 unsigned long addr;
102 size_t sz; 99 size_t sz;
103#endif
104 100
105 if (dmah->vaddr) { 101 if (dmah->vaddr) {
106 /* XXX - Is virt_to_page() legal for consistent mem? */ 102 /* XXX - Is virt_to_page() legal for consistent mem? */
@@ -276,17 +272,26 @@ static int drm_pci_agp_init(struct drm_device *dev)
276 DRM_ERROR("Cannot initialize the agpgart module.\n"); 272 DRM_ERROR("Cannot initialize the agpgart module.\n");
277 return -EINVAL; 273 return -EINVAL;
278 } 274 }
279 if (drm_core_has_MTRR(dev)) { 275 if (dev->agp) {
280 if (dev->agp) 276 dev->agp->agp_mtrr = arch_phys_wc_add(
281 dev->agp->agp_mtrr = arch_phys_wc_add( 277 dev->agp->agp_info.aper_base,
282 dev->agp->agp_info.aper_base, 278 dev->agp->agp_info.aper_size *
283 dev->agp->agp_info.aper_size * 279 1024 * 1024);
284 1024 * 1024);
285 } 280 }
286 } 281 }
287 return 0; 282 return 0;
288} 283}
289 284
285static void drm_pci_agp_destroy(struct drm_device *dev)
286{
287 if (drm_core_has_AGP(dev) && dev->agp) {
288 arch_phys_wc_del(dev->agp->agp_mtrr);
289 drm_agp_clear(dev);
290 drm_agp_destroy(dev->agp);
291 dev->agp = NULL;
292 }
293}
294
290static struct drm_bus drm_pci_bus = { 295static struct drm_bus drm_pci_bus = {
291 .bus_type = DRIVER_BUS_PCI, 296 .bus_type = DRIVER_BUS_PCI,
292 .get_irq = drm_pci_get_irq, 297 .get_irq = drm_pci_get_irq,
@@ -295,6 +300,7 @@ static struct drm_bus drm_pci_bus = {
295 .set_unique = drm_pci_set_unique, 300 .set_unique = drm_pci_set_unique,
296 .irq_by_busid = drm_pci_irq_by_busid, 301 .irq_by_busid = drm_pci_irq_by_busid,
297 .agp_init = drm_pci_agp_init, 302 .agp_init = drm_pci_agp_init,
303 .agp_destroy = drm_pci_agp_destroy,
298}; 304};
299 305
300/** 306/**
@@ -348,6 +354,12 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
348 goto err_g2; 354 goto err_g2;
349 } 355 }
350 356
357 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
358 ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
359 if (ret)
360 goto err_g21;
361 }
362
351 if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY))) 363 if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
352 goto err_g3; 364 goto err_g3;
353 365
@@ -377,6 +389,9 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
377err_g4: 389err_g4:
378 drm_put_minor(&dev->primary); 390 drm_put_minor(&dev->primary);
379err_g3: 391err_g3:
392 if (dev->render)
393 drm_put_minor(&dev->render);
394err_g21:
380 if (drm_core_check_feature(dev, DRIVER_MODESET)) 395 if (drm_core_check_feature(dev, DRIVER_MODESET))
381 drm_put_minor(&dev->control); 396 drm_put_minor(&dev->control);
382err_g2: 397err_g2:
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index b8a282ea8751..f7a18c6ba4c4 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -28,7 +28,7 @@
28#include <linux/export.h> 28#include <linux/export.h>
29#include <drm/drmP.h> 29#include <drm/drmP.h>
30 30
31/** 31/*
32 * Register. 32 * Register.
33 * 33 *
34 * \param platdev - Platform device structure 34 * \param platdev - Platform device structure
@@ -39,8 +39,8 @@
39 * Try and register, if we fail to register, backout previous work. 39 * Try and register, if we fail to register, backout previous work.
40 */ 40 */
41 41
42int drm_get_platform_dev(struct platform_device *platdev, 42static int drm_get_platform_dev(struct platform_device *platdev,
43 struct drm_driver *driver) 43 struct drm_driver *driver)
44{ 44{
45 struct drm_device *dev; 45 struct drm_device *dev;
46 int ret; 46 int ret;
@@ -69,6 +69,12 @@ int drm_get_platform_dev(struct platform_device *platdev,
69 goto err_g1; 69 goto err_g1;
70 } 70 }
71 71
72 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
73 ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
74 if (ret)
75 goto err_g11;
76 }
77
72 ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY); 78 ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
73 if (ret) 79 if (ret)
74 goto err_g2; 80 goto err_g2;
@@ -100,6 +106,9 @@ int drm_get_platform_dev(struct platform_device *platdev,
100err_g3: 106err_g3:
101 drm_put_minor(&dev->primary); 107 drm_put_minor(&dev->primary);
102err_g2: 108err_g2:
109 if (dev->render)
110 drm_put_minor(&dev->render);
111err_g11:
103 if (drm_core_check_feature(dev, DRIVER_MODESET)) 112 if (drm_core_check_feature(dev, DRIVER_MODESET))
104 drm_put_minor(&dev->control); 113 drm_put_minor(&dev->control);
105err_g1: 114err_g1:
@@ -107,7 +116,6 @@ err_g1:
107 mutex_unlock(&drm_global_mutex); 116 mutex_unlock(&drm_global_mutex);
108 return ret; 117 return ret;
109} 118}
110EXPORT_SYMBOL(drm_get_platform_dev);
111 119
112static int drm_platform_get_irq(struct drm_device *dev) 120static int drm_platform_get_irq(struct drm_device *dev)
113{ 121{
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 85e450e3241c..276d470f7b3e 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -83,6 +83,34 @@ static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
83 return 0; 83 return 0;
84} 84}
85 85
86static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
87 uint32_t handle)
88{
89 struct drm_prime_member *member;
90
91 list_for_each_entry(member, &prime_fpriv->head, entry) {
92 if (member->handle == handle)
93 return member->dma_buf;
94 }
95
96 return NULL;
97}
98
99static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
100 struct dma_buf *dma_buf,
101 uint32_t *handle)
102{
103 struct drm_prime_member *member;
104
105 list_for_each_entry(member, &prime_fpriv->head, entry) {
106 if (member->dma_buf == dma_buf) {
107 *handle = member->handle;
108 return 0;
109 }
110 }
111 return -ENOENT;
112}
113
86static int drm_gem_map_attach(struct dma_buf *dma_buf, 114static int drm_gem_map_attach(struct dma_buf *dma_buf,
87 struct device *target_dev, 115 struct device *target_dev,
88 struct dma_buf_attachment *attach) 116 struct dma_buf_attachment *attach)
@@ -131,9 +159,8 @@ static void drm_gem_map_detach(struct dma_buf *dma_buf,
131 attach->priv = NULL; 159 attach->priv = NULL;
132} 160}
133 161
134static void drm_prime_remove_buf_handle_locked( 162void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
135 struct drm_prime_file_private *prime_fpriv, 163 struct dma_buf *dma_buf)
136 struct dma_buf *dma_buf)
137{ 164{
138 struct drm_prime_member *member, *safe; 165 struct drm_prime_member *member, *safe;
139 166
@@ -167,8 +194,6 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
167 if (WARN_ON(prime_attach->dir != DMA_NONE)) 194 if (WARN_ON(prime_attach->dir != DMA_NONE))
168 return ERR_PTR(-EBUSY); 195 return ERR_PTR(-EBUSY);
169 196
170 mutex_lock(&obj->dev->struct_mutex);
171
172 sgt = obj->dev->driver->gem_prime_get_sg_table(obj); 197 sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
173 198
174 if (!IS_ERR(sgt)) { 199 if (!IS_ERR(sgt)) {
@@ -182,7 +207,6 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
182 } 207 }
183 } 208 }
184 209
185 mutex_unlock(&obj->dev->struct_mutex);
186 return sgt; 210 return sgt;
187} 211}
188 212
@@ -192,16 +216,14 @@ static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
192 /* nothing to be done here */ 216 /* nothing to be done here */
193} 217}
194 218
195static void drm_gem_dmabuf_release(struct dma_buf *dma_buf) 219void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
196{ 220{
197 struct drm_gem_object *obj = dma_buf->priv; 221 struct drm_gem_object *obj = dma_buf->priv;
198 222
199 if (obj->export_dma_buf == dma_buf) { 223 /* drop the reference on the export fd holds */
200 /* drop the reference on the export fd holds */ 224 drm_gem_object_unreference_unlocked(obj);
201 obj->export_dma_buf = NULL;
202 drm_gem_object_unreference_unlocked(obj);
203 }
204} 225}
226EXPORT_SYMBOL(drm_gem_dmabuf_release);
205 227
206static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf) 228static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
207{ 229{
@@ -300,62 +322,107 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
300} 322}
301EXPORT_SYMBOL(drm_gem_prime_export); 323EXPORT_SYMBOL(drm_gem_prime_export);
302 324
325static struct dma_buf *export_and_register_object(struct drm_device *dev,
326 struct drm_gem_object *obj,
327 uint32_t flags)
328{
329 struct dma_buf *dmabuf;
330
331 /* prevent races with concurrent gem_close. */
332 if (obj->handle_count == 0) {
333 dmabuf = ERR_PTR(-ENOENT);
334 return dmabuf;
335 }
336
337 dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
338 if (IS_ERR(dmabuf)) {
339 /* normally the created dma-buf takes ownership of the ref,
340 * but if that fails then drop the ref
341 */
342 return dmabuf;
343 }
344
345 /*
346 * Note that callers do not need to clean up the export cache
347 * since the check for obj->handle_count guarantees that someone
348 * will clean it up.
349 */
350 obj->dma_buf = dmabuf;
351 get_dma_buf(obj->dma_buf);
352 /* Grab a new ref since the caller's reference is now used by the dma-buf */
353 drm_gem_object_reference(obj);
354
355 return dmabuf;
356}
357
303int drm_gem_prime_handle_to_fd(struct drm_device *dev, 358int drm_gem_prime_handle_to_fd(struct drm_device *dev,
304 struct drm_file *file_priv, uint32_t handle, uint32_t flags, 359 struct drm_file *file_priv, uint32_t handle, uint32_t flags,
305 int *prime_fd) 360 int *prime_fd)
306{ 361{
307 struct drm_gem_object *obj; 362 struct drm_gem_object *obj;
308 void *buf;
309 int ret = 0; 363 int ret = 0;
310 struct dma_buf *dmabuf; 364 struct dma_buf *dmabuf;
311 365
366 mutex_lock(&file_priv->prime.lock);
312 obj = drm_gem_object_lookup(dev, file_priv, handle); 367 obj = drm_gem_object_lookup(dev, file_priv, handle);
313 if (!obj) 368 if (!obj) {
314 return -ENOENT; 369 ret = -ENOENT;
370 goto out_unlock;
371 }
315 372
316 mutex_lock(&file_priv->prime.lock); 373 dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
374 if (dmabuf) {
375 get_dma_buf(dmabuf);
376 goto out_have_handle;
377 }
378
379 mutex_lock(&dev->object_name_lock);
317 /* re-export the original imported object */ 380 /* re-export the original imported object */
318 if (obj->import_attach) { 381 if (obj->import_attach) {
319 dmabuf = obj->import_attach->dmabuf; 382 dmabuf = obj->import_attach->dmabuf;
383 get_dma_buf(dmabuf);
320 goto out_have_obj; 384 goto out_have_obj;
321 } 385 }
322 386
323 if (obj->export_dma_buf) { 387 if (obj->dma_buf) {
324 dmabuf = obj->export_dma_buf; 388 get_dma_buf(obj->dma_buf);
389 dmabuf = obj->dma_buf;
325 goto out_have_obj; 390 goto out_have_obj;
326 } 391 }
327 392
328 buf = dev->driver->gem_prime_export(dev, obj, flags); 393 dmabuf = export_and_register_object(dev, obj, flags);
329 if (IS_ERR(buf)) { 394 if (IS_ERR(dmabuf)) {
330 /* normally the created dma-buf takes ownership of the ref, 395 /* normally the created dma-buf takes ownership of the ref,
331 * but if that fails then drop the ref 396 * but if that fails then drop the ref
332 */ 397 */
333 ret = PTR_ERR(buf); 398 ret = PTR_ERR(dmabuf);
399 mutex_unlock(&dev->object_name_lock);
334 goto out; 400 goto out;
335 } 401 }
336 obj->export_dma_buf = buf;
337 402
338 /* if we've exported this buffer the cheat and add it to the import list 403out_have_obj:
339 * so we get the correct handle back 404 /*
405 * If we've exported this buffer then cheat and add it to the import list
406 * so we get the correct handle back. We must do this under the
407 * protection of dev->object_name_lock to ensure that a racing gem close
408 * ioctl doesn't fail to remove this buffer handle from the cache.
340 */ 409 */
341 ret = drm_prime_add_buf_handle(&file_priv->prime, 410 ret = drm_prime_add_buf_handle(&file_priv->prime,
342 obj->export_dma_buf, handle); 411 dmabuf, handle);
412 mutex_unlock(&dev->object_name_lock);
343 if (ret) 413 if (ret)
344 goto fail_put_dmabuf; 414 goto fail_put_dmabuf;
345 415
346 ret = dma_buf_fd(buf, flags); 416out_have_handle:
347 if (ret < 0)
348 goto fail_rm_handle;
349
350 *prime_fd = ret;
351 mutex_unlock(&file_priv->prime.lock);
352 return 0;
353
354out_have_obj:
355 get_dma_buf(dmabuf);
356 ret = dma_buf_fd(dmabuf, flags); 417 ret = dma_buf_fd(dmabuf, flags);
418 /*
419 * We must _not_ remove the buffer from the handle cache since the newly
420 * created dma buf is already linked in the global obj->dma_buf pointer,
421 * and that is invariant as long as a userspace gem handle exists.
422 * Closing the handle will clean out the cache anyway, so we don't leak.
423 */
357 if (ret < 0) { 424 if (ret < 0) {
358 dma_buf_put(dmabuf); 425 goto fail_put_dmabuf;
359 } else { 426 } else {
360 *prime_fd = ret; 427 *prime_fd = ret;
361 ret = 0; 428 ret = 0;
@@ -363,15 +430,13 @@ out_have_obj:
363 430
364 goto out; 431 goto out;
365 432
366fail_rm_handle:
367 drm_prime_remove_buf_handle_locked(&file_priv->prime, buf);
368fail_put_dmabuf: 433fail_put_dmabuf:
369 /* clear NOT to be checked when releasing dma_buf */ 434 dma_buf_put(dmabuf);
370 obj->export_dma_buf = NULL;
371 dma_buf_put(buf);
372out: 435out:
373 drm_gem_object_unreference_unlocked(obj); 436 drm_gem_object_unreference_unlocked(obj);
437out_unlock:
374 mutex_unlock(&file_priv->prime.lock); 438 mutex_unlock(&file_priv->prime.lock);
439
375 return ret; 440 return ret;
376} 441}
377EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); 442EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
@@ -446,19 +511,26 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
446 511
447 ret = drm_prime_lookup_buf_handle(&file_priv->prime, 512 ret = drm_prime_lookup_buf_handle(&file_priv->prime,
448 dma_buf, handle); 513 dma_buf, handle);
449 if (!ret) { 514 if (ret == 0)
450 ret = 0;
451 goto out_put; 515 goto out_put;
452 }
453 516
454 /* never seen this one, need to import */ 517 /* never seen this one, need to import */
518 mutex_lock(&dev->object_name_lock);
455 obj = dev->driver->gem_prime_import(dev, dma_buf); 519 obj = dev->driver->gem_prime_import(dev, dma_buf);
456 if (IS_ERR(obj)) { 520 if (IS_ERR(obj)) {
457 ret = PTR_ERR(obj); 521 ret = PTR_ERR(obj);
458 goto out_put; 522 goto out_unlock;
459 } 523 }
460 524
461 ret = drm_gem_handle_create(file_priv, obj, handle); 525 if (obj->dma_buf) {
526 WARN_ON(obj->dma_buf != dma_buf);
527 } else {
528 obj->dma_buf = dma_buf;
529 get_dma_buf(dma_buf);
530 }
531
532 /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
533 ret = drm_gem_handle_create_tail(file_priv, obj, handle);
462 drm_gem_object_unreference_unlocked(obj); 534 drm_gem_object_unreference_unlocked(obj);
463 if (ret) 535 if (ret)
464 goto out_put; 536 goto out_put;
@@ -478,7 +550,9 @@ fail:
478 /* hmm, if driver attached, we are relying on the free-object path 550 /* hmm, if driver attached, we are relying on the free-object path
479 * to detach.. which seems ok.. 551 * to detach.. which seems ok..
480 */ 552 */
481 drm_gem_object_handle_unreference_unlocked(obj); 553 drm_gem_handle_delete(file_priv, *handle);
554out_unlock:
555 mutex_unlock(&dev->object_name_lock);
482out_put: 556out_put:
483 dma_buf_put(dma_buf); 557 dma_buf_put(dma_buf);
484 mutex_unlock(&file_priv->prime.lock); 558 mutex_unlock(&file_priv->prime.lock);
@@ -618,25 +692,3 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
618 WARN_ON(!list_empty(&prime_fpriv->head)); 692 WARN_ON(!list_empty(&prime_fpriv->head));
619} 693}
620EXPORT_SYMBOL(drm_prime_destroy_file_private); 694EXPORT_SYMBOL(drm_prime_destroy_file_private);
621
622int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
623{
624 struct drm_prime_member *member;
625
626 list_for_each_entry(member, &prime_fpriv->head, entry) {
627 if (member->dma_buf == dma_buf) {
628 *handle = member->handle;
629 return 0;
630 }
631 }
632 return -ENOENT;
633}
634EXPORT_SYMBOL(drm_prime_lookup_buf_handle);
635
636void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
637{
638 mutex_lock(&prime_fpriv->lock);
639 drm_prime_remove_buf_handle_locked(prime_fpriv, dma_buf);
640 mutex_unlock(&prime_fpriv->lock);
641}
642EXPORT_SYMBOL(drm_prime_remove_buf_handle);
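
The prime rework above keeps a per-file handle/dma-buf cache (now filled under dev->object_name_lock) so that export and import are idempotent. A small userspace sketch (not part of this patch) of the round trip the cache is protecting, using libdrm's existing PRIME wrappers and assuming fd is an open DRM fd that already owns a GEM handle:

#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>

static int prime_roundtrip(int fd, uint32_t handle)
{
        int prime_fd;
        uint32_t handle2;

        /* DRM_IOCTL_PRIME_HANDLE_TO_FD: repeated exports of the same handle
         * hand back the same dma-buf rather than creating a new one. */
        if (drmPrimeHandleToFD(fd, handle, DRM_CLOEXEC, &prime_fd))
                return -1;

        /* DRM_IOCTL_PRIME_FD_TO_HANDLE: importing our own export must come
         * back as the original handle, courtesy of the lookup cache. */
        if (drmPrimeFDToHandle(fd, prime_fd, &handle2))
                return -1;

        close(prime_fd);
        return handle2 == handle ? 0 : -1;
}
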
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
deleted file mode 100644
index d7f2324b4fb1..000000000000
--- a/drivers/gpu/drm/drm_proc.c
+++ /dev/null
@@ -1,209 +0,0 @@
1/**
2 * \file drm_proc.c
3 * /proc support for DRM
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 *
8 * \par Acknowledgements:
9 * Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix
10 * the problem with the proc files not outputting all their information.
11 */
12
13/*
14 * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com
15 *
16 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
17 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
18 * All Rights Reserved.
19 *
20 * Permission is hereby granted, free of charge, to any person obtaining a
21 * copy of this software and associated documentation files (the "Software"),
22 * to deal in the Software without restriction, including without limitation
23 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
24 * and/or sell copies of the Software, and to permit persons to whom the
25 * Software is furnished to do so, subject to the following conditions:
26 *
27 * The above copyright notice and this permission notice (including the next
28 * paragraph) shall be included in all copies or substantial portions of the
29 * Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
32 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
34 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
35 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
36 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
37 * OTHER DEALINGS IN THE SOFTWARE.
38 */
39
40#include <linux/seq_file.h>
41#include <linux/slab.h>
42#include <linux/export.h>
43#include <drm/drmP.h>
44
45/***************************************************
46 * Initialization, etc.
47 **************************************************/
48
49/**
50 * Proc file list.
51 */
52static const struct drm_info_list drm_proc_list[] = {
53 {"name", drm_name_info, 0},
54 {"vm", drm_vm_info, 0},
55 {"clients", drm_clients_info, 0},
56 {"bufs", drm_bufs_info, 0},
57 {"gem_names", drm_gem_name_info, DRIVER_GEM},
58#if DRM_DEBUG_CODE
59 {"vma", drm_vma_info, 0},
60#endif
61};
62#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
63
64static int drm_proc_open(struct inode *inode, struct file *file)
65{
66 struct drm_info_node* node = PDE_DATA(inode);
67
68 return single_open(file, node->info_ent->show, node);
69}
70
71static const struct file_operations drm_proc_fops = {
72 .owner = THIS_MODULE,
73 .open = drm_proc_open,
74 .read = seq_read,
75 .llseek = seq_lseek,
76 .release = single_release,
77};
78
79
80/**
81 * Initialize a given set of proc files for a device
82 *
83 * \param files The array of files to create
84 * \param count The number of files given
85 * \param root DRI proc dir entry.
86 * \param minor device minor number
87 * \return Zero on success, non-zero on failure
88 *
89 * Create a given set of proc files represented by an array of
90 * gdm_proc_lists in the given root directory.
91 */
92static int drm_proc_create_files(const struct drm_info_list *files, int count,
93 struct proc_dir_entry *root, struct drm_minor *minor)
94{
95 struct drm_device *dev = minor->dev;
96 struct proc_dir_entry *ent;
97 struct drm_info_node *tmp;
98 int i;
99
100 for (i = 0; i < count; i++) {
101 u32 features = files[i].driver_features;
102
103 if (features != 0 &&
104 (dev->driver->driver_features & features) != features)
105 continue;
106
107 tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
108 if (!tmp)
109 return -1;
110
111 tmp->minor = minor;
112 tmp->info_ent = &files[i];
113 list_add(&tmp->list, &minor->proc_nodes.list);
114
115 ent = proc_create_data(files[i].name, S_IRUGO, root,
116 &drm_proc_fops, tmp);
117 if (!ent) {
118 DRM_ERROR("Cannot create /proc/dri/%u/%s\n",
119 minor->index, files[i].name);
120 list_del(&tmp->list);
121 kfree(tmp);
122 return -1;
123 }
124 }
125 return 0;
126}
127
128/**
129 * Initialize the DRI proc filesystem for a device
130 *
131 * \param dev DRM device
132 * \param root DRI proc dir entry.
133 * \param dev_root resulting DRI device proc dir entry.
134 * \return root entry pointer on success, or NULL on failure.
135 *
136 * Create the DRI proc root entry "/proc/dri", the device proc root entry
137 * "/proc/dri/%minor%/", and each entry in proc_list as
138 * "/proc/dri/%minor%/%name%".
139 */
140int drm_proc_init(struct drm_minor *minor, struct proc_dir_entry *root)
141{
142 char name[12];
143 int ret;
144
145 INIT_LIST_HEAD(&minor->proc_nodes.list);
146 sprintf(name, "%u", minor->index);
147 minor->proc_root = proc_mkdir(name, root);
148 if (!minor->proc_root) {
149 DRM_ERROR("Cannot create /proc/dri/%s\n", name);
150 return -1;
151 }
152
153 ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
154 minor->proc_root, minor);
155 if (ret) {
156 remove_proc_subtree(name, root);
157 minor->proc_root = NULL;
158 DRM_ERROR("Failed to create core drm proc files\n");
159 return ret;
160 }
161
162 return 0;
163}
164
165static int drm_proc_remove_files(const struct drm_info_list *files, int count,
166 struct drm_minor *minor)
167{
168 struct list_head *pos, *q;
169 struct drm_info_node *tmp;
170 int i;
171
172 for (i = 0; i < count; i++) {
173 list_for_each_safe(pos, q, &minor->proc_nodes.list) {
174 tmp = list_entry(pos, struct drm_info_node, list);
175 if (tmp->info_ent == &files[i]) {
176 remove_proc_entry(files[i].name,
177 minor->proc_root);
178 list_del(pos);
179 kfree(tmp);
180 }
181 }
182 }
183 return 0;
184}
185
186/**
187 * Cleanup the proc filesystem resources.
188 *
189 * \param minor device minor number.
190 * \param root DRI proc dir entry.
191 * \param dev_root DRI device proc dir entry.
192 * \return always zero.
193 *
194 * Remove all proc entries created by proc_init().
195 */
196int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
197{
198 char name[64];
199
200 if (!root || !minor->proc_root)
201 return 0;
202
203 drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
204
205 sprintf(name, "%d", minor->index);
206 remove_proc_subtree(name, root);
207 return 0;
208}
209
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index d87f60bbc330..1c78406f6e71 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -46,7 +46,7 @@ static inline void *drm_vmalloc_dma(unsigned long size)
46#endif 46#endif
47} 47}
48 48
49void drm_sg_cleanup(struct drm_sg_mem * entry) 49static void drm_sg_cleanup(struct drm_sg_mem * entry)
50{ 50{
51 struct page *page; 51 struct page *page;
52 int i; 52 int i;
@@ -64,19 +64,32 @@ void drm_sg_cleanup(struct drm_sg_mem * entry)
64 kfree(entry); 64 kfree(entry);
65} 65}
66 66
67void drm_legacy_sg_cleanup(struct drm_device *dev)
68{
69 if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
70 !drm_core_check_feature(dev, DRIVER_MODESET)) {
71 drm_sg_cleanup(dev->sg);
72 dev->sg = NULL;
73 }
74}
67#ifdef _LP64 75#ifdef _LP64
68# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1))) 76# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
69#else 77#else
70# define ScatterHandle(x) (unsigned int)(x) 78# define ScatterHandle(x) (unsigned int)(x)
71#endif 79#endif
72 80
73int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request) 81int drm_sg_alloc(struct drm_device *dev, void *data,
82 struct drm_file *file_priv)
74{ 83{
84 struct drm_scatter_gather *request = data;
75 struct drm_sg_mem *entry; 85 struct drm_sg_mem *entry;
76 unsigned long pages, i, j; 86 unsigned long pages, i, j;
77 87
78 DRM_DEBUG("\n"); 88 DRM_DEBUG("\n");
79 89
90 if (drm_core_check_feature(dev, DRIVER_MODESET))
91 return -EINVAL;
92
80 if (!drm_core_check_feature(dev, DRIVER_SG)) 93 if (!drm_core_check_feature(dev, DRIVER_SG))
81 return -EINVAL; 94 return -EINVAL;
82 95
@@ -181,21 +194,15 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
181 return -ENOMEM; 194 return -ENOMEM;
182} 195}
183 196
184int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
185 struct drm_file *file_priv)
186{
187 struct drm_scatter_gather *request = data;
188
189 return drm_sg_alloc(dev, request);
190
191}
192
193int drm_sg_free(struct drm_device *dev, void *data, 197int drm_sg_free(struct drm_device *dev, void *data,
194 struct drm_file *file_priv) 198 struct drm_file *file_priv)
195{ 199{
196 struct drm_scatter_gather *request = data; 200 struct drm_scatter_gather *request = data;
197 struct drm_sg_mem *entry; 201 struct drm_sg_mem *entry;
198 202
203 if (drm_core_check_feature(dev, DRIVER_MODESET))
204 return -EINVAL;
205
199 if (!drm_core_check_feature(dev, DRIVER_SG)) 206 if (!drm_core_check_feature(dev, DRIVER_SG))
200 return -EINVAL; 207 return -EINVAL;
201 208
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 327ca19cda85..e7eb0276f7f1 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -40,6 +40,9 @@
40unsigned int drm_debug = 0; /* 1 to enable debug output */ 40unsigned int drm_debug = 0; /* 1 to enable debug output */
41EXPORT_SYMBOL(drm_debug); 41EXPORT_SYMBOL(drm_debug);
42 42
43unsigned int drm_rnodes = 0; /* 1 to enable experimental render nodes API */
44EXPORT_SYMBOL(drm_rnodes);
45
43unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */ 46unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
44EXPORT_SYMBOL(drm_vblank_offdelay); 47EXPORT_SYMBOL(drm_vblank_offdelay);
45 48
@@ -56,11 +59,13 @@ MODULE_AUTHOR(CORE_AUTHOR);
56MODULE_DESCRIPTION(CORE_DESC); 59MODULE_DESCRIPTION(CORE_DESC);
57MODULE_LICENSE("GPL and additional rights"); 60MODULE_LICENSE("GPL and additional rights");
58MODULE_PARM_DESC(debug, "Enable debug output"); 61MODULE_PARM_DESC(debug, "Enable debug output");
62MODULE_PARM_DESC(rnodes, "Enable experimental render nodes API");
59MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]"); 63MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
60MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]"); 64MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
61MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps"); 65MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
62 66
63module_param_named(debug, drm_debug, int, 0600); 67module_param_named(debug, drm_debug, int, 0600);
68module_param_named(rnodes, drm_rnodes, int, 0600);
64module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); 69module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
65module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); 70module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
66module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); 71module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
@@ -68,7 +73,6 @@ module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
68struct idr drm_minors_idr; 73struct idr drm_minors_idr;
69 74
70struct class *drm_class; 75struct class *drm_class;
71struct proc_dir_entry *drm_proc_root;
72struct dentry *drm_debugfs_root; 76struct dentry *drm_debugfs_root;
73 77
74int drm_err(const char *func, const char *format, ...) 78int drm_err(const char *func, const char *format, ...)
@@ -113,12 +117,12 @@ static int drm_minor_get_id(struct drm_device *dev, int type)
113 int base = 0, limit = 63; 117 int base = 0, limit = 63;
114 118
115 if (type == DRM_MINOR_CONTROL) { 119 if (type == DRM_MINOR_CONTROL) {
116 base += 64; 120 base += 64;
117 limit = base + 127; 121 limit = base + 63;
118 } else if (type == DRM_MINOR_RENDER) { 122 } else if (type == DRM_MINOR_RENDER) {
119 base += 128; 123 base += 128;
120 limit = base + 255; 124 limit = base + 63;
121 } 125 }
122 126
123 mutex_lock(&dev->struct_mutex); 127 mutex_lock(&dev->struct_mutex);
124 ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL); 128 ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL);
@@ -288,13 +292,7 @@ int drm_fill_in_dev(struct drm_device *dev,
288 goto error_out_unreg; 292 goto error_out_unreg;
289 } 293 }
290 294
291 295 drm_legacy_ctxbitmap_init(dev);
292
293 retcode = drm_ctxbitmap_init(dev);
294 if (retcode) {
295 DRM_ERROR("Cannot allocate memory for context bitmap.\n");
296 goto error_out_unreg;
297 }
298 296
299 if (driver->driver_features & DRIVER_GEM) { 297 if (driver->driver_features & DRIVER_GEM) {
300 retcode = drm_gem_init(dev); 298 retcode = drm_gem_init(dev);
@@ -321,9 +319,8 @@ EXPORT_SYMBOL(drm_fill_in_dev);
321 * \param sec-minor structure to hold the assigned minor 319 * \param sec-minor structure to hold the assigned minor
322 * \return negative number on failure. 320 * \return negative number on failure.
323 * 321 *
324 * Search an empty entry and initialize it to the given parameters, and 322 * Search an empty entry and initialize it to the given parameters. This
325 * create the proc init entry via proc_init(). This routine assigns 323 * routine assigns minor numbers to secondary heads of multi-headed cards
326 * minor numbers to secondary heads of multi-headed cards
327 */ 324 */
328int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type) 325int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
329{ 326{
@@ -351,20 +348,11 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
351 348
352 idr_replace(&drm_minors_idr, new_minor, minor_id); 349 idr_replace(&drm_minors_idr, new_minor, minor_id);
353 350
354 if (type == DRM_MINOR_LEGACY) {
355 ret = drm_proc_init(new_minor, drm_proc_root);
356 if (ret) {
357 DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
358 goto err_mem;
359 }
360 } else
361 new_minor->proc_root = NULL;
362
363#if defined(CONFIG_DEBUG_FS) 351#if defined(CONFIG_DEBUG_FS)
364 ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root); 352 ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
365 if (ret) { 353 if (ret) {
366 DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n"); 354 DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
367 goto err_g2; 355 goto err_mem;
368 } 356 }
369#endif 357#endif
370 358
@@ -372,7 +360,7 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
372 if (ret) { 360 if (ret) {
373 printk(KERN_ERR 361 printk(KERN_ERR
374 "DRM: Error sysfs_device_add.\n"); 362 "DRM: Error sysfs_device_add.\n");
375 goto err_g2; 363 goto err_debugfs;
376 } 364 }
377 *minor = new_minor; 365 *minor = new_minor;
378 366
@@ -380,10 +368,11 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
380 return 0; 368 return 0;
381 369
382 370
383err_g2: 371err_debugfs:
384 if (new_minor->type == DRM_MINOR_LEGACY) 372#if defined(CONFIG_DEBUG_FS)
385 drm_proc_cleanup(new_minor, drm_proc_root); 373 drm_debugfs_cleanup(new_minor);
386err_mem: 374err_mem:
375#endif
387 kfree(new_minor); 376 kfree(new_minor);
388err_idr: 377err_idr:
389 idr_remove(&drm_minors_idr, minor_id); 378 idr_remove(&drm_minors_idr, minor_id);
@@ -397,10 +386,6 @@ EXPORT_SYMBOL(drm_get_minor);
397 * 386 *
398 * \param sec_minor - structure to be released 387 * \param sec_minor - structure to be released
399 * \return always zero 388 * \return always zero
400 *
401 * Cleans up the proc resources. Not legal for this to be the
402 * last minor released.
403 *
404 */ 389 */
405int drm_put_minor(struct drm_minor **minor_p) 390int drm_put_minor(struct drm_minor **minor_p)
406{ 391{
@@ -408,8 +393,6 @@ int drm_put_minor(struct drm_minor **minor_p)
408 393
409 DRM_DEBUG("release secondary minor %d\n", minor->index); 394 DRM_DEBUG("release secondary minor %d\n", minor->index);
410 395
411 if (minor->type == DRM_MINOR_LEGACY)
412 drm_proc_cleanup(minor, drm_proc_root);
413#if defined(CONFIG_DEBUG_FS) 396#if defined(CONFIG_DEBUG_FS)
414 drm_debugfs_cleanup(minor); 397 drm_debugfs_cleanup(minor);
415#endif 398#endif
@@ -451,16 +434,11 @@ void drm_put_dev(struct drm_device *dev)
451 434
452 drm_lastclose(dev); 435 drm_lastclose(dev);
453 436
454 if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp)
455 arch_phys_wc_del(dev->agp->agp_mtrr);
456
457 if (dev->driver->unload) 437 if (dev->driver->unload)
458 dev->driver->unload(dev); 438 dev->driver->unload(dev);
459 439
460 if (drm_core_has_AGP(dev) && dev->agp) { 440 if (dev->driver->bus->agp_destroy)
461 kfree(dev->agp); 441 dev->driver->bus->agp_destroy(dev);
462 dev->agp = NULL;
463 }
464 442
465 drm_vblank_cleanup(dev); 443 drm_vblank_cleanup(dev);
466 444
@@ -468,11 +446,14 @@ void drm_put_dev(struct drm_device *dev)
468 drm_rmmap(dev, r_list->map); 446 drm_rmmap(dev, r_list->map);
469 drm_ht_remove(&dev->map_hash); 447 drm_ht_remove(&dev->map_hash);
470 448
471 drm_ctxbitmap_cleanup(dev); 449 drm_legacy_ctxbitmap_cleanup(dev);
472 450
473 if (drm_core_check_feature(dev, DRIVER_MODESET)) 451 if (drm_core_check_feature(dev, DRIVER_MODESET))
474 drm_put_minor(&dev->control); 452 drm_put_minor(&dev->control);
475 453
454 if (dev->render)
455 drm_put_minor(&dev->render);
456
476 if (driver->driver_features & DRIVER_GEM) 457 if (driver->driver_features & DRIVER_GEM)
477 drm_gem_destroy(dev); 458 drm_gem_destroy(dev);
478 459
@@ -489,6 +470,8 @@ void drm_unplug_dev(struct drm_device *dev)
489 /* for a USB device */ 470 /* for a USB device */
490 if (drm_core_check_feature(dev, DRIVER_MODESET)) 471 if (drm_core_check_feature(dev, DRIVER_MODESET))
491 drm_unplug_minor(dev->control); 472 drm_unplug_minor(dev->control);
473 if (dev->render)
474 drm_unplug_minor(dev->render);
492 drm_unplug_minor(dev->primary); 475 drm_unplug_minor(dev->primary);
493 476
494 mutex_lock(&drm_global_mutex); 477 mutex_lock(&drm_global_mutex);
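
With the drm.rnodes module parameter introduced above (and the matching DRM_MINOR_RENDER registration in the PCI, platform and USB bus code), a DRIVER_RENDER-capable driver exposes an extra minor starting at 128. A hedged userspace sketch; /dev/dri/renderD128 below is the conventional name of the first render node and is only illustrative:

#include <fcntl.h>

/* Open the first render node; requires booting with drm.rnodes=1 while the
 * render-node API is still marked experimental. */
static int open_render_node(void)
{
        return open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
}
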
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index 34a156f0c336..87664723b9ce 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -33,6 +33,12 @@ int drm_get_usb_dev(struct usb_interface *interface,
33 if (ret) 33 if (ret)
34 goto err_g1; 34 goto err_g1;
35 35
36 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
37 ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
38 if (ret)
39 goto err_g11;
40 }
41
36 ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY); 42 ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
37 if (ret) 43 if (ret)
38 goto err_g2; 44 goto err_g2;
@@ -62,6 +68,9 @@ int drm_get_usb_dev(struct usb_interface *interface,
62err_g3: 68err_g3:
63 drm_put_minor(&dev->primary); 69 drm_put_minor(&dev->primary);
64err_g2: 70err_g2:
71 if (dev->render)
72 drm_put_minor(&dev->render);
73err_g11:
65 drm_put_minor(&dev->control); 74 drm_put_minor(&dev->control);
66err_g1: 75err_g1:
67 kfree(dev); 76 kfree(dev);
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index feb20035b2c4..b5c5af7328df 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -251,8 +251,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
251 switch (map->type) { 251 switch (map->type) {
252 case _DRM_REGISTERS: 252 case _DRM_REGISTERS:
253 case _DRM_FRAME_BUFFER: 253 case _DRM_FRAME_BUFFER:
254 if (drm_core_has_MTRR(dev)) 254 arch_phys_wc_del(map->mtrr);
255 arch_phys_wc_del(map->mtrr);
256 iounmap(map->handle); 255 iounmap(map->handle);
257 break; 256 break;
258 case _DRM_SHM: 257 case _DRM_SHM:
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
new file mode 100644
index 000000000000..63b471205072
--- /dev/null
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -0,0 +1,436 @@
1/*
2 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
3 * Copyright (c) 2012 David Airlie <airlied@linux.ie>
4 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25#include <drm/drmP.h>
26#include <drm/drm_mm.h>
27#include <drm/drm_vma_manager.h>
28#include <linux/fs.h>
29#include <linux/mm.h>
30#include <linux/module.h>
31#include <linux/rbtree.h>
32#include <linux/slab.h>
33#include <linux/spinlock.h>
34#include <linux/types.h>
35
36/**
37 * DOC: vma offset manager
38 *
39 * The vma-manager is responsible for mapping arbitrary driver-dependent memory
40 * regions into the linear user address-space. It provides offsets to the
41 * caller which can then be used on the address_space of the drm-device. It
42 * takes care to not overlap regions, size them appropriately and to not
43 * confuse mm-core by inconsistent fake vm_pgoff fields.
44 * Drivers shouldn't use this for object placement in VMEM. This manager should
45 * only be used to manage mappings into linear user-space VMs.
46 *
47 * We use drm_mm as backend to manage object allocations. But it is highly
48 * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
49 * speed up offset lookups.
50 *
51 * You must not use multiple offset managers on a single address_space.
52 * Otherwise, mm-core will be unable to tear down memory mappings as the VM will
53 * no longer be linear. Please use VM_NONLINEAR in that case and implement your
54 * own offset managers.
55 *
56 * This offset manager works on page-based addresses. That is, every argument
57 * and return code (with the exception of drm_vma_node_offset_addr()) is given
58 * in number of pages, not number of bytes. That means, object sizes and offsets
59 * must always be page-aligned (as usual).
60 * If you want to get a valid byte-based user-space address for a given offset,
61 * please see drm_vma_node_offset_addr().
62 *
63 * In addition to offset management, the vma offset manager also handles access
64 * management. For every open-file context that is allowed to access a given
65 * node, you must call drm_vma_node_allow(). Otherwise, an mmap() call on this
66 * open-file with the offset of the node will fail with -EACCES. To revoke
67 * access again, use drm_vma_node_revoke(). However, the caller is responsible
68 * for destroying already existing mappings, if required.
69 */
70
71/**
72 * drm_vma_offset_manager_init - Initialize new offset-manager
73 * @mgr: Manager object
74 * @page_offset: Offset of available memory area (page-based)
75 * @size: Size of available address space range (page-based)
76 *
77 * Initialize a new offset-manager. The offset and area size available for the
78 * manager are given as @page_offset and @size. Both are interpreted as
79 * page-numbers, not bytes.
80 *
81 * Adding/removing nodes from the manager is locked internally and protected
 82 * against concurrent access. However, node allocation and destruction are left
 83 * to the caller. While calling into the vma-manager, a given node must
84 * always be guaranteed to be referenced.
85 */
86void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
87 unsigned long page_offset, unsigned long size)
88{
89 rwlock_init(&mgr->vm_lock);
90 mgr->vm_addr_space_rb = RB_ROOT;
91 drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
92}
93EXPORT_SYMBOL(drm_vma_offset_manager_init);
94
95/**
96 * drm_vma_offset_manager_destroy() - Destroy offset manager
97 * @mgr: Manager object
98 *
99 * Destroy an object manager which was previously created via
100 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
101 * before destroying the manager. Otherwise, drm_mm will refuse to free the
102 * requested resources.
103 *
104 * The manager must not be accessed after this function is called.
105 */
106void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
107{
108 /* take the lock to protect against buggy drivers */
109 write_lock(&mgr->vm_lock);
110 drm_mm_takedown(&mgr->vm_addr_space_mm);
111 write_unlock(&mgr->vm_lock);
112}
113EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
114
115/**
116 * drm_vma_offset_lookup() - Find node in offset space
117 * @mgr: Manager object
118 * @start: Start address for object (page-based)
119 * @pages: Size of object (page-based)
120 *
121 * Find a node given a start address and object size. This returns the _best_
122 * match for the given range. That is, @start may point somewhere into a valid
123 * region and the containing node will be returned, as long as the node spans
124 * the whole requested area (given the size in number of pages as @pages).
125 *
126 * RETURNS:
127 * Returns NULL if no suitable node can be found. Otherwise, the best match
128 * is returned. It's the caller's responsibility to make sure the node doesn't
129 * get destroyed before the caller can access it.
130 */
131struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
132 unsigned long start,
133 unsigned long pages)
134{
135 struct drm_vma_offset_node *node;
136
137 read_lock(&mgr->vm_lock);
138 node = drm_vma_offset_lookup_locked(mgr, start, pages);
139 read_unlock(&mgr->vm_lock);
140
141 return node;
142}
143EXPORT_SYMBOL(drm_vma_offset_lookup);
144
145/**
146 * drm_vma_offset_lookup_locked() - Find node in offset space
147 * @mgr: Manager object
148 * @start: Start address for object (page-based)
149 * @pages: Size of object (page-based)
150 *
151 * Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
152 * manually. See drm_vma_offset_lock_lookup() for an example.
153 *
154 * RETURNS:
155 * Returns NULL if no suitable node can be found. Otherwise, the best match
156 * is returned.
157 */
158struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
159 unsigned long start,
160 unsigned long pages)
161{
162 struct drm_vma_offset_node *node, *best;
163 struct rb_node *iter;
164 unsigned long offset;
165
166 iter = mgr->vm_addr_space_rb.rb_node;
167 best = NULL;
168
169 while (likely(iter)) {
170 node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
171 offset = node->vm_node.start;
172 if (start >= offset) {
173 iter = iter->rb_right;
174 best = node;
175 if (start == offset)
176 break;
177 } else {
178 iter = iter->rb_left;
179 }
180 }
181
182 /* verify that the node spans the requested area */
183 if (best) {
184 offset = best->vm_node.start + best->vm_node.size;
185 if (offset < start + pages)
186 best = NULL;
187 }
188
189 return best;
190}
191EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
192
193/* internal helper to link @node into the rb-tree */
194static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
195 struct drm_vma_offset_node *node)
196{
197 struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
198 struct rb_node *parent = NULL;
199 struct drm_vma_offset_node *iter_node;
200
201 while (likely(*iter)) {
202 parent = *iter;
203 iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);
204
205 if (node->vm_node.start < iter_node->vm_node.start)
206 iter = &(*iter)->rb_left;
207 else if (node->vm_node.start > iter_node->vm_node.start)
208 iter = &(*iter)->rb_right;
209 else
210 BUG();
211 }
212
213 rb_link_node(&node->vm_rb, parent, iter);
214 rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
215}
216
217/**
218 * drm_vma_offset_add() - Add offset node to manager
219 * @mgr: Manager object
220 * @node: Node to be added
221 * @pages: Allocation size visible to user-space (in number of pages)
222 *
223 * Add a node to the offset-manager. If the node was already added, this does
224 * nothing and returns 0. @pages is the size of the object given in number of
225 * pages.
226 * After this call succeeds, you can access the offset of the node until it
227 * is removed again.
228 *
229 * If this call fails, it is safe to retry the operation or to call
230 * drm_vma_offset_remove() anyway; no cleanup is required in that
231 * case.
232 *
233 * @pages is not required to be the same size as the underlying memory object
234 * that you want to map. It only limits the size that user-space can map into
235 * their address space.
236 *
237 * RETURNS:
238 * 0 on success, negative error code on failure.
239 */
240int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
241 struct drm_vma_offset_node *node, unsigned long pages)
242{
243 int ret;
244
245 write_lock(&mgr->vm_lock);
246
247 if (drm_mm_node_allocated(&node->vm_node)) {
248 ret = 0;
249 goto out_unlock;
250 }
251
252 ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
253 pages, 0, DRM_MM_SEARCH_DEFAULT);
254 if (ret)
255 goto out_unlock;
256
257 _drm_vma_offset_add_rb(mgr, node);
258
259out_unlock:
260 write_unlock(&mgr->vm_lock);
261 return ret;
262}
263EXPORT_SYMBOL(drm_vma_offset_add);
264
265/**
266 * drm_vma_offset_remove() - Remove offset node from manager
267 * @mgr: Manager object
268 * @node: Node to be removed
269 *
270 * Remove a node from the offset manager. If the node wasn't added before, this
271 * does nothing. After this call returns, the offset and size will be 0 until a
272 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
273 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
274 * offset is allocated.
275 */
276void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
277 struct drm_vma_offset_node *node)
278{
279 write_lock(&mgr->vm_lock);
280
281 if (drm_mm_node_allocated(&node->vm_node)) {
282 rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
283 drm_mm_remove_node(&node->vm_node);
284 memset(&node->vm_node, 0, sizeof(node->vm_node));
285 }
286
287 write_unlock(&mgr->vm_lock);
288}
289EXPORT_SYMBOL(drm_vma_offset_remove);
290
291/**
292 * drm_vma_node_allow - Add open-file to list of allowed users
293 * @node: Node to modify
294 * @filp: Open file to add
295 *
296 * Add @filp to the list of allowed open-files for this node. If @filp is
297 * already on this list, the ref-count is incremented.
298 *
299 * The list of allowed-users is preserved across drm_vma_offset_add() and
300 * drm_vma_offset_remove() calls. You may even call it if the node is currently
301 * not added to any offset-manager.
302 *
303 * You must remove all open-files the same number of times as you added them
304 * before destroying the node. Otherwise, you will leak memory.
305 *
306 * This is locked against concurrent access internally.
307 *
308 * RETURNS:
309 * 0 on success, negative error code on internal failure (out-of-mem)
310 */
311int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
312{
313 struct rb_node **iter;
314 struct rb_node *parent = NULL;
315 struct drm_vma_offset_file *new, *entry;
316 int ret = 0;
317
318 /* Preallocate entry to avoid atomic allocations below. It is quite
319 * unlikely that an open-file is added twice to a single node so we
320 * don't optimize for this case. OOM is checked below only if the entry
321 * is actually used. */
322 new = kmalloc(sizeof(*entry), GFP_KERNEL);
323
324 write_lock(&node->vm_lock);
325
326 iter = &node->vm_files.rb_node;
327
328 while (likely(*iter)) {
329 parent = *iter;
330 entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
331
332 if (filp == entry->vm_filp) {
333 entry->vm_count++;
334 goto unlock;
335 } else if (filp > entry->vm_filp) {
336 iter = &(*iter)->rb_right;
337 } else {
338 iter = &(*iter)->rb_left;
339 }
340 }
341
342 if (!new) {
343 ret = -ENOMEM;
344 goto unlock;
345 }
346
347 new->vm_filp = filp;
348 new->vm_count = 1;
349 rb_link_node(&new->vm_rb, parent, iter);
350 rb_insert_color(&new->vm_rb, &node->vm_files);
351 new = NULL;
352
353unlock:
354 write_unlock(&node->vm_lock);
355 kfree(new);
356 return ret;
357}
358EXPORT_SYMBOL(drm_vma_node_allow);
359
360/**
361 * drm_vma_node_revoke - Remove open-file from list of allowed users
362 * @node: Node to modify
363 * @filp: Open file to remove
364 *
365 * Decrement the ref-count of @filp in the list of allowed open-files on @node.
366 * If the ref-count drops to zero, remove @filp from the list. You must call
367 * this once for every drm_vma_node_allow() on @filp.
368 *
369 * This is locked against concurrent access internally.
370 *
371 * If @filp is not on the list, nothing is done.
372 */
373void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
374{
375 struct drm_vma_offset_file *entry;
376 struct rb_node *iter;
377
378 write_lock(&node->vm_lock);
379
380 iter = node->vm_files.rb_node;
381 while (likely(iter)) {
382 entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
383 if (filp == entry->vm_filp) {
384 if (!--entry->vm_count) {
385 rb_erase(&entry->vm_rb, &node->vm_files);
386 kfree(entry);
387 }
388 break;
389 } else if (filp > entry->vm_filp) {
390 iter = iter->rb_right;
391 } else {
392 iter = iter->rb_left;
393 }
394 }
395
396 write_unlock(&node->vm_lock);
397}
398EXPORT_SYMBOL(drm_vma_node_revoke);
399
400/**
401 * drm_vma_node_is_allowed - Check whether an open-file is granted access
402 * @node: Node to check
403 * @filp: Open-file to check for
404 *
405 * Search the list in @node to check whether @filp is currently on the list
406 * of allowed open-files (see drm_vma_node_allow()).
407 *
408 * This is locked against concurrent access internally.
409 *
410 * RETURNS:
411 * true iff @filp is on the list
412 */
413bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
414 struct file *filp)
415{
416 struct drm_vma_offset_file *entry;
417 struct rb_node *iter;
418
419 read_lock(&node->vm_lock);
420
421 iter = node->vm_files.rb_node;
422 while (likely(iter)) {
423 entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
424 if (filp == entry->vm_filp)
425 break;
426 else if (filp > entry->vm_filp)
427 iter = iter->rb_right;
428 else
429 iter = iter->rb_left;
430 }
431
432 read_unlock(&node->vm_lock);
433
434 return iter;
435}
436EXPORT_SYMBOL(drm_vma_node_is_allowed);
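
As a quick orientation for the new API above, here is a rough usage sketch of how a driver could hook the offset manager into its fake-offset mmap path. All foo_* names, the placement of the manager in a driver-private structure and the simplified error handling are assumptions for illustration; only the drm_vma_*() calls themselves come from drm_vma_manager.c as added above.

/*
 * Illustrative only: foo_* names and the manager placement are assumptions.
 * The drm_vma_*() calls are the ones introduced above.
 */
#include <drm/drm_vma_manager.h>
#include <linux/fs.h>
#include <linux/mm.h>

struct foo_device {
	struct drm_vma_offset_manager vma_mgr;	/* one manager per address_space */
};

struct foo_obj {
	struct drm_vma_offset_node vma_node;
	unsigned long npages;			/* object size in pages */
};

/* hand out a fake mmap offset and grant the calling open-file access */
static int foo_obj_mmap_offset(struct foo_device *fdev, struct foo_obj *obj,
			       struct file *filp, __u64 *offset)
{
	int ret;

	ret = drm_vma_offset_add(&fdev->vma_mgr, &obj->vma_node, obj->npages);
	if (ret)
		return ret;

	ret = drm_vma_node_allow(&obj->vma_node, filp);
	if (ret) {
		/* the allow-list survives add/remove, so just drop the offset */
		drm_vma_offset_remove(&fdev->vma_mgr, &obj->vma_node);
		return ret;
	}

	/* byte-based address user-space passes to mmap() */
	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	return 0;
}

/* resolve vma->vm_pgoff back to the object in the driver's mmap handler */
static struct foo_obj *foo_obj_from_vma(struct foo_device *fdev,
					struct vm_area_struct *vma)
{
	struct drm_vma_offset_node *node;

	node = drm_vma_offset_lookup(&fdev->vma_mgr, vma->vm_pgoff,
				     vma_pages(vma));
	if (!node || !drm_vma_node_is_allowed(node, vma->vm_file))
		return NULL;

	return container_of(node, struct foo_obj, vma_node);
}

On the file-close path a driver would pair this with drm_vma_node_revoke(), matching the allow/revoke contract described in the comments above.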
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 772c62a6e2ac..4752f223e5b2 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -1,11 +1,12 @@
1config DRM_EXYNOS 1config DRM_EXYNOS
2 tristate "DRM Support for Samsung SoC EXYNOS Series" 2 tristate "DRM Support for Samsung SoC EXYNOS Series"
3 depends on DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM) 3 depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select FB_CFB_FILLRECT 5 select FB_CFB_FILLRECT
6 select FB_CFB_COPYAREA 6 select FB_CFB_COPYAREA
7 select FB_CFB_IMAGEBLIT 7 select FB_CFB_IMAGEBLIT
8 select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE 8 select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
9 select VIDEOMODE_HELPERS
9 help 10 help
10 Choose this option if you have a Samsung SoC EXYNOS chipset. 11 Choose this option if you have a Samsung SoC EXYNOS chipset.
11 If M is selected the module will be called exynosdrm. 12 If M is selected the module will be called exynosdrm.
@@ -24,9 +25,8 @@ config DRM_EXYNOS_DMABUF
24 25
25config DRM_EXYNOS_FIMD 26config DRM_EXYNOS_FIMD
26 bool "Exynos DRM FIMD" 27 bool "Exynos DRM FIMD"
27 depends on OF && DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM 28 depends on DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
28 select FB_MODE_HELPERS 29 select FB_MODE_HELPERS
29 select VIDEOMODE_HELPERS
30 help 30 help
31 Choose this option if you want to use Exynos FIMD for DRM. 31 Choose this option if you want to use Exynos FIMD for DRM.
32 32
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 30ef41bcd7b8..6a8c84e7c839 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -15,7 +15,7 @@
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/i2c.h> 17#include <linux/i2c.h>
18 18#include <linux/of.h>
19 19
20#include "exynos_drm_drv.h" 20#include "exynos_drm_drv.h"
21#include "exynos_hdmi.h" 21#include "exynos_hdmi.h"
@@ -41,13 +41,6 @@ static int s5p_ddc_remove(struct i2c_client *client)
41 return 0; 41 return 0;
42} 42}
43 43
44static struct i2c_device_id ddc_idtable[] = {
45 {"s5p_ddc", 0},
46 {"exynos5-hdmiddc", 0},
47 { },
48};
49
50#ifdef CONFIG_OF
51static struct of_device_id hdmiddc_match_types[] = { 44static struct of_device_id hdmiddc_match_types[] = {
52 { 45 {
53 .compatible = "samsung,exynos5-hdmiddc", 46 .compatible = "samsung,exynos5-hdmiddc",
@@ -57,15 +50,13 @@ static struct of_device_id hdmiddc_match_types[] = {
57 /* end node */ 50 /* end node */
58 } 51 }
59}; 52};
60#endif
61 53
62struct i2c_driver ddc_driver = { 54struct i2c_driver ddc_driver = {
63 .driver = { 55 .driver = {
64 .name = "exynos-hdmiddc", 56 .name = "exynos-hdmiddc",
65 .owner = THIS_MODULE, 57 .owner = THIS_MODULE,
66 .of_match_table = of_match_ptr(hdmiddc_match_types), 58 .of_match_table = hdmiddc_match_types,
67 }, 59 },
68 .id_table = ddc_idtable,
69 .probe = s5p_ddc_probe, 60 .probe = s5p_ddc_probe,
70 .remove = s5p_ddc_remove, 61 .remove = s5p_ddc_remove,
71 .command = NULL, 62 .command = NULL,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index b8ac06d92fbf..3445a0f3a6b2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -149,10 +149,8 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
149 DRM_DEBUG_KMS("desired size = 0x%x\n", size); 149 DRM_DEBUG_KMS("desired size = 0x%x\n", size);
150 150
151 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); 151 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
152 if (!buffer) { 152 if (!buffer)
153 DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
154 return NULL; 153 return NULL;
155 }
156 154
157 buffer->size = size; 155 buffer->size = size;
158 return buffer; 156 return buffer;
@@ -161,11 +159,6 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
161void exynos_drm_fini_buf(struct drm_device *dev, 159void exynos_drm_fini_buf(struct drm_device *dev,
162 struct exynos_drm_gem_buf *buffer) 160 struct exynos_drm_gem_buf *buffer)
163{ 161{
164 if (!buffer) {
165 DRM_DEBUG_KMS("buffer is null.\n");
166 return;
167 }
168
169 kfree(buffer); 162 kfree(buffer);
170 buffer = NULL; 163 buffer = NULL;
171} 164}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 02a8bc5226ca..e082efb2fece 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -17,6 +17,7 @@
17#include <drm/exynos_drm.h> 17#include <drm/exynos_drm.h>
18#include "exynos_drm_drv.h" 18#include "exynos_drm_drv.h"
19#include "exynos_drm_encoder.h" 19#include "exynos_drm_encoder.h"
20#include "exynos_drm_connector.h"
20 21
21#define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\ 22#define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\
22 drm_connector) 23 drm_connector)
@@ -28,35 +29,6 @@ struct exynos_drm_connector {
28 uint32_t dpms; 29 uint32_t dpms;
29}; 30};
30 31
31/* convert exynos_video_timings to drm_display_mode */
32static inline void
33convert_to_display_mode(struct drm_display_mode *mode,
34 struct exynos_drm_panel_info *panel)
35{
36 struct fb_videomode *timing = &panel->timing;
37
38 mode->clock = timing->pixclock / 1000;
39 mode->vrefresh = timing->refresh;
40
41 mode->hdisplay = timing->xres;
42 mode->hsync_start = mode->hdisplay + timing->right_margin;
43 mode->hsync_end = mode->hsync_start + timing->hsync_len;
44 mode->htotal = mode->hsync_end + timing->left_margin;
45
46 mode->vdisplay = timing->yres;
47 mode->vsync_start = mode->vdisplay + timing->lower_margin;
48 mode->vsync_end = mode->vsync_start + timing->vsync_len;
49 mode->vtotal = mode->vsync_end + timing->upper_margin;
50 mode->width_mm = panel->width_mm;
51 mode->height_mm = panel->height_mm;
52
53 if (timing->vmode & FB_VMODE_INTERLACED)
54 mode->flags |= DRM_MODE_FLAG_INTERLACE;
55
56 if (timing->vmode & FB_VMODE_DOUBLE)
57 mode->flags |= DRM_MODE_FLAG_DBLSCAN;
58}
59
60static int exynos_drm_connector_get_modes(struct drm_connector *connector) 32static int exynos_drm_connector_get_modes(struct drm_connector *connector)
61{ 33{
62 struct exynos_drm_connector *exynos_connector = 34 struct exynos_drm_connector *exynos_connector =
@@ -111,7 +83,9 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
111 return 0; 83 return 0;
112 } 84 }
113 85
114 convert_to_display_mode(mode, panel); 86 drm_display_mode_from_videomode(&panel->vm, mode);
87 mode->width_mm = panel->width_mm;
88 mode->height_mm = panel->height_mm;
115 connector->display_info.width_mm = mode->width_mm; 89 connector->display_info.width_mm = mode->width_mm;
116 connector->display_info.height_mm = mode->height_mm; 90 connector->display_info.height_mm = mode->height_mm;
117 91
@@ -278,10 +252,8 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
278 int err; 252 int err;
279 253
280 exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL); 254 exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL);
281 if (!exynos_connector) { 255 if (!exynos_connector)
282 DRM_ERROR("failed to allocate connector\n");
283 return NULL; 256 return NULL;
284 }
285 257
286 connector = &exynos_connector->drm_connector; 258 connector = &exynos_connector->drm_connector;
287 259
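
The hand-rolled fb_videomode converter removed above is replaced by the generic drm_display_mode_from_videomode() helper. A condensed sketch of what the get_modes path now amounts to follows; the function name and the drm_mode_create()/drm_mode_probed_add() plumbing are inferred for illustration and are not taken from this hunk:

/* Illustrative sketch; assumes the includes already used by exynos_drm_connector.c. */
static int example_fill_mode(struct drm_connector *connector,
			     const struct exynos_drm_panel_info *panel)
{
	struct drm_display_mode *mode = drm_mode_create(connector->dev);

	if (!mode)
		return 0;

	/* timings come from the videomode; physical size is not part of it */
	drm_display_mode_from_videomode(&panel->vm, mode);
	mode->width_mm = panel->width_mm;
	mode->height_mm = panel->height_mm;
	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;

	drm_mode_probed_add(connector, mode);
	return 1;
}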
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 9a35d171a6d3..ebc01503d50e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -15,6 +15,7 @@
15#include <drm/drmP.h> 15#include <drm/drmP.h>
16#include <drm/drm_crtc_helper.h> 16#include <drm/drm_crtc_helper.h>
17 17
18#include "exynos_drm_crtc.h"
18#include "exynos_drm_drv.h" 19#include "exynos_drm_drv.h"
19#include "exynos_drm_encoder.h" 20#include "exynos_drm_encoder.h"
20#include "exynos_drm_plane.h" 21#include "exynos_drm_plane.h"
@@ -184,8 +185,9 @@ static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
184}; 185};
185 186
186static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc, 187static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
187 struct drm_framebuffer *fb, 188 struct drm_framebuffer *fb,
188 struct drm_pending_vblank_event *event) 189 struct drm_pending_vblank_event *event,
190 uint32_t page_flip_flags)
189{ 191{
190 struct drm_device *dev = crtc->dev; 192 struct drm_device *dev = crtc->dev;
191 struct exynos_drm_private *dev_priv = dev->dev_private; 193 struct exynos_drm_private *dev_priv = dev->dev_private;
@@ -323,10 +325,8 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
323 struct drm_crtc *crtc; 325 struct drm_crtc *crtc;
324 326
325 exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL); 327 exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL);
326 if (!exynos_crtc) { 328 if (!exynos_crtc)
327 DRM_ERROR("failed to allocate exynos crtc\n");
328 return -ENOMEM; 329 return -ENOMEM;
329 }
330 330
331 exynos_crtc->pipe = nr; 331 exynos_crtc->pipe = nr;
332 exynos_crtc->dpms = DRM_MODE_DPMS_OFF; 332 exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index a0f997e0cbdf..59827cc5e770 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -11,6 +11,7 @@
11 11
12#include <drm/drmP.h> 12#include <drm/drmP.h>
13#include <drm/exynos_drm.h> 13#include <drm/exynos_drm.h>
14#include "exynos_drm_dmabuf.h"
14#include "exynos_drm_drv.h" 15#include "exynos_drm_drv.h"
15#include "exynos_drm_gem.h" 16#include "exynos_drm_gem.h"
16 17
@@ -22,6 +23,11 @@ struct exynos_drm_dmabuf_attachment {
22 bool is_mapped; 23 bool is_mapped;
23}; 24};
24 25
26static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf)
27{
28 return to_exynos_gem_obj(buf->priv);
29}
30
25static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf, 31static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
26 struct device *dev, 32 struct device *dev,
27 struct dma_buf_attachment *attach) 33 struct dma_buf_attachment *attach)
@@ -63,7 +69,7 @@ static struct sg_table *
63 enum dma_data_direction dir) 69 enum dma_data_direction dir)
64{ 70{
65 struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv; 71 struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
66 struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv; 72 struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
67 struct drm_device *dev = gem_obj->base.dev; 73 struct drm_device *dev = gem_obj->base.dev;
68 struct exynos_drm_gem_buf *buf; 74 struct exynos_drm_gem_buf *buf;
69 struct scatterlist *rd, *wr; 75 struct scatterlist *rd, *wr;
@@ -127,27 +133,6 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
127 /* Nothing to do. */ 133 /* Nothing to do. */
128} 134}
129 135
130static void exynos_dmabuf_release(struct dma_buf *dmabuf)
131{
132 struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;
133
134 /*
135 * exynos_dmabuf_release() call means that file object's
136 * f_count is 0 and it calls drm_gem_object_handle_unreference()
137 * to drop the references that these values had been increased
138 * at drm_prime_handle_to_fd()
139 */
140 if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
141 exynos_gem_obj->base.export_dma_buf = NULL;
142
143 /*
144 * drop this gem object refcount to release allocated buffer
145 * and resources.
146 */
147 drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
148 }
149}
150
151static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, 136static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
152 unsigned long page_num) 137 unsigned long page_num)
153{ 138{
@@ -193,7 +178,7 @@ static struct dma_buf_ops exynos_dmabuf_ops = {
193 .kunmap = exynos_gem_dmabuf_kunmap, 178 .kunmap = exynos_gem_dmabuf_kunmap,
194 .kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic, 179 .kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic,
195 .mmap = exynos_gem_dmabuf_mmap, 180 .mmap = exynos_gem_dmabuf_mmap,
196 .release = exynos_dmabuf_release, 181 .release = drm_gem_dmabuf_release,
197}; 182};
198 183
199struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev, 184struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
@@ -201,7 +186,7 @@ struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
201{ 186{
202 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); 187 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
203 188
204 return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops, 189 return dma_buf_export(obj, &exynos_dmabuf_ops,
205 exynos_gem_obj->base.size, flags); 190 exynos_gem_obj->base.size, flags);
206} 191}
207 192
@@ -219,8 +204,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
219 if (dma_buf->ops == &exynos_dmabuf_ops) { 204 if (dma_buf->ops == &exynos_dmabuf_ops) {
220 struct drm_gem_object *obj; 205 struct drm_gem_object *obj;
221 206
222 exynos_gem_obj = dma_buf->priv; 207 obj = dma_buf->priv;
223 obj = &exynos_gem_obj->base;
224 208
225 /* is it from our device? */ 209 /* is it from our device? */
226 if (obj->dev == drm_dev) { 210 if (obj->dev == drm_dev) {
@@ -247,7 +231,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
247 231
248 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); 232 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
249 if (!buffer) { 233 if (!buffer) {
250 DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
251 ret = -ENOMEM; 234 ret = -ENOMEM;
252 goto err_unmap_attach; 235 goto err_unmap_attach;
253 } 236 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index ca2729a85129..bb82ef78ca85 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -47,10 +47,8 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
47 int nr; 47 int nr;
48 48
49 private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL); 49 private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
50 if (!private) { 50 if (!private)
51 DRM_ERROR("failed to allocate private\n");
52 return -ENOMEM; 51 return -ENOMEM;
53 }
54 52
55 INIT_LIST_HEAD(&private->pageflip_event_list); 53 INIT_LIST_HEAD(&private->pageflip_event_list);
56 dev->dev_private = (void *)private; 54 dev->dev_private = (void *)private;
@@ -213,7 +211,7 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
213 .close = drm_gem_vm_close, 211 .close = drm_gem_vm_close,
214}; 212};
215 213
216static struct drm_ioctl_desc exynos_ioctls[] = { 214static const struct drm_ioctl_desc exynos_ioctls[] = {
217 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl, 215 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
218 DRM_UNLOCKED | DRM_AUTH), 216 DRM_UNLOCKED | DRM_AUTH),
219 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET, 217 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET,
@@ -271,12 +269,13 @@ static struct drm_driver exynos_drm_driver = {
271 .gem_vm_ops = &exynos_drm_gem_vm_ops, 269 .gem_vm_ops = &exynos_drm_gem_vm_ops,
272 .dumb_create = exynos_drm_gem_dumb_create, 270 .dumb_create = exynos_drm_gem_dumb_create,
273 .dumb_map_offset = exynos_drm_gem_dumb_map_offset, 271 .dumb_map_offset = exynos_drm_gem_dumb_map_offset,
274 .dumb_destroy = exynos_drm_gem_dumb_destroy, 272 .dumb_destroy = drm_gem_dumb_destroy,
275 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 273 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
276 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 274 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
277 .gem_prime_export = exynos_dmabuf_prime_export, 275 .gem_prime_export = exynos_dmabuf_prime_export,
278 .gem_prime_import = exynos_dmabuf_prime_import, 276 .gem_prime_import = exynos_dmabuf_prime_import,
279 .ioctls = exynos_ioctls, 277 .ioctls = exynos_ioctls,
278 .num_ioctls = ARRAY_SIZE(exynos_ioctls),
280 .fops = &exynos_drm_driver_fops, 279 .fops = &exynos_drm_driver_fops,
281 .name = DRIVER_NAME, 280 .name = DRIVER_NAME,
282 .desc = DRIVER_DESC, 281 .desc = DRIVER_DESC,
@@ -288,7 +287,6 @@ static struct drm_driver exynos_drm_driver = {
288static int exynos_drm_platform_probe(struct platform_device *pdev) 287static int exynos_drm_platform_probe(struct platform_device *pdev)
289{ 288{
290 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 289 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
291 exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
292 290
293 return drm_platform_init(&exynos_drm_driver, pdev); 291 return drm_platform_init(&exynos_drm_driver, pdev);
294} 292}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index a99a033793bc..06f1b2a09da7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -324,10 +324,8 @@ exynos_drm_encoder_create(struct drm_device *dev,
324 return NULL; 324 return NULL;
325 325
326 exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL); 326 exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL);
327 if (!exynos_encoder) { 327 if (!exynos_encoder)
328 DRM_ERROR("failed to allocate encoder\n");
329 return NULL; 328 return NULL;
330 }
331 329
332 exynos_encoder->dpms = DRM_MODE_DPMS_OFF; 330 exynos_encoder->dpms = DRM_MODE_DPMS_OFF;
333 exynos_encoder->manager = manager; 331 exynos_encoder->manager = manager;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index c2d149f0408a..ea39e0ef2ae4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -156,10 +156,8 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
156 } 156 }
157 157
158 exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); 158 exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
159 if (!exynos_fb) { 159 if (!exynos_fb)
160 DRM_ERROR("failed to allocate exynos drm framebuffer\n");
161 return ERR_PTR(-ENOMEM); 160 return ERR_PTR(-ENOMEM);
162 }
163 161
164 drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd); 162 drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
165 exynos_fb->exynos_gem_obj[0] = exynos_gem_obj; 163 exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
@@ -220,10 +218,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
220 int i, ret; 218 int i, ret;
221 219
222 exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); 220 exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
223 if (!exynos_fb) { 221 if (!exynos_fb)
224 DRM_ERROR("failed to allocate exynos drm framebuffer\n");
225 return ERR_PTR(-ENOMEM); 222 return ERR_PTR(-ENOMEM);
226 }
227 223
228 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); 224 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
229 if (!obj) { 225 if (!obj) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 8e60bd61137f..78e868bcf1ec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -16,9 +16,11 @@
16#include <drm/drm_crtc.h> 16#include <drm/drm_crtc.h>
17#include <drm/drm_fb_helper.h> 17#include <drm/drm_fb_helper.h>
18#include <drm/drm_crtc_helper.h> 18#include <drm/drm_crtc_helper.h>
19#include <drm/exynos_drm.h>
19 20
20#include "exynos_drm_drv.h" 21#include "exynos_drm_drv.h"
21#include "exynos_drm_fb.h" 22#include "exynos_drm_fb.h"
23#include "exynos_drm_fbdev.h"
22#include "exynos_drm_gem.h" 24#include "exynos_drm_gem.h"
23#include "exynos_drm_iommu.h" 25#include "exynos_drm_iommu.h"
24 26
@@ -165,8 +167,18 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
165 167
166 size = mode_cmd.pitches[0] * mode_cmd.height; 168 size = mode_cmd.pitches[0] * mode_cmd.height;
167 169
168 /* 0 means to allocate physically continuous memory */ 170 exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
169 exynos_gem_obj = exynos_drm_gem_create(dev, 0, size); 171 /*
 172 * If physically contiguous memory allocation fails and IOMMU is
 173 * supported, try to get the buffer from a non physically contiguous
 174 * memory area instead.
175 */
176 if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
177 dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
178 exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
179 size);
180 }
181
170 if (IS_ERR(exynos_gem_obj)) { 182 if (IS_ERR(exynos_gem_obj)) {
171 ret = PTR_ERR(exynos_gem_obj); 183 ret = PTR_ERR(exynos_gem_obj);
172 goto err_release_framebuffer; 184 goto err_release_framebuffer;
@@ -236,10 +248,8 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
236 return 0; 248 return 0;
237 249
238 fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); 250 fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
239 if (!fbdev) { 251 if (!fbdev)
240 DRM_ERROR("failed to allocate drm fbdev.\n");
241 return -ENOMEM; 252 return -ENOMEM;
242 }
243 253
244 private->fb_helper = helper = &fbdev->drm_fb_helper; 254 private->fb_helper = helper = &fbdev->drm_fb_helper;
245 helper->funcs = &exynos_drm_fb_helper_funcs; 255 helper->funcs = &exynos_drm_fb_helper_funcs;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 6e047bd53e2f..8adfc8f1e08f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -17,10 +17,12 @@
17#include <linux/regmap.h> 17#include <linux/regmap.h>
18#include <linux/clk.h> 18#include <linux/clk.h>
19#include <linux/pm_runtime.h> 19#include <linux/pm_runtime.h>
20#include <linux/of.h>
20 21
21#include <drm/drmP.h> 22#include <drm/drmP.h>
22#include <drm/exynos_drm.h> 23#include <drm/exynos_drm.h>
23#include "regs-fimc.h" 24#include "regs-fimc.h"
25#include "exynos_drm_drv.h"
24#include "exynos_drm_ipp.h" 26#include "exynos_drm_ipp.h"
25#include "exynos_drm_fimc.h" 27#include "exynos_drm_fimc.h"
26 28
@@ -1343,10 +1345,8 @@ static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
1343 struct drm_exynos_ipp_prop_list *prop_list; 1345 struct drm_exynos_ipp_prop_list *prop_list;
1344 1346
1345 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL); 1347 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
1346 if (!prop_list) { 1348 if (!prop_list)
1347 DRM_ERROR("failed to alloc property list.\n");
1348 return -ENOMEM; 1349 return -ENOMEM;
1349 }
1350 1350
1351 prop_list->version = 1; 1351 prop_list->version = 1;
1352 prop_list->writeback = 1; 1352 prop_list->writeback = 1;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 1c263dac3c1c..868a14d52995 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -16,10 +16,12 @@
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/clk.h> 18#include <linux/clk.h>
19#include <linux/of.h>
19#include <linux/of_device.h> 20#include <linux/of_device.h>
20#include <linux/pm_runtime.h> 21#include <linux/pm_runtime.h>
21 22
22#include <video/of_display_timing.h> 23#include <video/of_display_timing.h>
24#include <video/of_videomode.h>
23#include <video/samsung_fimd.h> 25#include <video/samsung_fimd.h>
24#include <drm/exynos_drm.h> 26#include <drm/exynos_drm.h>
25 27
@@ -35,6 +37,8 @@
35 * CPU Interface. 37 * CPU Interface.
36 */ 38 */
37 39
40#define FIMD_DEFAULT_FRAMERATE 60
41
38/* position control register for hardware window 0, 2 ~ 4.*/ 42/* position control register for hardware window 0, 2 ~ 4.*/
39#define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16) 43#define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16)
40#define VIDOSD_B(win) (VIDOSD_BASE + 0x04 + (win) * 16) 44#define VIDOSD_B(win) (VIDOSD_BASE + 0x04 + (win) * 16)
@@ -65,11 +69,13 @@ struct fimd_driver_data {
65 69
66 unsigned int has_shadowcon:1; 70 unsigned int has_shadowcon:1;
67 unsigned int has_clksel:1; 71 unsigned int has_clksel:1;
72 unsigned int has_limited_fmt:1;
68}; 73};
69 74
70static struct fimd_driver_data s3c64xx_fimd_driver_data = { 75static struct fimd_driver_data s3c64xx_fimd_driver_data = {
71 .timing_base = 0x0, 76 .timing_base = 0x0,
72 .has_clksel = 1, 77 .has_clksel = 1,
78 .has_limited_fmt = 1,
73}; 79};
74 80
75static struct fimd_driver_data exynos4_fimd_driver_data = { 81static struct fimd_driver_data exynos4_fimd_driver_data = {
@@ -90,6 +96,7 @@ struct fimd_win_data {
90 unsigned int fb_width; 96 unsigned int fb_width;
91 unsigned int fb_height; 97 unsigned int fb_height;
92 unsigned int bpp; 98 unsigned int bpp;
99 unsigned int pixel_format;
93 dma_addr_t dma_addr; 100 dma_addr_t dma_addr;
94 unsigned int buf_offsize; 101 unsigned int buf_offsize;
95 unsigned int line_size; /* bytes */ 102 unsigned int line_size; /* bytes */
@@ -115,11 +122,10 @@ struct fimd_context {
115 wait_queue_head_t wait_vsync_queue; 122 wait_queue_head_t wait_vsync_queue;
116 atomic_t wait_vsync_event; 123 atomic_t wait_vsync_event;
117 124
118 struct exynos_drm_panel_info *panel; 125 struct exynos_drm_panel_info panel;
119 struct fimd_driver_data *driver_data; 126 struct fimd_driver_data *driver_data;
120}; 127};
121 128
122#ifdef CONFIG_OF
123static const struct of_device_id fimd_driver_dt_match[] = { 129static const struct of_device_id fimd_driver_dt_match[] = {
124 { .compatible = "samsung,s3c6400-fimd", 130 { .compatible = "samsung,s3c6400-fimd",
125 .data = &s3c64xx_fimd_driver_data }, 131 .data = &s3c64xx_fimd_driver_data },
@@ -129,21 +135,14 @@ static const struct of_device_id fimd_driver_dt_match[] = {
129 .data = &exynos5_fimd_driver_data }, 135 .data = &exynos5_fimd_driver_data },
130 {}, 136 {},
131}; 137};
132#endif
133 138
134static inline struct fimd_driver_data *drm_fimd_get_driver_data( 139static inline struct fimd_driver_data *drm_fimd_get_driver_data(
135 struct platform_device *pdev) 140 struct platform_device *pdev)
136{ 141{
137#ifdef CONFIG_OF
138 const struct of_device_id *of_id = 142 const struct of_device_id *of_id =
139 of_match_device(fimd_driver_dt_match, &pdev->dev); 143 of_match_device(fimd_driver_dt_match, &pdev->dev);
140 144
141 if (of_id) 145 return (struct fimd_driver_data *)of_id->data;
142 return (struct fimd_driver_data *)of_id->data;
143#endif
144
145 return (struct fimd_driver_data *)
146 platform_get_device_id(pdev)->driver_data;
147} 146}
148 147
149static bool fimd_display_is_connected(struct device *dev) 148static bool fimd_display_is_connected(struct device *dev)
@@ -157,7 +156,7 @@ static void *fimd_get_panel(struct device *dev)
157{ 156{
158 struct fimd_context *ctx = get_fimd_context(dev); 157 struct fimd_context *ctx = get_fimd_context(dev);
159 158
160 return ctx->panel; 159 return &ctx->panel;
161} 160}
162 161
163static int fimd_check_mode(struct device *dev, struct drm_display_mode *mode) 162static int fimd_check_mode(struct device *dev, struct drm_display_mode *mode)
@@ -237,8 +236,8 @@ static void fimd_apply(struct device *subdrv_dev)
237static void fimd_commit(struct device *dev) 236static void fimd_commit(struct device *dev)
238{ 237{
239 struct fimd_context *ctx = get_fimd_context(dev); 238 struct fimd_context *ctx = get_fimd_context(dev);
240 struct exynos_drm_panel_info *panel = ctx->panel; 239 struct exynos_drm_panel_info *panel = &ctx->panel;
241 struct fb_videomode *timing = &panel->timing; 240 struct videomode *vm = &panel->vm;
242 struct fimd_driver_data *driver_data; 241 struct fimd_driver_data *driver_data;
243 u32 val; 242 u32 val;
244 243
@@ -250,22 +249,22 @@ static void fimd_commit(struct device *dev)
250 writel(ctx->vidcon1, ctx->regs + driver_data->timing_base + VIDCON1); 249 writel(ctx->vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);
251 250
252 /* setup vertical timing values. */ 251 /* setup vertical timing values. */
253 val = VIDTCON0_VBPD(timing->upper_margin - 1) | 252 val = VIDTCON0_VBPD(vm->vback_porch - 1) |
254 VIDTCON0_VFPD(timing->lower_margin - 1) | 253 VIDTCON0_VFPD(vm->vfront_porch - 1) |
255 VIDTCON0_VSPW(timing->vsync_len - 1); 254 VIDTCON0_VSPW(vm->vsync_len - 1);
256 writel(val, ctx->regs + driver_data->timing_base + VIDTCON0); 255 writel(val, ctx->regs + driver_data->timing_base + VIDTCON0);
257 256
258 /* setup horizontal timing values. */ 257 /* setup horizontal timing values. */
259 val = VIDTCON1_HBPD(timing->left_margin - 1) | 258 val = VIDTCON1_HBPD(vm->hback_porch - 1) |
260 VIDTCON1_HFPD(timing->right_margin - 1) | 259 VIDTCON1_HFPD(vm->hfront_porch - 1) |
261 VIDTCON1_HSPW(timing->hsync_len - 1); 260 VIDTCON1_HSPW(vm->hsync_len - 1);
262 writel(val, ctx->regs + driver_data->timing_base + VIDTCON1); 261 writel(val, ctx->regs + driver_data->timing_base + VIDTCON1);
263 262
264 /* setup horizontal and vertical display size. */ 263 /* setup horizontal and vertical display size. */
265 val = VIDTCON2_LINEVAL(timing->yres - 1) | 264 val = VIDTCON2_LINEVAL(vm->vactive - 1) |
266 VIDTCON2_HOZVAL(timing->xres - 1) | 265 VIDTCON2_HOZVAL(vm->hactive - 1) |
267 VIDTCON2_LINEVAL_E(timing->yres - 1) | 266 VIDTCON2_LINEVAL_E(vm->vactive - 1) |
268 VIDTCON2_HOZVAL_E(timing->xres - 1); 267 VIDTCON2_HOZVAL_E(vm->hactive - 1);
269 writel(val, ctx->regs + driver_data->timing_base + VIDTCON2); 268 writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
270 269
271 /* setup clock source, clock divider, enable dma. */ 270 /* setup clock source, clock divider, enable dma. */
@@ -396,6 +395,7 @@ static void fimd_win_mode_set(struct device *dev,
396 win_data->fb_height = overlay->fb_height; 395 win_data->fb_height = overlay->fb_height;
397 win_data->dma_addr = overlay->dma_addr[0] + offset; 396 win_data->dma_addr = overlay->dma_addr[0] + offset;
398 win_data->bpp = overlay->bpp; 397 win_data->bpp = overlay->bpp;
398 win_data->pixel_format = overlay->pixel_format;
399 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * 399 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
400 (overlay->bpp >> 3); 400 (overlay->bpp >> 3);
401 win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3); 401 win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3);
@@ -417,39 +417,38 @@ static void fimd_win_set_pixfmt(struct device *dev, unsigned int win)
417 417
418 val = WINCONx_ENWIN; 418 val = WINCONx_ENWIN;
419 419
420 switch (win_data->bpp) { 420 /*
421 case 1: 421 * In case of s3c64xx, window 0 doesn't support alpha channel.
 422 val |= WINCON0_BPPMODE_1BPP; 422 * So if the requested format is ARGB8888, change it to XRGB8888.
423 val |= WINCONx_BITSWP; 423 */
424 val |= WINCONx_BURSTLEN_4WORD; 424 if (ctx->driver_data->has_limited_fmt && !win) {
425 break; 425 if (win_data->pixel_format == DRM_FORMAT_ARGB8888)
426 case 2: 426 win_data->pixel_format = DRM_FORMAT_XRGB8888;
427 val |= WINCON0_BPPMODE_2BPP; 427 }
428 val |= WINCONx_BITSWP; 428
429 val |= WINCONx_BURSTLEN_8WORD; 429 switch (win_data->pixel_format) {
430 break; 430 case DRM_FORMAT_C8:
431 case 4:
432 val |= WINCON0_BPPMODE_4BPP;
433 val |= WINCONx_BITSWP;
434 val |= WINCONx_BURSTLEN_8WORD;
435 break;
436 case 8:
437 val |= WINCON0_BPPMODE_8BPP_PALETTE; 431 val |= WINCON0_BPPMODE_8BPP_PALETTE;
438 val |= WINCONx_BURSTLEN_8WORD; 432 val |= WINCONx_BURSTLEN_8WORD;
439 val |= WINCONx_BYTSWP; 433 val |= WINCONx_BYTSWP;
440 break; 434 break;
441 case 16: 435 case DRM_FORMAT_XRGB1555:
436 val |= WINCON0_BPPMODE_16BPP_1555;
437 val |= WINCONx_HAWSWP;
438 val |= WINCONx_BURSTLEN_16WORD;
439 break;
440 case DRM_FORMAT_RGB565:
442 val |= WINCON0_BPPMODE_16BPP_565; 441 val |= WINCON0_BPPMODE_16BPP_565;
443 val |= WINCONx_HAWSWP; 442 val |= WINCONx_HAWSWP;
444 val |= WINCONx_BURSTLEN_16WORD; 443 val |= WINCONx_BURSTLEN_16WORD;
445 break; 444 break;
446 case 24: 445 case DRM_FORMAT_XRGB8888:
447 val |= WINCON0_BPPMODE_24BPP_888; 446 val |= WINCON0_BPPMODE_24BPP_888;
448 val |= WINCONx_WSWP; 447 val |= WINCONx_WSWP;
449 val |= WINCONx_BURSTLEN_16WORD; 448 val |= WINCONx_BURSTLEN_16WORD;
450 break; 449 break;
451 case 32: 450 case DRM_FORMAT_ARGB8888:
452 val |= WINCON1_BPPMODE_28BPP_A4888 451 val |= WINCON1_BPPMODE_25BPP_A1888
453 | WINCON1_BLD_PIX | WINCON1_ALPHA_SEL; 452 | WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
454 val |= WINCONx_WSWP; 453 val |= WINCONx_WSWP;
455 val |= WINCONx_BURSTLEN_16WORD; 454 val |= WINCONx_BURSTLEN_16WORD;
@@ -746,45 +745,54 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
746 drm_iommu_detach_device(drm_dev, dev); 745 drm_iommu_detach_device(drm_dev, dev);
747} 746}
748 747
749static int fimd_calc_clkdiv(struct fimd_context *ctx, 748static int fimd_configure_clocks(struct fimd_context *ctx, struct device *dev)
750 struct fb_videomode *timing)
751{ 749{
752 unsigned long clk = clk_get_rate(ctx->lcd_clk); 750 struct videomode *vm = &ctx->panel.vm;
753 u32 retrace; 751 unsigned long clk;
754 u32 clkdiv; 752
755 u32 best_framerate = 0; 753 ctx->bus_clk = devm_clk_get(dev, "fimd");
756 u32 framerate; 754 if (IS_ERR(ctx->bus_clk)) {
757 755 dev_err(dev, "failed to get bus clock\n");
758 retrace = timing->left_margin + timing->hsync_len + 756 return PTR_ERR(ctx->bus_clk);
759 timing->right_margin + timing->xres; 757 }
760 retrace *= timing->upper_margin + timing->vsync_len + 758
761 timing->lower_margin + timing->yres; 759 ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
762 760 if (IS_ERR(ctx->lcd_clk)) {
763 /* default framerate is 60Hz */ 761 dev_err(dev, "failed to get lcd clock\n");
764 if (!timing->refresh) 762 return PTR_ERR(ctx->lcd_clk);
765 timing->refresh = 60; 763 }
766 764
767 clk /= retrace; 765 clk = clk_get_rate(ctx->lcd_clk);
768 766 if (clk == 0) {
769 for (clkdiv = 1; clkdiv < 0x100; clkdiv++) { 767 dev_err(dev, "error getting sclk_fimd clock rate\n");
770 int tmp; 768 return -EINVAL;
771 769 }
772 /* get best framerate */ 770
773 framerate = clk / clkdiv; 771 if (vm->pixelclock == 0) {
774 tmp = timing->refresh - framerate; 772 unsigned long c;
775 if (tmp < 0) { 773 c = vm->hactive + vm->hback_porch + vm->hfront_porch +
776 best_framerate = framerate; 774 vm->hsync_len;
777 continue; 775 c *= vm->vactive + vm->vback_porch + vm->vfront_porch +
778 } else { 776 vm->vsync_len;
779 if (!best_framerate) 777 vm->pixelclock = c * FIMD_DEFAULT_FRAMERATE;
780 best_framerate = framerate; 778 if (vm->pixelclock == 0) {
781 else if (tmp < (best_framerate - framerate)) 779 dev_err(dev, "incorrect display timings\n");
782 best_framerate = framerate; 780 return -EINVAL;
783 break;
784 } 781 }
782 dev_warn(dev, "pixel clock recalculated to %luHz (%dHz frame rate)\n",
783 vm->pixelclock, FIMD_DEFAULT_FRAMERATE);
785 } 784 }
785 ctx->clkdiv = DIV_ROUND_UP(clk, vm->pixelclock);
786 if (ctx->clkdiv > 256) {
787 dev_warn(dev, "calculated pixel clock divider too high (%u), lowered to 256\n",
788 ctx->clkdiv);
789 ctx->clkdiv = 256;
790 }
791 vm->pixelclock = clk / ctx->clkdiv;
792 DRM_DEBUG_KMS("pixel clock = %lu, clkdiv = %d\n", vm->pixelclock,
793 ctx->clkdiv);
786 794
787 return clkdiv; 795 return 0;
788} 796}
789 797
790static void fimd_clear_win(struct fimd_context *ctx, int win) 798static void fimd_clear_win(struct fimd_context *ctx, int win)
@@ -876,59 +884,53 @@ static int fimd_activate(struct fimd_context *ctx, bool enable)
876 return 0; 884 return 0;
877} 885}
878 886
887static int fimd_get_platform_data(struct fimd_context *ctx, struct device *dev)
888{
889 struct videomode *vm;
890 int ret;
891
892 vm = &ctx->panel.vm;
893 ret = of_get_videomode(dev->of_node, vm, OF_USE_NATIVE_MODE);
894 if (ret) {
895 DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
896 return ret;
897 }
898
899 if (vm->flags & DISPLAY_FLAGS_VSYNC_LOW)
900 ctx->vidcon1 |= VIDCON1_INV_VSYNC;
901 if (vm->flags & DISPLAY_FLAGS_HSYNC_LOW)
902 ctx->vidcon1 |= VIDCON1_INV_HSYNC;
903 if (vm->flags & DISPLAY_FLAGS_DE_LOW)
904 ctx->vidcon1 |= VIDCON1_INV_VDEN;
905 if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
906 ctx->vidcon1 |= VIDCON1_INV_VCLK;
907
908 return 0;
909}
910
879static int fimd_probe(struct platform_device *pdev) 911static int fimd_probe(struct platform_device *pdev)
880{ 912{
881 struct device *dev = &pdev->dev; 913 struct device *dev = &pdev->dev;
882 struct fimd_context *ctx; 914 struct fimd_context *ctx;
883 struct exynos_drm_subdrv *subdrv; 915 struct exynos_drm_subdrv *subdrv;
884 struct exynos_drm_fimd_pdata *pdata;
885 struct exynos_drm_panel_info *panel;
886 struct resource *res; 916 struct resource *res;
887 int win; 917 int win;
888 int ret = -EINVAL; 918 int ret = -EINVAL;
889 919
890 if (dev->of_node) { 920 if (!dev->of_node)
891 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); 921 return -ENODEV;
892 if (!pdata) {
893 DRM_ERROR("memory allocation for pdata failed\n");
894 return -ENOMEM;
895 }
896
897 ret = of_get_fb_videomode(dev->of_node, &pdata->panel.timing,
898 OF_USE_NATIVE_MODE);
899 if (ret) {
900 DRM_ERROR("failed: of_get_fb_videomode() : %d\n", ret);
901 return ret;
902 }
903 } else {
904 pdata = dev->platform_data;
905 if (!pdata) {
906 DRM_ERROR("no platform data specified\n");
907 return -EINVAL;
908 }
909 }
910
911 panel = &pdata->panel;
912 if (!panel) {
913 dev_err(dev, "panel is null.\n");
914 return -EINVAL;
915 }
916 922
917 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 923 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
918 if (!ctx) 924 if (!ctx)
919 return -ENOMEM; 925 return -ENOMEM;
920 926
921 ctx->bus_clk = devm_clk_get(dev, "fimd"); 927 ret = fimd_get_platform_data(ctx, dev);
922 if (IS_ERR(ctx->bus_clk)) { 928 if (ret)
923 dev_err(dev, "failed to get bus clock\n"); 929 return ret;
924 return PTR_ERR(ctx->bus_clk);
925 }
926 930
927 ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd"); 931 ret = fimd_configure_clocks(ctx, dev);
928 if (IS_ERR(ctx->lcd_clk)) { 932 if (ret)
929 dev_err(dev, "failed to get lcd clock\n"); 933 return ret;
930 return PTR_ERR(ctx->lcd_clk);
931 }
932 934
933 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 935 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
934 936
@@ -952,10 +954,6 @@ static int fimd_probe(struct platform_device *pdev)
952 } 954 }
953 955
954 ctx->driver_data = drm_fimd_get_driver_data(pdev); 956 ctx->driver_data = drm_fimd_get_driver_data(pdev);
955 ctx->vidcon0 = pdata->vidcon0;
956 ctx->vidcon1 = pdata->vidcon1;
957 ctx->default_win = pdata->default_win;
958 ctx->panel = panel;
959 DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue); 957 DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
960 atomic_set(&ctx->wait_vsync_event, 0); 958 atomic_set(&ctx->wait_vsync_event, 0);
961 959
@@ -973,12 +971,6 @@ static int fimd_probe(struct platform_device *pdev)
973 pm_runtime_enable(dev); 971 pm_runtime_enable(dev);
974 pm_runtime_get_sync(dev); 972 pm_runtime_get_sync(dev);
975 973
976 ctx->clkdiv = fimd_calc_clkdiv(ctx, &panel->timing);
977 panel->timing.pixclock = clk_get_rate(ctx->lcd_clk) / ctx->clkdiv;
978
979 DRM_DEBUG_KMS("pixel clock = %d, clkdiv = %d\n",
980 panel->timing.pixclock, ctx->clkdiv);
981
982 for (win = 0; win < WINDOWS_NR; win++) 974 for (win = 0; win < WINDOWS_NR; win++)
983 fimd_clear_win(ctx, win); 975 fimd_clear_win(ctx, win);
984 976
@@ -1067,20 +1059,6 @@ static int fimd_runtime_resume(struct device *dev)
1067} 1059}
1068#endif 1060#endif
1069 1061
1070static struct platform_device_id fimd_driver_ids[] = {
1071 {
1072 .name = "s3c64xx-fb",
1073 .driver_data = (unsigned long)&s3c64xx_fimd_driver_data,
1074 }, {
1075 .name = "exynos4-fb",
1076 .driver_data = (unsigned long)&exynos4_fimd_driver_data,
1077 }, {
1078 .name = "exynos5-fb",
1079 .driver_data = (unsigned long)&exynos5_fimd_driver_data,
1080 },
1081 {},
1082};
1083
1084static const struct dev_pm_ops fimd_pm_ops = { 1062static const struct dev_pm_ops fimd_pm_ops = {
1085 SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume) 1063 SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
1086 SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL) 1064 SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL)
@@ -1089,11 +1067,10 @@ static const struct dev_pm_ops fimd_pm_ops = {
1089struct platform_driver fimd_driver = { 1067struct platform_driver fimd_driver = {
1090 .probe = fimd_probe, 1068 .probe = fimd_probe,
1091 .remove = fimd_remove, 1069 .remove = fimd_remove,
1092 .id_table = fimd_driver_ids,
1093 .driver = { 1070 .driver = {
1094 .name = "exynos4-fb", 1071 .name = "exynos4-fb",
1095 .owner = THIS_MODULE, 1072 .owner = THIS_MODULE,
1096 .pm = &fimd_pm_ops, 1073 .pm = &fimd_pm_ops,
1097 .of_match_table = of_match_ptr(fimd_driver_dt_match), 1074 .of_match_table = fimd_driver_dt_match,
1098 }, 1075 },
1099}; 1076};
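
For reference, the divider math performed by the new fimd_configure_clocks() works out as follows for a hypothetical panel; every number below is made up for illustration, only the formula and the DIV_ROUND_UP() usage come from the code above:

/* Hypothetical timings: 1024x600 panel, default 60 Hz frame rate, 133 MHz sclk_fimd. */
#include <linux/kernel.h>

static unsigned int example_fimd_clkdiv(void)
{
	unsigned long htotal = 1024 + 20 + 140 + 40;	 /* hactive + porches + hsync = 1224 */
	unsigned long vtotal = 600 + 10 + 20 + 5;	 /* vactive + porches + vsync =  635 */
	unsigned long pixelclock = htotal * vtotal * 60; /* 1224 * 635 * 60 = 46,634,400 Hz */
	unsigned long sclk = 133000000UL;

	/* DIV_ROUND_UP(133,000,000, 46,634,400) = 3, i.e. ~44.3 MHz actual pixel clock */
	return DIV_ROUND_UP(sclk, pixelclock);
}

The cap at 256 in the code above then simply bounds the result, presumably to match the width of the hardware clock-divider field.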
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index eddea4941483..3271fd4b1724 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -23,6 +23,7 @@
23#include <drm/drmP.h> 23#include <drm/drmP.h>
24#include <drm/exynos_drm.h> 24#include <drm/exynos_drm.h>
25#include "exynos_drm_drv.h" 25#include "exynos_drm_drv.h"
26#include "exynos_drm_g2d.h"
26#include "exynos_drm_gem.h" 27#include "exynos_drm_gem.h"
27#include "exynos_drm_iommu.h" 28#include "exynos_drm_iommu.h"
28 29
@@ -446,10 +447,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
446 } 447 }
447 448
448 g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL); 449 g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
449 if (!g2d_userptr) { 450 if (!g2d_userptr)
450 DRM_ERROR("failed to allocate g2d_userptr.\n");
451 return ERR_PTR(-ENOMEM); 451 return ERR_PTR(-ENOMEM);
452 }
453 452
454 atomic_set(&g2d_userptr->refcount, 1); 453 atomic_set(&g2d_userptr->refcount, 1);
455 454
@@ -499,7 +498,6 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
499 498
500 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); 499 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
501 if (!sgt) { 500 if (!sgt) {
502 DRM_ERROR("failed to allocate sg table.\n");
503 ret = -ENOMEM; 501 ret = -ENOMEM;
504 goto err_free_userptr; 502 goto err_free_userptr;
505 } 503 }
@@ -808,17 +806,8 @@ static void g2d_dma_start(struct g2d_data *g2d,
808 int ret; 806 int ret;
809 807
810 ret = pm_runtime_get_sync(g2d->dev); 808 ret = pm_runtime_get_sync(g2d->dev);
811 if (ret < 0) { 809 if (ret < 0)
812 dev_warn(g2d->dev, "failed pm power on.\n");
813 return;
814 }
815
816 ret = clk_prepare_enable(g2d->gate_clk);
817 if (ret < 0) {
818 dev_warn(g2d->dev, "failed to enable clock.\n");
819 pm_runtime_put_sync(g2d->dev);
820 return; 810 return;
821 }
822 811
823 writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR); 812 writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
824 writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND); 813 writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
@@ -871,7 +860,6 @@ static void g2d_runqueue_worker(struct work_struct *work)
871 runqueue_work); 860 runqueue_work);
872 861
873 mutex_lock(&g2d->runqueue_mutex); 862 mutex_lock(&g2d->runqueue_mutex);
874 clk_disable_unprepare(g2d->gate_clk);
875 pm_runtime_put_sync(g2d->dev); 863 pm_runtime_put_sync(g2d->dev);
876 864
877 complete(&g2d->runqueue_node->complete); 865 complete(&g2d->runqueue_node->complete);
@@ -1096,8 +1084,6 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
1096 1084
1097 e = kzalloc(sizeof(*node->event), GFP_KERNEL); 1085 e = kzalloc(sizeof(*node->event), GFP_KERNEL);
1098 if (!e) { 1086 if (!e) {
1099 dev_err(dev, "failed to allocate event\n");
1100
1101 spin_lock_irqsave(&drm_dev->event_lock, flags); 1087 spin_lock_irqsave(&drm_dev->event_lock, flags);
1102 file->event_space += sizeof(e->event); 1088 file->event_space += sizeof(e->event);
1103 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 1089 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -1327,10 +1313,8 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
1327 struct exynos_drm_g2d_private *g2d_priv; 1313 struct exynos_drm_g2d_private *g2d_priv;
1328 1314
1329 g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL); 1315 g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL);
1330 if (!g2d_priv) { 1316 if (!g2d_priv)
1331 dev_err(dev, "failed to allocate g2d private data\n");
1332 return -ENOMEM; 1317 return -ENOMEM;
1333 }
1334 1318
1335 g2d_priv->dev = dev; 1319 g2d_priv->dev = dev;
1336 file_priv->g2d_priv = g2d_priv; 1320 file_priv->g2d_priv = g2d_priv;
@@ -1386,10 +1370,8 @@ static int g2d_probe(struct platform_device *pdev)
1386 int ret; 1370 int ret;
1387 1371
1388 g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL); 1372 g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL);
1389 if (!g2d) { 1373 if (!g2d)
1390 dev_err(dev, "failed to allocate driver data\n");
1391 return -ENOMEM; 1374 return -ENOMEM;
1392 }
1393 1375
1394 g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab", 1376 g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
1395 sizeof(struct g2d_runqueue_node), 0, 0, NULL); 1377 sizeof(struct g2d_runqueue_node), 0, 0, NULL);
@@ -1524,14 +1506,38 @@ static int g2d_resume(struct device *dev)
1524} 1506}
1525#endif 1507#endif
1526 1508
1527static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume); 1509#ifdef CONFIG_PM_RUNTIME
1510static int g2d_runtime_suspend(struct device *dev)
1511{
1512 struct g2d_data *g2d = dev_get_drvdata(dev);
1513
1514 clk_disable_unprepare(g2d->gate_clk);
1515
1516 return 0;
1517}
1518
1519static int g2d_runtime_resume(struct device *dev)
1520{
1521 struct g2d_data *g2d = dev_get_drvdata(dev);
1522 int ret;
1523
1524 ret = clk_prepare_enable(g2d->gate_clk);
1525 if (ret < 0)
1526 dev_warn(dev, "failed to enable clock.\n");
1527
1528 return ret;
1529}
1530#endif
1531
1532static const struct dev_pm_ops g2d_pm_ops = {
1533 SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
1534 SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
1535};
1528 1536
1529#ifdef CONFIG_OF
1530static const struct of_device_id exynos_g2d_match[] = { 1537static const struct of_device_id exynos_g2d_match[] = {
1531 { .compatible = "samsung,exynos5250-g2d" }, 1538 { .compatible = "samsung,exynos5250-g2d" },
1532 {}, 1539 {},
1533}; 1540};
1534#endif
1535 1541
1536struct platform_driver g2d_driver = { 1542struct platform_driver g2d_driver = {
1537 .probe = g2d_probe, 1543 .probe = g2d_probe,
@@ -1540,6 +1546,6 @@ struct platform_driver g2d_driver = {
1540 .name = "s5p-g2d", 1546 .name = "s5p-g2d",
1541 .owner = THIS_MODULE, 1547 .owner = THIS_MODULE,
1542 .pm = &g2d_pm_ops, 1548 .pm = &g2d_pm_ops,
1543 .of_match_table = of_match_ptr(exynos_g2d_match), 1549 .of_match_table = exynos_g2d_match,
1544 }, 1550 },
1545}; 1551};
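The g2d hunks above implement the runtime-PM conversion called out in the merge summary: clock gating moves out of g2d_dma_start() and the runqueue worker into runtime suspend/resume callbacks, and the plain SIMPLE_DEV_PM_OPS is replaced by a dev_pm_ops combining system-sleep and runtime handlers. A minimal sketch of that pattern, with made-up foo_* names standing in for the real g2d callbacks (the actual driver additionally wraps the runtime callbacks in #ifdef CONFIG_PM_RUNTIME, as the hunk shows):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

struct foo_data {
	struct clk *gate_clk;
};

static int foo_runtime_suspend(struct device *dev)
{
	struct foo_data *foo = dev_get_drvdata(dev);

	/* Gate the clock whenever the runtime-PM core decides the device is idle. */
	clk_disable_unprepare(foo->gate_clk);
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	struct foo_data *foo = dev_get_drvdata(dev);

	/* Ungate the clock before the device is used again. */
	return clk_prepare_enable(foo->gate_clk);
}

static const struct dev_pm_ops foo_pm_ops = {
	/* system sleep handlers would sit alongside via SET_SYSTEM_SLEEP_PM_OPS() */
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

With that in place, the DMA-start path only needs pm_runtime_get_sync() (and the runqueue worker the matching pm_runtime_put_sync()); the clock handling happens in the callbacks, exactly as the hunks above show.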
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 24c22a8c3364..49f9cd232757 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <drm/drmP.h> 12#include <drm/drmP.h>
13#include <drm/drm_vma_manager.h>
13 14
14#include <linux/shmem_fs.h> 15#include <linux/shmem_fs.h>
15#include <drm/exynos_drm.h> 16#include <drm/exynos_drm.h>
@@ -17,6 +18,7 @@
17#include "exynos_drm_drv.h" 18#include "exynos_drm_drv.h"
18#include "exynos_drm_gem.h" 19#include "exynos_drm_gem.h"
19#include "exynos_drm_buf.h" 20#include "exynos_drm_buf.h"
21#include "exynos_drm_iommu.h"
20 22
21static unsigned int convert_to_vm_err_msg(int msg) 23static unsigned int convert_to_vm_err_msg(int msg)
22{ 24{
@@ -135,7 +137,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
135 obj = &exynos_gem_obj->base; 137 obj = &exynos_gem_obj->base;
136 buf = exynos_gem_obj->buffer; 138 buf = exynos_gem_obj->buffer;
137 139
138 DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count)); 140 DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);
139 141
140 /* 142 /*
141 * do not release memory region from exporter. 143 * do not release memory region from exporter.
@@ -152,8 +154,7 @@ out:
152 exynos_drm_fini_buf(obj->dev, buf); 154 exynos_drm_fini_buf(obj->dev, buf);
153 exynos_gem_obj->buffer = NULL; 155 exynos_gem_obj->buffer = NULL;
154 156
155 if (obj->map_list.map) 157 drm_gem_free_mmap_offset(obj);
156 drm_gem_free_mmap_offset(obj);
157 158
158 /* release file pointer to gem object. */ 159 /* release file pointer to gem object. */
159 drm_gem_object_release(obj); 160 drm_gem_object_release(obj);
@@ -191,10 +192,8 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
191 int ret; 192 int ret;
192 193
193 exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL); 194 exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
194 if (!exynos_gem_obj) { 195 if (!exynos_gem_obj)
195 DRM_ERROR("failed to allocate exynos gem object\n");
196 return NULL; 196 return NULL;
197 }
198 197
199 exynos_gem_obj->size = size; 198 exynos_gem_obj->size = size;
200 obj = &exynos_gem_obj->base; 199 obj = &exynos_gem_obj->base;
@@ -668,6 +667,18 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
668 667
669 exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG | 668 exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG |
670 EXYNOS_BO_WC, args->size); 669 EXYNOS_BO_WC, args->size);
670 /*
671 * If physically contiguous memory allocation fails and if IOMMU is
672 * supported then try to get buffer from non physically contiguous
673 * memory area.
674 */
675 if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
676 dev_warn(dev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
677 exynos_gem_obj = exynos_drm_gem_create(dev,
678 EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
679 args->size);
680 }
681
671 if (IS_ERR(exynos_gem_obj)) 682 if (IS_ERR(exynos_gem_obj))
672 return PTR_ERR(exynos_gem_obj); 683 return PTR_ERR(exynos_gem_obj);
673 684
@@ -703,13 +714,11 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
703 goto unlock; 714 goto unlock;
704 } 715 }
705 716
706 if (!obj->map_list.map) { 717 ret = drm_gem_create_mmap_offset(obj);
707 ret = drm_gem_create_mmap_offset(obj); 718 if (ret)
708 if (ret) 719 goto out;
709 goto out;
710 }
711 720
712 *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT; 721 *offset = drm_vma_node_offset_addr(&obj->vma_node);
713 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); 722 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
714 723
715out: 724out:
@@ -719,26 +728,6 @@ unlock:
719 return ret; 728 return ret;
720} 729}
721 730
722int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
723 struct drm_device *dev,
724 unsigned int handle)
725{
726 int ret;
727
728 /*
729 * obj->refcount and obj->handle_count are decreased and
730 * if both them are 0 then exynos_drm_gem_free_object()
731 * would be called by callback to release resources.
732 */
733 ret = drm_gem_handle_delete(file_priv, handle);
734 if (ret < 0) {
735 DRM_ERROR("failed to delete drm_gem_handle.\n");
736 return ret;
737 }
738
739 return 0;
740}
741
742int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 731int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
743{ 732{
744 struct drm_gem_object *obj = vma->vm_private_data; 733 struct drm_gem_object *obj = vma->vm_private_data;
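Two separate changes land in exynos_drm_gem.c above: the dumb-buffer map offset now comes from the common VMA offset manager (drm_vma_node_offset_addr() instead of the old map_list hash), and exynos_drm_gem_dumb_create() gains a fallback from contiguous to non-contiguous memory when an IOMMU backs the device. A hedged sketch of that retry idiom; alloc_bo(), the FLAG_* bits and iommu_backed() are placeholders for exynos_drm_gem_create(), the EXYNOS_BO_* flags and is_drm_iommu_supported():

/*
 * Sketch only: try the preferred contiguous allocation first and fall
 * back to scattered pages when an IOMMU can map them for the device.
 */
static struct bo *create_dumb_bo(struct drm_device *dev, unsigned long size)
{
	struct bo *bo;

	/* Physically contiguous, write-combined memory is preferred for scanout. */
	bo = alloc_bo(dev, FLAG_CONTIG | FLAG_WC, size);

	/* Behind an IOMMU the device can use scattered pages, so retry
	 * instead of failing outright. */
	if (IS_ERR(bo) && iommu_backed(dev))
		bo = alloc_bo(dev, FLAG_NONCONTIG | FLAG_WC, size);

	return bo;
}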
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 468766bee450..09555afdfe9c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -151,15 +151,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
151 struct drm_device *dev, uint32_t handle, 151 struct drm_device *dev, uint32_t handle,
152 uint64_t *offset); 152 uint64_t *offset);
153 153
154/*
155 * destroy memory region allocated.
156 * - a gem handle and physical memory region pointed by a gem object
157 * would be released by drm_gem_handle_delete().
158 */
159int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
160 struct drm_device *dev,
161 unsigned int handle);
162
163/* page fault handler and mmap fault address(virtual) to physical memory. */ 154/* page fault handler and mmap fault address(virtual) to physical memory. */
164int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 155int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
165 156
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 90b8a1a5344c..cd6aebd53bd0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -20,6 +20,7 @@
20#include <drm/drmP.h> 20#include <drm/drmP.h>
21#include <drm/exynos_drm.h> 21#include <drm/exynos_drm.h>
22#include "regs-gsc.h" 22#include "regs-gsc.h"
23#include "exynos_drm_drv.h"
23#include "exynos_drm_ipp.h" 24#include "exynos_drm_ipp.h"
24#include "exynos_drm_gsc.h" 25#include "exynos_drm_gsc.h"
25 26
@@ -1337,10 +1338,8 @@ static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
1337 struct drm_exynos_ipp_prop_list *prop_list; 1338 struct drm_exynos_ipp_prop_list *prop_list;
1338 1339
1339 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL); 1340 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
1340 if (!prop_list) { 1341 if (!prop_list)
1341 DRM_ERROR("failed to alloc property list.\n");
1342 return -ENOMEM; 1342 return -ENOMEM;
1343 }
1344 1343
1345 prop_list->version = 1; 1344 prop_list->version = 1;
1346 prop_list->writeback = 1; 1345 prop_list->writeback = 1;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 8d3bc01d6834..8548b974bd59 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -403,10 +403,8 @@ static int exynos_drm_hdmi_probe(struct platform_device *pdev)
403 struct drm_hdmi_context *ctx; 403 struct drm_hdmi_context *ctx;
404 404
405 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 405 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
406 if (!ctx) { 406 if (!ctx)
407 DRM_LOG_KMS("failed to alloc common hdmi context.\n");
408 return -ENOMEM; 407 return -ENOMEM;
409 }
410 408
411 subdrv = &ctx->subdrv; 409 subdrv = &ctx->subdrv;
412 410
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
index 3799d5c2b5df..fb8db0378274 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -47,10 +47,16 @@ int drm_create_iommu_mapping(struct drm_device *drm_dev)
47 47
48 dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), 48 dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
49 GFP_KERNEL); 49 GFP_KERNEL);
50 if (!dev->dma_parms)
51 goto error;
52
50 dma_set_max_seg_size(dev, 0xffffffffu); 53 dma_set_max_seg_size(dev, 0xffffffffu);
51 dev->archdata.mapping = mapping; 54 dev->archdata.mapping = mapping;
52 55
53 return 0; 56 return 0;
57error:
58 arm_iommu_release_mapping(mapping);
59 return -ENOMEM;
54} 60}
55 61
56/* 62/*
@@ -91,6 +97,9 @@ int drm_iommu_attach_device(struct drm_device *drm_dev,
91 subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev, 97 subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
92 sizeof(*subdrv_dev->dma_parms), 98 sizeof(*subdrv_dev->dma_parms),
93 GFP_KERNEL); 99 GFP_KERNEL);
100 if (!subdrv_dev->dma_parms)
101 return -ENOMEM;
102
94 dma_set_max_seg_size(subdrv_dev, 0xffffffffu); 103 dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
95 104
96 ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping); 105 ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
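The exynos_drm_iommu.c hunks add the NULL checks that were missing after the devm_kzalloc() of dma_parms; in the mapping-creation path the error leg also releases the ARM IOMMU mapping created earlier in the function. A generic sketch of that allocate-then-unwind shape, where make_mapping()/release_mapping() are placeholders for the arm_iommu_*_mapping() helpers:

static int foo_create_mapping(struct device *dev)
{
	struct mapping *map;

	map = make_mapping(dev);
	if (IS_ERR(map))
		return PTR_ERR(map);

	dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
	if (!dev->dma_parms) {
		/* undo the step that already succeeded before bailing out */
		release_mapping(map);
		return -ENOMEM;
	}

	dma_set_max_seg_size(dev, 0xffffffffu);
	return 0;
}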
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index d2b6ab4def93..824e0705c8d3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -408,10 +408,8 @@ static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
408 struct drm_exynos_ipp_cmd_work *cmd_work; 408 struct drm_exynos_ipp_cmd_work *cmd_work;
409 409
410 cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL); 410 cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
411 if (!cmd_work) { 411 if (!cmd_work)
412 DRM_ERROR("failed to alloc cmd_work.\n");
413 return ERR_PTR(-ENOMEM); 412 return ERR_PTR(-ENOMEM);
414 }
415 413
416 INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd); 414 INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
417 415
@@ -423,10 +421,8 @@ static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
423 struct drm_exynos_ipp_event_work *event_work; 421 struct drm_exynos_ipp_event_work *event_work;
424 422
425 event_work = kzalloc(sizeof(*event_work), GFP_KERNEL); 423 event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
426 if (!event_work) { 424 if (!event_work)
427 DRM_ERROR("failed to alloc event_work.\n");
428 return ERR_PTR(-ENOMEM); 425 return ERR_PTR(-ENOMEM);
429 }
430 426
431 INIT_WORK((struct work_struct *)event_work, ipp_sched_event); 427 INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
432 428
@@ -482,10 +478,8 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
482 478
483 /* allocate command node */ 479 /* allocate command node */
484 c_node = kzalloc(sizeof(*c_node), GFP_KERNEL); 480 c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
485 if (!c_node) { 481 if (!c_node)
486 DRM_ERROR("failed to allocate map node.\n");
487 return -ENOMEM; 482 return -ENOMEM;
488 }
489 483
490 /* create property id */ 484 /* create property id */
491 ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node, 485 ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
@@ -694,10 +688,8 @@ static struct drm_exynos_ipp_mem_node
694 mutex_lock(&c_node->mem_lock); 688 mutex_lock(&c_node->mem_lock);
695 689
696 m_node = kzalloc(sizeof(*m_node), GFP_KERNEL); 690 m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
697 if (!m_node) { 691 if (!m_node)
698 DRM_ERROR("failed to allocate queue node.\n");
699 goto err_unlock; 692 goto err_unlock;
700 }
701 693
702 /* clear base address for error handling */ 694 /* clear base address for error handling */
703 memset(&buf_info, 0x0, sizeof(buf_info)); 695 memset(&buf_info, 0x0, sizeof(buf_info));
@@ -798,9 +790,7 @@ static int ipp_get_event(struct drm_device *drm_dev,
798 DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id); 790 DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);
799 791
800 e = kzalloc(sizeof(*e), GFP_KERNEL); 792 e = kzalloc(sizeof(*e), GFP_KERNEL);
801
802 if (!e) { 793 if (!e) {
803 DRM_ERROR("failed to allocate event.\n");
804 spin_lock_irqsave(&drm_dev->event_lock, flags); 794 spin_lock_irqsave(&drm_dev->event_lock, flags);
805 file->event_space += sizeof(e->event); 795 file->event_space += sizeof(e->event);
806 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 796 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -1780,10 +1770,8 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1780 struct exynos_drm_ipp_private *priv; 1770 struct exynos_drm_ipp_private *priv;
1781 1771
1782 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 1772 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1783 if (!priv) { 1773 if (!priv)
1784 DRM_ERROR("failed to allocate priv.\n");
1785 return -ENOMEM; 1774 return -ENOMEM;
1786 }
1787 priv->dev = dev; 1775 priv->dev = dev;
1788 file_priv->ipp_priv = priv; 1776 file_priv->ipp_priv = priv;
1789 1777
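The bulk of the exynos churn in this pull is the same janitorial change repeated per file: drop the dev_err()/DRM_ERROR() that merely restates a memory-allocation failure, since GFP_KERNEL allocations already log a warning when they fail, and collapse the now single-statement branch. Schematically:

/* before: redundant out-of-memory message */
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
	dev_err(dev, "failed to allocate context\n");
	return -ENOMEM;
}

/* after: the allocator already reports the failure */
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
	return -ENOMEM;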
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 6ee55e68e0a2..fcb0652e77d0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -16,6 +16,7 @@
16#include "exynos_drm_encoder.h" 16#include "exynos_drm_encoder.h"
17#include "exynos_drm_fb.h" 17#include "exynos_drm_fb.h"
18#include "exynos_drm_gem.h" 18#include "exynos_drm_gem.h"
19#include "exynos_drm_plane.h"
19 20
20#define to_exynos_plane(x) container_of(x, struct exynos_plane, base) 21#define to_exynos_plane(x) container_of(x, struct exynos_plane, base)
21 22
@@ -264,10 +265,8 @@ struct drm_plane *exynos_plane_init(struct drm_device *dev,
264 int err; 265 int err;
265 266
266 exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL); 267 exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
267 if (!exynos_plane) { 268 if (!exynos_plane)
268 DRM_ERROR("failed to allocate plane\n");
269 return NULL; 269 return NULL;
270 }
271 270
272 err = drm_plane_init(dev, &exynos_plane->base, possible_crtcs, 271 err = drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
273 &exynos_plane_funcs, formats, ARRAY_SIZE(formats), 272 &exynos_plane_funcs, formats, ARRAY_SIZE(formats),
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 49669aa24c45..7b901688defa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -21,6 +21,7 @@
21#include <drm/exynos_drm.h> 21#include <drm/exynos_drm.h>
22#include "regs-rotator.h" 22#include "regs-rotator.h"
23#include "exynos_drm.h" 23#include "exynos_drm.h"
24#include "exynos_drm_drv.h"
24#include "exynos_drm_ipp.h" 25#include "exynos_drm_ipp.h"
25 26
26/* 27/*
@@ -471,10 +472,8 @@ static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
471 struct drm_exynos_ipp_prop_list *prop_list; 472 struct drm_exynos_ipp_prop_list *prop_list;
472 473
473 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL); 474 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
474 if (!prop_list) { 475 if (!prop_list)
475 DRM_ERROR("failed to alloc property list.\n");
476 return -ENOMEM; 476 return -ENOMEM;
477 }
478 477
479 prop_list->version = 1; 478 prop_list->version = 1;
480 prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) | 479 prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
@@ -631,21 +630,96 @@ static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
631 return 0; 630 return 0;
632} 631}
633 632
633static struct rot_limit_table rot_limit_tbl_4210 = {
634 .ycbcr420_2p = {
635 .min_w = 32,
636 .min_h = 32,
637 .max_w = SZ_64K,
638 .max_h = SZ_64K,
639 .align = 3,
640 },
641 .rgb888 = {
642 .min_w = 8,
643 .min_h = 8,
644 .max_w = SZ_16K,
645 .max_h = SZ_16K,
646 .align = 2,
647 },
648};
649
650static struct rot_limit_table rot_limit_tbl_4x12 = {
651 .ycbcr420_2p = {
652 .min_w = 32,
653 .min_h = 32,
654 .max_w = SZ_32K,
655 .max_h = SZ_32K,
656 .align = 3,
657 },
658 .rgb888 = {
659 .min_w = 8,
660 .min_h = 8,
661 .max_w = SZ_8K,
662 .max_h = SZ_8K,
663 .align = 2,
664 },
665};
666
667static struct rot_limit_table rot_limit_tbl_5250 = {
668 .ycbcr420_2p = {
669 .min_w = 32,
670 .min_h = 32,
671 .max_w = SZ_32K,
672 .max_h = SZ_32K,
673 .align = 3,
674 },
675 .rgb888 = {
676 .min_w = 8,
677 .min_h = 8,
678 .max_w = SZ_8K,
679 .max_h = SZ_8K,
680 .align = 1,
681 },
682};
683
684static const struct of_device_id exynos_rotator_match[] = {
685 {
686 .compatible = "samsung,exynos4210-rotator",
687 .data = &rot_limit_tbl_4210,
688 },
689 {
690 .compatible = "samsung,exynos4212-rotator",
691 .data = &rot_limit_tbl_4x12,
692 },
693 {
694 .compatible = "samsung,exynos5250-rotator",
695 .data = &rot_limit_tbl_5250,
696 },
697 {},
698};
699
634static int rotator_probe(struct platform_device *pdev) 700static int rotator_probe(struct platform_device *pdev)
635{ 701{
636 struct device *dev = &pdev->dev; 702 struct device *dev = &pdev->dev;
637 struct rot_context *rot; 703 struct rot_context *rot;
638 struct exynos_drm_ippdrv *ippdrv; 704 struct exynos_drm_ippdrv *ippdrv;
705 const struct of_device_id *match;
639 int ret; 706 int ret;
640 707
708 if (!dev->of_node) {
709 dev_err(dev, "cannot find of_node.\n");
710 return -ENODEV;
711 }
712
641 rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL); 713 rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
642 if (!rot) { 714 if (!rot)
643 dev_err(dev, "failed to allocate rot\n");
644 return -ENOMEM; 715 return -ENOMEM;
645 }
646 716
647 rot->limit_tbl = (struct rot_limit_table *) 717 match = of_match_node(exynos_rotator_match, dev->of_node);
648 platform_get_device_id(pdev)->driver_data; 718 if (!match) {
719 dev_err(dev, "failed to match node\n");
720 return -ENODEV;
721 }
722 rot->limit_tbl = (struct rot_limit_table *)match->data;
649 723
650 rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 724 rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
651 rot->regs = devm_ioremap_resource(dev, rot->regs_res); 725 rot->regs = devm_ioremap_resource(dev, rot->regs_res);
@@ -717,31 +791,6 @@ static int rotator_remove(struct platform_device *pdev)
717 return 0; 791 return 0;
718} 792}
719 793
720static struct rot_limit_table rot_limit_tbl = {
721 .ycbcr420_2p = {
722 .min_w = 32,
723 .min_h = 32,
724 .max_w = SZ_32K,
725 .max_h = SZ_32K,
726 .align = 3,
727 },
728 .rgb888 = {
729 .min_w = 8,
730 .min_h = 8,
731 .max_w = SZ_8K,
732 .max_h = SZ_8K,
733 .align = 2,
734 },
735};
736
737static struct platform_device_id rotator_driver_ids[] = {
738 {
739 .name = "exynos-rot",
740 .driver_data = (unsigned long)&rot_limit_tbl,
741 },
742 {},
743};
744
745static int rotator_clk_crtl(struct rot_context *rot, bool enable) 794static int rotator_clk_crtl(struct rot_context *rot, bool enable)
746{ 795{
747 if (enable) { 796 if (enable) {
@@ -803,10 +852,10 @@ static const struct dev_pm_ops rotator_pm_ops = {
803struct platform_driver rotator_driver = { 852struct platform_driver rotator_driver = {
804 .probe = rotator_probe, 853 .probe = rotator_probe,
805 .remove = rotator_remove, 854 .remove = rotator_remove,
806 .id_table = rotator_driver_ids,
807 .driver = { 855 .driver = {
808 .name = "exynos-rot", 856 .name = "exynos-rot",
809 .owner = THIS_MODULE, 857 .owner = THIS_MODULE,
810 .pm = &rotator_pm_ops, 858 .pm = &rotator_pm_ops,
859 .of_match_table = exynos_rotator_match,
811 }, 860 },
812}; 861};
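The rotator (and, in the same fashion, the HDMI and HDMI-PHY drivers further down) becomes DT-only: the platform_device_id table and its single limit table go away, probe refuses to run without an of_node, and the per-SoC limit table is picked via of_match_node() from the compatible string. A sketch of that select-variant-data-from-DT pattern, with foo_* and "vendor,foo-*" as placeholder names:

#include <linux/of.h>
#include <linux/platform_device.h>

struct foo_variant {
	unsigned int max_w;
	unsigned int max_h;
};

static const struct foo_variant foo_v1 = { .max_w = 16384, .max_h = 16384 };
static const struct foo_variant foo_v2 = { .max_w = 8192,  .max_h = 8192 };

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-v1", .data = &foo_v1 },
	{ .compatible = "vendor,foo-v2", .data = &foo_v2 },
	{ /* sentinel */ },
};

static int foo_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct foo_variant *variant;

	if (!pdev->dev.of_node)
		return -ENODEV;		/* driver is DT-only now */

	match = of_match_node(foo_of_match, pdev->dev.of_node);
	if (!match)
		return -ENODEV;
	variant = match->data;

	/* variant now describes the limits of this particular SoC */
	dev_info(&pdev->dev, "max %ux%u\n", variant->max_w, variant->max_h);
	return 0;
}

Once the driver can only bind through DT, the of_match_ptr() wrapper and the #ifdef CONFIG_OF guard around the match table serve no purpose, which is why the hunks above drop them as well.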
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index c57c56519add..4400330e4449 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -23,6 +23,7 @@
23#include "exynos_drm_drv.h" 23#include "exynos_drm_drv.h"
24#include "exynos_drm_crtc.h" 24#include "exynos_drm_crtc.h"
25#include "exynos_drm_encoder.h" 25#include "exynos_drm_encoder.h"
26#include "exynos_drm_vidi.h"
26 27
27/* vidi has totally three virtual windows. */ 28/* vidi has totally three virtual windows. */
28#define WINDOWS_NR 3 29#define WINDOWS_NR 3
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 2f5c6942c968..a0e10aeb0e67 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -32,6 +32,7 @@
32#include <linux/clk.h> 32#include <linux/clk.h>
33#include <linux/regulator/consumer.h> 33#include <linux/regulator/consumer.h>
34#include <linux/io.h> 34#include <linux/io.h>
35#include <linux/of.h>
35#include <linux/of_gpio.h> 36#include <linux/of_gpio.h>
36 37
37#include <drm/exynos_drm.h> 38#include <drm/exynos_drm.h>
@@ -1824,10 +1825,8 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
1824 1825
1825 res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) * 1826 res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
1826 sizeof(res->regul_bulk[0]), GFP_KERNEL); 1827 sizeof(res->regul_bulk[0]), GFP_KERNEL);
1827 if (!res->regul_bulk) { 1828 if (!res->regul_bulk)
1828 DRM_ERROR("failed to get memory for regulators\n");
1829 goto fail; 1829 goto fail;
1830 }
1831 for (i = 0; i < ARRAY_SIZE(supply); ++i) { 1830 for (i = 0; i < ARRAY_SIZE(supply); ++i) {
1832 res->regul_bulk[i].supply = supply[i]; 1831 res->regul_bulk[i].supply = supply[i];
1833 res->regul_bulk[i].consumer = NULL; 1832 res->regul_bulk[i].consumer = NULL;
@@ -1859,7 +1858,6 @@ void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy)
1859 hdmi_hdmiphy = hdmiphy; 1858 hdmi_hdmiphy = hdmiphy;
1860} 1859}
1861 1860
1862#ifdef CONFIG_OF
1863static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata 1861static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
1864 (struct device *dev) 1862 (struct device *dev)
1865{ 1863{
@@ -1868,10 +1866,8 @@ static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
1868 u32 value; 1866 u32 value;
1869 1867
1870 pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); 1868 pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
1871 if (!pd) { 1869 if (!pd)
1872 DRM_ERROR("memory allocation for pdata failed\n");
1873 goto err_data; 1870 goto err_data;
1874 }
1875 1871
1876 if (!of_find_property(np, "hpd-gpio", &value)) { 1872 if (!of_find_property(np, "hpd-gpio", &value)) {
1877 DRM_ERROR("no hpd gpio property found\n"); 1873 DRM_ERROR("no hpd gpio property found\n");
@@ -1885,33 +1881,7 @@ static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
1885err_data: 1881err_data:
1886 return NULL; 1882 return NULL;
1887} 1883}
1888#else
1889static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
1890 (struct device *dev)
1891{
1892 return NULL;
1893}
1894#endif
1895
1896static struct platform_device_id hdmi_driver_types[] = {
1897 {
1898 .name = "s5pv210-hdmi",
1899 .driver_data = HDMI_TYPE13,
1900 }, {
1901 .name = "exynos4-hdmi",
1902 .driver_data = HDMI_TYPE13,
1903 }, {
1904 .name = "exynos4-hdmi14",
1905 .driver_data = HDMI_TYPE14,
1906 }, {
1907 .name = "exynos5-hdmi",
1908 .driver_data = HDMI_TYPE14,
1909 }, {
1910 /* end node */
1911 }
1912};
1913 1884
1914#ifdef CONFIG_OF
1915static struct of_device_id hdmi_match_types[] = { 1885static struct of_device_id hdmi_match_types[] = {
1916 { 1886 {
1917 .compatible = "samsung,exynos5-hdmi", 1887 .compatible = "samsung,exynos5-hdmi",
@@ -1923,7 +1893,6 @@ static struct of_device_id hdmi_match_types[] = {
1923 /* end node */ 1893 /* end node */
1924 } 1894 }
1925}; 1895};
1926#endif
1927 1896
1928static int hdmi_probe(struct platform_device *pdev) 1897static int hdmi_probe(struct platform_device *pdev)
1929{ 1898{
@@ -1932,36 +1901,23 @@ static int hdmi_probe(struct platform_device *pdev)
1932 struct hdmi_context *hdata; 1901 struct hdmi_context *hdata;
1933 struct s5p_hdmi_platform_data *pdata; 1902 struct s5p_hdmi_platform_data *pdata;
1934 struct resource *res; 1903 struct resource *res;
1904 const struct of_device_id *match;
1935 int ret; 1905 int ret;
1936 1906
1937 if (dev->of_node) { 1907 if (!dev->of_node)
1938 pdata = drm_hdmi_dt_parse_pdata(dev); 1908 return -ENODEV;
1939 if (IS_ERR(pdata)) {
1940 DRM_ERROR("failed to parse dt\n");
1941 return PTR_ERR(pdata);
1942 }
1943 } else {
1944 pdata = dev->platform_data;
1945 }
1946 1909
1947 if (!pdata) { 1910 pdata = drm_hdmi_dt_parse_pdata(dev);
1948 DRM_ERROR("no platform data specified\n"); 1911 if (!pdata)
1949 return -EINVAL; 1912 return -EINVAL;
1950 }
1951 1913
1952 drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx), 1914 drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx), GFP_KERNEL);
1953 GFP_KERNEL); 1915 if (!drm_hdmi_ctx)
1954 if (!drm_hdmi_ctx) {
1955 DRM_ERROR("failed to allocate common hdmi context.\n");
1956 return -ENOMEM; 1916 return -ENOMEM;
1957 }
1958 1917
1959 hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), 1918 hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
1960 GFP_KERNEL); 1919 if (!hdata)
1961 if (!hdata) {
1962 DRM_ERROR("out of memory\n");
1963 return -ENOMEM; 1920 return -ENOMEM;
1964 }
1965 1921
1966 mutex_init(&hdata->hdmi_mutex); 1922 mutex_init(&hdata->hdmi_mutex);
1967 1923
@@ -1970,23 +1926,15 @@ static int hdmi_probe(struct platform_device *pdev)
1970 1926
1971 platform_set_drvdata(pdev, drm_hdmi_ctx); 1927 platform_set_drvdata(pdev, drm_hdmi_ctx);
1972 1928
1973 if (dev->of_node) { 1929 match = of_match_node(hdmi_match_types, dev->of_node);
1974 const struct of_device_id *match; 1930 if (!match)
1975 match = of_match_node(of_match_ptr(hdmi_match_types), 1931 return -ENODEV;
1976 dev->of_node); 1932 hdata->type = (enum hdmi_type)match->data;
1977 if (match == NULL)
1978 return -ENODEV;
1979 hdata->type = (enum hdmi_type)match->data;
1980 } else {
1981 hdata->type = (enum hdmi_type)platform_get_device_id
1982 (pdev)->driver_data;
1983 }
1984 1933
1985 hdata->hpd_gpio = pdata->hpd_gpio; 1934 hdata->hpd_gpio = pdata->hpd_gpio;
1986 hdata->dev = dev; 1935 hdata->dev = dev;
1987 1936
1988 ret = hdmi_resources_init(hdata); 1937 ret = hdmi_resources_init(hdata);
1989
1990 if (ret) { 1938 if (ret) {
1991 DRM_ERROR("hdmi_resources_init failed\n"); 1939 DRM_ERROR("hdmi_resources_init failed\n");
1992 return -EINVAL; 1940 return -EINVAL;
@@ -2141,11 +2089,10 @@ static const struct dev_pm_ops hdmi_pm_ops = {
2141struct platform_driver hdmi_driver = { 2089struct platform_driver hdmi_driver = {
2142 .probe = hdmi_probe, 2090 .probe = hdmi_probe,
2143 .remove = hdmi_remove, 2091 .remove = hdmi_remove,
2144 .id_table = hdmi_driver_types,
2145 .driver = { 2092 .driver = {
2146 .name = "exynos-hdmi", 2093 .name = "exynos-hdmi",
2147 .owner = THIS_MODULE, 2094 .owner = THIS_MODULE,
2148 .pm = &hdmi_pm_ops, 2095 .pm = &hdmi_pm_ops,
2149 .of_match_table = of_match_ptr(hdmi_match_types), 2096 .of_match_table = hdmi_match_types,
2150 }, 2097 },
2151}; 2098};
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index 6e320ae9afed..59abb1494ceb 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -15,6 +15,7 @@
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/i2c.h> 17#include <linux/i2c.h>
18#include <linux/of.h>
18 19
19#include "exynos_drm_drv.h" 20#include "exynos_drm_drv.h"
20#include "exynos_hdmi.h" 21#include "exynos_hdmi.h"
@@ -39,13 +40,6 @@ static int hdmiphy_remove(struct i2c_client *client)
39 return 0; 40 return 0;
40} 41}
41 42
42static const struct i2c_device_id hdmiphy_id[] = {
43 { "s5p_hdmiphy", 0 },
44 { "exynos5-hdmiphy", 0 },
45 { },
46};
47
48#ifdef CONFIG_OF
49static struct of_device_id hdmiphy_match_types[] = { 43static struct of_device_id hdmiphy_match_types[] = {
50 { 44 {
51 .compatible = "samsung,exynos5-hdmiphy", 45 .compatible = "samsung,exynos5-hdmiphy",
@@ -57,15 +51,13 @@ static struct of_device_id hdmiphy_match_types[] = {
57 /* end node */ 51 /* end node */
58 } 52 }
59}; 53};
60#endif
61 54
62struct i2c_driver hdmiphy_driver = { 55struct i2c_driver hdmiphy_driver = {
63 .driver = { 56 .driver = {
64 .name = "exynos-hdmiphy", 57 .name = "exynos-hdmiphy",
65 .owner = THIS_MODULE, 58 .owner = THIS_MODULE,
66 .of_match_table = of_match_ptr(hdmiphy_match_types), 59 .of_match_table = hdmiphy_match_types,
67 }, 60 },
68 .id_table = hdmiphy_id,
69 .probe = hdmiphy_probe, 61 .probe = hdmiphy_probe,
70 .remove = hdmiphy_remove, 62 .remove = hdmiphy_remove,
71 .command = NULL, 63 .command = NULL,
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index c9a137caea41..63bc5f92fbb3 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -30,6 +30,7 @@
30#include <linux/pm_runtime.h> 30#include <linux/pm_runtime.h>
31#include <linux/clk.h> 31#include <linux/clk.h>
32#include <linux/regulator/consumer.h> 32#include <linux/regulator/consumer.h>
33#include <linux/of.h>
33 34
34#include <drm/exynos_drm.h> 35#include <drm/exynos_drm.h>
35 36
@@ -1185,16 +1186,12 @@ static int mixer_probe(struct platform_device *pdev)
1185 1186
1186 drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx), 1187 drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx),
1187 GFP_KERNEL); 1188 GFP_KERNEL);
1188 if (!drm_hdmi_ctx) { 1189 if (!drm_hdmi_ctx)
1189 DRM_ERROR("failed to allocate common hdmi context.\n");
1190 return -ENOMEM; 1190 return -ENOMEM;
1191 }
1192 1191
1193 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 1192 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
1194 if (!ctx) { 1193 if (!ctx)
1195 DRM_ERROR("failed to alloc mixer context.\n");
1196 return -ENOMEM; 1194 return -ENOMEM;
1197 }
1198 1195
1199 mutex_init(&ctx->mixer_mutex); 1196 mutex_init(&ctx->mixer_mutex);
1200 1197
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index 7a2d40a5c1e1..e9064dd9045d 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -15,6 +15,7 @@ gma500_gfx-y += \
15 mmu.o \ 15 mmu.o \
16 power.o \ 16 power.o \
17 psb_drv.o \ 17 psb_drv.o \
18 gma_display.o \
18 psb_intel_display.o \ 19 psb_intel_display.o \
19 psb_intel_lvds.o \ 20 psb_intel_lvds.o \
20 psb_intel_modes.o \ 21 psb_intel_modes.o \
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 23e14e93991f..162f686c532d 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -641,6 +641,7 @@ const struct psb_ops cdv_chip_ops = {
641 641
642 .crtc_helper = &cdv_intel_helper_funcs, 642 .crtc_helper = &cdv_intel_helper_funcs,
643 .crtc_funcs = &cdv_intel_crtc_funcs, 643 .crtc_funcs = &cdv_intel_crtc_funcs,
644 .clock_funcs = &cdv_clock_funcs,
644 645
645 .output_init = cdv_output_init, 646 .output_init = cdv_output_init,
646 .hotplug = cdv_hotplug_event, 647 .hotplug = cdv_hotplug_event,
@@ -655,4 +656,6 @@ const struct psb_ops cdv_chip_ops = {
655 .restore_regs = cdv_restore_display_registers, 656 .restore_regs = cdv_restore_display_registers,
656 .power_down = cdv_power_down, 657 .power_down = cdv_power_down,
657 .power_up = cdv_power_up, 658 .power_up = cdv_power_up,
659 .update_wm = cdv_update_wm,
660 .disable_sr = cdv_disable_sr,
658}; 661};
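On the gma500 side, the cdv_device.c hunk wires the new Cedarview callbacks (clock_funcs, update_wm, disable_sr) into the chip-ops table so the shared code in the newly added gma_display.o can call back into chip-specific implementations instead of open-coding them. The general shape, with an illustrative ops struct; the real vtable is struct psb_ops, whose full layout is not shown in this diff:

/* Illustrative per-chip ops table; everything beyond the two hooks visible
 * in the hunk above is an assumption. */
struct chip_ops {
	void (*update_wm)(struct drm_device *dev, struct drm_crtc *crtc);
	void (*disable_sr)(struct drm_device *dev);
};

static const struct chip_ops cdv_like_ops = {
	.update_wm = cdv_update_wm,
	.disable_sr = cdv_disable_sr,
};

/* Common display code then calls ops->update_wm(dev, crtc) when present. */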
diff --git a/drivers/gpu/drm/gma500/cdv_device.h b/drivers/gpu/drm/gma500/cdv_device.h
index 9561e17621b3..705c11d47d45 100644
--- a/drivers/gpu/drm/gma500/cdv_device.h
+++ b/drivers/gpu/drm/gma500/cdv_device.h
@@ -17,6 +17,7 @@
17 17
18extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs; 18extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs;
19extern const struct drm_crtc_funcs cdv_intel_crtc_funcs; 19extern const struct drm_crtc_funcs cdv_intel_crtc_funcs;
20extern const struct gma_clock_funcs cdv_clock_funcs;
20extern void cdv_intel_crt_init(struct drm_device *dev, 21extern void cdv_intel_crt_init(struct drm_device *dev,
21 struct psb_intel_mode_device *mode_dev); 22 struct psb_intel_mode_device *mode_dev);
22extern void cdv_intel_lvds_init(struct drm_device *dev, 23extern void cdv_intel_lvds_init(struct drm_device *dev,
@@ -25,12 +26,5 @@ extern void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *
25 int reg); 26 int reg);
26extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev, 27extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
27 struct drm_crtc *crtc); 28 struct drm_crtc *crtc);
28 29extern void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc);
29static inline void cdv_intel_wait_for_vblank(struct drm_device *dev) 30extern void cdv_disable_sr(struct drm_device *dev);
30{
31 /* Wait for 20ms, i.e. one cycle at 50hz. */
32 /* FIXME: msleep ?? */
33 mdelay(20);
34}
35
36
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 7b8386fc3024..661af492173d 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -95,13 +95,12 @@ static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
95 95
96 struct drm_device *dev = encoder->dev; 96 struct drm_device *dev = encoder->dev;
97 struct drm_crtc *crtc = encoder->crtc; 97 struct drm_crtc *crtc = encoder->crtc;
98 struct psb_intel_crtc *psb_intel_crtc = 98 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
99 to_psb_intel_crtc(crtc);
100 int dpll_md_reg; 99 int dpll_md_reg;
101 u32 adpa, dpll_md; 100 u32 adpa, dpll_md;
102 u32 adpa_reg; 101 u32 adpa_reg;
103 102
104 if (psb_intel_crtc->pipe == 0) 103 if (gma_crtc->pipe == 0)
105 dpll_md_reg = DPLL_A_MD; 104 dpll_md_reg = DPLL_A_MD;
106 else 105 else
107 dpll_md_reg = DPLL_B_MD; 106 dpll_md_reg = DPLL_B_MD;
@@ -124,7 +123,7 @@ static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
124 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 123 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
125 adpa |= ADPA_VSYNC_ACTIVE_HIGH; 124 adpa |= ADPA_VSYNC_ACTIVE_HIGH;
126 125
127 if (psb_intel_crtc->pipe == 0) 126 if (gma_crtc->pipe == 0)
128 adpa |= ADPA_PIPE_A_SELECT; 127 adpa |= ADPA_PIPE_A_SELECT;
129 else 128 else
130 adpa |= ADPA_PIPE_B_SELECT; 129 adpa |= ADPA_PIPE_B_SELECT;
@@ -197,10 +196,9 @@ static enum drm_connector_status cdv_intel_crt_detect(
197 196
198static void cdv_intel_crt_destroy(struct drm_connector *connector) 197static void cdv_intel_crt_destroy(struct drm_connector *connector)
199{ 198{
200 struct psb_intel_encoder *psb_intel_encoder = 199 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
201 psb_intel_attached_encoder(connector);
202 200
203 psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus); 201 psb_intel_i2c_destroy(gma_encoder->ddc_bus);
204 drm_sysfs_connector_remove(connector); 202 drm_sysfs_connector_remove(connector);
205 drm_connector_cleanup(connector); 203 drm_connector_cleanup(connector);
206 kfree(connector); 204 kfree(connector);
@@ -208,9 +206,9 @@ static void cdv_intel_crt_destroy(struct drm_connector *connector)
208 206
209static int cdv_intel_crt_get_modes(struct drm_connector *connector) 207static int cdv_intel_crt_get_modes(struct drm_connector *connector)
210{ 208{
211 struct psb_intel_encoder *psb_intel_encoder = 209 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
212 psb_intel_attached_encoder(connector); 210 return psb_intel_ddc_get_modes(connector,
213 return psb_intel_ddc_get_modes(connector, &psb_intel_encoder->ddc_bus->adapter); 211 &gma_encoder->ddc_bus->adapter);
214} 212}
215 213
216static int cdv_intel_crt_set_property(struct drm_connector *connector, 214static int cdv_intel_crt_set_property(struct drm_connector *connector,
@@ -227,8 +225,8 @@ static int cdv_intel_crt_set_property(struct drm_connector *connector,
227static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = { 225static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
228 .dpms = cdv_intel_crt_dpms, 226 .dpms = cdv_intel_crt_dpms,
229 .mode_fixup = cdv_intel_crt_mode_fixup, 227 .mode_fixup = cdv_intel_crt_mode_fixup,
230 .prepare = psb_intel_encoder_prepare, 228 .prepare = gma_encoder_prepare,
231 .commit = psb_intel_encoder_commit, 229 .commit = gma_encoder_commit,
232 .mode_set = cdv_intel_crt_mode_set, 230 .mode_set = cdv_intel_crt_mode_set,
233}; 231};
234 232
@@ -244,7 +242,7 @@ static const struct drm_connector_helper_funcs
244 cdv_intel_crt_connector_helper_funcs = { 242 cdv_intel_crt_connector_helper_funcs = {
245 .mode_valid = cdv_intel_crt_mode_valid, 243 .mode_valid = cdv_intel_crt_mode_valid,
246 .get_modes = cdv_intel_crt_get_modes, 244 .get_modes = cdv_intel_crt_get_modes,
247 .best_encoder = psb_intel_best_encoder, 245 .best_encoder = gma_best_encoder,
248}; 246};
249 247
250static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder) 248static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder)
@@ -260,32 +258,31 @@ void cdv_intel_crt_init(struct drm_device *dev,
260 struct psb_intel_mode_device *mode_dev) 258 struct psb_intel_mode_device *mode_dev)
261{ 259{
262 260
263 struct psb_intel_connector *psb_intel_connector; 261 struct gma_connector *gma_connector;
264 struct psb_intel_encoder *psb_intel_encoder; 262 struct gma_encoder *gma_encoder;
265 struct drm_connector *connector; 263 struct drm_connector *connector;
266 struct drm_encoder *encoder; 264 struct drm_encoder *encoder;
267 265
268 u32 i2c_reg; 266 u32 i2c_reg;
269 267
270 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); 268 gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
271 if (!psb_intel_encoder) 269 if (!gma_encoder)
272 return; 270 return;
273 271
274 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); 272 gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
275 if (!psb_intel_connector) 273 if (!gma_connector)
276 goto failed_connector; 274 goto failed_connector;
277 275
278 connector = &psb_intel_connector->base; 276 connector = &gma_connector->base;
279 connector->polled = DRM_CONNECTOR_POLL_HPD; 277 connector->polled = DRM_CONNECTOR_POLL_HPD;
280 drm_connector_init(dev, connector, 278 drm_connector_init(dev, connector,
281 &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); 279 &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
282 280
283 encoder = &psb_intel_encoder->base; 281 encoder = &gma_encoder->base;
284 drm_encoder_init(dev, encoder, 282 drm_encoder_init(dev, encoder,
285 &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC); 283 &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC);
286 284
287 psb_intel_connector_attach_encoder(psb_intel_connector, 285 gma_connector_attach_encoder(gma_connector, gma_encoder);
288 psb_intel_encoder);
289 286
290 /* Set up the DDC bus. */ 287 /* Set up the DDC bus. */
291 i2c_reg = GPIOA; 288 i2c_reg = GPIOA;
@@ -294,15 +291,15 @@ void cdv_intel_crt_init(struct drm_device *dev,
294 if (dev_priv->crt_ddc_bus != 0) 291 if (dev_priv->crt_ddc_bus != 0)
295 i2c_reg = dev_priv->crt_ddc_bus; 292 i2c_reg = dev_priv->crt_ddc_bus;
296 }*/ 293 }*/
297 psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev, 294 gma_encoder->ddc_bus = psb_intel_i2c_create(dev,
298 i2c_reg, "CRTDDC_A"); 295 i2c_reg, "CRTDDC_A");
299 if (!psb_intel_encoder->ddc_bus) { 296 if (!gma_encoder->ddc_bus) {
300 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " 297 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
301 "failed.\n"); 298 "failed.\n");
302 goto failed_ddc; 299 goto failed_ddc;
303 } 300 }
304 301
305 psb_intel_encoder->type = INTEL_OUTPUT_ANALOG; 302 gma_encoder->type = INTEL_OUTPUT_ANALOG;
306 /* 303 /*
307 psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT); 304 psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT);
308 psb_intel_output->crtc_mask = (1 << 0) | (1 << 1); 305 psb_intel_output->crtc_mask = (1 << 0) | (1 << 1);
@@ -318,10 +315,10 @@ void cdv_intel_crt_init(struct drm_device *dev,
318 315
319 return; 316 return;
320failed_ddc: 317failed_ddc:
321 drm_encoder_cleanup(&psb_intel_encoder->base); 318 drm_encoder_cleanup(&gma_encoder->base);
322 drm_connector_cleanup(&psb_intel_connector->base); 319 drm_connector_cleanup(&gma_connector->base);
323 kfree(psb_intel_connector); 320 kfree(gma_connector);
324failed_connector: 321failed_connector:
325 kfree(psb_intel_encoder); 322 kfree(gma_encoder);
326 return; 323 return;
327} 324}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 82430ad8ba62..8fbfa06da62d 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -19,54 +19,20 @@
19 */ 19 */
20 20
21#include <linux/i2c.h> 21#include <linux/i2c.h>
22#include <linux/pm_runtime.h>
23 22
24#include <drm/drmP.h> 23#include <drm/drmP.h>
25#include "framebuffer.h" 24#include "framebuffer.h"
26#include "psb_drv.h" 25#include "psb_drv.h"
27#include "psb_intel_drv.h" 26#include "psb_intel_drv.h"
28#include "psb_intel_reg.h" 27#include "psb_intel_reg.h"
29#include "psb_intel_display.h" 28#include "gma_display.h"
30#include "power.h" 29#include "power.h"
31#include "cdv_device.h" 30#include "cdv_device.h"
32 31
32static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
33 struct drm_crtc *crtc, int target,
34 int refclk, struct gma_clock_t *best_clock);
33 35
34struct cdv_intel_range_t {
35 int min, max;
36};
37
38struct cdv_intel_p2_t {
39 int dot_limit;
40 int p2_slow, p2_fast;
41};
42
43struct cdv_intel_clock_t {
44 /* given values */
45 int n;
46 int m1, m2;
47 int p1, p2;
48 /* derived values */
49 int dot;
50 int vco;
51 int m;
52 int p;
53};
54
55#define INTEL_P2_NUM 2
56
57struct cdv_intel_limit_t {
58 struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1;
59 struct cdv_intel_p2_t p2;
60 bool (*find_pll)(const struct cdv_intel_limit_t *, struct drm_crtc *,
61 int, int, struct cdv_intel_clock_t *);
62};
63
64static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
65 struct drm_crtc *crtc, int target, int refclk,
66 struct cdv_intel_clock_t *best_clock);
67static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
68 int refclk,
69 struct cdv_intel_clock_t *best_clock);
70 36
71#define CDV_LIMIT_SINGLE_LVDS_96 0 37#define CDV_LIMIT_SINGLE_LVDS_96 0
72#define CDV_LIMIT_SINGLE_LVDS_100 1 38#define CDV_LIMIT_SINGLE_LVDS_100 1
@@ -75,7 +41,7 @@ static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct
75#define CDV_LIMIT_DP_27 4 41#define CDV_LIMIT_DP_27 4
76#define CDV_LIMIT_DP_100 5 42#define CDV_LIMIT_DP_100 5
77 43
78static const struct cdv_intel_limit_t cdv_intel_limits[] = { 44static const struct gma_limit_t cdv_intel_limits[] = {
79 { /* CDV_SINGLE_LVDS_96MHz */ 45 { /* CDV_SINGLE_LVDS_96MHz */
80 .dot = {.min = 20000, .max = 115500}, 46 .dot = {.min = 20000, .max = 115500},
81 .vco = {.min = 1800000, .max = 3600000}, 47 .vco = {.min = 1800000, .max = 3600000},
@@ -85,9 +51,8 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
85 .m2 = {.min = 58, .max = 158}, 51 .m2 = {.min = 58, .max = 158},
86 .p = {.min = 28, .max = 140}, 52 .p = {.min = 28, .max = 140},
87 .p1 = {.min = 2, .max = 10}, 53 .p1 = {.min = 2, .max = 10},
88 .p2 = {.dot_limit = 200000, 54 .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
89 .p2_slow = 14, .p2_fast = 14}, 55 .find_pll = gma_find_best_pll,
90 .find_pll = cdv_intel_find_best_PLL,
91 }, 56 },
92 { /* CDV_SINGLE_LVDS_100MHz */ 57 { /* CDV_SINGLE_LVDS_100MHz */
93 .dot = {.min = 20000, .max = 115500}, 58 .dot = {.min = 20000, .max = 115500},
@@ -102,7 +67,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
102 * is 80-224Mhz. Prefer single channel as much as possible. 67 * is 80-224Mhz. Prefer single channel as much as possible.
103 */ 68 */
104 .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14}, 69 .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
105 .find_pll = cdv_intel_find_best_PLL, 70 .find_pll = gma_find_best_pll,
106 }, 71 },
107 { /* CDV_DAC_HDMI_27MHz */ 72 { /* CDV_DAC_HDMI_27MHz */
108 .dot = {.min = 20000, .max = 400000}, 73 .dot = {.min = 20000, .max = 400000},
@@ -114,7 +79,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
114 .p = {.min = 5, .max = 90}, 79 .p = {.min = 5, .max = 90},
115 .p1 = {.min = 1, .max = 9}, 80 .p1 = {.min = 1, .max = 9},
116 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5}, 81 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
117 .find_pll = cdv_intel_find_best_PLL, 82 .find_pll = gma_find_best_pll,
118 }, 83 },
119 { /* CDV_DAC_HDMI_96MHz */ 84 { /* CDV_DAC_HDMI_96MHz */
120 .dot = {.min = 20000, .max = 400000}, 85 .dot = {.min = 20000, .max = 400000},
@@ -126,7 +91,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
126 .p = {.min = 5, .max = 100}, 91 .p = {.min = 5, .max = 100},
127 .p1 = {.min = 1, .max = 10}, 92 .p1 = {.min = 1, .max = 10},
128 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5}, 93 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
129 .find_pll = cdv_intel_find_best_PLL, 94 .find_pll = gma_find_best_pll,
130 }, 95 },
131 { /* CDV_DP_27MHz */ 96 { /* CDV_DP_27MHz */
132 .dot = {.min = 160000, .max = 272000}, 97 .dot = {.min = 160000, .max = 272000},
@@ -255,10 +220,10 @@ void cdv_sb_reset(struct drm_device *dev)
255 */ 220 */
256static int 221static int
257cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc, 222cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
258 struct cdv_intel_clock_t *clock, bool is_lvds, u32 ddi_select) 223 struct gma_clock_t *clock, bool is_lvds, u32 ddi_select)
259{ 224{
260 struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc); 225 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
261 int pipe = psb_crtc->pipe; 226 int pipe = gma_crtc->pipe;
262 u32 m, n_vco, p; 227 u32 m, n_vco, p;
263 int ret = 0; 228 int ret = 0;
264 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 229 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
@@ -405,31 +370,11 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
405 return 0; 370 return 0;
406} 371}
407 372
408/* 373static const struct gma_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
409 * Returns whether any encoder on the specified pipe is of the specified type 374 int refclk)
410 */
411static bool cdv_intel_pipe_has_type(struct drm_crtc *crtc, int type)
412{
413 struct drm_device *dev = crtc->dev;
414 struct drm_mode_config *mode_config = &dev->mode_config;
415 struct drm_connector *l_entry;
416
417 list_for_each_entry(l_entry, &mode_config->connector_list, head) {
418 if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
419 struct psb_intel_encoder *psb_intel_encoder =
420 psb_intel_attached_encoder(l_entry);
421 if (psb_intel_encoder->type == type)
422 return true;
423 }
424 }
425 return false;
426}
427
428static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
429 int refclk)
430{ 375{
431 const struct cdv_intel_limit_t *limit; 376 const struct gma_limit_t *limit;
432 if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 377 if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
433 /* 378 /*
434 * Now only single-channel LVDS is supported on CDV. If it is 379 * Now only single-channel LVDS is supported on CDV. If it is
435 * incorrect, please add the dual-channel LVDS. 380 * incorrect, please add the dual-channel LVDS.
@@ -438,8 +383,8 @@ static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
438 limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96]; 383 limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];
439 else 384 else
440 limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100]; 385 limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
441 } else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 386 } else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
442 psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { 387 gma_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
443 if (refclk == 27000) 388 if (refclk == 27000)
444 limit = &cdv_intel_limits[CDV_LIMIT_DP_27]; 389 limit = &cdv_intel_limits[CDV_LIMIT_DP_27];
445 else 390 else
@@ -454,8 +399,7 @@ static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
454} 399}
455 400
456/* m1 is reserved as 0 in CDV, n is a ring counter */ 401/* m1 is reserved as 0 in CDV, n is a ring counter */
457static void cdv_intel_clock(struct drm_device *dev, 402static void cdv_intel_clock(int refclk, struct gma_clock_t *clock)
458 int refclk, struct cdv_intel_clock_t *clock)
459{ 403{
460 clock->m = clock->m2 + 2; 404 clock->m = clock->m2 + 2;
461 clock->p = clock->p1 * clock->p2; 405 clock->p = clock->p1 * clock->p2;
@@ -463,93 +407,12 @@ static void cdv_intel_clock(struct drm_device *dev,
463 clock->dot = clock->vco / clock->p; 407 clock->dot = clock->vco / clock->p;
464} 408}
465 409
466 410static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
467#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; } 411 struct drm_crtc *crtc, int target,
468static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc, 412 int refclk,
469 const struct cdv_intel_limit_t *limit, 413 struct gma_clock_t *best_clock)
470 struct cdv_intel_clock_t *clock)
471{
472 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
473 INTELPllInvalid("p1 out of range\n");
474 if (clock->p < limit->p.min || limit->p.max < clock->p)
475 INTELPllInvalid("p out of range\n");
476 /* unnecessary to check the range of m(m1/M2)/n again */
477 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
478 INTELPllInvalid("vco out of range\n");
479 /* XXX: We may need to be checking "Dot clock"
480 * depending on the multiplier, connector, etc.,
481 * rather than just a single range.
482 */
483 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
484 INTELPllInvalid("dot out of range\n");
485
486 return true;
487}
488
489static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
490 struct drm_crtc *crtc, int target, int refclk,
491 struct cdv_intel_clock_t *best_clock)
492{ 414{
493 struct drm_device *dev = crtc->dev; 415 struct gma_clock_t clock;
494 struct cdv_intel_clock_t clock;
495 int err = target;
496
497
498 if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
499 (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
500 /*
501 * For LVDS, if the panel is on, just rely on its current
502 * settings for dual-channel. We haven't figured out how to
503 * reliably set up different single/dual channel state, if we
504 * even can.
505 */
506 if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
507 LVDS_CLKB_POWER_UP)
508 clock.p2 = limit->p2.p2_fast;
509 else
510 clock.p2 = limit->p2.p2_slow;
511 } else {
512 if (target < limit->p2.dot_limit)
513 clock.p2 = limit->p2.p2_slow;
514 else
515 clock.p2 = limit->p2.p2_fast;
516 }
517
518 memset(best_clock, 0, sizeof(*best_clock));
519 clock.m1 = 0;
520 /* m1 is reserved as 0 in CDV, n is a ring counter.
521 So skip the m1 loop */
522 for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
523 for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max;
524 clock.m2++) {
525 for (clock.p1 = limit->p1.min;
526 clock.p1 <= limit->p1.max;
527 clock.p1++) {
528 int this_err;
529
530 cdv_intel_clock(dev, refclk, &clock);
531
532 if (!cdv_intel_PLL_is_valid(crtc,
533 limit, &clock))
534 continue;
535
536 this_err = abs(clock.dot - target);
537 if (this_err < err) {
538 *best_clock = clock;
539 err = this_err;
540 }
541 }
542 }
543 }
544
545 return err != target;
546}
547
548static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
549 int refclk,
550 struct cdv_intel_clock_t *best_clock)
551{
552 struct cdv_intel_clock_t clock;
553 if (refclk == 27000) { 416 if (refclk == 27000) {
554 if (target < 200000) { 417 if (target < 200000) {
555 clock.p1 = 2; 418 clock.p1 = 2;
@@ -584,85 +447,10 @@ static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct
584 clock.p = clock.p1 * clock.p2; 447 clock.p = clock.p1 * clock.p2;
585 clock.vco = (refclk * clock.m) / clock.n; 448 clock.vco = (refclk * clock.m) / clock.n;
586 clock.dot = clock.vco / clock.p; 449 clock.dot = clock.vco / clock.p;
587 memcpy(best_clock, &clock, sizeof(struct cdv_intel_clock_t)); 450 memcpy(best_clock, &clock, sizeof(struct gma_clock_t));
588 return true; 451 return true;
589} 452}
590 453
591static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
592 int x, int y, struct drm_framebuffer *old_fb)
593{
594 struct drm_device *dev = crtc->dev;
595 struct drm_psb_private *dev_priv = dev->dev_private;
596 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
597 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
598 int pipe = psb_intel_crtc->pipe;
599 const struct psb_offset *map = &dev_priv->regmap[pipe];
600 unsigned long start, offset;
601 u32 dspcntr;
602 int ret = 0;
603
604 if (!gma_power_begin(dev, true))
605 return 0;
606
607 /* no fb bound */
608 if (!crtc->fb) {
609 dev_err(dev->dev, "No FB bound\n");
610 goto psb_intel_pipe_cleaner;
611 }
612
613
614 /* We are displaying this buffer, make sure it is actually loaded
615 into the GTT */
616 ret = psb_gtt_pin(psbfb->gtt);
617 if (ret < 0)
618 goto psb_intel_pipe_set_base_exit;
619 start = psbfb->gtt->offset;
620 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
621
622 REG_WRITE(map->stride, crtc->fb->pitches[0]);
623
624 dspcntr = REG_READ(map->cntr);
625 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
626
627 switch (crtc->fb->bits_per_pixel) {
628 case 8:
629 dspcntr |= DISPPLANE_8BPP;
630 break;
631 case 16:
632 if (crtc->fb->depth == 15)
633 dspcntr |= DISPPLANE_15_16BPP;
634 else
635 dspcntr |= DISPPLANE_16BPP;
636 break;
637 case 24:
638 case 32:
639 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
640 break;
641 default:
642 dev_err(dev->dev, "Unknown color depth\n");
643 ret = -EINVAL;
644 goto psb_intel_pipe_set_base_exit;
645 }
646 REG_WRITE(map->cntr, dspcntr);
647
648 dev_dbg(dev->dev,
649 "Writing base %08lX %08lX %d %d\n", start, offset, x, y);
650
651 REG_WRITE(map->base, offset);
652 REG_READ(map->base);
653 REG_WRITE(map->surf, start);
654 REG_READ(map->surf);
655
656psb_intel_pipe_cleaner:
657 /* If there was a previous display we can now unpin it */
658 if (old_fb)
659 psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
660
661psb_intel_pipe_set_base_exit:
662 gma_power_end(dev);
663 return ret;
664}
665
666#define FIFO_PIPEA (1 << 0) 454#define FIFO_PIPEA (1 << 0)
667#define FIFO_PIPEB (1 << 1) 455#define FIFO_PIPEB (1 << 1)
668 456
@@ -670,12 +458,12 @@ static bool cdv_intel_pipe_enabled(struct drm_device *dev, int pipe)
670{ 458{
671 struct drm_crtc *crtc; 459 struct drm_crtc *crtc;
672 struct drm_psb_private *dev_priv = dev->dev_private; 460 struct drm_psb_private *dev_priv = dev->dev_private;
673 struct psb_intel_crtc *psb_intel_crtc = NULL; 461 struct gma_crtc *gma_crtc = NULL;
674 462
675 crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 463 crtc = dev_priv->pipe_to_crtc_mapping[pipe];
676 psb_intel_crtc = to_psb_intel_crtc(crtc); 464 gma_crtc = to_gma_crtc(crtc);
677 465
678 if (crtc->fb == NULL || !psb_intel_crtc->active) 466 if (crtc->fb == NULL || !gma_crtc->active)
679 return false; 467 return false;
680 return true; 468 return true;
681} 469}
@@ -701,29 +489,29 @@ static bool cdv_intel_single_pipe_active (struct drm_device *dev)
701 489
702static bool is_pipeb_lvds(struct drm_device *dev, struct drm_crtc *crtc) 490static bool is_pipeb_lvds(struct drm_device *dev, struct drm_crtc *crtc)
703{ 491{
704 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 492 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
705 struct drm_mode_config *mode_config = &dev->mode_config; 493 struct drm_mode_config *mode_config = &dev->mode_config;
706 struct drm_connector *connector; 494 struct drm_connector *connector;
707 495
708 if (psb_intel_crtc->pipe != 1) 496 if (gma_crtc->pipe != 1)
709 return false; 497 return false;
710 498
711 list_for_each_entry(connector, &mode_config->connector_list, head) { 499 list_for_each_entry(connector, &mode_config->connector_list, head) {
712 struct psb_intel_encoder *psb_intel_encoder = 500 struct gma_encoder *gma_encoder =
713 psb_intel_attached_encoder(connector); 501 gma_attached_encoder(connector);
714 502
715 if (!connector->encoder 503 if (!connector->encoder
716 || connector->encoder->crtc != crtc) 504 || connector->encoder->crtc != crtc)
717 continue; 505 continue;
718 506
719 if (psb_intel_encoder->type == INTEL_OUTPUT_LVDS) 507 if (gma_encoder->type == INTEL_OUTPUT_LVDS)
720 return true; 508 return true;
721 } 509 }
722 510
723 return false; 511 return false;
724} 512}
725 513
726static void cdv_intel_disable_self_refresh (struct drm_device *dev) 514void cdv_disable_sr(struct drm_device *dev)
727{ 515{
728 if (REG_READ(FW_BLC_SELF) & FW_BLC_SELF_EN) { 516 if (REG_READ(FW_BLC_SELF) & FW_BLC_SELF_EN) {
729 517
@@ -731,7 +519,7 @@ static void cdv_intel_disable_self_refresh (struct drm_device *dev)
731 REG_WRITE(FW_BLC_SELF, (REG_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN)); 519 REG_WRITE(FW_BLC_SELF, (REG_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN));
732 REG_READ(FW_BLC_SELF); 520 REG_READ(FW_BLC_SELF);
733 521
734 cdv_intel_wait_for_vblank(dev); 522 gma_wait_for_vblank(dev);
735 523
736	/* Cedarview workaround to write overlay plane, which forces it to leave	524	/* Cedarview workaround to write overlay plane, which forces it to leave
737 * MAX_FIFO state. 525 * MAX_FIFO state.
@@ -739,13 +527,14 @@ static void cdv_intel_disable_self_refresh (struct drm_device *dev)
739 REG_WRITE(OV_OVADD, 0/*dev_priv->ovl_offset*/); 527 REG_WRITE(OV_OVADD, 0/*dev_priv->ovl_offset*/);
740 REG_READ(OV_OVADD); 528 REG_READ(OV_OVADD);
741 529
742 cdv_intel_wait_for_vblank(dev); 530 gma_wait_for_vblank(dev);
743 } 531 }
744 532
745} 533}
746 534
747static void cdv_intel_update_watermark (struct drm_device *dev, struct drm_crtc *crtc) 535void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc)
748{ 536{
537 struct drm_psb_private *dev_priv = dev->dev_private;
749 538
750 if (cdv_intel_single_pipe_active(dev)) { 539 if (cdv_intel_single_pipe_active(dev)) {
751 u32 fw; 540 u32 fw;
@@ -780,12 +569,12 @@ static void cdv_intel_update_watermark (struct drm_device *dev, struct drm_crtc
780 569
781 REG_WRITE(DSPFW6, 0x10); 570 REG_WRITE(DSPFW6, 0x10);
782 571
783 cdv_intel_wait_for_vblank(dev); 572 gma_wait_for_vblank(dev);
784 573
785 /* enable self-refresh for single pipe active */ 574 /* enable self-refresh for single pipe active */
786 REG_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 575 REG_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
787 REG_READ(FW_BLC_SELF); 576 REG_READ(FW_BLC_SELF);
788 cdv_intel_wait_for_vblank(dev); 577 gma_wait_for_vblank(dev);
789 578
790 } else { 579 } else {
791 580
@@ -797,216 +586,12 @@ static void cdv_intel_update_watermark (struct drm_device *dev, struct drm_crtc
797 REG_WRITE(DSPFW5, 0x01010101); 586 REG_WRITE(DSPFW5, 0x01010101);
798 REG_WRITE(DSPFW6, 0x1d0); 587 REG_WRITE(DSPFW6, 0x1d0);
799 588
800 cdv_intel_wait_for_vblank(dev); 589 gma_wait_for_vblank(dev);
801
802 cdv_intel_disable_self_refresh(dev);
803
804 }
805}
806
807/** Loads the palette/gamma unit for the CRTC with the prepared values */
808static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
809{
810 struct drm_device *dev = crtc->dev;
811 struct drm_psb_private *dev_priv = dev->dev_private;
812 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
813 int palreg = PALETTE_A;
814 int i;
815
816 /* The clocks have to be on to load the palette. */
817 if (!crtc->enabled)
818 return;
819
820 switch (psb_intel_crtc->pipe) {
821 case 0:
822 break;
823 case 1:
824 palreg = PALETTE_B;
825 break;
826 case 2:
827 palreg = PALETTE_C;
828 break;
829 default:
830 dev_err(dev->dev, "Illegal Pipe Number.\n");
831 return;
832 }
833
834 if (gma_power_begin(dev, false)) {
835 for (i = 0; i < 256; i++) {
836 REG_WRITE(palreg + 4 * i,
837 ((psb_intel_crtc->lut_r[i] +
838 psb_intel_crtc->lut_adj[i]) << 16) |
839 ((psb_intel_crtc->lut_g[i] +
840 psb_intel_crtc->lut_adj[i]) << 8) |
841 (psb_intel_crtc->lut_b[i] +
842 psb_intel_crtc->lut_adj[i]));
843 }
844 gma_power_end(dev);
845 } else {
846 for (i = 0; i < 256; i++) {
847 dev_priv->regs.pipe[0].palette[i] =
848 ((psb_intel_crtc->lut_r[i] +
849 psb_intel_crtc->lut_adj[i]) << 16) |
850 ((psb_intel_crtc->lut_g[i] +
851 psb_intel_crtc->lut_adj[i]) << 8) |
852 (psb_intel_crtc->lut_b[i] +
853 psb_intel_crtc->lut_adj[i]);
854 }
855
856 }
857}
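The removed LUT loader (its role is taken over by the shared gamma code) packs each palette entry as ((r + adj) << 16) | ((g + adj) << 8) | (b + adj), writing it either straight to the PALETTE_A/B/C registers when the hardware is powered or into the saved register image otherwise. As an illustrative example, lut_r = 0x80, lut_g = 0x40, lut_b = 0x20 with lut_adj = 0 produces the 32-bit palette word 0x00804020.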
858
859/**
860 * Sets the power management mode of the pipe and plane.
861 *
862 * This code should probably grow support for turning the cursor off and back
863 * on appropriately at the same time as we're turning the pipe off/on.
864 */
865static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
866{
867 struct drm_device *dev = crtc->dev;
868 struct drm_psb_private *dev_priv = dev->dev_private;
869 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
870 int pipe = psb_intel_crtc->pipe;
871 const struct psb_offset *map = &dev_priv->regmap[pipe];
872 u32 temp;
873
874 /* XXX: When our outputs are all unaware of DPMS modes other than off
875 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
876 */
877 cdv_intel_disable_self_refresh(dev);
878
879 switch (mode) {
880 case DRM_MODE_DPMS_ON:
881 case DRM_MODE_DPMS_STANDBY:
882 case DRM_MODE_DPMS_SUSPEND:
883 if (psb_intel_crtc->active)
884 break;
885
886 psb_intel_crtc->active = true;
887
888 /* Enable the DPLL */
889 temp = REG_READ(map->dpll);
890 if ((temp & DPLL_VCO_ENABLE) == 0) {
891 REG_WRITE(map->dpll, temp);
892 REG_READ(map->dpll);
893 /* Wait for the clocks to stabilize. */
894 udelay(150);
895 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
896 REG_READ(map->dpll);
897 /* Wait for the clocks to stabilize. */
898 udelay(150);
899 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
900 REG_READ(map->dpll);
901 /* Wait for the clocks to stabilize. */
902 udelay(150);
903 }
904
905 /* Jim Bish - switch plan and pipe per scott */
906 /* Enable the plane */
907 temp = REG_READ(map->cntr);
908 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
909 REG_WRITE(map->cntr,
910 temp | DISPLAY_PLANE_ENABLE);
911 /* Flush the plane changes */
912 REG_WRITE(map->base, REG_READ(map->base));
913 }
914
915 udelay(150);
916
917 /* Enable the pipe */
918 temp = REG_READ(map->conf);
919 if ((temp & PIPEACONF_ENABLE) == 0)
920 REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
921
922 temp = REG_READ(map->status);
923 temp &= ~(0xFFFF);
924 temp |= PIPE_FIFO_UNDERRUN;
925 REG_WRITE(map->status, temp);
926 REG_READ(map->status);
927
928 cdv_intel_crtc_load_lut(crtc);
929
930 /* Give the overlay scaler a chance to enable
931 * if it's on this pipe */
932 /* psb_intel_crtc_dpms_video(crtc, true); TODO */
933 break;
934 case DRM_MODE_DPMS_OFF:
935 if (!psb_intel_crtc->active)
936 break;
937
938 psb_intel_crtc->active = false;
939
940 /* Give the overlay scaler a chance to disable
941 * if it's on this pipe */
942 /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
943
944 /* Disable the VGA plane that we never use */
945 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
946
947 /* Jim Bish - changed pipe/plane here as well. */
948
949 drm_vblank_off(dev, pipe);
950 /* Wait for vblank for the disable to take effect */
951 cdv_intel_wait_for_vblank(dev);
952
953 /* Next, disable display pipes */
954 temp = REG_READ(map->conf);
955 if ((temp & PIPEACONF_ENABLE) != 0) {
956 REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
957 REG_READ(map->conf);
958 }
959
960 /* Wait for vblank for the disable to take effect. */
961 cdv_intel_wait_for_vblank(dev);
962
963 udelay(150);
964
965 /* Disable display plane */
966 temp = REG_READ(map->cntr);
967 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
968 REG_WRITE(map->cntr,
969 temp & ~DISPLAY_PLANE_ENABLE);
970 /* Flush the plane changes */
971 REG_WRITE(map->base, REG_READ(map->base));
972 REG_READ(map->base);
973 }
974
975 temp = REG_READ(map->dpll);
976 if ((temp & DPLL_VCO_ENABLE) != 0) {
977 REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
978 REG_READ(map->dpll);
979 }
980 590
981 /* Wait for the clocks to turn off. */ 591 dev_priv->ops->disable_sr(dev);
982 udelay(150);
983 break;
984 } 592 }
985 cdv_intel_update_watermark(dev, crtc);
986 /*Set FIFO Watermarks*/
987 REG_WRITE(DSPARB, 0x3F3E);
988}
989
990static void cdv_intel_crtc_prepare(struct drm_crtc *crtc)
991{
992 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
993 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
994}
995
996static void cdv_intel_crtc_commit(struct drm_crtc *crtc)
997{
998 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
999 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
1000}
1001
1002static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc,
1003 const struct drm_display_mode *mode,
1004 struct drm_display_mode *adjusted_mode)
1005{
1006 return true;
1007} 593}
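The functional change kept in cdv_update_wm above is the tail call: instead of invoking the Cedarview self-refresh disable directly, it now goes through the per-chip operations table (dev_priv->ops->disable_sr(dev)), so the same watermark code can serve chips with different self-refresh handling. A sketch of that indirection, using hypothetical struct and variable names in place of the driver's real ops structure:

	struct drm_device;
	struct drm_crtc;

	/* the functions renamed in this patch */
	void cdv_disable_sr(struct drm_device *dev);
	void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc);

	/* Hypothetical shape of the per-chip ops table; the real structure in
	 * the gma500 core carries many more hooks than shown here.
	 */
	struct gma_chip_ops_example {
		void (*disable_sr)(struct drm_device *dev);
		void (*update_wm)(struct drm_device *dev, struct drm_crtc *crtc);
	};

	/* Cedarview plugs in its implementations once at setup time ... */
	static const struct gma_chip_ops_example cdv_ops_example = {
		.disable_sr	= cdv_disable_sr,
		.update_wm	= cdv_update_wm,
	};

	/* ... and shared code only ever calls through the pointer, e.g.
	 *	dev_priv->ops->disable_sr(dev);
	 */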
1008 594
1009
1010/** 595/**
1011 * Return the pipe currently connected to the panel fitter, 596 * Return the pipe currently connected to the panel fitter,
1012 * or -1 if the panel fitter is not present or not in use 597 * or -1 if the panel fitter is not present or not in use
@@ -1031,31 +616,31 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
1031{ 616{
1032 struct drm_device *dev = crtc->dev; 617 struct drm_device *dev = crtc->dev;
1033 struct drm_psb_private *dev_priv = dev->dev_private; 618 struct drm_psb_private *dev_priv = dev->dev_private;
1034 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 619 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
1035 int pipe = psb_intel_crtc->pipe; 620 int pipe = gma_crtc->pipe;
1036 const struct psb_offset *map = &dev_priv->regmap[pipe]; 621 const struct psb_offset *map = &dev_priv->regmap[pipe];
1037 int refclk; 622 int refclk;
1038 struct cdv_intel_clock_t clock; 623 struct gma_clock_t clock;
1039 u32 dpll = 0, dspcntr, pipeconf; 624 u32 dpll = 0, dspcntr, pipeconf;
1040 bool ok; 625 bool ok;
1041 bool is_crt = false, is_lvds = false, is_tv = false; 626 bool is_crt = false, is_lvds = false, is_tv = false;
1042 bool is_hdmi = false, is_dp = false; 627 bool is_hdmi = false, is_dp = false;
1043 struct drm_mode_config *mode_config = &dev->mode_config; 628 struct drm_mode_config *mode_config = &dev->mode_config;
1044 struct drm_connector *connector; 629 struct drm_connector *connector;
1045 const struct cdv_intel_limit_t *limit; 630 const struct gma_limit_t *limit;
1046 u32 ddi_select = 0; 631 u32 ddi_select = 0;
1047 bool is_edp = false; 632 bool is_edp = false;
1048 633
1049 list_for_each_entry(connector, &mode_config->connector_list, head) { 634 list_for_each_entry(connector, &mode_config->connector_list, head) {
1050 struct psb_intel_encoder *psb_intel_encoder = 635 struct gma_encoder *gma_encoder =
1051 psb_intel_attached_encoder(connector); 636 gma_attached_encoder(connector);
1052 637
1053 if (!connector->encoder 638 if (!connector->encoder
1054 || connector->encoder->crtc != crtc) 639 || connector->encoder->crtc != crtc)
1055 continue; 640 continue;
1056 641
1057 ddi_select = psb_intel_encoder->ddi_select; 642 ddi_select = gma_encoder->ddi_select;
1058 switch (psb_intel_encoder->type) { 643 switch (gma_encoder->type) {
1059 case INTEL_OUTPUT_LVDS: 644 case INTEL_OUTPUT_LVDS:
1060 is_lvds = true; 645 is_lvds = true;
1061 break; 646 break;
@@ -1108,12 +693,13 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
1108 693
1109 drm_mode_debug_printmodeline(adjusted_mode); 694 drm_mode_debug_printmodeline(adjusted_mode);
1110 695
1111 limit = cdv_intel_limit(crtc, refclk); 696 limit = gma_crtc->clock_funcs->limit(crtc, refclk);
1112 697
1113 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, 698 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
1114 &clock); 699 &clock);
1115 if (!ok) { 700 if (!ok) {
1116 dev_err(dev->dev, "Couldn't find PLL settings for mode!\n"); 701 DRM_ERROR("Couldn't find PLL settings for mode! target: %d, actual: %d",
702 adjusted_mode->clock, clock.dot);
1117 return 0; 703 return 0;
1118 } 704 }
1119 705
@@ -1264,7 +850,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
1264 REG_WRITE(map->conf, pipeconf); 850 REG_WRITE(map->conf, pipeconf);
1265 REG_READ(map->conf); 851 REG_READ(map->conf);
1266 852
1267 cdv_intel_wait_for_vblank(dev); 853 gma_wait_for_vblank(dev);
1268 854
1269 REG_WRITE(map->cntr, dspcntr); 855 REG_WRITE(map->cntr, dspcntr);
1270 856
@@ -1275,344 +861,16 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
1275 crtc_funcs->mode_set_base(crtc, x, y, old_fb); 861 crtc_funcs->mode_set_base(crtc, x, y, old_fb);
1276 } 862 }
1277 863
1278 cdv_intel_wait_for_vblank(dev); 864 gma_wait_for_vblank(dev);
1279
1280 return 0;
1281}
1282
1283
1284/**
1285 * Save HW states of the given crtc
1286 */
1287static void cdv_intel_crtc_save(struct drm_crtc *crtc)
1288{
1289 struct drm_device *dev = crtc->dev;
1290 struct drm_psb_private *dev_priv = dev->dev_private;
1291 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1292 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
1293 const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
1294 uint32_t paletteReg;
1295 int i;
1296
1297 if (!crtc_state) {
1298 dev_dbg(dev->dev, "No CRTC state found\n");
1299 return;
1300 }
1301
1302 crtc_state->saveDSPCNTR = REG_READ(map->cntr);
1303 crtc_state->savePIPECONF = REG_READ(map->conf);
1304 crtc_state->savePIPESRC = REG_READ(map->src);
1305 crtc_state->saveFP0 = REG_READ(map->fp0);
1306 crtc_state->saveFP1 = REG_READ(map->fp1);
1307 crtc_state->saveDPLL = REG_READ(map->dpll);
1308 crtc_state->saveHTOTAL = REG_READ(map->htotal);
1309 crtc_state->saveHBLANK = REG_READ(map->hblank);
1310 crtc_state->saveHSYNC = REG_READ(map->hsync);
1311 crtc_state->saveVTOTAL = REG_READ(map->vtotal);
1312 crtc_state->saveVBLANK = REG_READ(map->vblank);
1313 crtc_state->saveVSYNC = REG_READ(map->vsync);
1314 crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
1315
1316 /*NOTE: DSPSIZE DSPPOS only for psb*/
1317 crtc_state->saveDSPSIZE = REG_READ(map->size);
1318 crtc_state->saveDSPPOS = REG_READ(map->pos);
1319
1320 crtc_state->saveDSPBASE = REG_READ(map->base);
1321
1322 DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
1323 crtc_state->saveDSPCNTR,
1324 crtc_state->savePIPECONF,
1325 crtc_state->savePIPESRC,
1326 crtc_state->saveFP0,
1327 crtc_state->saveFP1,
1328 crtc_state->saveDPLL,
1329 crtc_state->saveHTOTAL,
1330 crtc_state->saveHBLANK,
1331 crtc_state->saveHSYNC,
1332 crtc_state->saveVTOTAL,
1333 crtc_state->saveVBLANK,
1334 crtc_state->saveVSYNC,
1335 crtc_state->saveDSPSTRIDE,
1336 crtc_state->saveDSPSIZE,
1337 crtc_state->saveDSPPOS,
1338 crtc_state->saveDSPBASE
1339 );
1340
1341 paletteReg = map->palette;
1342 for (i = 0; i < 256; ++i)
1343 crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
1344}
1345
1346/**
1347 * Restore HW states of the given crtc
1348 */
1349static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
1350{
1351 struct drm_device *dev = crtc->dev;
1352 struct drm_psb_private *dev_priv = dev->dev_private;
1353 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1354 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
1355 const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
1356 uint32_t paletteReg;
1357 int i;
1358
1359 if (!crtc_state) {
1360 dev_dbg(dev->dev, "No crtc state\n");
1361 return;
1362 }
1363
1364 DRM_DEBUG(
1365 "current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
1366 REG_READ(map->cntr),
1367 REG_READ(map->conf),
1368 REG_READ(map->src),
1369 REG_READ(map->fp0),
1370 REG_READ(map->fp1),
1371 REG_READ(map->dpll),
1372 REG_READ(map->htotal),
1373 REG_READ(map->hblank),
1374 REG_READ(map->hsync),
1375 REG_READ(map->vtotal),
1376 REG_READ(map->vblank),
1377 REG_READ(map->vsync),
1378 REG_READ(map->stride),
1379 REG_READ(map->size),
1380 REG_READ(map->pos),
1381 REG_READ(map->base)
1382 );
1383
1384 DRM_DEBUG(
1385 "saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
1386 crtc_state->saveDSPCNTR,
1387 crtc_state->savePIPECONF,
1388 crtc_state->savePIPESRC,
1389 crtc_state->saveFP0,
1390 crtc_state->saveFP1,
1391 crtc_state->saveDPLL,
1392 crtc_state->saveHTOTAL,
1393 crtc_state->saveHBLANK,
1394 crtc_state->saveHSYNC,
1395 crtc_state->saveVTOTAL,
1396 crtc_state->saveVBLANK,
1397 crtc_state->saveVSYNC,
1398 crtc_state->saveDSPSTRIDE,
1399 crtc_state->saveDSPSIZE,
1400 crtc_state->saveDSPPOS,
1401 crtc_state->saveDSPBASE
1402 );
1403
1404
1405 if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
1406 REG_WRITE(map->dpll,
1407 crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
1408 REG_READ(map->dpll);
1409 DRM_DEBUG("write dpll: %x\n",
1410 REG_READ(map->dpll));
1411 udelay(150);
1412 }
1413
1414 REG_WRITE(map->fp0, crtc_state->saveFP0);
1415 REG_READ(map->fp0);
1416
1417 REG_WRITE(map->fp1, crtc_state->saveFP1);
1418 REG_READ(map->fp1);
1419
1420 REG_WRITE(map->dpll, crtc_state->saveDPLL);
1421 REG_READ(map->dpll);
1422 udelay(150);
1423
1424 REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
1425 REG_WRITE(map->hblank, crtc_state->saveHBLANK);
1426 REG_WRITE(map->hsync, crtc_state->saveHSYNC);
1427 REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
1428 REG_WRITE(map->vblank, crtc_state->saveVBLANK);
1429 REG_WRITE(map->vsync, crtc_state->saveVSYNC);
1430 REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
1431
1432 REG_WRITE(map->size, crtc_state->saveDSPSIZE);
1433 REG_WRITE(map->pos, crtc_state->saveDSPPOS);
1434
1435 REG_WRITE(map->src, crtc_state->savePIPESRC);
1436 REG_WRITE(map->base, crtc_state->saveDSPBASE);
1437 REG_WRITE(map->conf, crtc_state->savePIPECONF);
1438
1439 cdv_intel_wait_for_vblank(dev);
1440
1441 REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
1442 REG_WRITE(map->base, crtc_state->saveDSPBASE);
1443
1444 cdv_intel_wait_for_vblank(dev);
1445
1446 paletteReg = map->palette;
1447 for (i = 0; i < 256; ++i)
1448 REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
1449}
1450
1451static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
1452 struct drm_file *file_priv,
1453 uint32_t handle,
1454 uint32_t width, uint32_t height)
1455{
1456 struct drm_device *dev = crtc->dev;
1457 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1458 int pipe = psb_intel_crtc->pipe;
1459 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
1460 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
1461 uint32_t temp;
1462 size_t addr = 0;
1463 struct gtt_range *gt;
1464 struct drm_gem_object *obj;
1465 int ret = 0;
1466
1467	/* if we want to turn off the cursor, ignore width and height */
1468 if (!handle) {
1469 /* turn off the cursor */
1470 temp = CURSOR_MODE_DISABLE;
1471
1472 if (gma_power_begin(dev, false)) {
1473 REG_WRITE(control, temp);
1474 REG_WRITE(base, 0);
1475 gma_power_end(dev);
1476 }
1477
1478 /* unpin the old GEM object */
1479 if (psb_intel_crtc->cursor_obj) {
1480 gt = container_of(psb_intel_crtc->cursor_obj,
1481 struct gtt_range, gem);
1482 psb_gtt_unpin(gt);
1483 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
1484 psb_intel_crtc->cursor_obj = NULL;
1485 }
1486
1487 return 0;
1488 }
1489
1490 /* Currently we only support 64x64 cursors */
1491 if (width != 64 || height != 64) {
1492 dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
1493 return -EINVAL;
1494 }
1495
1496 obj = drm_gem_object_lookup(dev, file_priv, handle);
1497 if (!obj)
1498 return -ENOENT;
1499
1500 if (obj->size < width * height * 4) {
1501		dev_dbg(dev->dev, "buffer is too small\n");
1502 ret = -ENOMEM;
1503 goto unref_cursor;
1504 }
1505
1506 gt = container_of(obj, struct gtt_range, gem);
1507
1508 /* Pin the memory into the GTT */
1509 ret = psb_gtt_pin(gt);
1510 if (ret) {
1511 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
1512 goto unref_cursor;
1513 }
1514
1515 addr = gt->offset; /* Or resource.start ??? */
1516
1517 psb_intel_crtc->cursor_addr = addr;
1518
1519 temp = 0;
1520 /* set the pipe for the cursor */
1521 temp |= (pipe << 28);
1522 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
1523
1524 if (gma_power_begin(dev, false)) {
1525 REG_WRITE(control, temp);
1526 REG_WRITE(base, addr);
1527 gma_power_end(dev);
1528 }
1529
1530 /* unpin the old GEM object */
1531 if (psb_intel_crtc->cursor_obj) {
1532 gt = container_of(psb_intel_crtc->cursor_obj,
1533 struct gtt_range, gem);
1534 psb_gtt_unpin(gt);
1535 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
1536 }
1537
1538 psb_intel_crtc->cursor_obj = obj;
1539 return ret;
1540
1541unref_cursor:
1542 drm_gem_object_unreference(obj);
1543 return ret;
1544}
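The size check in the removed cursor-set helper above is why the path insists on 64x64: an ARGB cursor needs width * height * 4 bytes, so a 64x64 cursor requires exactly 64 * 64 * 4 = 16384 bytes in the backing GEM object before it can be pinned into the GTT and pointed at by CURABASE/CURBBASE.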
1545
1546static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
1547{
1548 struct drm_device *dev = crtc->dev;
1549 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1550 int pipe = psb_intel_crtc->pipe;
1551 uint32_t temp = 0;
1552 uint32_t adder;
1553
1554
1555 if (x < 0) {
1556 temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
1557 x = -x;
1558 }
1559 if (y < 0) {
1560 temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
1561 y = -y;
1562 }
1563
1564 temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
1565 temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
1566 865
1567 adder = psb_intel_crtc->cursor_addr;
1568
1569 if (gma_power_begin(dev, false)) {
1570 REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
1571 REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
1572 gma_power_end(dev);
1573 }
1574 return 0; 866 return 0;
1575} 867}
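The removed cursor-move helper (its replacement is the shared gma_crtc_cursor_move) encodes the position register in sign-magnitude form per axis: the sign flag and the masked absolute value are each shifted into that axis's lane of CURAPOS/CURBPOS. With made-up register constants purely for illustration (CURSOR_X_SHIFT = 0, CURSOR_Y_SHIFT = 16, CURSOR_POS_MASK = 0x7ff, CURSOR_POS_SIGN = 0x8000), a cursor at (-5, 10) would be written as (0x8000 | 5) in the low half-word and 10 in the high half-word, i.e. 0x000a8005.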
1576 868
1577static void cdv_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
1578 u16 *green, u16 *blue, uint32_t start, uint32_t size)
1579{
1580 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1581 int i;
1582 int end = (start + size > 256) ? 256 : start + size;
1583
1584 for (i = start; i < end; i++) {
1585 psb_intel_crtc->lut_r[i] = red[i] >> 8;
1586 psb_intel_crtc->lut_g[i] = green[i] >> 8;
1587 psb_intel_crtc->lut_b[i] = blue[i] >> 8;
1588 }
1589
1590 cdv_intel_crtc_load_lut(crtc);
1591}
1592
1593static int cdv_crtc_set_config(struct drm_mode_set *set)
1594{
1595 int ret = 0;
1596 struct drm_device *dev = set->crtc->dev;
1597 struct drm_psb_private *dev_priv = dev->dev_private;
1598
1599 if (!dev_priv->rpm_enabled)
1600 return drm_crtc_helper_set_config(set);
1601
1602 pm_runtime_forbid(&dev->pdev->dev);
1603
1604 ret = drm_crtc_helper_set_config(set);
1605
1606 pm_runtime_allow(&dev->pdev->dev);
1607
1608 return ret;
1609}
1610
1611/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */ 869/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
1612 870
1613/* FIXME: why are we using this, should it be cdv_ in this tree ? */ 871/* FIXME: why are we using this, should it be cdv_ in this tree ? */
1614 872
1615static void i8xx_clock(int refclk, struct cdv_intel_clock_t *clock) 873static void i8xx_clock(int refclk, struct gma_clock_t *clock)
1616{ 874{
1617 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); 875 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
1618 clock->p = clock->p1 * clock->p2; 876 clock->p = clock->p1 * clock->p2;
@@ -1625,12 +883,12 @@ static int cdv_intel_crtc_clock_get(struct drm_device *dev,
1625 struct drm_crtc *crtc) 883 struct drm_crtc *crtc)
1626{ 884{
1627 struct drm_psb_private *dev_priv = dev->dev_private; 885 struct drm_psb_private *dev_priv = dev->dev_private;
1628 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 886 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
1629 int pipe = psb_intel_crtc->pipe; 887 int pipe = gma_crtc->pipe;
1630 const struct psb_offset *map = &dev_priv->regmap[pipe]; 888 const struct psb_offset *map = &dev_priv->regmap[pipe];
1631 u32 dpll; 889 u32 dpll;
1632 u32 fp; 890 u32 fp;
1633 struct cdv_intel_clock_t clock; 891 struct gma_clock_t clock;
1634 bool is_lvds; 892 bool is_lvds;
1635 struct psb_pipe *p = &dev_priv->regs.pipe[pipe]; 893 struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
1636 894
@@ -1703,8 +961,8 @@ static int cdv_intel_crtc_clock_get(struct drm_device *dev,
1703struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev, 961struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
1704 struct drm_crtc *crtc) 962 struct drm_crtc *crtc)
1705{ 963{
1706 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 964 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
1707 int pipe = psb_intel_crtc->pipe; 965 int pipe = gma_crtc->pipe;
1708 struct drm_psb_private *dev_priv = dev->dev_private; 966 struct drm_psb_private *dev_priv = dev->dev_private;
1709 struct psb_pipe *p = &dev_priv->regs.pipe[pipe]; 967 struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
1710 const struct psb_offset *map = &dev_priv->regmap[pipe]; 968 const struct psb_offset *map = &dev_priv->regmap[pipe];
@@ -1747,44 +1005,28 @@ struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
1747 return mode; 1005 return mode;
1748} 1006}
1749 1007
1750static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
1751{
1752 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1753
1754 kfree(psb_intel_crtc->crtc_state);
1755 drm_crtc_cleanup(crtc);
1756 kfree(psb_intel_crtc);
1757}
1758
1759static void cdv_intel_crtc_disable(struct drm_crtc *crtc)
1760{
1761 struct gtt_range *gt;
1762 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
1763
1764 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
1765
1766 if (crtc->fb) {
1767 gt = to_psb_fb(crtc->fb)->gtt;
1768 psb_gtt_unpin(gt);
1769 }
1770}
1771
1772const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = { 1008const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
1773 .dpms = cdv_intel_crtc_dpms, 1009 .dpms = gma_crtc_dpms,
1774 .mode_fixup = cdv_intel_crtc_mode_fixup, 1010 .mode_fixup = gma_crtc_mode_fixup,
1775 .mode_set = cdv_intel_crtc_mode_set, 1011 .mode_set = cdv_intel_crtc_mode_set,
1776 .mode_set_base = cdv_intel_pipe_set_base, 1012 .mode_set_base = gma_pipe_set_base,
1777 .prepare = cdv_intel_crtc_prepare, 1013 .prepare = gma_crtc_prepare,
1778 .commit = cdv_intel_crtc_commit, 1014 .commit = gma_crtc_commit,
1779 .disable = cdv_intel_crtc_disable, 1015 .disable = gma_crtc_disable,
1780}; 1016};
1781 1017
1782const struct drm_crtc_funcs cdv_intel_crtc_funcs = { 1018const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
1783 .save = cdv_intel_crtc_save, 1019 .save = gma_crtc_save,
1784 .restore = cdv_intel_crtc_restore, 1020 .restore = gma_crtc_restore,
1785 .cursor_set = cdv_intel_crtc_cursor_set, 1021 .cursor_set = gma_crtc_cursor_set,
1786 .cursor_move = cdv_intel_crtc_cursor_move, 1022 .cursor_move = gma_crtc_cursor_move,
1787 .gamma_set = cdv_intel_crtc_gamma_set, 1023 .gamma_set = gma_crtc_gamma_set,
1788 .set_config = cdv_crtc_set_config, 1024 .set_config = gma_crtc_set_config,
1789 .destroy = cdv_intel_crtc_destroy, 1025 .destroy = gma_crtc_destroy,
1026};
1027
1028const struct gma_clock_funcs cdv_clock_funcs = {
1029 .clock = cdv_intel_clock,
1030 .limit = cdv_intel_limit,
1031 .pll_is_valid = gma_pll_is_valid,
1790}; 1032};
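The new cdv_clock_funcs table at the end ties back to the change in cdv_intel_crtc_mode_set earlier in this file: the mode-set path no longer hard-codes cdv_intel_limit() but asks the CRTC's clock_funcs for the divisor limits, and the limit structure in turn carries the find_pll hook that performs the search. Condensed consumer side, as it appears in the hunk above (error path included, other hooks elided):

	limit = gma_crtc->clock_funcs->limit(crtc, refclk);

	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
	if (!ok) {
		DRM_ERROR("Couldn't find PLL settings for mode! target: %d, actual: %d",
			  adjusted_mode->clock, clock.dot);
		return 0;
	}

This lets each gma500 variant provide its own limit tables and validity check (pll_is_valid) while sharing the generic search loop.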
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 88d9ef6b5b4a..f4eb43573cad 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -34,6 +34,7 @@
34#include "psb_drv.h" 34#include "psb_drv.h"
35#include "psb_intel_drv.h" 35#include "psb_intel_drv.h"
36#include "psb_intel_reg.h" 36#include "psb_intel_reg.h"
37#include "gma_display.h"
37#include <drm/drm_dp_helper.h> 38#include <drm/drm_dp_helper.h>
38 39
39#define _wait_for(COND, MS, W) ({ \ 40#define _wait_for(COND, MS, W) ({ \
@@ -68,7 +69,7 @@ struct cdv_intel_dp {
68 uint8_t link_bw; 69 uint8_t link_bw;
69 uint8_t lane_count; 70 uint8_t lane_count;
70 uint8_t dpcd[4]; 71 uint8_t dpcd[4];
71 struct psb_intel_encoder *encoder; 72 struct gma_encoder *encoder;
72 struct i2c_adapter adapter; 73 struct i2c_adapter adapter;
73 struct i2c_algo_dp_aux_data algo; 74 struct i2c_algo_dp_aux_data algo;
74 uint8_t train_set[4]; 75 uint8_t train_set[4];
@@ -114,18 +115,18 @@ static uint32_t dp_vswing_premph_table[] = {
114 * If a CPU or PCH DP output is attached to an eDP panel, this function 115 * If a CPU or PCH DP output is attached to an eDP panel, this function
115 * will return true, and false otherwise. 116 * will return true, and false otherwise.
116 */ 117 */
117static bool is_edp(struct psb_intel_encoder *encoder) 118static bool is_edp(struct gma_encoder *encoder)
118{ 119{
119 return encoder->type == INTEL_OUTPUT_EDP; 120 return encoder->type == INTEL_OUTPUT_EDP;
120} 121}
121 122
122 123
123static void cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder); 124static void cdv_intel_dp_start_link_train(struct gma_encoder *encoder);
124static void cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder); 125static void cdv_intel_dp_complete_link_train(struct gma_encoder *encoder);
125static void cdv_intel_dp_link_down(struct psb_intel_encoder *encoder); 126static void cdv_intel_dp_link_down(struct gma_encoder *encoder);
126 127
127static int 128static int
128cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder) 129cdv_intel_dp_max_lane_count(struct gma_encoder *encoder)
129{ 130{
130 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 131 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
131 int max_lane_count = 4; 132 int max_lane_count = 4;
@@ -143,7 +144,7 @@ cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder)
143} 144}
144 145
145static int 146static int
146cdv_intel_dp_max_link_bw(struct psb_intel_encoder *encoder) 147cdv_intel_dp_max_link_bw(struct gma_encoder *encoder)
147{ 148{
148 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 149 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
149 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; 150 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
@@ -180,7 +181,7 @@ cdv_intel_dp_max_data_rate(int max_link_clock, int max_lanes)
180 return (max_link_clock * max_lanes * 19) / 20; 181 return (max_link_clock * max_lanes * 19) / 20;
181} 182}
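For scale, with illustrative values max_link_clock = 270000 and max_lanes = 4, the cap above works out to 270000 * 4 * 19 / 20 = 1026000 — the raw link figure derated by 5% (the 19/20 factor) to leave headroom for overhead.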
182 183
183static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder) 184static void cdv_intel_edp_panel_vdd_on(struct gma_encoder *intel_encoder)
184{ 185{
185 struct drm_device *dev = intel_encoder->base.dev; 186 struct drm_device *dev = intel_encoder->base.dev;
186 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; 187 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
@@ -200,7 +201,7 @@ static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder)
200 msleep(intel_dp->panel_power_up_delay); 201 msleep(intel_dp->panel_power_up_delay);
201} 202}
202 203
203static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder) 204static void cdv_intel_edp_panel_vdd_off(struct gma_encoder *intel_encoder)
204{ 205{
205 struct drm_device *dev = intel_encoder->base.dev; 206 struct drm_device *dev = intel_encoder->base.dev;
206 u32 pp; 207 u32 pp;
@@ -215,7 +216,7 @@ static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder)
215} 216}
216 217
217/* Returns true if the panel was already on when called */ 218/* Returns true if the panel was already on when called */
218static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder) 219static bool cdv_intel_edp_panel_on(struct gma_encoder *intel_encoder)
219{ 220{
220 struct drm_device *dev = intel_encoder->base.dev; 221 struct drm_device *dev = intel_encoder->base.dev;
221 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; 222 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
@@ -242,7 +243,7 @@ static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder)
242 return false; 243 return false;
243} 244}
244 245
245static void cdv_intel_edp_panel_off (struct psb_intel_encoder *intel_encoder) 246static void cdv_intel_edp_panel_off (struct gma_encoder *intel_encoder)
246{ 247{
247 struct drm_device *dev = intel_encoder->base.dev; 248 struct drm_device *dev = intel_encoder->base.dev;
248 u32 pp, idle_off_mask = PP_ON ; 249 u32 pp, idle_off_mask = PP_ON ;
@@ -274,7 +275,7 @@ static void cdv_intel_edp_panel_off (struct psb_intel_encoder *intel_encoder)
274 DRM_DEBUG_KMS("Over\n"); 275 DRM_DEBUG_KMS("Over\n");
275} 276}
276 277
277static void cdv_intel_edp_backlight_on (struct psb_intel_encoder *intel_encoder) 278static void cdv_intel_edp_backlight_on (struct gma_encoder *intel_encoder)
278{ 279{
279 struct drm_device *dev = intel_encoder->base.dev; 280 struct drm_device *dev = intel_encoder->base.dev;
280 u32 pp; 281 u32 pp;
@@ -294,7 +295,7 @@ static void cdv_intel_edp_backlight_on (struct psb_intel_encoder *intel_encoder)
294 gma_backlight_enable(dev); 295 gma_backlight_enable(dev);
295} 296}
296 297
297static void cdv_intel_edp_backlight_off (struct psb_intel_encoder *intel_encoder) 298static void cdv_intel_edp_backlight_off (struct gma_encoder *intel_encoder)
298{ 299{
299 struct drm_device *dev = intel_encoder->base.dev; 300 struct drm_device *dev = intel_encoder->base.dev;
300 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; 301 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
@@ -314,7 +315,7 @@ static int
314cdv_intel_dp_mode_valid(struct drm_connector *connector, 315cdv_intel_dp_mode_valid(struct drm_connector *connector,
315 struct drm_display_mode *mode) 316 struct drm_display_mode *mode)
316{ 317{
317 struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); 318 struct gma_encoder *encoder = gma_attached_encoder(connector);
318 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 319 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
319 int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder)); 320 int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder));
320 int max_lanes = cdv_intel_dp_max_lane_count(encoder); 321 int max_lanes = cdv_intel_dp_max_lane_count(encoder);
@@ -370,7 +371,7 @@ unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
370} 371}
371 372
372static int 373static int
373cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder, 374cdv_intel_dp_aux_ch(struct gma_encoder *encoder,
374 uint8_t *send, int send_bytes, 375 uint8_t *send, int send_bytes,
375 uint8_t *recv, int recv_size) 376 uint8_t *recv, int recv_size)
376{ 377{
@@ -472,7 +473,7 @@ cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder,
472 473
473/* Write data to the aux channel in native mode */ 474/* Write data to the aux channel in native mode */
474static int 475static int
475cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder, 476cdv_intel_dp_aux_native_write(struct gma_encoder *encoder,
476 uint16_t address, uint8_t *send, int send_bytes) 477 uint16_t address, uint8_t *send, int send_bytes)
477{ 478{
478 int ret; 479 int ret;
@@ -504,7 +505,7 @@ cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder,
504 505
505/* Write a single byte to the aux channel in native mode */ 506/* Write a single byte to the aux channel in native mode */
506static int 507static int
507cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder, 508cdv_intel_dp_aux_native_write_1(struct gma_encoder *encoder,
508 uint16_t address, uint8_t byte) 509 uint16_t address, uint8_t byte)
509{ 510{
510 return cdv_intel_dp_aux_native_write(encoder, address, &byte, 1); 511 return cdv_intel_dp_aux_native_write(encoder, address, &byte, 1);
@@ -512,7 +513,7 @@ cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder,
512 513
513/* read bytes from a native aux channel */ 514/* read bytes from a native aux channel */
514static int 515static int
515cdv_intel_dp_aux_native_read(struct psb_intel_encoder *encoder, 516cdv_intel_dp_aux_native_read(struct gma_encoder *encoder,
516 uint16_t address, uint8_t *recv, int recv_bytes) 517 uint16_t address, uint8_t *recv, int recv_bytes)
517{ 518{
518 uint8_t msg[4]; 519 uint8_t msg[4];
@@ -557,7 +558,7 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
557 struct cdv_intel_dp *intel_dp = container_of(adapter, 558 struct cdv_intel_dp *intel_dp = container_of(adapter,
558 struct cdv_intel_dp, 559 struct cdv_intel_dp,
559 adapter); 560 adapter);
560 struct psb_intel_encoder *encoder = intel_dp->encoder; 561 struct gma_encoder *encoder = intel_dp->encoder;
561 uint16_t address = algo_data->address; 562 uint16_t address = algo_data->address;
562 uint8_t msg[5]; 563 uint8_t msg[5];
563 uint8_t reply[2]; 564 uint8_t reply[2];
@@ -647,7 +648,8 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
647} 648}
648 649
649static int 650static int
650cdv_intel_dp_i2c_init(struct psb_intel_connector *connector, struct psb_intel_encoder *encoder, const char *name) 651cdv_intel_dp_i2c_init(struct gma_connector *connector,
652 struct gma_encoder *encoder, const char *name)
651{ 653{
652 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 654 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
653 int ret; 655 int ret;
@@ -698,7 +700,7 @@ cdv_intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mo
698 struct drm_display_mode *adjusted_mode) 700 struct drm_display_mode *adjusted_mode)
699{ 701{
700 struct drm_psb_private *dev_priv = encoder->dev->dev_private; 702 struct drm_psb_private *dev_priv = encoder->dev->dev_private;
701 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); 703 struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
702 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; 704 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
703 int lane_count, clock; 705 int lane_count, clock;
704 int max_lane_count = cdv_intel_dp_max_lane_count(intel_encoder); 706 int max_lane_count = cdv_intel_dp_max_lane_count(intel_encoder);
@@ -792,22 +794,22 @@ cdv_intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
792 struct drm_psb_private *dev_priv = dev->dev_private; 794 struct drm_psb_private *dev_priv = dev->dev_private;
793 struct drm_mode_config *mode_config = &dev->mode_config; 795 struct drm_mode_config *mode_config = &dev->mode_config;
794 struct drm_encoder *encoder; 796 struct drm_encoder *encoder;
795 struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); 797 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
796 int lane_count = 4, bpp = 24; 798 int lane_count = 4, bpp = 24;
797 struct cdv_intel_dp_m_n m_n; 799 struct cdv_intel_dp_m_n m_n;
798 int pipe = intel_crtc->pipe; 800 int pipe = gma_crtc->pipe;
799 801
800 /* 802 /*
801 * Find the lane count in the intel_encoder private 803 * Find the lane count in the intel_encoder private
802 */ 804 */
803 list_for_each_entry(encoder, &mode_config->encoder_list, head) { 805 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
804 struct psb_intel_encoder *intel_encoder; 806 struct gma_encoder *intel_encoder;
805 struct cdv_intel_dp *intel_dp; 807 struct cdv_intel_dp *intel_dp;
806 808
807 if (encoder->crtc != crtc) 809 if (encoder->crtc != crtc)
808 continue; 810 continue;
809 811
810 intel_encoder = to_psb_intel_encoder(encoder); 812 intel_encoder = to_gma_encoder(encoder);
811 intel_dp = intel_encoder->dev_priv; 813 intel_dp = intel_encoder->dev_priv;
812 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { 814 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
813 lane_count = intel_dp->lane_count; 815 lane_count = intel_dp->lane_count;
@@ -841,9 +843,9 @@ static void
841cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, 843cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
842 struct drm_display_mode *adjusted_mode) 844 struct drm_display_mode *adjusted_mode)
843{ 845{
844 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); 846 struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
845 struct drm_crtc *crtc = encoder->crtc; 847 struct drm_crtc *crtc = encoder->crtc;
846 struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); 848 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
847 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; 849 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
848 struct drm_device *dev = encoder->dev; 850 struct drm_device *dev = encoder->dev;
849 851
@@ -885,7 +887,7 @@ cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode
885 } 887 }
886 888
887 /* CPT DP's pipe select is decided in TRANS_DP_CTL */ 889 /* CPT DP's pipe select is decided in TRANS_DP_CTL */
888 if (intel_crtc->pipe == 1) 890 if (gma_crtc->pipe == 1)
889 intel_dp->DP |= DP_PIPEB_SELECT; 891 intel_dp->DP |= DP_PIPEB_SELECT;
890 892
891 REG_WRITE(intel_dp->output_reg, (intel_dp->DP | DP_PORT_EN)); 893 REG_WRITE(intel_dp->output_reg, (intel_dp->DP | DP_PORT_EN));
@@ -900,7 +902,7 @@ cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode
900 else 902 else
901 pfit_control = 0; 903 pfit_control = 0;
902 904
903 pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT; 905 pfit_control |= gma_crtc->pipe << PFIT_PIPE_SHIFT;
904 906
905 REG_WRITE(PFIT_CONTROL, pfit_control); 907 REG_WRITE(PFIT_CONTROL, pfit_control);
906 } 908 }
@@ -908,7 +910,7 @@ cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode
908 910
909 911
910/* If the sink supports it, try to set the power state appropriately */ 912/* If the sink supports it, try to set the power state appropriately */
911static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode) 913static void cdv_intel_dp_sink_dpms(struct gma_encoder *encoder, int mode)
912{ 914{
913 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 915 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
914 int ret, i; 916 int ret, i;
@@ -940,7 +942,7 @@ static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode)
940 942
941static void cdv_intel_dp_prepare(struct drm_encoder *encoder) 943static void cdv_intel_dp_prepare(struct drm_encoder *encoder)
942{ 944{
943 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); 945 struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
944 int edp = is_edp(intel_encoder); 946 int edp = is_edp(intel_encoder);
945 947
946 if (edp) { 948 if (edp) {
@@ -957,7 +959,7 @@ static void cdv_intel_dp_prepare(struct drm_encoder *encoder)
957 959
958static void cdv_intel_dp_commit(struct drm_encoder *encoder) 960static void cdv_intel_dp_commit(struct drm_encoder *encoder)
959{ 961{
960 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); 962 struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
961 int edp = is_edp(intel_encoder); 963 int edp = is_edp(intel_encoder);
962 964
963 if (edp) 965 if (edp)
@@ -971,7 +973,7 @@ static void cdv_intel_dp_commit(struct drm_encoder *encoder)
971static void 973static void
972cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode) 974cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode)
973{ 975{
974 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); 976 struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
975 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; 977 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
976 struct drm_device *dev = encoder->dev; 978 struct drm_device *dev = encoder->dev;
977 uint32_t dp_reg = REG_READ(intel_dp->output_reg); 979 uint32_t dp_reg = REG_READ(intel_dp->output_reg);
@@ -1006,7 +1008,7 @@ cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode)
1006 * cases where the sink may still be asleep. 1008 * cases where the sink may still be asleep.
1007 */ 1009 */
1008static bool 1010static bool
1009cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t address, 1011cdv_intel_dp_aux_native_read_retry(struct gma_encoder *encoder, uint16_t address,
1010 uint8_t *recv, int recv_bytes) 1012 uint8_t *recv, int recv_bytes)
1011{ 1013{
1012 int ret, i; 1014 int ret, i;
@@ -1031,7 +1033,7 @@ cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t a
1031 * link status information 1033 * link status information
1032 */ 1034 */
1033static bool 1035static bool
1034cdv_intel_dp_get_link_status(struct psb_intel_encoder *encoder) 1036cdv_intel_dp_get_link_status(struct gma_encoder *encoder)
1035{ 1037{
1036 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1038 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1037 return cdv_intel_dp_aux_native_read_retry(encoder, 1039 return cdv_intel_dp_aux_native_read_retry(encoder,
@@ -1105,7 +1107,7 @@ cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing)
1105} 1107}
1106*/ 1108*/
1107static void 1109static void
1108cdv_intel_get_adjust_train(struct psb_intel_encoder *encoder) 1110cdv_intel_get_adjust_train(struct gma_encoder *encoder)
1109{ 1111{
1110 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1112 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1111 uint8_t v = 0; 1113 uint8_t v = 0;
@@ -1164,7 +1166,7 @@ cdv_intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_c
1164 DP_LANE_CHANNEL_EQ_DONE|\ 1166 DP_LANE_CHANNEL_EQ_DONE|\
1165 DP_LANE_SYMBOL_LOCKED) 1167 DP_LANE_SYMBOL_LOCKED)
1166static bool 1168static bool
1167cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder) 1169cdv_intel_channel_eq_ok(struct gma_encoder *encoder)
1168{ 1170{
1169 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1171 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1170 uint8_t lane_align; 1172 uint8_t lane_align;
@@ -1184,7 +1186,7 @@ cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder)
1184} 1186}
1185 1187
1186static bool 1188static bool
1187cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder, 1189cdv_intel_dp_set_link_train(struct gma_encoder *encoder,
1188 uint32_t dp_reg_value, 1190 uint32_t dp_reg_value,
1189 uint8_t dp_train_pat) 1191 uint8_t dp_train_pat)
1190{ 1192{
@@ -1211,7 +1213,7 @@ cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder,
1211 1213
1212 1214
1213static bool 1215static bool
1214cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder, 1216cdv_intel_dplink_set_level(struct gma_encoder *encoder,
1215 uint8_t dp_train_pat) 1217 uint8_t dp_train_pat)
1216{ 1218{
1217 1219
@@ -1232,7 +1234,7 @@ cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder,
1232} 1234}
1233 1235
1234static void 1236static void
1235cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal_level) 1237cdv_intel_dp_set_vswing_premph(struct gma_encoder *encoder, uint8_t signal_level)
1236{ 1238{
1237 struct drm_device *dev = encoder->base.dev; 1239 struct drm_device *dev = encoder->base.dev;
1238 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1240 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1298,7 +1300,7 @@ cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal
1298 1300
1299/* Enable corresponding port and start training pattern 1 */ 1301/* Enable corresponding port and start training pattern 1 */
1300static void 1302static void
1301cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder) 1303cdv_intel_dp_start_link_train(struct gma_encoder *encoder)
1302{ 1304{
1303 struct drm_device *dev = encoder->base.dev; 1305 struct drm_device *dev = encoder->base.dev;
1304 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1306 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1317,7 +1319,7 @@ cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder)
1317 /* Enable output, wait for it to become active */ 1319 /* Enable output, wait for it to become active */
1318 REG_WRITE(intel_dp->output_reg, reg); 1320 REG_WRITE(intel_dp->output_reg, reg);
1319 REG_READ(intel_dp->output_reg); 1321 REG_READ(intel_dp->output_reg);
1320 psb_intel_wait_for_vblank(dev); 1322 gma_wait_for_vblank(dev);
1321 1323
1322 DRM_DEBUG_KMS("Link config\n"); 1324 DRM_DEBUG_KMS("Link config\n");
1323 /* Write the link configuration data */ 1325 /* Write the link configuration data */
@@ -1392,7 +1394,7 @@ cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder)
1392} 1394}
1393 1395
1394static void 1396static void
1395cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder) 1397cdv_intel_dp_complete_link_train(struct gma_encoder *encoder)
1396{ 1398{
1397 struct drm_device *dev = encoder->base.dev; 1399 struct drm_device *dev = encoder->base.dev;
1398 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1400 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1478,7 +1480,7 @@ cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder)
1478} 1480}
1479 1481
1480static void 1482static void
1481cdv_intel_dp_link_down(struct psb_intel_encoder *encoder) 1483cdv_intel_dp_link_down(struct gma_encoder *encoder)
1482{ 1484{
1483 struct drm_device *dev = encoder->base.dev; 1485 struct drm_device *dev = encoder->base.dev;
1484 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1486 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1502,8 +1504,7 @@ cdv_intel_dp_link_down(struct psb_intel_encoder *encoder)
1502 REG_READ(intel_dp->output_reg); 1504 REG_READ(intel_dp->output_reg);
1503} 1505}
1504 1506
1505static enum drm_connector_status 1507static enum drm_connector_status cdv_dp_detect(struct gma_encoder *encoder)
1506cdv_dp_detect(struct psb_intel_encoder *encoder)
1507{ 1508{
1508 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1509 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1509 enum drm_connector_status status; 1510 enum drm_connector_status status;
@@ -1531,7 +1532,7 @@ cdv_dp_detect(struct psb_intel_encoder *encoder)
1531static enum drm_connector_status 1532static enum drm_connector_status
1532cdv_intel_dp_detect(struct drm_connector *connector, bool force) 1533cdv_intel_dp_detect(struct drm_connector *connector, bool force)
1533{ 1534{
1534 struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); 1535 struct gma_encoder *encoder = gma_attached_encoder(connector);
1535 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1536 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1536 enum drm_connector_status status; 1537 enum drm_connector_status status;
1537 struct edid *edid = NULL; 1538 struct edid *edid = NULL;
@@ -1565,7 +1566,7 @@ cdv_intel_dp_detect(struct drm_connector *connector, bool force)
1565 1566
1566static int cdv_intel_dp_get_modes(struct drm_connector *connector) 1567static int cdv_intel_dp_get_modes(struct drm_connector *connector)
1567{ 1568{
1568 struct psb_intel_encoder *intel_encoder = psb_intel_attached_encoder(connector); 1569 struct gma_encoder *intel_encoder = gma_attached_encoder(connector);
1569 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; 1570 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
1570 struct edid *edid = NULL; 1571 struct edid *edid = NULL;
1571 int ret = 0; 1572 int ret = 0;
@@ -1621,7 +1622,7 @@ static int cdv_intel_dp_get_modes(struct drm_connector *connector)
1621static bool 1622static bool
1622cdv_intel_dp_detect_audio(struct drm_connector *connector) 1623cdv_intel_dp_detect_audio(struct drm_connector *connector)
1623{ 1624{
1624 struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); 1625 struct gma_encoder *encoder = gma_attached_encoder(connector);
1625 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1626 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1626 struct edid *edid; 1627 struct edid *edid;
1627 bool has_audio = false; 1628 bool has_audio = false;
@@ -1647,7 +1648,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector,
1647 uint64_t val) 1648 uint64_t val)
1648{ 1649{
1649 struct drm_psb_private *dev_priv = connector->dev->dev_private; 1650 struct drm_psb_private *dev_priv = connector->dev->dev_private;
1650 struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); 1651 struct gma_encoder *encoder = gma_attached_encoder(connector);
1651 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1652 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1652 int ret; 1653 int ret;
1653 1654
@@ -1700,11 +1701,10 @@ done:
1700static void 1701static void
1701cdv_intel_dp_destroy(struct drm_connector *connector) 1702cdv_intel_dp_destroy(struct drm_connector *connector)
1702{ 1703{
1703 struct psb_intel_encoder *psb_intel_encoder = 1704 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
1704 psb_intel_attached_encoder(connector); 1705 struct cdv_intel_dp *intel_dp = gma_encoder->dev_priv;
1705 struct cdv_intel_dp *intel_dp = psb_intel_encoder->dev_priv;
1706 1706
1707 if (is_edp(psb_intel_encoder)) { 1707 if (is_edp(gma_encoder)) {
1708 /* cdv_intel_panel_destroy_backlight(connector->dev); */ 1708 /* cdv_intel_panel_destroy_backlight(connector->dev); */
1709 if (intel_dp->panel_fixed_mode) { 1709 if (intel_dp->panel_fixed_mode) {
1710 kfree(intel_dp->panel_fixed_mode); 1710 kfree(intel_dp->panel_fixed_mode);
@@ -1741,7 +1741,7 @@ static const struct drm_connector_funcs cdv_intel_dp_connector_funcs = {
1741static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_funcs = { 1741static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_funcs = {
1742 .get_modes = cdv_intel_dp_get_modes, 1742 .get_modes = cdv_intel_dp_get_modes,
1743 .mode_valid = cdv_intel_dp_mode_valid, 1743 .mode_valid = cdv_intel_dp_mode_valid,
1744 .best_encoder = psb_intel_best_encoder, 1744 .best_encoder = gma_best_encoder,
1745}; 1745};
1746 1746
1747static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = { 1747static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = {
@@ -1800,19 +1800,19 @@ static void cdv_disable_intel_clock_gating(struct drm_device *dev)
1800void 1800void
1801cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg) 1801cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg)
1802{ 1802{
1803 struct psb_intel_encoder *psb_intel_encoder; 1803 struct gma_encoder *gma_encoder;
1804 struct psb_intel_connector *psb_intel_connector; 1804 struct gma_connector *gma_connector;
1805 struct drm_connector *connector; 1805 struct drm_connector *connector;
1806 struct drm_encoder *encoder; 1806 struct drm_encoder *encoder;
1807 struct cdv_intel_dp *intel_dp; 1807 struct cdv_intel_dp *intel_dp;
1808 const char *name = NULL; 1808 const char *name = NULL;
1809 int type = DRM_MODE_CONNECTOR_DisplayPort; 1809 int type = DRM_MODE_CONNECTOR_DisplayPort;
1810 1810
1811 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); 1811 gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
1812 if (!psb_intel_encoder) 1812 if (!gma_encoder)
1813 return; 1813 return;
1814 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); 1814 gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
1815 if (!psb_intel_connector) 1815 if (!gma_connector)
1816 goto err_connector; 1816 goto err_connector;
1817 intel_dp = kzalloc(sizeof(struct cdv_intel_dp), GFP_KERNEL); 1817 intel_dp = kzalloc(sizeof(struct cdv_intel_dp), GFP_KERNEL);
1818 if (!intel_dp) 1818 if (!intel_dp)
@@ -1821,22 +1821,22 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
1821 if ((output_reg == DP_C) && cdv_intel_dpc_is_edp(dev)) 1821 if ((output_reg == DP_C) && cdv_intel_dpc_is_edp(dev))
1822 type = DRM_MODE_CONNECTOR_eDP; 1822 type = DRM_MODE_CONNECTOR_eDP;
1823 1823
1824 connector = &psb_intel_connector->base; 1824 connector = &gma_connector->base;
1825 encoder = &psb_intel_encoder->base; 1825 encoder = &gma_encoder->base;
1826 1826
1827 drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type); 1827 drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type);
1828 drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS); 1828 drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS);
1829 1829
1830 psb_intel_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); 1830 gma_connector_attach_encoder(gma_connector, gma_encoder);
1831 1831
1832 if (type == DRM_MODE_CONNECTOR_DisplayPort) 1832 if (type == DRM_MODE_CONNECTOR_DisplayPort)
1833 psb_intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 1833 gma_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
1834 else 1834 else
1835 psb_intel_encoder->type = INTEL_OUTPUT_EDP; 1835 gma_encoder->type = INTEL_OUTPUT_EDP;
1836 1836
1837 1837
1838 psb_intel_encoder->dev_priv=intel_dp; 1838 gma_encoder->dev_priv=intel_dp;
1839 intel_dp->encoder = psb_intel_encoder; 1839 intel_dp->encoder = gma_encoder;
1840 intel_dp->output_reg = output_reg; 1840 intel_dp->output_reg = output_reg;
1841 1841
1842 drm_encoder_helper_add(encoder, &cdv_intel_dp_helper_funcs); 1842 drm_encoder_helper_add(encoder, &cdv_intel_dp_helper_funcs);
@@ -1852,21 +1852,21 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
1852 switch (output_reg) { 1852 switch (output_reg) {
1853 case DP_B: 1853 case DP_B:
1854 name = "DPDDC-B"; 1854 name = "DPDDC-B";
1855 psb_intel_encoder->ddi_select = (DP_MASK | DDI0_SELECT); 1855 gma_encoder->ddi_select = (DP_MASK | DDI0_SELECT);
1856 break; 1856 break;
1857 case DP_C: 1857 case DP_C:
1858 name = "DPDDC-C"; 1858 name = "DPDDC-C";
1859 psb_intel_encoder->ddi_select = (DP_MASK | DDI1_SELECT); 1859 gma_encoder->ddi_select = (DP_MASK | DDI1_SELECT);
1860 break; 1860 break;
1861 } 1861 }
1862 1862
1863 cdv_disable_intel_clock_gating(dev); 1863 cdv_disable_intel_clock_gating(dev);
1864 1864
1865 cdv_intel_dp_i2c_init(psb_intel_connector, psb_intel_encoder, name); 1865 cdv_intel_dp_i2c_init(gma_connector, gma_encoder, name);
1866 /* FIXME:fail check */ 1866 /* FIXME:fail check */
1867 cdv_intel_dp_add_properties(connector); 1867 cdv_intel_dp_add_properties(connector);
1868 1868
1869 if (is_edp(psb_intel_encoder)) { 1869 if (is_edp(gma_encoder)) {
1870 int ret; 1870 int ret;
1871 struct edp_power_seq cur; 1871 struct edp_power_seq cur;
1872 u32 pp_on, pp_off, pp_div; 1872 u32 pp_on, pp_off, pp_div;
@@ -1920,11 +1920,11 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
1920 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); 1920 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
1921 1921
1922 1922
1923 cdv_intel_edp_panel_vdd_on(psb_intel_encoder); 1923 cdv_intel_edp_panel_vdd_on(gma_encoder);
1924 ret = cdv_intel_dp_aux_native_read(psb_intel_encoder, DP_DPCD_REV, 1924 ret = cdv_intel_dp_aux_native_read(gma_encoder, DP_DPCD_REV,
1925 intel_dp->dpcd, 1925 intel_dp->dpcd,
1926 sizeof(intel_dp->dpcd)); 1926 sizeof(intel_dp->dpcd));
1927 cdv_intel_edp_panel_vdd_off(psb_intel_encoder); 1927 cdv_intel_edp_panel_vdd_off(gma_encoder);
1928 if (ret == 0) { 1928 if (ret == 0) {
1929 /* if this fails, presume the device is a ghost */ 1929 /* if this fails, presume the device is a ghost */
1930 DRM_INFO("failed to retrieve link info, disabling eDP\n"); 1930 DRM_INFO("failed to retrieve link info, disabling eDP\n");
@@ -1945,7 +1945,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
1945 return; 1945 return;
1946 1946
1947err_priv: 1947err_priv:
1948 kfree(psb_intel_connector); 1948 kfree(gma_connector);
1949err_connector: 1949err_connector:
1950 kfree(psb_intel_encoder); 1950 kfree(gma_encoder);
1951} 1951}
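
The cdv_intel_dp.c hunks above, like the HDMI and LVDS files that follow, are a mechanical rename of the gma500 wrapper objects from psb_intel_encoder/psb_intel_connector to gma_encoder/gma_connector so the Cedarview and Poulsbo output code can share the new gma_* helpers. Below is a minimal sketch of how such wrappers are typically laid out and recovered from the embedded DRM objects; the field list is trimmed and the helper definitions are assumptions for illustration only — the real definitions live in psb_intel_drv.h.

#include <drm/drmP.h>

struct gma_encoder {
	struct drm_encoder base;	/* embedded DRM encoder */
	int type;			/* INTEL_OUTPUT_* */
	u32 ddi_select;			/* DDI routing (CDV) */
	void *dev_priv;			/* per-output private data */
	struct psb_intel_i2c_chan *i2c_bus;
	struct psb_intel_i2c_chan *ddc_bus;
};

struct gma_connector {
	struct drm_connector base;	/* embedded DRM connector */
	struct gma_encoder *encoder;	/* 1:1 attached encoder */
};

/* Recover the wrapper from the embedded object with container_of() */
#define to_gma_encoder(x)   container_of(x, struct gma_encoder, base)
#define to_gma_connector(x) container_of(x, struct gma_connector, base)

static inline struct gma_encoder *
gma_attached_encoder(struct drm_connector *connector)
{
	return to_gma_connector(connector)->encoder;
}
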
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 464153d9d2df..1c0d723b8d24 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -64,11 +64,11 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
64 struct drm_display_mode *adjusted_mode) 64 struct drm_display_mode *adjusted_mode)
65{ 65{
66 struct drm_device *dev = encoder->dev; 66 struct drm_device *dev = encoder->dev;
67 struct psb_intel_encoder *psb_intel_encoder = to_psb_intel_encoder(encoder); 67 struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
68 struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv; 68 struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
69 u32 hdmib; 69 u32 hdmib;
70 struct drm_crtc *crtc = encoder->crtc; 70 struct drm_crtc *crtc = encoder->crtc;
71 struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); 71 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
72 72
73 hdmib = (2 << 10); 73 hdmib = (2 << 10);
74 74
@@ -77,7 +77,7 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
77 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 77 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
78 hdmib |= HDMI_HSYNC_ACTIVE_HIGH; 78 hdmib |= HDMI_HSYNC_ACTIVE_HIGH;
79 79
80 if (intel_crtc->pipe == 1) 80 if (gma_crtc->pipe == 1)
81 hdmib |= HDMIB_PIPE_B_SELECT; 81 hdmib |= HDMIB_PIPE_B_SELECT;
82 82
83 if (hdmi_priv->has_hdmi_audio) { 83 if (hdmi_priv->has_hdmi_audio) {
@@ -99,9 +99,8 @@ static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
99static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode) 99static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
100{ 100{
101 struct drm_device *dev = encoder->dev; 101 struct drm_device *dev = encoder->dev;
102 struct psb_intel_encoder *psb_intel_encoder = 102 struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
103 to_psb_intel_encoder(encoder); 103 struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
104 struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
105 u32 hdmib; 104 u32 hdmib;
106 105
107 hdmib = REG_READ(hdmi_priv->hdmi_reg); 106 hdmib = REG_READ(hdmi_priv->hdmi_reg);
@@ -116,9 +115,8 @@ static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
116static void cdv_hdmi_save(struct drm_connector *connector) 115static void cdv_hdmi_save(struct drm_connector *connector)
117{ 116{
118 struct drm_device *dev = connector->dev; 117 struct drm_device *dev = connector->dev;
119 struct psb_intel_encoder *psb_intel_encoder = 118 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
120 psb_intel_attached_encoder(connector); 119 struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
121 struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
122 120
123 hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmi_reg); 121 hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmi_reg);
124} 122}
@@ -126,9 +124,8 @@ static void cdv_hdmi_save(struct drm_connector *connector)
126static void cdv_hdmi_restore(struct drm_connector *connector) 124static void cdv_hdmi_restore(struct drm_connector *connector)
127{ 125{
128 struct drm_device *dev = connector->dev; 126 struct drm_device *dev = connector->dev;
129 struct psb_intel_encoder *psb_intel_encoder = 127 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
130 psb_intel_attached_encoder(connector); 128 struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
131 struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
132 129
133 REG_WRITE(hdmi_priv->hdmi_reg, hdmi_priv->save_HDMIB); 130 REG_WRITE(hdmi_priv->hdmi_reg, hdmi_priv->save_HDMIB);
134 REG_READ(hdmi_priv->hdmi_reg); 131 REG_READ(hdmi_priv->hdmi_reg);
@@ -137,13 +134,12 @@ static void cdv_hdmi_restore(struct drm_connector *connector)
137static enum drm_connector_status cdv_hdmi_detect( 134static enum drm_connector_status cdv_hdmi_detect(
138 struct drm_connector *connector, bool force) 135 struct drm_connector *connector, bool force)
139{ 136{
140 struct psb_intel_encoder *psb_intel_encoder = 137 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
141 psb_intel_attached_encoder(connector); 138 struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
142 struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
143 struct edid *edid = NULL; 139 struct edid *edid = NULL;
144 enum drm_connector_status status = connector_status_disconnected; 140 enum drm_connector_status status = connector_status_disconnected;
145 141
146 edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter); 142 edid = drm_get_edid(connector, &gma_encoder->i2c_bus->adapter);
147 143
148 hdmi_priv->has_hdmi_sink = false; 144 hdmi_priv->has_hdmi_sink = false;
149 hdmi_priv->has_hdmi_audio = false; 145 hdmi_priv->has_hdmi_audio = false;
@@ -167,7 +163,7 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
167 struct drm_encoder *encoder = connector->encoder; 163 struct drm_encoder *encoder = connector->encoder;
168 164
169 if (!strcmp(property->name, "scaling mode") && encoder) { 165 if (!strcmp(property->name, "scaling mode") && encoder) {
170 struct psb_intel_crtc *crtc = to_psb_intel_crtc(encoder->crtc); 166 struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
171 bool centre; 167 bool centre;
172 uint64_t curValue; 168 uint64_t curValue;
173 169
@@ -221,12 +217,11 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
221 */ 217 */
222static int cdv_hdmi_get_modes(struct drm_connector *connector) 218static int cdv_hdmi_get_modes(struct drm_connector *connector)
223{ 219{
224 struct psb_intel_encoder *psb_intel_encoder = 220 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
225 psb_intel_attached_encoder(connector);
226 struct edid *edid = NULL; 221 struct edid *edid = NULL;
227 int ret = 0; 222 int ret = 0;
228 223
229 edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter); 224 edid = drm_get_edid(connector, &gma_encoder->i2c_bus->adapter);
230 if (edid) { 225 if (edid) {
231 drm_mode_connector_update_edid_property(connector, edid); 226 drm_mode_connector_update_edid_property(connector, edid);
232 ret = drm_add_edid_modes(connector, edid); 227 ret = drm_add_edid_modes(connector, edid);
@@ -256,11 +251,10 @@ static int cdv_hdmi_mode_valid(struct drm_connector *connector,
256 251
257static void cdv_hdmi_destroy(struct drm_connector *connector) 252static void cdv_hdmi_destroy(struct drm_connector *connector)
258{ 253{
259 struct psb_intel_encoder *psb_intel_encoder = 254 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
260 psb_intel_attached_encoder(connector);
261 255
262 if (psb_intel_encoder->i2c_bus) 256 if (gma_encoder->i2c_bus)
263 psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus); 257 psb_intel_i2c_destroy(gma_encoder->i2c_bus);
264 drm_sysfs_connector_remove(connector); 258 drm_sysfs_connector_remove(connector);
265 drm_connector_cleanup(connector); 259 drm_connector_cleanup(connector);
266 kfree(connector); 260 kfree(connector);
@@ -269,16 +263,16 @@ static void cdv_hdmi_destroy(struct drm_connector *connector)
269static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = { 263static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
270 .dpms = cdv_hdmi_dpms, 264 .dpms = cdv_hdmi_dpms,
271 .mode_fixup = cdv_hdmi_mode_fixup, 265 .mode_fixup = cdv_hdmi_mode_fixup,
272 .prepare = psb_intel_encoder_prepare, 266 .prepare = gma_encoder_prepare,
273 .mode_set = cdv_hdmi_mode_set, 267 .mode_set = cdv_hdmi_mode_set,
274 .commit = psb_intel_encoder_commit, 268 .commit = gma_encoder_commit,
275}; 269};
276 270
277static const struct drm_connector_helper_funcs 271static const struct drm_connector_helper_funcs
278 cdv_hdmi_connector_helper_funcs = { 272 cdv_hdmi_connector_helper_funcs = {
279 .get_modes = cdv_hdmi_get_modes, 273 .get_modes = cdv_hdmi_get_modes,
280 .mode_valid = cdv_hdmi_mode_valid, 274 .mode_valid = cdv_hdmi_mode_valid,
281 .best_encoder = psb_intel_best_encoder, 275 .best_encoder = gma_best_encoder,
282}; 276};
283 277
284static const struct drm_connector_funcs cdv_hdmi_connector_funcs = { 278static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
@@ -294,23 +288,22 @@ static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
294void cdv_hdmi_init(struct drm_device *dev, 288void cdv_hdmi_init(struct drm_device *dev,
295 struct psb_intel_mode_device *mode_dev, int reg) 289 struct psb_intel_mode_device *mode_dev, int reg)
296{ 290{
297 struct psb_intel_encoder *psb_intel_encoder; 291 struct gma_encoder *gma_encoder;
298 struct psb_intel_connector *psb_intel_connector; 292 struct gma_connector *gma_connector;
299 struct drm_connector *connector; 293 struct drm_connector *connector;
300 struct drm_encoder *encoder; 294 struct drm_encoder *encoder;
301 struct mid_intel_hdmi_priv *hdmi_priv; 295 struct mid_intel_hdmi_priv *hdmi_priv;
302 int ddc_bus; 296 int ddc_bus;
303 297
304 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), 298 gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
305 GFP_KERNEL);
306 299
307 if (!psb_intel_encoder) 300 if (!gma_encoder)
308 return; 301 return;
309 302
310 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), 303 gma_connector = kzalloc(sizeof(struct gma_connector),
311 GFP_KERNEL); 304 GFP_KERNEL);
312 305
313 if (!psb_intel_connector) 306 if (!gma_connector)
314 goto err_connector; 307 goto err_connector;
315 308
316 hdmi_priv = kzalloc(sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL); 309 hdmi_priv = kzalloc(sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL);
@@ -318,9 +311,9 @@ void cdv_hdmi_init(struct drm_device *dev,
318 if (!hdmi_priv) 311 if (!hdmi_priv)
319 goto err_priv; 312 goto err_priv;
320 313
321 connector = &psb_intel_connector->base; 314 connector = &gma_connector->base;
322 connector->polled = DRM_CONNECTOR_POLL_HPD; 315 connector->polled = DRM_CONNECTOR_POLL_HPD;
323 encoder = &psb_intel_encoder->base; 316 encoder = &gma_encoder->base;
324 drm_connector_init(dev, connector, 317 drm_connector_init(dev, connector,
325 &cdv_hdmi_connector_funcs, 318 &cdv_hdmi_connector_funcs,
326 DRM_MODE_CONNECTOR_DVID); 319 DRM_MODE_CONNECTOR_DVID);
@@ -328,12 +321,11 @@ void cdv_hdmi_init(struct drm_device *dev,
328 drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, 321 drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
329 DRM_MODE_ENCODER_TMDS); 322 DRM_MODE_ENCODER_TMDS);
330 323
331 psb_intel_connector_attach_encoder(psb_intel_connector, 324 gma_connector_attach_encoder(gma_connector, gma_encoder);
332 psb_intel_encoder); 325 gma_encoder->type = INTEL_OUTPUT_HDMI;
333 psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
334 hdmi_priv->hdmi_reg = reg; 326 hdmi_priv->hdmi_reg = reg;
335 hdmi_priv->has_hdmi_sink = false; 327 hdmi_priv->has_hdmi_sink = false;
336 psb_intel_encoder->dev_priv = hdmi_priv; 328 gma_encoder->dev_priv = hdmi_priv;
337 329
338 drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs); 330 drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs);
339 drm_connector_helper_add(connector, 331 drm_connector_helper_add(connector,
@@ -349,11 +341,11 @@ void cdv_hdmi_init(struct drm_device *dev,
349 switch (reg) { 341 switch (reg) {
350 case SDVOB: 342 case SDVOB:
351 ddc_bus = GPIOE; 343 ddc_bus = GPIOE;
352 psb_intel_encoder->ddi_select = DDI0_SELECT; 344 gma_encoder->ddi_select = DDI0_SELECT;
353 break; 345 break;
354 case SDVOC: 346 case SDVOC:
355 ddc_bus = GPIOD; 347 ddc_bus = GPIOD;
356 psb_intel_encoder->ddi_select = DDI1_SELECT; 348 gma_encoder->ddi_select = DDI1_SELECT;
357 break; 349 break;
358 default: 350 default:
359 DRM_ERROR("unknown reg 0x%x for HDMI\n", reg); 351 DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
@@ -361,16 +353,15 @@ void cdv_hdmi_init(struct drm_device *dev,
361 break; 353 break;
362 } 354 }
363 355
364 psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev, 356 gma_encoder->i2c_bus = psb_intel_i2c_create(dev,
365 ddc_bus, (reg == SDVOB) ? "HDMIB" : "HDMIC"); 357 ddc_bus, (reg == SDVOB) ? "HDMIB" : "HDMIC");
366 358
367 if (!psb_intel_encoder->i2c_bus) { 359 if (!gma_encoder->i2c_bus) {
368 dev_err(dev->dev, "No ddc adapter available!\n"); 360 dev_err(dev->dev, "No ddc adapter available!\n");
369 goto failed_ddc; 361 goto failed_ddc;
370 } 362 }
371 363
372 hdmi_priv->hdmi_i2c_adapter = 364 hdmi_priv->hdmi_i2c_adapter = &(gma_encoder->i2c_bus->adapter);
373 &(psb_intel_encoder->i2c_bus->adapter);
374 hdmi_priv->dev = dev; 365 hdmi_priv->dev = dev;
375 drm_sysfs_connector_add(connector); 366 drm_sysfs_connector_add(connector);
376 return; 367 return;
@@ -379,7 +370,7 @@ failed_ddc:
379 drm_encoder_cleanup(encoder); 370 drm_encoder_cleanup(encoder);
380 drm_connector_cleanup(connector); 371 drm_connector_cleanup(connector);
381err_priv: 372err_priv:
382 kfree(psb_intel_connector); 373 kfree(gma_connector);
383err_connector: 374err_connector:
384 kfree(psb_intel_encoder); 375 kfree(gma_encoder);
385} 376}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index d81dbc3368f0..20e08e65d46c 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -356,8 +356,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
356{ 356{
357 struct drm_device *dev = encoder->dev; 357 struct drm_device *dev = encoder->dev;
358 struct drm_psb_private *dev_priv = dev->dev_private; 358 struct drm_psb_private *dev_priv = dev->dev_private;
359 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc( 359 struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
360 encoder->crtc);
361 u32 pfit_control; 360 u32 pfit_control;
362 361
363 /* 362 /*
@@ -379,7 +378,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
379 else 378 else
380 pfit_control = 0; 379 pfit_control = 0;
381 380
382 pfit_control |= psb_intel_crtc->pipe << PFIT_PIPE_SHIFT; 381 pfit_control |= gma_crtc->pipe << PFIT_PIPE_SHIFT;
383 382
384 if (dev_priv->lvds_dither) 383 if (dev_priv->lvds_dither)
385 pfit_control |= PANEL_8TO6_DITHER_ENABLE; 384 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
@@ -407,12 +406,11 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
407{ 406{
408 struct drm_device *dev = connector->dev; 407 struct drm_device *dev = connector->dev;
409 struct drm_psb_private *dev_priv = dev->dev_private; 408 struct drm_psb_private *dev_priv = dev->dev_private;
410 struct psb_intel_encoder *psb_intel_encoder = 409 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
411 psb_intel_attached_encoder(connector);
412 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; 410 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
413 int ret; 411 int ret;
414 412
415 ret = psb_intel_ddc_get_modes(connector, &psb_intel_encoder->i2c_bus->adapter); 413 ret = psb_intel_ddc_get_modes(connector, &gma_encoder->i2c_bus->adapter);
416 414
417 if (ret) 415 if (ret)
418 return ret; 416 return ret;
@@ -444,11 +442,10 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
444 */ 442 */
445static void cdv_intel_lvds_destroy(struct drm_connector *connector) 443static void cdv_intel_lvds_destroy(struct drm_connector *connector)
446{ 444{
447 struct psb_intel_encoder *psb_intel_encoder = 445 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
448 psb_intel_attached_encoder(connector);
449 446
450 if (psb_intel_encoder->i2c_bus) 447 if (gma_encoder->i2c_bus)
451 psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus); 448 psb_intel_i2c_destroy(gma_encoder->i2c_bus);
452 drm_sysfs_connector_remove(connector); 449 drm_sysfs_connector_remove(connector);
453 drm_connector_cleanup(connector); 450 drm_connector_cleanup(connector);
454 kfree(connector); 451 kfree(connector);
@@ -461,8 +458,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
461 struct drm_encoder *encoder = connector->encoder; 458 struct drm_encoder *encoder = connector->encoder;
462 459
463 if (!strcmp(property->name, "scaling mode") && encoder) { 460 if (!strcmp(property->name, "scaling mode") && encoder) {
464 struct psb_intel_crtc *crtc = 461 struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
465 to_psb_intel_crtc(encoder->crtc);
466 uint64_t curValue; 462 uint64_t curValue;
467 463
468 if (!crtc) 464 if (!crtc)
@@ -529,7 +525,7 @@ static const struct drm_connector_helper_funcs
529 cdv_intel_lvds_connector_helper_funcs = { 525 cdv_intel_lvds_connector_helper_funcs = {
530 .get_modes = cdv_intel_lvds_get_modes, 526 .get_modes = cdv_intel_lvds_get_modes,
531 .mode_valid = cdv_intel_lvds_mode_valid, 527 .mode_valid = cdv_intel_lvds_mode_valid,
532 .best_encoder = psb_intel_best_encoder, 528 .best_encoder = gma_best_encoder,
533}; 529};
534 530
535static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = { 531static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
@@ -612,8 +608,8 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
612void cdv_intel_lvds_init(struct drm_device *dev, 608void cdv_intel_lvds_init(struct drm_device *dev,
613 struct psb_intel_mode_device *mode_dev) 609 struct psb_intel_mode_device *mode_dev)
614{ 610{
615 struct psb_intel_encoder *psb_intel_encoder; 611 struct gma_encoder *gma_encoder;
616 struct psb_intel_connector *psb_intel_connector; 612 struct gma_connector *gma_connector;
617 struct cdv_intel_lvds_priv *lvds_priv; 613 struct cdv_intel_lvds_priv *lvds_priv;
618 struct drm_connector *connector; 614 struct drm_connector *connector;
619 struct drm_encoder *encoder; 615 struct drm_encoder *encoder;
@@ -630,24 +626,24 @@ void cdv_intel_lvds_init(struct drm_device *dev,
630 return; 626 return;
631 } 627 }
632 628
633 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), 629 gma_encoder = kzalloc(sizeof(struct gma_encoder),
634 GFP_KERNEL); 630 GFP_KERNEL);
635 if (!psb_intel_encoder) 631 if (!gma_encoder)
636 return; 632 return;
637 633
638 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), 634 gma_connector = kzalloc(sizeof(struct gma_connector),
639 GFP_KERNEL); 635 GFP_KERNEL);
640 if (!psb_intel_connector) 636 if (!gma_connector)
641 goto failed_connector; 637 goto failed_connector;
642 638
643 lvds_priv = kzalloc(sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL); 639 lvds_priv = kzalloc(sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL);
644 if (!lvds_priv) 640 if (!lvds_priv)
645 goto failed_lvds_priv; 641 goto failed_lvds_priv;
646 642
647 psb_intel_encoder->dev_priv = lvds_priv; 643 gma_encoder->dev_priv = lvds_priv;
648 644
649 connector = &psb_intel_connector->base; 645 connector = &gma_connector->base;
650 encoder = &psb_intel_encoder->base; 646 encoder = &gma_encoder->base;
651 647
652 648
653 drm_connector_init(dev, connector, 649 drm_connector_init(dev, connector,
@@ -659,9 +655,8 @@ void cdv_intel_lvds_init(struct drm_device *dev,
659 DRM_MODE_ENCODER_LVDS); 655 DRM_MODE_ENCODER_LVDS);
660 656
661 657
662 psb_intel_connector_attach_encoder(psb_intel_connector, 658 gma_connector_attach_encoder(gma_connector, gma_encoder);
663 psb_intel_encoder); 659 gma_encoder->type = INTEL_OUTPUT_LVDS;
664 psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
665 660
666 drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs); 661 drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs);
667 drm_connector_helper_add(connector, 662 drm_connector_helper_add(connector,
@@ -682,16 +677,16 @@ void cdv_intel_lvds_init(struct drm_device *dev,
682 * Set up I2C bus 677 * Set up I2C bus
683 * FIXME: distroy i2c_bus when exit 678 * FIXME: distroy i2c_bus when exit
684 */ 679 */
685 psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev, 680 gma_encoder->i2c_bus = psb_intel_i2c_create(dev,
686 GPIOB, 681 GPIOB,
687 "LVDSBLC_B"); 682 "LVDSBLC_B");
688 if (!psb_intel_encoder->i2c_bus) { 683 if (!gma_encoder->i2c_bus) {
689 dev_printk(KERN_ERR, 684 dev_printk(KERN_ERR,
690 &dev->pdev->dev, "I2C bus registration failed.\n"); 685 &dev->pdev->dev, "I2C bus registration failed.\n");
691 goto failed_blc_i2c; 686 goto failed_blc_i2c;
692 } 687 }
693 psb_intel_encoder->i2c_bus->slave_addr = 0x2C; 688 gma_encoder->i2c_bus->slave_addr = 0x2C;
694 dev_priv->lvds_i2c_bus = psb_intel_encoder->i2c_bus; 689 dev_priv->lvds_i2c_bus = gma_encoder->i2c_bus;
695 690
696 /* 691 /*
697 * LVDS discovery: 692 * LVDS discovery:
@@ -704,10 +699,10 @@ void cdv_intel_lvds_init(struct drm_device *dev,
704 */ 699 */
705 700
706 /* Set up the DDC bus. */ 701 /* Set up the DDC bus. */
707 psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev, 702 gma_encoder->ddc_bus = psb_intel_i2c_create(dev,
708 GPIOC, 703 GPIOC,
709 "LVDSDDC_C"); 704 "LVDSDDC_C");
710 if (!psb_intel_encoder->ddc_bus) { 705 if (!gma_encoder->ddc_bus) {
711 dev_printk(KERN_ERR, &dev->pdev->dev, 706 dev_printk(KERN_ERR, &dev->pdev->dev,
712 "DDC bus registration " "failed.\n"); 707 "DDC bus registration " "failed.\n");
713 goto failed_ddc; 708 goto failed_ddc;
@@ -718,7 +713,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
718 * preferred mode is the right one. 713 * preferred mode is the right one.
719 */ 714 */
720 psb_intel_ddc_get_modes(connector, 715 psb_intel_ddc_get_modes(connector,
721 &psb_intel_encoder->ddc_bus->adapter); 716 &gma_encoder->ddc_bus->adapter);
722 list_for_each_entry(scan, &connector->probed_modes, head) { 717 list_for_each_entry(scan, &connector->probed_modes, head) {
723 if (scan->type & DRM_MODE_TYPE_PREFERRED) { 718 if (scan->type & DRM_MODE_TYPE_PREFERRED) {
724 mode_dev->panel_fixed_mode = 719 mode_dev->panel_fixed_mode =
@@ -782,19 +777,19 @@ out:
782 777
783failed_find: 778failed_find:
784 printk(KERN_ERR "Failed find\n"); 779 printk(KERN_ERR "Failed find\n");
785 if (psb_intel_encoder->ddc_bus) 780 if (gma_encoder->ddc_bus)
786 psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus); 781 psb_intel_i2c_destroy(gma_encoder->ddc_bus);
787failed_ddc: 782failed_ddc:
788 printk(KERN_ERR "Failed DDC\n"); 783 printk(KERN_ERR "Failed DDC\n");
789 if (psb_intel_encoder->i2c_bus) 784 if (gma_encoder->i2c_bus)
790 psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus); 785 psb_intel_i2c_destroy(gma_encoder->i2c_bus);
791failed_blc_i2c: 786failed_blc_i2c:
792 printk(KERN_ERR "Failed BLC\n"); 787 printk(KERN_ERR "Failed BLC\n");
793 drm_encoder_cleanup(encoder); 788 drm_encoder_cleanup(encoder);
794 drm_connector_cleanup(connector); 789 drm_connector_cleanup(connector);
795 kfree(lvds_priv); 790 kfree(lvds_priv);
796failed_lvds_priv: 791failed_lvds_priv:
797 kfree(psb_intel_connector); 792 kfree(gma_connector);
798failed_connector: 793failed_connector:
799 kfree(psb_intel_encoder); 794 kfree(gma_encoder);
800} 795}
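
cdv_intel_lvds_init() keeps the usual kernel unwind idiom through the rename: each failure point jumps to a label that frees only what was successfully allocated before it, in reverse order (failed_find, failed_ddc, failed_blc_i2c, failed_lvds_priv, failed_connector). A generic sketch of that idiom follows; the types and the registration helper are hypothetical, not taken from the driver.

static int example_init(struct device *dev)
{
	struct foo *a;				/* hypothetical type */
	struct bar *b;				/* hypothetical type */
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b) {
		ret = -ENOMEM;
		goto err_free_a;
	}

	ret = example_register(dev, a, b);	/* hypothetical helper */
	if (ret)
		goto err_free_b;

	return 0;

err_free_b:		/* unwind in reverse allocation order */
	kfree(b);
err_free_a:
	kfree(a);
	return ret;
}
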
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 8b1b6d923abe..01dd7d225762 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -321,10 +321,8 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
321 /* Begin by trying to use stolen memory backing */ 321 /* Begin by trying to use stolen memory backing */
322 backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1); 322 backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
323 if (backing) { 323 if (backing) {
324 if (drm_gem_private_object_init(dev, 324 drm_gem_private_object_init(dev, &backing->gem, aligned_size);
325 &backing->gem, aligned_size) == 0) 325 return backing;
326 return backing;
327 psb_gtt_free_range(dev, backing);
328 } 326 }
329 return NULL; 327 return NULL;
330} 328}
@@ -522,21 +520,21 @@ static struct drm_framebuffer *psb_user_framebuffer_create
522static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 520static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
523 u16 blue, int regno) 521 u16 blue, int regno)
524{ 522{
525 struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); 523 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
526 524
527 intel_crtc->lut_r[regno] = red >> 8; 525 gma_crtc->lut_r[regno] = red >> 8;
528 intel_crtc->lut_g[regno] = green >> 8; 526 gma_crtc->lut_g[regno] = green >> 8;
529 intel_crtc->lut_b[regno] = blue >> 8; 527 gma_crtc->lut_b[regno] = blue >> 8;
530} 528}
531 529
532static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red, 530static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
533 u16 *green, u16 *blue, int regno) 531 u16 *green, u16 *blue, int regno)
534{ 532{
535 struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); 533 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
536 534
537 *red = intel_crtc->lut_r[regno] << 8; 535 *red = gma_crtc->lut_r[regno] << 8;
538 *green = intel_crtc->lut_g[regno] << 8; 536 *green = gma_crtc->lut_g[regno] << 8;
539 *blue = intel_crtc->lut_b[regno] << 8; 537 *blue = gma_crtc->lut_b[regno] << 8;
540} 538}
541 539
542static int psbfb_probe(struct drm_fb_helper *helper, 540static int psbfb_probe(struct drm_fb_helper *helper,
@@ -705,13 +703,12 @@ static void psb_setup_outputs(struct drm_device *dev)
705 703
706 list_for_each_entry(connector, &dev->mode_config.connector_list, 704 list_for_each_entry(connector, &dev->mode_config.connector_list,
707 head) { 705 head) {
708 struct psb_intel_encoder *psb_intel_encoder = 706 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
709 psb_intel_attached_encoder(connector); 707 struct drm_encoder *encoder = &gma_encoder->base;
710 struct drm_encoder *encoder = &psb_intel_encoder->base;
711 int crtc_mask = 0, clone_mask = 0; 708 int crtc_mask = 0, clone_mask = 0;
712 709
713 /* valid crtcs */ 710 /* valid crtcs */
714 switch (psb_intel_encoder->type) { 711 switch (gma_encoder->type) {
715 case INTEL_OUTPUT_ANALOG: 712 case INTEL_OUTPUT_ANALOG:
716 crtc_mask = (1 << 0); 713 crtc_mask = (1 << 0);
717 clone_mask = (1 << INTEL_OUTPUT_ANALOG); 714 clone_mask = (1 << INTEL_OUTPUT_ANALOG);
@@ -746,7 +743,7 @@ static void psb_setup_outputs(struct drm_device *dev)
746 } 743 }
747 encoder->possible_crtcs = crtc_mask; 744 encoder->possible_crtcs = crtc_mask;
748 encoder->possible_clones = 745 encoder->possible_clones =
749 psb_intel_connector_clones(dev, clone_mask); 746 gma_connector_clones(dev, clone_mask);
750 } 747 }
751} 748}
752 749
diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
index 989558a9e6ee..395f20b07aab 100644
--- a/drivers/gpu/drm/gma500/framebuffer.h
+++ b/drivers/gpu/drm/gma500/framebuffer.h
@@ -41,7 +41,7 @@ struct psb_fbdev {
41 41
42#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base) 42#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
43 43
44extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask); 44extern int gma_connector_clones(struct drm_device *dev, int type_mask);
45 45
46#endif 46#endif
47 47
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index eefd6cc5b80d..10ae8c52d06f 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -26,6 +26,7 @@
26#include <drm/drmP.h> 26#include <drm/drmP.h>
27#include <drm/drm.h> 27#include <drm/drm.h>
28#include <drm/gma_drm.h> 28#include <drm/gma_drm.h>
29#include <drm/drm_vma_manager.h>
29#include "psb_drv.h" 30#include "psb_drv.h"
30 31
31int psb_gem_init_object(struct drm_gem_object *obj) 32int psb_gem_init_object(struct drm_gem_object *obj)
@@ -38,8 +39,7 @@ void psb_gem_free_object(struct drm_gem_object *obj)
38 struct gtt_range *gtt = container_of(obj, struct gtt_range, gem); 39 struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
39 40
40 /* Remove the list map if one is present */ 41 /* Remove the list map if one is present */
41 if (obj->map_list.map) 42 drm_gem_free_mmap_offset(obj);
42 drm_gem_free_mmap_offset(obj);
43 drm_gem_object_release(obj); 43 drm_gem_object_release(obj);
44 44
45 /* This must occur last as it frees up the memory of the GEM object */ 45 /* This must occur last as it frees up the memory of the GEM object */
@@ -81,13 +81,10 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
81 /* What validation is needed here ? */ 81 /* What validation is needed here ? */
82 82
83 /* Make it mmapable */ 83 /* Make it mmapable */
84 if (!obj->map_list.map) { 84 ret = drm_gem_create_mmap_offset(obj);
85 ret = drm_gem_create_mmap_offset(obj); 85 if (ret)
86 if (ret) 86 goto out;
87 goto out; 87 *offset = drm_vma_node_offset_addr(&obj->vma_node);
88 }
89 /* GEM should really work out the hash offsets for us */
90 *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
91out: 88out:
92 drm_gem_object_unreference(obj); 89 drm_gem_object_unreference(obj);
93unlock: 90unlock:
@@ -165,23 +162,6 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
165} 162}
166 163
167/** 164/**
168 * psb_gem_dumb_destroy - destroy a dumb buffer
169 * @file: client file
170 * @dev: our DRM device
171 * @handle: the object handle
172 *
173 * Destroy a handle that was created via psb_gem_dumb_create, at least
174 * we hope it was created that way. i915 seems to assume the caller
175 * does the checking but that might be worth review ! FIXME
176 */
177int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
178 uint32_t handle)
179{
180 /* No special work needed, drop the reference and see what falls out */
181 return drm_gem_handle_delete(file, handle);
182}
183
184/**
185 * psb_gem_fault - pagefault handler for GEM objects 165 * psb_gem_fault - pagefault handler for GEM objects
186 * @vma: the VMA of the GEM object 166 * @vma: the VMA of the GEM object
187 * @vmf: fault detail 167 * @vmf: fault detail
@@ -261,11 +241,12 @@ static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
261 struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1); 241 struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
262 if (gtt == NULL) 242 if (gtt == NULL)
263 return -ENOMEM; 243 return -ENOMEM;
264 if (drm_gem_private_object_init(dev, &gtt->gem, size) != 0) 244
265 goto free_gtt; 245 drm_gem_private_object_init(dev, &gtt->gem, size);
266 if (drm_gem_handle_create(file, &gtt->gem, handle) == 0) 246 if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
267 return 0; 247 return 0;
268free_gtt: 248
249 drm_gem_object_release(&gtt->gem);
269 psb_gtt_free_range(dev, gtt); 250 psb_gtt_free_range(dev, gtt);
270 return -ENOMEM; 251 return -ENOMEM;
271} 252}
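
The gem.c hunks switch gma500 over to the combined VMA offset manager mentioned in the merge summary: the old obj->map_list/hash-key bookkeeping goes away, drm_gem_create_mmap_offset() becomes safe to call unconditionally, and the fake mmap offset is read back with drm_vma_node_offset_addr(). They also drop the error handling around drm_gem_private_object_init(), which no longer returns a value. A minimal sketch of the new dumb-map pattern, with an illustrative function name:

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

static int example_dumb_map_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Idempotent with the VMA offset manager */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		return ret;

	/* Fake offset handed to userspace for mmap() */
	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	return 0;
}
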
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
new file mode 100644
index 000000000000..24e8af3d22bf
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -0,0 +1,776 @@
1/*
2 * Copyright © 2006-2011 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 * Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
20 */
21
22#include <drm/drmP.h>
23#include "gma_display.h"
24#include "psb_intel_drv.h"
25#include "psb_intel_reg.h"
26#include "psb_drv.h"
27#include "framebuffer.h"
28
29/**
30 * Returns whether any output on the specified pipe is of the specified type
31 */
32bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
33{
34 struct drm_device *dev = crtc->dev;
35 struct drm_mode_config *mode_config = &dev->mode_config;
36 struct drm_connector *l_entry;
37
38 list_for_each_entry(l_entry, &mode_config->connector_list, head) {
39 if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
40 struct gma_encoder *gma_encoder =
41 gma_attached_encoder(l_entry);
42 if (gma_encoder->type == type)
43 return true;
44 }
45 }
46
47 return false;
48}
49
50void gma_wait_for_vblank(struct drm_device *dev)
51{
52 /* Wait for 20ms, i.e. one cycle at 50hz. */
53 mdelay(20);
54}
55
56int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
57 struct drm_framebuffer *old_fb)
58{
59 struct drm_device *dev = crtc->dev;
60 struct drm_psb_private *dev_priv = dev->dev_private;
61 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
62 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
63 int pipe = gma_crtc->pipe;
64 const struct psb_offset *map = &dev_priv->regmap[pipe];
65 unsigned long start, offset;
66 u32 dspcntr;
67 int ret = 0;
68
69 if (!gma_power_begin(dev, true))
70 return 0;
71
72 /* no fb bound */
73 if (!crtc->fb) {
74 dev_err(dev->dev, "No FB bound\n");
75 goto gma_pipe_cleaner;
76 }
77
78 /* We are displaying this buffer, make sure it is actually loaded
79 into the GTT */
80 ret = psb_gtt_pin(psbfb->gtt);
81 if (ret < 0)
82 goto gma_pipe_set_base_exit;
83 start = psbfb->gtt->offset;
84 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
85
86 REG_WRITE(map->stride, crtc->fb->pitches[0]);
87
88 dspcntr = REG_READ(map->cntr);
89 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
90
91 switch (crtc->fb->bits_per_pixel) {
92 case 8:
93 dspcntr |= DISPPLANE_8BPP;
94 break;
95 case 16:
96 if (crtc->fb->depth == 15)
97 dspcntr |= DISPPLANE_15_16BPP;
98 else
99 dspcntr |= DISPPLANE_16BPP;
100 break;
101 case 24:
102 case 32:
103 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
104 break;
105 default:
106 dev_err(dev->dev, "Unknown color depth\n");
107 ret = -EINVAL;
108 goto gma_pipe_set_base_exit;
109 }
110 REG_WRITE(map->cntr, dspcntr);
111
112 dev_dbg(dev->dev,
113 "Writing base %08lX %08lX %d %d\n", start, offset, x, y);
114
115 /* FIXME: Investigate whether this really is the base for psb and why
116 the linear offset is named base for the other chips. map->surf
117 should be the base and map->linoff the offset for all chips */
118 if (IS_PSB(dev)) {
119 REG_WRITE(map->base, offset + start);
120 REG_READ(map->base);
121 } else {
122 REG_WRITE(map->base, offset);
123 REG_READ(map->base);
124 REG_WRITE(map->surf, start);
125 REG_READ(map->surf);
126 }
127
128gma_pipe_cleaner:
129 /* If there was a previous display we can now unpin it */
130 if (old_fb)
131 psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
132
133gma_pipe_set_base_exit:
134 gma_power_end(dev);
135 return ret;
136}
137
138/* Loads the palette/gamma unit for the CRTC with the prepared values */
139void gma_crtc_load_lut(struct drm_crtc *crtc)
140{
141 struct drm_device *dev = crtc->dev;
142 struct drm_psb_private *dev_priv = dev->dev_private;
143 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
144 const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
145 int palreg = map->palette;
146 int i;
147
148 /* The clocks have to be on to load the palette. */
149 if (!crtc->enabled)
150 return;
151
152 if (gma_power_begin(dev, false)) {
153 for (i = 0; i < 256; i++) {
154 REG_WRITE(palreg + 4 * i,
155 ((gma_crtc->lut_r[i] +
156 gma_crtc->lut_adj[i]) << 16) |
157 ((gma_crtc->lut_g[i] +
158 gma_crtc->lut_adj[i]) << 8) |
159 (gma_crtc->lut_b[i] +
160 gma_crtc->lut_adj[i]));
161 }
162 gma_power_end(dev);
163 } else {
164 for (i = 0; i < 256; i++) {
165 /* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */
166 dev_priv->regs.pipe[0].palette[i] =
167 ((gma_crtc->lut_r[i] +
168 gma_crtc->lut_adj[i]) << 16) |
169 ((gma_crtc->lut_g[i] +
170 gma_crtc->lut_adj[i]) << 8) |
171 (gma_crtc->lut_b[i] +
172 gma_crtc->lut_adj[i]);
173 }
174
175 }
176}
177
178void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
179 u32 start, u32 size)
180{
181 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
182 int i;
183 int end = (start + size > 256) ? 256 : start + size;
184
185 for (i = start; i < end; i++) {
186 gma_crtc->lut_r[i] = red[i] >> 8;
187 gma_crtc->lut_g[i] = green[i] >> 8;
188 gma_crtc->lut_b[i] = blue[i] >> 8;
189 }
190
191 gma_crtc_load_lut(crtc);
192}
193
194/**
195 * Sets the power management mode of the pipe and plane.
196 *
197 * This code should probably grow support for turning the cursor off and back
198 * on appropriately at the same time as we're turning the pipe off/on.
199 */
200void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
201{
202 struct drm_device *dev = crtc->dev;
203 struct drm_psb_private *dev_priv = dev->dev_private;
204 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
205 int pipe = gma_crtc->pipe;
206 const struct psb_offset *map = &dev_priv->regmap[pipe];
207 u32 temp;
208
209 /* XXX: When our outputs are all unaware of DPMS modes other than off
210 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
211 */
212
213 if (IS_CDV(dev))
214 dev_priv->ops->disable_sr(dev);
215
216 switch (mode) {
217 case DRM_MODE_DPMS_ON:
218 case DRM_MODE_DPMS_STANDBY:
219 case DRM_MODE_DPMS_SUSPEND:
220 if (gma_crtc->active)
221 break;
222
223 gma_crtc->active = true;
224
225 /* Enable the DPLL */
226 temp = REG_READ(map->dpll);
227 if ((temp & DPLL_VCO_ENABLE) == 0) {
228 REG_WRITE(map->dpll, temp);
229 REG_READ(map->dpll);
230 /* Wait for the clocks to stabilize. */
231 udelay(150);
232 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
233 REG_READ(map->dpll);
234 /* Wait for the clocks to stabilize. */
235 udelay(150);
236 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
237 REG_READ(map->dpll);
238 /* Wait for the clocks to stabilize. */
239 udelay(150);
240 }
241
242 /* Enable the plane */
243 temp = REG_READ(map->cntr);
244 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
245 REG_WRITE(map->cntr,
246 temp | DISPLAY_PLANE_ENABLE);
247 /* Flush the plane changes */
248 REG_WRITE(map->base, REG_READ(map->base));
249 }
250
251 udelay(150);
252
253 /* Enable the pipe */
254 temp = REG_READ(map->conf);
255 if ((temp & PIPEACONF_ENABLE) == 0)
256 REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
257
258 temp = REG_READ(map->status);
259 temp &= ~(0xFFFF);
260 temp |= PIPE_FIFO_UNDERRUN;
261 REG_WRITE(map->status, temp);
262 REG_READ(map->status);
263
264 gma_crtc_load_lut(crtc);
265
266 /* Give the overlay scaler a chance to enable
267 * if it's on this pipe */
268 /* psb_intel_crtc_dpms_video(crtc, true); TODO */
269 break;
270 case DRM_MODE_DPMS_OFF:
271 if (!gma_crtc->active)
272 break;
273
274 gma_crtc->active = false;
275
276 /* Give the overlay scaler a chance to disable
277 * if it's on this pipe */
278 /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
279
280 /* Disable the VGA plane that we never use */
281 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
282
283 /* Turn off vblank interrupts */
284 drm_vblank_off(dev, pipe);
285
286 /* Wait for vblank for the disable to take effect */
287 gma_wait_for_vblank(dev);
288
289 /* Disable plane */
290 temp = REG_READ(map->cntr);
291 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
292 REG_WRITE(map->cntr,
293 temp & ~DISPLAY_PLANE_ENABLE);
294 /* Flush the plane changes */
295 REG_WRITE(map->base, REG_READ(map->base));
296 REG_READ(map->base);
297 }
298
299 /* Disable pipe */
300 temp = REG_READ(map->conf);
301 if ((temp & PIPEACONF_ENABLE) != 0) {
302 REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
303 REG_READ(map->conf);
304 }
305
306 /* Wait for vblank for the disable to take effect. */
307 gma_wait_for_vblank(dev);
308
309 udelay(150);
310
311 /* Disable DPLL */
312 temp = REG_READ(map->dpll);
313 if ((temp & DPLL_VCO_ENABLE) != 0) {
314 REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
315 REG_READ(map->dpll);
316 }
317
318 /* Wait for the clocks to turn off. */
319 udelay(150);
320 break;
321 }
322
323 if (IS_CDV(dev))
324 dev_priv->ops->update_wm(dev, crtc);
325
326 /* Set FIFO watermarks */
327 REG_WRITE(DSPARB, 0x3F3E);
328}
329
330int gma_crtc_cursor_set(struct drm_crtc *crtc,
331 struct drm_file *file_priv,
332 uint32_t handle,
333 uint32_t width, uint32_t height)
334{
335 struct drm_device *dev = crtc->dev;
336 struct drm_psb_private *dev_priv = dev->dev_private;
337 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
338 int pipe = gma_crtc->pipe;
339 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
340 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
341 uint32_t temp;
342 size_t addr = 0;
343 struct gtt_range *gt;
344 struct gtt_range *cursor_gt = gma_crtc->cursor_gt;
345 struct drm_gem_object *obj;
346 void *tmp_dst, *tmp_src;
347 int ret = 0, i, cursor_pages;
348
349 /* If we didn't get a handle then turn the cursor off */
350 if (!handle) {
351 temp = CURSOR_MODE_DISABLE;
352
353 if (gma_power_begin(dev, false)) {
354 REG_WRITE(control, temp);
355 REG_WRITE(base, 0);
356 gma_power_end(dev);
357 }
358
359 /* Unpin the old GEM object */
360 if (gma_crtc->cursor_obj) {
361 gt = container_of(gma_crtc->cursor_obj,
362 struct gtt_range, gem);
363 psb_gtt_unpin(gt);
364 drm_gem_object_unreference(gma_crtc->cursor_obj);
365 gma_crtc->cursor_obj = NULL;
366 }
367
368 return 0;
369 }
370
371 /* Currently we only support 64x64 cursors */
372 if (width != 64 || height != 64) {
373 dev_dbg(dev->dev, "We currently only support 64x64 cursors\n");
374 return -EINVAL;
375 }
376
377 obj = drm_gem_object_lookup(dev, file_priv, handle);
378 if (!obj)
379 return -ENOENT;
380
381 if (obj->size < width * height * 4) {
382 dev_dbg(dev->dev, "Buffer is too small\n");
383 ret = -ENOMEM;
384 goto unref_cursor;
385 }
386
387 gt = container_of(obj, struct gtt_range, gem);
388
389 /* Pin the memory into the GTT */
390 ret = psb_gtt_pin(gt);
391 if (ret) {
392 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
393 goto unref_cursor;
394 }
395
396 if (dev_priv->ops->cursor_needs_phys) {
397 if (cursor_gt == NULL) {
398 dev_err(dev->dev, "No hardware cursor mem available");
399 ret = -ENOMEM;
400 goto unref_cursor;
401 }
402
403 /* Prevent overflow */
404 if (gt->npage > 4)
405 cursor_pages = 4;
406 else
407 cursor_pages = gt->npage;
408
409 /* Copy the cursor to cursor mem */
410 tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
411 for (i = 0; i < cursor_pages; i++) {
412 tmp_src = kmap(gt->pages[i]);
413 memcpy(tmp_dst, tmp_src, PAGE_SIZE);
414 kunmap(gt->pages[i]);
415 tmp_dst += PAGE_SIZE;
416 }
417
418 addr = gma_crtc->cursor_addr;
419 } else {
420 addr = gt->offset;
421 gma_crtc->cursor_addr = addr;
422 }
423
424 temp = 0;
425 /* set the pipe for the cursor */
426 temp |= (pipe << 28);
427 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
428
429 if (gma_power_begin(dev, false)) {
430 REG_WRITE(control, temp);
431 REG_WRITE(base, addr);
432 gma_power_end(dev);
433 }
434
435 /* unpin the old bo */
436 if (gma_crtc->cursor_obj) {
437 gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
438 psb_gtt_unpin(gt);
439 drm_gem_object_unreference(gma_crtc->cursor_obj);
440 }
441
442 gma_crtc->cursor_obj = obj;
443 return ret;
444
445unref_cursor:
446 drm_gem_object_unreference(obj);
447 return ret;
448}
449
450int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
451{
452 struct drm_device *dev = crtc->dev;
453 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
454 int pipe = gma_crtc->pipe;
455 uint32_t temp = 0;
456 uint32_t addr;
457
458 if (x < 0) {
459 temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
460 x = -x;
461 }
462 if (y < 0) {
463 temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
464 y = -y;
465 }
466
467 temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
468 temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
469
470 addr = gma_crtc->cursor_addr;
471
472 if (gma_power_begin(dev, false)) {
473 REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
474 REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
475 gma_power_end(dev);
476 }
477 return 0;
478}
479
480bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
481 const struct drm_display_mode *mode,
482 struct drm_display_mode *adjusted_mode)
483{
484 return true;
485}
486
487void gma_crtc_prepare(struct drm_crtc *crtc)
488{
489 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
490 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
491}
492
493void gma_crtc_commit(struct drm_crtc *crtc)
494{
495 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
496 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
497}
498
499void gma_crtc_disable(struct drm_crtc *crtc)
500{
501 struct gtt_range *gt;
502 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
503
504 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
505
506 if (crtc->fb) {
507 gt = to_psb_fb(crtc->fb)->gtt;
508 psb_gtt_unpin(gt);
509 }
510}
511
512void gma_crtc_destroy(struct drm_crtc *crtc)
513{
514 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
515
516 kfree(gma_crtc->crtc_state);
517 drm_crtc_cleanup(crtc);
518 kfree(gma_crtc);
519}
520
521int gma_crtc_set_config(struct drm_mode_set *set)
522{
523 struct drm_device *dev = set->crtc->dev;
524 struct drm_psb_private *dev_priv = dev->dev_private;
525 int ret;
526
527 if (!dev_priv->rpm_enabled)
528 return drm_crtc_helper_set_config(set);
529
530 pm_runtime_forbid(&dev->pdev->dev);
531 ret = drm_crtc_helper_set_config(set);
532 pm_runtime_allow(&dev->pdev->dev);
533
534 return ret;
535}
536
537/**
538 * Save HW states of given crtc
539 */
540void gma_crtc_save(struct drm_crtc *crtc)
541{
542 struct drm_device *dev = crtc->dev;
543 struct drm_psb_private *dev_priv = dev->dev_private;
544 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
545 struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
546 const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
547 uint32_t palette_reg;
548 int i;
549
550 if (!crtc_state) {
551 dev_err(dev->dev, "No CRTC state found\n");
552 return;
553 }
554
555 crtc_state->saveDSPCNTR = REG_READ(map->cntr);
556 crtc_state->savePIPECONF = REG_READ(map->conf);
557 crtc_state->savePIPESRC = REG_READ(map->src);
558 crtc_state->saveFP0 = REG_READ(map->fp0);
559 crtc_state->saveFP1 = REG_READ(map->fp1);
560 crtc_state->saveDPLL = REG_READ(map->dpll);
561 crtc_state->saveHTOTAL = REG_READ(map->htotal);
562 crtc_state->saveHBLANK = REG_READ(map->hblank);
563 crtc_state->saveHSYNC = REG_READ(map->hsync);
564 crtc_state->saveVTOTAL = REG_READ(map->vtotal);
565 crtc_state->saveVBLANK = REG_READ(map->vblank);
566 crtc_state->saveVSYNC = REG_READ(map->vsync);
567 crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
568
569 /* NOTE: DSPSIZE DSPPOS only for psb */
570 crtc_state->saveDSPSIZE = REG_READ(map->size);
571 crtc_state->saveDSPPOS = REG_READ(map->pos);
572
573 crtc_state->saveDSPBASE = REG_READ(map->base);
574
575 palette_reg = map->palette;
576 for (i = 0; i < 256; ++i)
577 crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
578}
579
580/**
581 * Restore HW states of given crtc
582 */
583void gma_crtc_restore(struct drm_crtc *crtc)
584{
585 struct drm_device *dev = crtc->dev;
586 struct drm_psb_private *dev_priv = dev->dev_private;
587 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
588 struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
589 const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
590 uint32_t palette_reg;
591 int i;
592
593 if (!crtc_state) {
594 dev_err(dev->dev, "No crtc state\n");
595 return;
596 }
597
598 if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
599 REG_WRITE(map->dpll,
600 crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
601 REG_READ(map->dpll);
602 udelay(150);
603 }
604
605 REG_WRITE(map->fp0, crtc_state->saveFP0);
606 REG_READ(map->fp0);
607
608 REG_WRITE(map->fp1, crtc_state->saveFP1);
609 REG_READ(map->fp1);
610
611 REG_WRITE(map->dpll, crtc_state->saveDPLL);
612 REG_READ(map->dpll);
613 udelay(150);
614
615 REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
616 REG_WRITE(map->hblank, crtc_state->saveHBLANK);
617 REG_WRITE(map->hsync, crtc_state->saveHSYNC);
618 REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
619 REG_WRITE(map->vblank, crtc_state->saveVBLANK);
620 REG_WRITE(map->vsync, crtc_state->saveVSYNC);
621 REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
622
623 REG_WRITE(map->size, crtc_state->saveDSPSIZE);
624 REG_WRITE(map->pos, crtc_state->saveDSPPOS);
625
626 REG_WRITE(map->src, crtc_state->savePIPESRC);
627 REG_WRITE(map->base, crtc_state->saveDSPBASE);
628 REG_WRITE(map->conf, crtc_state->savePIPECONF);
629
630 gma_wait_for_vblank(dev);
631
632 REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
633 REG_WRITE(map->base, crtc_state->saveDSPBASE);
634
635 gma_wait_for_vblank(dev);
636
637 palette_reg = map->palette;
638 for (i = 0; i < 256; ++i)
639 REG_WRITE(palette_reg + (i << 2), crtc_state->savePalette[i]);
640}
641
642void gma_encoder_prepare(struct drm_encoder *encoder)
643{
644 struct drm_encoder_helper_funcs *encoder_funcs =
645 encoder->helper_private;
646 /* lvds has its own version of prepare see psb_intel_lvds_prepare */
647 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
648}
649
650void gma_encoder_commit(struct drm_encoder *encoder)
651{
652 struct drm_encoder_helper_funcs *encoder_funcs =
653 encoder->helper_private;
654 /* lvds has its own version of commit see psb_intel_lvds_commit */
655 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
656}
657
658void gma_encoder_destroy(struct drm_encoder *encoder)
659{
660 struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
661
662 drm_encoder_cleanup(encoder);
663 kfree(intel_encoder);
664}
665
666/* Currently there is only a 1:1 mapping of encoders and connectors */
667struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
668{
669 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
670
671 return &gma_encoder->base;
672}
673
674void gma_connector_attach_encoder(struct gma_connector *connector,
675 struct gma_encoder *encoder)
676{
677 connector->encoder = encoder;
678 drm_mode_connector_attach_encoder(&connector->base,
679 &encoder->base);
680}
681
682#define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; }
683
684bool gma_pll_is_valid(struct drm_crtc *crtc,
685 const struct gma_limit_t *limit,
686 struct gma_clock_t *clock)
687{
688 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
689 GMA_PLL_INVALID("p1 out of range");
690 if (clock->p < limit->p.min || limit->p.max < clock->p)
691 GMA_PLL_INVALID("p out of range");
692 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
693 GMA_PLL_INVALID("m2 out of range");
694 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
695 GMA_PLL_INVALID("m1 out of range");
696 /* On CDV m1 is always 0 */
697 if (clock->m1 <= clock->m2 && clock->m1 != 0)
698 GMA_PLL_INVALID("m1 <= m2 && m1 != 0");
699 if (clock->m < limit->m.min || limit->m.max < clock->m)
700 GMA_PLL_INVALID("m out of range");
701 if (clock->n < limit->n.min || limit->n.max < clock->n)
702 GMA_PLL_INVALID("n out of range");
703 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
704 GMA_PLL_INVALID("vco out of range");
705 /* XXX: We may need to be checking "Dot clock"
706 * depending on the multiplier, connector, etc.,
707 * rather than just a single range.
708 */
709 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
710 GMA_PLL_INVALID("dot out of range");
711
712 return true;
713}
714
715bool gma_find_best_pll(const struct gma_limit_t *limit,
716 struct drm_crtc *crtc, int target, int refclk,
717 struct gma_clock_t *best_clock)
718{
719 struct drm_device *dev = crtc->dev;
720 const struct gma_clock_funcs *clock_funcs =
721 to_gma_crtc(crtc)->clock_funcs;
722 struct gma_clock_t clock;
723 int err = target;
724
725 if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
726 (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
727 /*
728 * For LVDS, if the panel is on, just rely on its current
729 * settings for dual-channel. We haven't figured out how to
730 * reliably set up different single/dual channel state, if we
731 * even can.
732 */
733 if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
734 LVDS_CLKB_POWER_UP)
735 clock.p2 = limit->p2.p2_fast;
736 else
737 clock.p2 = limit->p2.p2_slow;
738 } else {
739 if (target < limit->p2.dot_limit)
740 clock.p2 = limit->p2.p2_slow;
741 else
742 clock.p2 = limit->p2.p2_fast;
743 }
744
745 memset(best_clock, 0, sizeof(*best_clock));
746
747 /* m1 is always 0 on CDV so the outmost loop will run just once */
748 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
749 for (clock.m2 = limit->m2.min;
750 (clock.m2 < clock.m1 || clock.m1 == 0) &&
751 clock.m2 <= limit->m2.max; clock.m2++) {
752 for (clock.n = limit->n.min;
753 clock.n <= limit->n.max; clock.n++) {
754 for (clock.p1 = limit->p1.min;
755 clock.p1 <= limit->p1.max;
756 clock.p1++) {
757 int this_err;
758
759 clock_funcs->clock(refclk, &clock);
760
761 if (!clock_funcs->pll_is_valid(crtc,
762 limit, &clock))
763 continue;
764
765 this_err = abs(clock.dot - target);
766 if (this_err < err) {
767 *best_clock = clock;
768 err = this_err;
769 }
770 }
771 }
772 }
773 }
774
775 return err != target;
776}
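
gma_display.c collects the CRTC, encoder and PLL code that was previously duplicated per chip. The chip-specific display files are expected to point their DRM callback tables at these helpers; the tables below are an illustrative sketch of that wiring, not the driver's actual definitions (those stay in the per-chip files such as psb_intel_display.c).

static const struct drm_crtc_helper_funcs example_crtc_helper_funcs = {
	.dpms = gma_crtc_dpms,
	.mode_fixup = gma_crtc_mode_fixup,
	.mode_set_base = gma_pipe_set_base,
	.prepare = gma_crtc_prepare,
	.commit = gma_crtc_commit,
	.disable = gma_crtc_disable,
	/* .mode_set remains chip specific */
};

static const struct drm_crtc_funcs example_crtc_funcs = {
	.cursor_set = gma_crtc_cursor_set,
	.cursor_move = gma_crtc_cursor_move,
	.gamma_set = gma_crtc_gamma_set,
	.set_config = gma_crtc_set_config,
	.save = gma_crtc_save,
	.restore = gma_crtc_restore,
	.destroy = gma_crtc_destroy,
};
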
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
new file mode 100644
index 000000000000..78b9f986a6e5
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -0,0 +1,103 @@
1/*
2 * Copyright © 2006-2011 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 * Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
20 */
21
22#ifndef _GMA_DISPLAY_H_
23#define _GMA_DISPLAY_H_
24
25#include <linux/pm_runtime.h>
26
27struct gma_clock_t {
28 /* given values */
29 int n;
30 int m1, m2;
31 int p1, p2;
32 /* derived values */
33 int dot;
34 int vco;
35 int m;
36 int p;
37};
38
39struct gma_range_t {
40 int min, max;
41};
42
43struct gma_p2_t {
44 int dot_limit;
45 int p2_slow, p2_fast;
46};
47
48struct gma_limit_t {
49 struct gma_range_t dot, vco, n, m, m1, m2, p, p1;
50 struct gma_p2_t p2;
51 bool (*find_pll)(const struct gma_limit_t *, struct drm_crtc *,
52 int target, int refclk,
53 struct gma_clock_t *best_clock);
54};
55
56struct gma_clock_funcs {
57 void (*clock)(int refclk, struct gma_clock_t *clock);
58 const struct gma_limit_t *(*limit)(struct drm_crtc *crtc, int refclk);
59 bool (*pll_is_valid)(struct drm_crtc *crtc,
60 const struct gma_limit_t *limit,
61 struct gma_clock_t *clock);
62};
63
64/* Common pipe related functions */
65extern bool gma_pipe_has_type(struct drm_crtc *crtc, int type);
66extern void gma_wait_for_vblank(struct drm_device *dev);
67extern int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
68 struct drm_framebuffer *old_fb);
69extern int gma_crtc_cursor_set(struct drm_crtc *crtc,
70 struct drm_file *file_priv,
71 uint32_t handle,
72 uint32_t width, uint32_t height);
73extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
74extern void gma_crtc_load_lut(struct drm_crtc *crtc);
75extern void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
76 u16 *blue, u32 start, u32 size);
77extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode);
78extern bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
79 const struct drm_display_mode *mode,
80 struct drm_display_mode *adjusted_mode);
81extern void gma_crtc_prepare(struct drm_crtc *crtc);
82extern void gma_crtc_commit(struct drm_crtc *crtc);
83extern void gma_crtc_disable(struct drm_crtc *crtc);
84extern void gma_crtc_destroy(struct drm_crtc *crtc);
85extern int gma_crtc_set_config(struct drm_mode_set *set);
86
87extern void gma_crtc_save(struct drm_crtc *crtc);
88extern void gma_crtc_restore(struct drm_crtc *crtc);
89
90extern void gma_encoder_prepare(struct drm_encoder *encoder);
91extern void gma_encoder_commit(struct drm_encoder *encoder);
92extern void gma_encoder_destroy(struct drm_encoder *encoder);
93
94/* Common clock related functions */
95extern const struct gma_limit_t *gma_limit(struct drm_crtc *crtc, int refclk);
96extern void gma_clock(int refclk, struct gma_clock_t *clock);
97extern bool gma_pll_is_valid(struct drm_crtc *crtc,
98 const struct gma_limit_t *limit,
99 struct gma_clock_t *clock);
100extern bool gma_find_best_pll(const struct gma_limit_t *limit,
101 struct drm_crtc *crtc, int target, int refclk,
102 struct gma_clock_t *best_clock);
103#endif
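The chip-agnostic helpers above never pick divisor limits or validate clocks on their own; each chip family supplies a gma_clock_funcs instance. A plausible wiring for the Poulsbo family, using only names that appear elsewhere in this patch (the actual psb_clock_funcs definition is not shown in this hunk, so treat this strictly as a sketch):

/* Sketch only: ties the Poulsbo callbacks converted later in this patch
 * to the generic vtable declared above. */
const struct gma_clock_funcs psb_clock_funcs = {
	.clock        = psb_intel_clock,   /* derive dot/vco/m/p from divisors */
	.limit        = psb_intel_limit,   /* pick LVDS vs SDVO/DAC limit table */
	.pll_is_valid = gma_pll_is_valid,  /* generic range checks */
};
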
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 1f82183536a3..92babac362ec 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -196,37 +196,17 @@ void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
196 */ 196 */
197static int psb_gtt_attach_pages(struct gtt_range *gt) 197static int psb_gtt_attach_pages(struct gtt_range *gt)
198{ 198{
199 struct inode *inode; 199 struct page **pages;
200 struct address_space *mapping;
201 int i;
202 struct page *p;
203 int pages = gt->gem.size / PAGE_SIZE;
204 200
205 WARN_ON(gt->pages); 201 WARN_ON(gt->pages);
206 202
207 /* This is the shared memory object that backs the GEM resource */ 203 pages = drm_gem_get_pages(&gt->gem, 0);
208 inode = file_inode(gt->gem.filp); 204 if (IS_ERR(pages))
209 mapping = inode->i_mapping; 205 return PTR_ERR(pages);
210 206
211 gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL); 207 gt->pages = pages;
212 if (gt->pages == NULL)
213 return -ENOMEM;
214 gt->npage = pages;
215 208
216 for (i = 0; i < pages; i++) {
217 p = shmem_read_mapping_page(mapping, i);
218 if (IS_ERR(p))
219 goto err;
220 gt->pages[i] = p;
221 }
222 return 0; 209 return 0;
223
224err:
225 while (i--)
226 page_cache_release(gt->pages[i]);
227 kfree(gt->pages);
228 gt->pages = NULL;
229 return PTR_ERR(p);
230} 210}
231 211
232/** 212/**
@@ -240,13 +220,7 @@ err:
240 */ 220 */
241static void psb_gtt_detach_pages(struct gtt_range *gt) 221static void psb_gtt_detach_pages(struct gtt_range *gt)
242{ 222{
243 int i; 223 drm_gem_put_pages(&gt->gem, gt->pages, true, false);
244 for (i = 0; i < gt->npage; i++) {
245 /* FIXME: do we need to force dirty */
246 set_page_dirty(gt->pages[i]);
247 page_cache_release(gt->pages[i]);
248 }
249 kfree(gt->pages);
250 gt->pages = NULL; 224 gt->pages = NULL;
251} 225}
252 226
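The two gtt.c hunks drop the open-coded shmem page walk in favour of the DRM core page helpers: drm_gem_get_pages() pins and returns one struct page pointer per page backing the GEM object, and drm_gem_put_pages() optionally dirties the pages and releases them again. For reference, the helper signatures as used here (at this point in the tree):

/* DRM core helpers used above: */
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed);
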
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 3abf8315f57c..860a4ee9baaf 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -249,12 +249,11 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
249 struct drm_encoder *encoder = connector->encoder; 249 struct drm_encoder *encoder = connector->encoder;
250 250
251 if (!strcmp(property->name, "scaling mode") && encoder) { 251 if (!strcmp(property->name, "scaling mode") && encoder) {
252 struct psb_intel_crtc *psb_crtc = 252 struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
253 to_psb_intel_crtc(encoder->crtc);
254 bool centerechange; 253 bool centerechange;
255 uint64_t val; 254 uint64_t val;
256 255
257 if (!psb_crtc) 256 if (!gma_crtc)
258 goto set_prop_error; 257 goto set_prop_error;
259 258
260 switch (value) { 259 switch (value) {
@@ -281,11 +280,11 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
281 centerechange = (val == DRM_MODE_SCALE_NO_SCALE) || 280 centerechange = (val == DRM_MODE_SCALE_NO_SCALE) ||
282 (value == DRM_MODE_SCALE_NO_SCALE); 281 (value == DRM_MODE_SCALE_NO_SCALE);
283 282
284 if (psb_crtc->saved_mode.hdisplay != 0 && 283 if (gma_crtc->saved_mode.hdisplay != 0 &&
285 psb_crtc->saved_mode.vdisplay != 0) { 284 gma_crtc->saved_mode.vdisplay != 0) {
286 if (centerechange) { 285 if (centerechange) {
287 if (!drm_crtc_helper_set_mode(encoder->crtc, 286 if (!drm_crtc_helper_set_mode(encoder->crtc,
288 &psb_crtc->saved_mode, 287 &gma_crtc->saved_mode,
289 encoder->crtc->x, 288 encoder->crtc->x,
290 encoder->crtc->y, 289 encoder->crtc->y,
291 encoder->crtc->fb)) 290 encoder->crtc->fb))
@@ -294,8 +293,8 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
294 struct drm_encoder_helper_funcs *funcs = 293 struct drm_encoder_helper_funcs *funcs =
295 encoder->helper_private; 294 encoder->helper_private;
296 funcs->mode_set(encoder, 295 funcs->mode_set(encoder,
297 &psb_crtc->saved_mode, 296 &gma_crtc->saved_mode,
298 &psb_crtc->saved_adjusted_mode); 297 &gma_crtc->saved_adjusted_mode);
299 } 298 }
300 } 299 }
301 } else if (!strcmp(property->name, "backlight") && encoder) { 300 } else if (!strcmp(property->name, "backlight") && encoder) {
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
index 36eb0744841c..45d5af0546bf 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.h
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
@@ -227,7 +227,7 @@ enum {
227#define DSI_DPI_DISABLE_BTA BIT(3) 227#define DSI_DPI_DISABLE_BTA BIT(3)
228 228
229struct mdfld_dsi_connector { 229struct mdfld_dsi_connector {
230 struct psb_intel_connector base; 230 struct gma_connector base;
231 231
232 int pipe; 232 int pipe;
233 void *private; 233 void *private;
@@ -238,7 +238,7 @@ struct mdfld_dsi_connector {
238}; 238};
239 239
240struct mdfld_dsi_encoder { 240struct mdfld_dsi_encoder {
241 struct psb_intel_encoder base; 241 struct gma_encoder base;
242 void *private; 242 void *private;
243}; 243};
244 244
@@ -269,21 +269,21 @@ struct mdfld_dsi_config {
269static inline struct mdfld_dsi_connector *mdfld_dsi_connector( 269static inline struct mdfld_dsi_connector *mdfld_dsi_connector(
270 struct drm_connector *connector) 270 struct drm_connector *connector)
271{ 271{
272 struct psb_intel_connector *psb_connector; 272 struct gma_connector *gma_connector;
273 273
274 psb_connector = to_psb_intel_connector(connector); 274 gma_connector = to_gma_connector(connector);
275 275
276 return container_of(psb_connector, struct mdfld_dsi_connector, base); 276 return container_of(gma_connector, struct mdfld_dsi_connector, base);
277} 277}
278 278
279static inline struct mdfld_dsi_encoder *mdfld_dsi_encoder( 279static inline struct mdfld_dsi_encoder *mdfld_dsi_encoder(
280 struct drm_encoder *encoder) 280 struct drm_encoder *encoder)
281{ 281{
282 struct psb_intel_encoder *psb_encoder; 282 struct gma_encoder *gma_encoder;
283 283
284 psb_encoder = to_psb_intel_encoder(encoder); 284 gma_encoder = to_gma_encoder(encoder);
285 285
286 return container_of(psb_encoder, struct mdfld_dsi_encoder, base); 286 return container_of(gma_encoder, struct mdfld_dsi_encoder, base);
287} 287}
288 288
289static inline struct mdfld_dsi_config * 289static inline struct mdfld_dsi_config *
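The DSI structures keep wrapping a generic base object, so the conversion only renames the middle layer: casts go from the DRM object to the gma base via to_gma_connector()/to_gma_encoder(), then to the chip-specific wrapper via container_of(), exactly as the inline helpers in this hunk do. Illustrative only:

/* Illustrative upcast chain mirroring mdfld_dsi_connector() above. */
struct gma_connector *gma = to_gma_connector(connector);     /* drm -> gma   */
struct mdfld_dsi_connector *dsi =
	container_of(gma, struct mdfld_dsi_connector, base);  /* gma -> mdfld */
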
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 74485dc43945..321c00a944e9 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -23,7 +23,7 @@
23 23
24#include <drm/drmP.h> 24#include <drm/drmP.h>
25#include "psb_intel_reg.h" 25#include "psb_intel_reg.h"
26#include "psb_intel_display.h" 26#include "gma_display.h"
27#include "framebuffer.h" 27#include "framebuffer.h"
28#include "mdfld_output.h" 28#include "mdfld_output.h"
29#include "mdfld_dsi_output.h" 29#include "mdfld_dsi_output.h"
@@ -65,7 +65,7 @@ void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
65 } 65 }
66 66
67 /* FIXME JLIU7_PO */ 67 /* FIXME JLIU7_PO */
68 psb_intel_wait_for_vblank(dev); 68 gma_wait_for_vblank(dev);
69 return; 69 return;
70 70
71 /* Wait for for the pipe disable to take effect. */ 71 /* Wait for for the pipe disable to take effect. */
@@ -93,7 +93,7 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
93 } 93 }
94 94
95 /* FIXME JLIU7_PO */ 95 /* FIXME JLIU7_PO */
96 psb_intel_wait_for_vblank(dev); 96 gma_wait_for_vblank(dev);
97 return; 97 return;
98 98
99 /* Wait for for the pipe enable to take effect. */ 99 /* Wait for for the pipe enable to take effect. */
@@ -104,25 +104,6 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
104 } 104 }
105} 105}
106 106
107static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
108{
109 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
110 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
111}
112
113static void psb_intel_crtc_commit(struct drm_crtc *crtc)
114{
115 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
116 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
117}
118
119static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
120 const struct drm_display_mode *mode,
121 struct drm_display_mode *adjusted_mode)
122{
123 return true;
124}
125
126/** 107/**
127 * Return the pipe currently connected to the panel fitter, 108 * Return the pipe currently connected to the panel fitter,
128 * or -1 if the panel fitter is not present or not in use 109 * or -1 if the panel fitter is not present or not in use
@@ -184,9 +165,9 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
184{ 165{
185 struct drm_device *dev = crtc->dev; 166 struct drm_device *dev = crtc->dev;
186 struct drm_psb_private *dev_priv = dev->dev_private; 167 struct drm_psb_private *dev_priv = dev->dev_private;
187 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 168 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
188 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); 169 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
189 int pipe = psb_intel_crtc->pipe; 170 int pipe = gma_crtc->pipe;
190 const struct psb_offset *map = &dev_priv->regmap[pipe]; 171 const struct psb_offset *map = &dev_priv->regmap[pipe];
191 unsigned long start, offset; 172 unsigned long start, offset;
192 u32 dspcntr; 173 u32 dspcntr;
@@ -324,8 +305,8 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
324{ 305{
325 struct drm_device *dev = crtc->dev; 306 struct drm_device *dev = crtc->dev;
326 struct drm_psb_private *dev_priv = dev->dev_private; 307 struct drm_psb_private *dev_priv = dev->dev_private;
327 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 308 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
328 int pipe = psb_intel_crtc->pipe; 309 int pipe = gma_crtc->pipe;
329 const struct psb_offset *map = &dev_priv->regmap[pipe]; 310 const struct psb_offset *map = &dev_priv->regmap[pipe];
330 u32 pipeconf = dev_priv->pipeconf[pipe]; 311 u32 pipeconf = dev_priv->pipeconf[pipe];
331 u32 temp; 312 u32 temp;
@@ -436,7 +417,7 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
436 } 417 }
437 } 418 }
438 419
439 psb_intel_crtc_load_lut(crtc); 420 gma_crtc_load_lut(crtc);
440 421
441 /* Give the overlay scaler a chance to enable 422 /* Give the overlay scaler a chance to enable
442 if it's on this pipe */ 423 if it's on this pipe */
@@ -611,8 +592,8 @@ static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc)
611 struct drm_device *dev = crtc->dev; 592 struct drm_device *dev = crtc->dev;
612 struct drm_psb_private *dev_priv = dev->dev_private; 593 struct drm_psb_private *dev_priv = dev->dev_private;
613 594
614 if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI) 595 if (gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)
615 || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) { 596 || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) {
616 if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) 597 if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
617 limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19]; 598 limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19];
618 else if (ksel == KSEL_BYPASS_25) 599 else if (ksel == KSEL_BYPASS_25)
@@ -624,7 +605,7 @@ static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc)
624 (dev_priv->core_freq == 100 || 605 (dev_priv->core_freq == 100 ||
625 dev_priv->core_freq == 200)) 606 dev_priv->core_freq == 200))
626 limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100]; 607 limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100];
627 } else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) { 608 } else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
628 if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) 609 if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
629 limit = &mdfld_limits[MDFLD_LIMT_DPLL_19]; 610 limit = &mdfld_limits[MDFLD_LIMT_DPLL_19];
630 else if (ksel == KSEL_BYPASS_25) 611 else if (ksel == KSEL_BYPASS_25)
@@ -688,9 +669,9 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
688 struct drm_framebuffer *old_fb) 669 struct drm_framebuffer *old_fb)
689{ 670{
690 struct drm_device *dev = crtc->dev; 671 struct drm_device *dev = crtc->dev;
691 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 672 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
692 struct drm_psb_private *dev_priv = dev->dev_private; 673 struct drm_psb_private *dev_priv = dev->dev_private;
693 int pipe = psb_intel_crtc->pipe; 674 int pipe = gma_crtc->pipe;
694 const struct psb_offset *map = &dev_priv->regmap[pipe]; 675 const struct psb_offset *map = &dev_priv->regmap[pipe];
695 int refclk = 0; 676 int refclk = 0;
696 int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0, 677 int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0,
@@ -700,7 +681,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
700 u32 dpll = 0, fp = 0; 681 u32 dpll = 0, fp = 0;
701 bool is_mipi = false, is_mipi2 = false, is_hdmi = false; 682 bool is_mipi = false, is_mipi2 = false, is_hdmi = false;
702 struct drm_mode_config *mode_config = &dev->mode_config; 683 struct drm_mode_config *mode_config = &dev->mode_config;
703 struct psb_intel_encoder *psb_intel_encoder = NULL; 684 struct gma_encoder *gma_encoder = NULL;
704 uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN; 685 uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
705 struct drm_encoder *encoder; 686 struct drm_encoder *encoder;
706 struct drm_connector *connector; 687 struct drm_connector *connector;
@@ -749,9 +730,9 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
749 if (!gma_power_begin(dev, true)) 730 if (!gma_power_begin(dev, true))
750 return 0; 731 return 0;
751 732
752 memcpy(&psb_intel_crtc->saved_mode, mode, 733 memcpy(&gma_crtc->saved_mode, mode,
753 sizeof(struct drm_display_mode)); 734 sizeof(struct drm_display_mode));
754 memcpy(&psb_intel_crtc->saved_adjusted_mode, adjusted_mode, 735 memcpy(&gma_crtc->saved_adjusted_mode, adjusted_mode,
755 sizeof(struct drm_display_mode)); 736 sizeof(struct drm_display_mode));
756 737
757 list_for_each_entry(connector, &mode_config->connector_list, head) { 738 list_for_each_entry(connector, &mode_config->connector_list, head) {
@@ -766,9 +747,9 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
766 if (encoder->crtc != crtc) 747 if (encoder->crtc != crtc)
767 continue; 748 continue;
768 749
769 psb_intel_encoder = psb_intel_attached_encoder(connector); 750 gma_encoder = gma_attached_encoder(connector);
770 751
771 switch (psb_intel_encoder->type) { 752 switch (gma_encoder->type) {
772 case INTEL_OUTPUT_MIPI: 753 case INTEL_OUTPUT_MIPI:
773 is_mipi = true; 754 is_mipi = true;
774 break; 755 break;
@@ -819,7 +800,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
819 800
820 REG_WRITE(map->pos, 0); 801 REG_WRITE(map->pos, 0);
821 802
822 if (psb_intel_encoder) 803 if (gma_encoder)
823 drm_object_property_get_value(&connector->base, 804 drm_object_property_get_value(&connector->base,
824 dev->mode_config.scaling_mode_property, &scalingType); 805 dev->mode_config.scaling_mode_property, &scalingType);
825 806
@@ -1034,7 +1015,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
1034 1015
1035 /* Wait for for the pipe enable to take effect. */ 1016 /* Wait for for the pipe enable to take effect. */
1036 REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]); 1017 REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]);
1037 psb_intel_wait_for_vblank(dev); 1018 gma_wait_for_vblank(dev);
1038 1019
1039mrst_crtc_mode_set_exit: 1020mrst_crtc_mode_set_exit:
1040 1021
@@ -1045,10 +1026,10 @@ mrst_crtc_mode_set_exit:
1045 1026
1046const struct drm_crtc_helper_funcs mdfld_helper_funcs = { 1027const struct drm_crtc_helper_funcs mdfld_helper_funcs = {
1047 .dpms = mdfld_crtc_dpms, 1028 .dpms = mdfld_crtc_dpms,
1048 .mode_fixup = psb_intel_crtc_mode_fixup, 1029 .mode_fixup = gma_crtc_mode_fixup,
1049 .mode_set = mdfld_crtc_mode_set, 1030 .mode_set = mdfld_crtc_mode_set,
1050 .mode_set_base = mdfld__intel_pipe_set_base, 1031 .mode_set_base = mdfld__intel_pipe_set_base,
1051 .prepare = psb_intel_crtc_prepare, 1032 .prepare = gma_crtc_prepare,
1052 .commit = psb_intel_crtc_commit, 1033 .commit = gma_crtc_commit,
1053}; 1034};
1054 1035
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 3071526bc3c1..54c98962b73e 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -23,7 +23,7 @@
23#include "psb_drv.h" 23#include "psb_drv.h"
24#include "psb_intel_drv.h" 24#include "psb_intel_drv.h"
25#include "psb_intel_reg.h" 25#include "psb_intel_reg.h"
26#include "psb_intel_display.h" 26#include "gma_display.h"
27#include "power.h" 27#include "power.h"
28 28
29struct psb_intel_range_t { 29struct psb_intel_range_t {
@@ -88,8 +88,8 @@ static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
88 struct drm_device *dev = crtc->dev; 88 struct drm_device *dev = crtc->dev;
89 struct drm_psb_private *dev_priv = dev->dev_private; 89 struct drm_psb_private *dev_priv = dev->dev_private;
90 90
91 if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) 91 if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
92 || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) { 92 || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
93 switch (dev_priv->core_freq) { 93 switch (dev_priv->core_freq) {
94 case 100: 94 case 100:
95 limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L]; 95 limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L];
@@ -163,8 +163,8 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
163{ 163{
164 struct drm_device *dev = crtc->dev; 164 struct drm_device *dev = crtc->dev;
165 struct drm_psb_private *dev_priv = dev->dev_private; 165 struct drm_psb_private *dev_priv = dev->dev_private;
166 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 166 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
167 int pipe = psb_intel_crtc->pipe; 167 int pipe = gma_crtc->pipe;
168 const struct psb_offset *map = &dev_priv->regmap[pipe]; 168 const struct psb_offset *map = &dev_priv->regmap[pipe];
169 u32 temp; 169 u32 temp;
170 170
@@ -212,7 +212,7 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
212 REG_WRITE(map->base, REG_READ(map->base)); 212 REG_WRITE(map->base, REG_READ(map->base));
213 } 213 }
214 214
215 psb_intel_crtc_load_lut(crtc); 215 gma_crtc_load_lut(crtc);
216 216
217 /* Give the overlay scaler a chance to enable 217 /* Give the overlay scaler a chance to enable
218 if it's on this pipe */ 218 if it's on this pipe */
@@ -242,7 +242,7 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
242 REG_READ(map->conf); 242 REG_READ(map->conf);
243 } 243 }
244 /* Wait for for the pipe disable to take effect. */ 244 /* Wait for for the pipe disable to take effect. */
245 psb_intel_wait_for_vblank(dev); 245 gma_wait_for_vblank(dev);
246 246
247 temp = REG_READ(map->dpll); 247 temp = REG_READ(map->dpll);
248 if ((temp & DPLL_VCO_ENABLE) != 0) { 248 if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -292,9 +292,9 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
292 struct drm_framebuffer *old_fb) 292 struct drm_framebuffer *old_fb)
293{ 293{
294 struct drm_device *dev = crtc->dev; 294 struct drm_device *dev = crtc->dev;
295 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 295 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
296 struct drm_psb_private *dev_priv = dev->dev_private; 296 struct drm_psb_private *dev_priv = dev->dev_private;
297 int pipe = psb_intel_crtc->pipe; 297 int pipe = gma_crtc->pipe;
298 const struct psb_offset *map = &dev_priv->regmap[pipe]; 298 const struct psb_offset *map = &dev_priv->regmap[pipe];
299 int refclk = 0; 299 int refclk = 0;
300 struct oaktrail_clock_t clock; 300 struct oaktrail_clock_t clock;
@@ -303,7 +303,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
303 bool is_lvds = false; 303 bool is_lvds = false;
304 bool is_mipi = false; 304 bool is_mipi = false;
305 struct drm_mode_config *mode_config = &dev->mode_config; 305 struct drm_mode_config *mode_config = &dev->mode_config;
306 struct psb_intel_encoder *psb_intel_encoder = NULL; 306 struct gma_encoder *gma_encoder = NULL;
307 uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN; 307 uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
308 struct drm_connector *connector; 308 struct drm_connector *connector;
309 309
@@ -313,10 +313,10 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
313 if (!gma_power_begin(dev, true)) 313 if (!gma_power_begin(dev, true))
314 return 0; 314 return 0;
315 315
316 memcpy(&psb_intel_crtc->saved_mode, 316 memcpy(&gma_crtc->saved_mode,
317 mode, 317 mode,
318 sizeof(struct drm_display_mode)); 318 sizeof(struct drm_display_mode));
319 memcpy(&psb_intel_crtc->saved_adjusted_mode, 319 memcpy(&gma_crtc->saved_adjusted_mode,
320 adjusted_mode, 320 adjusted_mode,
321 sizeof(struct drm_display_mode)); 321 sizeof(struct drm_display_mode));
322 322
@@ -324,9 +324,9 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
324 if (!connector->encoder || connector->encoder->crtc != crtc) 324 if (!connector->encoder || connector->encoder->crtc != crtc)
325 continue; 325 continue;
326 326
327 psb_intel_encoder = psb_intel_attached_encoder(connector); 327 gma_encoder = gma_attached_encoder(connector);
328 328
329 switch (psb_intel_encoder->type) { 329 switch (gma_encoder->type) {
330 case INTEL_OUTPUT_LVDS: 330 case INTEL_OUTPUT_LVDS:
331 is_lvds = true; 331 is_lvds = true;
332 break; 332 break;
@@ -350,7 +350,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
350 ((mode->crtc_hdisplay - 1) << 16) | 350 ((mode->crtc_hdisplay - 1) << 16) |
351 (mode->crtc_vdisplay - 1)); 351 (mode->crtc_vdisplay - 1));
352 352
353 if (psb_intel_encoder) 353 if (gma_encoder)
354 drm_object_property_get_value(&connector->base, 354 drm_object_property_get_value(&connector->base,
355 dev->mode_config.scaling_mode_property, &scalingType); 355 dev->mode_config.scaling_mode_property, &scalingType);
356 356
@@ -484,31 +484,24 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
484 484
485 REG_WRITE(map->conf, pipeconf); 485 REG_WRITE(map->conf, pipeconf);
486 REG_READ(map->conf); 486 REG_READ(map->conf);
487 psb_intel_wait_for_vblank(dev); 487 gma_wait_for_vblank(dev);
488 488
489 REG_WRITE(map->cntr, dspcntr); 489 REG_WRITE(map->cntr, dspcntr);
490 psb_intel_wait_for_vblank(dev); 490 gma_wait_for_vblank(dev);
491 491
492oaktrail_crtc_mode_set_exit: 492oaktrail_crtc_mode_set_exit:
493 gma_power_end(dev); 493 gma_power_end(dev);
494 return 0; 494 return 0;
495} 495}
496 496
497static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc,
498 const struct drm_display_mode *mode,
499 struct drm_display_mode *adjusted_mode)
500{
501 return true;
502}
503
504static int oaktrail_pipe_set_base(struct drm_crtc *crtc, 497static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
505 int x, int y, struct drm_framebuffer *old_fb) 498 int x, int y, struct drm_framebuffer *old_fb)
506{ 499{
507 struct drm_device *dev = crtc->dev; 500 struct drm_device *dev = crtc->dev;
508 struct drm_psb_private *dev_priv = dev->dev_private; 501 struct drm_psb_private *dev_priv = dev->dev_private;
509 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 502 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
510 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); 503 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
511 int pipe = psb_intel_crtc->pipe; 504 int pipe = gma_crtc->pipe;
512 const struct psb_offset *map = &dev_priv->regmap[pipe]; 505 const struct psb_offset *map = &dev_priv->regmap[pipe];
513 unsigned long start, offset; 506 unsigned long start, offset;
514 507
@@ -563,24 +556,12 @@ pipe_set_base_exit:
563 return ret; 556 return ret;
564} 557}
565 558
566static void oaktrail_crtc_prepare(struct drm_crtc *crtc)
567{
568 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
569 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
570}
571
572static void oaktrail_crtc_commit(struct drm_crtc *crtc)
573{
574 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
575 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
576}
577
578const struct drm_crtc_helper_funcs oaktrail_helper_funcs = { 559const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
579 .dpms = oaktrail_crtc_dpms, 560 .dpms = oaktrail_crtc_dpms,
580 .mode_fixup = oaktrail_crtc_mode_fixup, 561 .mode_fixup = gma_crtc_mode_fixup,
581 .mode_set = oaktrail_crtc_mode_set, 562 .mode_set = oaktrail_crtc_mode_set,
582 .mode_set_base = oaktrail_pipe_set_base, 563 .mode_set_base = oaktrail_pipe_set_base,
583 .prepare = oaktrail_crtc_prepare, 564 .prepare = gma_crtc_prepare,
584 .commit = oaktrail_crtc_commit, 565 .commit = gma_crtc_commit,
585}; 566};
586 567
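The prepare/commit/mode_fixup stubs deleted in the mdfld and oaktrail hunks were identical across the gma500 chip variants: prepare shut the pipe down through the helper dpms hook, commit brought it back up, and mode_fixup simply returned true. Assuming the shared gma_crtc_* helpers added in gma_display.c earlier in this patch keep those bodies, they look like this sketch:

/* Sketch of the shared helpers, assuming they retain the removed per-chip bodies. */
void gma_crtc_prepare(struct drm_crtc *crtc)
{
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}

void gma_crtc_commit(struct drm_crtc *crtc)
{
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
}
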
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index f036f1fc161e..38153143ed8c 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -155,12 +155,6 @@ static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
155 HDMI_READ(HDMI_HCR); 155 HDMI_READ(HDMI_HCR);
156} 156}
157 157
158static void wait_for_vblank(struct drm_device *dev)
159{
160 /* Wait for 20ms, i.e. one cycle at 50hz. */
161 mdelay(20);
162}
163
164static unsigned int htotal_calculate(struct drm_display_mode *mode) 158static unsigned int htotal_calculate(struct drm_display_mode *mode)
165{ 159{
166 u32 htotal, new_crtc_htotal; 160 u32 htotal, new_crtc_htotal;
@@ -372,10 +366,10 @@ int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
372 366
373 REG_WRITE(PCH_PIPEBCONF, pipeconf); 367 REG_WRITE(PCH_PIPEBCONF, pipeconf);
374 REG_READ(PCH_PIPEBCONF); 368 REG_READ(PCH_PIPEBCONF);
375 wait_for_vblank(dev); 369 gma_wait_for_vblank(dev);
376 370
377 REG_WRITE(dspcntr_reg, dspcntr); 371 REG_WRITE(dspcntr_reg, dspcntr);
378 wait_for_vblank(dev); 372 gma_wait_for_vblank(dev);
379 373
380 gma_power_end(dev); 374 gma_power_end(dev);
381 375
@@ -459,7 +453,7 @@ void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
459 REG_READ(PCH_PIPEBCONF); 453 REG_READ(PCH_PIPEBCONF);
460 } 454 }
461 455
462 wait_for_vblank(dev); 456 gma_wait_for_vblank(dev);
463 457
464 /* Enable plane */ 458 /* Enable plane */
465 temp = REG_READ(DSPBCNTR); 459 temp = REG_READ(DSPBCNTR);
@@ -470,7 +464,7 @@ void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
470 REG_READ(DSPBSURF); 464 REG_READ(DSPBSURF);
471 } 465 }
472 466
473 psb_intel_crtc_load_lut(crtc); 467 gma_crtc_load_lut(crtc);
474 } 468 }
475 469
476 /* DSPARB */ 470 /* DSPARB */
@@ -615,16 +609,16 @@ static void oaktrail_hdmi_destroy(struct drm_connector *connector)
615static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = { 609static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = {
616 .dpms = oaktrail_hdmi_dpms, 610 .dpms = oaktrail_hdmi_dpms,
617 .mode_fixup = oaktrail_hdmi_mode_fixup, 611 .mode_fixup = oaktrail_hdmi_mode_fixup,
618 .prepare = psb_intel_encoder_prepare, 612 .prepare = gma_encoder_prepare,
619 .mode_set = oaktrail_hdmi_mode_set, 613 .mode_set = oaktrail_hdmi_mode_set,
620 .commit = psb_intel_encoder_commit, 614 .commit = gma_encoder_commit,
621}; 615};
622 616
623static const struct drm_connector_helper_funcs 617static const struct drm_connector_helper_funcs
624 oaktrail_hdmi_connector_helper_funcs = { 618 oaktrail_hdmi_connector_helper_funcs = {
625 .get_modes = oaktrail_hdmi_get_modes, 619 .get_modes = oaktrail_hdmi_get_modes,
626 .mode_valid = oaktrail_hdmi_mode_valid, 620 .mode_valid = oaktrail_hdmi_mode_valid,
627 .best_encoder = psb_intel_best_encoder, 621 .best_encoder = gma_best_encoder,
628}; 622};
629 623
630static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = { 624static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = {
@@ -646,21 +640,21 @@ static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = {
646void oaktrail_hdmi_init(struct drm_device *dev, 640void oaktrail_hdmi_init(struct drm_device *dev,
647 struct psb_intel_mode_device *mode_dev) 641 struct psb_intel_mode_device *mode_dev)
648{ 642{
649 struct psb_intel_encoder *psb_intel_encoder; 643 struct gma_encoder *gma_encoder;
650 struct psb_intel_connector *psb_intel_connector; 644 struct gma_connector *gma_connector;
651 struct drm_connector *connector; 645 struct drm_connector *connector;
652 struct drm_encoder *encoder; 646 struct drm_encoder *encoder;
653 647
654 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); 648 gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
655 if (!psb_intel_encoder) 649 if (!gma_encoder)
656 return; 650 return;
657 651
658 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); 652 gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
659 if (!psb_intel_connector) 653 if (!gma_connector)
660 goto failed_connector; 654 goto failed_connector;
661 655
662 connector = &psb_intel_connector->base; 656 connector = &gma_connector->base;
663 encoder = &psb_intel_encoder->base; 657 encoder = &gma_encoder->base;
664 drm_connector_init(dev, connector, 658 drm_connector_init(dev, connector,
665 &oaktrail_hdmi_connector_funcs, 659 &oaktrail_hdmi_connector_funcs,
666 DRM_MODE_CONNECTOR_DVID); 660 DRM_MODE_CONNECTOR_DVID);
@@ -669,10 +663,9 @@ void oaktrail_hdmi_init(struct drm_device *dev,
669 &oaktrail_hdmi_enc_funcs, 663 &oaktrail_hdmi_enc_funcs,
670 DRM_MODE_ENCODER_TMDS); 664 DRM_MODE_ENCODER_TMDS);
671 665
672 psb_intel_connector_attach_encoder(psb_intel_connector, 666 gma_connector_attach_encoder(gma_connector, gma_encoder);
673 psb_intel_encoder);
674 667
675 psb_intel_encoder->type = INTEL_OUTPUT_HDMI; 668 gma_encoder->type = INTEL_OUTPUT_HDMI;
676 drm_encoder_helper_add(encoder, &oaktrail_hdmi_helper_funcs); 669 drm_encoder_helper_add(encoder, &oaktrail_hdmi_helper_funcs);
677 drm_connector_helper_add(connector, &oaktrail_hdmi_connector_helper_funcs); 670 drm_connector_helper_add(connector, &oaktrail_hdmi_connector_helper_funcs);
678 671
@@ -685,7 +678,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
685 return; 678 return;
686 679
687failed_connector: 680failed_connector:
688 kfree(psb_intel_encoder); 681 kfree(gma_encoder);
689} 682}
690 683
691static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = { 684static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = {
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 325013a9c48c..e77d7214fca4 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -43,7 +43,7 @@
43 * Sets the power state for the panel. 43 * Sets the power state for the panel.
44 */ 44 */
45static void oaktrail_lvds_set_power(struct drm_device *dev, 45static void oaktrail_lvds_set_power(struct drm_device *dev,
46 struct psb_intel_encoder *psb_intel_encoder, 46 struct gma_encoder *gma_encoder,
47 bool on) 47 bool on)
48{ 48{
49 u32 pp_status; 49 u32 pp_status;
@@ -78,13 +78,12 @@ static void oaktrail_lvds_set_power(struct drm_device *dev,
78static void oaktrail_lvds_dpms(struct drm_encoder *encoder, int mode) 78static void oaktrail_lvds_dpms(struct drm_encoder *encoder, int mode)
79{ 79{
80 struct drm_device *dev = encoder->dev; 80 struct drm_device *dev = encoder->dev;
81 struct psb_intel_encoder *psb_intel_encoder = 81 struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
82 to_psb_intel_encoder(encoder);
83 82
84 if (mode == DRM_MODE_DPMS_ON) 83 if (mode == DRM_MODE_DPMS_ON)
85 oaktrail_lvds_set_power(dev, psb_intel_encoder, true); 84 oaktrail_lvds_set_power(dev, gma_encoder, true);
86 else 85 else
87 oaktrail_lvds_set_power(dev, psb_intel_encoder, false); 86 oaktrail_lvds_set_power(dev, gma_encoder, false);
88 87
89 /* XXX: We never power down the LVDS pairs. */ 88 /* XXX: We never power down the LVDS pairs. */
90} 89}
@@ -166,8 +165,7 @@ static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
166{ 165{
167 struct drm_device *dev = encoder->dev; 166 struct drm_device *dev = encoder->dev;
168 struct drm_psb_private *dev_priv = dev->dev_private; 167 struct drm_psb_private *dev_priv = dev->dev_private;
169 struct psb_intel_encoder *psb_intel_encoder = 168 struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
170 to_psb_intel_encoder(encoder);
171 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; 169 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
172 170
173 if (!gma_power_begin(dev, true)) 171 if (!gma_power_begin(dev, true))
@@ -176,7 +174,7 @@ static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
176 mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL); 174 mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
177 mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL & 175 mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
178 BACKLIGHT_DUTY_CYCLE_MASK); 176 BACKLIGHT_DUTY_CYCLE_MASK);
179 oaktrail_lvds_set_power(dev, psb_intel_encoder, false); 177 oaktrail_lvds_set_power(dev, gma_encoder, false);
180 gma_power_end(dev); 178 gma_power_end(dev);
181} 179}
182 180
@@ -203,14 +201,13 @@ static void oaktrail_lvds_commit(struct drm_encoder *encoder)
203{ 201{
204 struct drm_device *dev = encoder->dev; 202 struct drm_device *dev = encoder->dev;
205 struct drm_psb_private *dev_priv = dev->dev_private; 203 struct drm_psb_private *dev_priv = dev->dev_private;
206 struct psb_intel_encoder *psb_intel_encoder = 204 struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
207 to_psb_intel_encoder(encoder);
208 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; 205 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
209 206
210 if (mode_dev->backlight_duty_cycle == 0) 207 if (mode_dev->backlight_duty_cycle == 0)
211 mode_dev->backlight_duty_cycle = 208 mode_dev->backlight_duty_cycle =
212 oaktrail_lvds_get_max_backlight(dev); 209 oaktrail_lvds_get_max_backlight(dev);
213 oaktrail_lvds_set_power(dev, psb_intel_encoder, true); 210 oaktrail_lvds_set_power(dev, gma_encoder, true);
214} 211}
215 212
216static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = { 213static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = {
@@ -325,8 +322,8 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
325void oaktrail_lvds_init(struct drm_device *dev, 322void oaktrail_lvds_init(struct drm_device *dev,
326 struct psb_intel_mode_device *mode_dev) 323 struct psb_intel_mode_device *mode_dev)
327{ 324{
328 struct psb_intel_encoder *psb_intel_encoder; 325 struct gma_encoder *gma_encoder;
329 struct psb_intel_connector *psb_intel_connector; 326 struct gma_connector *gma_connector;
330 struct drm_connector *connector; 327 struct drm_connector *connector;
331 struct drm_encoder *encoder; 328 struct drm_encoder *encoder;
332 struct drm_psb_private *dev_priv = dev->dev_private; 329 struct drm_psb_private *dev_priv = dev->dev_private;
@@ -334,16 +331,16 @@ void oaktrail_lvds_init(struct drm_device *dev,
334 struct i2c_adapter *i2c_adap; 331 struct i2c_adapter *i2c_adap;
335 struct drm_display_mode *scan; /* *modes, *bios_mode; */ 332 struct drm_display_mode *scan; /* *modes, *bios_mode; */
336 333
337 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); 334 gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
338 if (!psb_intel_encoder) 335 if (!gma_encoder)
339 return; 336 return;
340 337
341 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); 338 gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
342 if (!psb_intel_connector) 339 if (!gma_connector)
343 goto failed_connector; 340 goto failed_connector;
344 341
345 connector = &psb_intel_connector->base; 342 connector = &gma_connector->base;
346 encoder = &psb_intel_encoder->base; 343 encoder = &gma_encoder->base;
347 dev_priv->is_lvds_on = true; 344 dev_priv->is_lvds_on = true;
348 drm_connector_init(dev, connector, 345 drm_connector_init(dev, connector,
349 &psb_intel_lvds_connector_funcs, 346 &psb_intel_lvds_connector_funcs,
@@ -352,9 +349,8 @@ void oaktrail_lvds_init(struct drm_device *dev,
352 drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, 349 drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
353 DRM_MODE_ENCODER_LVDS); 350 DRM_MODE_ENCODER_LVDS);
354 351
355 psb_intel_connector_attach_encoder(psb_intel_connector, 352 gma_connector_attach_encoder(gma_connector, gma_encoder);
356 psb_intel_encoder); 353 gma_encoder->type = INTEL_OUTPUT_LVDS;
357 psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
358 354
359 drm_encoder_helper_add(encoder, &oaktrail_lvds_helper_funcs); 355 drm_encoder_helper_add(encoder, &oaktrail_lvds_helper_funcs);
360 drm_connector_helper_add(connector, 356 drm_connector_helper_add(connector,
@@ -434,15 +430,15 @@ out:
434 430
435failed_find: 431failed_find:
436 dev_dbg(dev->dev, "No LVDS modes found, disabling.\n"); 432 dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
437 if (psb_intel_encoder->ddc_bus) 433 if (gma_encoder->ddc_bus)
438 psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus); 434 psb_intel_i2c_destroy(gma_encoder->ddc_bus);
439 435
440/* failed_ddc: */ 436/* failed_ddc: */
441 437
442 drm_encoder_cleanup(encoder); 438 drm_encoder_cleanup(encoder);
443 drm_connector_cleanup(connector); 439 drm_connector_cleanup(connector);
444 kfree(psb_intel_connector); 440 kfree(gma_connector);
445failed_connector: 441failed_connector:
446 kfree(psb_intel_encoder); 442 kfree(gma_encoder);
447} 443}
448 444
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index f6f534b4197e..697678619bd1 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -25,7 +25,7 @@
25#include "psb_reg.h" 25#include "psb_reg.h"
26#include "psb_intel_reg.h" 26#include "psb_intel_reg.h"
27#include "intel_bios.h" 27#include "intel_bios.h"
28 28#include "psb_device.h"
29 29
30static int psb_output_init(struct drm_device *dev) 30static int psb_output_init(struct drm_device *dev)
31{ 31{
@@ -380,6 +380,7 @@ const struct psb_ops psb_chip_ops = {
380 380
381 .crtc_helper = &psb_intel_helper_funcs, 381 .crtc_helper = &psb_intel_helper_funcs,
382 .crtc_funcs = &psb_intel_crtc_funcs, 382 .crtc_funcs = &psb_intel_crtc_funcs,
383 .clock_funcs = &psb_clock_funcs,
383 384
384 .output_init = psb_output_init, 385 .output_init = psb_output_init,
385 386
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.h b/drivers/gpu/drm/gma500/psb_device.h
index 3724b971e91c..35e304c7f85a 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.h
+++ b/drivers/gpu/drm/gma500/psb_device.h
@@ -1,4 +1,6 @@
1/* copyright (c) 2008, Intel Corporation 1/*
2 * Copyright © 2013 Patrik Jakobsson
3 * Copyright © 2011 Intel Corporation
2 * 4 *
3 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
4 * under the terms and conditions of the GNU General Public License, 6 * under the terms and conditions of the GNU General Public License,
@@ -12,14 +14,11 @@
12 * You should have received a copy of the GNU General Public License along with 14 * You should have received a copy of the GNU General Public License along with
13 * this program; if not, write to the Free Software Foundation, Inc., 15 * this program; if not, write to the Free Software Foundation, Inc.,
14 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
15 *
16 * Authors:
17 * Eric Anholt <eric@anholt.net>
18 */ 17 */
19 18
20#ifndef _INTEL_DISPLAY_H_ 19#ifndef _PSB_DEVICE_H_
21#define _INTEL_DISPLAY_H_ 20#define _PSB_DEVICE_H_
22 21
23bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type); 22extern const struct gma_clock_funcs psb_clock_funcs;
24 23
25#endif 24#endif
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index bddea5807442..fcb4e9ff1f20 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -131,7 +131,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
131static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data, 131static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
132 struct drm_file *file_priv); 132 struct drm_file *file_priv);
133 133
134static struct drm_ioctl_desc psb_ioctls[] = { 134static const struct drm_ioctl_desc psb_ioctls[] = {
135 DRM_IOCTL_DEF_DRV(GMA_ADB, psb_adb_ioctl, DRM_AUTH), 135 DRM_IOCTL_DEF_DRV(GMA_ADB, psb_adb_ioctl, DRM_AUTH),
136 DRM_IOCTL_DEF_DRV(GMA_MODE_OPERATION, psb_mode_operation_ioctl, 136 DRM_IOCTL_DEF_DRV(GMA_MODE_OPERATION, psb_mode_operation_ioctl,
137 DRM_AUTH), 137 DRM_AUTH),
@@ -270,7 +270,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
270 unsigned long irqflags; 270 unsigned long irqflags;
271 int ret = -ENOMEM; 271 int ret = -ENOMEM;
272 struct drm_connector *connector; 272 struct drm_connector *connector;
273 struct psb_intel_encoder *psb_intel_encoder; 273 struct gma_encoder *gma_encoder;
274 274
275 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); 275 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
276 if (dev_priv == NULL) 276 if (dev_priv == NULL)
@@ -372,9 +372,9 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
372 /* Only add backlight support if we have LVDS output */ 372 /* Only add backlight support if we have LVDS output */
373 list_for_each_entry(connector, &dev->mode_config.connector_list, 373 list_for_each_entry(connector, &dev->mode_config.connector_list,
374 head) { 374 head) {
375 psb_intel_encoder = psb_intel_attached_encoder(connector); 375 gma_encoder = gma_attached_encoder(connector);
376 376
377 switch (psb_intel_encoder->type) { 377 switch (gma_encoder->type) {
378 case INTEL_OUTPUT_LVDS: 378 case INTEL_OUTPUT_LVDS:
379 case INTEL_OUTPUT_MIPI: 379 case INTEL_OUTPUT_MIPI:
380 ret = gma_backlight_init(dev); 380 ret = gma_backlight_init(dev);
@@ -441,7 +441,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
441 struct drm_mode_object *obj; 441 struct drm_mode_object *obj;
442 struct drm_crtc *crtc; 442 struct drm_crtc *crtc;
443 struct drm_connector *connector; 443 struct drm_connector *connector;
444 struct psb_intel_crtc *psb_intel_crtc; 444 struct gma_crtc *gma_crtc;
445 int i = 0; 445 int i = 0;
446 int32_t obj_id; 446 int32_t obj_id;
447 447
@@ -454,12 +454,12 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
454 454
455 connector = obj_to_connector(obj); 455 connector = obj_to_connector(obj);
456 crtc = connector->encoder->crtc; 456 crtc = connector->encoder->crtc;
457 psb_intel_crtc = to_psb_intel_crtc(crtc); 457 gma_crtc = to_gma_crtc(crtc);
458 458
459 for (i = 0; i < 256; i++) 459 for (i = 0; i < 256; i++)
460 psb_intel_crtc->lut_adj[i] = lut_arg->lut[i]; 460 gma_crtc->lut_adj[i] = lut_arg->lut[i];
461 461
462 psb_intel_crtc_load_lut(crtc); 462 gma_crtc_load_lut(crtc);
463 463
464 return 0; 464 return 0;
465} 465}
@@ -622,13 +622,12 @@ static const struct file_operations psb_gem_fops = {
622 .unlocked_ioctl = psb_unlocked_ioctl, 622 .unlocked_ioctl = psb_unlocked_ioctl,
623 .mmap = drm_gem_mmap, 623 .mmap = drm_gem_mmap,
624 .poll = drm_poll, 624 .poll = drm_poll,
625 .fasync = drm_fasync,
626 .read = drm_read, 625 .read = drm_read,
627}; 626};
628 627
629static struct drm_driver driver = { 628static struct drm_driver driver = {
630 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \ 629 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
631 DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM , 630 DRIVER_MODESET | DRIVER_GEM ,
632 .load = psb_driver_load, 631 .load = psb_driver_load,
633 .unload = psb_driver_unload, 632 .unload = psb_driver_unload,
634 633
@@ -652,7 +651,7 @@ static struct drm_driver driver = {
652 .gem_vm_ops = &psb_gem_vm_ops, 651 .gem_vm_ops = &psb_gem_vm_ops,
653 .dumb_create = psb_gem_dumb_create, 652 .dumb_create = psb_gem_dumb_create,
654 .dumb_map_offset = psb_gem_dumb_map_gtt, 653 .dumb_map_offset = psb_gem_dumb_map_gtt,
655 .dumb_destroy = psb_gem_dumb_destroy, 654 .dumb_destroy = drm_gem_dumb_destroy,
656 .fops = &psb_gem_fops, 655 .fops = &psb_gem_fops,
657 .name = DRIVER_NAME, 656 .name = DRIVER_NAME,
658 .desc = DRIVER_DESC, 657 .desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 6053b8abcd12..4535ac7708f8 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -27,6 +27,7 @@
27#include <drm/gma_drm.h> 27#include <drm/gma_drm.h>
28#include "psb_reg.h" 28#include "psb_reg.h"
29#include "psb_intel_drv.h" 29#include "psb_intel_drv.h"
30#include "gma_display.h"
30#include "intel_bios.h" 31#include "intel_bios.h"
31#include "gtt.h" 32#include "gtt.h"
32#include "power.h" 33#include "power.h"
@@ -46,6 +47,7 @@ enum {
46#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108) 47#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
47#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100) 48#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
48#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130) 49#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
50#define IS_CDV(dev) (((dev)->pci_device & 0xfff0) == 0x0be0)
49 51
50/* 52/*
51 * Driver definitions 53 * Driver definitions
@@ -675,6 +677,7 @@ struct psb_ops {
675 /* Sub functions */ 677 /* Sub functions */
676 struct drm_crtc_helper_funcs const *crtc_helper; 678 struct drm_crtc_helper_funcs const *crtc_helper;
677 struct drm_crtc_funcs const *crtc_funcs; 679 struct drm_crtc_funcs const *crtc_funcs;
680 const struct gma_clock_funcs *clock_funcs;
678 681
679 /* Setup hooks */ 682 /* Setup hooks */
680 int (*chip_setup)(struct drm_device *dev); 683 int (*chip_setup)(struct drm_device *dev);
@@ -692,6 +695,8 @@ struct psb_ops {
692 int (*restore_regs)(struct drm_device *dev); 695 int (*restore_regs)(struct drm_device *dev);
693 int (*power_up)(struct drm_device *dev); 696 int (*power_up)(struct drm_device *dev);
694 int (*power_down)(struct drm_device *dev); 697 int (*power_down)(struct drm_device *dev);
698 void (*update_wm)(struct drm_device *dev, struct drm_crtc *crtc);
699 void (*disable_sr)(struct drm_device *dev);
695 700
696 void (*lvds_bl_power)(struct drm_device *dev, bool on); 701 void (*lvds_bl_power)(struct drm_device *dev, bool on);
697#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE 702#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
@@ -838,8 +843,6 @@ extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
838 struct drm_file *file); 843 struct drm_file *file);
839extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev, 844extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
840 struct drm_mode_create_dumb *args); 845 struct drm_mode_create_dumb *args);
841extern int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
842 uint32_t handle);
843extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev, 846extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
844 uint32_t handle, uint64_t *offset); 847 uint32_t handle, uint64_t *offset);
845extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 848extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
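Besides the per-chip clock_funcs pointer, psb_ops gains optional update_wm and disable_sr hooks; chips that do not need them can leave them NULL, so callers would check before dispatching. A hypothetical call site, assuming the driver's usual dev_priv->ops dispatch (the real users are not part of this hunk):

/* Hypothetical caller; update_wm/disable_sr are optional per-chip hooks. */
struct drm_psb_private *dev_priv = dev->dev_private;

if (dev_priv->ops->update_wm)
	dev_priv->ops->update_wm(dev, crtc);
if (dev_priv->ops->disable_sr)
	dev_priv->ops->disable_sr(dev);
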
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 6666493789d1..97f8a03fee43 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -19,46 +19,19 @@
19 */ 19 */
20 20
21#include <linux/i2c.h> 21#include <linux/i2c.h>
22#include <linux/pm_runtime.h>
23 22
24#include <drm/drmP.h> 23#include <drm/drmP.h>
25#include "framebuffer.h" 24#include "framebuffer.h"
26#include "psb_drv.h" 25#include "psb_drv.h"
27#include "psb_intel_drv.h" 26#include "psb_intel_drv.h"
28#include "psb_intel_reg.h" 27#include "psb_intel_reg.h"
29#include "psb_intel_display.h" 28#include "gma_display.h"
30#include "power.h" 29#include "power.h"
31 30
32struct psb_intel_clock_t {
33 /* given values */
34 int n;
35 int m1, m2;
36 int p1, p2;
37 /* derived values */
38 int dot;
39 int vco;
40 int m;
41 int p;
42};
43
44struct psb_intel_range_t {
45 int min, max;
46};
47
48struct psb_intel_p2_t {
49 int dot_limit;
50 int p2_slow, p2_fast;
51};
52
53struct psb_intel_limit_t {
54 struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
55 struct psb_intel_p2_t p2;
56};
57
58#define INTEL_LIMIT_I9XX_SDVO_DAC 0 31#define INTEL_LIMIT_I9XX_SDVO_DAC 0
59#define INTEL_LIMIT_I9XX_LVDS 1 32#define INTEL_LIMIT_I9XX_LVDS 1
60 33
61static const struct psb_intel_limit_t psb_intel_limits[] = { 34static const struct gma_limit_t psb_intel_limits[] = {
62 { /* INTEL_LIMIT_I9XX_SDVO_DAC */ 35 { /* INTEL_LIMIT_I9XX_SDVO_DAC */
63 .dot = {.min = 20000, .max = 400000}, 36 .dot = {.min = 20000, .max = 400000},
64 .vco = {.min = 1400000, .max = 2800000}, 37 .vco = {.min = 1400000, .max = 2800000},
@@ -68,8 +41,8 @@ static const struct psb_intel_limit_t psb_intel_limits[] = {
68 .m2 = {.min = 3, .max = 7}, 41 .m2 = {.min = 3, .max = 7},
69 .p = {.min = 5, .max = 80}, 42 .p = {.min = 5, .max = 80},
70 .p1 = {.min = 1, .max = 8}, 43 .p1 = {.min = 1, .max = 8},
71 .p2 = {.dot_limit = 200000, 44 .p2 = {.dot_limit = 200000, .p2_slow = 10, .p2_fast = 5},
72 .p2_slow = 10, .p2_fast = 5}, 45 .find_pll = gma_find_best_pll,
73 }, 46 },
74 { /* INTEL_LIMIT_I9XX_LVDS */ 47 { /* INTEL_LIMIT_I9XX_LVDS */
75 .dot = {.min = 20000, .max = 400000}, 48 .dot = {.min = 20000, .max = 400000},
@@ -83,23 +56,24 @@ static const struct psb_intel_limit_t psb_intel_limits[] = {
83 /* The single-channel range is 25-112Mhz, and dual-channel 56 /* The single-channel range is 25-112Mhz, and dual-channel
84 * is 80-224Mhz. Prefer single channel as much as possible. 57 * is 80-224Mhz. Prefer single channel as much as possible.
85 */ 58 */
86 .p2 = {.dot_limit = 112000, 59 .p2 = {.dot_limit = 112000, .p2_slow = 14, .p2_fast = 7},
87 .p2_slow = 14, .p2_fast = 7}, 60 .find_pll = gma_find_best_pll,
88 }, 61 },
89}; 62};
90 63
91static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc) 64static const struct gma_limit_t *psb_intel_limit(struct drm_crtc *crtc,
65 int refclk)
92{ 66{
93 const struct psb_intel_limit_t *limit; 67 const struct gma_limit_t *limit;
94 68
95 if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 69 if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
96 limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS]; 70 limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
97 else 71 else
98 limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; 72 limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
99 return limit; 73 return limit;
100} 74}
101 75
102static void psb_intel_clock(int refclk, struct psb_intel_clock_t *clock) 76static void psb_intel_clock(int refclk, struct gma_clock_t *clock)
103{ 77{
104 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); 78 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
105 clock->p = clock->p1 * clock->p2; 79 clock->p = clock->p1 * clock->p2;
@@ -108,353 +82,6 @@ static void psb_intel_clock(int refclk, struct psb_intel_clock_t *clock)
108} 82}
109 83
110/** 84/**
111 * Returns whether any output on the specified pipe is of the specified type
112 */
113bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
114{
115 struct drm_device *dev = crtc->dev;
116 struct drm_mode_config *mode_config = &dev->mode_config;
117 struct drm_connector *l_entry;
118
119 list_for_each_entry(l_entry, &mode_config->connector_list, head) {
120 if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
121 struct psb_intel_encoder *psb_intel_encoder =
122 psb_intel_attached_encoder(l_entry);
123 if (psb_intel_encoder->type == type)
124 return true;
125 }
126 }
127 return false;
128}
129
130#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
131/**
132 * Returns whether the given set of divisors are valid for a given refclk with
133 * the given connectors.
134 */
135
136static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
137 struct psb_intel_clock_t *clock)
138{
139 const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
140
141 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
142 INTELPllInvalid("p1 out of range\n");
143 if (clock->p < limit->p.min || limit->p.max < clock->p)
144 INTELPllInvalid("p out of range\n");
145 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
146 INTELPllInvalid("m2 out of range\n");
147 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
148 INTELPllInvalid("m1 out of range\n");
149 if (clock->m1 <= clock->m2)
150 INTELPllInvalid("m1 <= m2\n");
151 if (clock->m < limit->m.min || limit->m.max < clock->m)
152 INTELPllInvalid("m out of range\n");
153 if (clock->n < limit->n.min || limit->n.max < clock->n)
154 INTELPllInvalid("n out of range\n");
155 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
156 INTELPllInvalid("vco out of range\n");
157 /* XXX: We may need to be checking "Dot clock"
158 * depending on the multiplier, connector, etc.,
159 * rather than just a single range.
160 */
161 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
162 INTELPllInvalid("dot out of range\n");
163
164 return true;
165}
166
167/**
168 * Returns a set of divisors for the desired target clock with the given
169 * refclk, or FALSE. The returned values represent the clock equation:
170 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
171 */
172static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
173 int refclk,
174 struct psb_intel_clock_t *best_clock)
175{
176 struct drm_device *dev = crtc->dev;
177 struct psb_intel_clock_t clock;
178 const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
179 int err = target;
180
181 if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
182 (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
183 /*
184 * For LVDS, if the panel is on, just rely on its current
185 * settings for dual-channel. We haven't figured out how to
186 * reliably set up different single/dual channel state, if we
187 * even can.
188 */
189 if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
190 LVDS_CLKB_POWER_UP)
191 clock.p2 = limit->p2.p2_fast;
192 else
193 clock.p2 = limit->p2.p2_slow;
194 } else {
195 if (target < limit->p2.dot_limit)
196 clock.p2 = limit->p2.p2_slow;
197 else
198 clock.p2 = limit->p2.p2_fast;
199 }
200
201 memset(best_clock, 0, sizeof(*best_clock));
202
203 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
204 clock.m1++) {
205 for (clock.m2 = limit->m2.min;
206 clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
207 clock.m2++) {
208 for (clock.n = limit->n.min;
209 clock.n <= limit->n.max; clock.n++) {
210 for (clock.p1 = limit->p1.min;
211 clock.p1 <= limit->p1.max;
212 clock.p1++) {
213 int this_err;
214
215 psb_intel_clock(refclk, &clock);
216
217 if (!psb_intel_PLL_is_valid
218 (crtc, &clock))
219 continue;
220
221 this_err = abs(clock.dot - target);
222 if (this_err < err) {
223 *best_clock = clock;
224 err = this_err;
225 }
226 }
227 }
228 }
229 }
230
231 return err != target;
232}
233
234void psb_intel_wait_for_vblank(struct drm_device *dev)
235{
236 /* Wait for 20ms, i.e. one cycle at 50hz. */
237 mdelay(20);
238}
239
240static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
241 int x, int y, struct drm_framebuffer *old_fb)
242{
243 struct drm_device *dev = crtc->dev;
244 struct drm_psb_private *dev_priv = dev->dev_private;
245 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
246 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
247 int pipe = psb_intel_crtc->pipe;
248 const struct psb_offset *map = &dev_priv->regmap[pipe];
249 unsigned long start, offset;
250 u32 dspcntr;
251 int ret = 0;
252
253 if (!gma_power_begin(dev, true))
254 return 0;
255
256 /* no fb bound */
257 if (!crtc->fb) {
258 dev_dbg(dev->dev, "No FB bound\n");
259 goto psb_intel_pipe_cleaner;
260 }
261
262 /* We are displaying this buffer, make sure it is actually loaded
263 into the GTT */
264 ret = psb_gtt_pin(psbfb->gtt);
265 if (ret < 0)
266 goto psb_intel_pipe_set_base_exit;
267 start = psbfb->gtt->offset;
268
269 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
270
271 REG_WRITE(map->stride, crtc->fb->pitches[0]);
272
273 dspcntr = REG_READ(map->cntr);
274 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
275
276 switch (crtc->fb->bits_per_pixel) {
277 case 8:
278 dspcntr |= DISPPLANE_8BPP;
279 break;
280 case 16:
281 if (crtc->fb->depth == 15)
282 dspcntr |= DISPPLANE_15_16BPP;
283 else
284 dspcntr |= DISPPLANE_16BPP;
285 break;
286 case 24:
287 case 32:
288 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
289 break;
290 default:
291 dev_err(dev->dev, "Unknown color depth\n");
292 ret = -EINVAL;
293 psb_gtt_unpin(psbfb->gtt);
294 goto psb_intel_pipe_set_base_exit;
295 }
296 REG_WRITE(map->cntr, dspcntr);
297
298 REG_WRITE(map->base, start + offset);
299 REG_READ(map->base);
300
301psb_intel_pipe_cleaner:
302 /* If there was a previous display we can now unpin it */
303 if (old_fb)
304 psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
305
306psb_intel_pipe_set_base_exit:
307 gma_power_end(dev);
308 return ret;
309}
310
311/**
312 * Sets the power management mode of the pipe and plane.
313 *
314 * This code should probably grow support for turning the cursor off and back
315 * on appropriately at the same time as we're turning the pipe off/on.
316 */
317static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
318{
319 struct drm_device *dev = crtc->dev;
320 struct drm_psb_private *dev_priv = dev->dev_private;
321 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
322 int pipe = psb_intel_crtc->pipe;
323 const struct psb_offset *map = &dev_priv->regmap[pipe];
324 u32 temp;
325
326 /* XXX: When our outputs are all unaware of DPMS modes other than off
327 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
328 */
329 switch (mode) {
330 case DRM_MODE_DPMS_ON:
331 case DRM_MODE_DPMS_STANDBY:
332 case DRM_MODE_DPMS_SUSPEND:
333 /* Enable the DPLL */
334 temp = REG_READ(map->dpll);
335 if ((temp & DPLL_VCO_ENABLE) == 0) {
336 REG_WRITE(map->dpll, temp);
337 REG_READ(map->dpll);
338 /* Wait for the clocks to stabilize. */
339 udelay(150);
340 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
341 REG_READ(map->dpll);
342 /* Wait for the clocks to stabilize. */
343 udelay(150);
344 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
345 REG_READ(map->dpll);
346 /* Wait for the clocks to stabilize. */
347 udelay(150);
348 }
349
350 /* Enable the pipe */
351 temp = REG_READ(map->conf);
352 if ((temp & PIPEACONF_ENABLE) == 0)
353 REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
354
355 /* Enable the plane */
356 temp = REG_READ(map->cntr);
357 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
358 REG_WRITE(map->cntr,
359 temp | DISPLAY_PLANE_ENABLE);
360 /* Flush the plane changes */
361 REG_WRITE(map->base, REG_READ(map->base));
362 }
363
364 psb_intel_crtc_load_lut(crtc);
365
366 /* Give the overlay scaler a chance to enable
367 * if it's on this pipe */
368 /* psb_intel_crtc_dpms_video(crtc, true); TODO */
369 break;
370 case DRM_MODE_DPMS_OFF:
371 /* Give the overlay scaler a chance to disable
372 * if it's on this pipe */
373 /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
374
375 /* Disable the VGA plane that we never use */
376 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
377
378 /* Disable display plane */
379 temp = REG_READ(map->cntr);
380 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
381 REG_WRITE(map->cntr,
382 temp & ~DISPLAY_PLANE_ENABLE);
383 /* Flush the plane changes */
384 REG_WRITE(map->base, REG_READ(map->base));
385 REG_READ(map->base);
386 }
387
388 /* Next, disable display pipes */
389 temp = REG_READ(map->conf);
390 if ((temp & PIPEACONF_ENABLE) != 0) {
391 REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
392 REG_READ(map->conf);
393 }
394
395 /* Wait for vblank for the disable to take effect. */
396 psb_intel_wait_for_vblank(dev);
397
398 temp = REG_READ(map->dpll);
399 if ((temp & DPLL_VCO_ENABLE) != 0) {
400 REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
401 REG_READ(map->dpll);
402 }
403
404 /* Wait for the clocks to turn off. */
405 udelay(150);
406 break;
407 }
408
409 /*Set FIFO Watermarks*/
410 REG_WRITE(DSPARB, 0x3F3E);
411}
412
413static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
414{
415 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
416 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
417}
418
419static void psb_intel_crtc_commit(struct drm_crtc *crtc)
420{
421 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
422 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
423}
424
425void psb_intel_encoder_prepare(struct drm_encoder *encoder)
426{
427 struct drm_encoder_helper_funcs *encoder_funcs =
428 encoder->helper_private;
429 /* lvds has its own version of prepare see psb_intel_lvds_prepare */
430 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
431}
432
433void psb_intel_encoder_commit(struct drm_encoder *encoder)
434{
435 struct drm_encoder_helper_funcs *encoder_funcs =
436 encoder->helper_private;
437 /* lvds has its own version of commit see psb_intel_lvds_commit */
438 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
439}
440
441void psb_intel_encoder_destroy(struct drm_encoder *encoder)
442{
443 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
444
445 drm_encoder_cleanup(encoder);
446 kfree(intel_encoder);
447}
448
449static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
450 const struct drm_display_mode *mode,
451 struct drm_display_mode *adjusted_mode)
452{
453 return true;
454}
455
456
457/**
458 * Return the pipe currently connected to the panel fitter, 85 * Return the pipe currently connected to the panel fitter,
459 * or -1 if the panel fitter is not present or not in use 86 * or -1 if the panel fitter is not present or not in use
460 */ 87 */
@@ -479,17 +106,18 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
479{ 106{
480 struct drm_device *dev = crtc->dev; 107 struct drm_device *dev = crtc->dev;
481 struct drm_psb_private *dev_priv = dev->dev_private; 108 struct drm_psb_private *dev_priv = dev->dev_private;
482 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 109 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
483 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 110 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
484 int pipe = psb_intel_crtc->pipe; 111 int pipe = gma_crtc->pipe;
485 const struct psb_offset *map = &dev_priv->regmap[pipe]; 112 const struct psb_offset *map = &dev_priv->regmap[pipe];
486 int refclk; 113 int refclk;
487 struct psb_intel_clock_t clock; 114 struct gma_clock_t clock;
488 u32 dpll = 0, fp = 0, dspcntr, pipeconf; 115 u32 dpll = 0, fp = 0, dspcntr, pipeconf;
489 bool ok, is_sdvo = false; 116 bool ok, is_sdvo = false;
490 bool is_lvds = false, is_tv = false; 117 bool is_lvds = false, is_tv = false;
491 struct drm_mode_config *mode_config = &dev->mode_config; 118 struct drm_mode_config *mode_config = &dev->mode_config;
492 struct drm_connector *connector; 119 struct drm_connector *connector;
120 const struct gma_limit_t *limit;
493 121
494 /* No scan out no play */ 122 /* No scan out no play */
495 if (crtc->fb == NULL) { 123 if (crtc->fb == NULL) {
@@ -498,14 +126,13 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
498 } 126 }
499 127
500 list_for_each_entry(connector, &mode_config->connector_list, head) { 128 list_for_each_entry(connector, &mode_config->connector_list, head) {
501 struct psb_intel_encoder *psb_intel_encoder = 129 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
502 psb_intel_attached_encoder(connector);
503 130
504 if (!connector->encoder 131 if (!connector->encoder
505 || connector->encoder->crtc != crtc) 132 || connector->encoder->crtc != crtc)
506 continue; 133 continue;
507 134
508 switch (psb_intel_encoder->type) { 135 switch (gma_encoder->type) {
509 case INTEL_OUTPUT_LVDS: 136 case INTEL_OUTPUT_LVDS:
510 is_lvds = true; 137 is_lvds = true;
511 break; 138 break;
@@ -520,10 +147,13 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
520 147
521 refclk = 96000; 148 refclk = 96000;
522 149
523 ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, 150 limit = gma_crtc->clock_funcs->limit(crtc, refclk);
151
152 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
524 &clock); 153 &clock);
525 if (!ok) { 154 if (!ok) {
526 dev_err(dev->dev, "Couldn't find PLL settings for mode!\n"); 155 DRM_ERROR("Couldn't find PLL settings for mode! target: %d, actual: %d",
156 adjusted_mode->clock, clock.dot);
527 return 0; 157 return 0;
528 } 158 }
529 159
@@ -661,368 +291,29 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
661 REG_WRITE(map->conf, pipeconf); 291 REG_WRITE(map->conf, pipeconf);
662 REG_READ(map->conf); 292 REG_READ(map->conf);
663 293
664 psb_intel_wait_for_vblank(dev); 294 gma_wait_for_vblank(dev);
665 295
666 REG_WRITE(map->cntr, dspcntr); 296 REG_WRITE(map->cntr, dspcntr);
667 297
668 /* Flush the plane changes */ 298 /* Flush the plane changes */
669 crtc_funcs->mode_set_base(crtc, x, y, old_fb); 299 crtc_funcs->mode_set_base(crtc, x, y, old_fb);
670 300
671 psb_intel_wait_for_vblank(dev); 301 gma_wait_for_vblank(dev);
672
673 return 0;
674}
675
676/** Loads the palette/gamma unit for the CRTC with the prepared values */
677void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
678{
679 struct drm_device *dev = crtc->dev;
680 struct drm_psb_private *dev_priv = dev->dev_private;
681 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
682 const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
683 int palreg = map->palette;
684 int i;
685
686 /* The clocks have to be on to load the palette. */
687 if (!crtc->enabled)
688 return;
689
690 switch (psb_intel_crtc->pipe) {
691 case 0:
692 case 1:
693 break;
694 default:
695 dev_err(dev->dev, "Illegal Pipe Number.\n");
696 return;
697 }
698
699 if (gma_power_begin(dev, false)) {
700 for (i = 0; i < 256; i++) {
701 REG_WRITE(palreg + 4 * i,
702 ((psb_intel_crtc->lut_r[i] +
703 psb_intel_crtc->lut_adj[i]) << 16) |
704 ((psb_intel_crtc->lut_g[i] +
705 psb_intel_crtc->lut_adj[i]) << 8) |
706 (psb_intel_crtc->lut_b[i] +
707 psb_intel_crtc->lut_adj[i]));
708 }
709 gma_power_end(dev);
710 } else {
711 for (i = 0; i < 256; i++) {
712 dev_priv->regs.pipe[0].palette[i] =
713 ((psb_intel_crtc->lut_r[i] +
714 psb_intel_crtc->lut_adj[i]) << 16) |
715 ((psb_intel_crtc->lut_g[i] +
716 psb_intel_crtc->lut_adj[i]) << 8) |
717 (psb_intel_crtc->lut_b[i] +
718 psb_intel_crtc->lut_adj[i]);
719 }
720
721 }
722}
723
724/**
725 * Save HW states of the given crtc
726 */
727static void psb_intel_crtc_save(struct drm_crtc *crtc)
728{
729 struct drm_device *dev = crtc->dev;
730 struct drm_psb_private *dev_priv = dev->dev_private;
731 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
732 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
733 const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
734 uint32_t paletteReg;
735 int i;
736
737 if (!crtc_state) {
738 dev_err(dev->dev, "No CRTC state found\n");
739 return;
740 }
741
742 crtc_state->saveDSPCNTR = REG_READ(map->cntr);
743 crtc_state->savePIPECONF = REG_READ(map->conf);
744 crtc_state->savePIPESRC = REG_READ(map->src);
745 crtc_state->saveFP0 = REG_READ(map->fp0);
746 crtc_state->saveFP1 = REG_READ(map->fp1);
747 crtc_state->saveDPLL = REG_READ(map->dpll);
748 crtc_state->saveHTOTAL = REG_READ(map->htotal);
749 crtc_state->saveHBLANK = REG_READ(map->hblank);
750 crtc_state->saveHSYNC = REG_READ(map->hsync);
751 crtc_state->saveVTOTAL = REG_READ(map->vtotal);
752 crtc_state->saveVBLANK = REG_READ(map->vblank);
753 crtc_state->saveVSYNC = REG_READ(map->vsync);
754 crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
755
756 /*NOTE: DSPSIZE DSPPOS only for psb*/
757 crtc_state->saveDSPSIZE = REG_READ(map->size);
758 crtc_state->saveDSPPOS = REG_READ(map->pos);
759
760 crtc_state->saveDSPBASE = REG_READ(map->base);
761
762 paletteReg = map->palette;
763 for (i = 0; i < 256; ++i)
764 crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
765}
766
767/**
768 * Restore HW states of the given crtc
769 */
770static void psb_intel_crtc_restore(struct drm_crtc *crtc)
771{
772 struct drm_device *dev = crtc->dev;
773 struct drm_psb_private *dev_priv = dev->dev_private;
774 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
775 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
776 const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
777 uint32_t paletteReg;
778 int i;
779
780 if (!crtc_state) {
781 dev_err(dev->dev, "No crtc state\n");
782 return;
783 }
784
785 if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
786 REG_WRITE(map->dpll,
787 crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
788 REG_READ(map->dpll);
789 udelay(150);
790 }
791
792 REG_WRITE(map->fp0, crtc_state->saveFP0);
793 REG_READ(map->fp0);
794
795 REG_WRITE(map->fp1, crtc_state->saveFP1);
796 REG_READ(map->fp1);
797
798 REG_WRITE(map->dpll, crtc_state->saveDPLL);
799 REG_READ(map->dpll);
800 udelay(150);
801
802 REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
803 REG_WRITE(map->hblank, crtc_state->saveHBLANK);
804 REG_WRITE(map->hsync, crtc_state->saveHSYNC);
805 REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
806 REG_WRITE(map->vblank, crtc_state->saveVBLANK);
807 REG_WRITE(map->vsync, crtc_state->saveVSYNC);
808 REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
809
810 REG_WRITE(map->size, crtc_state->saveDSPSIZE);
811 REG_WRITE(map->pos, crtc_state->saveDSPPOS);
812
813 REG_WRITE(map->src, crtc_state->savePIPESRC);
814 REG_WRITE(map->base, crtc_state->saveDSPBASE);
815 REG_WRITE(map->conf, crtc_state->savePIPECONF);
816
817 psb_intel_wait_for_vblank(dev);
818
819 REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
820 REG_WRITE(map->base, crtc_state->saveDSPBASE);
821
822 psb_intel_wait_for_vblank(dev);
823
824 paletteReg = map->palette;
825 for (i = 0; i < 256; ++i)
826 REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
827}
828
829static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
830 struct drm_file *file_priv,
831 uint32_t handle,
832 uint32_t width, uint32_t height)
833{
834 struct drm_device *dev = crtc->dev;
835 struct drm_psb_private *dev_priv = dev->dev_private;
836 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
837 int pipe = psb_intel_crtc->pipe;
838 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
839 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
840 uint32_t temp;
841 size_t addr = 0;
842 struct gtt_range *gt;
843 struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt;
844 struct drm_gem_object *obj;
845 void *tmp_dst, *tmp_src;
846 int ret = 0, i, cursor_pages;
847
848 /* if we want to turn off the cursor, ignore width and height */
849 if (!handle) {
850 /* turn off the cursor */
851 temp = CURSOR_MODE_DISABLE;
852
853 if (gma_power_begin(dev, false)) {
854 REG_WRITE(control, temp);
855 REG_WRITE(base, 0);
856 gma_power_end(dev);
857 }
858
859 /* Unpin the old GEM object */
860 if (psb_intel_crtc->cursor_obj) {
861 gt = container_of(psb_intel_crtc->cursor_obj,
862 struct gtt_range, gem);
863 psb_gtt_unpin(gt);
864 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
865 psb_intel_crtc->cursor_obj = NULL;
866 }
867
868 return 0;
869 }
870
871 /* Currently we only support 64x64 cursors */
872 if (width != 64 || height != 64) {
873 dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
874 return -EINVAL;
875 }
876
877 obj = drm_gem_object_lookup(dev, file_priv, handle);
878 if (!obj)
879 return -ENOENT;
880
881 if (obj->size < width * height * 4) {
882 dev_dbg(dev->dev, "buffer is too small\n");
883 ret = -ENOMEM;
884 goto unref_cursor;
885 }
886
887 gt = container_of(obj, struct gtt_range, gem);
888
889 /* Pin the memory into the GTT */
890 ret = psb_gtt_pin(gt);
891 if (ret) {
892 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
893 goto unref_cursor;
894 }
895
896 if (dev_priv->ops->cursor_needs_phys) {
897 if (cursor_gt == NULL) {
898 dev_err(dev->dev, "No hardware cursor mem available");
899 ret = -ENOMEM;
900 goto unref_cursor;
901 }
902
903 /* Prevent overflow */
904 if (gt->npage > 4)
905 cursor_pages = 4;
906 else
907 cursor_pages = gt->npage;
908
909 /* Copy the cursor to cursor mem */
910 tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
911 for (i = 0; i < cursor_pages; i++) {
912 tmp_src = kmap(gt->pages[i]);
913 memcpy(tmp_dst, tmp_src, PAGE_SIZE);
914 kunmap(gt->pages[i]);
915 tmp_dst += PAGE_SIZE;
916 }
917
918 addr = psb_intel_crtc->cursor_addr;
919 } else {
920 addr = gt->offset; /* Or resource.start ??? */
921 psb_intel_crtc->cursor_addr = addr;
922 }
923
924 temp = 0;
925 /* set the pipe for the cursor */
926 temp |= (pipe << 28);
927 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
928
929 if (gma_power_begin(dev, false)) {
930 REG_WRITE(control, temp);
931 REG_WRITE(base, addr);
932 gma_power_end(dev);
933 }
934
935 /* unpin the old bo */
936 if (psb_intel_crtc->cursor_obj) {
937 gt = container_of(psb_intel_crtc->cursor_obj,
938 struct gtt_range, gem);
939 psb_gtt_unpin(gt);
940 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
941 }
942
943 psb_intel_crtc->cursor_obj = obj;
944 return ret;
945
946unref_cursor:
947 drm_gem_object_unreference(obj);
948 return ret;
949}
950
951static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
952{
953 struct drm_device *dev = crtc->dev;
954 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
955 int pipe = psb_intel_crtc->pipe;
956 uint32_t temp = 0;
957 uint32_t addr;
958
959
960 if (x < 0) {
961 temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
962 x = -x;
963 }
964 if (y < 0) {
965 temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
966 y = -y;
967 }
968
969 temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
970 temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
971
972 addr = psb_intel_crtc->cursor_addr;
973 302
974 if (gma_power_begin(dev, false)) {
975 REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
976 REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
977 gma_power_end(dev);
978 }
979 return 0; 303 return 0;
980} 304}
981 305
982static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
983 u16 *green, u16 *blue, uint32_t type, uint32_t size)
984{
985 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
986 int i;
987
988 if (size != 256)
989 return;
990
991 for (i = 0; i < 256; i++) {
992 psb_intel_crtc->lut_r[i] = red[i] >> 8;
993 psb_intel_crtc->lut_g[i] = green[i] >> 8;
994 psb_intel_crtc->lut_b[i] = blue[i] >> 8;
995 }
996
997 psb_intel_crtc_load_lut(crtc);
998}
999
1000static int psb_crtc_set_config(struct drm_mode_set *set)
1001{
1002 int ret;
1003 struct drm_device *dev = set->crtc->dev;
1004 struct drm_psb_private *dev_priv = dev->dev_private;
1005
1006 if (!dev_priv->rpm_enabled)
1007 return drm_crtc_helper_set_config(set);
1008
1009 pm_runtime_forbid(&dev->pdev->dev);
1010 ret = drm_crtc_helper_set_config(set);
1011 pm_runtime_allow(&dev->pdev->dev);
1012 return ret;
1013}
1014
1015/* Returns the clock of the currently programmed mode of the given pipe. */ 306/* Returns the clock of the currently programmed mode of the given pipe. */
1016static int psb_intel_crtc_clock_get(struct drm_device *dev, 307static int psb_intel_crtc_clock_get(struct drm_device *dev,
1017 struct drm_crtc *crtc) 308 struct drm_crtc *crtc)
1018{ 309{
1019 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 310 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
1020 struct drm_psb_private *dev_priv = dev->dev_private; 311 struct drm_psb_private *dev_priv = dev->dev_private;
1021 int pipe = psb_intel_crtc->pipe; 312 int pipe = gma_crtc->pipe;
1022 const struct psb_offset *map = &dev_priv->regmap[pipe]; 313 const struct psb_offset *map = &dev_priv->regmap[pipe];
1023 u32 dpll; 314 u32 dpll;
1024 u32 fp; 315 u32 fp;
1025 struct psb_intel_clock_t clock; 316 struct gma_clock_t clock;
1026 bool is_lvds; 317 bool is_lvds;
1027 struct psb_pipe *p = &dev_priv->regs.pipe[pipe]; 318 struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
1028 319
@@ -1092,8 +383,8 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev,
1092struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev, 383struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
1093 struct drm_crtc *crtc) 384 struct drm_crtc *crtc)
1094{ 385{
1095 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 386 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
1096 int pipe = psb_intel_crtc->pipe; 387 int pipe = gma_crtc->pipe;
1097 struct drm_display_mode *mode; 388 struct drm_display_mode *mode;
1098 int htot; 389 int htot;
1099 int hsync; 390 int hsync;
@@ -1136,58 +427,30 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
1136 return mode; 427 return mode;
1137} 428}
1138 429
1139static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
1140{
1141 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1142 struct gtt_range *gt;
1143
1144 /* Unpin the old GEM object */
1145 if (psb_intel_crtc->cursor_obj) {
1146 gt = container_of(psb_intel_crtc->cursor_obj,
1147 struct gtt_range, gem);
1148 psb_gtt_unpin(gt);
1149 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
1150 psb_intel_crtc->cursor_obj = NULL;
1151 }
1152
1153 if (psb_intel_crtc->cursor_gt != NULL)
1154 psb_gtt_free_range(crtc->dev, psb_intel_crtc->cursor_gt);
1155 kfree(psb_intel_crtc->crtc_state);
1156 drm_crtc_cleanup(crtc);
1157 kfree(psb_intel_crtc);
1158}
1159
1160static void psb_intel_crtc_disable(struct drm_crtc *crtc)
1161{
1162 struct gtt_range *gt;
1163 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
1164
1165 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
1166
1167 if (crtc->fb) {
1168 gt = to_psb_fb(crtc->fb)->gtt;
1169 psb_gtt_unpin(gt);
1170 }
1171}
1172
1173const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { 430const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
1174 .dpms = psb_intel_crtc_dpms, 431 .dpms = gma_crtc_dpms,
1175 .mode_fixup = psb_intel_crtc_mode_fixup, 432 .mode_fixup = gma_crtc_mode_fixup,
1176 .mode_set = psb_intel_crtc_mode_set, 433 .mode_set = psb_intel_crtc_mode_set,
1177 .mode_set_base = psb_intel_pipe_set_base, 434 .mode_set_base = gma_pipe_set_base,
1178 .prepare = psb_intel_crtc_prepare, 435 .prepare = gma_crtc_prepare,
1179 .commit = psb_intel_crtc_commit, 436 .commit = gma_crtc_commit,
1180 .disable = psb_intel_crtc_disable, 437 .disable = gma_crtc_disable,
1181}; 438};
1182 439
1183const struct drm_crtc_funcs psb_intel_crtc_funcs = { 440const struct drm_crtc_funcs psb_intel_crtc_funcs = {
1184 .save = psb_intel_crtc_save, 441 .save = gma_crtc_save,
1185 .restore = psb_intel_crtc_restore, 442 .restore = gma_crtc_restore,
1186 .cursor_set = psb_intel_crtc_cursor_set, 443 .cursor_set = gma_crtc_cursor_set,
1187 .cursor_move = psb_intel_crtc_cursor_move, 444 .cursor_move = gma_crtc_cursor_move,
1188 .gamma_set = psb_intel_crtc_gamma_set, 445 .gamma_set = gma_crtc_gamma_set,
1189 .set_config = psb_crtc_set_config, 446 .set_config = gma_crtc_set_config,
1190 .destroy = psb_intel_crtc_destroy, 447 .destroy = gma_crtc_destroy,
448};
449
450const struct gma_clock_funcs psb_clock_funcs = {
451 .clock = psb_intel_clock,
452 .limit = psb_intel_limit,
453 .pll_is_valid = gma_pll_is_valid,
1191}; 454};
1192 455
1193/* 456/*
@@ -1195,7 +458,7 @@ const struct drm_crtc_funcs psb_intel_crtc_funcs = {
1195 * to zero. This is a workaround for h/w defect on Oaktrail 458 * to zero. This is a workaround for h/w defect on Oaktrail
1196 */ 459 */
1197static void psb_intel_cursor_init(struct drm_device *dev, 460static void psb_intel_cursor_init(struct drm_device *dev,
1198 struct psb_intel_crtc *psb_intel_crtc) 461 struct gma_crtc *gma_crtc)
1199{ 462{
1200 struct drm_psb_private *dev_priv = dev->dev_private; 463 struct drm_psb_private *dev_priv = dev->dev_private;
1201 u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR }; 464 u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
@@ -1208,88 +471,87 @@ static void psb_intel_cursor_init(struct drm_device *dev,
1208 */ 471 */
1209 cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1); 472 cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1);
1210 if (!cursor_gt) { 473 if (!cursor_gt) {
1211 psb_intel_crtc->cursor_gt = NULL; 474 gma_crtc->cursor_gt = NULL;
1212 goto out; 475 goto out;
1213 } 476 }
1214 psb_intel_crtc->cursor_gt = cursor_gt; 477 gma_crtc->cursor_gt = cursor_gt;
1215 psb_intel_crtc->cursor_addr = dev_priv->stolen_base + 478 gma_crtc->cursor_addr = dev_priv->stolen_base +
1216 cursor_gt->offset; 479 cursor_gt->offset;
1217 } else { 480 } else {
1218 psb_intel_crtc->cursor_gt = NULL; 481 gma_crtc->cursor_gt = NULL;
1219 } 482 }
1220 483
1221out: 484out:
1222 REG_WRITE(control[psb_intel_crtc->pipe], 0); 485 REG_WRITE(control[gma_crtc->pipe], 0);
1223 REG_WRITE(base[psb_intel_crtc->pipe], 0); 486 REG_WRITE(base[gma_crtc->pipe], 0);
1224} 487}
1225 488
1226void psb_intel_crtc_init(struct drm_device *dev, int pipe, 489void psb_intel_crtc_init(struct drm_device *dev, int pipe,
1227 struct psb_intel_mode_device *mode_dev) 490 struct psb_intel_mode_device *mode_dev)
1228{ 491{
1229 struct drm_psb_private *dev_priv = dev->dev_private; 492 struct drm_psb_private *dev_priv = dev->dev_private;
1230 struct psb_intel_crtc *psb_intel_crtc; 493 struct gma_crtc *gma_crtc;
1231 int i; 494 int i;
1232 uint16_t *r_base, *g_base, *b_base; 495 uint16_t *r_base, *g_base, *b_base;
1233 496
1234 /* We allocate an extra array of drm_connector pointers 497 /* We allocate an extra array of drm_connector pointers
1235 * for fbdev after the crtc */ 498 * for fbdev after the crtc */
1236 psb_intel_crtc = 499 gma_crtc = kzalloc(sizeof(struct gma_crtc) +
1237 kzalloc(sizeof(struct psb_intel_crtc) + 500 (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
1238 (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), 501 GFP_KERNEL);
1239 GFP_KERNEL); 502 if (gma_crtc == NULL)
1240 if (psb_intel_crtc == NULL)
1241 return; 503 return;
1242 504
1243 psb_intel_crtc->crtc_state = 505 gma_crtc->crtc_state =
1244 kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL); 506 kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL);
1245 if (!psb_intel_crtc->crtc_state) { 507 if (!gma_crtc->crtc_state) {
1246 dev_err(dev->dev, "Crtc state error: No memory\n"); 508 dev_err(dev->dev, "Crtc state error: No memory\n");
1247 kfree(psb_intel_crtc); 509 kfree(gma_crtc);
1248 return; 510 return;
1249 } 511 }
1250 512
1251 /* Set the CRTC operations from the chip specific data */ 513 /* Set the CRTC operations from the chip specific data */
1252 drm_crtc_init(dev, &psb_intel_crtc->base, dev_priv->ops->crtc_funcs); 514 drm_crtc_init(dev, &gma_crtc->base, dev_priv->ops->crtc_funcs);
1253 515
1254 drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256); 516 /* Set the CRTC clock functions from chip specific data */
1255 psb_intel_crtc->pipe = pipe; 517 gma_crtc->clock_funcs = dev_priv->ops->clock_funcs;
1256 psb_intel_crtc->plane = pipe;
1257 518
1258 r_base = psb_intel_crtc->base.gamma_store; 519 drm_mode_crtc_set_gamma_size(&gma_crtc->base, 256);
520 gma_crtc->pipe = pipe;
521 gma_crtc->plane = pipe;
522
523 r_base = gma_crtc->base.gamma_store;
1259 g_base = r_base + 256; 524 g_base = r_base + 256;
1260 b_base = g_base + 256; 525 b_base = g_base + 256;
1261 for (i = 0; i < 256; i++) { 526 for (i = 0; i < 256; i++) {
1262 psb_intel_crtc->lut_r[i] = i; 527 gma_crtc->lut_r[i] = i;
1263 psb_intel_crtc->lut_g[i] = i; 528 gma_crtc->lut_g[i] = i;
1264 psb_intel_crtc->lut_b[i] = i; 529 gma_crtc->lut_b[i] = i;
1265 r_base[i] = i << 8; 530 r_base[i] = i << 8;
1266 g_base[i] = i << 8; 531 g_base[i] = i << 8;
1267 b_base[i] = i << 8; 532 b_base[i] = i << 8;
1268 533
1269 psb_intel_crtc->lut_adj[i] = 0; 534 gma_crtc->lut_adj[i] = 0;
1270 } 535 }
1271 536
1272 psb_intel_crtc->mode_dev = mode_dev; 537 gma_crtc->mode_dev = mode_dev;
1273 psb_intel_crtc->cursor_addr = 0; 538 gma_crtc->cursor_addr = 0;
1274 539
1275 drm_crtc_helper_add(&psb_intel_crtc->base, 540 drm_crtc_helper_add(&gma_crtc->base,
1276 dev_priv->ops->crtc_helper); 541 dev_priv->ops->crtc_helper);
1277 542
1278 /* Set up the array of drm_connector pointers */ 543 /* Set up the array of drm_connector pointers */
1279 psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base; 544 gma_crtc->mode_set.crtc = &gma_crtc->base;
1280 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 545 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
1281 dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] != NULL); 546 dev_priv->plane_to_crtc_mapping[gma_crtc->plane] != NULL);
1282 dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] = 547 dev_priv->plane_to_crtc_mapping[gma_crtc->plane] = &gma_crtc->base;
1283 &psb_intel_crtc->base; 548 dev_priv->pipe_to_crtc_mapping[gma_crtc->pipe] = &gma_crtc->base;
1284 dev_priv->pipe_to_crtc_mapping[psb_intel_crtc->pipe] = 549 gma_crtc->mode_set.connectors = (struct drm_connector **)(gma_crtc + 1);
1285 &psb_intel_crtc->base; 550 gma_crtc->mode_set.num_connectors = 0;
1286 psb_intel_crtc->mode_set.connectors = 551 psb_intel_cursor_init(dev, gma_crtc);
1287 (struct drm_connector **) (psb_intel_crtc + 1);
1288 psb_intel_crtc->mode_set.num_connectors = 0;
1289 psb_intel_cursor_init(dev, psb_intel_crtc);
1290 552
1291 /* Set to true so that the pipe is forced off on initial config. */ 553 /* Set to true so that the pipe is forced off on initial config. */
1292 psb_intel_crtc->active = true; 554 gma_crtc->active = true;
1293} 555}
1294 556
1295int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 557int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -1298,7 +560,7 @@ int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
1298 struct drm_psb_private *dev_priv = dev->dev_private; 560 struct drm_psb_private *dev_priv = dev->dev_private;
1299 struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data; 561 struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data;
1300 struct drm_mode_object *drmmode_obj; 562 struct drm_mode_object *drmmode_obj;
1301 struct psb_intel_crtc *crtc; 563 struct gma_crtc *crtc;
1302 564
1303 if (!dev_priv) { 565 if (!dev_priv) {
1304 dev_err(dev->dev, "called with no initialization\n"); 566 dev_err(dev->dev, "called with no initialization\n");
@@ -1313,7 +575,7 @@ int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
1313 return -EINVAL; 575 return -EINVAL;
1314 } 576 }
1315 577
1316 crtc = to_psb_intel_crtc(obj_to_crtc(drmmode_obj)); 578 crtc = to_gma_crtc(obj_to_crtc(drmmode_obj));
1317 pipe_from_crtc_id->pipe = crtc->pipe; 579 pipe_from_crtc_id->pipe = crtc->pipe;
1318 580
1319 return 0; 581 return 0;
@@ -1324,14 +586,14 @@ struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
1324 struct drm_crtc *crtc = NULL; 586 struct drm_crtc *crtc = NULL;
1325 587
1326 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 588 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1327 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 589 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
1328 if (psb_intel_crtc->pipe == pipe) 590 if (gma_crtc->pipe == pipe)
1329 break; 591 break;
1330 } 592 }
1331 return crtc; 593 return crtc;
1332} 594}
1333 595
1334int psb_intel_connector_clones(struct drm_device *dev, int type_mask) 596int gma_connector_clones(struct drm_device *dev, int type_mask)
1335{ 597{
1336 int index_mask = 0; 598 int index_mask = 0;
1337 struct drm_connector *connector; 599 struct drm_connector *connector;
@@ -1339,30 +601,10 @@ int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
1339 601
1340 list_for_each_entry(connector, &dev->mode_config.connector_list, 602 list_for_each_entry(connector, &dev->mode_config.connector_list,
1341 head) { 603 head) {
1342 struct psb_intel_encoder *psb_intel_encoder = 604 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
1343 psb_intel_attached_encoder(connector); 605 if (type_mask & (1 << gma_encoder->type))
1344 if (type_mask & (1 << psb_intel_encoder->type))
1345 index_mask |= (1 << entry); 606 index_mask |= (1 << entry);
1346 entry++; 607 entry++;
1347 } 608 }
1348 return index_mask; 609 return index_mask;
1349} 610}
1350
1351/* current intel driver doesn't take advantage of encoders
1352 always give back the encoder for the connector
1353*/
1354struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
1355{
1356 struct psb_intel_encoder *psb_intel_encoder =
1357 psb_intel_attached_encoder(connector);
1358
1359 return &psb_intel_encoder->base;
1360}
1361
1362void psb_intel_connector_attach_encoder(struct psb_intel_connector *connector,
1363 struct psb_intel_encoder *encoder)
1364{
1365 connector->encoder = encoder;
1366 drm_mode_connector_attach_encoder(&connector->base,
1367 &encoder->base);
1368}
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 4dcae421a58d..bde27fdb41bf 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -24,6 +24,7 @@
24#include <drm/drm_crtc.h> 24#include <drm/drm_crtc.h>
25#include <drm/drm_crtc_helper.h> 25#include <drm/drm_crtc_helper.h>
26#include <linux/gpio.h> 26#include <linux/gpio.h>
27#include "gma_display.h"
27 28
28/* 29/*
29 * Display related stuff 30 * Display related stuff
@@ -116,11 +117,11 @@ struct psb_intel_i2c_chan {
116 u8 slave_addr; 117 u8 slave_addr;
117}; 118};
118 119
119struct psb_intel_encoder { 120struct gma_encoder {
120 struct drm_encoder base; 121 struct drm_encoder base;
121 int type; 122 int type;
122 bool needs_tv_clock; 123 bool needs_tv_clock;
123 void (*hot_plug)(struct psb_intel_encoder *); 124 void (*hot_plug)(struct gma_encoder *);
124 int crtc_mask; 125 int crtc_mask;
125 int clone_mask; 126 int clone_mask;
126 u32 ddi_select; /* Channel info */ 127 u32 ddi_select; /* Channel info */
@@ -136,9 +137,9 @@ struct psb_intel_encoder {
136 struct psb_intel_i2c_chan *ddc_bus; 137 struct psb_intel_i2c_chan *ddc_bus;
137}; 138};
138 139
139struct psb_intel_connector { 140struct gma_connector {
140 struct drm_connector base; 141 struct drm_connector base;
141 struct psb_intel_encoder *encoder; 142 struct gma_encoder *encoder;
142}; 143};
143 144
144struct psb_intel_crtc_state { 145struct psb_intel_crtc_state {
@@ -161,7 +162,7 @@ struct psb_intel_crtc_state {
161 uint32_t savePalette[256]; 162 uint32_t savePalette[256];
162}; 163};
163 164
164struct psb_intel_crtc { 165struct gma_crtc {
165 struct drm_crtc base; 166 struct drm_crtc base;
166 int pipe; 167 int pipe;
167 int plane; 168 int plane;
@@ -188,14 +189,16 @@ struct psb_intel_crtc {
188 189
189 /* Saved Crtc HW states */ 190 /* Saved Crtc HW states */
190 struct psb_intel_crtc_state *crtc_state; 191 struct psb_intel_crtc_state *crtc_state;
192
193 const struct gma_clock_funcs *clock_funcs;
191}; 194};
192 195
193#define to_psb_intel_crtc(x) \ 196#define to_gma_crtc(x) \
194 container_of(x, struct psb_intel_crtc, base) 197 container_of(x, struct gma_crtc, base)
195#define to_psb_intel_connector(x) \ 198#define to_gma_connector(x) \
196 container_of(x, struct psb_intel_connector, base) 199 container_of(x, struct gma_connector, base)
197#define to_psb_intel_encoder(x) \ 200#define to_gma_encoder(x) \
198 container_of(x, struct psb_intel_encoder, base) 201 container_of(x, struct gma_encoder, base)
199#define to_psb_intel_framebuffer(x) \ 202#define to_psb_intel_framebuffer(x) \
200 container_of(x, struct psb_intel_framebuffer, base) 203 container_of(x, struct psb_intel_framebuffer, base)
201 204
@@ -223,27 +226,18 @@ extern void oaktrail_dsi_init(struct drm_device *dev,
223extern void mid_dsi_init(struct drm_device *dev, 226extern void mid_dsi_init(struct drm_device *dev,
224 struct psb_intel_mode_device *mode_dev, int dsi_num); 227 struct psb_intel_mode_device *mode_dev, int dsi_num);
225 228
226extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc); 229extern struct drm_encoder *gma_best_encoder(struct drm_connector *connector);
227extern void psb_intel_encoder_prepare(struct drm_encoder *encoder); 230extern void gma_connector_attach_encoder(struct gma_connector *connector,
228extern void psb_intel_encoder_commit(struct drm_encoder *encoder); 231 struct gma_encoder *encoder);
229extern void psb_intel_encoder_destroy(struct drm_encoder *encoder);
230 232
231static inline struct psb_intel_encoder *psb_intel_attached_encoder( 233static inline struct gma_encoder *gma_attached_encoder(
232 struct drm_connector *connector) 234 struct drm_connector *connector)
233{ 235{
234 return to_psb_intel_connector(connector)->encoder; 236 return to_gma_connector(connector)->encoder;
235} 237}
236 238
237extern void psb_intel_connector_attach_encoder(
238 struct psb_intel_connector *connector,
239 struct psb_intel_encoder *encoder);
240
241extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
242 *connector);
243
244extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev, 239extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
245 struct drm_crtc *crtc); 240 struct drm_crtc *crtc);
246extern void psb_intel_wait_for_vblank(struct drm_device *dev);
247extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 241extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
248 struct drm_file *file_priv); 242 struct drm_file *file_priv);
249extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, 243extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
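The header changes above are mostly mechanical renames (psb_intel_* to gma_*), but the to_gma_crtc/to_gma_connector/to_gma_encoder macros deserve a note: they are the usual container_of() upcast from the embedded DRM core object back to the driver's private structure, which is what gma_attached_encoder() builds on. A standalone demo of the same pattern with stand-in types (illustrative only, not driver code):

	#include <stddef.h>
	#include <stdio.h>

	/* simplified container_of(): recover the embedding struct from a member pointer */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct drm_crtc { int id; };                      /* stand-in for the DRM core type */
	struct gma_crtc { struct drm_crtc base; int pipe; };

	#define to_gma_crtc(x) container_of(x, struct gma_crtc, base)

	int main(void)
	{
		struct gma_crtc gma = { .base = { .id = 1 }, .pipe = 0 };
		struct drm_crtc *crtc = &gma.base;        /* what the core hands back */

		printf("pipe %d\n", to_gma_crtc(crtc)->pipe);   /* prints: pipe 0 */
		return 0;
	}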
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 9fa5fa2e6192..32342f6990d9 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -267,10 +267,9 @@ static void psb_intel_lvds_save(struct drm_connector *connector)
267 struct drm_device *dev = connector->dev; 267 struct drm_device *dev = connector->dev;
268 struct drm_psb_private *dev_priv = 268 struct drm_psb_private *dev_priv =
269 (struct drm_psb_private *)dev->dev_private; 269 (struct drm_psb_private *)dev->dev_private;
270 struct psb_intel_encoder *psb_intel_encoder = 270 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
271 psb_intel_attached_encoder(connector);
272 struct psb_intel_lvds_priv *lvds_priv = 271 struct psb_intel_lvds_priv *lvds_priv =
273 (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv; 272 (struct psb_intel_lvds_priv *)gma_encoder->dev_priv;
274 273
275 lvds_priv->savePP_ON = REG_READ(LVDSPP_ON); 274 lvds_priv->savePP_ON = REG_READ(LVDSPP_ON);
276 lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF); 275 lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
@@ -307,10 +306,9 @@ static void psb_intel_lvds_restore(struct drm_connector *connector)
307{ 306{
308 struct drm_device *dev = connector->dev; 307 struct drm_device *dev = connector->dev;
309 u32 pp_status; 308 u32 pp_status;
310 struct psb_intel_encoder *psb_intel_encoder = 309 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
311 psb_intel_attached_encoder(connector);
312 struct psb_intel_lvds_priv *lvds_priv = 310 struct psb_intel_lvds_priv *lvds_priv =
313 (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv; 311 (struct psb_intel_lvds_priv *)gma_encoder->dev_priv;
314 312
315 dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", 313 dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
316 lvds_priv->savePP_ON, 314 lvds_priv->savePP_ON,
@@ -349,12 +347,11 @@ int psb_intel_lvds_mode_valid(struct drm_connector *connector,
349 struct drm_display_mode *mode) 347 struct drm_display_mode *mode)
350{ 348{
351 struct drm_psb_private *dev_priv = connector->dev->dev_private; 349 struct drm_psb_private *dev_priv = connector->dev->dev_private;
352 struct psb_intel_encoder *psb_intel_encoder = 350 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
353 psb_intel_attached_encoder(connector);
354 struct drm_display_mode *fixed_mode = 351 struct drm_display_mode *fixed_mode =
355 dev_priv->mode_dev.panel_fixed_mode; 352 dev_priv->mode_dev.panel_fixed_mode;
356 353
357 if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2) 354 if (gma_encoder->type == INTEL_OUTPUT_MIPI2)
358 fixed_mode = dev_priv->mode_dev.panel_fixed_mode2; 355 fixed_mode = dev_priv->mode_dev.panel_fixed_mode2;
359 356
360 /* just in case */ 357 /* just in case */
@@ -381,22 +378,20 @@ bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
381 struct drm_device *dev = encoder->dev; 378 struct drm_device *dev = encoder->dev;
382 struct drm_psb_private *dev_priv = dev->dev_private; 379 struct drm_psb_private *dev_priv = dev->dev_private;
383 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; 380 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
384 struct psb_intel_crtc *psb_intel_crtc = 381 struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
385 to_psb_intel_crtc(encoder->crtc);
386 struct drm_encoder *tmp_encoder; 382 struct drm_encoder *tmp_encoder;
387 struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode; 383 struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
388 struct psb_intel_encoder *psb_intel_encoder = 384 struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
389 to_psb_intel_encoder(encoder);
390 385
391 if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2) 386 if (gma_encoder->type == INTEL_OUTPUT_MIPI2)
392 panel_fixed_mode = mode_dev->panel_fixed_mode2; 387 panel_fixed_mode = mode_dev->panel_fixed_mode2;
393 388
394 /* PSB requires the LVDS is on pipe B, MRST has only one pipe anyway */ 389 /* PSB requires the LVDS is on pipe B, MRST has only one pipe anyway */
395 if (!IS_MRST(dev) && psb_intel_crtc->pipe == 0) { 390 if (!IS_MRST(dev) && gma_crtc->pipe == 0) {
396 printk(KERN_ERR "Can't support LVDS on pipe A\n"); 391 printk(KERN_ERR "Can't support LVDS on pipe A\n");
397 return false; 392 return false;
398 } 393 }
399 if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) { 394 if (IS_MRST(dev) && gma_crtc->pipe != 0) {
400 printk(KERN_ERR "Must use PIPE A\n"); 395 printk(KERN_ERR "Must use PIPE A\n");
401 return false; 396 return false;
402 } 397 }
@@ -525,9 +520,8 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
525 struct drm_device *dev = connector->dev; 520 struct drm_device *dev = connector->dev;
526 struct drm_psb_private *dev_priv = dev->dev_private; 521 struct drm_psb_private *dev_priv = dev->dev_private;
527 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; 522 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
528 struct psb_intel_encoder *psb_intel_encoder = 523 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
529 psb_intel_attached_encoder(connector); 524 struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
530 struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
531 int ret = 0; 525 int ret = 0;
532 526
533 if (!IS_MRST(dev)) 527 if (!IS_MRST(dev))
@@ -564,9 +558,8 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
564 */ 558 */
565void psb_intel_lvds_destroy(struct drm_connector *connector) 559void psb_intel_lvds_destroy(struct drm_connector *connector)
566{ 560{
567 struct psb_intel_encoder *psb_intel_encoder = 561 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
568 psb_intel_attached_encoder(connector); 562 struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
569 struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
570 563
571 if (lvds_priv->ddc_bus) 564 if (lvds_priv->ddc_bus)
572 psb_intel_i2c_destroy(lvds_priv->ddc_bus); 565 psb_intel_i2c_destroy(lvds_priv->ddc_bus);
@@ -585,8 +578,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
585 return -1; 578 return -1;
586 579
587 if (!strcmp(property->name, "scaling mode")) { 580 if (!strcmp(property->name, "scaling mode")) {
588 struct psb_intel_crtc *crtc = 581 struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
589 to_psb_intel_crtc(encoder->crtc);
590 uint64_t curval; 582 uint64_t curval;
591 583
592 if (!crtc) 584 if (!crtc)
@@ -656,7 +648,7 @@ const struct drm_connector_helper_funcs
656 psb_intel_lvds_connector_helper_funcs = { 648 psb_intel_lvds_connector_helper_funcs = {
657 .get_modes = psb_intel_lvds_get_modes, 649 .get_modes = psb_intel_lvds_get_modes,
658 .mode_valid = psb_intel_lvds_mode_valid, 650 .mode_valid = psb_intel_lvds_mode_valid,
659 .best_encoder = psb_intel_best_encoder, 651 .best_encoder = gma_best_encoder,
660}; 652};
661 653
662const struct drm_connector_funcs psb_intel_lvds_connector_funcs = { 654const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
@@ -691,8 +683,8 @@ const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
691void psb_intel_lvds_init(struct drm_device *dev, 683void psb_intel_lvds_init(struct drm_device *dev,
692 struct psb_intel_mode_device *mode_dev) 684 struct psb_intel_mode_device *mode_dev)
693{ 685{
694 struct psb_intel_encoder *psb_intel_encoder; 686 struct gma_encoder *gma_encoder;
695 struct psb_intel_connector *psb_intel_connector; 687 struct gma_connector *gma_connector;
696 struct psb_intel_lvds_priv *lvds_priv; 688 struct psb_intel_lvds_priv *lvds_priv;
697 struct drm_connector *connector; 689 struct drm_connector *connector;
698 struct drm_encoder *encoder; 690 struct drm_encoder *encoder;
@@ -702,17 +694,15 @@ void psb_intel_lvds_init(struct drm_device *dev,
702 u32 lvds; 694 u32 lvds;
703 int pipe; 695 int pipe;
704 696
705 psb_intel_encoder = 697 gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
706 kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); 698 if (!gma_encoder) {
707 if (!psb_intel_encoder) { 699 dev_err(dev->dev, "gma_encoder allocation error\n");
708 dev_err(dev->dev, "psb_intel_encoder allocation error\n");
709 return; 700 return;
710 } 701 }
711 702
712 psb_intel_connector = 703 gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
713 kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); 704 if (!gma_connector) {
714 if (!psb_intel_connector) { 705 dev_err(dev->dev, "gma_connector allocation error\n");
715 dev_err(dev->dev, "psb_intel_connector allocation error\n");
716 goto failed_encoder; 706 goto failed_encoder;
717 } 707 }
718 708
@@ -722,10 +712,10 @@ void psb_intel_lvds_init(struct drm_device *dev,
722 goto failed_connector; 712 goto failed_connector;
723 } 713 }
724 714
725 psb_intel_encoder->dev_priv = lvds_priv; 715 gma_encoder->dev_priv = lvds_priv;
726 716
727 connector = &psb_intel_connector->base; 717 connector = &gma_connector->base;
728 encoder = &psb_intel_encoder->base; 718 encoder = &gma_encoder->base;
729 drm_connector_init(dev, connector, 719 drm_connector_init(dev, connector,
730 &psb_intel_lvds_connector_funcs, 720 &psb_intel_lvds_connector_funcs,
731 DRM_MODE_CONNECTOR_LVDS); 721 DRM_MODE_CONNECTOR_LVDS);
@@ -734,9 +724,8 @@ void psb_intel_lvds_init(struct drm_device *dev,
734 &psb_intel_lvds_enc_funcs, 724 &psb_intel_lvds_enc_funcs,
735 DRM_MODE_ENCODER_LVDS); 725 DRM_MODE_ENCODER_LVDS);
736 726
737 psb_intel_connector_attach_encoder(psb_intel_connector, 727 gma_connector_attach_encoder(gma_connector, gma_encoder);
738 psb_intel_encoder); 728 gma_encoder->type = INTEL_OUTPUT_LVDS;
739 psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
740 729
741 drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs); 730 drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
742 drm_connector_helper_add(connector, 731 drm_connector_helper_add(connector,
@@ -851,8 +840,8 @@ failed_blc_i2c:
851 drm_encoder_cleanup(encoder); 840 drm_encoder_cleanup(encoder);
852 drm_connector_cleanup(connector); 841 drm_connector_cleanup(connector);
853failed_connector: 842failed_connector:
854 kfree(psb_intel_connector); 843 kfree(gma_connector);
855failed_encoder: 844failed_encoder:
856 kfree(psb_intel_encoder); 845 kfree(gma_encoder);
857} 846}
858 847
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 3bc8414533c9..6f01cdf5e125 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -65,7 +65,7 @@ static const char *tv_format_names[] = {
65#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names)) 65#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
66 66
67struct psb_intel_sdvo { 67struct psb_intel_sdvo {
68 struct psb_intel_encoder base; 68 struct gma_encoder base;
69 69
70 struct i2c_adapter *i2c; 70 struct i2c_adapter *i2c;
71 u8 slave_addr; 71 u8 slave_addr;
@@ -140,7 +140,7 @@ struct psb_intel_sdvo {
140}; 140};
141 141
142struct psb_intel_sdvo_connector { 142struct psb_intel_sdvo_connector {
143 struct psb_intel_connector base; 143 struct gma_connector base;
144 144
145 /* Mark the type of connector */ 145 /* Mark the type of connector */
146 uint16_t output_flag; 146 uint16_t output_flag;
@@ -200,13 +200,13 @@ static struct psb_intel_sdvo *to_psb_intel_sdvo(struct drm_encoder *encoder)
200 200
201static struct psb_intel_sdvo *intel_attached_sdvo(struct drm_connector *connector) 201static struct psb_intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
202{ 202{
203 return container_of(psb_intel_attached_encoder(connector), 203 return container_of(gma_attached_encoder(connector),
204 struct psb_intel_sdvo, base); 204 struct psb_intel_sdvo, base);
205} 205}
206 206
207static struct psb_intel_sdvo_connector *to_psb_intel_sdvo_connector(struct drm_connector *connector) 207static struct psb_intel_sdvo_connector *to_psb_intel_sdvo_connector(struct drm_connector *connector)
208{ 208{
209 return container_of(to_psb_intel_connector(connector), struct psb_intel_sdvo_connector, base); 209 return container_of(to_gma_connector(connector), struct psb_intel_sdvo_connector, base);
210} 210}
211 211
212static bool 212static bool
@@ -988,7 +988,7 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
988{ 988{
989 struct drm_device *dev = encoder->dev; 989 struct drm_device *dev = encoder->dev;
990 struct drm_crtc *crtc = encoder->crtc; 990 struct drm_crtc *crtc = encoder->crtc;
991 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 991 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
992 struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder); 992 struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
993 u32 sdvox; 993 u32 sdvox;
994 struct psb_intel_sdvo_in_out_map in_out; 994 struct psb_intel_sdvo_in_out_map in_out;
@@ -1071,7 +1071,7 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
1071 } 1071 }
1072 sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; 1072 sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
1073 1073
1074 if (psb_intel_crtc->pipe == 1) 1074 if (gma_crtc->pipe == 1)
1075 sdvox |= SDVO_PIPE_B_SELECT; 1075 sdvox |= SDVO_PIPE_B_SELECT;
1076 if (psb_intel_sdvo->has_hdmi_audio) 1076 if (psb_intel_sdvo->has_hdmi_audio)
1077 sdvox |= SDVO_AUDIO_ENABLE; 1077 sdvox |= SDVO_AUDIO_ENABLE;
@@ -1122,7 +1122,7 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1122 if ((temp & SDVO_ENABLE) == 0) 1122 if ((temp & SDVO_ENABLE) == 0)
1123 psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE); 1123 psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);
1124 for (i = 0; i < 2; i++) 1124 for (i = 0; i < 2; i++)
1125 psb_intel_wait_for_vblank(dev); 1125 gma_wait_for_vblank(dev);
1126 1126
1127 status = psb_intel_sdvo_get_trained_inputs(psb_intel_sdvo, &input1, &input2); 1127 status = psb_intel_sdvo_get_trained_inputs(psb_intel_sdvo, &input1, &input2);
1128 /* Warn if the device reported failure to sync. 1128 /* Warn if the device reported failure to sync.
@@ -1837,10 +1837,8 @@ done:
1837static void psb_intel_sdvo_save(struct drm_connector *connector) 1837static void psb_intel_sdvo_save(struct drm_connector *connector)
1838{ 1838{
1839 struct drm_device *dev = connector->dev; 1839 struct drm_device *dev = connector->dev;
1840 struct psb_intel_encoder *psb_intel_encoder = 1840 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
1841 psb_intel_attached_encoder(connector); 1841 struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(&gma_encoder->base);
1842 struct psb_intel_sdvo *sdvo =
1843 to_psb_intel_sdvo(&psb_intel_encoder->base);
1844 1842
1845 sdvo->saveSDVO = REG_READ(sdvo->sdvo_reg); 1843 sdvo->saveSDVO = REG_READ(sdvo->sdvo_reg);
1846} 1844}
@@ -1848,8 +1846,7 @@ static void psb_intel_sdvo_save(struct drm_connector *connector)
1848static void psb_intel_sdvo_restore(struct drm_connector *connector) 1846static void psb_intel_sdvo_restore(struct drm_connector *connector)
1849{ 1847{
1850 struct drm_device *dev = connector->dev; 1848 struct drm_device *dev = connector->dev;
1851 struct drm_encoder *encoder = 1849 struct drm_encoder *encoder = &gma_attached_encoder(connector)->base;
1852 &psb_intel_attached_encoder(connector)->base;
1853 struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(encoder); 1850 struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(encoder);
1854 struct drm_crtc *crtc = encoder->crtc; 1851 struct drm_crtc *crtc = encoder->crtc;
1855 1852
@@ -1865,9 +1862,9 @@ static void psb_intel_sdvo_restore(struct drm_connector *connector)
1865static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = { 1862static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
1866 .dpms = psb_intel_sdvo_dpms, 1863 .dpms = psb_intel_sdvo_dpms,
1867 .mode_fixup = psb_intel_sdvo_mode_fixup, 1864 .mode_fixup = psb_intel_sdvo_mode_fixup,
1868 .prepare = psb_intel_encoder_prepare, 1865 .prepare = gma_encoder_prepare,
1869 .mode_set = psb_intel_sdvo_mode_set, 1866 .mode_set = psb_intel_sdvo_mode_set,
1870 .commit = psb_intel_encoder_commit, 1867 .commit = gma_encoder_commit,
1871}; 1868};
1872 1869
1873static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = { 1870static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
@@ -1883,7 +1880,7 @@ static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
1883static const struct drm_connector_helper_funcs psb_intel_sdvo_connector_helper_funcs = { 1880static const struct drm_connector_helper_funcs psb_intel_sdvo_connector_helper_funcs = {
1884 .get_modes = psb_intel_sdvo_get_modes, 1881 .get_modes = psb_intel_sdvo_get_modes,
1885 .mode_valid = psb_intel_sdvo_mode_valid, 1882 .mode_valid = psb_intel_sdvo_mode_valid,
1886 .best_encoder = psb_intel_best_encoder, 1883 .best_encoder = gma_best_encoder,
1887}; 1884};
1888 1885
1889static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder) 1886static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
@@ -1895,7 +1892,7 @@ static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
1895 psb_intel_sdvo->sdvo_lvds_fixed_mode); 1892 psb_intel_sdvo->sdvo_lvds_fixed_mode);
1896 1893
1897 i2c_del_adapter(&psb_intel_sdvo->ddc); 1894 i2c_del_adapter(&psb_intel_sdvo->ddc);
1898 psb_intel_encoder_destroy(encoder); 1895 gma_encoder_destroy(encoder);
1899} 1896}
1900 1897
1901static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = { 1898static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
@@ -2056,7 +2053,7 @@ psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
2056 connector->base.base.doublescan_allowed = 0; 2053 connector->base.base.doublescan_allowed = 0;
2057 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; 2054 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
2058 2055
2059 psb_intel_connector_attach_encoder(&connector->base, &encoder->base); 2056 gma_connector_attach_encoder(&connector->base, &encoder->base);
2060 drm_sysfs_connector_add(&connector->base.base); 2057 drm_sysfs_connector_add(&connector->base.base);
2061} 2058}
2062 2059
@@ -2076,7 +2073,7 @@ psb_intel_sdvo_dvi_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
2076{ 2073{
2077 struct drm_encoder *encoder = &psb_intel_sdvo->base.base; 2074 struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
2078 struct drm_connector *connector; 2075 struct drm_connector *connector;
2079 struct psb_intel_connector *intel_connector; 2076 struct gma_connector *intel_connector;
2080 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector; 2077 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
2081 2078
2082 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL); 2079 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2116,7 +2113,7 @@ psb_intel_sdvo_tv_init(struct psb_intel_sdvo *psb_intel_sdvo, int type)
2116{ 2113{
2117 struct drm_encoder *encoder = &psb_intel_sdvo->base.base; 2114 struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
2118 struct drm_connector *connector; 2115 struct drm_connector *connector;
2119 struct psb_intel_connector *intel_connector; 2116 struct gma_connector *intel_connector;
2120 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector; 2117 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
2121 2118
2122 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL); 2119 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2155,7 +2152,7 @@ psb_intel_sdvo_analog_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
2155{ 2152{
2156 struct drm_encoder *encoder = &psb_intel_sdvo->base.base; 2153 struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
2157 struct drm_connector *connector; 2154 struct drm_connector *connector;
2158 struct psb_intel_connector *intel_connector; 2155 struct gma_connector *intel_connector;
2159 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector; 2156 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
2160 2157
2161 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL); 2158 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2189,7 +2186,7 @@ psb_intel_sdvo_lvds_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
2189{ 2186{
2190 struct drm_encoder *encoder = &psb_intel_sdvo->base.base; 2187 struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
2191 struct drm_connector *connector; 2188 struct drm_connector *connector;
2192 struct psb_intel_connector *intel_connector; 2189 struct gma_connector *intel_connector;
2193 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector; 2190 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
2194 2191
2195 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL); 2192 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2541,7 +2538,7 @@ psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
2541bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg) 2538bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2542{ 2539{
2543 struct drm_psb_private *dev_priv = dev->dev_private; 2540 struct drm_psb_private *dev_priv = dev->dev_private;
2544 struct psb_intel_encoder *psb_intel_encoder; 2541 struct gma_encoder *gma_encoder;
2545 struct psb_intel_sdvo *psb_intel_sdvo; 2542 struct psb_intel_sdvo *psb_intel_sdvo;
2546 int i; 2543 int i;
2547 2544
@@ -2558,9 +2555,9 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2558 } 2555 }
2559 2556
2560 /* encoder type will be decided later */ 2557 /* encoder type will be decided later */
2561 psb_intel_encoder = &psb_intel_sdvo->base; 2558 gma_encoder = &psb_intel_sdvo->base;
2562 psb_intel_encoder->type = INTEL_OUTPUT_SDVO; 2559 gma_encoder->type = INTEL_OUTPUT_SDVO;
2563 drm_encoder_init(dev, &psb_intel_encoder->base, &psb_intel_sdvo_enc_funcs, 0); 2560 drm_encoder_init(dev, &gma_encoder->base, &psb_intel_sdvo_enc_funcs, 0);
2564 2561
2565 /* Read the regs to test if we can talk to the device */ 2562 /* Read the regs to test if we can talk to the device */
2566 for (i = 0; i < 0x40; i++) { 2563 for (i = 0; i < 0x40; i++) {
@@ -2578,7 +2575,7 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2578 else 2575 else
2579 dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; 2576 dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
2580 2577
2581 drm_encoder_helper_add(&psb_intel_encoder->base, &psb_intel_sdvo_helper_funcs); 2578 drm_encoder_helper_add(&gma_encoder->base, &psb_intel_sdvo_helper_funcs);
2582 2579
2583 /* In default case sdvo lvds is false */ 2580 /* In default case sdvo lvds is false */
2584 if (!psb_intel_sdvo_get_capabilities(psb_intel_sdvo, &psb_intel_sdvo->caps)) 2581 if (!psb_intel_sdvo_get_capabilities(psb_intel_sdvo, &psb_intel_sdvo->caps))
@@ -2621,7 +2618,7 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2621 return true; 2618 return true;
2622 2619
2623err: 2620err:
2624 drm_encoder_cleanup(&psb_intel_encoder->base); 2621 drm_encoder_cleanup(&gma_encoder->base);
2625 i2c_del_adapter(&psb_intel_sdvo->ddc); 2622 i2c_del_adapter(&psb_intel_sdvo->ddc);
2626 kfree(psb_intel_sdvo); 2623 kfree(psb_intel_sdvo);
2627 2624
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index e68b58a1aaf9..b1f8fc69023f 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -23,7 +23,7 @@
23#include <drm/drm_crtc_helper.h> 23#include <drm/drm_crtc_helper.h>
24#include <drm/drm_encoder_slave.h> 24#include <drm/drm_encoder_slave.h>
25#include <drm/drm_edid.h> 25#include <drm/drm_edid.h>
26 26#include <drm/i2c/tda998x.h>
27 27
28#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) 28#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
29 29
@@ -32,6 +32,11 @@ struct tda998x_priv {
32 uint16_t rev; 32 uint16_t rev;
33 uint8_t current_page; 33 uint8_t current_page;
34 int dpms; 34 int dpms;
35 bool is_hdmi_sink;
36 u8 vip_cntrl_0;
37 u8 vip_cntrl_1;
38 u8 vip_cntrl_2;
39 struct tda998x_encoder_params params;
35}; 40};
36 41
37#define to_tda998x_priv(x) ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv) 42#define to_tda998x_priv(x) ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv)
@@ -68,10 +73,13 @@ struct tda998x_priv {
68# define I2C_MASTER_DIS_MM (1 << 0) 73# define I2C_MASTER_DIS_MM (1 << 0)
69# define I2C_MASTER_DIS_FILT (1 << 1) 74# define I2C_MASTER_DIS_FILT (1 << 1)
70# define I2C_MASTER_APP_STRT_LAT (1 << 2) 75# define I2C_MASTER_APP_STRT_LAT (1 << 2)
76#define REG_FEAT_POWERDOWN REG(0x00, 0x0e) /* read/write */
77# define FEAT_POWERDOWN_SPDIF (1 << 3)
71#define REG_INT_FLAGS_0 REG(0x00, 0x0f) /* read/write */ 78#define REG_INT_FLAGS_0 REG(0x00, 0x0f) /* read/write */
72#define REG_INT_FLAGS_1 REG(0x00, 0x10) /* read/write */ 79#define REG_INT_FLAGS_1 REG(0x00, 0x10) /* read/write */
73#define REG_INT_FLAGS_2 REG(0x00, 0x11) /* read/write */ 80#define REG_INT_FLAGS_2 REG(0x00, 0x11) /* read/write */
74# define INT_FLAGS_2_EDID_BLK_RD (1 << 1) 81# define INT_FLAGS_2_EDID_BLK_RD (1 << 1)
82#define REG_ENA_ACLK REG(0x00, 0x16) /* read/write */
75#define REG_ENA_VP_0 REG(0x00, 0x18) /* read/write */ 83#define REG_ENA_VP_0 REG(0x00, 0x18) /* read/write */
76#define REG_ENA_VP_1 REG(0x00, 0x19) /* read/write */ 84#define REG_ENA_VP_1 REG(0x00, 0x19) /* read/write */
77#define REG_ENA_VP_2 REG(0x00, 0x1a) /* read/write */ 85#define REG_ENA_VP_2 REG(0x00, 0x1a) /* read/write */
@@ -110,6 +118,8 @@ struct tda998x_priv {
110#define REG_VIP_CNTRL_5 REG(0x00, 0x25) /* write */ 118#define REG_VIP_CNTRL_5 REG(0x00, 0x25) /* write */
111# define VIP_CNTRL_5_CKCASE (1 << 0) 119# define VIP_CNTRL_5_CKCASE (1 << 0)
112# define VIP_CNTRL_5_SP_CNT(x) (((x) & 3) << 1) 120# define VIP_CNTRL_5_SP_CNT(x) (((x) & 3) << 1)
121#define REG_MUX_AP REG(0x00, 0x26) /* read/write */
122#define REG_MUX_VP_VIP_OUT REG(0x00, 0x27) /* read/write */
113#define REG_MAT_CONTRL REG(0x00, 0x80) /* write */ 123#define REG_MAT_CONTRL REG(0x00, 0x80) /* write */
114# define MAT_CONTRL_MAT_SC(x) (((x) & 3) << 0) 124# define MAT_CONTRL_MAT_SC(x) (((x) & 3) << 0)
115# define MAT_CONTRL_MAT_BP (1 << 2) 125# define MAT_CONTRL_MAT_BP (1 << 2)
@@ -130,8 +140,12 @@ struct tda998x_priv {
130#define REG_VS_LINE_END_1_LSB REG(0x00, 0xae) /* write */ 140#define REG_VS_LINE_END_1_LSB REG(0x00, 0xae) /* write */
131#define REG_VS_PIX_END_1_MSB REG(0x00, 0xaf) /* write */ 141#define REG_VS_PIX_END_1_MSB REG(0x00, 0xaf) /* write */
132#define REG_VS_PIX_END_1_LSB REG(0x00, 0xb0) /* write */ 142#define REG_VS_PIX_END_1_LSB REG(0x00, 0xb0) /* write */
143#define REG_VS_LINE_STRT_2_MSB REG(0x00, 0xb1) /* write */
144#define REG_VS_LINE_STRT_2_LSB REG(0x00, 0xb2) /* write */
133#define REG_VS_PIX_STRT_2_MSB REG(0x00, 0xb3) /* write */ 145#define REG_VS_PIX_STRT_2_MSB REG(0x00, 0xb3) /* write */
134#define REG_VS_PIX_STRT_2_LSB REG(0x00, 0xb4) /* write */ 146#define REG_VS_PIX_STRT_2_LSB REG(0x00, 0xb4) /* write */
147#define REG_VS_LINE_END_2_MSB REG(0x00, 0xb5) /* write */
148#define REG_VS_LINE_END_2_LSB REG(0x00, 0xb6) /* write */
135#define REG_VS_PIX_END_2_MSB REG(0x00, 0xb7) /* write */ 149#define REG_VS_PIX_END_2_MSB REG(0x00, 0xb7) /* write */
136#define REG_VS_PIX_END_2_LSB REG(0x00, 0xb8) /* write */ 150#define REG_VS_PIX_END_2_LSB REG(0x00, 0xb8) /* write */
137#define REG_HS_PIX_START_MSB REG(0x00, 0xb9) /* write */ 151#define REG_HS_PIX_START_MSB REG(0x00, 0xb9) /* write */
@@ -142,21 +156,29 @@ struct tda998x_priv {
142#define REG_VWIN_START_1_LSB REG(0x00, 0xbe) /* write */ 156#define REG_VWIN_START_1_LSB REG(0x00, 0xbe) /* write */
143#define REG_VWIN_END_1_MSB REG(0x00, 0xbf) /* write */ 157#define REG_VWIN_END_1_MSB REG(0x00, 0xbf) /* write */
144#define REG_VWIN_END_1_LSB REG(0x00, 0xc0) /* write */ 158#define REG_VWIN_END_1_LSB REG(0x00, 0xc0) /* write */
159#define REG_VWIN_START_2_MSB REG(0x00, 0xc1) /* write */
160#define REG_VWIN_START_2_LSB REG(0x00, 0xc2) /* write */
161#define REG_VWIN_END_2_MSB REG(0x00, 0xc3) /* write */
162#define REG_VWIN_END_2_LSB REG(0x00, 0xc4) /* write */
145#define REG_DE_START_MSB REG(0x00, 0xc5) /* write */ 163#define REG_DE_START_MSB REG(0x00, 0xc5) /* write */
146#define REG_DE_START_LSB REG(0x00, 0xc6) /* write */ 164#define REG_DE_START_LSB REG(0x00, 0xc6) /* write */
147#define REG_DE_STOP_MSB REG(0x00, 0xc7) /* write */ 165#define REG_DE_STOP_MSB REG(0x00, 0xc7) /* write */
148#define REG_DE_STOP_LSB REG(0x00, 0xc8) /* write */ 166#define REG_DE_STOP_LSB REG(0x00, 0xc8) /* write */
149#define REG_TBG_CNTRL_0 REG(0x00, 0xca) /* write */ 167#define REG_TBG_CNTRL_0 REG(0x00, 0xca) /* write */
168# define TBG_CNTRL_0_TOP_TGL (1 << 0)
169# define TBG_CNTRL_0_TOP_SEL (1 << 1)
170# define TBG_CNTRL_0_DE_EXT (1 << 2)
171# define TBG_CNTRL_0_TOP_EXT (1 << 3)
150# define TBG_CNTRL_0_FRAME_DIS (1 << 5) 172# define TBG_CNTRL_0_FRAME_DIS (1 << 5)
151# define TBG_CNTRL_0_SYNC_MTHD (1 << 6) 173# define TBG_CNTRL_0_SYNC_MTHD (1 << 6)
152# define TBG_CNTRL_0_SYNC_ONCE (1 << 7) 174# define TBG_CNTRL_0_SYNC_ONCE (1 << 7)
153#define REG_TBG_CNTRL_1 REG(0x00, 0xcb) /* write */ 175#define REG_TBG_CNTRL_1 REG(0x00, 0xcb) /* write */
154# define TBG_CNTRL_1_VH_TGL_0 (1 << 0) 176# define TBG_CNTRL_1_H_TGL (1 << 0)
155# define TBG_CNTRL_1_VH_TGL_1 (1 << 1) 177# define TBG_CNTRL_1_V_TGL (1 << 1)
156# define TBG_CNTRL_1_VH_TGL_2 (1 << 2) 178# define TBG_CNTRL_1_TGL_EN (1 << 2)
157# define TBG_CNTRL_1_VHX_EXT_DE (1 << 3) 179# define TBG_CNTRL_1_X_EXT (1 << 3)
158# define TBG_CNTRL_1_VHX_EXT_HS (1 << 4) 180# define TBG_CNTRL_1_H_EXT (1 << 4)
159# define TBG_CNTRL_1_VHX_EXT_VS (1 << 5) 181# define TBG_CNTRL_1_V_EXT (1 << 5)
160# define TBG_CNTRL_1_DWIN_DIS (1 << 6) 182# define TBG_CNTRL_1_DWIN_DIS (1 << 6)
161#define REG_ENABLE_SPACE REG(0x00, 0xd6) /* write */ 183#define REG_ENABLE_SPACE REG(0x00, 0xd6) /* write */
162#define REG_HVF_CNTRL_0 REG(0x00, 0xe4) /* write */ 184#define REG_HVF_CNTRL_0 REG(0x00, 0xe4) /* write */
@@ -171,6 +193,12 @@ struct tda998x_priv {
171# define HVF_CNTRL_1_PAD(x) (((x) & 3) << 4) 193# define HVF_CNTRL_1_PAD(x) (((x) & 3) << 4)
172# define HVF_CNTRL_1_SEMI_PLANAR (1 << 6) 194# define HVF_CNTRL_1_SEMI_PLANAR (1 << 6)
173#define REG_RPT_CNTRL REG(0x00, 0xf0) /* write */ 195#define REG_RPT_CNTRL REG(0x00, 0xf0) /* write */
196#define REG_I2S_FORMAT REG(0x00, 0xfc) /* read/write */
197# define I2S_FORMAT(x) (((x) & 3) << 0)
198#define REG_AIP_CLKSEL REG(0x00, 0xfd) /* write */
199# define AIP_CLKSEL_FS(x) (((x) & 3) << 0)
200# define AIP_CLKSEL_CLK_POL(x) (((x) & 1) << 2)
201# define AIP_CLKSEL_AIP(x) (((x) & 7) << 3)
174 202
175 203
176/* Page 02h: PLL settings */ 204/* Page 02h: PLL settings */
@@ -194,6 +222,12 @@ struct tda998x_priv {
194#define REG_PLL_SCGR1 REG(0x02, 0x09) /* read/write */ 222#define REG_PLL_SCGR1 REG(0x02, 0x09) /* read/write */
195#define REG_PLL_SCGR2 REG(0x02, 0x0a) /* read/write */ 223#define REG_PLL_SCGR2 REG(0x02, 0x0a) /* read/write */
196#define REG_AUDIO_DIV REG(0x02, 0x0e) /* read/write */ 224#define REG_AUDIO_DIV REG(0x02, 0x0e) /* read/write */
225# define AUDIO_DIV_SERCLK_1 0
226# define AUDIO_DIV_SERCLK_2 1
227# define AUDIO_DIV_SERCLK_4 2
228# define AUDIO_DIV_SERCLK_8 3
229# define AUDIO_DIV_SERCLK_16 4
230# define AUDIO_DIV_SERCLK_32 5
197#define REG_SEL_CLK REG(0x02, 0x11) /* read/write */ 231#define REG_SEL_CLK REG(0x02, 0x11) /* read/write */
198# define SEL_CLK_SEL_CLK1 (1 << 0) 232# define SEL_CLK_SEL_CLK1 (1 << 0)
199# define SEL_CLK_SEL_VRF_CLK(x) (((x) & 3) << 1) 233# define SEL_CLK_SEL_VRF_CLK(x) (((x) & 3) << 1)
@@ -212,6 +246,11 @@ struct tda998x_priv {
212 246
213 247
214/* Page 10h: information frames and packets */ 248/* Page 10h: information frames and packets */
249#define REG_IF1_HB0 REG(0x10, 0x20) /* read/write */
250#define REG_IF2_HB0 REG(0x10, 0x40) /* read/write */
251#define REG_IF3_HB0 REG(0x10, 0x60) /* read/write */
252#define REG_IF4_HB0 REG(0x10, 0x80) /* read/write */
253#define REG_IF5_HB0 REG(0x10, 0xa0) /* read/write */
215 254
216 255
217/* Page 11h: audio settings and content info packets */ 256/* Page 11h: audio settings and content info packets */
@@ -221,14 +260,39 @@ struct tda998x_priv {
221# define AIP_CNTRL_0_LAYOUT (1 << 2) 260# define AIP_CNTRL_0_LAYOUT (1 << 2)
222# define AIP_CNTRL_0_ACR_MAN (1 << 5) 261# define AIP_CNTRL_0_ACR_MAN (1 << 5)
223# define AIP_CNTRL_0_RST_CTS (1 << 6) 262# define AIP_CNTRL_0_RST_CTS (1 << 6)
263#define REG_CA_I2S REG(0x11, 0x01) /* read/write */
264# define CA_I2S_CA_I2S(x) (((x) & 31) << 0)
265# define CA_I2S_HBR_CHSTAT (1 << 6)
266#define REG_LATENCY_RD REG(0x11, 0x04) /* read/write */
267#define REG_ACR_CTS_0 REG(0x11, 0x05) /* read/write */
268#define REG_ACR_CTS_1 REG(0x11, 0x06) /* read/write */
269#define REG_ACR_CTS_2 REG(0x11, 0x07) /* read/write */
270#define REG_ACR_N_0 REG(0x11, 0x08) /* read/write */
271#define REG_ACR_N_1 REG(0x11, 0x09) /* read/write */
272#define REG_ACR_N_2 REG(0x11, 0x0a) /* read/write */
273#define REG_CTS_N REG(0x11, 0x0c) /* read/write */
274# define CTS_N_K(x) (((x) & 7) << 0)
275# define CTS_N_M(x) (((x) & 3) << 4)
224#define REG_ENC_CNTRL REG(0x11, 0x0d) /* read/write */ 276#define REG_ENC_CNTRL REG(0x11, 0x0d) /* read/write */
225# define ENC_CNTRL_RST_ENC (1 << 0) 277# define ENC_CNTRL_RST_ENC (1 << 0)
226# define ENC_CNTRL_RST_SEL (1 << 1) 278# define ENC_CNTRL_RST_SEL (1 << 1)
227# define ENC_CNTRL_CTL_CODE(x) (((x) & 3) << 2) 279# define ENC_CNTRL_CTL_CODE(x) (((x) & 3) << 2)
280#define REG_DIP_FLAGS REG(0x11, 0x0e) /* read/write */
281# define DIP_FLAGS_ACR (1 << 0)
282# define DIP_FLAGS_GC (1 << 1)
283#define REG_DIP_IF_FLAGS REG(0x11, 0x0f) /* read/write */
284# define DIP_IF_FLAGS_IF1 (1 << 1)
285# define DIP_IF_FLAGS_IF2 (1 << 2)
286# define DIP_IF_FLAGS_IF3 (1 << 3)
287# define DIP_IF_FLAGS_IF4 (1 << 4)
288# define DIP_IF_FLAGS_IF5 (1 << 5)
289#define REG_CH_STAT_B(x) REG(0x11, 0x14 + (x)) /* read/write */
228 290
229 291
230/* Page 12h: HDCP and OTP */ 292/* Page 12h: HDCP and OTP */
231#define REG_TX3 REG(0x12, 0x9a) /* read/write */ 293#define REG_TX3 REG(0x12, 0x9a) /* read/write */
294#define REG_TX4 REG(0x12, 0x9b) /* read/write */
295# define TX4_PD_RAM (1 << 1)
232#define REG_TX33 REG(0x12, 0xb8) /* read/write */ 296#define REG_TX33 REG(0x12, 0xb8) /* read/write */
233# define TX33_HDMI (1 << 1) 297# define TX33_HDMI (1 << 1)
234 298
@@ -338,6 +402,23 @@ fail:
338 return ret; 402 return ret;
339} 403}
340 404
405static void
406reg_write_range(struct drm_encoder *encoder, uint16_t reg, uint8_t *p, int cnt)
407{
408 struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
409 uint8_t buf[cnt+1];
410 int ret;
411
412 buf[0] = REG2ADDR(reg);
413 memcpy(&buf[1], p, cnt);
414
415 set_page(encoder, reg);
416
417 ret = i2c_master_send(client, buf, cnt + 1);
418 if (ret < 0)
419 dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
420}
421
341static uint8_t 422static uint8_t
342reg_read(struct drm_encoder *encoder, uint16_t reg) 423reg_read(struct drm_encoder *encoder, uint16_t reg)
343{ 424{
@@ -406,13 +487,176 @@ tda998x_reset(struct drm_encoder *encoder)
406 reg_write(encoder, REG_SERIALIZER, 0x00); 487 reg_write(encoder, REG_SERIALIZER, 0x00);
407 reg_write(encoder, REG_BUFFER_OUT, 0x00); 488 reg_write(encoder, REG_BUFFER_OUT, 0x00);
408 reg_write(encoder, REG_PLL_SCG1, 0x00); 489 reg_write(encoder, REG_PLL_SCG1, 0x00);
409 reg_write(encoder, REG_AUDIO_DIV, 0x03); 490 reg_write(encoder, REG_AUDIO_DIV, AUDIO_DIV_SERCLK_8);
410 reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK); 491 reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
411 reg_write(encoder, REG_PLL_SCGN1, 0xfa); 492 reg_write(encoder, REG_PLL_SCGN1, 0xfa);
412 reg_write(encoder, REG_PLL_SCGN2, 0x00); 493 reg_write(encoder, REG_PLL_SCGN2, 0x00);
413 reg_write(encoder, REG_PLL_SCGR1, 0x5b); 494 reg_write(encoder, REG_PLL_SCGR1, 0x5b);
414 reg_write(encoder, REG_PLL_SCGR2, 0x00); 495 reg_write(encoder, REG_PLL_SCGR2, 0x00);
415 reg_write(encoder, REG_PLL_SCG2, 0x10); 496 reg_write(encoder, REG_PLL_SCG2, 0x10);
497
 498 /* Write the default value to the MUX register */
499 reg_write(encoder, REG_MUX_VP_VIP_OUT, 0x24);
500}
501
502static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes)
503{
504 uint8_t sum = 0;
505
506 while (bytes--)
507 sum += *buf++;
508 return (255 - sum) + 1;
509}
510
511#define HB(x) (x)
512#define PB(x) (HB(2) + 1 + (x))
513
514static void
515tda998x_write_if(struct drm_encoder *encoder, uint8_t bit, uint16_t addr,
516 uint8_t *buf, size_t size)
517{
518 buf[PB(0)] = tda998x_cksum(buf, size);
519
520 reg_clear(encoder, REG_DIP_IF_FLAGS, bit);
521 reg_write_range(encoder, addr, buf, size);
522 reg_set(encoder, REG_DIP_IF_FLAGS, bit);
523}
524
525static void
526tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p)
527{
528 uint8_t buf[PB(5) + 1];
529
530 buf[HB(0)] = 0x84;
531 buf[HB(1)] = 0x01;
532 buf[HB(2)] = 10;
533 buf[PB(0)] = 0;
534 buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */
535 buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */
536 buf[PB(4)] = p->audio_frame[4];
537 buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */
538
539 tda998x_write_if(encoder, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
540 sizeof(buf));
541}
542
543static void
544tda998x_write_avi(struct drm_encoder *encoder, struct drm_display_mode *mode)
545{
546 uint8_t buf[PB(13) + 1];
547
548 memset(buf, 0, sizeof(buf));
549 buf[HB(0)] = 0x82;
550 buf[HB(1)] = 0x02;
551 buf[HB(2)] = 13;
552 buf[PB(4)] = drm_match_cea_mode(mode);
553
554 tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf,
555 sizeof(buf));
556}
557
558static void tda998x_audio_mute(struct drm_encoder *encoder, bool on)
559{
560 if (on) {
561 reg_set(encoder, REG_SOFTRESET, SOFTRESET_AUDIO);
562 reg_clear(encoder, REG_SOFTRESET, SOFTRESET_AUDIO);
563 reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
564 } else {
565 reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
566 }
567}
568
569static void
570tda998x_configure_audio(struct drm_encoder *encoder,
571 struct drm_display_mode *mode, struct tda998x_encoder_params *p)
572{
573 uint8_t buf[6], clksel_aip, clksel_fs, ca_i2s, cts_n, adiv;
574 uint32_t n;
575
576 /* Enable audio ports */
577 reg_write(encoder, REG_ENA_AP, p->audio_cfg);
578 reg_write(encoder, REG_ENA_ACLK, p->audio_clk_cfg);
579
580 /* Set audio input source */
581 switch (p->audio_format) {
582 case AFMT_SPDIF:
583 reg_write(encoder, REG_MUX_AP, 0x40);
584 clksel_aip = AIP_CLKSEL_AIP(0);
585 /* FS64SPDIF */
586 clksel_fs = AIP_CLKSEL_FS(2);
587 cts_n = CTS_N_M(3) | CTS_N_K(3);
588 ca_i2s = 0;
589 break;
590
591 case AFMT_I2S:
592 reg_write(encoder, REG_MUX_AP, 0x64);
593 clksel_aip = AIP_CLKSEL_AIP(1);
594 /* ACLK */
595 clksel_fs = AIP_CLKSEL_FS(0);
596 cts_n = CTS_N_M(3) | CTS_N_K(3);
597 ca_i2s = CA_I2S_CA_I2S(0);
598 break;
599
600 default:
601 BUG();
602 return;
603 }
604
605 reg_write(encoder, REG_AIP_CLKSEL, clksel_aip);
606 reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_LAYOUT);
607
608 /* Enable automatic CTS generation */
609 reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_ACR_MAN);
610 reg_write(encoder, REG_CTS_N, cts_n);
611
612 /*
613 * Audio input somehow depends on HDMI line rate which is
614 * related to pixclk. Testing showed that modes with pixclk
615 * >100MHz need a larger divider while <40MHz need the default.
616 * There is no detailed info in the datasheet, so we just
617 * assume 100MHz requires larger divider.
618 */
619 if (mode->clock > 100000)
620 adiv = AUDIO_DIV_SERCLK_16;
621 else
622 adiv = AUDIO_DIV_SERCLK_8;
623 reg_write(encoder, REG_AUDIO_DIV, adiv);
624
625 /*
626 * This is the approximate value of N, which happens to be
627 * the recommended values for non-coherent clocks.
628 */
629 n = 128 * p->audio_sample_rate / 1000;
630
631 /* Write the CTS and N values */
632 buf[0] = 0x44;
633 buf[1] = 0x42;
634 buf[2] = 0x01;
635 buf[3] = n;
636 buf[4] = n >> 8;
637 buf[5] = n >> 16;
638 reg_write_range(encoder, REG_ACR_CTS_0, buf, 6);
639
640 /* Set CTS clock reference */
641 reg_write(encoder, REG_AIP_CLKSEL, clksel_aip | clksel_fs);
642
643 /* Reset CTS generator */
644 reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
645 reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
646
647 /* Write the channel status */
648 buf[0] = 0x04;
649 buf[1] = 0x00;
650 buf[2] = 0x00;
651 buf[3] = 0xf1;
652 reg_write_range(encoder, REG_CH_STAT_B(0), buf, 4);
653
654 tda998x_audio_mute(encoder, true);
655 mdelay(20);
656 tda998x_audio_mute(encoder, false);
657
658 /* Write the audio information packet */
659 tda998x_write_aif(encoder, p);
416} 660}
417 661
418/* DRM encoder functions */ 662/* DRM encoder functions */
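For reference, the N value programmed in tda998x_configure_audio() above follows n = 128 * audio_sample_rate / 1000. The short standalone sketch below just evaluates that arithmetic; it is illustrative rather than code from the patch, and the HDMI-recommended comparison values (4096 / 6272 / 6144 for 32 / 44.1 / 48 kHz) are quoted from memory, not from this diff.

	/* Illustrative sketch only: the ACR N approximation used by
	 * tda998x_configure_audio(), compared against assumed
	 * HDMI-recommended values.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned int fs[]  = { 32000, 44100, 48000 };
		unsigned int rec[] = {  4096,  6272,  6144 };	/* assumed spec values */
		unsigned int i;

		for (i = 0; i < 3; i++) {
			unsigned int n = 128 * fs[i] / 1000;	/* driver's approximation */
			printf("fs=%u Hz: n=%u (recommended %u)\n", fs[i], n, rec[i]);
		}
		return 0;
	}

As the output shows, the approximation is exact for 32 kHz and 48 kHz and only close for 44.1 kHz, which is why the comment in the hunk above calls it an approximate value.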
@@ -420,6 +664,23 @@ tda998x_reset(struct drm_encoder *encoder)
420static void 664static void
421tda998x_encoder_set_config(struct drm_encoder *encoder, void *params) 665tda998x_encoder_set_config(struct drm_encoder *encoder, void *params)
422{ 666{
667 struct tda998x_priv *priv = to_tda998x_priv(encoder);
668 struct tda998x_encoder_params *p = params;
669
670 priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
671 (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
672 VIP_CNTRL_0_SWAP_B(p->swap_b) |
673 (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
674 priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
675 (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
676 VIP_CNTRL_1_SWAP_D(p->swap_d) |
677 (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
678 priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
679 (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
680 VIP_CNTRL_2_SWAP_F(p->swap_f) |
681 (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
682
683 priv->params = *p;
423} 684}
424 685
425static void 686static void
@@ -436,18 +697,14 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
436 697
437 switch (mode) { 698 switch (mode) {
438 case DRM_MODE_DPMS_ON: 699 case DRM_MODE_DPMS_ON:
439 /* enable audio and video ports */ 700 /* enable video ports, audio will be enabled later */
440 reg_write(encoder, REG_ENA_AP, 0xff);
441 reg_write(encoder, REG_ENA_VP_0, 0xff); 701 reg_write(encoder, REG_ENA_VP_0, 0xff);
442 reg_write(encoder, REG_ENA_VP_1, 0xff); 702 reg_write(encoder, REG_ENA_VP_1, 0xff);
443 reg_write(encoder, REG_ENA_VP_2, 0xff); 703 reg_write(encoder, REG_ENA_VP_2, 0xff);
444 /* set muxing after enabling ports: */ 704 /* set muxing after enabling ports: */
445 reg_write(encoder, REG_VIP_CNTRL_0, 705 reg_write(encoder, REG_VIP_CNTRL_0, priv->vip_cntrl_0);
446 VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3)); 706 reg_write(encoder, REG_VIP_CNTRL_1, priv->vip_cntrl_1);
447 reg_write(encoder, REG_VIP_CNTRL_1, 707 reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
448 VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1));
449 reg_write(encoder, REG_VIP_CNTRL_2,
450 VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5));
451 break; 708 break;
452 case DRM_MODE_DPMS_OFF: 709 case DRM_MODE_DPMS_OFF:
453 /* disable audio and video ports */ 710 /* disable audio and video ports */
@@ -494,43 +751,78 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
494 struct drm_display_mode *adjusted_mode) 751 struct drm_display_mode *adjusted_mode)
495{ 752{
496 struct tda998x_priv *priv = to_tda998x_priv(encoder); 753 struct tda998x_priv *priv = to_tda998x_priv(encoder);
497 uint16_t hs_start, hs_end, line_start, line_end; 754 uint16_t ref_pix, ref_line, n_pix, n_line;
498 uint16_t vwin_start, vwin_end, de_start, de_end; 755 uint16_t hs_pix_s, hs_pix_e;
499 uint16_t ref_pix, ref_line, pix_start2; 756 uint16_t vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e;
757 uint16_t vs2_pix_s, vs2_pix_e, vs2_line_s, vs2_line_e;
758 uint16_t vwin1_line_s, vwin1_line_e;
759 uint16_t vwin2_line_s, vwin2_line_e;
760 uint16_t de_pix_s, de_pix_e;
500 uint8_t reg, div, rep; 761 uint8_t reg, div, rep;
501 762
502 hs_start = mode->hsync_start - mode->hdisplay; 763 /*
503 hs_end = mode->hsync_end - mode->hdisplay; 764 * Internally TDA998x is using ITU-R BT.656 style sync but
504 line_start = 1; 765 * we get VESA style sync. TDA998x is using a reference pixel
505 line_end = 1 + mode->vsync_end - mode->vsync_start; 766 * relative to ITU to sync to the input frame and for output
506 vwin_start = mode->vtotal - mode->vsync_start; 767 * sync generation. Currently, we are using reference detection
507 vwin_end = vwin_start + mode->vdisplay; 768 * from HS/VS, i.e. REFPIX/REFLINE denote frame start sync point
508 de_start = mode->htotal - mode->hdisplay; 769 * which is position of rising VS with coincident rising HS.
509 de_end = mode->htotal; 770 *
510 771 * Now there is some issues to take care of:
511 pix_start2 = 0; 772 * - HDMI data islands require sync-before-active
512 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 773 * - TDA998x register values must be > 0 to be enabled
513 pix_start2 = (mode->htotal / 2) + hs_start; 774 * - REFLINE needs an additional offset of +1
514 775 * - REFPIX needs an addtional offset of +1 for UYUV and +3 for RGB
515 /* TODO how is this value calculated? It is 2 for all common 776 *
516 * formats in the tables in out of tree nxp driver (assuming 777 * So we add +1 to all horizontal and vertical register values,
 516 * I've properly deciphered their byzantine table system) 778 * - REFPIX needs an additional offset of +1 for UYUV and +3 for RGB
518 */ 779 */
519 ref_line = 2; 780 n_pix = mode->htotal;
520 781 n_line = mode->vtotal;
521 /* this might changes for other color formats from the CRTC: */ 782
522 ref_pix = 3 + hs_start; 783 hs_pix_e = mode->hsync_end - mode->hdisplay;
784 hs_pix_s = mode->hsync_start - mode->hdisplay;
785 de_pix_e = mode->htotal;
786 de_pix_s = mode->htotal - mode->hdisplay;
787 ref_pix = 3 + hs_pix_s;
788
789 /*
790 * Attached LCD controllers may generate broken sync. Allow
791 * those to adjust the position of the rising VS edge by adding
792 * HSKEW to ref_pix.
793 */
794 if (adjusted_mode->flags & DRM_MODE_FLAG_HSKEW)
795 ref_pix += adjusted_mode->hskew;
796
797 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0) {
798 ref_line = 1 + mode->vsync_start - mode->vdisplay;
799 vwin1_line_s = mode->vtotal - mode->vdisplay - 1;
800 vwin1_line_e = vwin1_line_s + mode->vdisplay;
801 vs1_pix_s = vs1_pix_e = hs_pix_s;
802 vs1_line_s = mode->vsync_start - mode->vdisplay;
803 vs1_line_e = vs1_line_s +
804 mode->vsync_end - mode->vsync_start;
805 vwin2_line_s = vwin2_line_e = 0;
806 vs2_pix_s = vs2_pix_e = 0;
807 vs2_line_s = vs2_line_e = 0;
808 } else {
809 ref_line = 1 + (mode->vsync_start - mode->vdisplay)/2;
810 vwin1_line_s = (mode->vtotal - mode->vdisplay)/2;
811 vwin1_line_e = vwin1_line_s + mode->vdisplay/2;
812 vs1_pix_s = vs1_pix_e = hs_pix_s;
813 vs1_line_s = (mode->vsync_start - mode->vdisplay)/2;
814 vs1_line_e = vs1_line_s +
815 (mode->vsync_end - mode->vsync_start)/2;
816 vwin2_line_s = vwin1_line_s + mode->vtotal/2;
817 vwin2_line_e = vwin2_line_s + mode->vdisplay/2;
818 vs2_pix_s = vs2_pix_e = hs_pix_s + mode->htotal/2;
 819 vs2_line_s = vs1_line_s + mode->vtotal/2;
820 vs2_line_e = vs2_line_s +
821 (mode->vsync_end - mode->vsync_start)/2;
822 }
523 823
524 div = 148500 / mode->clock; 824 div = 148500 / mode->clock;
525 825
526 DBG("clock=%d, div=%u", mode->clock, div);
527 DBG("hs_start=%u, hs_end=%u, line_start=%u, line_end=%u",
528 hs_start, hs_end, line_start, line_end);
529 DBG("vwin_start=%u, vwin_end=%u, de_start=%u, de_end=%u",
530 vwin_start, vwin_end, de_start, de_end);
531 DBG("ref_line=%u, ref_pix=%u, pix_start2=%u",
532 ref_line, ref_pix, pix_start2);
533
534 /* mute the audio FIFO: */ 826 /* mute the audio FIFO: */
535 reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO); 827 reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
536 828
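To make the reference pixel/line arithmetic in the comment above concrete, here is a minimal standalone sketch evaluating the progressive-scan branch for an assumed CEA-861 1920x1080@60 mode; the timing numbers are assumptions for illustration, not values taken from this patch.

	/* Illustrative sketch only: progressive-branch timing maths from
	 * tda998x_encoder_mode_set(), using assumed 1080p60 (CEA VIC 16) timings.
	 */
	#include <stdio.h>

	int main(void)
	{
		/* Assumed 1920x1080@60 timings */
		unsigned int hdisplay = 1920, hsync_start = 2008, hsync_end = 2052, htotal = 2200;
		unsigned int vdisplay = 1080, vsync_start = 1084, vsync_end = 1089, vtotal = 1125;

		unsigned int hs_pix_s = hsync_start - hdisplay;		/* 88 */
		unsigned int hs_pix_e = hsync_end - hdisplay;			/* 132 */
		unsigned int de_pix_s = htotal - hdisplay;			/* 280 */
		unsigned int de_pix_e = htotal;					/* 2200 */
		unsigned int ref_pix  = 3 + hs_pix_s;				/* 91: +3 for RGB input */
		unsigned int ref_line = 1 + vsync_start - vdisplay;		/* 5 */
		unsigned int vwin1_line_s = vtotal - vdisplay - 1;		/* 44 */
		unsigned int vwin1_line_e = vwin1_line_s + vdisplay;		/* 1124 */
		unsigned int vs1_line_s = vsync_start - vdisplay;		/* 4 */
		unsigned int vs1_line_e = vs1_line_s + vsync_end - vsync_start;	/* 9 */

		printf("ref_pix=%u ref_line=%u hs=[%u,%u] de=[%u,%u] vwin1=[%u,%u] vs1=[%u,%u]\n",
		       ref_pix, ref_line, hs_pix_s, hs_pix_e, de_pix_s, de_pix_e,
		       vwin1_line_s, vwin1_line_e, vs1_line_s, vs1_line_e);
		return 0;
	}

Every horizontal and vertical value stays positive and non-zero for this mode, which is the constraint the comment above calls out for the TDA998x registers.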
@@ -561,9 +853,6 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
561 reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) | 853 reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) |
562 PLL_SERIAL_2_SRL_PR(rep)); 854 PLL_SERIAL_2_SRL_PR(rep));
563 855
564 reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, pix_start2);
565 reg_write16(encoder, REG_VS_PIX_END_2_MSB, pix_start2);
566
567 /* set color matrix bypass flag: */ 856 /* set color matrix bypass flag: */
568 reg_set(encoder, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP); 857 reg_set(encoder, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP);
569 858
@@ -572,47 +861,75 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
572 861
573 reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_MTHD); 862 reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_MTHD);
574 863
864 /*
865 * Sync on rising HSYNC/VSYNC
866 */
575 reg_write(encoder, REG_VIP_CNTRL_3, 0); 867 reg_write(encoder, REG_VIP_CNTRL_3, 0);
576 reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_SYNC_HS); 868 reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_SYNC_HS);
869
870 /*
871 * TDA19988 requires high-active sync at input stage,
872 * so invert low-active sync provided by master encoder here
873 */
874 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
875 reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL);
577 if (mode->flags & DRM_MODE_FLAG_NVSYNC) 876 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
578 reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_V_TGL); 877 reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_V_TGL);
579 878
879 /*
880 * Always generate sync polarity relative to input sync and
881 * revert input stage toggled sync at output stage
882 */
883 reg = TBG_CNTRL_1_TGL_EN;
580 if (mode->flags & DRM_MODE_FLAG_NHSYNC) 884 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
581 reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL); 885 reg |= TBG_CNTRL_1_H_TGL;
886 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
887 reg |= TBG_CNTRL_1_V_TGL;
888 reg_write(encoder, REG_TBG_CNTRL_1, reg);
582 889
583 reg_write(encoder, REG_VIDFORMAT, 0x00); 890 reg_write(encoder, REG_VIDFORMAT, 0x00);
584 reg_write16(encoder, REG_NPIX_MSB, mode->hdisplay - 1); 891 reg_write16(encoder, REG_REFPIX_MSB, ref_pix);
585 reg_write16(encoder, REG_NLINE_MSB, mode->vdisplay - 1); 892 reg_write16(encoder, REG_REFLINE_MSB, ref_line);
586 reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, line_start); 893 reg_write16(encoder, REG_NPIX_MSB, n_pix);
587 reg_write16(encoder, REG_VS_LINE_END_1_MSB, line_end); 894 reg_write16(encoder, REG_NLINE_MSB, n_line);
588 reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, hs_start); 895 reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, vs1_line_s);
589 reg_write16(encoder, REG_VS_PIX_END_1_MSB, hs_start); 896 reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, vs1_pix_s);
590 reg_write16(encoder, REG_HS_PIX_START_MSB, hs_start); 897 reg_write16(encoder, REG_VS_LINE_END_1_MSB, vs1_line_e);
591 reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_end); 898 reg_write16(encoder, REG_VS_PIX_END_1_MSB, vs1_pix_e);
592 reg_write16(encoder, REG_VWIN_START_1_MSB, vwin_start); 899 reg_write16(encoder, REG_VS_LINE_STRT_2_MSB, vs2_line_s);
593 reg_write16(encoder, REG_VWIN_END_1_MSB, vwin_end); 900 reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, vs2_pix_s);
594 reg_write16(encoder, REG_DE_START_MSB, de_start); 901 reg_write16(encoder, REG_VS_LINE_END_2_MSB, vs2_line_e);
595 reg_write16(encoder, REG_DE_STOP_MSB, de_end); 902 reg_write16(encoder, REG_VS_PIX_END_2_MSB, vs2_pix_e);
903 reg_write16(encoder, REG_HS_PIX_START_MSB, hs_pix_s);
904 reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_pix_e);
905 reg_write16(encoder, REG_VWIN_START_1_MSB, vwin1_line_s);
906 reg_write16(encoder, REG_VWIN_END_1_MSB, vwin1_line_e);
907 reg_write16(encoder, REG_VWIN_START_2_MSB, vwin2_line_s);
908 reg_write16(encoder, REG_VWIN_END_2_MSB, vwin2_line_e);
909 reg_write16(encoder, REG_DE_START_MSB, de_pix_s);
910 reg_write16(encoder, REG_DE_STOP_MSB, de_pix_e);
596 911
597 if (priv->rev == TDA19988) { 912 if (priv->rev == TDA19988) {
598 /* let incoming pixels fill the active space (if any) */ 913 /* let incoming pixels fill the active space (if any) */
599 reg_write(encoder, REG_ENABLE_SPACE, 0x01); 914 reg_write(encoder, REG_ENABLE_SPACE, 0x01);
600 } 915 }
601 916
602 reg_write16(encoder, REG_REFPIX_MSB, ref_pix);
603 reg_write16(encoder, REG_REFLINE_MSB, ref_line);
604
605 reg = TBG_CNTRL_1_VHX_EXT_DE |
606 TBG_CNTRL_1_VHX_EXT_HS |
607 TBG_CNTRL_1_VHX_EXT_VS |
608 TBG_CNTRL_1_DWIN_DIS | /* HDCP off */
609 TBG_CNTRL_1_VH_TGL_2;
610 if (mode->flags & (DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC))
611 reg |= TBG_CNTRL_1_VH_TGL_0;
612 reg_set(encoder, REG_TBG_CNTRL_1, reg);
613
614 /* must be last register set: */ 917 /* must be last register set: */
615 reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_ONCE); 918 reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_ONCE);
919
920 /* Only setup the info frames if the sink is HDMI */
921 if (priv->is_hdmi_sink) {
922 /* We need to turn HDMI HDCP stuff on to get audio through */
923 reg_clear(encoder, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS);
924 reg_write(encoder, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(1));
925 reg_set(encoder, REG_TX33, TX33_HDMI);
926
927 tda998x_write_avi(encoder, adjusted_mode);
928
929 if (priv->params.audio_cfg)
930 tda998x_configure_audio(encoder, adjusted_mode,
931 &priv->params);
932 }
616} 933}
617 934
618static enum drm_connector_status 935static enum drm_connector_status
@@ -673,6 +990,7 @@ read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
673static uint8_t * 990static uint8_t *
674do_get_edid(struct drm_encoder *encoder) 991do_get_edid(struct drm_encoder *encoder)
675{ 992{
993 struct tda998x_priv *priv = to_tda998x_priv(encoder);
676 int j = 0, valid_extensions = 0; 994 int j = 0, valid_extensions = 0;
677 uint8_t *block, *new; 995 uint8_t *block, *new;
678 bool print_bad_edid = drm_debug & DRM_UT_KMS; 996 bool print_bad_edid = drm_debug & DRM_UT_KMS;
@@ -680,6 +998,9 @@ do_get_edid(struct drm_encoder *encoder)
680 if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) 998 if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
681 return NULL; 999 return NULL;
682 1000
1001 if (priv->rev == TDA19988)
1002 reg_clear(encoder, REG_TX4, TX4_PD_RAM);
1003
683 /* base block fetch */ 1004 /* base block fetch */
684 if (read_edid_block(encoder, block, 0)) 1005 if (read_edid_block(encoder, block, 0))
685 goto fail; 1006 goto fail;
@@ -689,7 +1010,7 @@ do_get_edid(struct drm_encoder *encoder)
689 1010
690 /* if there's no extensions, we're done */ 1011 /* if there's no extensions, we're done */
691 if (block[0x7e] == 0) 1012 if (block[0x7e] == 0)
692 return block; 1013 goto done;
693 1014
694 new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL); 1015 new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
695 if (!new) 1016 if (!new)
@@ -716,9 +1037,15 @@ do_get_edid(struct drm_encoder *encoder)
716 block = new; 1037 block = new;
717 } 1038 }
718 1039
1040done:
1041 if (priv->rev == TDA19988)
1042 reg_set(encoder, REG_TX4, TX4_PD_RAM);
1043
719 return block; 1044 return block;
720 1045
721fail: 1046fail:
1047 if (priv->rev == TDA19988)
1048 reg_set(encoder, REG_TX4, TX4_PD_RAM);
722 dev_warn(encoder->dev->dev, "failed to read EDID\n"); 1049 dev_warn(encoder->dev->dev, "failed to read EDID\n");
723 kfree(block); 1050 kfree(block);
724 return NULL; 1051 return NULL;
@@ -728,12 +1055,14 @@ static int
728tda998x_encoder_get_modes(struct drm_encoder *encoder, 1055tda998x_encoder_get_modes(struct drm_encoder *encoder,
729 struct drm_connector *connector) 1056 struct drm_connector *connector)
730{ 1057{
1058 struct tda998x_priv *priv = to_tda998x_priv(encoder);
731 struct edid *edid = (struct edid *)do_get_edid(encoder); 1059 struct edid *edid = (struct edid *)do_get_edid(encoder);
732 int n = 0; 1060 int n = 0;
733 1061
734 if (edid) { 1062 if (edid) {
735 drm_mode_connector_update_edid_property(connector, edid); 1063 drm_mode_connector_update_edid_property(connector, edid);
736 n = drm_add_edid_modes(connector, edid); 1064 n = drm_add_edid_modes(connector, edid);
1065 priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid);
737 kfree(edid); 1066 kfree(edid);
738 } 1067 }
739 1068
@@ -807,6 +1136,10 @@ tda998x_encoder_init(struct i2c_client *client,
807 if (!priv) 1136 if (!priv)
808 return -ENOMEM; 1137 return -ENOMEM;
809 1138
1139 priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
1140 priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
1141 priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
1142
810 priv->current_page = 0; 1143 priv->current_page = 0;
811 priv->cec = i2c_new_dummy(client->adapter, 0x34); 1144 priv->cec = i2c_new_dummy(client->adapter, 0x34);
812 priv->dpms = DRM_MODE_DPMS_OFF; 1145 priv->dpms = DRM_MODE_DPMS_OFF;
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index ada49eda489f..ab1892eb1074 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -113,7 +113,6 @@ static const struct file_operations i810_buffer_fops = {
113 .release = drm_release, 113 .release = drm_release,
114 .unlocked_ioctl = drm_ioctl, 114 .unlocked_ioctl = drm_ioctl,
115 .mmap = i810_mmap_buffers, 115 .mmap = i810_mmap_buffers,
116 .fasync = drm_fasync,
117#ifdef CONFIG_COMPAT 116#ifdef CONFIG_COMPAT
118 .compat_ioctl = drm_compat_ioctl, 117 .compat_ioctl = drm_compat_ioctl,
119#endif 118#endif
@@ -1241,7 +1240,7 @@ int i810_driver_dma_quiescent(struct drm_device *dev)
1241 return 0; 1240 return 0;
1242} 1241}
1243 1242
1244struct drm_ioctl_desc i810_ioctls[] = { 1243const struct drm_ioctl_desc i810_ioctls[] = {
1245 DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 1244 DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1246 DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED), 1245 DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
1247 DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED), 1246 DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 2e91fc3580b4..d8180d22cedd 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -49,7 +49,6 @@ static const struct file_operations i810_driver_fops = {
49 .unlocked_ioctl = drm_ioctl, 49 .unlocked_ioctl = drm_ioctl,
50 .mmap = drm_mmap, 50 .mmap = drm_mmap,
51 .poll = drm_poll, 51 .poll = drm_poll,
52 .fasync = drm_fasync,
53#ifdef CONFIG_COMPAT 52#ifdef CONFIG_COMPAT
54 .compat_ioctl = drm_compat_ioctl, 53 .compat_ioctl = drm_compat_ioctl,
55#endif 54#endif
@@ -58,7 +57,7 @@ static const struct file_operations i810_driver_fops = {
58 57
59static struct drm_driver driver = { 58static struct drm_driver driver = {
60 .driver_features = 59 .driver_features =
61 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | 60 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
62 DRIVER_HAVE_DMA, 61 DRIVER_HAVE_DMA,
63 .dev_priv_size = sizeof(drm_i810_buf_priv_t), 62 .dev_priv_size = sizeof(drm_i810_buf_priv_t),
64 .load = i810_driver_load, 63 .load = i810_driver_load,
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index 6e0acad9e0f5..d4d16eddd651 100644
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -125,7 +125,7 @@ extern void i810_driver_preclose(struct drm_device *dev,
125extern int i810_driver_device_is_agp(struct drm_device *dev); 125extern int i810_driver_device_is_agp(struct drm_device *dev);
126 126
127extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 127extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
128extern struct drm_ioctl_desc i810_ioctls[]; 128extern const struct drm_ioctl_desc i810_ioctls[];
129extern int i810_max_ioctl; 129extern int i810_max_ioctl;
130 130
131#define I810_BASE(reg) ((unsigned long) \ 131#define I810_BASE(reg) ((unsigned long) \
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 40034ecefd3b..b8449a84a0dc 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -5,6 +5,7 @@
5ccflags-y := -Iinclude/drm 5ccflags-y := -Iinclude/drm
6i915-y := i915_drv.o i915_dma.o i915_irq.o \ 6i915-y := i915_drv.o i915_dma.o i915_irq.o \
7 i915_debugfs.o \ 7 i915_debugfs.o \
8 i915_gpu_error.o \
8 i915_suspend.o \ 9 i915_suspend.o \
9 i915_gem.o \ 10 i915_gem.o \
10 i915_gem_context.o \ 11 i915_gem_context.o \
@@ -37,6 +38,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
37 intel_sprite.o \ 38 intel_sprite.o \
38 intel_opregion.o \ 39 intel_opregion.o \
39 intel_sideband.o \ 40 intel_sideband.o \
41 intel_uncore.o \
40 dvo_ch7xxx.o \ 42 dvo_ch7xxx.o \
41 dvo_ch7017.o \ 43 dvo_ch7017.o \
42 dvo_ivch.o \ 44 dvo_ivch.o \
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 757e0fa11043..af42e94f6846 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -307,7 +307,7 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
307 idf |= CH7xxx_IDF_HSP; 307 idf |= CH7xxx_IDF_HSP;
308 308
309 if (mode->flags & DRM_MODE_FLAG_PVSYNC) 309 if (mode->flags & DRM_MODE_FLAG_PVSYNC)
310 idf |= CH7xxx_IDF_HSP; 310 idf |= CH7xxx_IDF_VSP;
311 311
312 ch7xxx_writeb(dvo, CH7xxx_IDF, idf); 312 ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
313} 313}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 47d6c748057e..55ab9246e1b9 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -30,7 +30,8 @@
30#include <linux/debugfs.h> 30#include <linux/debugfs.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/export.h> 32#include <linux/export.h>
33#include <generated/utsrelease.h> 33#include <linux/list_sort.h>
34#include <asm/msr-index.h>
34#include <drm/drmP.h> 35#include <drm/drmP.h>
35#include "intel_drv.h" 36#include "intel_drv.h"
36#include "intel_ringbuffer.h" 37#include "intel_ringbuffer.h"
@@ -90,41 +91,45 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
90 } 91 }
91} 92}
92 93
93static const char *cache_level_str(int type) 94static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
94{ 95{
95 switch (type) { 96 return obj->has_global_gtt_mapping ? "g" : " ";
96 case I915_CACHE_NONE: return " uncached";
97 case I915_CACHE_LLC: return " snooped (LLC)";
98 case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
99 default: return "";
100 }
101} 97}
102 98
103static void 99static void
104describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 100describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
105{ 101{
106 seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s", 102 struct i915_vma *vma;
103 seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
107 &obj->base, 104 &obj->base,
108 get_pin_flag(obj), 105 get_pin_flag(obj),
109 get_tiling_flag(obj), 106 get_tiling_flag(obj),
107 get_global_flag(obj),
110 obj->base.size / 1024, 108 obj->base.size / 1024,
111 obj->base.read_domains, 109 obj->base.read_domains,
112 obj->base.write_domain, 110 obj->base.write_domain,
113 obj->last_read_seqno, 111 obj->last_read_seqno,
114 obj->last_write_seqno, 112 obj->last_write_seqno,
115 obj->last_fenced_seqno, 113 obj->last_fenced_seqno,
116 cache_level_str(obj->cache_level), 114 i915_cache_level_str(obj->cache_level),
117 obj->dirty ? " dirty" : "", 115 obj->dirty ? " dirty" : "",
118 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 116 obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
119 if (obj->base.name) 117 if (obj->base.name)
120 seq_printf(m, " (name: %d)", obj->base.name); 118 seq_printf(m, " (name: %d)", obj->base.name);
121 if (obj->pin_count) 119 if (obj->pin_count)
122 seq_printf(m, " (pinned x %d)", obj->pin_count); 120 seq_printf(m, " (pinned x %d)", obj->pin_count);
121 if (obj->pin_display)
122 seq_printf(m, " (display)");
123 if (obj->fence_reg != I915_FENCE_REG_NONE) 123 if (obj->fence_reg != I915_FENCE_REG_NONE)
124 seq_printf(m, " (fence: %d)", obj->fence_reg); 124 seq_printf(m, " (fence: %d)", obj->fence_reg);
125 if (obj->gtt_space != NULL) 125 list_for_each_entry(vma, &obj->vma_list, vma_link) {
126 seq_printf(m, " (gtt offset: %08x, size: %08x)", 126 if (!i915_is_ggtt(vma->vm))
127 obj->gtt_offset, (unsigned int)obj->gtt_space->size); 127 seq_puts(m, " (pp");
128 else
129 seq_puts(m, " (g");
130 seq_printf(m, "gtt offset: %08lx, size: %08lx)",
131 vma->node.start, vma->node.size);
132 }
128 if (obj->stolen) 133 if (obj->stolen)
129 seq_printf(m, " (stolen: %08lx)", obj->stolen->start); 134 seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
130 if (obj->pin_mappable || obj->fault_mappable) { 135 if (obj->pin_mappable || obj->fault_mappable) {
@@ -146,8 +151,9 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
146 uintptr_t list = (uintptr_t) node->info_ent->data; 151 uintptr_t list = (uintptr_t) node->info_ent->data;
147 struct list_head *head; 152 struct list_head *head;
148 struct drm_device *dev = node->minor->dev; 153 struct drm_device *dev = node->minor->dev;
149 drm_i915_private_t *dev_priv = dev->dev_private; 154 struct drm_i915_private *dev_priv = dev->dev_private;
150 struct drm_i915_gem_object *obj; 155 struct i915_address_space *vm = &dev_priv->gtt.base;
156 struct i915_vma *vma;
151 size_t total_obj_size, total_gtt_size; 157 size_t total_obj_size, total_gtt_size;
152 int count, ret; 158 int count, ret;
153 159
@@ -155,14 +161,15 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
155 if (ret) 161 if (ret)
156 return ret; 162 return ret;
157 163
164 /* FIXME: the user of this interface might want more than just GGTT */
158 switch (list) { 165 switch (list) {
159 case ACTIVE_LIST: 166 case ACTIVE_LIST:
160 seq_printf(m, "Active:\n"); 167 seq_puts(m, "Active:\n");
161 head = &dev_priv->mm.active_list; 168 head = &vm->active_list;
162 break; 169 break;
163 case INACTIVE_LIST: 170 case INACTIVE_LIST:
164 seq_printf(m, "Inactive:\n"); 171 seq_puts(m, "Inactive:\n");
165 head = &dev_priv->mm.inactive_list; 172 head = &vm->inactive_list;
166 break; 173 break;
167 default: 174 default:
168 mutex_unlock(&dev->struct_mutex); 175 mutex_unlock(&dev->struct_mutex);
@@ -170,14 +177,75 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
170 } 177 }
171 178
172 total_obj_size = total_gtt_size = count = 0; 179 total_obj_size = total_gtt_size = count = 0;
173 list_for_each_entry(obj, head, mm_list) { 180 list_for_each_entry(vma, head, mm_list) {
174 seq_printf(m, " "); 181 seq_printf(m, " ");
175 describe_obj(m, obj); 182 describe_obj(m, vma->obj);
176 seq_printf(m, "\n"); 183 seq_printf(m, "\n");
184 total_obj_size += vma->obj->base.size;
185 total_gtt_size += vma->node.size;
186 count++;
187 }
188 mutex_unlock(&dev->struct_mutex);
189
190 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
191 count, total_obj_size, total_gtt_size);
192 return 0;
193}
194
195static int obj_rank_by_stolen(void *priv,
196 struct list_head *A, struct list_head *B)
197{
198 struct drm_i915_gem_object *a =
199 container_of(A, struct drm_i915_gem_object, obj_exec_link);
200 struct drm_i915_gem_object *b =
201 container_of(B, struct drm_i915_gem_object, obj_exec_link);
202
203 return a->stolen->start - b->stolen->start;
204}
205
206static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
207{
208 struct drm_info_node *node = (struct drm_info_node *) m->private;
209 struct drm_device *dev = node->minor->dev;
210 struct drm_i915_private *dev_priv = dev->dev_private;
211 struct drm_i915_gem_object *obj;
212 size_t total_obj_size, total_gtt_size;
213 LIST_HEAD(stolen);
214 int count, ret;
215
216 ret = mutex_lock_interruptible(&dev->struct_mutex);
217 if (ret)
218 return ret;
219
220 total_obj_size = total_gtt_size = count = 0;
221 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
222 if (obj->stolen == NULL)
223 continue;
224
225 list_add(&obj->obj_exec_link, &stolen);
226
227 total_obj_size += obj->base.size;
228 total_gtt_size += i915_gem_obj_ggtt_size(obj);
229 count++;
230 }
231 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
232 if (obj->stolen == NULL)
233 continue;
234
235 list_add(&obj->obj_exec_link, &stolen);
236
177 total_obj_size += obj->base.size; 237 total_obj_size += obj->base.size;
178 total_gtt_size += obj->gtt_space->size;
179 count++; 238 count++;
180 } 239 }
240 list_sort(NULL, &stolen, obj_rank_by_stolen);
241 seq_puts(m, "Stolen:\n");
242 while (!list_empty(&stolen)) {
243 obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
244 seq_puts(m, " ");
245 describe_obj(m, obj);
246 seq_putc(m, '\n');
247 list_del_init(&obj->obj_exec_link);
248 }
181 mutex_unlock(&dev->struct_mutex); 249 mutex_unlock(&dev->struct_mutex);
182 250
183 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 251 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
@@ -187,10 +255,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
187 255
188#define count_objects(list, member) do { \ 256#define count_objects(list, member) do { \
189 list_for_each_entry(obj, list, member) { \ 257 list_for_each_entry(obj, list, member) { \
190 size += obj->gtt_space->size; \ 258 size += i915_gem_obj_ggtt_size(obj); \
191 ++count; \ 259 ++count; \
192 if (obj->map_and_fenceable) { \ 260 if (obj->map_and_fenceable) { \
193 mappable_size += obj->gtt_space->size; \ 261 mappable_size += i915_gem_obj_ggtt_size(obj); \
194 ++mappable_count; \ 262 ++mappable_count; \
195 } \ 263 } \
196 } \ 264 } \
@@ -209,7 +277,7 @@ static int per_file_stats(int id, void *ptr, void *data)
209 stats->count++; 277 stats->count++;
210 stats->total += obj->base.size; 278 stats->total += obj->base.size;
211 279
212 if (obj->gtt_space) { 280 if (i915_gem_obj_ggtt_bound(obj)) {
213 if (!list_empty(&obj->ring_list)) 281 if (!list_empty(&obj->ring_list))
214 stats->active += obj->base.size; 282 stats->active += obj->base.size;
215 else 283 else
@@ -222,6 +290,17 @@ static int per_file_stats(int id, void *ptr, void *data)
222 return 0; 290 return 0;
223} 291}
224 292
293#define count_vmas(list, member) do { \
294 list_for_each_entry(vma, list, member) { \
295 size += i915_gem_obj_ggtt_size(vma->obj); \
296 ++count; \
297 if (vma->obj->map_and_fenceable) { \
298 mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
299 ++mappable_count; \
300 } \
301 } \
302} while (0)
303
225static int i915_gem_object_info(struct seq_file *m, void* data) 304static int i915_gem_object_info(struct seq_file *m, void* data)
226{ 305{
227 struct drm_info_node *node = (struct drm_info_node *) m->private; 306 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -230,7 +309,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
230 u32 count, mappable_count, purgeable_count; 309 u32 count, mappable_count, purgeable_count;
231 size_t size, mappable_size, purgeable_size; 310 size_t size, mappable_size, purgeable_size;
232 struct drm_i915_gem_object *obj; 311 struct drm_i915_gem_object *obj;
312 struct i915_address_space *vm = &dev_priv->gtt.base;
233 struct drm_file *file; 313 struct drm_file *file;
314 struct i915_vma *vma;
234 int ret; 315 int ret;
235 316
236 ret = mutex_lock_interruptible(&dev->struct_mutex); 317 ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -247,12 +328,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
247 count, mappable_count, size, mappable_size); 328 count, mappable_count, size, mappable_size);
248 329
249 size = count = mappable_size = mappable_count = 0; 330 size = count = mappable_size = mappable_count = 0;
250 count_objects(&dev_priv->mm.active_list, mm_list); 331 count_vmas(&vm->active_list, mm_list);
251 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", 332 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
252 count, mappable_count, size, mappable_size); 333 count, mappable_count, size, mappable_size);
253 334
254 size = count = mappable_size = mappable_count = 0; 335 size = count = mappable_size = mappable_count = 0;
255 count_objects(&dev_priv->mm.inactive_list, mm_list); 336 count_vmas(&vm->inactive_list, mm_list);
256 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", 337 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
257 count, mappable_count, size, mappable_size); 338 count, mappable_count, size, mappable_size);
258 339
@@ -267,11 +348,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
267 size = count = mappable_size = mappable_count = 0; 348 size = count = mappable_size = mappable_count = 0;
268 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 349 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
269 if (obj->fault_mappable) { 350 if (obj->fault_mappable) {
270 size += obj->gtt_space->size; 351 size += i915_gem_obj_ggtt_size(obj);
271 ++count; 352 ++count;
272 } 353 }
273 if (obj->pin_mappable) { 354 if (obj->pin_mappable) {
274 mappable_size += obj->gtt_space->size; 355 mappable_size += i915_gem_obj_ggtt_size(obj);
275 ++mappable_count; 356 ++mappable_count;
276 } 357 }
277 if (obj->madv == I915_MADV_DONTNEED) { 358 if (obj->madv == I915_MADV_DONTNEED) {
@@ -287,10 +368,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
287 count, size); 368 count, size);
288 369
289 seq_printf(m, "%zu [%lu] gtt total\n", 370 seq_printf(m, "%zu [%lu] gtt total\n",
290 dev_priv->gtt.total, 371 dev_priv->gtt.base.total,
291 dev_priv->gtt.mappable_end - dev_priv->gtt.start); 372 dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
292 373
293 seq_printf(m, "\n"); 374 seq_putc(m, '\n');
294 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 375 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
295 struct file_stats stats; 376 struct file_stats stats;
296 377
@@ -310,7 +391,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
310 return 0; 391 return 0;
311} 392}
312 393
313static int i915_gem_gtt_info(struct seq_file *m, void* data) 394static int i915_gem_gtt_info(struct seq_file *m, void *data)
314{ 395{
315 struct drm_info_node *node = (struct drm_info_node *) m->private; 396 struct drm_info_node *node = (struct drm_info_node *) m->private;
316 struct drm_device *dev = node->minor->dev; 397 struct drm_device *dev = node->minor->dev;
@@ -329,11 +410,11 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
329 if (list == PINNED_LIST && obj->pin_count == 0) 410 if (list == PINNED_LIST && obj->pin_count == 0)
330 continue; 411 continue;
331 412
332 seq_printf(m, " "); 413 seq_puts(m, " ");
333 describe_obj(m, obj); 414 describe_obj(m, obj);
334 seq_printf(m, "\n"); 415 seq_putc(m, '\n');
335 total_obj_size += obj->base.size; 416 total_obj_size += obj->base.size;
336 total_gtt_size += obj->gtt_space->size; 417 total_gtt_size += i915_gem_obj_ggtt_size(obj);
337 count++; 418 count++;
338 } 419 }
339 420
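Much of the mechanical churn in this file is seq_printf() calls with constant strings becoming seq_puts() or seq_putc(). The tiny userspace analogy below (plain stdio, not kernel code) shows the point: a string handed over as a format is scanned for '%', so literal text and single characters are cheaper, and safer, through the non-formatting helpers.

#include <stdio.h>

int main(void)
{
	const char *msg = "100% done";

	/* fprintf(stdout, msg) would treat the '%' as a conversion;
	 * the non-formatting calls below do not. */
	fputs(msg, stdout);
	fputc('\n', stdout);
	printf("%s\n", msg); /* the formatted path needs an explicit "%s" */
	return 0;
}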
@@ -371,20 +452,22 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
371 pipe, plane); 452 pipe, plane);
372 } 453 }
373 if (work->enable_stall_check) 454 if (work->enable_stall_check)
374 seq_printf(m, "Stall check enabled, "); 455 seq_puts(m, "Stall check enabled, ");
375 else 456 else
376 seq_printf(m, "Stall check waiting for page flip ioctl, "); 457 seq_puts(m, "Stall check waiting for page flip ioctl, ");
377 seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); 458 seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
378 459
379 if (work->old_fb_obj) { 460 if (work->old_fb_obj) {
380 struct drm_i915_gem_object *obj = work->old_fb_obj; 461 struct drm_i915_gem_object *obj = work->old_fb_obj;
381 if (obj) 462 if (obj)
382 seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 463 seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
464 i915_gem_obj_ggtt_offset(obj));
383 } 465 }
384 if (work->pending_flip_obj) { 466 if (work->pending_flip_obj) {
385 struct drm_i915_gem_object *obj = work->pending_flip_obj; 467 struct drm_i915_gem_object *obj = work->pending_flip_obj;
386 if (obj) 468 if (obj)
387 seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 469 seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
470 i915_gem_obj_ggtt_offset(obj));
388 } 471 }
389 } 472 }
390 spin_unlock_irqrestore(&dev->event_lock, flags); 473 spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -424,7 +507,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
424 mutex_unlock(&dev->struct_mutex); 507 mutex_unlock(&dev->struct_mutex);
425 508
426 if (count == 0) 509 if (count == 0)
427 seq_printf(m, "No requests\n"); 510 seq_puts(m, "No requests\n");
428 511
429 return 0; 512 return 0;
430} 513}
@@ -574,10 +657,10 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
574 seq_printf(m, "Fence %d, pin count = %d, object = ", 657 seq_printf(m, "Fence %d, pin count = %d, object = ",
575 i, dev_priv->fence_regs[i].pin_count); 658 i, dev_priv->fence_regs[i].pin_count);
576 if (obj == NULL) 659 if (obj == NULL)
577 seq_printf(m, "unused"); 660 seq_puts(m, "unused");
578 else 661 else
579 describe_obj(m, obj); 662 describe_obj(m, obj);
580 seq_printf(m, "\n"); 663 seq_putc(m, '\n');
581 } 664 }
582 665
583 mutex_unlock(&dev->struct_mutex); 666 mutex_unlock(&dev->struct_mutex);
@@ -606,361 +689,6 @@ static int i915_hws_info(struct seq_file *m, void *data)
606 return 0; 689 return 0;
607} 690}
608 691
609static const char *ring_str(int ring)
610{
611 switch (ring) {
612 case RCS: return "render";
613 case VCS: return "bsd";
614 case BCS: return "blt";
615 case VECS: return "vebox";
616 default: return "";
617 }
618}
619
620static const char *pin_flag(int pinned)
621{
622 if (pinned > 0)
623 return " P";
624 else if (pinned < 0)
625 return " p";
626 else
627 return "";
628}
629
630static const char *tiling_flag(int tiling)
631{
632 switch (tiling) {
633 default:
634 case I915_TILING_NONE: return "";
635 case I915_TILING_X: return " X";
636 case I915_TILING_Y: return " Y";
637 }
638}
639
640static const char *dirty_flag(int dirty)
641{
642 return dirty ? " dirty" : "";
643}
644
645static const char *purgeable_flag(int purgeable)
646{
647 return purgeable ? " purgeable" : "";
648}
649
650static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
651{
652
653 if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
654 e->err = -ENOSPC;
655 return false;
656 }
657
658 if (e->bytes == e->size - 1 || e->err)
659 return false;
660
661 return true;
662}
663
664static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
665 unsigned len)
666{
667 if (e->pos + len <= e->start) {
668 e->pos += len;
669 return false;
670 }
671
672 /* First vsnprintf needs to fit in its entirety for memmove */
673 if (len >= e->size) {
674 e->err = -EIO;
675 return false;
676 }
677
678 return true;
679}
680
681static void __i915_error_advance(struct drm_i915_error_state_buf *e,
682 unsigned len)
683{
684 /* If this is first printf in this window, adjust it so that
685 * start position matches start of the buffer
686 */
687
688 if (e->pos < e->start) {
689 const size_t off = e->start - e->pos;
690
691 /* Should not happen but be paranoid */
692 if (off > len || e->bytes) {
693 e->err = -EIO;
694 return;
695 }
696
697 memmove(e->buf, e->buf + off, len - off);
698 e->bytes = len - off;
699 e->pos = e->start;
700 return;
701 }
702
703 e->bytes += len;
704 e->pos += len;
705}
706
707static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
708 const char *f, va_list args)
709{
710 unsigned len;
711
712 if (!__i915_error_ok(e))
713 return;
714
715 /* Seek the first printf which is hits start position */
716 if (e->pos < e->start) {
717 len = vsnprintf(NULL, 0, f, args);
718 if (!__i915_error_seek(e, len))
719 return;
720 }
721
722 len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
723 if (len >= e->size - e->bytes)
724 len = e->size - e->bytes - 1;
725
726 __i915_error_advance(e, len);
727}
728
729static void i915_error_puts(struct drm_i915_error_state_buf *e,
730 const char *str)
731{
732 unsigned len;
733
734 if (!__i915_error_ok(e))
735 return;
736
737 len = strlen(str);
738
739 /* Seek the first printf which is hits start position */
740 if (e->pos < e->start) {
741 if (!__i915_error_seek(e, len))
742 return;
743 }
744
745 if (len >= e->size - e->bytes)
746 len = e->size - e->bytes - 1;
747 memcpy(e->buf + e->bytes, str, len);
748
749 __i915_error_advance(e, len);
750}
751
752void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
753{
754 va_list args;
755
756 va_start(args, f);
757 i915_error_vprintf(e, f, args);
758 va_end(args);
759}
760
761#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
762#define err_puts(e, s) i915_error_puts(e, s)
763
764static void print_error_buffers(struct drm_i915_error_state_buf *m,
765 const char *name,
766 struct drm_i915_error_buffer *err,
767 int count)
768{
769 err_printf(m, "%s [%d]:\n", name, count);
770
771 while (count--) {
772 err_printf(m, " %08x %8u %02x %02x %x %x",
773 err->gtt_offset,
774 err->size,
775 err->read_domains,
776 err->write_domain,
777 err->rseqno, err->wseqno);
778 err_puts(m, pin_flag(err->pinned));
779 err_puts(m, tiling_flag(err->tiling));
780 err_puts(m, dirty_flag(err->dirty));
781 err_puts(m, purgeable_flag(err->purgeable));
782 err_puts(m, err->ring != -1 ? " " : "");
783 err_puts(m, ring_str(err->ring));
784 err_puts(m, cache_level_str(err->cache_level));
785
786 if (err->name)
787 err_printf(m, " (name: %d)", err->name);
788 if (err->fence_reg != I915_FENCE_REG_NONE)
789 err_printf(m, " (fence: %d)", err->fence_reg);
790
791 err_puts(m, "\n");
792 err++;
793 }
794}
795
796static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
797 struct drm_device *dev,
798 struct drm_i915_error_state *error,
799 unsigned ring)
800{
801 BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
802 err_printf(m, "%s command stream:\n", ring_str(ring));
803 err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
804 err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
805 err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
806 err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
807 err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
808 err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
809 err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
810 if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
811 err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
812
813 if (INTEL_INFO(dev)->gen >= 4)
814 err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
815 err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
816 err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
817 if (INTEL_INFO(dev)->gen >= 6) {
818 err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
819 err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
820 err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
821 error->semaphore_mboxes[ring][0],
822 error->semaphore_seqno[ring][0]);
823 err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
824 error->semaphore_mboxes[ring][1],
825 error->semaphore_seqno[ring][1]);
826 }
827 err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
828 err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
829 err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
830 err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
831}
832
833struct i915_error_state_file_priv {
834 struct drm_device *dev;
835 struct drm_i915_error_state *error;
836};
837
838
839static int i915_error_state(struct i915_error_state_file_priv *error_priv,
840 struct drm_i915_error_state_buf *m)
841
842{
843 struct drm_device *dev = error_priv->dev;
844 drm_i915_private_t *dev_priv = dev->dev_private;
845 struct drm_i915_error_state *error = error_priv->error;
846 struct intel_ring_buffer *ring;
847 int i, j, page, offset, elt;
848
849 if (!error) {
850 err_printf(m, "no error state collected\n");
851 return 0;
852 }
853
854 err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
855 error->time.tv_usec);
856 err_printf(m, "Kernel: " UTS_RELEASE "\n");
857 err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
858 err_printf(m, "EIR: 0x%08x\n", error->eir);
859 err_printf(m, "IER: 0x%08x\n", error->ier);
860 err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
861 err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
862 err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
863 err_printf(m, "CCID: 0x%08x\n", error->ccid);
864
865 for (i = 0; i < dev_priv->num_fence_regs; i++)
866 err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
867
868 for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
869 err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
870 error->extra_instdone[i]);
871
872 if (INTEL_INFO(dev)->gen >= 6) {
873 err_printf(m, "ERROR: 0x%08x\n", error->error);
874 err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
875 }
876
877 if (INTEL_INFO(dev)->gen == 7)
878 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
879
880 for_each_ring(ring, dev_priv, i)
881 i915_ring_error_state(m, dev, error, i);
882
883 if (error->active_bo)
884 print_error_buffers(m, "Active",
885 error->active_bo,
886 error->active_bo_count);
887
888 if (error->pinned_bo)
889 print_error_buffers(m, "Pinned",
890 error->pinned_bo,
891 error->pinned_bo_count);
892
893 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
894 struct drm_i915_error_object *obj;
895
896 if ((obj = error->ring[i].batchbuffer)) {
897 err_printf(m, "%s --- gtt_offset = 0x%08x\n",
898 dev_priv->ring[i].name,
899 obj->gtt_offset);
900 offset = 0;
901 for (page = 0; page < obj->page_count; page++) {
902 for (elt = 0; elt < PAGE_SIZE/4; elt++) {
903 err_printf(m, "%08x : %08x\n", offset,
904 obj->pages[page][elt]);
905 offset += 4;
906 }
907 }
908 }
909
910 if (error->ring[i].num_requests) {
911 err_printf(m, "%s --- %d requests\n",
912 dev_priv->ring[i].name,
913 error->ring[i].num_requests);
914 for (j = 0; j < error->ring[i].num_requests; j++) {
915 err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
916 error->ring[i].requests[j].seqno,
917 error->ring[i].requests[j].jiffies,
918 error->ring[i].requests[j].tail);
919 }
920 }
921
922 if ((obj = error->ring[i].ringbuffer)) {
923 err_printf(m, "%s --- ringbuffer = 0x%08x\n",
924 dev_priv->ring[i].name,
925 obj->gtt_offset);
926 offset = 0;
927 for (page = 0; page < obj->page_count; page++) {
928 for (elt = 0; elt < PAGE_SIZE/4; elt++) {
929 err_printf(m, "%08x : %08x\n",
930 offset,
931 obj->pages[page][elt]);
932 offset += 4;
933 }
934 }
935 }
936
937 obj = error->ring[i].ctx;
938 if (obj) {
939 err_printf(m, "%s --- HW Context = 0x%08x\n",
940 dev_priv->ring[i].name,
941 obj->gtt_offset);
942 offset = 0;
943 for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
944 err_printf(m, "[%04x] %08x %08x %08x %08x\n",
945 offset,
946 obj->pages[0][elt],
947 obj->pages[0][elt+1],
948 obj->pages[0][elt+2],
949 obj->pages[0][elt+3]);
950 offset += 16;
951 }
952 }
953 }
954
955 if (error->overlay)
956 intel_overlay_print_error_state(m, error->overlay);
957
958 if (error->display)
959 intel_display_print_error_state(m, dev, error->display);
960
961 return 0;
962}
963
964static ssize_t 692static ssize_t
965i915_error_state_write(struct file *filp, 693i915_error_state_write(struct file *filp,
966 const char __user *ubuf, 694 const char __user *ubuf,
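The roughly 360 lines deleted in this hunk are the error-state dump and its printf-into-a-buffer machinery; the read path further down now goes through i915_error_state_buf_init() and i915_error_state_to_str(), so the logic has evidently moved behind shared helpers rather than disappeared. The interesting part is the "windowed" formatting the deleted comments describe: output before the reader's start offset is measured but discarded, and the first print that straddles the window is trimmed so a huge dump can be read in chunks. A stripped-down userspace sketch follows; the names are only loosely modeled on the kernel's and a fixed scratch buffer stands in for the real allocation.

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

struct err_buf {
	char buf[64];
	size_t size;   /* capacity of buf */
	size_t bytes;  /* bytes stored so far */
	size_t pos;    /* logical output position across all prints */
	size_t start;  /* offset the reader asked to start at */
};

static void err_printf(struct err_buf *e, const char *fmt, ...)
{
	char tmp[128];
	va_list args;
	int n;
	size_t len, off = 0;

	va_start(args, fmt);
	n = vsnprintf(tmp, sizeof(tmp), fmt, args);
	va_end(args);
	if (n < 0)
		return;
	len = (size_t)n < sizeof(tmp) - 1 ? (size_t)n : sizeof(tmp) - 1;

	/* Output entirely before the window: account for it, drop it. */
	if (e->pos + len <= e->start) {
		e->pos += len;
		return;
	}
	/* First print straddling the window start: keep only its tail. */
	if (e->pos < e->start) {
		off = e->start - e->pos;
		e->pos = e->start;
	}
	len -= off;
	if (len > e->size - e->bytes - 1)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, tmp + off, len);
	e->bytes += len;
	e->pos += len;
	e->buf[e->bytes] = '\0';
}

int main(void)
{
	struct err_buf e = { .size = sizeof(e.buf), .start = 20 };
	int i;

	for (i = 0; i < 5; i++)
		err_printf(&e, "record %d\n", i);
	printf("window from byte %zu:\n%s", e.start, e.buf);
	return 0;
}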
@@ -986,9 +714,7 @@ i915_error_state_write(struct file *filp,
986static int i915_error_state_open(struct inode *inode, struct file *file) 714static int i915_error_state_open(struct inode *inode, struct file *file)
987{ 715{
988 struct drm_device *dev = inode->i_private; 716 struct drm_device *dev = inode->i_private;
989 drm_i915_private_t *dev_priv = dev->dev_private;
990 struct i915_error_state_file_priv *error_priv; 717 struct i915_error_state_file_priv *error_priv;
991 unsigned long flags;
992 718
993 error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL); 719 error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
994 if (!error_priv) 720 if (!error_priv)
@@ -996,11 +722,7 @@ static int i915_error_state_open(struct inode *inode, struct file *file)
996 722
997 error_priv->dev = dev; 723 error_priv->dev = dev;
998 724
999 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 725 i915_error_state_get(dev, error_priv);
1000 error_priv->error = dev_priv->gpu_error.first_error;
1001 if (error_priv->error)
1002 kref_get(&error_priv->error->ref);
1003 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1004 726
1005 file->private_data = error_priv; 727 file->private_data = error_priv;
1006 728
@@ -1011,8 +733,7 @@ static int i915_error_state_release(struct inode *inode, struct file *file)
1011{ 733{
1012 struct i915_error_state_file_priv *error_priv = file->private_data; 734 struct i915_error_state_file_priv *error_priv = file->private_data;
1013 735
1014 if (error_priv->error) 736 i915_error_state_put(error_priv);
1015 kref_put(&error_priv->error->ref, i915_error_state_free);
1016 kfree(error_priv); 737 kfree(error_priv);
1017 738
1018 return 0; 739 return 0;
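The open/release hunks above replace an open-coded "lock, peek at first_error, kref_get" sequence with i915_error_state_get()/_put(). The sketch below, in userspace C11 with invented names, shows the shape of such helpers: callers never touch the reference count directly, and the last put frees the object.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct error_state {
	atomic_int ref;
	const char *msg;
};

static struct error_state *error_state_get(struct error_state *e)
{
	if (e)
		atomic_fetch_add(&e->ref, 1);
	return e;
}

static void error_state_put(struct error_state *e)
{
	/* fetch_sub returns the previous value: 1 means last reference. */
	if (e && atomic_fetch_sub(&e->ref, 1) == 1) {
		printf("freeing '%s'\n", e->msg);
		free(e);
	}
}

int main(void)
{
	struct error_state *e = malloc(sizeof(*e));

	atomic_init(&e->ref, 1);
	e->msg = "GPU hang at seqno 42";

	error_state_get(e);   /* open(): reader takes a reference */
	error_state_put(e);   /* release(): reader drops it */
	error_state_put(e);   /* owner drops the last reference */
	return 0;
}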
@@ -1025,40 +746,15 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
1025 struct drm_i915_error_state_buf error_str; 746 struct drm_i915_error_state_buf error_str;
1026 loff_t tmp_pos = 0; 747 loff_t tmp_pos = 0;
1027 ssize_t ret_count = 0; 748 ssize_t ret_count = 0;
1028 int ret = 0; 749 int ret;
1029
1030 memset(&error_str, 0, sizeof(error_str));
1031
1032 /* We need to have enough room to store any i915_error_state printf
1033 * so that we can move it to start position.
1034 */
1035 error_str.size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
1036 error_str.buf = kmalloc(error_str.size,
1037 GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
1038
1039 if (error_str.buf == NULL) {
1040 error_str.size = PAGE_SIZE;
1041 error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
1042 }
1043
1044 if (error_str.buf == NULL) {
1045 error_str.size = 128;
1046 error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
1047 }
1048
1049 if (error_str.buf == NULL)
1050 return -ENOMEM;
1051
1052 error_str.start = *pos;
1053 750
1054 ret = i915_error_state(error_priv, &error_str); 751 ret = i915_error_state_buf_init(&error_str, count, *pos);
1055 if (ret) 752 if (ret)
1056 goto out; 753 return ret;
1057 754
1058 if (error_str.bytes == 0 && error_str.err) { 755 ret = i915_error_state_to_str(&error_str, error_priv);
1059 ret = error_str.err; 756 if (ret)
1060 goto out; 757 goto out;
1061 }
1062 758
1063 ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos, 759 ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
1064 error_str.buf, 760 error_str.buf,
@@ -1069,7 +765,7 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
1069 else 765 else
1070 *pos = error_str.start + ret_count; 766 *pos = error_str.start + ret_count;
1071out: 767out:
1072 kfree(error_str.buf); 768 i915_error_state_buf_release(&error_str);
1073 return ret ?: ret_count; 769 return ret ?: ret_count;
1074} 770}
1075 771
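i915_error_state_read() also loses its three-step kmalloc fallback; that policy is now hidden behind i915_error_state_buf_init()/_release(). A userspace sketch of the same init/release pairing follows, with made-up names and fallback sizes.

#include <stdio.h>
#include <stdlib.h>

struct str_buf {
	char *buf;
	size_t size;
};

static int buf_init(struct str_buf *s, size_t wanted)
{
	/* Try the requested size first, then progressively smaller ones. */
	static const size_t fallback[] = { 4096, 128 };
	size_t i;

	s->size = wanted + 1;
	s->buf = malloc(s->size);
	for (i = 0; !s->buf && i < sizeof(fallback) / sizeof(fallback[0]); i++) {
		s->size = fallback[i];
		s->buf = malloc(s->size);
	}
	return s->buf ? 0 : -1;
}

static void buf_release(struct str_buf *s)
{
	free(s->buf);
	s->buf = NULL;
}

int main(void)
{
	struct str_buf s;

	if (buf_init(&s, 1 << 20))
		return 1;
	snprintf(s.buf, s.size, "error state goes here\n");
	fputs(s.buf, stdout);
	buf_release(&s);
	return 0;
}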
@@ -1246,7 +942,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
1246 (freq_sts >> 8) & 0xff)); 942 (freq_sts >> 8) & 0xff));
1247 mutex_unlock(&dev_priv->rps.hw_lock); 943 mutex_unlock(&dev_priv->rps.hw_lock);
1248 } else { 944 } else {
1249 seq_printf(m, "no P-state info available\n"); 945 seq_puts(m, "no P-state info available\n");
1250 } 946 }
1251 947
1252 return 0; 948 return 0;
@@ -1341,28 +1037,28 @@ static int ironlake_drpc_info(struct seq_file *m)
1341 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 1037 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1342 seq_printf(m, "Render standby enabled: %s\n", 1038 seq_printf(m, "Render standby enabled: %s\n",
1343 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); 1039 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
1344 seq_printf(m, "Current RS state: "); 1040 seq_puts(m, "Current RS state: ");
1345 switch (rstdbyctl & RSX_STATUS_MASK) { 1041 switch (rstdbyctl & RSX_STATUS_MASK) {
1346 case RSX_STATUS_ON: 1042 case RSX_STATUS_ON:
1347 seq_printf(m, "on\n"); 1043 seq_puts(m, "on\n");
1348 break; 1044 break;
1349 case RSX_STATUS_RC1: 1045 case RSX_STATUS_RC1:
1350 seq_printf(m, "RC1\n"); 1046 seq_puts(m, "RC1\n");
1351 break; 1047 break;
1352 case RSX_STATUS_RC1E: 1048 case RSX_STATUS_RC1E:
1353 seq_printf(m, "RC1E\n"); 1049 seq_puts(m, "RC1E\n");
1354 break; 1050 break;
1355 case RSX_STATUS_RS1: 1051 case RSX_STATUS_RS1:
1356 seq_printf(m, "RS1\n"); 1052 seq_puts(m, "RS1\n");
1357 break; 1053 break;
1358 case RSX_STATUS_RS2: 1054 case RSX_STATUS_RS2:
1359 seq_printf(m, "RS2 (RC6)\n"); 1055 seq_puts(m, "RS2 (RC6)\n");
1360 break; 1056 break;
1361 case RSX_STATUS_RS3: 1057 case RSX_STATUS_RS3:
1362 seq_printf(m, "RC3 (RC6+)\n"); 1058 seq_puts(m, "RC3 (RC6+)\n");
1363 break; 1059 break;
1364 default: 1060 default:
1365 seq_printf(m, "unknown\n"); 1061 seq_puts(m, "unknown\n");
1366 break; 1062 break;
1367 } 1063 }
1368 1064
@@ -1377,20 +1073,19 @@ static int gen6_drpc_info(struct seq_file *m)
1377 struct drm_i915_private *dev_priv = dev->dev_private; 1073 struct drm_i915_private *dev_priv = dev->dev_private;
1378 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; 1074 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
1379 unsigned forcewake_count; 1075 unsigned forcewake_count;
1380 int count=0, ret; 1076 int count = 0, ret;
1381
1382 1077
1383 ret = mutex_lock_interruptible(&dev->struct_mutex); 1078 ret = mutex_lock_interruptible(&dev->struct_mutex);
1384 if (ret) 1079 if (ret)
1385 return ret; 1080 return ret;
1386 1081
1387 spin_lock_irq(&dev_priv->gt_lock); 1082 spin_lock_irq(&dev_priv->uncore.lock);
1388 forcewake_count = dev_priv->forcewake_count; 1083 forcewake_count = dev_priv->uncore.forcewake_count;
1389 spin_unlock_irq(&dev_priv->gt_lock); 1084 spin_unlock_irq(&dev_priv->uncore.lock);
1390 1085
1391 if (forcewake_count) { 1086 if (forcewake_count) {
1392 seq_printf(m, "RC information inaccurate because somebody " 1087 seq_puts(m, "RC information inaccurate because somebody "
1393 "holds a forcewake reference \n"); 1088 "holds a forcewake reference \n");
1394 } else { 1089 } else {
1395 /* NB: we cannot use forcewake, else we read the wrong values */ 1090 /* NB: we cannot use forcewake, else we read the wrong values */
1396 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1091 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
@@ -1399,7 +1094,7 @@ static int gen6_drpc_info(struct seq_file *m)
1399 } 1094 }
1400 1095
1401 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); 1096 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
1402 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4); 1097 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1403 1098
1404 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1099 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1405 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1100 rcctl1 = I915_READ(GEN6_RC_CONTROL);
@@ -1423,25 +1118,25 @@ static int gen6_drpc_info(struct seq_file *m)
1423 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1118 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1424 seq_printf(m, "Deepest RC6 Enabled: %s\n", 1119 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1425 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1120 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1426 seq_printf(m, "Current RC state: "); 1121 seq_puts(m, "Current RC state: ");
1427 switch (gt_core_status & GEN6_RCn_MASK) { 1122 switch (gt_core_status & GEN6_RCn_MASK) {
1428 case GEN6_RC0: 1123 case GEN6_RC0:
1429 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1124 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1430 seq_printf(m, "Core Power Down\n"); 1125 seq_puts(m, "Core Power Down\n");
1431 else 1126 else
1432 seq_printf(m, "on\n"); 1127 seq_puts(m, "on\n");
1433 break; 1128 break;
1434 case GEN6_RC3: 1129 case GEN6_RC3:
1435 seq_printf(m, "RC3\n"); 1130 seq_puts(m, "RC3\n");
1436 break; 1131 break;
1437 case GEN6_RC6: 1132 case GEN6_RC6:
1438 seq_printf(m, "RC6\n"); 1133 seq_puts(m, "RC6\n");
1439 break; 1134 break;
1440 case GEN6_RC7: 1135 case GEN6_RC7:
1441 seq_printf(m, "RC7\n"); 1136 seq_puts(m, "RC7\n");
1442 break; 1137 break;
1443 default: 1138 default:
1444 seq_printf(m, "Unknown\n"); 1139 seq_puts(m, "Unknown\n");
1445 break; 1140 break;
1446 } 1141 }
1447 1142
@@ -1485,43 +1180,52 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
1485 drm_i915_private_t *dev_priv = dev->dev_private; 1180 drm_i915_private_t *dev_priv = dev->dev_private;
1486 1181
1487 if (!I915_HAS_FBC(dev)) { 1182 if (!I915_HAS_FBC(dev)) {
1488 seq_printf(m, "FBC unsupported on this chipset\n"); 1183 seq_puts(m, "FBC unsupported on this chipset\n");
1489 return 0; 1184 return 0;
1490 } 1185 }
1491 1186
1492 if (intel_fbc_enabled(dev)) { 1187 if (intel_fbc_enabled(dev)) {
1493 seq_printf(m, "FBC enabled\n"); 1188 seq_puts(m, "FBC enabled\n");
1494 } else { 1189 } else {
1495 seq_printf(m, "FBC disabled: "); 1190 seq_puts(m, "FBC disabled: ");
1496 switch (dev_priv->no_fbc_reason) { 1191 switch (dev_priv->fbc.no_fbc_reason) {
1192 case FBC_OK:
1193 seq_puts(m, "FBC actived, but currently disabled in hardware");
1194 break;
1195 case FBC_UNSUPPORTED:
1196 seq_puts(m, "unsupported by this chipset");
1197 break;
1497 case FBC_NO_OUTPUT: 1198 case FBC_NO_OUTPUT:
1498 seq_printf(m, "no outputs"); 1199 seq_puts(m, "no outputs");
1499 break; 1200 break;
1500 case FBC_STOLEN_TOO_SMALL: 1201 case FBC_STOLEN_TOO_SMALL:
1501 seq_printf(m, "not enough stolen memory"); 1202 seq_puts(m, "not enough stolen memory");
1502 break; 1203 break;
1503 case FBC_UNSUPPORTED_MODE: 1204 case FBC_UNSUPPORTED_MODE:
1504 seq_printf(m, "mode not supported"); 1205 seq_puts(m, "mode not supported");
1505 break; 1206 break;
1506 case FBC_MODE_TOO_LARGE: 1207 case FBC_MODE_TOO_LARGE:
1507 seq_printf(m, "mode too large"); 1208 seq_puts(m, "mode too large");
1508 break; 1209 break;
1509 case FBC_BAD_PLANE: 1210 case FBC_BAD_PLANE:
1510 seq_printf(m, "FBC unsupported on plane"); 1211 seq_puts(m, "FBC unsupported on plane");
1511 break; 1212 break;
1512 case FBC_NOT_TILED: 1213 case FBC_NOT_TILED:
1513 seq_printf(m, "scanout buffer not tiled"); 1214 seq_puts(m, "scanout buffer not tiled");
1514 break; 1215 break;
1515 case FBC_MULTIPLE_PIPES: 1216 case FBC_MULTIPLE_PIPES:
1516 seq_printf(m, "multiple pipes are enabled"); 1217 seq_puts(m, "multiple pipes are enabled");
1517 break; 1218 break;
1518 case FBC_MODULE_PARAM: 1219 case FBC_MODULE_PARAM:
1519 seq_printf(m, "disabled per module param (default off)"); 1220 seq_puts(m, "disabled per module param (default off)");
1221 break;
1222 case FBC_CHIP_DEFAULT:
1223 seq_puts(m, "disabled per chip default");
1520 break; 1224 break;
1521 default: 1225 default:
1522 seq_printf(m, "unknown reason"); 1226 seq_puts(m, "unknown reason");
1523 } 1227 }
1524 seq_printf(m, "\n"); 1228 seq_putc(m, '\n');
1525 } 1229 }
1526 return 0; 1230 return 0;
1527} 1231}
@@ -1604,7 +1308,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
1604 int gpu_freq, ia_freq; 1308 int gpu_freq, ia_freq;
1605 1309
1606 if (!(IS_GEN6(dev) || IS_GEN7(dev))) { 1310 if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
1607 seq_printf(m, "unsupported on this chipset\n"); 1311 seq_puts(m, "unsupported on this chipset\n");
1608 return 0; 1312 return 0;
1609 } 1313 }
1610 1314
@@ -1612,7 +1316,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
1612 if (ret) 1316 if (ret)
1613 return ret; 1317 return ret;
1614 1318
1615 seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); 1319 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1616 1320
1617 for (gpu_freq = dev_priv->rps.min_delay; 1321 for (gpu_freq = dev_priv->rps.min_delay;
1618 gpu_freq <= dev_priv->rps.max_delay; 1322 gpu_freq <= dev_priv->rps.max_delay;
@@ -1701,7 +1405,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1701 fb->base.bits_per_pixel, 1405 fb->base.bits_per_pixel,
1702 atomic_read(&fb->base.refcount.refcount)); 1406 atomic_read(&fb->base.refcount.refcount));
1703 describe_obj(m, fb->obj); 1407 describe_obj(m, fb->obj);
1704 seq_printf(m, "\n"); 1408 seq_putc(m, '\n');
1705 mutex_unlock(&dev->mode_config.mutex); 1409 mutex_unlock(&dev->mode_config.mutex);
1706 1410
1707 mutex_lock(&dev->mode_config.fb_lock); 1411 mutex_lock(&dev->mode_config.fb_lock);
@@ -1716,7 +1420,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1716 fb->base.bits_per_pixel, 1420 fb->base.bits_per_pixel,
1717 atomic_read(&fb->base.refcount.refcount)); 1421 atomic_read(&fb->base.refcount.refcount));
1718 describe_obj(m, fb->obj); 1422 describe_obj(m, fb->obj);
1719 seq_printf(m, "\n"); 1423 seq_putc(m, '\n');
1720 } 1424 }
1721 mutex_unlock(&dev->mode_config.fb_lock); 1425 mutex_unlock(&dev->mode_config.fb_lock);
1722 1426
@@ -1736,22 +1440,22 @@ static int i915_context_status(struct seq_file *m, void *unused)
1736 return ret; 1440 return ret;
1737 1441
1738 if (dev_priv->ips.pwrctx) { 1442 if (dev_priv->ips.pwrctx) {
1739 seq_printf(m, "power context "); 1443 seq_puts(m, "power context ");
1740 describe_obj(m, dev_priv->ips.pwrctx); 1444 describe_obj(m, dev_priv->ips.pwrctx);
1741 seq_printf(m, "\n"); 1445 seq_putc(m, '\n');
1742 } 1446 }
1743 1447
1744 if (dev_priv->ips.renderctx) { 1448 if (dev_priv->ips.renderctx) {
1745 seq_printf(m, "render context "); 1449 seq_puts(m, "render context ");
1746 describe_obj(m, dev_priv->ips.renderctx); 1450 describe_obj(m, dev_priv->ips.renderctx);
1747 seq_printf(m, "\n"); 1451 seq_putc(m, '\n');
1748 } 1452 }
1749 1453
1750 for_each_ring(ring, dev_priv, i) { 1454 for_each_ring(ring, dev_priv, i) {
1751 if (ring->default_context) { 1455 if (ring->default_context) {
1752 seq_printf(m, "HW default context %s ring ", ring->name); 1456 seq_printf(m, "HW default context %s ring ", ring->name);
1753 describe_obj(m, ring->default_context->obj); 1457 describe_obj(m, ring->default_context->obj);
1754 seq_printf(m, "\n"); 1458 seq_putc(m, '\n');
1755 } 1459 }
1756 } 1460 }
1757 1461
@@ -1767,9 +1471,9 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
1767 struct drm_i915_private *dev_priv = dev->dev_private; 1471 struct drm_i915_private *dev_priv = dev->dev_private;
1768 unsigned forcewake_count; 1472 unsigned forcewake_count;
1769 1473
1770 spin_lock_irq(&dev_priv->gt_lock); 1474 spin_lock_irq(&dev_priv->uncore.lock);
1771 forcewake_count = dev_priv->forcewake_count; 1475 forcewake_count = dev_priv->uncore.forcewake_count;
1772 spin_unlock_irq(&dev_priv->gt_lock); 1476 spin_unlock_irq(&dev_priv->uncore.lock);
1773 1477
1774 seq_printf(m, "forcewake count = %u\n", forcewake_count); 1478 seq_printf(m, "forcewake count = %u\n", forcewake_count);
1775 1479
@@ -1778,7 +1482,7 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
1778 1482
1779static const char *swizzle_string(unsigned swizzle) 1483static const char *swizzle_string(unsigned swizzle)
1780{ 1484{
1781 switch(swizzle) { 1485 switch (swizzle) {
1782 case I915_BIT_6_SWIZZLE_NONE: 1486 case I915_BIT_6_SWIZZLE_NONE:
1783 return "none"; 1487 return "none";
1784 case I915_BIT_6_SWIZZLE_9: 1488 case I915_BIT_6_SWIZZLE_9:
@@ -1868,7 +1572,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
1868 if (dev_priv->mm.aliasing_ppgtt) { 1572 if (dev_priv->mm.aliasing_ppgtt) {
1869 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 1573 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
1870 1574
1871 seq_printf(m, "aliasing PPGTT:\n"); 1575 seq_puts(m, "aliasing PPGTT:\n");
1872 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); 1576 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
1873 } 1577 }
1874 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 1578 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
@@ -1886,7 +1590,7 @@ static int i915_dpio_info(struct seq_file *m, void *data)
1886 1590
1887 1591
1888 if (!IS_VALLEYVIEW(dev)) { 1592 if (!IS_VALLEYVIEW(dev)) {
1889 seq_printf(m, "unsupported\n"); 1593 seq_puts(m, "unsupported\n");
1890 return 0; 1594 return 0;
1891 } 1595 }
1892 1596
@@ -1924,6 +1628,194 @@ static int i915_dpio_info(struct seq_file *m, void *data)
1924 return 0; 1628 return 0;
1925} 1629}
1926 1630
1631static int i915_llc(struct seq_file *m, void *data)
1632{
1633 struct drm_info_node *node = (struct drm_info_node *) m->private;
1634 struct drm_device *dev = node->minor->dev;
1635 struct drm_i915_private *dev_priv = dev->dev_private;
1636
1637 /* Size calculation for LLC is a bit of a pain. Ignore for now. */
1638 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
1639 seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
1640
1641 return 0;
1642}
1643
1644static int i915_edp_psr_status(struct seq_file *m, void *data)
1645{
1646 struct drm_info_node *node = m->private;
1647 struct drm_device *dev = node->minor->dev;
1648 struct drm_i915_private *dev_priv = dev->dev_private;
1649 u32 psrstat, psrperf;
1650
1651 if (!IS_HASWELL(dev)) {
1652 seq_puts(m, "PSR not supported on this platform\n");
1653 } else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
1654 seq_puts(m, "PSR enabled\n");
1655 } else {
1656 seq_puts(m, "PSR disabled: ");
1657 switch (dev_priv->no_psr_reason) {
1658 case PSR_NO_SOURCE:
1659 seq_puts(m, "not supported on this platform");
1660 break;
1661 case PSR_NO_SINK:
1662 seq_puts(m, "not supported by panel");
1663 break;
1664 case PSR_MODULE_PARAM:
1665 seq_puts(m, "disabled by flag");
1666 break;
1667 case PSR_CRTC_NOT_ACTIVE:
1668 seq_puts(m, "crtc not active");
1669 break;
1670 case PSR_PWR_WELL_ENABLED:
1671 seq_puts(m, "power well enabled");
1672 break;
1673 case PSR_NOT_TILED:
1674 seq_puts(m, "not tiled");
1675 break;
1676 case PSR_SPRITE_ENABLED:
1677 seq_puts(m, "sprite enabled");
1678 break;
1679 case PSR_S3D_ENABLED:
1680 seq_puts(m, "stereo 3d enabled");
1681 break;
1682 case PSR_INTERLACED_ENABLED:
1683 seq_puts(m, "interlaced enabled");
1684 break;
1685 case PSR_HSW_NOT_DDIA:
1686 seq_puts(m, "HSW ties PSR to DDI A (eDP)");
1687 break;
1688 default:
1689 seq_puts(m, "unknown reason");
1690 }
1691 seq_puts(m, "\n");
1692 return 0;
1693 }
1694
1695 psrstat = I915_READ(EDP_PSR_STATUS_CTL);
1696
1697 seq_puts(m, "PSR Current State: ");
1698 switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
1699 case EDP_PSR_STATUS_STATE_IDLE:
1700 seq_puts(m, "Reset state\n");
1701 break;
1702 case EDP_PSR_STATUS_STATE_SRDONACK:
1703 seq_puts(m, "Wait for TG/Stream to send on frame of data after SRD conditions are met\n");
1704 break;
1705 case EDP_PSR_STATUS_STATE_SRDENT:
1706 seq_puts(m, "SRD entry\n");
1707 break;
1708 case EDP_PSR_STATUS_STATE_BUFOFF:
1709 seq_puts(m, "Wait for buffer turn off\n");
1710 break;
1711 case EDP_PSR_STATUS_STATE_BUFON:
1712 seq_puts(m, "Wait for buffer turn on\n");
1713 break;
1714 case EDP_PSR_STATUS_STATE_AUXACK:
1715 seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
1716 break;
1717 case EDP_PSR_STATUS_STATE_SRDOFFACK:
1718 seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
1719 break;
1720 default:
1721 seq_puts(m, "Unknown\n");
1722 break;
1723 }
1724
1725 seq_puts(m, "Link Status: ");
1726 switch (psrstat & EDP_PSR_STATUS_LINK_MASK) {
1727 case EDP_PSR_STATUS_LINK_FULL_OFF:
1728 seq_puts(m, "Link is fully off\n");
1729 break;
1730 case EDP_PSR_STATUS_LINK_FULL_ON:
1731 seq_puts(m, "Link is fully on\n");
1732 break;
1733 case EDP_PSR_STATUS_LINK_STANDBY:
1734 seq_puts(m, "Link is in standby\n");
1735 break;
1736 default:
1737 seq_puts(m, "Unknown\n");
1738 break;
1739 }
1740
1741 seq_printf(m, "PSR Entry Count: %u\n",
1742 psrstat >> EDP_PSR_STATUS_COUNT_SHIFT &
1743 EDP_PSR_STATUS_COUNT_MASK);
1744
1745 seq_printf(m, "Max Sleep Timer Counter: %u\n",
1746 psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT &
1747 EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK);
1748
1749 seq_printf(m, "Had AUX error: %s\n",
1750 yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR));
1751
1752 seq_printf(m, "Sending AUX: %s\n",
1753 yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING));
1754
1755 seq_printf(m, "Sending Idle: %s\n",
1756 yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE));
1757
1758 seq_printf(m, "Sending TP2 TP3: %s\n",
1759 yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3));
1760
1761 seq_printf(m, "Sending TP1: %s\n",
1762 yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1));
1763
1764 seq_printf(m, "Idle Count: %u\n",
1765 psrstat & EDP_PSR_STATUS_IDLE_MASK);
1766
1767 psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK;
1768 seq_printf(m, "Performance Counter: %u\n", psrperf);
1769
1770 return 0;
1771}
1772
1773static int i915_energy_uJ(struct seq_file *m, void *data)
1774{
1775 struct drm_info_node *node = m->private;
1776 struct drm_device *dev = node->minor->dev;
1777 struct drm_i915_private *dev_priv = dev->dev_private;
1778 u64 power;
1779 u32 units;
1780
1781 if (INTEL_INFO(dev)->gen < 6)
1782 return -ENODEV;
1783
1784 rdmsrl(MSR_RAPL_POWER_UNIT, power);
1785 power = (power & 0x1f00) >> 8;
1786 units = 1000000 / (1 << power); /* convert to uJ */
1787 power = I915_READ(MCH_SECP_NRG_STTS);
1788 power *= units;
1789
1790 seq_printf(m, "%llu", (long long unsigned)power);
1791
1792 return 0;
1793}
1794
1795static int i915_pc8_status(struct seq_file *m, void *unused)
1796{
1797 struct drm_info_node *node = (struct drm_info_node *) m->private;
1798 struct drm_device *dev = node->minor->dev;
1799 struct drm_i915_private *dev_priv = dev->dev_private;
1800
1801 if (!IS_HASWELL(dev)) {
1802 seq_puts(m, "not supported\n");
1803 return 0;
1804 }
1805
1806 mutex_lock(&dev_priv->pc8.lock);
1807 seq_printf(m, "Requirements met: %s\n",
1808 yesno(dev_priv->pc8.requirements_met));
1809 seq_printf(m, "GPU idle: %s\n", yesno(dev_priv->pc8.gpu_idle));
1810 seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count);
1811 seq_printf(m, "IRQs disabled: %s\n",
1812 yesno(dev_priv->pc8.irqs_disabled));
1813 seq_printf(m, "Enabled: %s\n", yesno(dev_priv->pc8.enabled));
1814 mutex_unlock(&dev_priv->pc8.lock);
1815
1816 return 0;
1817}
1818
1927static int 1819static int
1928i915_wedged_get(void *data, u64 *val) 1820i915_wedged_get(void *data, u64 *val)
1929{ 1821{
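Among the debugfs entries added in the hunk above, i915_energy_uJ converts a raw energy counter into microjoules: bits 12:8 of MSR_RAPL_POWER_UNIT give the energy unit as a negative power of two in joules, hence units = 1000000 / (1 << power). A small worked example follows; the MSR field value and the counter reading are invented for illustration.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t msr = 0x0a00;                 /* invented: energy unit field = 10 */
	uint64_t counter = 123456;             /* invented raw energy reading */
	unsigned power = (msr & 0x1f00) >> 8;  /* 2^-power joules per count */
	uint64_t units = 1000000ULL / (1ULL << power); /* microjoules per count */

	printf("1 count = %llu uJ, total = %llu uJ\n",
	       (unsigned long long)units,
	       (unsigned long long)(counter * units));
	return 0;
}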
@@ -2006,6 +1898,8 @@ i915_drop_caches_set(void *data, u64 val)
2006 struct drm_device *dev = data; 1898 struct drm_device *dev = data;
2007 struct drm_i915_private *dev_priv = dev->dev_private; 1899 struct drm_i915_private *dev_priv = dev->dev_private;
2008 struct drm_i915_gem_object *obj, *next; 1900 struct drm_i915_gem_object *obj, *next;
1901 struct i915_address_space *vm;
1902 struct i915_vma *vma, *x;
2009 int ret; 1903 int ret;
2010 1904
2011 DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val); 1905 DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
@@ -2026,12 +1920,17 @@ i915_drop_caches_set(void *data, u64 val)
2026 i915_gem_retire_requests(dev); 1920 i915_gem_retire_requests(dev);
2027 1921
2028 if (val & DROP_BOUND) { 1922 if (val & DROP_BOUND) {
2029 list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list) 1923 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
2030 if (obj->pin_count == 0) { 1924 list_for_each_entry_safe(vma, x, &vm->inactive_list,
2031 ret = i915_gem_object_unbind(obj); 1925 mm_list) {
1926 if (vma->obj->pin_count)
1927 continue;
1928
1929 ret = i915_vma_unbind(vma);
2032 if (ret) 1930 if (ret)
2033 goto unlock; 1931 goto unlock;
2034 } 1932 }
1933 }
2035 } 1934 }
2036 1935
2037 if (val & DROP_UNBOUND) { 1936 if (val & DROP_UNBOUND) {
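The DROP_BOUND branch now walks every address space and uses the *_safe list iterator because i915_vma_unbind() removes the vma from the very list being walked. The userspace sketch below, over a plain singly-linked list with hypothetical names, shows the same discipline: the next pointer is saved before the current node may be unlinked and freed.

#include <stdio.h>
#include <stdlib.h>

struct vma {
	int pinned;
	struct vma *next;
};

static void drop_unpinned(struct vma **head)
{
	struct vma **link = head;
	struct vma *v, *next;

	for (v = *head; v; v = next) {
		next = v->next;          /* saved before v may be freed */
		if (v->pinned) {
			link = &v->next;
			continue;
		}
		*link = next;            /* "unbind": unlink and free */
		free(v);
	}
}

int main(void)
{
	struct vma *head = NULL;
	int i;

	for (i = 0; i < 4; i++) {
		struct vma *v = malloc(sizeof(*v));
		v->pinned = (i == 2);
		v->next = head;
		head = v;
	}
	drop_unpinned(&head);
	for (; head; head = head->next)
		printf("kept pinned vma %p\n", (void *)head);
	return 0;
}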
@@ -2326,6 +2225,7 @@ static struct drm_info_list i915_debugfs_list[] = {
2326 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, 2225 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
2327 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 2226 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
2328 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 2227 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
2228 {"i915_gem_stolen", i915_gem_stolen_list_info },
2329 {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, 2229 {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
2330 {"i915_gem_request", i915_gem_request_info, 0}, 2230 {"i915_gem_request", i915_gem_request_info, 0},
2331 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 2231 {"i915_gem_seqno", i915_gem_seqno_info, 0},
@@ -2353,64 +2253,42 @@ static struct drm_info_list i915_debugfs_list[] = {
2353 {"i915_swizzle_info", i915_swizzle_info, 0}, 2253 {"i915_swizzle_info", i915_swizzle_info, 0},
2354 {"i915_ppgtt_info", i915_ppgtt_info, 0}, 2254 {"i915_ppgtt_info", i915_ppgtt_info, 0},
2355 {"i915_dpio", i915_dpio_info, 0}, 2255 {"i915_dpio", i915_dpio_info, 0},
2256 {"i915_llc", i915_llc, 0},
2257 {"i915_edp_psr_status", i915_edp_psr_status, 0},
2258 {"i915_energy_uJ", i915_energy_uJ, 0},
2259 {"i915_pc8_status", i915_pc8_status, 0},
2356}; 2260};
2357#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 2261#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
2358 2262
2263static struct i915_debugfs_files {
2264 const char *name;
2265 const struct file_operations *fops;
2266} i915_debugfs_files[] = {
2267 {"i915_wedged", &i915_wedged_fops},
2268 {"i915_max_freq", &i915_max_freq_fops},
2269 {"i915_min_freq", &i915_min_freq_fops},
2270 {"i915_cache_sharing", &i915_cache_sharing_fops},
2271 {"i915_ring_stop", &i915_ring_stop_fops},
2272 {"i915_gem_drop_caches", &i915_drop_caches_fops},
2273 {"i915_error_state", &i915_error_state_fops},
2274 {"i915_next_seqno", &i915_next_seqno_fops},
2275};
2276
2359int i915_debugfs_init(struct drm_minor *minor) 2277int i915_debugfs_init(struct drm_minor *minor)
2360{ 2278{
2361 int ret; 2279 int ret, i;
2362
2363 ret = i915_debugfs_create(minor->debugfs_root, minor,
2364 "i915_wedged",
2365 &i915_wedged_fops);
2366 if (ret)
2367 return ret;
2368 2280
2369 ret = i915_forcewake_create(minor->debugfs_root, minor); 2281 ret = i915_forcewake_create(minor->debugfs_root, minor);
2370 if (ret) 2282 if (ret)
2371 return ret; 2283 return ret;
2372 2284
2373 ret = i915_debugfs_create(minor->debugfs_root, minor, 2285 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
2374 "i915_max_freq", 2286 ret = i915_debugfs_create(minor->debugfs_root, minor,
2375 &i915_max_freq_fops); 2287 i915_debugfs_files[i].name,
2376 if (ret) 2288 i915_debugfs_files[i].fops);
2377 return ret; 2289 if (ret)
2378 2290 return ret;
2379 ret = i915_debugfs_create(minor->debugfs_root, minor, 2291 }
2380 "i915_min_freq",
2381 &i915_min_freq_fops);
2382 if (ret)
2383 return ret;
2384
2385 ret = i915_debugfs_create(minor->debugfs_root, minor,
2386 "i915_cache_sharing",
2387 &i915_cache_sharing_fops);
2388 if (ret)
2389 return ret;
2390
2391 ret = i915_debugfs_create(minor->debugfs_root, minor,
2392 "i915_ring_stop",
2393 &i915_ring_stop_fops);
2394 if (ret)
2395 return ret;
2396
2397 ret = i915_debugfs_create(minor->debugfs_root, minor,
2398 "i915_gem_drop_caches",
2399 &i915_drop_caches_fops);
2400 if (ret)
2401 return ret;
2402
2403 ret = i915_debugfs_create(minor->debugfs_root, minor,
2404 "i915_error_state",
2405 &i915_error_state_fops);
2406 if (ret)
2407 return ret;
2408
2409 ret = i915_debugfs_create(minor->debugfs_root, minor,
2410 "i915_next_seqno",
2411 &i915_next_seqno_fops);
2412 if (ret)
2413 return ret;
2414 2292
2415 return drm_debugfs_create_files(i915_debugfs_list, 2293 return drm_debugfs_create_files(i915_debugfs_list,
2416 I915_DEBUGFS_ENTRIES, 2294 I915_DEBUGFS_ENTRIES,
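The rewrite of i915_debugfs_init() collapses eight near-identical i915_debugfs_create() calls into the i915_debugfs_files[] table plus a loop, and i915_debugfs_cleanup() below reuses the same table for removal. A generic userspace sketch of that table-driven pattern follows; the entry names, the sample output, and the register_file() stub are invented.

#include <stdio.h>

struct debugfs_file {
	const char *name;
	int (*show)(void);
};

static int show_wedged(void)   { return printf("wedged: no\n"); }
static int show_max_freq(void) { return printf("max freq: 1100 MHz\n"); }

static const struct debugfs_file files[] = {
	{ "i915_wedged",   show_wedged },
	{ "i915_max_freq", show_max_freq },
};

static int register_file(const struct debugfs_file *f)
{
	/* Stand-in for the real registration call. */
	printf("registering %s\n", f->name);
	return 0;
}

int main(void)
{
	size_t i;
	int ret;

	/* One loop replaces a chain of copy-pasted registration calls. */
	for (i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
		ret = register_file(&files[i]);
		if (ret)
			return ret;
	}
	for (i = 0; i < sizeof(files) / sizeof(files[0]); i++)
		files[i].show();
	return 0;
}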
@@ -2419,26 +2297,18 @@ int i915_debugfs_init(struct drm_minor *minor)
2419 2297
2420void i915_debugfs_cleanup(struct drm_minor *minor) 2298void i915_debugfs_cleanup(struct drm_minor *minor)
2421{ 2299{
2300 int i;
2301
2422 drm_debugfs_remove_files(i915_debugfs_list, 2302 drm_debugfs_remove_files(i915_debugfs_list,
2423 I915_DEBUGFS_ENTRIES, minor); 2303 I915_DEBUGFS_ENTRIES, minor);
2424 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, 2304 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
2425 1, minor); 2305 1, minor);
2426 drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops, 2306 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
2427 1, minor); 2307 struct drm_info_list *info_list =
2428 drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops, 2308 (struct drm_info_list *) i915_debugfs_files[i].fops;
2429 1, minor); 2309
2430 drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops, 2310 drm_debugfs_remove_files(info_list, 1, minor);
2431 1, minor); 2311 }
2432 drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
2433 1, minor);
2434 drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops,
2435 1, minor);
2436 drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
2437 1, minor);
2438 drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
2439 1, minor);
2440 drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops,
2441 1, minor);
2442} 2312}
2443 2313
2444#endif /* CONFIG_DEBUG_FS */ 2314#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index f4669802a0fb..fdaa0915ce56 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -976,6 +976,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
976 case I915_PARAM_HAS_LLC: 976 case I915_PARAM_HAS_LLC:
977 value = HAS_LLC(dev); 977 value = HAS_LLC(dev);
978 break; 978 break;
979 case I915_PARAM_HAS_WT:
980 value = HAS_WT(dev);
981 break;
979 case I915_PARAM_HAS_ALIASING_PPGTT: 982 case I915_PARAM_HAS_ALIASING_PPGTT:
980 value = dev_priv->mm.aliasing_ppgtt ? 1 : 0; 983 value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
981 break; 984 break;
@@ -1293,7 +1296,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
1293 1296
1294 intel_register_dsm_handler(); 1297 intel_register_dsm_handler();
1295 1298
1296 ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops); 1299 ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
1297 if (ret) 1300 if (ret)
1298 goto cleanup_vga_client; 1301 goto cleanup_vga_client;
1299 1302
@@ -1323,10 +1326,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
1323 /* Always safe in the mode setting case. */ 1326 /* Always safe in the mode setting case. */
1324 /* FIXME: do pre/post-mode set stuff in core KMS code */ 1327 /* FIXME: do pre/post-mode set stuff in core KMS code */
1325 dev->vblank_disable_allowed = 1; 1328 dev->vblank_disable_allowed = 1;
1326 if (INTEL_INFO(dev)->num_pipes == 0) { 1329 if (INTEL_INFO(dev)->num_pipes == 0)
1327 dev_priv->mm.suspended = 0;
1328 return 0; 1330 return 0;
1329 }
1330 1331
1331 ret = intel_fbdev_init(dev); 1332 ret = intel_fbdev_init(dev);
1332 if (ret) 1333 if (ret)
@@ -1352,9 +1353,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
1352 1353
1353 drm_kms_helper_poll_init(dev); 1354 drm_kms_helper_poll_init(dev);
1354 1355
1355 /* We're off and running w/KMS */
1356 dev_priv->mm.suspended = 0;
1357
1358 return 0; 1356 return 0;
1359 1357
1360cleanup_gem: 1358cleanup_gem:
@@ -1363,7 +1361,7 @@ cleanup_gem:
1363 i915_gem_context_fini(dev); 1361 i915_gem_context_fini(dev);
1364 mutex_unlock(&dev->struct_mutex); 1362 mutex_unlock(&dev->struct_mutex);
1365 i915_gem_cleanup_aliasing_ppgtt(dev); 1363 i915_gem_cleanup_aliasing_ppgtt(dev);
1366 drm_mm_takedown(&dev_priv->mm.gtt_space); 1364 drm_mm_takedown(&dev_priv->gtt.base.mm);
1367cleanup_irq: 1365cleanup_irq:
1368 drm_irq_uninstall(dev); 1366 drm_irq_uninstall(dev);
1369cleanup_gem_stolen: 1367cleanup_gem_stolen:
@@ -1441,22 +1439,6 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
1441} 1439}
1442 1440
1443/** 1441/**
1444 * intel_early_sanitize_regs - clean up BIOS state
1445 * @dev: DRM device
1446 *
1447 * This function must be called before we do any I915_READ or I915_WRITE. Its
1448 * purpose is to clean up any state left by the BIOS that may affect us when
1449 * reading and/or writing registers.
1450 */
1451static void intel_early_sanitize_regs(struct drm_device *dev)
1452{
1453 struct drm_i915_private *dev_priv = dev->dev_private;
1454
1455 if (HAS_FPGA_DBG_UNCLAIMED(dev))
1456 I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
1457}
1458
1459/**
1460 * i915_driver_load - setup chip and create an initial config 1442 * i915_driver_load - setup chip and create an initial config
1461 * @dev: DRM device 1443 * @dev: DRM device
1462 * @flags: startup flags 1444 * @flags: startup flags
@@ -1497,15 +1479,31 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1497 1479
1498 spin_lock_init(&dev_priv->irq_lock); 1480 spin_lock_init(&dev_priv->irq_lock);
1499 spin_lock_init(&dev_priv->gpu_error.lock); 1481 spin_lock_init(&dev_priv->gpu_error.lock);
1500 spin_lock_init(&dev_priv->rps.lock);
1501 spin_lock_init(&dev_priv->gt_lock);
1502 spin_lock_init(&dev_priv->backlight.lock); 1482 spin_lock_init(&dev_priv->backlight.lock);
1483 spin_lock_init(&dev_priv->uncore.lock);
1484 spin_lock_init(&dev_priv->mm.object_stat_lock);
1503 mutex_init(&dev_priv->dpio_lock); 1485 mutex_init(&dev_priv->dpio_lock);
1504 mutex_init(&dev_priv->rps.hw_lock); 1486 mutex_init(&dev_priv->rps.hw_lock);
1505 mutex_init(&dev_priv->modeset_restore_lock); 1487 mutex_init(&dev_priv->modeset_restore_lock);
1506 1488
1489 mutex_init(&dev_priv->pc8.lock);
1490 dev_priv->pc8.requirements_met = false;
1491 dev_priv->pc8.gpu_idle = false;
1492 dev_priv->pc8.irqs_disabled = false;
1493 dev_priv->pc8.enabled = false;
1494 dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
1495 INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
1496
1507 i915_dump_device_info(dev_priv); 1497 i915_dump_device_info(dev_priv);
1508 1498
1499 /* Not all pre-production machines fall into this category, only the
1500 * very first ones. Almost everything should work, except for maybe
1501 * suspend/resume. And we don't implement workarounds that affect only
1502 * pre-production machines. */
1503 if (IS_HSW_EARLY_SDV(dev))
1504 DRM_INFO("This is an early pre-production Haswell machine. "
1505 "It may not be fully functional.\n");
1506
1509 if (i915_get_bridge_dev(dev)) { 1507 if (i915_get_bridge_dev(dev)) {
1510 ret = -EIO; 1508 ret = -EIO;
1511 goto free_priv; 1509 goto free_priv;
@@ -1531,7 +1529,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1531 goto put_bridge; 1529 goto put_bridge;
1532 } 1530 }
1533 1531
1534 intel_early_sanitize_regs(dev); 1532 intel_uncore_early_sanitize(dev);
1533
1534 if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
1535 /* The docs do not explain exactly how the calculation can be
1536 * made. It is somewhat guessable, but for now, it's always
1537 * 128MB.
1538 * NB: We can't write IDICR yet because we do not have gt funcs
1539 * set up */
1540 dev_priv->ellc_size = 128;
1541 DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
1542 }
1535 1543
1536 ret = i915_gem_gtt_init(dev); 1544 ret = i915_gem_gtt_init(dev);
1537 if (ret) 1545 if (ret)
@@ -1567,8 +1575,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1567 goto out_rmmap; 1575 goto out_rmmap;
1568 } 1576 }
1569 1577
1570 dev_priv->mm.gtt_mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base, 1578 dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
1571 aperture_size); 1579 aperture_size);
1572 1580
1573 /* The i915 workqueue is primarily used for batched retirement of 1581 /* The i915 workqueue is primarily used for batched retirement of
1574 * requests (and thus managing bo) once the task has been completed 1582 * requests (and thus managing bo) once the task has been completed
@@ -1595,8 +1603,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1595 1603
1596 intel_irq_init(dev); 1604 intel_irq_init(dev);
1597 intel_pm_init(dev); 1605 intel_pm_init(dev);
1598 intel_gt_sanitize(dev); 1606 intel_uncore_sanitize(dev);
1599 intel_gt_init(dev); 1607 intel_uncore_init(dev);
1600 1608
1601 /* Try to make sure MCHBAR is enabled before poking at it */ 1609 /* Try to make sure MCHBAR is enabled before poking at it */
1602 intel_setup_mchbar(dev); 1610 intel_setup_mchbar(dev);
@@ -1631,9 +1639,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1631 goto out_gem_unload; 1639 goto out_gem_unload;
1632 } 1640 }
1633 1641
1634 /* Start out suspended */
1635 dev_priv->mm.suspended = 1;
1636
1637 if (HAS_POWER_WELL(dev)) 1642 if (HAS_POWER_WELL(dev))
1638 i915_init_power_well(dev); 1643 i915_init_power_well(dev);
1639 1644
@@ -1643,6 +1648,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1643 DRM_ERROR("failed to init modeset\n"); 1648 DRM_ERROR("failed to init modeset\n");
1644 goto out_gem_unload; 1649 goto out_gem_unload;
1645 } 1650 }
1651 } else {
1652 /* Start out suspended in ums mode. */
1653 dev_priv->ums.mm_suspended = 1;
1646 } 1654 }
1647 1655
1648 i915_setup_sysfs(dev); 1656 i915_setup_sysfs(dev);
@@ -1669,9 +1677,9 @@ out_gem_unload:
1669 intel_teardown_mchbar(dev); 1677 intel_teardown_mchbar(dev);
1670 destroy_workqueue(dev_priv->wq); 1678 destroy_workqueue(dev_priv->wq);
1671out_mtrrfree: 1679out_mtrrfree:
1672 arch_phys_wc_del(dev_priv->mm.gtt_mtrr); 1680 arch_phys_wc_del(dev_priv->gtt.mtrr);
1673 io_mapping_free(dev_priv->gtt.mappable); 1681 io_mapping_free(dev_priv->gtt.mappable);
1674 dev_priv->gtt.gtt_remove(dev); 1682 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
1675out_rmmap: 1683out_rmmap:
1676 pci_iounmap(dev->pdev, dev_priv->regs); 1684 pci_iounmap(dev->pdev, dev_priv->regs);
1677put_bridge: 1685put_bridge:
@@ -1688,8 +1696,13 @@ int i915_driver_unload(struct drm_device *dev)
1688 1696
1689 intel_gpu_ips_teardown(); 1697 intel_gpu_ips_teardown();
1690 1698
1691 if (HAS_POWER_WELL(dev)) 1699 if (HAS_POWER_WELL(dev)) {
1700 /* The i915.ko module is still not prepared to be loaded when
1701 * the power well is not enabled, so just enable it in case
1702 * we're going to unload/reload. */
1703 intel_set_power_well(dev, true);
1692 i915_remove_power_well(dev); 1704 i915_remove_power_well(dev);
1705 }
1693 1706
1694 i915_teardown_sysfs(dev); 1707 i915_teardown_sysfs(dev);
1695 1708
@@ -1707,7 +1720,7 @@ int i915_driver_unload(struct drm_device *dev)
1707 cancel_delayed_work_sync(&dev_priv->mm.retire_work); 1720 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
1708 1721
1709 io_mapping_free(dev_priv->gtt.mappable); 1722 io_mapping_free(dev_priv->gtt.mappable);
1710 arch_phys_wc_del(dev_priv->mm.gtt_mtrr); 1723 arch_phys_wc_del(dev_priv->gtt.mtrr);
1711 1724
1712 acpi_video_unregister(); 1725 acpi_video_unregister();
1713 1726
@@ -1735,6 +1748,8 @@ int i915_driver_unload(struct drm_device *dev)
1735 cancel_work_sync(&dev_priv->gpu_error.work); 1748 cancel_work_sync(&dev_priv->gpu_error.work);
1736 i915_destroy_error_state(dev); 1749 i915_destroy_error_state(dev);
1737 1750
1751 cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
1752
1738 if (dev->pdev->msi_enabled) 1753 if (dev->pdev->msi_enabled)
1739 pci_disable_msi(dev->pdev); 1754 pci_disable_msi(dev->pdev);
1740 1755
@@ -1756,7 +1771,9 @@ int i915_driver_unload(struct drm_device *dev)
1756 i915_free_hws(dev); 1771 i915_free_hws(dev);
1757 } 1772 }
1758 1773
1759 drm_mm_takedown(&dev_priv->mm.gtt_space); 1774 list_del(&dev_priv->gtt.base.global_link);
1775 WARN_ON(!list_empty(&dev_priv->vm_list));
1776 drm_mm_takedown(&dev_priv->gtt.base.mm);
1760 if (dev_priv->regs != NULL) 1777 if (dev_priv->regs != NULL)
1761 pci_iounmap(dev->pdev, dev_priv->regs); 1778 pci_iounmap(dev->pdev, dev_priv->regs);
1762 1779
@@ -1766,7 +1783,7 @@ int i915_driver_unload(struct drm_device *dev)
1766 destroy_workqueue(dev_priv->wq); 1783 destroy_workqueue(dev_priv->wq);
1767 pm_qos_remove_request(&dev_priv->pm_qos); 1784 pm_qos_remove_request(&dev_priv->pm_qos);
1768 1785
1769 dev_priv->gtt.gtt_remove(dev); 1786 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
1770 1787
1771 if (dev_priv->slab) 1788 if (dev_priv->slab)
1772 kmem_cache_destroy(dev_priv->slab); 1789 kmem_cache_destroy(dev_priv->slab);
@@ -1842,14 +1859,14 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1842 kfree(file_priv); 1859 kfree(file_priv);
1843} 1860}
1844 1861
1845struct drm_ioctl_desc i915_ioctls[] = { 1862const struct drm_ioctl_desc i915_ioctls[] = {
1846 DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1863 DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1847 DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH), 1864 DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
1848 DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH), 1865 DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
1849 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), 1866 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
1850 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), 1867 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
1851 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), 1868 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
1852 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH), 1869 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
1853 DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1870 DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1854 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), 1871 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
1855 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), 1872 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
@@ -1862,35 +1879,35 @@ struct drm_ioctl_desc i915_ioctls[] = {
1862 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1879 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1863 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 1880 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1864 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), 1881 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
1865 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED), 1882 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
1866 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), 1883 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1867 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), 1884 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1868 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), 1885 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
1869 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED), 1886 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1870 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED), 1887 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1871 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), 1888 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
1872 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 1889 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1873 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 1890 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1874 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED), 1891 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1875 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), 1892 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1876 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), 1893 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1877 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED), 1894 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1878 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), 1895 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1879 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), 1896 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1880 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), 1897 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1881 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED), 1898 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1882 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED), 1899 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1883 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED), 1900 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1884 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), 1901 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
1885 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), 1902 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1886 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 1903 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1887 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 1904 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1888 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 1905 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1889 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 1906 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1890 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED), 1907 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
1891 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED), 1908 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1892 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED), 1909 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1893 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED), 1910 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1894}; 1911};
1895 1912
1896int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 1913int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
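The hunk above constifies i915_ioctls[] and tags the buffer, context and parameter ioctls with DRM_RENDER_ALLOW so they become reachable through render nodes. As a rough, self-contained sketch of that table-driven permission pattern (illustrative only; the X_* flag names and the ioctl_permitted() helper are invented for this example and are not DRM core code), the check amounts to masking each entry's flags against the properties of the caller's file:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-ioctl permission flags, modelled loosely on the
 * DRM_AUTH / DRM_MASTER / DRM_ROOT_ONLY / DRM_RENDER_ALLOW bits used
 * in the table above. */
#define X_AUTH          (1u << 0)  /* caller must be authenticated  */
#define X_MASTER        (1u << 1)  /* caller must be the DRM master */
#define X_ROOT_ONLY     (1u << 2)  /* caller must be root           */
#define X_RENDER_ALLOW  (1u << 3)  /* usable through a render node  */

struct ioctl_desc {
        const char *name;
        uint32_t flags;
};

/* A two-entry table in the spirit of i915_ioctls[]. */
static const struct ioctl_desc table[] = {
        { "GEM_EXECBUFFER2", X_AUTH | X_RENDER_ALLOW },
        { "HWS_ADDR",        X_AUTH | X_MASTER | X_ROOT_ONLY },
};

/* Render-node callers only reach entries explicitly marked
 * X_RENDER_ALLOW; the other bits gate legacy (primary node) callers. */
static bool ioctl_permitted(const struct ioctl_desc *d, bool render_node,
                            bool authenticated, bool master, bool root)
{
        if (render_node)
                return d->flags & X_RENDER_ALLOW;
        if ((d->flags & X_AUTH) && !authenticated)
                return false;
        if ((d->flags & X_MASTER) && !master)
                return false;
        if ((d->flags & X_ROOT_ONLY) && !root)
                return false;
        return true;
}

int main(void)
{
        printf("%s via render node: %s\n", table[0].name,
               ioctl_permitted(&table[0], true, false, false, false) ? "ok" : "denied");
        printf("%s via render node: %s\n", table[1].name,
               ioctl_permitted(&table[1], true, false, false, false) ? "ok" : "denied");
        return 0;
}

The design point is that the policy lives entirely in the table: enabling render-node access for another ioctl is a one-line flag change, not new code in the dispatch path.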
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 45b3c030f483..ccb28ead3501 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -118,10 +118,14 @@ module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
118MODULE_PARM_DESC(i915_enable_ppgtt, 118MODULE_PARM_DESC(i915_enable_ppgtt,
119 "Enable PPGTT (default: true)"); 119 "Enable PPGTT (default: true)");
120 120
121unsigned int i915_preliminary_hw_support __read_mostly = 0; 121int i915_enable_psr __read_mostly = 0;
122module_param_named(enable_psr, i915_enable_psr, int, 0600);
123MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
124
125unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
122module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600); 126module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
123MODULE_PARM_DESC(preliminary_hw_support, 127MODULE_PARM_DESC(preliminary_hw_support,
124 "Enable preliminary hardware support. (default: false)"); 128 "Enable preliminary hardware support.");
125 129
126int i915_disable_power_well __read_mostly = 1; 130int i915_disable_power_well __read_mostly = 1;
127module_param_named(disable_power_well, i915_disable_power_well, int, 0600); 131module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
@@ -132,6 +136,24 @@ int i915_enable_ips __read_mostly = 1;
132module_param_named(enable_ips, i915_enable_ips, int, 0600); 136module_param_named(enable_ips, i915_enable_ips, int, 0600);
133MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)"); 137MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
134 138
139bool i915_fastboot __read_mostly = 0;
140module_param_named(fastboot, i915_fastboot, bool, 0600);
141MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
142 "(default: false)");
143
144int i915_enable_pc8 __read_mostly = 1;
145module_param_named(enable_pc8, i915_enable_pc8, int, 0600);
146MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)");
147
148int i915_pc8_timeout __read_mostly = 5000;
149module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600);
150MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)");
151
152bool i915_prefault_disable __read_mostly;
153module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
154MODULE_PARM_DESC(prefault_disable,
155 "Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
156
135static struct drm_driver driver; 157static struct drm_driver driver;
136extern int intel_agp_enabled; 158extern int intel_agp_enabled;
137 159
@@ -543,6 +565,9 @@ static int i915_drm_freeze(struct drm_device *dev)
543 dev_priv->modeset_restore = MODESET_SUSPENDED; 565 dev_priv->modeset_restore = MODESET_SUSPENDED;
544 mutex_unlock(&dev_priv->modeset_restore_lock); 566 mutex_unlock(&dev_priv->modeset_restore_lock);
545 567
568 /* We do a lot of poking in a lot of registers, make sure they work
569 * properly. */
570 hsw_disable_package_c8(dev_priv);
546 intel_set_power_well(dev, true); 571 intel_set_power_well(dev, true);
547 572
548 drm_kms_helper_poll_disable(dev); 573 drm_kms_helper_poll_disable(dev);
@@ -551,7 +576,11 @@ static int i915_drm_freeze(struct drm_device *dev)
551 576
552 /* If KMS is active, we do the leavevt stuff here */ 577 /* If KMS is active, we do the leavevt stuff here */
553 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 578 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
554 int error = i915_gem_idle(dev); 579 int error;
580
581 mutex_lock(&dev->struct_mutex);
582 error = i915_gem_idle(dev);
583 mutex_unlock(&dev->struct_mutex);
555 if (error) { 584 if (error) {
556 dev_err(&dev->pdev->dev, 585 dev_err(&dev->pdev->dev,
557 "GEM idle failed, resume might fail\n"); 586 "GEM idle failed, resume might fail\n");
@@ -656,7 +685,6 @@ static int __i915_drm_thaw(struct drm_device *dev)
656 intel_init_pch_refclk(dev); 685 intel_init_pch_refclk(dev);
657 686
658 mutex_lock(&dev->struct_mutex); 687 mutex_lock(&dev->struct_mutex);
659 dev_priv->mm.suspended = 0;
660 688
661 error = i915_gem_init_hw(dev); 689 error = i915_gem_init_hw(dev);
662 mutex_unlock(&dev->struct_mutex); 690 mutex_unlock(&dev->struct_mutex);
@@ -696,6 +724,10 @@ static int __i915_drm_thaw(struct drm_device *dev)
696 schedule_work(&dev_priv->console_resume_work); 724 schedule_work(&dev_priv->console_resume_work);
697 } 725 }
698 726
727 /* Undo what we did at i915_drm_freeze so the refcount goes back to the
728 * expected level. */
729 hsw_enable_package_c8(dev_priv);
730
699 mutex_lock(&dev_priv->modeset_restore_lock); 731 mutex_lock(&dev_priv->modeset_restore_lock);
700 dev_priv->modeset_restore = MODESET_DONE; 732 dev_priv->modeset_restore = MODESET_DONE;
701 mutex_unlock(&dev_priv->modeset_restore_lock); 733 mutex_unlock(&dev_priv->modeset_restore_lock);
@@ -706,7 +738,7 @@ static int i915_drm_thaw(struct drm_device *dev)
706{ 738{
707 int error = 0; 739 int error = 0;
708 740
709 intel_gt_sanitize(dev); 741 intel_uncore_sanitize(dev);
710 742
711 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 743 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
712 mutex_lock(&dev->struct_mutex); 744 mutex_lock(&dev->struct_mutex);
@@ -732,7 +764,7 @@ int i915_resume(struct drm_device *dev)
732 764
733 pci_set_master(dev->pdev); 765 pci_set_master(dev->pdev);
734 766
735 intel_gt_sanitize(dev); 767 intel_uncore_sanitize(dev);
736 768
737 /* 769 /*
738 * Platforms with opregion should have sane BIOS, older ones (gen3 and 770 * Platforms with opregion should have sane BIOS, older ones (gen3 and
@@ -753,139 +785,6 @@ int i915_resume(struct drm_device *dev)
753 return 0; 785 return 0;
754} 786}
755 787
756static int i8xx_do_reset(struct drm_device *dev)
757{
758 struct drm_i915_private *dev_priv = dev->dev_private;
759
760 if (IS_I85X(dev))
761 return -ENODEV;
762
763 I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
764 POSTING_READ(D_STATE);
765
766 if (IS_I830(dev) || IS_845G(dev)) {
767 I915_WRITE(DEBUG_RESET_I830,
768 DEBUG_RESET_DISPLAY |
769 DEBUG_RESET_RENDER |
770 DEBUG_RESET_FULL);
771 POSTING_READ(DEBUG_RESET_I830);
772 msleep(1);
773
774 I915_WRITE(DEBUG_RESET_I830, 0);
775 POSTING_READ(DEBUG_RESET_I830);
776 }
777
778 msleep(1);
779
780 I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
781 POSTING_READ(D_STATE);
782
783 return 0;
784}
785
786static int i965_reset_complete(struct drm_device *dev)
787{
788 u8 gdrst;
789 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
790 return (gdrst & GRDOM_RESET_ENABLE) == 0;
791}
792
793static int i965_do_reset(struct drm_device *dev)
794{
795 int ret;
796 u8 gdrst;
797
798 /*
799 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
800 * well as the reset bit (GR/bit 0). Setting the GR bit
801 * triggers the reset; when done, the hardware will clear it.
802 */
803 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
804 pci_write_config_byte(dev->pdev, I965_GDRST,
805 gdrst | GRDOM_RENDER |
806 GRDOM_RESET_ENABLE);
807 ret = wait_for(i965_reset_complete(dev), 500);
808 if (ret)
809 return ret;
810
811 /* We can't reset render&media without also resetting display ... */
812 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
813 pci_write_config_byte(dev->pdev, I965_GDRST,
814 gdrst | GRDOM_MEDIA |
815 GRDOM_RESET_ENABLE);
816
817 return wait_for(i965_reset_complete(dev), 500);
818}
819
820static int ironlake_do_reset(struct drm_device *dev)
821{
822 struct drm_i915_private *dev_priv = dev->dev_private;
823 u32 gdrst;
824 int ret;
825
826 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
827 gdrst &= ~GRDOM_MASK;
828 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
829 gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
830 ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
831 if (ret)
832 return ret;
833
834 /* We can't reset render&media without also resetting display ... */
835 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
836 gdrst &= ~GRDOM_MASK;
837 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
838 gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
839 return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
840}
841
842static int gen6_do_reset(struct drm_device *dev)
843{
844 struct drm_i915_private *dev_priv = dev->dev_private;
845 int ret;
846 unsigned long irqflags;
847
848 /* Hold gt_lock across reset to prevent any register access
849 * with forcewake not set correctly
850 */
851 spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
852
853 /* Reset the chip */
854
855 /* GEN6_GDRST is not in the gt power well, no need to check
856 * for fifo space for the write or forcewake the chip for
857 * the read
858 */
859 I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
860
861 /* Spin waiting for the device to ack the reset request */
862 ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
863
864 /* If reset with a user forcewake, try to restore, otherwise turn it off */
865 if (dev_priv->forcewake_count)
866 dev_priv->gt.force_wake_get(dev_priv);
867 else
868 dev_priv->gt.force_wake_put(dev_priv);
869
870 /* Restore fifo count */
871 dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
872
873 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
874 return ret;
875}
876
877int intel_gpu_reset(struct drm_device *dev)
878{
879 switch (INTEL_INFO(dev)->gen) {
880 case 7:
881 case 6: return gen6_do_reset(dev);
882 case 5: return ironlake_do_reset(dev);
883 case 4: return i965_do_reset(dev);
884 case 2: return i8xx_do_reset(dev);
885 default: return -ENODEV;
886 }
887}
888
889/** 788/**
890 * i915_reset - reset chip after a hang 789 * i915_reset - reset chip after a hang
891 * @dev: drm device to reset 790 * @dev: drm device to reset
@@ -955,11 +854,11 @@ int i915_reset(struct drm_device *dev)
955 * switched away). 854 * switched away).
956 */ 855 */
957 if (drm_core_check_feature(dev, DRIVER_MODESET) || 856 if (drm_core_check_feature(dev, DRIVER_MODESET) ||
958 !dev_priv->mm.suspended) { 857 !dev_priv->ums.mm_suspended) {
959 struct intel_ring_buffer *ring; 858 struct intel_ring_buffer *ring;
960 int i; 859 int i;
961 860
962 dev_priv->mm.suspended = 0; 861 dev_priv->ums.mm_suspended = 0;
963 862
964 i915_gem_init_swizzling(dev); 863 i915_gem_init_swizzling(dev);
965 864
@@ -1110,7 +1009,6 @@ static const struct file_operations i915_driver_fops = {
1110 .unlocked_ioctl = drm_ioctl, 1009 .unlocked_ioctl = drm_ioctl,
1111 .mmap = drm_gem_mmap, 1010 .mmap = drm_gem_mmap,
1112 .poll = drm_poll, 1011 .poll = drm_poll,
1113 .fasync = drm_fasync,
1114 .read = drm_read, 1012 .read = drm_read,
1115#ifdef CONFIG_COMPAT 1013#ifdef CONFIG_COMPAT
1116 .compat_ioctl = i915_compat_ioctl, 1014 .compat_ioctl = i915_compat_ioctl,
@@ -1123,8 +1021,9 @@ static struct drm_driver driver = {
1123 * deal with them for Intel hardware. 1021 * deal with them for Intel hardware.
1124 */ 1022 */
1125 .driver_features = 1023 .driver_features =
1126 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ 1024 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
1127 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME, 1025 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
1026 DRIVER_RENDER,
1128 .load = i915_driver_load, 1027 .load = i915_driver_load,
1129 .unload = i915_driver_unload, 1028 .unload = i915_driver_unload,
1130 .open = i915_driver_open, 1029 .open = i915_driver_open,
@@ -1154,7 +1053,7 @@ static struct drm_driver driver = {
1154 1053
1155 .dumb_create = i915_gem_dumb_create, 1054 .dumb_create = i915_gem_dumb_create,
1156 .dumb_map_offset = i915_gem_mmap_gtt, 1055 .dumb_map_offset = i915_gem_mmap_gtt,
1157 .dumb_destroy = i915_gem_dumb_destroy, 1056 .dumb_destroy = drm_gem_dumb_destroy,
1158 .ioctls = i915_ioctls, 1057 .ioctls = i915_ioctls,
1159 .fops = &i915_driver_fops, 1058 .fops = &i915_driver_fops,
1160 .name = DRIVER_NAME, 1059 .name = DRIVER_NAME,
@@ -1215,136 +1114,3 @@ module_exit(i915_exit);
1215MODULE_AUTHOR(DRIVER_AUTHOR); 1114MODULE_AUTHOR(DRIVER_AUTHOR);
1216MODULE_DESCRIPTION(DRIVER_DESC); 1115MODULE_DESCRIPTION(DRIVER_DESC);
1217MODULE_LICENSE("GPL and additional rights"); 1116MODULE_LICENSE("GPL and additional rights");
1218
1219/* We give fast paths for the really cool registers */
1220#define NEEDS_FORCE_WAKE(dev_priv, reg) \
1221 ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
1222 ((reg) < 0x40000) && \
1223 ((reg) != FORCEWAKE))
1224static void
1225ilk_dummy_write(struct drm_i915_private *dev_priv)
1226{
1227 /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
1228 * the chip from rc6 before touching it for real. MI_MODE is masked,
1229 * hence harmless to write 0 into. */
1230 I915_WRITE_NOTRACE(MI_MODE, 0);
1231}
1232
1233static void
1234hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
1235{
1236 if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
1237 (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
1238 DRM_ERROR("Unknown unclaimed register before writing to %x\n",
1239 reg);
1240 I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
1241 }
1242}
1243
1244static void
1245hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
1246{
1247 if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
1248 (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
1249 DRM_ERROR("Unclaimed write to %x\n", reg);
1250 I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
1251 }
1252}
1253
1254#define __i915_read(x, y) \
1255u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
1256 unsigned long irqflags; \
1257 u##x val = 0; \
1258 spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
1259 if (IS_GEN5(dev_priv->dev)) \
1260 ilk_dummy_write(dev_priv); \
1261 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
1262 if (dev_priv->forcewake_count == 0) \
1263 dev_priv->gt.force_wake_get(dev_priv); \
1264 val = read##y(dev_priv->regs + reg); \
1265 if (dev_priv->forcewake_count == 0) \
1266 dev_priv->gt.force_wake_put(dev_priv); \
1267 } else { \
1268 val = read##y(dev_priv->regs + reg); \
1269 } \
1270 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
1271 trace_i915_reg_rw(false, reg, val, sizeof(val)); \
1272 return val; \
1273}
1274
1275__i915_read(8, b)
1276__i915_read(16, w)
1277__i915_read(32, l)
1278__i915_read(64, q)
1279#undef __i915_read
1280
1281#define __i915_write(x, y) \
1282void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
1283 unsigned long irqflags; \
1284 u32 __fifo_ret = 0; \
1285 trace_i915_reg_rw(true, reg, val, sizeof(val)); \
1286 spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
1287 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
1288 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
1289 } \
1290 if (IS_GEN5(dev_priv->dev)) \
1291 ilk_dummy_write(dev_priv); \
1292 hsw_unclaimed_reg_clear(dev_priv, reg); \
1293 write##y(val, dev_priv->regs + reg); \
1294 if (unlikely(__fifo_ret)) { \
1295 gen6_gt_check_fifodbg(dev_priv); \
1296 } \
1297 hsw_unclaimed_reg_check(dev_priv, reg); \
1298 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
1299}
1300__i915_write(8, b)
1301__i915_write(16, w)
1302__i915_write(32, l)
1303__i915_write(64, q)
1304#undef __i915_write
1305
1306static const struct register_whitelist {
1307 uint64_t offset;
1308 uint32_t size;
1309 uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
1310} whitelist[] = {
1311 { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
1312};
1313
1314int i915_reg_read_ioctl(struct drm_device *dev,
1315 void *data, struct drm_file *file)
1316{
1317 struct drm_i915_private *dev_priv = dev->dev_private;
1318 struct drm_i915_reg_read *reg = data;
1319 struct register_whitelist const *entry = whitelist;
1320 int i;
1321
1322 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1323 if (entry->offset == reg->offset &&
1324 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
1325 break;
1326 }
1327
1328 if (i == ARRAY_SIZE(whitelist))
1329 return -EINVAL;
1330
1331 switch (entry->size) {
1332 case 8:
1333 reg->val = I915_READ64(reg->offset);
1334 break;
1335 case 4:
1336 reg->val = I915_READ(reg->offset);
1337 break;
1338 case 2:
1339 reg->val = I915_READ16(reg->offset);
1340 break;
1341 case 1:
1342 reg->val = I915_READ8(reg->offset);
1343 break;
1344 default:
1345 WARN_ON(1);
1346 return -EINVAL;
1347 }
1348
1349 return 0;
1350}
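The register-read ioctl and the mmio accessors deleted above move out of i915_drv.c in this series; the whitelist they used encodes the supported hardware generations as a bitmask (the comment notes 0x10 for gen 4, 0x30 for gens 4 and 5, so 0xF0 covers gens 4 through 7). Below is a minimal stand-alone sketch of that offset-plus-generation lookup; the names are invented for illustration and the offset is only a stand-in, this is not the kernel implementation:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct reg_entry {
        uint64_t offset;      /* register offset                         */
        uint32_t size;        /* access width in bytes                   */
        uint32_t gen_bitmask; /* bit (1 << gen) set for each allowed gen */
};

/* One-entry whitelist shaped like the removed table; 0xF0 allows
 * generations 4, 5, 6 and 7. */
static const struct reg_entry whitelist[] = {
        { 0x2358, 8, 0xF0 },
};

static bool read_allowed(uint64_t offset, unsigned int gen)
{
        for (size_t i = 0; i < sizeof(whitelist) / sizeof(whitelist[0]); i++)
                if (whitelist[i].offset == offset &&
                    ((1u << gen) & whitelist[i].gen_bitmask))
                        return true;
        return false;  /* unknown register or unsupported generation */
}

int main(void)
{
        printf("gen6 read of 0x2358: %s\n", read_allowed(0x2358, 6) ? "allowed" : "rejected");
        printf("gen3 read of 0x2358: %s\n", read_allowed(0x2358, 3) ? "allowed" : "rejected");
        return 0;
}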
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1929bffc1c77..52a3785a3fdf 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -144,6 +144,7 @@ enum intel_dpll_id {
144 144
145struct intel_dpll_hw_state { 145struct intel_dpll_hw_state {
146 uint32_t dpll; 146 uint32_t dpll;
147 uint32_t dpll_md;
147 uint32_t fp0; 148 uint32_t fp0;
148 uint32_t fp1; 149 uint32_t fp1;
149}; 150};
@@ -156,6 +157,8 @@ struct intel_shared_dpll {
156 /* should match the index in the dev_priv->shared_dplls array */ 157 /* should match the index in the dev_priv->shared_dplls array */
157 enum intel_dpll_id id; 158 enum intel_dpll_id id;
158 struct intel_dpll_hw_state hw_state; 159 struct intel_dpll_hw_state hw_state;
160 void (*mode_set)(struct drm_i915_private *dev_priv,
161 struct intel_shared_dpll *pll);
159 void (*enable)(struct drm_i915_private *dev_priv, 162 void (*enable)(struct drm_i915_private *dev_priv,
160 struct intel_shared_dpll *pll); 163 struct intel_shared_dpll *pll);
161 void (*disable)(struct drm_i915_private *dev_priv, 164 void (*disable)(struct drm_i915_private *dev_priv,
@@ -198,7 +201,6 @@ struct intel_ddi_plls {
198#define DRIVER_MINOR 6 201#define DRIVER_MINOR 6
199#define DRIVER_PATCHLEVEL 0 202#define DRIVER_PATCHLEVEL 0
200 203
201#define WATCH_COHERENCY 0
202#define WATCH_LISTS 0 204#define WATCH_LISTS 0
203#define WATCH_GTT 0 205#define WATCH_GTT 0
204 206
@@ -320,8 +322,8 @@ struct drm_i915_error_state {
320 u32 purgeable:1; 322 u32 purgeable:1;
321 s32 ring:4; 323 s32 ring:4;
322 u32 cache_level:2; 324 u32 cache_level:2;
323 } *active_bo, *pinned_bo; 325 } **active_bo, **pinned_bo;
324 u32 active_bo_count, pinned_bo_count; 326 u32 *active_bo_count, *pinned_bo_count;
325 struct intel_overlay_error_state *overlay; 327 struct intel_overlay_error_state *overlay;
326 struct intel_display_error_state *display; 328 struct intel_display_error_state *display;
327}; 329};
@@ -356,14 +358,16 @@ struct drm_i915_display_funcs {
356 struct dpll *match_clock, 358 struct dpll *match_clock,
357 struct dpll *best_clock); 359 struct dpll *best_clock);
358 void (*update_wm)(struct drm_device *dev); 360 void (*update_wm)(struct drm_device *dev);
359 void (*update_sprite_wm)(struct drm_device *dev, int pipe, 361 void (*update_sprite_wm)(struct drm_plane *plane,
362 struct drm_crtc *crtc,
360 uint32_t sprite_width, int pixel_size, 363 uint32_t sprite_width, int pixel_size,
361 bool enable); 364 bool enable, bool scaled);
362 void (*modeset_global_resources)(struct drm_device *dev); 365 void (*modeset_global_resources)(struct drm_device *dev);
363 /* Returns the active state of the crtc, and if the crtc is active, 366 /* Returns the active state of the crtc, and if the crtc is active,
364 * fills out the pipe-config with the hw state. */ 367 * fills out the pipe-config with the hw state. */
365 bool (*get_pipe_config)(struct intel_crtc *, 368 bool (*get_pipe_config)(struct intel_crtc *,
366 struct intel_crtc_config *); 369 struct intel_crtc_config *);
370 void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
367 int (*crtc_mode_set)(struct drm_crtc *crtc, 371 int (*crtc_mode_set)(struct drm_crtc *crtc,
368 int x, int y, 372 int x, int y,
369 struct drm_framebuffer *old_fb); 373 struct drm_framebuffer *old_fb);
@@ -376,7 +380,8 @@ struct drm_i915_display_funcs {
376 void (*init_clock_gating)(struct drm_device *dev); 380 void (*init_clock_gating)(struct drm_device *dev);
377 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, 381 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
378 struct drm_framebuffer *fb, 382 struct drm_framebuffer *fb,
379 struct drm_i915_gem_object *obj); 383 struct drm_i915_gem_object *obj,
384 uint32_t flags);
380 int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, 385 int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
381 int x, int y); 386 int x, int y);
382 void (*hpd_irq_setup)(struct drm_device *dev); 387 void (*hpd_irq_setup)(struct drm_device *dev);
@@ -387,11 +392,20 @@ struct drm_i915_display_funcs {
387 /* pll clock increase/decrease */ 392 /* pll clock increase/decrease */
388}; 393};
389 394
390struct drm_i915_gt_funcs { 395struct intel_uncore_funcs {
391 void (*force_wake_get)(struct drm_i915_private *dev_priv); 396 void (*force_wake_get)(struct drm_i915_private *dev_priv);
392 void (*force_wake_put)(struct drm_i915_private *dev_priv); 397 void (*force_wake_put)(struct drm_i915_private *dev_priv);
393}; 398};
394 399
400struct intel_uncore {
401 spinlock_t lock; /** lock is also taken in irq contexts. */
402
403 struct intel_uncore_funcs funcs;
404
405 unsigned fifo_count;
406 unsigned forcewake_count;
407};
408
395#define DEV_INFO_FOR_EACH_FLAG(func, sep) \ 409#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
396 func(is_mobile) sep \ 410 func(is_mobile) sep \
397 func(is_i85x) sep \ 411 func(is_i85x) sep \
@@ -436,12 +450,64 @@ struct intel_device_info {
436 450
437enum i915_cache_level { 451enum i915_cache_level {
438 I915_CACHE_NONE = 0, 452 I915_CACHE_NONE = 0,
439 I915_CACHE_LLC, 453 I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
440 I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */ 454 I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specifc
455 caches, eg sampler/render caches, and the
456 large Last-Level-Cache. LLC is coherent with
457 the CPU, but L3 is only visible to the GPU. */
458 I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
441}; 459};
442 460
443typedef uint32_t gen6_gtt_pte_t; 461typedef uint32_t gen6_gtt_pte_t;
444 462
463struct i915_address_space {
464 struct drm_mm mm;
465 struct drm_device *dev;
466 struct list_head global_link;
467 unsigned long start; /* Start offset always 0 for dri2 */
468 size_t total; /* size addr space maps (ex. 2GB for ggtt) */
469
470 struct {
471 dma_addr_t addr;
472 struct page *page;
473 } scratch;
474
475 /**
476 * List of objects currently involved in rendering.
477 *
478 * Includes buffers having the contents of their GPU caches
479 * flushed, not necessarily primitives. last_rendering_seqno
480 * represents when the rendering involved will be completed.
481 *
482 * A reference is held on the buffer while on this list.
483 */
484 struct list_head active_list;
485
486 /**
487 * LRU list of objects which are not in the ringbuffer and
488 * are ready to unbind, but are still in the GTT.
489 *
490 * last_rendering_seqno is 0 while an object is in this list.
491 *
492 * A reference is not held on the buffer while on this list,
493 * as merely being GTT-bound shouldn't prevent its being
494 * freed, and we'll pull it off the list in the free path.
495 */
496 struct list_head inactive_list;
497
498 /* FIXME: Need a more generic return type */
499 gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
500 enum i915_cache_level level);
501 void (*clear_range)(struct i915_address_space *vm,
502 unsigned int first_entry,
503 unsigned int num_entries);
504 void (*insert_entries)(struct i915_address_space *vm,
505 struct sg_table *st,
506 unsigned int first_entry,
507 enum i915_cache_level cache_level);
508 void (*cleanup)(struct i915_address_space *vm);
509};
510
445/* The Graphics Translation Table is the way in which GEN hardware translates a 511/* The Graphics Translation Table is the way in which GEN hardware translates a
446 * Graphics Virtual Address into a Physical Address. In addition to the normal 512 * Graphics Virtual Address into a Physical Address. In addition to the normal
447 * collateral associated with any va->pa translations GEN hardware also has a 513 * collateral associated with any va->pa translations GEN hardware also has a
@@ -450,8 +516,7 @@ typedef uint32_t gen6_gtt_pte_t;
450 * the spec. 516 * the spec.
451 */ 517 */
452struct i915_gtt { 518struct i915_gtt {
453 unsigned long start; /* Start offset of used GTT */ 519 struct i915_address_space base;
454 size_t total; /* Total size GTT can map */
455 size_t stolen_size; /* Total size of stolen memory */ 520 size_t stolen_size; /* Total size of stolen memory */
456 521
457 unsigned long mappable_end; /* End offset that we can CPU map */ 522 unsigned long mappable_end; /* End offset that we can CPU map */
@@ -462,50 +527,47 @@ struct i915_gtt {
462 void __iomem *gsm; 527 void __iomem *gsm;
463 528
464 bool do_idle_maps; 529 bool do_idle_maps;
465 dma_addr_t scratch_page_dma; 530
466 struct page *scratch_page; 531 int mtrr;
467 532
468 /* global gtt ops */ 533 /* global gtt ops */
469 int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total, 534 int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
470 size_t *stolen, phys_addr_t *mappable_base, 535 size_t *stolen, phys_addr_t *mappable_base,
471 unsigned long *mappable_end); 536 unsigned long *mappable_end);
472 void (*gtt_remove)(struct drm_device *dev);
473 void (*gtt_clear_range)(struct drm_device *dev,
474 unsigned int first_entry,
475 unsigned int num_entries);
476 void (*gtt_insert_entries)(struct drm_device *dev,
477 struct sg_table *st,
478 unsigned int pg_start,
479 enum i915_cache_level cache_level);
480 gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
481 dma_addr_t addr,
482 enum i915_cache_level level);
483}; 537};
484#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT) 538#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
485 539
486#define I915_PPGTT_PD_ENTRIES 512
487#define I915_PPGTT_PT_ENTRIES 1024
488struct i915_hw_ppgtt { 540struct i915_hw_ppgtt {
489 struct drm_device *dev; 541 struct i915_address_space base;
490 unsigned num_pd_entries; 542 unsigned num_pd_entries;
491 struct page **pt_pages; 543 struct page **pt_pages;
492 uint32_t pd_offset; 544 uint32_t pd_offset;
493 dma_addr_t *pt_dma_addr; 545 dma_addr_t *pt_dma_addr;
494 dma_addr_t scratch_page_dma_addr;
495 546
496 /* pte functions, mirroring the interface of the global gtt. */
497 void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
498 unsigned int first_entry,
499 unsigned int num_entries);
500 void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
501 struct sg_table *st,
502 unsigned int pg_start,
503 enum i915_cache_level cache_level);
504 gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
505 dma_addr_t addr,
506 enum i915_cache_level level);
507 int (*enable)(struct drm_device *dev); 547 int (*enable)(struct drm_device *dev);
508 void (*cleanup)(struct i915_hw_ppgtt *ppgtt); 548};
549
550/**
551 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
552 * VMA's presence cannot be guaranteed before binding, or after unbinding the
553 * object into/from the address space.
554 *
555 * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
556 * will always be <= an objects lifetime. So object refcounting should cover us.
557 */
558struct i915_vma {
559 struct drm_mm_node node;
560 struct drm_i915_gem_object *obj;
561 struct i915_address_space *vm;
562
563 /** This object's place on the active/inactive lists */
564 struct list_head mm_list;
565
566 struct list_head vma_link; /* Link in the object's VMA list */
567
568 /** This vma's place in the batchbuffer or on the eviction list */
569 struct list_head exec_list;
570
509}; 571};
510 572
511struct i915_ctx_hang_stats { 573struct i915_ctx_hang_stats {
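The i915_vma structure introduced above is the core of the address-space rework: an object no longer carries a single gtt_space/gtt_offset pair, it carries a list of bindings, one per address space it is mapped into. A toy model of that object/VMA/address-space relationship (a hedged sketch with invented names and fixed-size arrays, not the driver structs) looks like this:

#include <stdint.h>
#include <stdio.h>

/* Toy model of the relationship described in the i915_vma comment
 * above: one buffer object can be bound into several address spaces,
 * and each binding records where it sits in that space. */
struct address_space {
        const char *name;
};

struct vma {
        const struct address_space *vm;  /* which address space         */
        uint64_t start;                  /* offset of the binding in vm */
        uint64_t size;
};

struct gem_object {
        const char *name;
        struct vma bindings[2];          /* fixed-size stand-in for vma_list */
        int nr_bindings;
};

static void bind(struct gem_object *obj, const struct address_space *vm,
                 uint64_t start, uint64_t size)
{
        obj->bindings[obj->nr_bindings++] = (struct vma){ vm, start, size };
}

int main(void)
{
        struct address_space ggtt = { "global gtt" };
        struct address_space ppgtt = { "per-process gtt" };
        struct gem_object bo = { .name = "scanout" };

        bind(&bo, &ggtt, 0x10000, 0x4000);    /* global binding, e.g. for display */
        bind(&bo, &ppgtt, 0x200000, 0x4000);  /* per-process binding for the GPU  */

        for (int i = 0; i < bo.nr_bindings; i++)
                printf("%s bound in %s at 0x%llx\n", bo.name,
                       bo.bindings[i].vm->name,
                       (unsigned long long)bo.bindings[i].start);
        return 0;
}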
@@ -528,15 +590,48 @@ struct i915_hw_context {
528 struct i915_ctx_hang_stats hang_stats; 590 struct i915_ctx_hang_stats hang_stats;
529}; 591};
530 592
531enum no_fbc_reason { 593struct i915_fbc {
532 FBC_NO_OUTPUT, /* no outputs enabled to compress */ 594 unsigned long size;
533 FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ 595 unsigned int fb_id;
534 FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ 596 enum plane plane;
535 FBC_MODE_TOO_LARGE, /* mode too large for compression */ 597 int y;
536 FBC_BAD_PLANE, /* fbc not supported on plane */ 598
537 FBC_NOT_TILED, /* buffer not tiled */ 599 struct drm_mm_node *compressed_fb;
538 FBC_MULTIPLE_PIPES, /* more than one pipe active */ 600 struct drm_mm_node *compressed_llb;
539 FBC_MODULE_PARAM, 601
602 struct intel_fbc_work {
603 struct delayed_work work;
604 struct drm_crtc *crtc;
605 struct drm_framebuffer *fb;
606 int interval;
607 } *fbc_work;
608
609 enum no_fbc_reason {
610 FBC_OK, /* FBC is enabled */
611 FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
612 FBC_NO_OUTPUT, /* no outputs enabled to compress */
613 FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
614 FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
615 FBC_MODE_TOO_LARGE, /* mode too large for compression */
616 FBC_BAD_PLANE, /* fbc not supported on plane */
617 FBC_NOT_TILED, /* buffer not tiled */
618 FBC_MULTIPLE_PIPES, /* more than one pipe active */
619 FBC_MODULE_PARAM,
620 FBC_CHIP_DEFAULT, /* disabled by default on this chip */
621 } no_fbc_reason;
622};
623
624enum no_psr_reason {
625 PSR_NO_SOURCE, /* Not supported on platform */
626 PSR_NO_SINK, /* Not supported by panel */
627 PSR_MODULE_PARAM,
628 PSR_CRTC_NOT_ACTIVE,
629 PSR_PWR_WELL_ENABLED,
630 PSR_NOT_TILED,
631 PSR_SPRITE_ENABLED,
632 PSR_S3D_ENABLED,
633 PSR_INTERLACED_ENABLED,
634 PSR_HSW_NOT_DDIA,
540}; 635};
541 636
542enum intel_pch { 637enum intel_pch {
@@ -722,12 +817,12 @@ struct i915_suspend_saved_registers {
722}; 817};
723 818
724struct intel_gen6_power_mgmt { 819struct intel_gen6_power_mgmt {
820 /* work and pm_iir are protected by dev_priv->irq_lock */
725 struct work_struct work; 821 struct work_struct work;
726 struct delayed_work vlv_work;
727 u32 pm_iir; 822 u32 pm_iir;
728 /* lock - irqsave spinlock that protectects the work_struct and 823
729 * pm_iir. */ 824 /* On vlv we need to manually drop to Vmin with a delayed work. */
730 spinlock_t lock; 825 struct delayed_work vlv_work;
731 826
732 /* The below variables an all the rps hw state are protected by 827 /* The below variables an all the rps hw state are protected by
733 * dev->struct mutext. */ 828 * dev->struct mutext. */
@@ -793,6 +888,18 @@ struct i915_dri1_state {
793 uint32_t counter; 888 uint32_t counter;
794}; 889};
795 890
891struct i915_ums_state {
892 /**
893 * Flag if the X Server, and thus DRM, is not currently in
894 * control of the device.
895 *
896 * This is set between LeaveVT and EnterVT. It needs to be
897 * replaced with a semaphore. It also needs to be
898 * transitioned away from for kernel modesetting.
899 */
900 int mm_suspended;
901};
902
796struct intel_l3_parity { 903struct intel_l3_parity {
797 u32 *remap_info; 904 u32 *remap_info;
798 struct work_struct error_work; 905 struct work_struct error_work;
@@ -801,8 +908,6 @@ struct intel_l3_parity {
801struct i915_gem_mm { 908struct i915_gem_mm {
802 /** Memory allocator for GTT stolen memory */ 909 /** Memory allocator for GTT stolen memory */
803 struct drm_mm stolen; 910 struct drm_mm stolen;
804 /** Memory allocator for GTT */
805 struct drm_mm gtt_space;
806 /** List of all objects in gtt_space. Used to restore gtt 911 /** List of all objects in gtt_space. Used to restore gtt
807 * mappings on resume */ 912 * mappings on resume */
808 struct list_head bound_list; 913 struct list_head bound_list;
@@ -816,37 +921,12 @@ struct i915_gem_mm {
816 /** Usable portion of the GTT for GEM */ 921 /** Usable portion of the GTT for GEM */
817 unsigned long stolen_base; /* limited to low memory (32-bit) */ 922 unsigned long stolen_base; /* limited to low memory (32-bit) */
818 923
819 int gtt_mtrr;
820
821 /** PPGTT used for aliasing the PPGTT with the GTT */ 924 /** PPGTT used for aliasing the PPGTT with the GTT */
822 struct i915_hw_ppgtt *aliasing_ppgtt; 925 struct i915_hw_ppgtt *aliasing_ppgtt;
823 926
824 struct shrinker inactive_shrinker; 927 struct shrinker inactive_shrinker;
825 bool shrinker_no_lock_stealing; 928 bool shrinker_no_lock_stealing;
826 929
827 /**
828 * List of objects currently involved in rendering.
829 *
830 * Includes buffers having the contents of their GPU caches
831 * flushed, not necessarily primitives. last_rendering_seqno
832 * represents when the rendering involved will be completed.
833 *
834 * A reference is held on the buffer while on this list.
835 */
836 struct list_head active_list;
837
838 /**
839 * LRU list of objects which are not in the ringbuffer and
840 * are ready to unbind, but are still in the GTT.
841 *
842 * last_rendering_seqno is 0 while an object is in this list.
843 *
844 * A reference is not held on the buffer while on this list,
845 * as merely being GTT-bound shouldn't prevent its being
846 * freed, and we'll pull it off the list in the free path.
847 */
848 struct list_head inactive_list;
849
850 /** LRU list of objects with fence regs on them. */ 930 /** LRU list of objects with fence regs on them. */
851 struct list_head fence_list; 931 struct list_head fence_list;
852 932
@@ -865,16 +945,6 @@ struct i915_gem_mm {
865 */ 945 */
866 bool interruptible; 946 bool interruptible;
867 947
868 /**
869 * Flag if the X Server, and thus DRM, is not currently in
870 * control of the device.
871 *
872 * This is set between LeaveVT and EnterVT. It needs to be
873 * replaced with a semaphore. It also needs to be
874 * transitioned away from for kernel modesetting.
875 */
876 int suspended;
877
878 /** Bit 6 swizzling required for X tiling */ 948 /** Bit 6 swizzling required for X tiling */
879 uint32_t bit_6_swizzle_x; 949 uint32_t bit_6_swizzle_x;
880 /** Bit 6 swizzling required for Y tiling */ 950 /** Bit 6 swizzling required for Y tiling */
@@ -884,6 +954,7 @@ struct i915_gem_mm {
884 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; 954 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
885 955
886 /* accounting, useful for userland debugging */ 956 /* accounting, useful for userland debugging */
957 spinlock_t object_stat_lock;
887 size_t object_memory; 958 size_t object_memory;
888 u32 object_count; 959 u32 object_count;
889}; 960};
@@ -897,6 +968,11 @@ struct drm_i915_error_state_buf {
897 loff_t pos; 968 loff_t pos;
898}; 969};
899 970
971struct i915_error_state_file_priv {
972 struct drm_device *dev;
973 struct drm_i915_error_state *error;
974};
975
900struct i915_gpu_error { 976struct i915_gpu_error {
901 /* For hangcheck timer */ 977 /* For hangcheck timer */
902#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ 978#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
@@ -988,6 +1064,88 @@ struct intel_vbt_data {
988 struct child_device_config *child_dev; 1064 struct child_device_config *child_dev;
989}; 1065};
990 1066
1067enum intel_ddb_partitioning {
1068 INTEL_DDB_PART_1_2,
1069 INTEL_DDB_PART_5_6, /* IVB+ */
1070};
1071
1072struct intel_wm_level {
1073 bool enable;
1074 uint32_t pri_val;
1075 uint32_t spr_val;
1076 uint32_t cur_val;
1077 uint32_t fbc_val;
1078};
1079
1080/*
1081 * This struct tracks the state needed for the Package C8+ feature.
1082 *
1083 * Package states C8 and deeper are really deep PC states that can only be
1084 * reached when all the devices on the system allow it, so even if the graphics
1085 * device allows PC8+, it doesn't mean the system will actually get to these
1086 * states.
1087 *
1088 * Our driver only allows PC8+ when all the outputs are disabled, the power well
1089 * is disabled and the GPU is idle. When these conditions are met, we manually
1090 * do the other conditions: disable the interrupts, clocks and switch LCPLL
1091 * refclk to Fclk.
1092 *
1093 * When we really reach PC8 or deeper states (not just when we allow it) we lose
1094 * the state of some registers, so when we come back from PC8+ we need to
1095 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
1096 * need to take care of the registers kept by RC6.
1097 *
1098 * The interrupt disabling is part of the requirements. We can only leave the
1099 * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we
1100 * can lock the machine.
1101 *
1102 * Ideally every piece of our code that needs PC8+ disabled would call
1103 * hsw_disable_package_c8, which would increment disable_count and prevent the
1104 * system from reaching PC8+. But we don't have a symmetric way to do this for
1105 * everything, so we have the requirements_met and gpu_idle variables. When we
1106 * switch requirements_met or gpu_idle to true we decrease disable_count, and
1107 * increase it in the opposite case. The requirements_met variable is true when
1108 * all the CRTCs, encoders and the power well are disabled. The gpu_idle
1109 * variable is true when the GPU is idle.
1110 *
1111 * In addition to everything, we only actually enable PC8+ if disable_count
1112 * stays at zero for at least some seconds. This is implemented with the
1113 * enable_work variable. We do this so we don't enable/disable PC8 dozens of
1114 * consecutive times when all screens are disabled and some background app
1115 * queries the state of our connectors, or we have some application constantly
1116 * waking up to use the GPU. Only after the enable_work function actually
1117 * enables PC8+ the "enable" variable will become true, which means that it can
1118 * be false even if disable_count is 0.
1119 *
1120 * The irqs_disabled variable becomes true exactly after we disable the IRQs and
1121 * goes back to false exactly before we reenable the IRQs. We use this variable
1122 * to check if someone is trying to enable/disable IRQs while they're supposed
1123 * to be disabled. This shouldn't happen and we'll print some error messages in
1124 * case it happens, but if it actually happens we'll also update the variables
1125 * inside struct regsave so when we restore the IRQs they will contain the
1126 * latest expected values.
1127 *
1128 * For more, read "Display Sequences for Package C8" on our documentation.
1129 */
1130struct i915_package_c8 {
1131 bool requirements_met;
1132 bool gpu_idle;
1133 bool irqs_disabled;
1134 /* Only true after the delayed work task actually enables it. */
1135 bool enabled;
1136 int disable_count;
1137 struct mutex lock;
1138 struct delayed_work enable_work;
1139
1140 struct {
1141 uint32_t deimr;
1142 uint32_t sdeimr;
1143 uint32_t gtimr;
1144 uint32_t gtier;
1145 uint32_t gen6_pmimr;
1146 } regsave;
1147};
1148
991typedef struct drm_i915_private { 1149typedef struct drm_i915_private {
992 struct drm_device *dev; 1150 struct drm_device *dev;
993 struct kmem_cache *slab; 1151 struct kmem_cache *slab;
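The long comment above describes the PC8+ gating scheme: every piece of code that needs the feature off bumps disable_count, and the hardware is only re-armed after the count has sat at zero for a grace period, via a delayed work item. A single-threaded sketch of that counted-disable-plus-deferred-enable pattern (the c8_gate type and the simulated clock are inventions for illustration, not the driver code):

#include <stdbool.h>
#include <stdio.h>

struct c8_gate {
        int  disable_count;   /* how many users currently forbid PC8       */
        bool enabled;         /* whether PC8 is actually allowed right now */
        long enable_deadline; /* simulated time at which re-enable may run */
};

#define GRACE_PERIOD 5000     /* "msecs of idleness" before re-enabling */

static void gate_disable(struct c8_gate *g)
{
        g->disable_count++;
        g->enabled = false;          /* any user immediately blocks PC8 */
        g->enable_deadline = -1;     /* and cancels a pending re-enable */
}

static void gate_allow(struct c8_gate *g, long now)
{
        if (--g->disable_count == 0)
                g->enable_deadline = now + GRACE_PERIOD;  /* schedule, do not enable yet */
}

static void gate_tick(struct c8_gate *g, long now)
{
        if (g->disable_count == 0 && g->enable_deadline >= 0 &&
            now >= g->enable_deadline)
                g->enabled = true;   /* the delayed work finally enables PC8 */
}

int main(void)
{
        struct c8_gate g = { 0, false, 0 };

        gate_disable(&g);            /* e.g. an output is still lit        */
        gate_allow(&g, 1000);        /* last blocker goes away at t=1000   */
        gate_tick(&g, 2000);
        printf("t=2000: enabled=%d\n", g.enabled);  /* 0: still inside the grace period */
        gate_tick(&g, 7000);
        printf("t=7000: enabled=%d\n", g.enabled);  /* 1: grace period elapsed          */
        return 0;
}

The grace period is the point of the enable_work indirection: releasing the last blocker does not flip the feature back on immediately, which avoids thrashing when screens blank and background tasks briefly wake the GPU.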
@@ -998,14 +1156,7 @@ typedef struct drm_i915_private {
998 1156
999 void __iomem *regs; 1157 void __iomem *regs;
1000 1158
1001 struct drm_i915_gt_funcs gt; 1159 struct intel_uncore uncore;
1002 /** gt_fifo_count and the subsequent register write are synchronized
1003 * with dev->struct_mutex. */
1004 unsigned gt_fifo_count;
1005 /** forcewake_count is protected by gt_lock */
1006 unsigned forcewake_count;
1007 /** gt_lock is also taken in irq contexts. */
1008 spinlock_t gt_lock;
1009 1160
1010 struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; 1161 struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
1011 1162
@@ -1042,6 +1193,7 @@ typedef struct drm_i915_private {
1042 /** Cached value of IMR to avoid reads in updating the bitfield */ 1193 /** Cached value of IMR to avoid reads in updating the bitfield */
1043 u32 irq_mask; 1194 u32 irq_mask;
1044 u32 gt_irq_mask; 1195 u32 gt_irq_mask;
1196 u32 pm_irq_mask;
1045 1197
1046 struct work_struct hotplug_work; 1198 struct work_struct hotplug_work;
1047 bool enable_hotplug_processing; 1199 bool enable_hotplug_processing;
@@ -1059,12 +1211,7 @@ typedef struct drm_i915_private {
1059 1211
1060 int num_plane; 1212 int num_plane;
1061 1213
1062 unsigned long cfb_size; 1214 struct i915_fbc fbc;
1063 unsigned int cfb_fb;
1064 enum plane cfb_plane;
1065 int cfb_y;
1066 struct intel_fbc_work *fbc_work;
1067
1068 struct intel_opregion opregion; 1215 struct intel_opregion opregion;
1069 struct intel_vbt_data vbt; 1216 struct intel_vbt_data vbt;
1070 1217
@@ -1081,8 +1228,6 @@ typedef struct drm_i915_private {
1081 } backlight; 1228 } backlight;
1082 1229
1083 /* LVDS info */ 1230 /* LVDS info */
1084 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
1085 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
1086 bool no_aux_handshake; 1231 bool no_aux_handshake;
1087 1232
1088 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ 1233 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
@@ -1105,7 +1250,8 @@ typedef struct drm_i915_private {
1105 enum modeset_restore modeset_restore; 1250 enum modeset_restore modeset_restore;
1106 struct mutex modeset_restore_lock; 1251 struct mutex modeset_restore_lock;
1107 1252
1108 struct i915_gtt gtt; 1253 struct list_head vm_list; /* Global list of all address spaces */
1254 struct i915_gtt gtt; /* VMA representing the global address space */
1109 1255
1110 struct i915_gem_mm mm; 1256 struct i915_gem_mm mm;
1111 1257
@@ -1132,6 +1278,9 @@ typedef struct drm_i915_private {
1132 1278
1133 struct intel_l3_parity l3_parity; 1279 struct intel_l3_parity l3_parity;
1134 1280
1281 /* Cannot be determined by PCIID. You must always read a register. */
1282 size_t ellc_size;
1283
1135 /* gen6+ rps state */ 1284 /* gen6+ rps state */
1136 struct intel_gen6_power_mgmt rps; 1285 struct intel_gen6_power_mgmt rps;
1137 1286
@@ -1142,10 +1291,7 @@ typedef struct drm_i915_private {
1142 /* Haswell power well */ 1291 /* Haswell power well */
1143 struct i915_power_well power_well; 1292 struct i915_power_well power_well;
1144 1293
1145 enum no_fbc_reason no_fbc_reason; 1294 enum no_psr_reason no_psr_reason;
1146
1147 struct drm_mm_node *compressed_fb;
1148 struct drm_mm_node *compressed_llb;
1149 1295
1150 struct i915_gpu_error gpu_error; 1296 struct i915_gpu_error gpu_error;
1151 1297
@@ -1170,11 +1316,34 @@ typedef struct drm_i915_private {
1170 1316
1171 struct i915_suspend_saved_registers regfile; 1317 struct i915_suspend_saved_registers regfile;
1172 1318
1319 struct {
1320 /*
1321 * Raw watermark latency values:
1322 * in 0.1us units for WM0,
1323 * in 0.5us units for WM1+.
1324 */
1325 /* primary */
1326 uint16_t pri_latency[5];
1327 /* sprite */
1328 uint16_t spr_latency[5];
1329 /* cursor */
1330 uint16_t cur_latency[5];
1331 } wm;
1332
1333 struct i915_package_c8 pc8;
1334
1173 /* Old dri1 support infrastructure, beware the dragons ya fools entering 1335 /* Old dri1 support infrastructure, beware the dragons ya fools entering
1174 * here! */ 1336 * here! */
1175 struct i915_dri1_state dri1; 1337 struct i915_dri1_state dri1;
1338 /* Old ums support infrastructure, same warning applies. */
1339 struct i915_ums_state ums;
1176} drm_i915_private_t; 1340} drm_i915_private_t;
1177 1341
1342static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
1343{
1344 return dev->dev_private;
1345}
1346
1178/* Iterate over initialised rings */ 1347/* Iterate over initialised rings */
1179#define for_each_ring(ring__, dev_priv__, i__) \ 1348#define for_each_ring(ring__, dev_priv__, i__) \
1180 for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \ 1349 for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
@@ -1187,7 +1356,7 @@ enum hdmi_force_audio {
1187 HDMI_AUDIO_ON, /* force turn on HDMI audio */ 1356 HDMI_AUDIO_ON, /* force turn on HDMI audio */
1188}; 1357};
1189 1358
1190#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1) 1359#define I915_GTT_OFFSET_NONE ((u32)-1)
1191 1360
1192struct drm_i915_gem_object_ops { 1361struct drm_i915_gem_object_ops {
1193 /* Interface between the GEM object and its backing storage. 1362 /* Interface between the GEM object and its backing storage.
@@ -1212,15 +1381,16 @@ struct drm_i915_gem_object {
1212 1381
1213 const struct drm_i915_gem_object_ops *ops; 1382 const struct drm_i915_gem_object_ops *ops;
1214 1383
1215 /** Current space allocated to this object in the GTT, if any. */ 1384 /** List of VMAs backed by this object */
1216 struct drm_mm_node *gtt_space; 1385 struct list_head vma_list;
1386
1217 /** Stolen memory for this object, instead of being backed by shmem. */ 1387 /** Stolen memory for this object, instead of being backed by shmem. */
1218 struct drm_mm_node *stolen; 1388 struct drm_mm_node *stolen;
1219 struct list_head global_list; 1389 struct list_head global_list;
1220 1390
1221 /** This object's place on the active/inactive lists */
1222 struct list_head ring_list; 1391 struct list_head ring_list;
1223 struct list_head mm_list; 1392 /** Used in execbuf to temporarily hold a ref */
1393 struct list_head obj_exec_link;
1224 /** This object's place in the batchbuffer or on the eviction list */ 1394 /** This object's place in the batchbuffer or on the eviction list */
1225 struct list_head exec_list; 1395 struct list_head exec_list;
1226 1396
@@ -1287,6 +1457,7 @@ struct drm_i915_gem_object {
1287 */ 1457 */
1288 unsigned int fault_mappable:1; 1458 unsigned int fault_mappable:1;
1289 unsigned int pin_mappable:1; 1459 unsigned int pin_mappable:1;
1460 unsigned int pin_display:1;
1290 1461
1291 /* 1462 /*
1292 * Is the GPU currently using a fence to access this buffer, 1463 * Is the GPU currently using a fence to access this buffer,
@@ -1294,7 +1465,7 @@ struct drm_i915_gem_object {
1294 unsigned int pending_fenced_gpu_access:1; 1465 unsigned int pending_fenced_gpu_access:1;
1295 unsigned int fenced_gpu_access:1; 1466 unsigned int fenced_gpu_access:1;
1296 1467
1297 unsigned int cache_level:2; 1468 unsigned int cache_level:3;
1298 1469
1299 unsigned int has_aliasing_ppgtt_mapping:1; 1470 unsigned int has_aliasing_ppgtt_mapping:1;
1300 unsigned int has_global_gtt_mapping:1; 1471 unsigned int has_global_gtt_mapping:1;
@@ -1314,13 +1485,6 @@ struct drm_i915_gem_object {
1314 unsigned long exec_handle; 1485 unsigned long exec_handle;
1315 struct drm_i915_gem_exec_object2 *exec_entry; 1486 struct drm_i915_gem_exec_object2 *exec_entry;
1316 1487
1317 /**
1318 * Current offset of the object in GTT space.
1319 *
1320 * This is the same as gtt_space->start
1321 */
1322 uint32_t gtt_offset;
1323
1324 struct intel_ring_buffer *ring; 1488 struct intel_ring_buffer *ring;
1325 1489
1326 /** Breadcrumb of last rendering to the buffer. */ 1490 /** Breadcrumb of last rendering to the buffer. */
@@ -1396,7 +1560,7 @@ struct drm_i915_file_private {
1396 struct i915_ctx_hang_stats hang_stats; 1560 struct i915_ctx_hang_stats hang_stats;
1397}; 1561};
1398 1562
1399#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) 1563#define INTEL_INFO(dev) (to_i915(dev)->info)
1400 1564
1401#define IS_I830(dev) ((dev)->pci_device == 0x3577) 1565#define IS_I830(dev) ((dev)->pci_device == 0x3577)
1402#define IS_845G(dev) ((dev)->pci_device == 0x2562) 1566#define IS_845G(dev) ((dev)->pci_device == 0x2562)
@@ -1414,7 +1578,6 @@ struct drm_i915_file_private {
1414#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) 1578#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
1415#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) 1579#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
1416#define IS_G33(dev) (INTEL_INFO(dev)->is_g33) 1580#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
1417#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
1418#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) 1581#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
1419#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) 1582#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
1420#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \ 1583#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
@@ -1426,6 +1589,8 @@ struct drm_i915_file_private {
1426#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) 1589#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
1427#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) 1590#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
1428#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 1591#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1592#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
1593 ((dev)->pci_device & 0xFF00) == 0x0C00)
1429#define IS_ULT(dev) (IS_HASWELL(dev) && \ 1594#define IS_ULT(dev) (IS_HASWELL(dev) && \
1430 ((dev)->pci_device & 0xFF00) == 0x0A00) 1595 ((dev)->pci_device & 0xFF00) == 0x0A00)
1431 1596
@@ -1446,6 +1611,7 @@ struct drm_i915_file_private {
1446#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) 1611#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
1447#define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring) 1612#define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring)
1448#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) 1613#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
1614#define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
1449#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 1615#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1450 1616
1451#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) 1617#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
@@ -1468,8 +1634,6 @@ struct drm_i915_file_private {
1468#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) 1634#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
1469#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) 1635#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
1470#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) 1636#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
1471/* dsparb controlled by hw only */
1472#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
1473 1637
1474#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) 1638#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
1475#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 1639#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
@@ -1477,8 +1641,6 @@ struct drm_i915_file_private {
1477 1641
1478#define HAS_IPS(dev) (IS_ULT(dev)) 1642#define HAS_IPS(dev) (IS_ULT(dev))
1479 1643
1480#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
1481
1482#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) 1644#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
1483#define HAS_POWER_WELL(dev) (IS_HASWELL(dev)) 1645#define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
1484#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) 1646#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
@@ -1490,7 +1652,7 @@ struct drm_i915_file_private {
1490#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 1652#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
1491#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 1653#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
1492 1654
1493#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) 1655#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
1494#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) 1656#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
1495#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 1657#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
1496#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) 1658#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
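The INTEL_PCH_TYPE() rewrite above swaps the open-coded dev_private cast for the to_i915() upcast helper. A minimal sketch of that helper, assuming it does nothing more than wrap dev->dev_private (the real definition lives earlier in i915_drv.h):

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return dev->dev_private;
}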
@@ -1526,7 +1688,7 @@ struct drm_i915_file_private {
1526#define INTEL_RC6p_ENABLE (1<<1) 1688#define INTEL_RC6p_ENABLE (1<<1)
1527#define INTEL_RC6pp_ENABLE (1<<2) 1689#define INTEL_RC6pp_ENABLE (1<<2)
1528 1690
1529extern struct drm_ioctl_desc i915_ioctls[]; 1691extern const struct drm_ioctl_desc i915_ioctls[];
1530extern int i915_max_ioctl; 1692extern int i915_max_ioctl;
1531extern unsigned int i915_fbpercrtc __always_unused; 1693extern unsigned int i915_fbpercrtc __always_unused;
1532extern int i915_panel_ignore_lid __read_mostly; 1694extern int i915_panel_ignore_lid __read_mostly;
@@ -1540,9 +1702,14 @@ extern int i915_enable_rc6 __read_mostly;
1540extern int i915_enable_fbc __read_mostly; 1702extern int i915_enable_fbc __read_mostly;
1541extern bool i915_enable_hangcheck __read_mostly; 1703extern bool i915_enable_hangcheck __read_mostly;
1542extern int i915_enable_ppgtt __read_mostly; 1704extern int i915_enable_ppgtt __read_mostly;
1705extern int i915_enable_psr __read_mostly;
1543extern unsigned int i915_preliminary_hw_support __read_mostly; 1706extern unsigned int i915_preliminary_hw_support __read_mostly;
1544extern int i915_disable_power_well __read_mostly; 1707extern int i915_disable_power_well __read_mostly;
1545extern int i915_enable_ips __read_mostly; 1708extern int i915_enable_ips __read_mostly;
1709extern bool i915_fastboot __read_mostly;
1710extern int i915_enable_pc8 __read_mostly;
1711extern int i915_pc8_timeout __read_mostly;
1712extern bool i915_prefault_disable __read_mostly;
1546 1713
1547extern int i915_suspend(struct drm_device *dev, pm_message_t state); 1714extern int i915_suspend(struct drm_device *dev, pm_message_t state);
1548extern int i915_resume(struct drm_device *dev); 1715extern int i915_resume(struct drm_device *dev);
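Each new extern above is backed by a definition plus a module_param_named() registration in i915_drv.c. A hedged sketch of one such pairing, with the description string illustrative rather than the exact in-tree wording:

int i915_enable_psr __read_mostly = 0;
module_param_named(enable_psr, i915_enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable panel self refresh (default: false)");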
@@ -1578,16 +1745,19 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
1578extern void intel_console_resume(struct work_struct *work); 1745extern void intel_console_resume(struct work_struct *work);
1579 1746
1580/* i915_irq.c */ 1747/* i915_irq.c */
1581void i915_hangcheck_elapsed(unsigned long data); 1748void i915_queue_hangcheck(struct drm_device *dev);
1582void i915_handle_error(struct drm_device *dev, bool wedged); 1749void i915_handle_error(struct drm_device *dev, bool wedged);
1583 1750
1584extern void intel_irq_init(struct drm_device *dev); 1751extern void intel_irq_init(struct drm_device *dev);
1585extern void intel_pm_init(struct drm_device *dev); 1752extern void intel_pm_init(struct drm_device *dev);
1586extern void intel_hpd_init(struct drm_device *dev); 1753extern void intel_hpd_init(struct drm_device *dev);
1587extern void intel_gt_init(struct drm_device *dev); 1754extern void intel_pm_init(struct drm_device *dev);
1588extern void intel_gt_sanitize(struct drm_device *dev);
1589 1755
1590void i915_error_state_free(struct kref *error_ref); 1756extern void intel_uncore_sanitize(struct drm_device *dev);
1757extern void intel_uncore_early_sanitize(struct drm_device *dev);
1758extern void intel_uncore_init(struct drm_device *dev);
1759extern void intel_uncore_clear_errors(struct drm_device *dev);
1760extern void intel_uncore_check_errors(struct drm_device *dev);
1591 1761
1592void 1762void
1593i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 1763i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1595,13 +1765,6 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1595void 1765void
1596i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 1766i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1597 1767
1598#ifdef CONFIG_DEBUG_FS
1599extern void i915_destroy_error_state(struct drm_device *dev);
1600#else
1601#define i915_destroy_error_state(x)
1602#endif
1603
1604
1605/* i915_gem.c */ 1768/* i915_gem.c */
1606int i915_gem_init_ioctl(struct drm_device *dev, void *data, 1769int i915_gem_init_ioctl(struct drm_device *dev, void *data,
1607 struct drm_file *file_priv); 1770 struct drm_file *file_priv);
@@ -1658,13 +1821,18 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
1658struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 1821struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
1659 size_t size); 1822 size_t size);
1660void i915_gem_free_object(struct drm_gem_object *obj); 1823void i915_gem_free_object(struct drm_gem_object *obj);
1824struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
1825 struct i915_address_space *vm);
1826void i915_gem_vma_destroy(struct i915_vma *vma);
1661 1827
1662int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, 1828int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
1829 struct i915_address_space *vm,
1663 uint32_t alignment, 1830 uint32_t alignment,
1664 bool map_and_fenceable, 1831 bool map_and_fenceable,
1665 bool nonblocking); 1832 bool nonblocking);
1666void i915_gem_object_unpin(struct drm_i915_gem_object *obj); 1833void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
1667int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); 1834int __must_check i915_vma_unbind(struct i915_vma *vma);
1835int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
1668int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); 1836int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
1669void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 1837void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
1670void i915_gem_lastclose(struct drm_device *dev); 1838void i915_gem_lastclose(struct drm_device *dev);
@@ -1701,8 +1869,6 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
1701 struct drm_mode_create_dumb *args); 1869 struct drm_mode_create_dumb *args);
1702int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, 1870int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
1703 uint32_t handle, uint64_t *offset); 1871 uint32_t handle, uint64_t *offset);
1704int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
1705 uint32_t handle);
1706/** 1872/**
1707 * Returns true if seq1 is later than seq2. 1873 * Returns true if seq1 is later than seq2.
1708 */ 1874 */
@@ -1754,10 +1920,7 @@ static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
1754} 1920}
1755 1921
1756void i915_gem_reset(struct drm_device *dev); 1922void i915_gem_reset(struct drm_device *dev);
1757void i915_gem_clflush_object(struct drm_i915_gem_object *obj); 1923bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
1758int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
1759 uint32_t read_domains,
1760 uint32_t write_domain);
1761int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); 1924int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
1762int __must_check i915_gem_init(struct drm_device *dev); 1925int __must_check i915_gem_init(struct drm_device *dev);
1763int __must_check i915_gem_init_hw(struct drm_device *dev); 1926int __must_check i915_gem_init_hw(struct drm_device *dev);
@@ -1784,6 +1947,7 @@ int __must_check
1784i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 1947i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
1785 u32 alignment, 1948 u32 alignment,
1786 struct intel_ring_buffer *pipelined); 1949 struct intel_ring_buffer *pipelined);
1950void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
1787int i915_gem_attach_phys_object(struct drm_device *dev, 1951int i915_gem_attach_phys_object(struct drm_device *dev,
1788 struct drm_i915_gem_object *obj, 1952 struct drm_i915_gem_object *obj,
1789 int id, 1953 int id,
@@ -1810,6 +1974,56 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
1810 1974
1811void i915_gem_restore_fences(struct drm_device *dev); 1975void i915_gem_restore_fences(struct drm_device *dev);
1812 1976
1977unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
1978 struct i915_address_space *vm);
1979bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
1980bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
1981 struct i915_address_space *vm);
1982unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
1983 struct i915_address_space *vm);
1984struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
1985 struct i915_address_space *vm);
1986struct i915_vma *
1987i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
1988 struct i915_address_space *vm);
1989/* Some GGTT VM helpers */
1990#define obj_to_ggtt(obj) \
1991 (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
1992static inline bool i915_is_ggtt(struct i915_address_space *vm)
1993{
1994 struct i915_address_space *ggtt =
1995 &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
1996 return vm == ggtt;
1997}
1998
1999static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
2000{
2001 return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
2002}
2003
2004static inline unsigned long
2005i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
2006{
2007 return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
2008}
2009
2010static inline unsigned long
2011i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
2012{
2013 return i915_gem_obj_size(obj, obj_to_ggtt(obj));
2014}
2015
2016static inline int __must_check
2017i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
2018 uint32_t alignment,
2019 bool map_and_fenceable,
2020 bool nonblocking)
2021{
2022 return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
2023 map_and_fenceable, nonblocking);
2024}
2025#undef obj_to_ggtt
2026
1813/* i915_gem_context.c */ 2027/* i915_gem_context.c */
1814void i915_gem_context_init(struct drm_device *dev); 2028void i915_gem_context_init(struct drm_device *dev);
1815void i915_gem_context_fini(struct drm_device *dev); 2029void i915_gem_context_fini(struct drm_device *dev);
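The helper block above treats the global GTT as just one i915_address_space among several, while keeping a short spelling for callers that only ever deal with the GGTT. A sketch of how such a caller might look; example_pin_and_report() is purely illustrative and assumes struct_mutex is already held:

static int example_pin_and_report(struct drm_i915_gem_object *obj)
{
	int ret;

	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
	if (ret)
		return ret;

	DRM_DEBUG("bound at GGTT offset 0x%08lx, size %lu\n",
		  i915_gem_obj_ggtt_offset(obj),
		  i915_gem_obj_ggtt_size(obj));

	i915_gem_object_unpin(obj);
	return 0;
}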
@@ -1828,7 +2042,7 @@ static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
1828} 2042}
1829 2043
1830struct i915_ctx_hang_stats * __must_check 2044struct i915_ctx_hang_stats * __must_check
1831i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring, 2045i915_gem_context_get_hang_stats(struct drm_device *dev,
1832 struct drm_file *file, 2046 struct drm_file *file,
1833 u32 id); 2047 u32 id);
1834int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 2048int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -1862,7 +2076,9 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
1862 2076
1863 2077
1864/* i915_gem_evict.c */ 2078/* i915_gem_evict.c */
1865int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, 2079int __must_check i915_gem_evict_something(struct drm_device *dev,
2080 struct i915_address_space *vm,
2081 int min_size,
1866 unsigned alignment, 2082 unsigned alignment,
1867 unsigned cache_level, 2083 unsigned cache_level,
1868 bool mappable, 2084 bool mappable,
@@ -1884,7 +2100,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
1884void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj); 2100void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
1885 2101
1886/* i915_gem_tiling.c */ 2102/* i915_gem_tiling.c */
1887inline static bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) 2103static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
1888{ 2104{
1889 drm_i915_private_t *dev_priv = obj->base.dev->dev_private; 2105 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
1890 2106
@@ -1897,23 +2113,36 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
1897void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); 2113void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
1898 2114
1899/* i915_gem_debug.c */ 2115/* i915_gem_debug.c */
1900void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
1901 const char *where, uint32_t mark);
1902#if WATCH_LISTS 2116#if WATCH_LISTS
1903int i915_verify_lists(struct drm_device *dev); 2117int i915_verify_lists(struct drm_device *dev);
1904#else 2118#else
1905#define i915_verify_lists(dev) 0 2119#define i915_verify_lists(dev) 0
1906#endif 2120#endif
1907void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
1908 int handle);
1909void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
1910 const char *where, uint32_t mark);
1911 2121
1912/* i915_debugfs.c */ 2122/* i915_debugfs.c */
1913int i915_debugfs_init(struct drm_minor *minor); 2123int i915_debugfs_init(struct drm_minor *minor);
1914void i915_debugfs_cleanup(struct drm_minor *minor); 2124void i915_debugfs_cleanup(struct drm_minor *minor);
2125
2126/* i915_gpu_error.c */
1915__printf(2, 3) 2127__printf(2, 3)
1916void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); 2128void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
2129int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
2130 const struct i915_error_state_file_priv *error);
2131int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
2132 size_t count, loff_t pos);
2133static inline void i915_error_state_buf_release(
2134 struct drm_i915_error_state_buf *eb)
2135{
2136 kfree(eb->buf);
2137}
2138void i915_capture_error_state(struct drm_device *dev);
2139void i915_error_state_get(struct drm_device *dev,
2140 struct i915_error_state_file_priv *error_priv);
2141void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
2142void i915_destroy_error_state(struct drm_device *dev);
2143
2144void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
2145const char *i915_cache_level_str(int type);
1917 2146
1918/* i915_suspend.c */ 2147/* i915_suspend.c */
1919extern int i915_save_state(struct drm_device *dev); 2148extern int i915_save_state(struct drm_device *dev);
@@ -1993,7 +2222,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
1993 struct drm_file *file); 2222 struct drm_file *file);
1994 2223
1995/* overlay */ 2224/* overlay */
1996#ifdef CONFIG_DEBUG_FS
1997extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); 2225extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
1998extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, 2226extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
1999 struct intel_overlay_error_state *error); 2227 struct intel_overlay_error_state *error);
@@ -2002,7 +2230,6 @@ extern struct intel_display_error_state *intel_display_capture_error_state(struc
2002extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, 2230extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
2003 struct drm_device *dev, 2231 struct drm_device *dev,
2004 struct intel_display_error_state *error); 2232 struct intel_display_error_state *error);
2005#endif
2006 2233
2007/* On SNB platform, before reading ring registers forcewake bit 2234/* On SNB platform, before reading ring registers forcewake bit
2008 * must be set to prevent GT core from power down and stale values being 2235 * must be set to prevent GT core from power down and stale values being
@@ -2010,7 +2237,6 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
2010 */ 2237 */
2011void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); 2238void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
2012void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); 2239void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
2013int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
2014 2240
2015int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); 2241int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
2016int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); 2242int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
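The comment above this hunk describes the forcewake requirement: GT registers on SNB-class parts must be read with a forcewake reference held so the GT domain cannot power down and return stale values. The usage pattern around a raw read looks roughly like the following sketch; the register name is illustrative only:

gen6_gt_force_wake_get(dev_priv);
u32 cap = I915_READ(GEN6_RP_STATE_CAP);	/* any GT register read goes here */
gen6_gt_force_wake_put(dev_priv);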
@@ -2029,39 +2255,37 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
2029int vlv_gpu_freq(int ddr_freq, int val); 2255int vlv_gpu_freq(int ddr_freq, int val);
2030int vlv_freq_opcode(int ddr_freq, int val); 2256int vlv_freq_opcode(int ddr_freq, int val);
2031 2257
2032#define __i915_read(x, y) \ 2258#define __i915_read(x) \
2033 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); 2259 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace);
2034 2260__i915_read(8)
2035__i915_read(8, b) 2261__i915_read(16)
2036__i915_read(16, w) 2262__i915_read(32)
2037__i915_read(32, l) 2263__i915_read(64)
2038__i915_read(64, q)
2039#undef __i915_read 2264#undef __i915_read
2040 2265
2041#define __i915_write(x, y) \ 2266#define __i915_write(x) \
2042 void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val); 2267 void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace);
2043 2268__i915_write(8)
2044__i915_write(8, b) 2269__i915_write(16)
2045__i915_write(16, w) 2270__i915_write(32)
2046__i915_write(32, l) 2271__i915_write(64)
2047__i915_write(64, q)
2048#undef __i915_write 2272#undef __i915_write
2049 2273
2050#define I915_READ8(reg) i915_read8(dev_priv, (reg)) 2274#define I915_READ8(reg) i915_read8(dev_priv, (reg), true)
2051#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val)) 2275#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val), true)
2052 2276
2053#define I915_READ16(reg) i915_read16(dev_priv, (reg)) 2277#define I915_READ16(reg) i915_read16(dev_priv, (reg), true)
2054#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val)) 2278#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val), true)
2055#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg)) 2279#define I915_READ16_NOTRACE(reg) i915_read16(dev_priv, (reg), false)
2056#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg)) 2280#define I915_WRITE16_NOTRACE(reg, val) i915_write16(dev_priv, (reg), (val), false)
2057 2281
2058#define I915_READ(reg) i915_read32(dev_priv, (reg)) 2282#define I915_READ(reg) i915_read32(dev_priv, (reg), true)
2059#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val)) 2283#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val), true)
2060#define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg)) 2284#define I915_READ_NOTRACE(reg) i915_read32(dev_priv, (reg), false)
2061#define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg)) 2285#define I915_WRITE_NOTRACE(reg, val) i915_write32(dev_priv, (reg), (val), false)
2062 2286
2063#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val)) 2287#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val), true)
2064#define I915_READ64(reg) i915_read64(dev_priv, (reg)) 2288#define I915_READ64(reg) i915_read64(dev_priv, (reg), true)
2065 2289
2066#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) 2290#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
2067#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) 2291#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
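With the extra bool argument, the NOTRACE variants above stop poking dev_priv->regs directly and route through the same accessors with tracing disabled. Expanded by hand, the declaration macro produces prototypes of this shape (a sketch of the expansion, not a new API):

/* __i915_read(32) expands to: */
u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg, bool trace);

/* so I915_READ(reg) becomes i915_read32(dev_priv, (reg), true) and
 * I915_READ_NOTRACE(reg) becomes i915_read32(dev_priv, (reg), false). */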
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d9e2208cfe98..2d1cb10d846f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -26,6 +26,7 @@
26 */ 26 */
27 27
28#include <drm/drmP.h> 28#include <drm/drmP.h>
29#include <drm/drm_vma_manager.h>
29#include <drm/i915_drm.h> 30#include <drm/i915_drm.h>
30#include "i915_drv.h" 31#include "i915_drv.h"
31#include "i915_trace.h" 32#include "i915_trace.h"
@@ -37,11 +38,14 @@
37#include <linux/dma-buf.h> 38#include <linux/dma-buf.h>
38 39
39static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); 40static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
40static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); 41static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
41static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, 42 bool force);
42 unsigned alignment, 43static __must_check int
43 bool map_and_fenceable, 44i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
44 bool nonblocking); 45 struct i915_address_space *vm,
46 unsigned alignment,
47 bool map_and_fenceable,
48 bool nonblocking);
45static int i915_gem_phys_pwrite(struct drm_device *dev, 49static int i915_gem_phys_pwrite(struct drm_device *dev,
46 struct drm_i915_gem_object *obj, 50 struct drm_i915_gem_object *obj,
47 struct drm_i915_gem_pwrite *args, 51 struct drm_i915_gem_pwrite *args,
@@ -59,6 +63,20 @@ static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
59static void i915_gem_shrink_all(struct drm_i915_private *dev_priv); 63static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
60static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); 64static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
61 65
66static bool cpu_cache_is_coherent(struct drm_device *dev,
67 enum i915_cache_level level)
68{
69 return HAS_LLC(dev) || level != I915_CACHE_NONE;
70}
71
72static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
73{
74 if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
75 return true;
76
77 return obj->pin_display;
78}
79
62static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) 80static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
63{ 81{
64 if (obj->tiling_mode) 82 if (obj->tiling_mode)
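The two helpers introduced above centralise the "does this need a clflush" decision: LLC parts are coherent at every cache level, non-LLC parts only for snooped (non-NONE) levels, and scanout-pinned objects always want the flush on write. A short worked example of the predicates, with the platform values assumed for illustration:

/* non-LLC platform, obj->cache_level == I915_CACHE_NONE:
 *	cpu_cache_is_coherent()   -> false
 *	cpu_write_needs_clflush() -> true
 *
 * LLC platform, any cache level:
 *	cpu_cache_is_coherent()   -> true
 *	cpu_write_needs_clflush() -> true only when obj->pin_display is set
 */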
@@ -75,15 +93,19 @@ static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
75static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, 93static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
76 size_t size) 94 size_t size)
77{ 95{
96 spin_lock(&dev_priv->mm.object_stat_lock);
78 dev_priv->mm.object_count++; 97 dev_priv->mm.object_count++;
79 dev_priv->mm.object_memory += size; 98 dev_priv->mm.object_memory += size;
99 spin_unlock(&dev_priv->mm.object_stat_lock);
80} 100}
81 101
82static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, 102static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
83 size_t size) 103 size_t size)
84{ 104{
105 spin_lock(&dev_priv->mm.object_stat_lock);
85 dev_priv->mm.object_count--; 106 dev_priv->mm.object_count--;
86 dev_priv->mm.object_memory -= size; 107 dev_priv->mm.object_memory -= size;
108 spin_unlock(&dev_priv->mm.object_stat_lock);
87} 109}
88 110
89static int 111static int
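Object count and memory totals are now adjusted under a dedicated spinlock so concurrent object creation and destruction cannot race the bookkeeping. The lock needs a one-time initialisation at load; a minimal sketch, assuming it sits with the rest of the mm setup in i915_gem_load():

spin_lock_init(&dev_priv->mm.object_stat_lock);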
@@ -135,7 +157,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
135static inline bool 157static inline bool
136i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) 158i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
137{ 159{
138 return obj->gtt_space && !obj->active; 160 return i915_gem_obj_bound_any(obj) && !obj->active;
139} 161}
140 162
141int 163int
@@ -178,10 +200,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
178 mutex_lock(&dev->struct_mutex); 200 mutex_lock(&dev->struct_mutex);
179 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) 201 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
180 if (obj->pin_count) 202 if (obj->pin_count)
181 pinned += obj->gtt_space->size; 203 pinned += i915_gem_obj_ggtt_size(obj);
182 mutex_unlock(&dev->struct_mutex); 204 mutex_unlock(&dev->struct_mutex);
183 205
184 args->aper_size = dev_priv->gtt.total; 206 args->aper_size = dev_priv->gtt.base.total;
185 args->aper_available_size = args->aper_size - pinned; 207 args->aper_available_size = args->aper_size - pinned;
186 208
187 return 0; 209 return 0;
@@ -219,16 +241,10 @@ i915_gem_create(struct drm_file *file,
219 return -ENOMEM; 241 return -ENOMEM;
220 242
221 ret = drm_gem_handle_create(file, &obj->base, &handle); 243 ret = drm_gem_handle_create(file, &obj->base, &handle);
222 if (ret) {
223 drm_gem_object_release(&obj->base);
224 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
225 i915_gem_object_free(obj);
226 return ret;
227 }
228
229 /* drop reference from allocate - handle holds it now */ 244 /* drop reference from allocate - handle holds it now */
230 drm_gem_object_unreference(&obj->base); 245 drm_gem_object_unreference_unlocked(&obj->base);
231 trace_i915_gem_object_create(obj); 246 if (ret)
247 return ret;
232 248
233 *handle_p = handle; 249 *handle_p = handle;
234 return 0; 250 return 0;
@@ -246,13 +262,6 @@ i915_gem_dumb_create(struct drm_file *file,
246 args->size, &args->handle); 262 args->size, &args->handle);
247} 263}
248 264
249int i915_gem_dumb_destroy(struct drm_file *file,
250 struct drm_device *dev,
251 uint32_t handle)
252{
253 return drm_gem_handle_delete(file, handle);
254}
255
256/** 265/**
257 * Creates a new mm object and returns a handle to it. 266 * Creates a new mm object and returns a handle to it.
258 */ 267 */
@@ -420,9 +429,8 @@ i915_gem_shmem_pread(struct drm_device *dev,
420 * read domain and manually flush cachelines (if required). This 429 * read domain and manually flush cachelines (if required). This
421 * optimizes for the case when the gpu will dirty the data 430 * optimizes for the case when the gpu will dirty the data
422 * anyway again before the next pread happens. */ 431 * anyway again before the next pread happens. */
423 if (obj->cache_level == I915_CACHE_NONE) 432 needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
424 needs_clflush = 1; 433 if (i915_gem_obj_bound_any(obj)) {
425 if (obj->gtt_space) {
426 ret = i915_gem_object_set_to_gtt_domain(obj, false); 434 ret = i915_gem_object_set_to_gtt_domain(obj, false);
427 if (ret) 435 if (ret)
428 return ret; 436 return ret;
@@ -465,7 +473,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
465 473
466 mutex_unlock(&dev->struct_mutex); 474 mutex_unlock(&dev->struct_mutex);
467 475
468 if (!prefaulted) { 476 if (likely(!i915_prefault_disable) && !prefaulted) {
469 ret = fault_in_multipages_writeable(user_data, remain); 477 ret = fault_in_multipages_writeable(user_data, remain);
470 /* Userspace is tricking us, but we've already clobbered 478 /* Userspace is tricking us, but we've already clobbered
471 * its pages with the prefault and promised to write the 479 * its pages with the prefault and promised to write the
@@ -594,7 +602,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
594 char __user *user_data; 602 char __user *user_data;
595 int page_offset, page_length, ret; 603 int page_offset, page_length, ret;
596 604
597 ret = i915_gem_object_pin(obj, 0, true, true); 605 ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
598 if (ret) 606 if (ret)
599 goto out; 607 goto out;
600 608
@@ -609,7 +617,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
609 user_data = to_user_ptr(args->data_ptr); 617 user_data = to_user_ptr(args->data_ptr);
610 remain = args->size; 618 remain = args->size;
611 619
612 offset = obj->gtt_offset + args->offset; 620 offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
613 621
614 while (remain > 0) { 622 while (remain > 0) {
615 /* Operation in this page 623 /* Operation in this page
@@ -737,19 +745,18 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
737 * write domain and manually flush cachelines (if required). This 745 * write domain and manually flush cachelines (if required). This
738 * optimizes for the case when the gpu will use the data 746 * optimizes for the case when the gpu will use the data
739 * right away and we therefore have to clflush anyway. */ 747 * right away and we therefore have to clflush anyway. */
740 if (obj->cache_level == I915_CACHE_NONE) 748 needs_clflush_after = cpu_write_needs_clflush(obj);
741 needs_clflush_after = 1; 749 if (i915_gem_obj_bound_any(obj)) {
742 if (obj->gtt_space) {
743 ret = i915_gem_object_set_to_gtt_domain(obj, true); 750 ret = i915_gem_object_set_to_gtt_domain(obj, true);
744 if (ret) 751 if (ret)
745 return ret; 752 return ret;
746 } 753 }
747 } 754 }
748 /* Same trick applies for invalidate partially written cachelines before 755 /* Same trick applies to invalidate partially written cachelines read
749 * writing. */ 756 * before writing. */
750 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU) 757 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
751 && obj->cache_level == I915_CACHE_NONE) 758 needs_clflush_before =
752 needs_clflush_before = 1; 759 !cpu_cache_is_coherent(dev, obj->cache_level);
753 760
754 ret = i915_gem_object_get_pages(obj); 761 ret = i915_gem_object_get_pages(obj);
755 if (ret) 762 if (ret)
@@ -828,8 +835,8 @@ out:
828 */ 835 */
829 if (!needs_clflush_after && 836 if (!needs_clflush_after &&
830 obj->base.write_domain != I915_GEM_DOMAIN_CPU) { 837 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
831 i915_gem_clflush_object(obj); 838 if (i915_gem_clflush_object(obj, obj->pin_display))
832 i915_gem_chipset_flush(dev); 839 i915_gem_chipset_flush(dev);
833 } 840 }
834 } 841 }
835 842
@@ -860,10 +867,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
860 args->size)) 867 args->size))
861 return -EFAULT; 868 return -EFAULT;
862 869
863 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr), 870 if (likely(!i915_prefault_disable)) {
864 args->size); 871 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
865 if (ret) 872 args->size);
866 return -EFAULT; 873 if (ret)
874 return -EFAULT;
875 }
867 876
868 ret = i915_mutex_lock_interruptible(dev); 877 ret = i915_mutex_lock_interruptible(dev);
869 if (ret) 878 if (ret)
@@ -904,9 +913,9 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
904 goto out; 913 goto out;
905 } 914 }
906 915
907 if (obj->cache_level == I915_CACHE_NONE && 916 if (obj->tiling_mode == I915_TILING_NONE &&
908 obj->tiling_mode == I915_TILING_NONE && 917 obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
909 obj->base.write_domain != I915_GEM_DOMAIN_CPU) { 918 cpu_write_needs_clflush(obj)) {
910 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); 919 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
911 /* Note that the gtt paths might fail with non-page-backed user 920 /* Note that the gtt paths might fail with non-page-backed user
912 * pointers (e.g. gtt mappings when moving data between 921 * pointers (e.g. gtt mappings when moving data between
@@ -990,6 +999,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
990 bool wait_forever = true; 999 bool wait_forever = true;
991 int ret; 1000 int ret;
992 1001
1002 WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
1003
993 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) 1004 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
994 return 0; 1005 return 0;
995 1006
@@ -1255,8 +1266,8 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1255 } 1266 }
1256 1267
1257 /* Pinned buffers may be scanout, so flush the cache */ 1268 /* Pinned buffers may be scanout, so flush the cache */
1258 if (obj->pin_count) 1269 if (obj->pin_display)
1259 i915_gem_object_flush_cpu_write_domain(obj); 1270 i915_gem_object_flush_cpu_write_domain(obj, true);
1260 1271
1261 drm_gem_object_unreference(&obj->base); 1272 drm_gem_object_unreference(&obj->base);
1262unlock: 1273unlock:
@@ -1346,7 +1357,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1346 } 1357 }
1347 1358
1348 /* Now bind it into the GTT if needed */ 1359 /* Now bind it into the GTT if needed */
1349 ret = i915_gem_object_pin(obj, 0, true, false); 1360 ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
1350 if (ret) 1361 if (ret)
1351 goto unlock; 1362 goto unlock;
1352 1363
@@ -1360,8 +1371,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1360 1371
1361 obj->fault_mappable = true; 1372 obj->fault_mappable = true;
1362 1373
1363 pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) + 1374 pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1364 page_offset; 1375 pfn >>= PAGE_SHIFT;
1376 pfn += page_offset;
1365 1377
1366 /* Finally, remap it using the new GTT offset */ 1378 /* Finally, remap it using the new GTT offset */
1367 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); 1379 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
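The fault handler now builds the pfn in three explicit steps; the arithmetic is unchanged from the old one-liner. As a worked example with illustrative values (mappable_base = 0xe0000000, GGTT offset = 0x10000, page_offset = 3, 4 KiB pages):

pfn  = 0xe0000000 + 0x10000;	/* 0xe0010000, aperture address of the object */
pfn >>= PAGE_SHIFT;		/* 0xe0010 */
pfn += 3;			/* 0xe0013, pfn of the faulting page */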
@@ -1425,11 +1437,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1425 if (!obj->fault_mappable) 1437 if (!obj->fault_mappable)
1426 return; 1438 return;
1427 1439
1428 if (obj->base.dev->dev_mapping) 1440 drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
1429 unmap_mapping_range(obj->base.dev->dev_mapping,
1430 (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1431 obj->base.size, 1);
1432
1433 obj->fault_mappable = false; 1441 obj->fault_mappable = false;
1434} 1442}
1435 1443
@@ -1485,7 +1493,7 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1485 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 1493 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1486 int ret; 1494 int ret;
1487 1495
1488 if (obj->base.map_list.map) 1496 if (drm_vma_node_has_offset(&obj->base.vma_node))
1489 return 0; 1497 return 0;
1490 1498
1491 dev_priv->mm.shrinker_no_lock_stealing = true; 1499 dev_priv->mm.shrinker_no_lock_stealing = true;
@@ -1516,9 +1524,6 @@ out:
1516 1524
1517static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) 1525static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1518{ 1526{
1519 if (!obj->base.map_list.map)
1520 return;
1521
1522 drm_gem_free_mmap_offset(&obj->base); 1527 drm_gem_free_mmap_offset(&obj->base);
1523} 1528}
1524 1529
@@ -1557,7 +1562,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
1557 if (ret) 1562 if (ret)
1558 goto out; 1563 goto out;
1559 1564
1560 *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; 1565 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
1561 1566
1562out: 1567out:
1563 drm_gem_object_unreference(&obj->base); 1568 drm_gem_object_unreference(&obj->base);
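These hunks are the i915 side of the new drm_vma_manager: the old obj->base.map_list hash key is gone, the fake mmap offset handed back from the mmap_gtt ioctl is derived from a drm_vma_offset_node embedded in the GEM object, and teardown shoots down user mappings through the same node. Both calls appear above; side by side:

/* offset reported to userspace for mmap() */
u64 offset = drm_vma_node_offset_addr(&obj->base.vma_node);

/* invalidate any existing CPU mappings of the object */
drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);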
@@ -1632,7 +1637,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1632 * hope for the best. 1637 * hope for the best.
1633 */ 1638 */
1634 WARN_ON(ret != -EIO); 1639 WARN_ON(ret != -EIO);
1635 i915_gem_clflush_object(obj); 1640 i915_gem_clflush_object(obj, true);
1636 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; 1641 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1637 } 1642 }
1638 1643
@@ -1667,11 +1672,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1667 if (obj->pages == NULL) 1672 if (obj->pages == NULL)
1668 return 0; 1673 return 0;
1669 1674
1670 BUG_ON(obj->gtt_space);
1671
1672 if (obj->pages_pin_count) 1675 if (obj->pages_pin_count)
1673 return -EBUSY; 1676 return -EBUSY;
1674 1677
1678 BUG_ON(i915_gem_obj_bound_any(obj));
1679
1675 /* ->put_pages might need to allocate memory for the bit17 swizzle 1680 /* ->put_pages might need to allocate memory for the bit17 swizzle
1676 * array, hence protect them from being reaped by removing them from gtt 1681 * array, hence protect them from being reaped by removing them from gtt
1677 * lists early. */ 1682 * lists early. */
@@ -1704,12 +1709,18 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1704 } 1709 }
1705 } 1710 }
1706 1711
1707 list_for_each_entry_safe(obj, next, 1712 list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
1708 &dev_priv->mm.inactive_list, 1713 global_list) {
1709 mm_list) { 1714 struct i915_vma *vma, *v;
1710 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) && 1715
1711 i915_gem_object_unbind(obj) == 0 && 1716 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1712 i915_gem_object_put_pages(obj) == 0) { 1717 continue;
1718
1719 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1720 if (i915_vma_unbind(vma))
1721 break;
1722
1723 if (!i915_gem_object_put_pages(obj)) {
1713 count += obj->base.size >> PAGE_SHIFT; 1724 count += obj->base.size >> PAGE_SHIFT;
1714 if (count >= target) 1725 if (count >= target)
1715 return count; 1726 return count;
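The shrinker now walks the bound list and tries to unbind every VMA on a purgeable object before dropping its pages. Both loops use the _safe iterators because a successful unbind deletes the current entry from the list being walked; the inner loop in isolation:

list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
	if (i915_vma_unbind(vma))
		break;	/* stop at the first VMA that refuses to unbind */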
@@ -1892,8 +1903,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1892 obj->active = 1; 1903 obj->active = 1;
1893 } 1904 }
1894 1905
1895 /* Move from whatever list we were on to the tail of execution. */
1896 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1897 list_move_tail(&obj->ring_list, &ring->active_list); 1906 list_move_tail(&obj->ring_list, &ring->active_list);
1898 1907
1899 obj->last_read_seqno = seqno; 1908 obj->last_read_seqno = seqno;
@@ -1915,13 +1924,14 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1915static void 1924static void
1916i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) 1925i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1917{ 1926{
1918 struct drm_device *dev = obj->base.dev; 1927 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1919 struct drm_i915_private *dev_priv = dev->dev_private; 1928 struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
1929 struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
1920 1930
1921 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); 1931 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
1922 BUG_ON(!obj->active); 1932 BUG_ON(!obj->active);
1923 1933
1924 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 1934 list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
1925 1935
1926 list_del_init(&obj->ring_list); 1936 list_del_init(&obj->ring_list);
1927 obj->ring = NULL; 1937 obj->ring = NULL;
@@ -2085,11 +2095,9 @@ int __i915_add_request(struct intel_ring_buffer *ring,
2085 trace_i915_gem_request_add(ring, request->seqno); 2095 trace_i915_gem_request_add(ring, request->seqno);
2086 ring->outstanding_lazy_request = 0; 2096 ring->outstanding_lazy_request = 0;
2087 2097
2088 if (!dev_priv->mm.suspended) { 2098 if (!dev_priv->ums.mm_suspended) {
2089 if (i915_enable_hangcheck) { 2099 i915_queue_hangcheck(ring->dev);
2090 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 2100
2091 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
2092 }
2093 if (was_empty) { 2101 if (was_empty) {
2094 queue_delayed_work(dev_priv->wq, 2102 queue_delayed_work(dev_priv->wq,
2095 &dev_priv->mm.retire_work, 2103 &dev_priv->mm.retire_work,
@@ -2119,10 +2127,11 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2119 spin_unlock(&file_priv->mm.lock); 2127 spin_unlock(&file_priv->mm.lock);
2120} 2128}
2121 2129
2122static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj) 2130static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
2131 struct i915_address_space *vm)
2123{ 2132{
2124 if (acthd >= obj->gtt_offset && 2133 if (acthd >= i915_gem_obj_offset(obj, vm) &&
2125 acthd < obj->gtt_offset + obj->base.size) 2134 acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
2126 return true; 2135 return true;
2127 2136
2128 return false; 2137 return false;
@@ -2145,6 +2154,17 @@ static bool i915_head_inside_request(const u32 acthd_unmasked,
2145 return false; 2154 return false;
2146} 2155}
2147 2156
2157static struct i915_address_space *
2158request_to_vm(struct drm_i915_gem_request *request)
2159{
2160 struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
2161 struct i915_address_space *vm;
2162
2163 vm = &dev_priv->gtt.base;
2164
2165 return vm;
2166}
2167
2148static bool i915_request_guilty(struct drm_i915_gem_request *request, 2168static bool i915_request_guilty(struct drm_i915_gem_request *request,
2149 const u32 acthd, bool *inside) 2169 const u32 acthd, bool *inside)
2150{ 2170{
@@ -2152,9 +2172,9 @@ static bool i915_request_guilty(struct drm_i915_gem_request *request,
2152 * pointing inside the ring, matches the batch_obj address range. 2172 * pointing inside the ring, matches the batch_obj address range.
2153 * However this is extremely unlikely. 2173 * However this is extremely unlikely.
2154 */ 2174 */
2155
2156 if (request->batch_obj) { 2175 if (request->batch_obj) {
2157 if (i915_head_inside_object(acthd, request->batch_obj)) { 2176 if (i915_head_inside_object(acthd, request->batch_obj,
2177 request_to_vm(request))) {
2158 *inside = true; 2178 *inside = true;
2159 return true; 2179 return true;
2160 } 2180 }
@@ -2174,17 +2194,21 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
2174{ 2194{
2175 struct i915_ctx_hang_stats *hs = NULL; 2195 struct i915_ctx_hang_stats *hs = NULL;
2176 bool inside, guilty; 2196 bool inside, guilty;
2197 unsigned long offset = 0;
2177 2198
2178 /* Innocent until proven guilty */ 2199 /* Innocent until proven guilty */
2179 guilty = false; 2200 guilty = false;
2180 2201
2181 if (ring->hangcheck.action != wait && 2202 if (request->batch_obj)
2203 offset = i915_gem_obj_offset(request->batch_obj,
2204 request_to_vm(request));
2205
2206 if (ring->hangcheck.action != HANGCHECK_WAIT &&
2182 i915_request_guilty(request, acthd, &inside)) { 2207 i915_request_guilty(request, acthd, &inside)) {
2183 DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n", 2208 DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
2184 ring->name, 2209 ring->name,
2185 inside ? "inside" : "flushing", 2210 inside ? "inside" : "flushing",
2186 request->batch_obj ? 2211 offset,
2187 request->batch_obj->gtt_offset : 0,
2188 request->ctx ? request->ctx->id : 0, 2212 request->ctx ? request->ctx->id : 0,
2189 acthd); 2213 acthd);
2190 2214
@@ -2275,23 +2299,12 @@ void i915_gem_restore_fences(struct drm_device *dev)
2275void i915_gem_reset(struct drm_device *dev) 2299void i915_gem_reset(struct drm_device *dev)
2276{ 2300{
2277 struct drm_i915_private *dev_priv = dev->dev_private; 2301 struct drm_i915_private *dev_priv = dev->dev_private;
2278 struct drm_i915_gem_object *obj;
2279 struct intel_ring_buffer *ring; 2302 struct intel_ring_buffer *ring;
2280 int i; 2303 int i;
2281 2304
2282 for_each_ring(ring, dev_priv, i) 2305 for_each_ring(ring, dev_priv, i)
2283 i915_gem_reset_ring_lists(dev_priv, ring); 2306 i915_gem_reset_ring_lists(dev_priv, ring);
2284 2307
2285 /* Move everything out of the GPU domains to ensure we do any
2286 * necessary invalidation upon reuse.
2287 */
2288 list_for_each_entry(obj,
2289 &dev_priv->mm.inactive_list,
2290 mm_list)
2291 {
2292 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
2293 }
2294
2295 i915_gem_restore_fences(dev); 2308 i915_gem_restore_fences(dev);
2296} 2309}
2297 2310
@@ -2400,7 +2413,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
2400 idle &= list_empty(&ring->request_list); 2413 idle &= list_empty(&ring->request_list);
2401 } 2414 }
2402 2415
2403 if (!dev_priv->mm.suspended && !idle) 2416 if (!dev_priv->ums.mm_suspended && !idle)
2404 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 2417 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2405 round_jiffies_up_relative(HZ)); 2418 round_jiffies_up_relative(HZ));
2406 if (idle) 2419 if (idle)
@@ -2586,18 +2599,18 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2586 old_write_domain); 2599 old_write_domain);
2587} 2600}
2588 2601
2589/** 2602int i915_vma_unbind(struct i915_vma *vma)
2590 * Unbinds an object from the GTT aperture.
2591 */
2592int
2593i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2594{ 2603{
2604 struct drm_i915_gem_object *obj = vma->obj;
2595 drm_i915_private_t *dev_priv = obj->base.dev->dev_private; 2605 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2596 int ret; 2606 int ret;
2597 2607
2598 if (obj->gtt_space == NULL) 2608 if (list_empty(&vma->vma_link))
2599 return 0; 2609 return 0;
2600 2610
2611 if (!drm_mm_node_allocated(&vma->node))
2612 goto destroy;
2613
2601 if (obj->pin_count) 2614 if (obj->pin_count)
2602 return -EBUSY; 2615 return -EBUSY;
2603 2616
@@ -2618,7 +2631,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2618 if (ret) 2631 if (ret)
2619 return ret; 2632 return ret;
2620 2633
2621 trace_i915_gem_object_unbind(obj); 2634 trace_i915_vma_unbind(vma);
2622 2635
2623 if (obj->has_global_gtt_mapping) 2636 if (obj->has_global_gtt_mapping)
2624 i915_gem_gtt_unbind_object(obj); 2637 i915_gem_gtt_unbind_object(obj);
@@ -2629,18 +2642,46 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2629 i915_gem_gtt_finish_object(obj); 2642 i915_gem_gtt_finish_object(obj);
2630 i915_gem_object_unpin_pages(obj); 2643 i915_gem_object_unpin_pages(obj);
2631 2644
2632 list_del(&obj->mm_list); 2645 list_del(&vma->mm_list);
2633 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2634 /* Avoid an unnecessary call to unbind on rebind. */ 2646 /* Avoid an unnecessary call to unbind on rebind. */
2635 obj->map_and_fenceable = true; 2647 if (i915_is_ggtt(vma->vm))
2648 obj->map_and_fenceable = true;
2636 2649
2637 drm_mm_put_block(obj->gtt_space); 2650 drm_mm_remove_node(&vma->node);
2638 obj->gtt_space = NULL; 2651
2639 obj->gtt_offset = 0; 2652destroy:
2653 i915_gem_vma_destroy(vma);
2654
2655 /* Since the unbound list is global, only move to that list if
2656 * no more VMAs exist.
2657 * NB: Until we have real VMAs there will only ever be one */
2658 WARN_ON(!list_empty(&obj->vma_list));
2659 if (list_empty(&obj->vma_list))
2660 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2640 2661
2641 return 0; 2662 return 0;
2642} 2663}
2643 2664
2665/**
2666 * Unbinds an object from the global GTT aperture.
2667 */
2668int
2669i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2670{
2671 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2672 struct i915_address_space *ggtt = &dev_priv->gtt.base;
2673
2674 if (!i915_gem_obj_ggtt_bound(obj))
2675 return 0;
2676
2677 if (obj->pin_count)
2678 return -EBUSY;
2679
2680 BUG_ON(obj->pages == NULL);
2681
2682 return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
2683}
2684
2644int i915_gpu_idle(struct drm_device *dev) 2685int i915_gpu_idle(struct drm_device *dev)
2645{ 2686{
2646 drm_i915_private_t *dev_priv = dev->dev_private; 2687 drm_i915_private_t *dev_priv = dev->dev_private;
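Unbinding is now a per-VMA operation, and the old object-level entry point survives only as the GGTT convenience wrapper shown above. Its core reduces to a lookup plus the VMA call; a sketch, with the NULL check added for illustration (the in-tree wrapper relies on the i915_gem_obj_ggtt_bound() test instead):

struct i915_address_space *ggtt = &dev_priv->gtt.base;
struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt);

if (vma)
	ret = i915_vma_unbind(vma);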
@@ -2691,12 +2732,12 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
2691 POSTING_READ(fence_reg); 2732 POSTING_READ(fence_reg);
2692 2733
2693 if (obj) { 2734 if (obj) {
2694 u32 size = obj->gtt_space->size; 2735 u32 size = i915_gem_obj_ggtt_size(obj);
2695 uint64_t val; 2736 uint64_t val;
2696 2737
2697 val = (uint64_t)((obj->gtt_offset + size - 4096) & 2738 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
2698 0xfffff000) << 32; 2739 0xfffff000) << 32;
2699 val |= obj->gtt_offset & 0xfffff000; 2740 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
2700 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift; 2741 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
2701 if (obj->tiling_mode == I915_TILING_Y) 2742 if (obj->tiling_mode == I915_TILING_Y)
2702 val |= 1 << I965_FENCE_TILING_Y_SHIFT; 2743 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
@@ -2720,15 +2761,15 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
2720 u32 val; 2761 u32 val;
2721 2762
2722 if (obj) { 2763 if (obj) {
2723 u32 size = obj->gtt_space->size; 2764 u32 size = i915_gem_obj_ggtt_size(obj);
2724 int pitch_val; 2765 int pitch_val;
2725 int tile_width; 2766 int tile_width;
2726 2767
2727 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) || 2768 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
2728 (size & -size) != size || 2769 (size & -size) != size ||
2729 (obj->gtt_offset & (size - 1)), 2770 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2730 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", 2771 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2731 obj->gtt_offset, obj->map_and_fenceable, size); 2772 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
2732 2773
2733 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) 2774 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2734 tile_width = 128; 2775 tile_width = 128;
@@ -2739,7 +2780,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
2739 pitch_val = obj->stride / tile_width; 2780 pitch_val = obj->stride / tile_width;
2740 pitch_val = ffs(pitch_val) - 1; 2781 pitch_val = ffs(pitch_val) - 1;
2741 2782
2742 val = obj->gtt_offset; 2783 val = i915_gem_obj_ggtt_offset(obj);
2743 if (obj->tiling_mode == I915_TILING_Y) 2784 if (obj->tiling_mode == I915_TILING_Y)
2744 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 2785 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2745 val |= I915_FENCE_SIZE_BITS(size); 2786 val |= I915_FENCE_SIZE_BITS(size);
@@ -2764,19 +2805,19 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
2764 uint32_t val; 2805 uint32_t val;
2765 2806
2766 if (obj) { 2807 if (obj) {
2767 u32 size = obj->gtt_space->size; 2808 u32 size = i915_gem_obj_ggtt_size(obj);
2768 uint32_t pitch_val; 2809 uint32_t pitch_val;
2769 2810
2770 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) || 2811 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
2771 (size & -size) != size || 2812 (size & -size) != size ||
2772 (obj->gtt_offset & (size - 1)), 2813 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2773 "object 0x%08x not 512K or pot-size 0x%08x aligned\n", 2814 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2774 obj->gtt_offset, size); 2815 i915_gem_obj_ggtt_offset(obj), size);
2775 2816
2776 pitch_val = obj->stride / 128; 2817 pitch_val = obj->stride / 128;
2777 pitch_val = ffs(pitch_val) - 1; 2818 pitch_val = ffs(pitch_val) - 1;
2778 2819
2779 val = obj->gtt_offset; 2820 val = i915_gem_obj_ggtt_offset(obj);
2780 if (obj->tiling_mode == I915_TILING_Y) 2821 if (obj->tiling_mode == I915_TILING_Y)
2781 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 2822 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2782 val |= I830_FENCE_SIZE_BITS(size); 2823 val |= I830_FENCE_SIZE_BITS(size);
@@ -2997,7 +3038,7 @@ static bool i915_gem_valid_gtt_space(struct drm_device *dev,
2997 if (HAS_LLC(dev)) 3038 if (HAS_LLC(dev))
2998 return true; 3039 return true;
2999 3040
3000 if (gtt_space == NULL) 3041 if (!drm_mm_node_allocated(gtt_space))
3001 return true; 3042 return true;
3002 3043
3003 if (list_empty(&gtt_space->node_list)) 3044 if (list_empty(&gtt_space->node_list))
@@ -3030,8 +3071,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
3030 3071
3031 if (obj->cache_level != obj->gtt_space->color) { 3072 if (obj->cache_level != obj->gtt_space->color) {
3032 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n", 3073 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3033 obj->gtt_space->start, 3074 i915_gem_obj_ggtt_offset(obj),
3034 obj->gtt_space->start + obj->gtt_space->size, 3075 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3035 obj->cache_level, 3076 obj->cache_level,
3036 obj->gtt_space->color); 3077 obj->gtt_space->color);
3037 err++; 3078 err++;
@@ -3042,8 +3083,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
3042 obj->gtt_space, 3083 obj->gtt_space,
3043 obj->cache_level)) { 3084 obj->cache_level)) {
3044 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n", 3085 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3045 obj->gtt_space->start, 3086 i915_gem_obj_ggtt_offset(obj),
3046 obj->gtt_space->start + obj->gtt_space->size, 3087 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3047 obj->cache_level); 3088 obj->cache_level);
3048 err++; 3089 err++;
3049 continue; 3090 continue;
@@ -3058,18 +3099,18 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
3058 * Finds free space in the GTT aperture and binds the object there. 3099 * Finds free space in the GTT aperture and binds the object there.
3059 */ 3100 */
3060static int 3101static int
3061i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, 3102i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3062 unsigned alignment, 3103 struct i915_address_space *vm,
3063 bool map_and_fenceable, 3104 unsigned alignment,
3064 bool nonblocking) 3105 bool map_and_fenceable,
3106 bool nonblocking)
3065{ 3107{
3066 struct drm_device *dev = obj->base.dev; 3108 struct drm_device *dev = obj->base.dev;
3067 drm_i915_private_t *dev_priv = dev->dev_private; 3109 drm_i915_private_t *dev_priv = dev->dev_private;
3068 struct drm_mm_node *node;
3069 u32 size, fence_size, fence_alignment, unfenced_alignment; 3110 u32 size, fence_size, fence_alignment, unfenced_alignment;
3070 bool mappable, fenceable; 3111 size_t gtt_max =
3071 size_t gtt_max = map_and_fenceable ? 3112 map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
3072 dev_priv->gtt.mappable_end : dev_priv->gtt.total; 3113 struct i915_vma *vma;
3073 int ret; 3114 int ret;
3074 3115
3075 fence_size = i915_gem_get_gtt_size(dev, 3116 fence_size = i915_gem_get_gtt_size(dev,
@@ -3110,77 +3151,89 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
3110 3151
3111 i915_gem_object_pin_pages(obj); 3152 i915_gem_object_pin_pages(obj);
3112 3153
3113 node = kzalloc(sizeof(*node), GFP_KERNEL); 3154 BUG_ON(!i915_is_ggtt(vm));
3114 if (node == NULL) { 3155
3115 i915_gem_object_unpin_pages(obj); 3156 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
3116 return -ENOMEM; 3157 if (IS_ERR(vma)) {
3158 ret = PTR_ERR(vma);
3159 goto err_unpin;
3117 } 3160 }
3118 3161
3162 /* For now we only ever use 1 vma per object */
3163 WARN_ON(!list_is_singular(&obj->vma_list));
3164
3119search_free: 3165search_free:
3120 ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node, 3166 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3121 size, alignment, 3167 size, alignment,
3122 obj->cache_level, 0, gtt_max); 3168 obj->cache_level, 0, gtt_max,
3169 DRM_MM_SEARCH_DEFAULT);
3123 if (ret) { 3170 if (ret) {
3124 ret = i915_gem_evict_something(dev, size, alignment, 3171 ret = i915_gem_evict_something(dev, vm, size, alignment,
3125 obj->cache_level, 3172 obj->cache_level,
3126 map_and_fenceable, 3173 map_and_fenceable,
3127 nonblocking); 3174 nonblocking);
3128 if (ret == 0) 3175 if (ret == 0)
3129 goto search_free; 3176 goto search_free;
3130 3177
3131 i915_gem_object_unpin_pages(obj); 3178 goto err_free_vma;
3132 kfree(node);
3133 return ret;
3134 } 3179 }
3135 if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) { 3180 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
3136 i915_gem_object_unpin_pages(obj); 3181 obj->cache_level))) {
3137 drm_mm_put_block(node); 3182 ret = -EINVAL;
3138 return -EINVAL; 3183 goto err_remove_node;
3139 } 3184 }
3140 3185
3141 ret = i915_gem_gtt_prepare_object(obj); 3186 ret = i915_gem_gtt_prepare_object(obj);
3142 if (ret) { 3187 if (ret)
3143 i915_gem_object_unpin_pages(obj); 3188 goto err_remove_node;
3144 drm_mm_put_block(node);
3145 return ret;
3146 }
3147 3189
3148 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); 3190 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3149 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 3191 list_add_tail(&vma->mm_list, &vm->inactive_list);
3150 3192
3151 obj->gtt_space = node; 3193 if (i915_is_ggtt(vm)) {
3152 obj->gtt_offset = node->start; 3194 bool mappable, fenceable;
3153 3195
3154 fenceable = 3196 fenceable = (vma->node.size == fence_size &&
3155 node->size == fence_size && 3197 (vma->node.start & (fence_alignment - 1)) == 0);
3156 (node->start & (fence_alignment - 1)) == 0;
3157 3198
3158 mappable = 3199 mappable = (vma->node.start + obj->base.size <=
3159 obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end; 3200 dev_priv->gtt.mappable_end);
3201
3202 obj->map_and_fenceable = mappable && fenceable;
3203 }
3160 3204
3161 obj->map_and_fenceable = mappable && fenceable; 3205 WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
3162 3206
3163 trace_i915_gem_object_bind(obj, map_and_fenceable); 3207 trace_i915_vma_bind(vma, map_and_fenceable);
3164 i915_gem_verify_gtt(dev); 3208 i915_gem_verify_gtt(dev);
3165 return 0; 3209 return 0;
3210
3211err_remove_node:
3212 drm_mm_remove_node(&vma->node);
3213err_free_vma:
3214 i915_gem_vma_destroy(vma);
3215err_unpin:
3216 i915_gem_object_unpin_pages(obj);
3217 return ret;
3166} 3218}
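The rewritten bind path above consolidates failure handling into a chain of error labels (err_remove_node / err_free_vma / err_unpin) instead of repeating the unpin/free cleanup at every exit. A minimal userspace sketch of that unwind pattern, two stages instead of three and with hypothetical helper names standing in for the driver calls:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for page pinning, vma creation and drm_mm insertion. */
static int pin_pages(void)       { return 0; }
static int create_vma(void)      { return 0; }
static int insert_node(int fail) { return fail ? -ENOSPC : 0; }

static int bind_sketch(int force_fail)
{
	int ret;

	ret = pin_pages();
	if (ret)
		return ret;

	ret = create_vma();
	if (ret)
		goto err_unpin;

	ret = insert_node(force_fail);
	if (ret)
		goto err_free_vma;

	return 0;

err_free_vma:
	puts("destroy vma");   /* i915_gem_vma_destroy() in the patch */
err_unpin:
	puts("unpin pages");   /* i915_gem_object_unpin_pages() */
	return ret;
}

int main(void)
{
	printf("ok path: %d\n", bind_sketch(0));
	printf("failure path: %d\n", bind_sketch(1));
	return 0;
}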
3167 3219
3168void 3220bool
3169i915_gem_clflush_object(struct drm_i915_gem_object *obj) 3221i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3222 bool force)
3170{ 3223{
3171 /* If we don't have a page list set up, then we're not pinned 3224 /* If we don't have a page list set up, then we're not pinned
3172 * to GPU, and we can ignore the cache flush because it'll happen 3225 * to GPU, and we can ignore the cache flush because it'll happen
3173 * again at bind time. 3226 * again at bind time.
3174 */ 3227 */
3175 if (obj->pages == NULL) 3228 if (obj->pages == NULL)
3176 return; 3229 return false;
3177 3230
3178 /* 3231 /*
3179 * Stolen memory is always coherent with the GPU as it is explicitly 3232 * Stolen memory is always coherent with the GPU as it is explicitly
3180 * marked as wc by the system, or the system is cache-coherent. 3233 * marked as wc by the system, or the system is cache-coherent.
3181 */ 3234 */
3182 if (obj->stolen) 3235 if (obj->stolen)
3183 return; 3236 return false;
3184 3237
3185 /* If the GPU is snooping the contents of the CPU cache, 3238 /* If the GPU is snooping the contents of the CPU cache,
3186 * we do not need to manually clear the CPU cache lines. However, 3239 * we do not need to manually clear the CPU cache lines. However,
@@ -3190,12 +3243,13 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
3190 * snooping behaviour occurs naturally as the result of our domain 3243 * snooping behaviour occurs naturally as the result of our domain
3191 * tracking. 3244 * tracking.
3192 */ 3245 */
3193 if (obj->cache_level != I915_CACHE_NONE) 3246 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3194 return; 3247 return false;
3195 3248
3196 trace_i915_gem_object_clflush(obj); 3249 trace_i915_gem_object_clflush(obj);
3197
3198 drm_clflush_sg(obj->pages); 3250 drm_clflush_sg(obj->pages);
3251
3252 return true;
3199} 3253}
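i915_gem_clflush_object() now reports whether it actually flushed anything, so callers can make the chipset flush conditional. A hedged sketch of that caller pattern; the struct and helper names below are placeholders, not the driver's types:

#include <stdbool.h>
#include <stdio.h>

/* Placeholder object: only the fields this sketch needs. */
struct obj { bool has_pages, stolen, coherent; };

static bool clflush_sketch(const struct obj *o, bool force)
{
	if (!o->has_pages || o->stolen)
		return false;            /* nothing to flush */
	if (!force && o->coherent)
		return false;            /* snooped: flush not needed */
	puts("clflush object");
	return true;
}

static void flush_cpu_write_domain(const struct obj *o, bool force)
{
	if (clflush_sketch(o, force))
		puts("chipset flush");   /* only when lines were actually flushed */
}

int main(void)
{
	struct obj coherent = { true, false, true };
	struct obj wc       = { true, false, false };

	flush_cpu_write_domain(&coherent, false);   /* prints nothing */
	flush_cpu_write_domain(&wc, false);         /* prints both flushes */
	return 0;
}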
3200 3254
3201/** Flushes the GTT write domain for the object if it's dirty. */ 3255/** Flushes the GTT write domain for the object if it's dirty. */
@@ -3227,15 +3281,17 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3227 3281
3228/** Flushes the CPU write domain for the object if it's dirty. */ 3282/** Flushes the CPU write domain for the object if it's dirty. */
3229static void 3283static void
3230i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) 3284i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3285 bool force)
3231{ 3286{
3232 uint32_t old_write_domain; 3287 uint32_t old_write_domain;
3233 3288
3234 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) 3289 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3235 return; 3290 return;
3236 3291
3237 i915_gem_clflush_object(obj); 3292 if (i915_gem_clflush_object(obj, force))
3238 i915_gem_chipset_flush(obj->base.dev); 3293 i915_gem_chipset_flush(obj->base.dev);
3294
3239 old_write_domain = obj->base.write_domain; 3295 old_write_domain = obj->base.write_domain;
3240 obj->base.write_domain = 0; 3296 obj->base.write_domain = 0;
3241 3297
@@ -3258,7 +3314,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3258 int ret; 3314 int ret;
3259 3315
3260 /* Not valid to be called on unbound objects. */ 3316 /* Not valid to be called on unbound objects. */
3261 if (obj->gtt_space == NULL) 3317 if (!i915_gem_obj_bound_any(obj))
3262 return -EINVAL; 3318 return -EINVAL;
3263 3319
3264 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) 3320 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@@ -3268,7 +3324,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3268 if (ret) 3324 if (ret)
3269 return ret; 3325 return ret;
3270 3326
3271 i915_gem_object_flush_cpu_write_domain(obj); 3327 i915_gem_object_flush_cpu_write_domain(obj, false);
3272 3328
3273 /* Serialise direct access to this object with the barriers for 3329 /* Serialise direct access to this object with the barriers for
3274 * coherent writes from the GPU, by effectively invalidating the 3330 * coherent writes from the GPU, by effectively invalidating the
@@ -3296,8 +3352,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3296 old_write_domain); 3352 old_write_domain);
3297 3353
3298 /* And bump the LRU for this access */ 3354 /* And bump the LRU for this access */
3299 if (i915_gem_object_is_inactive(obj)) 3355 if (i915_gem_object_is_inactive(obj)) {
3300 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 3356 struct i915_vma *vma = i915_gem_obj_to_vma(obj,
3357 &dev_priv->gtt.base);
3358 if (vma)
3359 list_move_tail(&vma->mm_list,
3360 &dev_priv->gtt.base.inactive_list);
3361
3362 }
3301 3363
3302 return 0; 3364 return 0;
3303} 3365}
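With per-VM LRU lists, the access bump above moves the object's GGTT vma to the tail of that VM's inactive list rather than touching a device-global list. A small sketch of the list_move_tail() idea on a toy intrusive list, modelled loosely on the kernel's list_head but not the real implementation:

#include <stdio.h>

struct node { struct node *prev, *next; const char *name; };

static void list_init(struct node *h) { h->prev = h->next = h; }
static void list_del(struct node *n)  { n->prev->next = n->next; n->next->prev = n->prev; }
static void list_add_tail(struct node *n, struct node *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}
/* The "bump the LRU" operation: unlink and re-append at the tail. */
static void list_move_tail(struct node *n, struct node *h)
{
	list_del(n);
	list_add_tail(n, h);
}

int main(void)
{
	struct node inactive, a = { .name = "A" }, b = { .name = "B" };
	struct node *it;

	list_init(&inactive);
	list_add_tail(&a, &inactive);
	list_add_tail(&b, &inactive);

	list_move_tail(&a, &inactive);   /* A was just accessed: evict B first */

	for (it = inactive.next; it != &inactive; it = it->next)
		printf("%s ", it->name);     /* prints: B A */
	putchar('\n');
	return 0;
}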
@@ -3307,6 +3369,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3307{ 3369{
3308 struct drm_device *dev = obj->base.dev; 3370 struct drm_device *dev = obj->base.dev;
3309 drm_i915_private_t *dev_priv = dev->dev_private; 3371 drm_i915_private_t *dev_priv = dev->dev_private;
3372 struct i915_vma *vma;
3310 int ret; 3373 int ret;
3311 3374
3312 if (obj->cache_level == cache_level) 3375 if (obj->cache_level == cache_level)
@@ -3317,13 +3380,17 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3317 return -EBUSY; 3380 return -EBUSY;
3318 } 3381 }
3319 3382
3320 if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) { 3383 list_for_each_entry(vma, &obj->vma_list, vma_link) {
3321 ret = i915_gem_object_unbind(obj); 3384 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
3322 if (ret) 3385 ret = i915_vma_unbind(vma);
3323 return ret; 3386 if (ret)
3387 return ret;
3388
3389 break;
3390 }
3324 } 3391 }
3325 3392
3326 if (obj->gtt_space) { 3393 if (i915_gem_obj_bound_any(obj)) {
3327 ret = i915_gem_object_finish_gpu(obj); 3394 ret = i915_gem_object_finish_gpu(obj);
3328 if (ret) 3395 if (ret)
3329 return ret; 3396 return ret;
@@ -3345,11 +3412,13 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3345 if (obj->has_aliasing_ppgtt_mapping) 3412 if (obj->has_aliasing_ppgtt_mapping)
3346 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, 3413 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3347 obj, cache_level); 3414 obj, cache_level);
3348
3349 obj->gtt_space->color = cache_level;
3350 } 3415 }
3351 3416
3352 if (cache_level == I915_CACHE_NONE) { 3417 list_for_each_entry(vma, &obj->vma_list, vma_link)
3418 vma->node.color = cache_level;
3419 obj->cache_level = cache_level;
3420
3421 if (cpu_write_needs_clflush(obj)) {
3353 u32 old_read_domains, old_write_domain; 3422 u32 old_read_domains, old_write_domain;
3354 3423
3355 /* If we're coming from LLC cached, then we haven't 3424 /* If we're coming from LLC cached, then we haven't
@@ -3359,7 +3428,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3359 * Just set it to the CPU cache for now. 3428 * Just set it to the CPU cache for now.
3360 */ 3429 */
3361 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU); 3430 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3362 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
3363 3431
3364 old_read_domains = obj->base.read_domains; 3432 old_read_domains = obj->base.read_domains;
3365 old_write_domain = obj->base.write_domain; 3433 old_write_domain = obj->base.write_domain;
@@ -3372,7 +3440,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3372 old_write_domain); 3440 old_write_domain);
3373 } 3441 }
3374 3442
3375 obj->cache_level = cache_level;
3376 i915_gem_verify_gtt(dev); 3443 i915_gem_verify_gtt(dev);
3377 return 0; 3444 return 0;
3378} 3445}
@@ -3394,7 +3461,20 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3394 goto unlock; 3461 goto unlock;
3395 } 3462 }
3396 3463
3397 args->caching = obj->cache_level != I915_CACHE_NONE; 3464 switch (obj->cache_level) {
3465 case I915_CACHE_LLC:
3466 case I915_CACHE_L3_LLC:
3467 args->caching = I915_CACHING_CACHED;
3468 break;
3469
3470 case I915_CACHE_WT:
3471 args->caching = I915_CACHING_DISPLAY;
3472 break;
3473
3474 default:
3475 args->caching = I915_CACHING_NONE;
3476 break;
3477 }
3398 3478
3399 drm_gem_object_unreference(&obj->base); 3479 drm_gem_object_unreference(&obj->base);
3400unlock: 3480unlock:
@@ -3417,6 +3497,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3417 case I915_CACHING_CACHED: 3497 case I915_CACHING_CACHED:
3418 level = I915_CACHE_LLC; 3498 level = I915_CACHE_LLC;
3419 break; 3499 break;
3500 case I915_CACHING_DISPLAY:
3501 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3502 break;
3420 default: 3503 default:
3421 return -EINVAL; 3504 return -EINVAL;
3422 } 3505 }
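The two caching ioctls now translate between the enlarged set of internal cache levels and the three user-visible caching values, including the new write-through (WT) display mode. A sketch of both translations with illustrative enum values; the real constants live in the uapi and driver headers:

#include <stdio.h>

enum cache_level { CACHE_NONE, CACHE_LLC, CACHE_L3_LLC, CACHE_WT };
enum caching     { CACHING_NONE, CACHING_CACHED, CACHING_DISPLAY };

static enum caching report_caching(enum cache_level level)
{
	switch (level) {
	case CACHE_LLC:
	case CACHE_L3_LLC:
		return CACHING_CACHED;
	case CACHE_WT:
		return CACHING_DISPLAY;   /* write-through scanout caching */
	default:
		return CACHING_NONE;
	}
}

static enum cache_level request_caching(enum caching c, int has_wt)
{
	switch (c) {
	case CACHING_CACHED:  return CACHE_LLC;
	case CACHING_DISPLAY: return has_wt ? CACHE_WT : CACHE_NONE;
	default:              return CACHE_NONE;
	}
}

int main(void)
{
	printf("%d %d\n", report_caching(CACHE_WT),
			  request_caching(CACHING_DISPLAY, 1));
	return 0;
}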
@@ -3439,6 +3522,22 @@ unlock:
3439 return ret; 3522 return ret;
3440} 3523}
3441 3524
3525static bool is_pin_display(struct drm_i915_gem_object *obj)
3526{
3527 /* There are 3 sources that pin objects:
3528 * 1. The display engine (scanouts, sprites, cursors);
3529 * 2. Reservations for execbuffer;
3530 * 3. The user.
3531 *
3532 * We can ignore reservations as we hold the struct_mutex and
3533 * are only called outside of the reservation path. The user
3534 * can only increment pin_count once, and so if after
3535 * subtracting the potential reference by the user, any pin_count
3536 * remains, it must be due to another use by the display engine.
3537 */
3538 return obj->pin_count - !!obj->user_pin_count;
3539}
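is_pin_display() infers a display pin purely from the counters: the user contributes at most one pin, so any pin count left after subtracting that single reference must belong to the display engine. A tiny sketch of the arithmetic; the struct is an illustrative stand-in for the relevant object counters:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the drm_i915_gem_object pin counters. */
struct obj_sketch { int pin_count; int user_pin_count; };

static bool is_pin_display(const struct obj_sketch *o)
{
	/* The user holds at most one pin; anything left over is the display. */
	return o->pin_count - !!o->user_pin_count;
}

int main(void)
{
	struct obj_sketch user_only        = { .pin_count = 1, .user_pin_count = 3 };
	struct obj_sketch user_and_display = { .pin_count = 2, .user_pin_count = 1 };

	printf("%d %d\n", is_pin_display(&user_only),          /* 0 */
			  is_pin_display(&user_and_display));  /* 1 */
	return 0;
}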
3540
3442/* 3541/*
3443 * Prepare buffer for display plane (scanout, cursors, etc). 3542 * Prepare buffer for display plane (scanout, cursors, etc).
3444 * Can be called from an uninterruptible phase (modesetting) and allows 3543 * Can be called from an uninterruptible phase (modesetting) and allows
@@ -3458,6 +3557,11 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3458 return ret; 3557 return ret;
3459 } 3558 }
3460 3559
3560 /* Mark the pin_display early so that we account for the
3561 * display coherency whilst setting up the cache domains.
3562 */
3563 obj->pin_display = true;
3564
3461 /* The display engine is not coherent with the LLC cache on gen6. As 3565 /* The display engine is not coherent with the LLC cache on gen6. As
3462 * a result, we make sure that the pinning that is about to occur is 3566 * a result, we make sure that the pinning that is about to occur is
3463 * done with uncached PTEs. This is lowest common denominator for all 3567 * done with uncached PTEs. This is lowest common denominator for all
@@ -3467,19 +3571,20 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3467 * of uncaching, which would allow us to flush all the LLC-cached data 3571 * of uncaching, which would allow us to flush all the LLC-cached data
3468 * with that bit in the PTE to main memory with just one PIPE_CONTROL. 3572 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3469 */ 3573 */
3470 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE); 3574 ret = i915_gem_object_set_cache_level(obj,
3575 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3471 if (ret) 3576 if (ret)
3472 return ret; 3577 goto err_unpin_display;
3473 3578
3474 /* As the user may map the buffer once pinned in the display plane 3579 /* As the user may map the buffer once pinned in the display plane
3475 * (e.g. libkms for the bootup splash), we have to ensure that we 3580 * (e.g. libkms for the bootup splash), we have to ensure that we
3476 * always use map_and_fenceable for all scanout buffers. 3581 * always use map_and_fenceable for all scanout buffers.
3477 */ 3582 */
3478 ret = i915_gem_object_pin(obj, alignment, true, false); 3583 ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
3479 if (ret) 3584 if (ret)
3480 return ret; 3585 goto err_unpin_display;
3481 3586
3482 i915_gem_object_flush_cpu_write_domain(obj); 3587 i915_gem_object_flush_cpu_write_domain(obj, true);
3483 3588
3484 old_write_domain = obj->base.write_domain; 3589 old_write_domain = obj->base.write_domain;
3485 old_read_domains = obj->base.read_domains; 3590 old_read_domains = obj->base.read_domains;
@@ -3495,6 +3600,17 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3495 old_write_domain); 3600 old_write_domain);
3496 3601
3497 return 0; 3602 return 0;
3603
3604err_unpin_display:
3605 obj->pin_display = is_pin_display(obj);
3606 return ret;
3607}
3608
3609void
3610i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3611{
3612 i915_gem_object_unpin(obj);
3613 obj->pin_display = is_pin_display(obj);
3498} 3614}
3499 3615
3500int 3616int
@@ -3540,7 +3656,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3540 3656
3541 /* Flush the CPU cache if it's still invalid. */ 3657 /* Flush the CPU cache if it's still invalid. */
3542 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { 3658 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3543 i915_gem_clflush_object(obj); 3659 i915_gem_clflush_object(obj, false);
3544 3660
3545 obj->base.read_domains |= I915_GEM_DOMAIN_CPU; 3661 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3546 } 3662 }
@@ -3618,37 +3734,44 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3618 3734
3619int 3735int
3620i915_gem_object_pin(struct drm_i915_gem_object *obj, 3736i915_gem_object_pin(struct drm_i915_gem_object *obj,
3737 struct i915_address_space *vm,
3621 uint32_t alignment, 3738 uint32_t alignment,
3622 bool map_and_fenceable, 3739 bool map_and_fenceable,
3623 bool nonblocking) 3740 bool nonblocking)
3624{ 3741{
3742 struct i915_vma *vma;
3625 int ret; 3743 int ret;
3626 3744
3627 if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) 3745 if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3628 return -EBUSY; 3746 return -EBUSY;
3629 3747
3630 if (obj->gtt_space != NULL) { 3748 WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
3631 if ((alignment && obj->gtt_offset & (alignment - 1)) || 3749
3750 vma = i915_gem_obj_to_vma(obj, vm);
3751
3752 if (vma) {
3753 if ((alignment &&
3754 vma->node.start & (alignment - 1)) ||
3632 (map_and_fenceable && !obj->map_and_fenceable)) { 3755 (map_and_fenceable && !obj->map_and_fenceable)) {
3633 WARN(obj->pin_count, 3756 WARN(obj->pin_count,
3634 "bo is already pinned with incorrect alignment:" 3757 "bo is already pinned with incorrect alignment:"
3635 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d," 3758 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
3636 " obj->map_and_fenceable=%d\n", 3759 " obj->map_and_fenceable=%d\n",
3637 obj->gtt_offset, alignment, 3760 i915_gem_obj_offset(obj, vm), alignment,
3638 map_and_fenceable, 3761 map_and_fenceable,
3639 obj->map_and_fenceable); 3762 obj->map_and_fenceable);
3640 ret = i915_gem_object_unbind(obj); 3763 ret = i915_vma_unbind(vma);
3641 if (ret) 3764 if (ret)
3642 return ret; 3765 return ret;
3643 } 3766 }
3644 } 3767 }
3645 3768
3646 if (obj->gtt_space == NULL) { 3769 if (!i915_gem_obj_bound(obj, vm)) {
3647 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 3770 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3648 3771
3649 ret = i915_gem_object_bind_to_gtt(obj, alignment, 3772 ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
3650 map_and_fenceable, 3773 map_and_fenceable,
3651 nonblocking); 3774 nonblocking);
3652 if (ret) 3775 if (ret)
3653 return ret; 3776 return ret;
3654 3777
@@ -3669,7 +3792,7 @@ void
3669i915_gem_object_unpin(struct drm_i915_gem_object *obj) 3792i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3670{ 3793{
3671 BUG_ON(obj->pin_count == 0); 3794 BUG_ON(obj->pin_count == 0);
3672 BUG_ON(obj->gtt_space == NULL); 3795 BUG_ON(!i915_gem_obj_bound_any(obj));
3673 3796
3674 if (--obj->pin_count == 0) 3797 if (--obj->pin_count == 0)
3675 obj->pin_mappable = false; 3798 obj->pin_mappable = false;
@@ -3707,7 +3830,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3707 } 3830 }
3708 3831
3709 if (obj->user_pin_count == 0) { 3832 if (obj->user_pin_count == 0) {
3710 ret = i915_gem_object_pin(obj, args->alignment, true, false); 3833 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
3711 if (ret) 3834 if (ret)
3712 goto out; 3835 goto out;
3713 } 3836 }
@@ -3715,11 +3838,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3715 obj->user_pin_count++; 3838 obj->user_pin_count++;
3716 obj->pin_filp = file; 3839 obj->pin_filp = file;
3717 3840
3718 /* XXX - flush the CPU caches for pinned objects 3841 args->offset = i915_gem_obj_ggtt_offset(obj);
3719 * as the X server doesn't manage domains yet
3720 */
3721 i915_gem_object_flush_cpu_write_domain(obj);
3722 args->offset = obj->gtt_offset;
3723out: 3842out:
3724 drm_gem_object_unreference(&obj->base); 3843 drm_gem_object_unreference(&obj->base);
3725unlock: 3844unlock:
@@ -3858,10 +3977,11 @@ unlock:
3858void i915_gem_object_init(struct drm_i915_gem_object *obj, 3977void i915_gem_object_init(struct drm_i915_gem_object *obj,
3859 const struct drm_i915_gem_object_ops *ops) 3978 const struct drm_i915_gem_object_ops *ops)
3860{ 3979{
3861 INIT_LIST_HEAD(&obj->mm_list);
3862 INIT_LIST_HEAD(&obj->global_list); 3980 INIT_LIST_HEAD(&obj->global_list);
3863 INIT_LIST_HEAD(&obj->ring_list); 3981 INIT_LIST_HEAD(&obj->ring_list);
3864 INIT_LIST_HEAD(&obj->exec_list); 3982 INIT_LIST_HEAD(&obj->exec_list);
3983 INIT_LIST_HEAD(&obj->obj_exec_link);
3984 INIT_LIST_HEAD(&obj->vma_list);
3865 3985
3866 obj->ops = ops; 3986 obj->ops = ops;
3867 3987
@@ -3926,6 +4046,8 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3926 } else 4046 } else
3927 obj->cache_level = I915_CACHE_NONE; 4047 obj->cache_level = I915_CACHE_NONE;
3928 4048
4049 trace_i915_gem_object_create(obj);
4050
3929 return obj; 4051 return obj;
3930} 4052}
3931 4053
@@ -3941,6 +4063,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
3941 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); 4063 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3942 struct drm_device *dev = obj->base.dev; 4064 struct drm_device *dev = obj->base.dev;
3943 drm_i915_private_t *dev_priv = dev->dev_private; 4065 drm_i915_private_t *dev_priv = dev->dev_private;
4066 struct i915_vma *vma, *next;
3944 4067
3945 trace_i915_gem_object_destroy(obj); 4068 trace_i915_gem_object_destroy(obj);
3946 4069
@@ -3948,15 +4071,21 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
3948 i915_gem_detach_phys_object(dev, obj); 4071 i915_gem_detach_phys_object(dev, obj);
3949 4072
3950 obj->pin_count = 0; 4073 obj->pin_count = 0;
3951 if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) { 4074 /* NB: 0 or 1 elements */
3952 bool was_interruptible; 4075 WARN_ON(!list_empty(&obj->vma_list) &&
4076 !list_is_singular(&obj->vma_list));
4077 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4078 int ret = i915_vma_unbind(vma);
4079 if (WARN_ON(ret == -ERESTARTSYS)) {
4080 bool was_interruptible;
3953 4081
3954 was_interruptible = dev_priv->mm.interruptible; 4082 was_interruptible = dev_priv->mm.interruptible;
3955 dev_priv->mm.interruptible = false; 4083 dev_priv->mm.interruptible = false;
3956 4084
3957 WARN_ON(i915_gem_object_unbind(obj)); 4085 WARN_ON(i915_vma_unbind(vma));
3958 4086
3959 dev_priv->mm.interruptible = was_interruptible; 4087 dev_priv->mm.interruptible = was_interruptible;
4088 }
3960 } 4089 }
3961 4090
3962 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up 4091 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
@@ -3982,15 +4111,42 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
3982 i915_gem_object_free(obj); 4111 i915_gem_object_free(obj);
3983} 4112}
3984 4113
4114struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
4115 struct i915_address_space *vm)
4116{
4117 struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
4118 if (vma == NULL)
4119 return ERR_PTR(-ENOMEM);
4120
4121 INIT_LIST_HEAD(&vma->vma_link);
4122 INIT_LIST_HEAD(&vma->mm_list);
4123 INIT_LIST_HEAD(&vma->exec_list);
4124 vma->vm = vm;
4125 vma->obj = obj;
4126
4127 /* Keep GGTT vmas first to make debug easier */
4128 if (i915_is_ggtt(vm))
4129 list_add(&vma->vma_link, &obj->vma_list);
4130 else
4131 list_add_tail(&vma->vma_link, &obj->vma_list);
4132
4133 return vma;
4134}
4135
4136void i915_gem_vma_destroy(struct i915_vma *vma)
4137{
4138 WARN_ON(vma->node.allocated);
4139 list_del(&vma->vma_link);
4140 kfree(vma);
4141}
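i915_gem_vma_create() deliberately keeps the GGTT vma at the head of obj->vma_list so the global binding is always found (and printed) first; per-process address spaces are appended after it. A userspace sketch of the lookup-or-create flow over such a per-object list, with toy stand-ins rather than the driver structs:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for i915_address_space / i915_vma / drm_i915_gem_object. */
struct vm  { const char *name; int is_ggtt; };
struct vma { struct vma *next; const struct vm *vm; };
struct obj { struct vma *vma_list; };

static struct vma *obj_to_vma(struct obj *o, const struct vm *vm)
{
	struct vma *v;

	for (v = o->vma_list; v; v = v->next)
		if (v->vm == vm)
			return v;
	return NULL;
}

static struct vma *lookup_or_create_vma(struct obj *o, const struct vm *vm)
{
	struct vma *v = obj_to_vma(o, vm);

	if (v)
		return v;
	v = calloc(1, sizeof(*v));
	if (!v)
		return NULL;
	v->vm = vm;
	if (vm->is_ggtt || !o->vma_list) {   /* GGTT vma kept first, as in the patch */
		v->next = o->vma_list;
		o->vma_list = v;
	} else {                             /* other VMs appended after it */
		struct vma *tail = o->vma_list;
		while (tail->next)
			tail = tail->next;
		tail->next = v;
	}
	return v;
}

int main(void)
{
	struct vm ggtt = { "ggtt", 1 }, ppgtt = { "ppgtt", 0 };
	struct obj o = { 0 };

	lookup_or_create_vma(&o, &ppgtt);
	lookup_or_create_vma(&o, &ggtt);
	printf("first vma: %s\n", o.vma_list->vm->name);   /* ggtt */
	return 0;
}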
4142
3985int 4143int
3986i915_gem_idle(struct drm_device *dev) 4144i915_gem_idle(struct drm_device *dev)
3987{ 4145{
3988 drm_i915_private_t *dev_priv = dev->dev_private; 4146 drm_i915_private_t *dev_priv = dev->dev_private;
3989 int ret; 4147 int ret;
3990 4148
3991 mutex_lock(&dev->struct_mutex); 4149 if (dev_priv->ums.mm_suspended) {
3992
3993 if (dev_priv->mm.suspended) {
3994 mutex_unlock(&dev->struct_mutex); 4150 mutex_unlock(&dev->struct_mutex);
3995 return 0; 4151 return 0;
3996 } 4152 }
@@ -4006,18 +4162,11 @@ i915_gem_idle(struct drm_device *dev)
4006 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 4162 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4007 i915_gem_evict_everything(dev); 4163 i915_gem_evict_everything(dev);
4008 4164
4009 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4010 * We need to replace this with a semaphore, or something.
4011 * And not confound mm.suspended!
4012 */
4013 dev_priv->mm.suspended = 1;
4014 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); 4165 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4015 4166
4016 i915_kernel_lost_context(dev); 4167 i915_kernel_lost_context(dev);
4017 i915_gem_cleanup_ringbuffer(dev); 4168 i915_gem_cleanup_ringbuffer(dev);
4018 4169
4019 mutex_unlock(&dev->struct_mutex);
4020
4021 /* Cancel the retire work handler, which should be idle now. */ 4170 /* Cancel the retire work handler, which should be idle now. */
4022 cancel_delayed_work_sync(&dev_priv->mm.retire_work); 4171 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4023 4172
@@ -4150,8 +4299,8 @@ i915_gem_init_hw(struct drm_device *dev)
4150 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) 4299 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4151 return -EIO; 4300 return -EIO;
4152 4301
4153 if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1)) 4302 if (dev_priv->ellc_size)
4154 I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000); 4303 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4155 4304
4156 if (HAS_PCH_NOP(dev)) { 4305 if (HAS_PCH_NOP(dev)) {
4157 u32 temp = I915_READ(GEN7_MSG_CTL); 4306 u32 temp = I915_READ(GEN7_MSG_CTL);
@@ -4227,7 +4376,7 @@ int
4227i915_gem_entervt_ioctl(struct drm_device *dev, void *data, 4376i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4228 struct drm_file *file_priv) 4377 struct drm_file *file_priv)
4229{ 4378{
4230 drm_i915_private_t *dev_priv = dev->dev_private; 4379 struct drm_i915_private *dev_priv = dev->dev_private;
4231 int ret; 4380 int ret;
4232 4381
4233 if (drm_core_check_feature(dev, DRIVER_MODESET)) 4382 if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -4239,7 +4388,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4239 } 4388 }
4240 4389
4241 mutex_lock(&dev->struct_mutex); 4390 mutex_lock(&dev->struct_mutex);
4242 dev_priv->mm.suspended = 0; 4391 dev_priv->ums.mm_suspended = 0;
4243 4392
4244 ret = i915_gem_init_hw(dev); 4393 ret = i915_gem_init_hw(dev);
4245 if (ret != 0) { 4394 if (ret != 0) {
@@ -4247,7 +4396,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4247 return ret; 4396 return ret;
4248 } 4397 }
4249 4398
4250 BUG_ON(!list_empty(&dev_priv->mm.active_list)); 4399 BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
4251 mutex_unlock(&dev->struct_mutex); 4400 mutex_unlock(&dev->struct_mutex);
4252 4401
4253 ret = drm_irq_install(dev); 4402 ret = drm_irq_install(dev);
@@ -4259,7 +4408,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4259cleanup_ringbuffer: 4408cleanup_ringbuffer:
4260 mutex_lock(&dev->struct_mutex); 4409 mutex_lock(&dev->struct_mutex);
4261 i915_gem_cleanup_ringbuffer(dev); 4410 i915_gem_cleanup_ringbuffer(dev);
4262 dev_priv->mm.suspended = 1; 4411 dev_priv->ums.mm_suspended = 1;
4263 mutex_unlock(&dev->struct_mutex); 4412 mutex_unlock(&dev->struct_mutex);
4264 4413
4265 return ret; 4414 return ret;
@@ -4269,11 +4418,26 @@ int
4269i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, 4418i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4270 struct drm_file *file_priv) 4419 struct drm_file *file_priv)
4271{ 4420{
4421 struct drm_i915_private *dev_priv = dev->dev_private;
4422 int ret;
4423
4272 if (drm_core_check_feature(dev, DRIVER_MODESET)) 4424 if (drm_core_check_feature(dev, DRIVER_MODESET))
4273 return 0; 4425 return 0;
4274 4426
4275 drm_irq_uninstall(dev); 4427 drm_irq_uninstall(dev);
4276 return i915_gem_idle(dev); 4428
4429 mutex_lock(&dev->struct_mutex);
4430 ret = i915_gem_idle(dev);
4431
4432 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4433 * We need to replace this with a semaphore, or something.
4434 * And not confound ums.mm_suspended!
4435 */
4436 if (ret != 0)
4437 dev_priv->ums.mm_suspended = 1;
4438 mutex_unlock(&dev->struct_mutex);
4439
4440 return ret;
4277} 4441}
4278 4442
4279void 4443void
@@ -4284,9 +4448,11 @@ i915_gem_lastclose(struct drm_device *dev)
4284 if (drm_core_check_feature(dev, DRIVER_MODESET)) 4448 if (drm_core_check_feature(dev, DRIVER_MODESET))
4285 return; 4449 return;
4286 4450
4451 mutex_lock(&dev->struct_mutex);
4287 ret = i915_gem_idle(dev); 4452 ret = i915_gem_idle(dev);
4288 if (ret) 4453 if (ret)
4289 DRM_ERROR("failed to idle hardware: %d\n", ret); 4454 DRM_ERROR("failed to idle hardware: %d\n", ret);
4455 mutex_unlock(&dev->struct_mutex);
4290} 4456}
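i915_gem_idle() no longer takes struct_mutex itself; both remaining callers (leavevt_ioctl and lastclose) now acquire the mutex around the call. A tiny pthread sketch of the same lock-at-the-caller refactor, with hypothetical names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Callee assumes the lock is already held, as i915_gem_idle() now does. */
static int gem_idle_locked(void)
{
	puts("idle hardware");
	return 0;
}

static void lastclose_sketch(void)
{
	pthread_mutex_lock(&struct_mutex);
	if (gem_idle_locked())
		puts("failed to idle hardware");
	pthread_mutex_unlock(&struct_mutex);
}

int main(void)
{
	lastclose_sketch();
	return 0;
}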
4291 4457
4292static void 4458static void
@@ -4296,6 +4462,16 @@ init_ring_lists(struct intel_ring_buffer *ring)
4296 INIT_LIST_HEAD(&ring->request_list); 4462 INIT_LIST_HEAD(&ring->request_list);
4297} 4463}
4298 4464
4465static void i915_init_vm(struct drm_i915_private *dev_priv,
4466 struct i915_address_space *vm)
4467{
4468 vm->dev = dev_priv->dev;
4469 INIT_LIST_HEAD(&vm->active_list);
4470 INIT_LIST_HEAD(&vm->inactive_list);
4471 INIT_LIST_HEAD(&vm->global_link);
4472 list_add(&vm->global_link, &dev_priv->vm_list);
4473}
4474
4299void 4475void
4300i915_gem_load(struct drm_device *dev) 4476i915_gem_load(struct drm_device *dev)
4301{ 4477{
@@ -4308,8 +4484,9 @@ i915_gem_load(struct drm_device *dev)
4308 SLAB_HWCACHE_ALIGN, 4484 SLAB_HWCACHE_ALIGN,
4309 NULL); 4485 NULL);
4310 4486
4311 INIT_LIST_HEAD(&dev_priv->mm.active_list); 4487 INIT_LIST_HEAD(&dev_priv->vm_list);
4312 INIT_LIST_HEAD(&dev_priv->mm.inactive_list); 4488 i915_init_vm(dev_priv, &dev_priv->gtt.base);
4489
4313 INIT_LIST_HEAD(&dev_priv->mm.unbound_list); 4490 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4314 INIT_LIST_HEAD(&dev_priv->mm.bound_list); 4491 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4315 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 4492 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4608,11 +4785,101 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4608 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) 4785 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
4609 if (obj->pages_pin_count == 0) 4786 if (obj->pages_pin_count == 0)
4610 cnt += obj->base.size >> PAGE_SHIFT; 4787 cnt += obj->base.size >> PAGE_SHIFT;
4611 list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) 4788
4789 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
4790 if (obj->active)
4791 continue;
4792
4612 if (obj->pin_count == 0 && obj->pages_pin_count == 0) 4793 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
4613 cnt += obj->base.size >> PAGE_SHIFT; 4794 cnt += obj->base.size >> PAGE_SHIFT;
4795 }
4614 4796
4615 if (unlock) 4797 if (unlock)
4616 mutex_unlock(&dev->struct_mutex); 4798 mutex_unlock(&dev->struct_mutex);
4617 return cnt; 4799 return cnt;
4618} 4800}
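The shrinker count above now derives the reclaimable-page total from the unbound list plus the idle part of the global bound list, skipping active and pinned objects, since the device-wide inactive list is gone. A sketch of that counting rule over an illustrative object array:

#include <stdio.h>

/* Illustrative stand-in carrying only the fields the count rule looks at. */
struct obj { int active, pin_count, pages_pin_count, npages, bound; };

static long shrink_count(const struct obj *objs, int n)
{
	long cnt = 0;
	int i;

	for (i = 0; i < n; i++) {
		const struct obj *o = &objs[i];

		if (!o->bound) {                     /* unbound list */
			if (o->pages_pin_count == 0)
				cnt += o->npages;
		} else if (!o->active) {             /* bound but idle */
			if (o->pin_count == 0 && o->pages_pin_count == 0)
				cnt += o->npages;
		}
	}
	return cnt;
}

int main(void)
{
	struct obj objs[] = {
		{ .bound = 0, .npages = 4 },                  /* counted */
		{ .bound = 1, .active = 1, .npages = 8 },     /* skipped: active */
		{ .bound = 1, .pin_count = 1, .npages = 2 },  /* skipped: pinned */
		{ .bound = 1, .npages = 16 },                 /* counted */
	};

	printf("%ld\n", shrink_count(objs, 4));   /* 20 */
	return 0;
}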
4801
4802/* All the new VM stuff */
4803unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
4804 struct i915_address_space *vm)
4805{
4806 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4807 struct i915_vma *vma;
4808
4809 if (vm == &dev_priv->mm.aliasing_ppgtt->base)
4810 vm = &dev_priv->gtt.base;
4811
4812 BUG_ON(list_empty(&o->vma_list));
4813 list_for_each_entry(vma, &o->vma_list, vma_link) {
4814 if (vma->vm == vm)
4815 return vma->node.start;
4816
4817 }
4818 return -1;
4819}
4820
4821bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
4822 struct i915_address_space *vm)
4823{
4824 struct i915_vma *vma;
4825
4826 list_for_each_entry(vma, &o->vma_list, vma_link)
4827 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
4828 return true;
4829
4830 return false;
4831}
4832
4833bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
4834{
4835 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4836 struct i915_address_space *vm;
4837
4838 list_for_each_entry(vm, &dev_priv->vm_list, global_link)
4839 if (i915_gem_obj_bound(o, vm))
4840 return true;
4841
4842 return false;
4843}
4844
4845unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
4846 struct i915_address_space *vm)
4847{
4848 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4849 struct i915_vma *vma;
4850
4851 if (vm == &dev_priv->mm.aliasing_ppgtt->base)
4852 vm = &dev_priv->gtt.base;
4853
4854 BUG_ON(list_empty(&o->vma_list));
4855
4856 list_for_each_entry(vma, &o->vma_list, vma_link)
4857 if (vma->vm == vm)
4858 return vma->node.size;
4859
4860 return 0;
4861}
4862
4863struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4864 struct i915_address_space *vm)
4865{
4866 struct i915_vma *vma;
4867 list_for_each_entry(vma, &obj->vma_list, vma_link)
4868 if (vma->vm == vm)
4869 return vma;
4870
4871 return NULL;
4872}
4873
4874struct i915_vma *
4875i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
4876 struct i915_address_space *vm)
4877{
4878 struct i915_vma *vma;
4879
4880 vma = i915_gem_obj_to_vma(obj, vm);
4881 if (!vma)
4882 vma = i915_gem_vma_create(obj, vm);
4883
4884 return vma;
4885}
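The new per-VM query helpers walk obj->vma_list, and i915_gem_obj_offset()/i915_gem_obj_size() first redirect questions about the aliasing PPGTT to the GGTT, because with an aliasing PPGTT both share a single layout. A minimal model of that redirect using plain arrays instead of the driver's lists:

#include <stdio.h>

struct vm { const char *name; };

struct binding { const struct vm *vm; unsigned long offset; };

/* With an aliasing PPGTT, GGTT and PPGTT share one layout, so queries against
 * the aliasing PPGTT are answered from the GGTT binding, as in the hunk above. */
static unsigned long obj_offset(const struct binding *b, int nbind,
				const struct vm *vm,
				const struct vm *aliasing, const struct vm *ggtt)
{
	int i;

	if (vm == aliasing)
		vm = ggtt;
	for (i = 0; i < nbind; i++)
		if (b[i].vm == vm)
			return b[i].offset;
	return (unsigned long)-1;   /* not bound in this VM */
}

int main(void)
{
	struct vm ggtt = { "ggtt" }, aliasing = { "aliasing" };
	struct binding bind[] = { { &ggtt, 0x10000 } };

	printf("0x%lx\n", obj_offset(bind, 1, &aliasing, &aliasing, &ggtt));
	return 0;
}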
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 51b7a2171cae..403309c2a7d6 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -155,7 +155,7 @@ create_hw_context(struct drm_device *dev,
155 155
156 if (INTEL_INFO(dev)->gen >= 7) { 156 if (INTEL_INFO(dev)->gen >= 7) {
157 ret = i915_gem_object_set_cache_level(ctx->obj, 157 ret = i915_gem_object_set_cache_level(ctx->obj,
158 I915_CACHE_LLC_MLC); 158 I915_CACHE_L3_LLC);
159 /* Failure shouldn't ever happen this early */ 159 /* Failure shouldn't ever happen this early */
160 if (WARN_ON(ret)) 160 if (WARN_ON(ret))
161 goto err_out; 161 goto err_out;
@@ -214,7 +214,7 @@ static int create_default_context(struct drm_i915_private *dev_priv)
214 * default context. 214 * default context.
215 */ 215 */
216 dev_priv->ring[RCS].default_context = ctx; 216 dev_priv->ring[RCS].default_context = ctx;
217 ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false); 217 ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
218 if (ret) { 218 if (ret) {
219 DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret); 219 DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
220 goto err_destroy; 220 goto err_destroy;
@@ -304,31 +304,24 @@ static int context_idr_cleanup(int id, void *p, void *data)
304} 304}
305 305
306struct i915_ctx_hang_stats * 306struct i915_ctx_hang_stats *
307i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring, 307i915_gem_context_get_hang_stats(struct drm_device *dev,
308 struct drm_file *file, 308 struct drm_file *file,
309 u32 id) 309 u32 id)
310{ 310{
311 struct drm_i915_private *dev_priv = ring->dev->dev_private; 311 struct drm_i915_private *dev_priv = dev->dev_private;
312 struct drm_i915_file_private *file_priv = file->driver_priv; 312 struct drm_i915_file_private *file_priv = file->driver_priv;
313 struct i915_hw_context *to; 313 struct i915_hw_context *ctx;
314
315 if (dev_priv->hw_contexts_disabled)
316 return ERR_PTR(-ENOENT);
317
318 if (ring->id != RCS)
319 return ERR_PTR(-EINVAL);
320
321 if (file == NULL)
322 return ERR_PTR(-EINVAL);
323 314
324 if (id == DEFAULT_CONTEXT_ID) 315 if (id == DEFAULT_CONTEXT_ID)
325 return &file_priv->hang_stats; 316 return &file_priv->hang_stats;
326 317
327 to = i915_gem_context_get(file->driver_priv, id); 318 ctx = NULL;
328 if (to == NULL) 319 if (!dev_priv->hw_contexts_disabled)
320 ctx = i915_gem_context_get(file->driver_priv, id);
321 if (ctx == NULL)
329 return ERR_PTR(-ENOENT); 322 return ERR_PTR(-ENOENT);
330 323
331 return &to->hang_stats; 324 return &ctx->hang_stats;
332} 325}
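The hang-stats lookup is simplified to two cases: the default context always answers from the file's private stats, and everything else goes through the normal context lookup, which fails when HW contexts are disabled or the id is unknown. A small sketch of that control flow with stand-in types:

#include <stdio.h>

#define DEFAULT_CONTEXT_ID 0

struct hang_stats { int batch_active; };
struct file_priv  { struct hang_stats hang_stats; };

/* Stand-in for i915_gem_context_get(): NULL when contexts are disabled
 * or the id is unknown (the driver turns that into -ENOENT). */
static struct hang_stats *lookup_ctx_stats(unsigned int id, int hw_contexts_disabled)
{
	static struct hang_stats ctx1_stats;

	if (hw_contexts_disabled || id != 1)
		return NULL;
	return &ctx1_stats;
}

static struct hang_stats *get_hang_stats(struct file_priv *fpriv,
					 unsigned int id,
					 int hw_contexts_disabled)
{
	if (id == DEFAULT_CONTEXT_ID)
		return &fpriv->hang_stats;   /* always available, even without HW contexts */

	return lookup_ctx_stats(id, hw_contexts_disabled);
}

int main(void)
{
	struct file_priv fpriv = { { 0 } };

	printf("default: %p\n", (void *)get_hang_stats(&fpriv, DEFAULT_CONTEXT_ID, 1));
	printf("unknown: %p\n", (void *)get_hang_stats(&fpriv, 7, 0));
	return 0;
}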
333 326
334void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) 327void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
@@ -377,7 +370,7 @@ mi_set_context(struct intel_ring_buffer *ring,
377 370
378 intel_ring_emit(ring, MI_NOOP); 371 intel_ring_emit(ring, MI_NOOP);
379 intel_ring_emit(ring, MI_SET_CONTEXT); 372 intel_ring_emit(ring, MI_SET_CONTEXT);
380 intel_ring_emit(ring, new_context->obj->gtt_offset | 373 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
381 MI_MM_SPACE_GTT | 374 MI_MM_SPACE_GTT |
382 MI_SAVE_EXT_STATE_EN | 375 MI_SAVE_EXT_STATE_EN |
383 MI_RESTORE_EXT_STATE_EN | 376 MI_RESTORE_EXT_STATE_EN |
@@ -407,7 +400,7 @@ static int do_switch(struct i915_hw_context *to)
407 if (from == to) 400 if (from == to)
408 return 0; 401 return 0;
409 402
410 ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false); 403 ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
411 if (ret) 404 if (ret)
412 return ret; 405 return ret;
413 406
@@ -443,7 +436,10 @@ static int do_switch(struct i915_hw_context *to)
443 * MI_SET_CONTEXT instead of when the next seqno has completed. 436 * MI_SET_CONTEXT instead of when the next seqno has completed.
444 */ 437 */
445 if (from != NULL) { 438 if (from != NULL) {
439 struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
440 struct i915_address_space *ggtt = &dev_priv->gtt.base;
446 from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; 441 from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
442 list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
447 i915_gem_object_move_to_active(from->obj, ring); 443 i915_gem_object_move_to_active(from->obj, ring);
448 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the 444 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
449 * whole damn pipeline, we don't need to explicitly mark the 445 * whole damn pipeline, we don't need to explicitly mark the
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 582e6a5f3dac..775d506b3208 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -97,7 +97,7 @@ i915_verify_lists(struct drm_device *dev)
97 } 97 }
98 } 98 }
99 99
100 list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) { 100 list_for_each_entry(obj, &i915_gtt_vm->inactive_list, list) {
101 if (obj->base.dev != dev || 101 if (obj->base.dev != dev ||
102 !atomic_read(&obj->base.refcount.refcount)) { 102 !atomic_read(&obj->base.refcount.refcount)) {
103 DRM_ERROR("freed inactive %p\n", obj); 103 DRM_ERROR("freed inactive %p\n", obj);
@@ -115,73 +115,4 @@ i915_verify_lists(struct drm_device *dev)
115 115
116 return warned = err; 116 return warned = err;
117} 117}
118#endif /* WATCH_INACTIVE */ 118#endif /* WATCH_LIST */
119
120#if WATCH_COHERENCY
121void
122i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
123{
124 struct drm_device *dev = obj->base.dev;
125 int page;
126 uint32_t *gtt_mapping;
127 uint32_t *backing_map = NULL;
128 int bad_count = 0;
129
130 DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
131 __func__, obj, obj->gtt_offset, handle,
132 obj->size / 1024);
133
134 gtt_mapping = ioremap(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
135 obj->base.size);
136 if (gtt_mapping == NULL) {
137 DRM_ERROR("failed to map GTT space\n");
138 return;
139 }
140
141 for (page = 0; page < obj->size / PAGE_SIZE; page++) {
142 int i;
143
144 backing_map = kmap_atomic(obj->pages[page]);
145
146 if (backing_map == NULL) {
147 DRM_ERROR("failed to map backing page\n");
148 goto out;
149 }
150
151 for (i = 0; i < PAGE_SIZE / 4; i++) {
152 uint32_t cpuval = backing_map[i];
153 uint32_t gttval = readl(gtt_mapping +
154 page * 1024 + i);
155
156 if (cpuval != gttval) {
157 DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
158 "0x%08x vs 0x%08x\n",
159 (int)(obj->gtt_offset +
160 page * PAGE_SIZE + i * 4),
161 cpuval, gttval);
162 if (bad_count++ >= 8) {
163 DRM_INFO("...\n");
164 goto out;
165 }
166 }
167 }
168 kunmap_atomic(backing_map);
169 backing_map = NULL;
170 }
171
172 out:
173 if (backing_map != NULL)
174 kunmap_atomic(backing_map);
175 iounmap(gtt_mapping);
176
177 /* give syslog time to catch up */
178 msleep(1);
179
180 /* Directly flush the object, since we just loaded values with the CPU
181 * from the backing pages and we don't want to disturb the cache
182 * management that we're trying to observe.
183 */
184
185 i915_gem_clflush_object(obj);
186}
187#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 9e6578330801..e918b05fcbdd 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -27,10 +27,15 @@
27#include "i915_drv.h" 27#include "i915_drv.h"
28#include <linux/dma-buf.h> 28#include <linux/dma-buf.h>
29 29
30static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
31{
32 return to_intel_bo(buf->priv);
33}
34
30static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment, 35static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
31 enum dma_data_direction dir) 36 enum dma_data_direction dir)
32{ 37{
33 struct drm_i915_gem_object *obj = attachment->dmabuf->priv; 38 struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
34 struct sg_table *st; 39 struct sg_table *st;
35 struct scatterlist *src, *dst; 40 struct scatterlist *src, *dst;
36 int ret, i; 41 int ret, i;
@@ -85,7 +90,7 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
85 struct sg_table *sg, 90 struct sg_table *sg,
86 enum dma_data_direction dir) 91 enum dma_data_direction dir)
87{ 92{
88 struct drm_i915_gem_object *obj = attachment->dmabuf->priv; 93 struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
89 94
90 mutex_lock(&obj->base.dev->struct_mutex); 95 mutex_lock(&obj->base.dev->struct_mutex);
91 96
@@ -98,20 +103,9 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
98 mutex_unlock(&obj->base.dev->struct_mutex); 103 mutex_unlock(&obj->base.dev->struct_mutex);
99} 104}
100 105
101static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
102{
103 struct drm_i915_gem_object *obj = dma_buf->priv;
104
105 if (obj->base.export_dma_buf == dma_buf) {
106 /* drop the reference on the export fd holds */
107 obj->base.export_dma_buf = NULL;
108 drm_gem_object_unreference_unlocked(&obj->base);
109 }
110}
111
112static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) 106static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
113{ 107{
114 struct drm_i915_gem_object *obj = dma_buf->priv; 108 struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
115 struct drm_device *dev = obj->base.dev; 109 struct drm_device *dev = obj->base.dev;
116 struct sg_page_iter sg_iter; 110 struct sg_page_iter sg_iter;
117 struct page **pages; 111 struct page **pages;
@@ -159,7 +153,7 @@ error:
159 153
160static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) 154static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
161{ 155{
162 struct drm_i915_gem_object *obj = dma_buf->priv; 156 struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
163 struct drm_device *dev = obj->base.dev; 157 struct drm_device *dev = obj->base.dev;
164 int ret; 158 int ret;
165 159
@@ -202,7 +196,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
202 196
203static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction) 197static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
204{ 198{
205 struct drm_i915_gem_object *obj = dma_buf->priv; 199 struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
206 struct drm_device *dev = obj->base.dev; 200 struct drm_device *dev = obj->base.dev;
207 int ret; 201 int ret;
208 bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE); 202 bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
@@ -219,7 +213,7 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size
219static const struct dma_buf_ops i915_dmabuf_ops = { 213static const struct dma_buf_ops i915_dmabuf_ops = {
220 .map_dma_buf = i915_gem_map_dma_buf, 214 .map_dma_buf = i915_gem_map_dma_buf,
221 .unmap_dma_buf = i915_gem_unmap_dma_buf, 215 .unmap_dma_buf = i915_gem_unmap_dma_buf,
222 .release = i915_gem_dmabuf_release, 216 .release = drm_gem_dmabuf_release,
223 .kmap = i915_gem_dmabuf_kmap, 217 .kmap = i915_gem_dmabuf_kmap,
224 .kmap_atomic = i915_gem_dmabuf_kmap_atomic, 218 .kmap_atomic = i915_gem_dmabuf_kmap_atomic,
225 .kunmap = i915_gem_dmabuf_kunmap, 219 .kunmap = i915_gem_dmabuf_kunmap,
@@ -233,9 +227,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
233struct dma_buf *i915_gem_prime_export(struct drm_device *dev, 227struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
234 struct drm_gem_object *gem_obj, int flags) 228 struct drm_gem_object *gem_obj, int flags)
235{ 229{
236 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); 230 return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags);
237
238 return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
239} 231}
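Wrapping the dma_buf->priv cast in dma_buf_to_obj() keeps the to_intel_bo() conversion in one place now that the exported priv is the base drm_gem_object rather than the i915 object. A minimal sketch of the container-of style helper; the types here are placeholders:

#include <stddef.h>
#include <stdio.h>

struct gem_object  { int handle; };
struct i915_object { struct gem_object base; int cache_level; };
struct dma_buf     { void *priv; };

/* priv stores the embedded base object; recover the wrapping object from it. */
static struct i915_object *dma_buf_to_obj(struct dma_buf *buf)
{
	struct gem_object *base = buf->priv;

	return (struct i915_object *)((char *)base -
				      offsetof(struct i915_object, base));
}

int main(void)
{
	struct i915_object obj = { .base = { .handle = 7 }, .cache_level = 1 };
	struct dma_buf buf = { .priv = &obj.base };

	printf("%d %d\n", dma_buf_to_obj(&buf)->base.handle,
			  dma_buf_to_obj(&buf)->cache_level);
	return 0;
}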
240 232
241static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) 233static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
@@ -272,7 +264,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
272 264
273 /* is this one of own objects? */ 265 /* is this one of own objects? */
274 if (dma_buf->ops == &i915_dmabuf_ops) { 266 if (dma_buf->ops == &i915_dmabuf_ops) {
275 obj = dma_buf->priv; 267 obj = dma_buf_to_obj(dma_buf);
276 /* is it from our device? */ 268 /* is it from our device? */
277 if (obj->base.dev == dev) { 269 if (obj->base.dev == dev) {
278 /* 270 /*
@@ -297,12 +289,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
297 goto fail_detach; 289 goto fail_detach;
298 } 290 }
299 291
300 ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size); 292 drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
301 if (ret) {
302 i915_gem_object_free(obj);
303 goto fail_detach;
304 }
305
306 i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops); 293 i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
307 obj->base.import_attach = attach; 294 obj->base.import_attach = attach;
308 295
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index c86d5d9356fd..91b700155850 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -32,23 +32,23 @@
32#include "i915_trace.h" 32#include "i915_trace.h"
33 33
34static bool 34static bool
35mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) 35mark_free(struct i915_vma *vma, struct list_head *unwind)
36{ 36{
37 if (obj->pin_count) 37 if (vma->obj->pin_count)
38 return false; 38 return false;
39 39
40 list_add(&obj->exec_list, unwind); 40 list_add(&vma->exec_list, unwind);
41 return drm_mm_scan_add_block(obj->gtt_space); 41 return drm_mm_scan_add_block(&vma->node);
42} 42}
43 43
44int 44int
45i915_gem_evict_something(struct drm_device *dev, int min_size, 45i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
46 unsigned alignment, unsigned cache_level, 46 int min_size, unsigned alignment, unsigned cache_level,
47 bool mappable, bool nonblocking) 47 bool mappable, bool nonblocking)
48{ 48{
49 drm_i915_private_t *dev_priv = dev->dev_private; 49 drm_i915_private_t *dev_priv = dev->dev_private;
50 struct list_head eviction_list, unwind_list; 50 struct list_head eviction_list, unwind_list;
51 struct drm_i915_gem_object *obj; 51 struct i915_vma *vma;
52 int ret = 0; 52 int ret = 0;
53 53
54 trace_i915_gem_evict(dev, min_size, alignment, mappable); 54 trace_i915_gem_evict(dev, min_size, alignment, mappable);
@@ -77,17 +77,17 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
77 */ 77 */
78 78
79 INIT_LIST_HEAD(&unwind_list); 79 INIT_LIST_HEAD(&unwind_list);
80 if (mappable) 80 if (mappable) {
81 drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, 81 BUG_ON(!i915_is_ggtt(vm));
82 min_size, alignment, cache_level, 82 drm_mm_init_scan_with_range(&vm->mm, min_size,
83 0, dev_priv->gtt.mappable_end); 83 alignment, cache_level, 0,
84 else 84 dev_priv->gtt.mappable_end);
85 drm_mm_init_scan(&dev_priv->mm.gtt_space, 85 } else
86 min_size, alignment, cache_level); 86 drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
87 87
88 /* First see if there is a large enough contiguous idle region... */ 88 /* First see if there is a large enough contiguous idle region... */
89 list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { 89 list_for_each_entry(vma, &vm->inactive_list, mm_list) {
90 if (mark_free(obj, &unwind_list)) 90 if (mark_free(vma, &unwind_list))
91 goto found; 91 goto found;
92 } 92 }
93 93
@@ -95,22 +95,21 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
95 goto none; 95 goto none;
96 96
97 /* Now merge in the soon-to-be-expired objects... */ 97 /* Now merge in the soon-to-be-expired objects... */
98 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 98 list_for_each_entry(vma, &vm->active_list, mm_list) {
99 if (mark_free(obj, &unwind_list)) 99 if (mark_free(vma, &unwind_list))
100 goto found; 100 goto found;
101 } 101 }
102 102
103none: 103none:
104 /* Nothing found, clean up and bail out! */ 104 /* Nothing found, clean up and bail out! */
105 while (!list_empty(&unwind_list)) { 105 while (!list_empty(&unwind_list)) {
106 obj = list_first_entry(&unwind_list, 106 vma = list_first_entry(&unwind_list,
107 struct drm_i915_gem_object, 107 struct i915_vma,
108 exec_list); 108 exec_list);
109 109 ret = drm_mm_scan_remove_block(&vma->node);
110 ret = drm_mm_scan_remove_block(obj->gtt_space);
111 BUG_ON(ret); 110 BUG_ON(ret);
112 111
113 list_del_init(&obj->exec_list); 112 list_del_init(&vma->exec_list);
114 } 113 }
115 114
116 /* We expect the caller to unpin, evict all and try again, or give up. 115 /* We expect the caller to unpin, evict all and try again, or give up.
@@ -124,27 +123,30 @@ found:
124 * temporary list. */ 123 * temporary list. */
125 INIT_LIST_HEAD(&eviction_list); 124 INIT_LIST_HEAD(&eviction_list);
126 while (!list_empty(&unwind_list)) { 125 while (!list_empty(&unwind_list)) {
127 obj = list_first_entry(&unwind_list, 126 vma = list_first_entry(&unwind_list,
128 struct drm_i915_gem_object, 127 struct i915_vma,
129 exec_list); 128 exec_list);
130 if (drm_mm_scan_remove_block(obj->gtt_space)) { 129 if (drm_mm_scan_remove_block(&vma->node)) {
131 list_move(&obj->exec_list, &eviction_list); 130 list_move(&vma->exec_list, &eviction_list);
132 drm_gem_object_reference(&obj->base); 131 drm_gem_object_reference(&vma->obj->base);
133 continue; 132 continue;
134 } 133 }
135 list_del_init(&obj->exec_list); 134 list_del_init(&vma->exec_list);
136 } 135 }
137 136
138 /* Unbinding will emit any required flushes */ 137 /* Unbinding will emit any required flushes */
139 while (!list_empty(&eviction_list)) { 138 while (!list_empty(&eviction_list)) {
140 obj = list_first_entry(&eviction_list, 139 struct drm_gem_object *obj;
141 struct drm_i915_gem_object, 140 vma = list_first_entry(&eviction_list,
141 struct i915_vma,
142 exec_list); 142 exec_list);
143
144 obj = &vma->obj->base;
145 list_del_init(&vma->exec_list);
143 if (ret == 0) 146 if (ret == 0)
144 ret = i915_gem_object_unbind(obj); 147 ret = i915_vma_unbind(vma);
145 148
146 list_del_init(&obj->exec_list); 149 drm_gem_object_unreference(obj);
147 drm_gem_object_unreference(&obj->base);
148 } 150 }
149 151
150 return ret; 152 return ret;
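The eviction scan now operates on vmas: unpinned vmas from the inactive (and then active) list are speculatively added to the scan, and if no hole of the right size emerges the additions are rolled back in reverse order; on success the rolled-back blocks become the eviction list. A toy model of that add/roll-back discipline; the real code uses drm_mm_scan_add_block()/drm_mm_scan_remove_block():

#include <stdio.h>

/* Toy "scan": a hole is deemed found once the summed sizes reach the request. */
struct vma_sketch { const char *name; int size, pinned; };

static int evict_scan(struct vma_sketch *v, int n, int need)
{
	int unwind[16], depth = 0, freed = 0, i;

	for (i = 0; i < n && freed < need; i++) {
		if (v[i].pinned)
			continue;            /* mark_free() rejects pinned vmas */
		unwind[depth++] = i;     /* remember scan order for roll-back */
		freed += v[i].size;
	}

	if (freed < need) {              /* nothing found: undo the scan */
		while (depth--)
			printf("scan-remove %s\n", v[unwind[depth]].name);
		return -1;
	}

	while (depth--)                  /* found: these become the eviction list */
		printf("evict %s\n", v[unwind[depth]].name);
	return 0;
}

int main(void)
{
	struct vma_sketch inactive[] = {
		{ "A", 4, 0 }, { "B", 8, 1 }, { "C", 8, 0 },
	};

	return evict_scan(inactive, 3, 12) ? 1 : 0;
}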
@@ -154,12 +156,18 @@ int
154i915_gem_evict_everything(struct drm_device *dev) 156i915_gem_evict_everything(struct drm_device *dev)
155{ 157{
156 drm_i915_private_t *dev_priv = dev->dev_private; 158 drm_i915_private_t *dev_priv = dev->dev_private;
157 struct drm_i915_gem_object *obj, *next; 159 struct i915_address_space *vm;
158 bool lists_empty; 160 struct i915_vma *vma, *next;
161 bool lists_empty = true;
159 int ret; 162 int ret;
160 163
161 lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 164 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
162 list_empty(&dev_priv->mm.active_list)); 165 lists_empty = (list_empty(&vm->inactive_list) &&
166 list_empty(&vm->active_list));
167 if (!lists_empty)
168 lists_empty = false;
169 }
170
163 if (lists_empty) 171 if (lists_empty)
164 return -ENOSPC; 172 return -ENOSPC;
165 173
@@ -176,10 +184,11 @@ i915_gem_evict_everything(struct drm_device *dev)
176 i915_gem_retire_requests(dev); 184 i915_gem_retire_requests(dev);
177 185
178 /* Having flushed everything, unbind() should never raise an error */ 186 /* Having flushed everything, unbind() should never raise an error */
179 list_for_each_entry_safe(obj, next, 187 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
180 &dev_priv->mm.inactive_list, mm_list) 188 list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
181 if (obj->pin_count == 0) 189 if (vma->obj->pin_count == 0)
182 WARN_ON(i915_gem_object_unbind(obj)); 190 WARN_ON(i915_vma_unbind(vma));
191 }
183 192
184 return 0; 193 return 0;
185} 194}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 87a3227e5179..792c52a235ee 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -172,9 +172,60 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
172} 172}
173 173
174static int 174static int
175relocate_entry_cpu(struct drm_i915_gem_object *obj,
176 struct drm_i915_gem_relocation_entry *reloc)
177{
178 uint32_t page_offset = offset_in_page(reloc->offset);
179 char *vaddr;
180 int ret = -EINVAL;
181
182 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
183 if (ret)
184 return ret;
185
186 vaddr = kmap_atomic(i915_gem_object_get_page(obj,
187 reloc->offset >> PAGE_SHIFT));
188 *(uint32_t *)(vaddr + page_offset) = reloc->delta;
189 kunmap_atomic(vaddr);
190
191 return 0;
192}
193
194static int
195relocate_entry_gtt(struct drm_i915_gem_object *obj,
196 struct drm_i915_gem_relocation_entry *reloc)
197{
198 struct drm_device *dev = obj->base.dev;
199 struct drm_i915_private *dev_priv = dev->dev_private;
200 uint32_t __iomem *reloc_entry;
201 void __iomem *reloc_page;
202 int ret = -EINVAL;
203
204 ret = i915_gem_object_set_to_gtt_domain(obj, true);
205 if (ret)
206 return ret;
207
208 ret = i915_gem_object_put_fence(obj);
209 if (ret)
210 return ret;
211
212 /* Map the page containing the relocation we're going to perform. */
213 reloc->offset += i915_gem_obj_ggtt_offset(obj);
214 reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
215 reloc->offset & PAGE_MASK);
216 reloc_entry = (uint32_t __iomem *)
217 (reloc_page + offset_in_page(reloc->offset));
218 iowrite32(reloc->delta, reloc_entry);
219 io_mapping_unmap_atomic(reloc_page);
220
221 return 0;
222}
223
224static int
175i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, 225i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
176 struct eb_objects *eb, 226 struct eb_objects *eb,
177 struct drm_i915_gem_relocation_entry *reloc) 227 struct drm_i915_gem_relocation_entry *reloc,
228 struct i915_address_space *vm)
178{ 229{
179 struct drm_device *dev = obj->base.dev; 230 struct drm_device *dev = obj->base.dev;
180 struct drm_gem_object *target_obj; 231 struct drm_gem_object *target_obj;
@@ -188,7 +239,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
188 return -ENOENT; 239 return -ENOENT;
189 240
190 target_i915_obj = to_intel_bo(target_obj); 241 target_i915_obj = to_intel_bo(target_obj);
191 target_offset = target_i915_obj->gtt_offset; 242 target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
192 243
193 /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and 244 /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
194 * pipe_control writes because the gpu doesn't properly redirect them 245 * pipe_control writes because the gpu doesn't properly redirect them
@@ -254,40 +305,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
254 return -EFAULT; 305 return -EFAULT;
255 306
256 reloc->delta += target_offset; 307 reloc->delta += target_offset;
257 if (use_cpu_reloc(obj)) { 308 if (use_cpu_reloc(obj))
258 uint32_t page_offset = reloc->offset & ~PAGE_MASK; 309 ret = relocate_entry_cpu(obj, reloc);
259 char *vaddr; 310 else
260 311 ret = relocate_entry_gtt(obj, reloc);
261 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
262 if (ret)
263 return ret;
264
265 vaddr = kmap_atomic(i915_gem_object_get_page(obj,
266 reloc->offset >> PAGE_SHIFT));
267 *(uint32_t *)(vaddr + page_offset) = reloc->delta;
268 kunmap_atomic(vaddr);
269 } else {
270 struct drm_i915_private *dev_priv = dev->dev_private;
271 uint32_t __iomem *reloc_entry;
272 void __iomem *reloc_page;
273
274 ret = i915_gem_object_set_to_gtt_domain(obj, true);
275 if (ret)
276 return ret;
277
278 ret = i915_gem_object_put_fence(obj);
279 if (ret)
280 return ret;
281
282 /* Map the page containing the relocation we're going to perform. */
283 reloc->offset += obj->gtt_offset;
284 reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
285 reloc->offset & PAGE_MASK);
286 reloc_entry = (uint32_t __iomem *)
287 (reloc_page + (reloc->offset & ~PAGE_MASK));
288 iowrite32(reloc->delta, reloc_entry);
289 io_mapping_unmap_atomic(reloc_page);
290 }
291 312
292 /* and update the user's relocation entry */ 313 /* and update the user's relocation entry */
293 reloc->presumed_offset = target_offset; 314 reloc->presumed_offset = target_offset;
@@ -297,7 +318,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
297 318
298static int 319static int
299i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, 320i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
300 struct eb_objects *eb) 321 struct eb_objects *eb,
322 struct i915_address_space *vm)
301{ 323{
302#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry)) 324#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
303 struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)]; 325 struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
@@ -321,7 +343,8 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
321 do { 343 do {
322 u64 offset = r->presumed_offset; 344 u64 offset = r->presumed_offset;
323 345
324 ret = i915_gem_execbuffer_relocate_entry(obj, eb, r); 346 ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
347 vm);
325 if (ret) 348 if (ret)
326 return ret; 349 return ret;
327 350
@@ -344,13 +367,15 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
344static int 367static int
345i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, 368i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
346 struct eb_objects *eb, 369 struct eb_objects *eb,
347 struct drm_i915_gem_relocation_entry *relocs) 370 struct drm_i915_gem_relocation_entry *relocs,
371 struct i915_address_space *vm)
348{ 372{
349 const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 373 const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
350 int i, ret; 374 int i, ret;
351 375
352 for (i = 0; i < entry->relocation_count; i++) { 376 for (i = 0; i < entry->relocation_count; i++) {
353 ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]); 377 ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
378 vm);
354 if (ret) 379 if (ret)
355 return ret; 380 return ret;
356 } 381 }
@@ -359,7 +384,8 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
359} 384}
360 385
361static int 386static int
362i915_gem_execbuffer_relocate(struct eb_objects *eb) 387i915_gem_execbuffer_relocate(struct eb_objects *eb,
388 struct i915_address_space *vm)
363{ 389{
364 struct drm_i915_gem_object *obj; 390 struct drm_i915_gem_object *obj;
365 int ret = 0; 391 int ret = 0;
@@ -373,7 +399,7 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb)
373 */ 399 */
374 pagefault_disable(); 400 pagefault_disable();
375 list_for_each_entry(obj, &eb->objects, exec_list) { 401 list_for_each_entry(obj, &eb->objects, exec_list) {
376 ret = i915_gem_execbuffer_relocate_object(obj, eb); 402 ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
377 if (ret) 403 if (ret)
378 break; 404 break;
379 } 405 }
@@ -395,6 +421,7 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
395static int 421static int
396i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, 422i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
397 struct intel_ring_buffer *ring, 423 struct intel_ring_buffer *ring,
424 struct i915_address_space *vm,
398 bool *need_reloc) 425 bool *need_reloc)
399{ 426{
400 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 427 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
@@ -409,7 +436,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
409 obj->tiling_mode != I915_TILING_NONE; 436 obj->tiling_mode != I915_TILING_NONE;
410 need_mappable = need_fence || need_reloc_mappable(obj); 437 need_mappable = need_fence || need_reloc_mappable(obj);
411 438
412 ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false); 439 ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
440 false);
413 if (ret) 441 if (ret)
414 return ret; 442 return ret;
415 443
@@ -436,8 +464,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
436 obj->has_aliasing_ppgtt_mapping = 1; 464 obj->has_aliasing_ppgtt_mapping = 1;
437 } 465 }
438 466
439 if (entry->offset != obj->gtt_offset) { 467 if (entry->offset != i915_gem_obj_offset(obj, vm)) {
440 entry->offset = obj->gtt_offset; 468 entry->offset = i915_gem_obj_offset(obj, vm);
441 *need_reloc = true; 469 *need_reloc = true;
442 } 470 }
443 471
@@ -458,7 +486,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
458{ 486{
459 struct drm_i915_gem_exec_object2 *entry; 487 struct drm_i915_gem_exec_object2 *entry;
460 488
461 if (!obj->gtt_space) 489 if (!i915_gem_obj_bound_any(obj))
462 return; 490 return;
463 491
464 entry = obj->exec_entry; 492 entry = obj->exec_entry;
@@ -475,6 +503,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
475static int 503static int
476i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, 504i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
477 struct list_head *objects, 505 struct list_head *objects,
506 struct i915_address_space *vm,
478 bool *need_relocs) 507 bool *need_relocs)
479{ 508{
480 struct drm_i915_gem_object *obj; 509 struct drm_i915_gem_object *obj;
@@ -529,31 +558,37 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
529 list_for_each_entry(obj, objects, exec_list) { 558 list_for_each_entry(obj, objects, exec_list) {
530 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 559 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
531 bool need_fence, need_mappable; 560 bool need_fence, need_mappable;
561 u32 obj_offset;
532 562
533 if (!obj->gtt_space) 563 if (!i915_gem_obj_bound(obj, vm))
534 continue; 564 continue;
535 565
566 obj_offset = i915_gem_obj_offset(obj, vm);
536 need_fence = 567 need_fence =
537 has_fenced_gpu_access && 568 has_fenced_gpu_access &&
538 entry->flags & EXEC_OBJECT_NEEDS_FENCE && 569 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
539 obj->tiling_mode != I915_TILING_NONE; 570 obj->tiling_mode != I915_TILING_NONE;
540 need_mappable = need_fence || need_reloc_mappable(obj); 571 need_mappable = need_fence || need_reloc_mappable(obj);
541 572
542 if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) || 573 WARN_ON((need_mappable || need_fence) &&
574 !i915_is_ggtt(vm));
575
576 if ((entry->alignment &&
577 obj_offset & (entry->alignment - 1)) ||
543 (need_mappable && !obj->map_and_fenceable)) 578 (need_mappable && !obj->map_and_fenceable))
544 ret = i915_gem_object_unbind(obj); 579 ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
545 else 580 else
546 ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); 581 ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
547 if (ret) 582 if (ret)
548 goto err; 583 goto err;
549 } 584 }
550 585
551 /* Bind fresh objects */ 586 /* Bind fresh objects */
552 list_for_each_entry(obj, objects, exec_list) { 587 list_for_each_entry(obj, objects, exec_list) {
553 if (obj->gtt_space) 588 if (i915_gem_obj_bound(obj, vm))
554 continue; 589 continue;
555 590
556 ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); 591 ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
557 if (ret) 592 if (ret)
558 goto err; 593 goto err;
559 } 594 }
@@ -577,7 +612,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
577 struct drm_file *file, 612 struct drm_file *file,
578 struct intel_ring_buffer *ring, 613 struct intel_ring_buffer *ring,
579 struct eb_objects *eb, 614 struct eb_objects *eb,
580 struct drm_i915_gem_exec_object2 *exec) 615 struct drm_i915_gem_exec_object2 *exec,
616 struct i915_address_space *vm)
581{ 617{
582 struct drm_i915_gem_relocation_entry *reloc; 618 struct drm_i915_gem_relocation_entry *reloc;
583 struct drm_i915_gem_object *obj; 619 struct drm_i915_gem_object *obj;
@@ -661,14 +697,15 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
661 goto err; 697 goto err;
662 698
663 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; 699 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
664 ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs); 700 ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
665 if (ret) 701 if (ret)
666 goto err; 702 goto err;
667 703
668 list_for_each_entry(obj, &eb->objects, exec_list) { 704 list_for_each_entry(obj, &eb->objects, exec_list) {
669 int offset = obj->exec_entry - exec; 705 int offset = obj->exec_entry - exec;
670 ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, 706 ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
671 reloc + reloc_offset[offset]); 707 reloc + reloc_offset[offset],
708 vm);
672 if (ret) 709 if (ret)
673 goto err; 710 goto err;
674 } 711 }
@@ -691,6 +728,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
691{ 728{
692 struct drm_i915_gem_object *obj; 729 struct drm_i915_gem_object *obj;
693 uint32_t flush_domains = 0; 730 uint32_t flush_domains = 0;
731 bool flush_chipset = false;
694 int ret; 732 int ret;
695 733
696 list_for_each_entry(obj, objects, exec_list) { 734 list_for_each_entry(obj, objects, exec_list) {
@@ -699,12 +737,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
699 return ret; 737 return ret;
700 738
701 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) 739 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
702 i915_gem_clflush_object(obj); 740 flush_chipset |= i915_gem_clflush_object(obj, false);
703 741
704 flush_domains |= obj->base.write_domain; 742 flush_domains |= obj->base.write_domain;
705 } 743 }
706 744
707 if (flush_domains & I915_GEM_DOMAIN_CPU) 745 if (flush_chipset)
708 i915_gem_chipset_flush(ring->dev); 746 i915_gem_chipset_flush(ring->dev);
709 747
710 if (flush_domains & I915_GEM_DOMAIN_GTT) 748 if (flush_domains & I915_GEM_DOMAIN_GTT)
@@ -758,8 +796,10 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
758 if (!access_ok(VERIFY_WRITE, ptr, length)) 796 if (!access_ok(VERIFY_WRITE, ptr, length))
759 return -EFAULT; 797 return -EFAULT;
760 798
761 if (fault_in_multipages_readable(ptr, length)) 799 if (likely(!i915_prefault_disable)) {
762 return -EFAULT; 800 if (fault_in_multipages_readable(ptr, length))
801 return -EFAULT;
802 }
763 } 803 }
764 804
765 return 0; 805 return 0;
@@ -767,6 +807,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
767 807
768static void 808static void
769i915_gem_execbuffer_move_to_active(struct list_head *objects, 809i915_gem_execbuffer_move_to_active(struct list_head *objects,
810 struct i915_address_space *vm,
770 struct intel_ring_buffer *ring) 811 struct intel_ring_buffer *ring)
771{ 812{
772 struct drm_i915_gem_object *obj; 813 struct drm_i915_gem_object *obj;
@@ -781,6 +822,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
781 obj->base.read_domains = obj->base.pending_read_domains; 822 obj->base.read_domains = obj->base.pending_read_domains;
782 obj->fenced_gpu_access = obj->pending_fenced_gpu_access; 823 obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
783 824
825 /* FIXME: This lookup gets fixed later <-- danvet */
826 list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
784 i915_gem_object_move_to_active(obj, ring); 827 i915_gem_object_move_to_active(obj, ring);
785 if (obj->base.write_domain) { 828 if (obj->base.write_domain) {
786 obj->dirty = 1; 829 obj->dirty = 1;
@@ -835,7 +878,8 @@ static int
835i915_gem_do_execbuffer(struct drm_device *dev, void *data, 878i915_gem_do_execbuffer(struct drm_device *dev, void *data,
836 struct drm_file *file, 879 struct drm_file *file,
837 struct drm_i915_gem_execbuffer2 *args, 880 struct drm_i915_gem_execbuffer2 *args,
838 struct drm_i915_gem_exec_object2 *exec) 881 struct drm_i915_gem_exec_object2 *exec,
882 struct i915_address_space *vm)
839{ 883{
840 drm_i915_private_t *dev_priv = dev->dev_private; 884 drm_i915_private_t *dev_priv = dev->dev_private;
841 struct eb_objects *eb; 885 struct eb_objects *eb;
@@ -872,7 +916,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
872 break; 916 break;
873 case I915_EXEC_BSD: 917 case I915_EXEC_BSD:
874 ring = &dev_priv->ring[VCS]; 918 ring = &dev_priv->ring[VCS];
875 if (ctx_id != 0) { 919 if (ctx_id != DEFAULT_CONTEXT_ID) {
876 DRM_DEBUG("Ring %s doesn't support contexts\n", 920 DRM_DEBUG("Ring %s doesn't support contexts\n",
877 ring->name); 921 ring->name);
878 return -EPERM; 922 return -EPERM;
@@ -880,7 +924,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
880 break; 924 break;
881 case I915_EXEC_BLT: 925 case I915_EXEC_BLT:
882 ring = &dev_priv->ring[BCS]; 926 ring = &dev_priv->ring[BCS];
883 if (ctx_id != 0) { 927 if (ctx_id != DEFAULT_CONTEXT_ID) {
884 DRM_DEBUG("Ring %s doesn't support contexts\n", 928 DRM_DEBUG("Ring %s doesn't support contexts\n",
885 ring->name); 929 ring->name);
886 return -EPERM; 930 return -EPERM;
@@ -888,7 +932,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
888 break; 932 break;
889 case I915_EXEC_VEBOX: 933 case I915_EXEC_VEBOX:
890 ring = &dev_priv->ring[VECS]; 934 ring = &dev_priv->ring[VECS];
891 if (ctx_id != 0) { 935 if (ctx_id != DEFAULT_CONTEXT_ID) {
892 DRM_DEBUG("Ring %s doesn't support contexts\n", 936 DRM_DEBUG("Ring %s doesn't support contexts\n",
893 ring->name); 937 ring->name);
894 return -EPERM; 938 return -EPERM;
@@ -972,7 +1016,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
972 if (ret) 1016 if (ret)
973 goto pre_mutex_err; 1017 goto pre_mutex_err;
974 1018
975 if (dev_priv->mm.suspended) { 1019 if (dev_priv->ums.mm_suspended) {
976 mutex_unlock(&dev->struct_mutex); 1020 mutex_unlock(&dev->struct_mutex);
977 ret = -EBUSY; 1021 ret = -EBUSY;
978 goto pre_mutex_err; 1022 goto pre_mutex_err;
@@ -997,17 +1041,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
997 1041
998 /* Move the objects en-masse into the GTT, evicting if necessary. */ 1042 /* Move the objects en-masse into the GTT, evicting if necessary. */
999 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; 1043 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1000 ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs); 1044 ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
1001 if (ret) 1045 if (ret)
1002 goto err; 1046 goto err;
1003 1047
1004 /* The objects are in their final locations, apply the relocations. */ 1048 /* The objects are in their final locations, apply the relocations. */
1005 if (need_relocs) 1049 if (need_relocs)
1006 ret = i915_gem_execbuffer_relocate(eb); 1050 ret = i915_gem_execbuffer_relocate(eb, vm);
1007 if (ret) { 1051 if (ret) {
1008 if (ret == -EFAULT) { 1052 if (ret == -EFAULT) {
1009 ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring, 1053 ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
1010 eb, exec); 1054 eb, exec, vm);
1011 BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 1055 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1012 } 1056 }
1013 if (ret) 1057 if (ret)
@@ -1058,7 +1102,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1058 goto err; 1102 goto err;
1059 } 1103 }
1060 1104
1061 exec_start = batch_obj->gtt_offset + args->batch_start_offset; 1105 exec_start = i915_gem_obj_offset(batch_obj, vm) +
1106 args->batch_start_offset;
1062 exec_len = args->batch_len; 1107 exec_len = args->batch_len;
1063 if (cliprects) { 1108 if (cliprects) {
1064 for (i = 0; i < args->num_cliprects; i++) { 1109 for (i = 0; i < args->num_cliprects; i++) {
@@ -1083,7 +1128,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1083 1128
1084 trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); 1129 trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
1085 1130
1086 i915_gem_execbuffer_move_to_active(&eb->objects, ring); 1131 i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
1087 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); 1132 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
1088 1133
1089err: 1134err:
@@ -1104,6 +1149,7 @@ int
1104i915_gem_execbuffer(struct drm_device *dev, void *data, 1149i915_gem_execbuffer(struct drm_device *dev, void *data,
1105 struct drm_file *file) 1150 struct drm_file *file)
1106{ 1151{
1152 struct drm_i915_private *dev_priv = dev->dev_private;
1107 struct drm_i915_gem_execbuffer *args = data; 1153 struct drm_i915_gem_execbuffer *args = data;
1108 struct drm_i915_gem_execbuffer2 exec2; 1154 struct drm_i915_gem_execbuffer2 exec2;
1109 struct drm_i915_gem_exec_object *exec_list = NULL; 1155 struct drm_i915_gem_exec_object *exec_list = NULL;
@@ -1159,7 +1205,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1159 exec2.flags = I915_EXEC_RENDER; 1205 exec2.flags = I915_EXEC_RENDER;
1160 i915_execbuffer2_set_context_id(exec2, 0); 1206 i915_execbuffer2_set_context_id(exec2, 0);
1161 1207
1162 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); 1208 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
1209 &dev_priv->gtt.base);
1163 if (!ret) { 1210 if (!ret) {
1164 /* Copy the new buffer offsets back to the user's exec list. */ 1211 /* Copy the new buffer offsets back to the user's exec list. */
1165 for (i = 0; i < args->buffer_count; i++) 1212 for (i = 0; i < args->buffer_count; i++)
@@ -1185,6 +1232,7 @@ int
1185i915_gem_execbuffer2(struct drm_device *dev, void *data, 1232i915_gem_execbuffer2(struct drm_device *dev, void *data,
1186 struct drm_file *file) 1233 struct drm_file *file)
1187{ 1234{
1235 struct drm_i915_private *dev_priv = dev->dev_private;
1188 struct drm_i915_gem_execbuffer2 *args = data; 1236 struct drm_i915_gem_execbuffer2 *args = data;
1189 struct drm_i915_gem_exec_object2 *exec2_list = NULL; 1237 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1190 int ret; 1238 int ret;
@@ -1215,7 +1263,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1215 return -EFAULT; 1263 return -EFAULT;
1216 } 1264 }
1217 1265
1218 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); 1266 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
1267 &dev_priv->gtt.base);
1219 if (!ret) { 1268 if (!ret) {
1220 /* Copy the new buffer offsets back to the user's exec list. */ 1269 /* Copy the new buffer offsets back to the user's exec list. */
1221 ret = copy_to_user(to_user_ptr(args->buffers_ptr), 1270 ret = copy_to_user(to_user_ptr(args->buffers_ptr),
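
The execbuffer hunks above mostly thread a struct i915_address_space *vm through the relocation and reservation paths and replace reads of the old single obj->gtt_offset field with per-VM lookups such as i915_gem_obj_offset(obj, vm). A minimal, self-contained sketch of that lookup pattern, using purely illustrative types rather than the driver's real ones, might look like this:

/*
 * Sketch only: stand-in types for "one object, one offset per address space".
 * None of these names are the real i915 API.
 */
#include <stdio.h>
#include <stddef.h>

struct address_space { const char *name; };

struct vma {
	struct address_space *vm;   /* the address space this binding lives in */
	unsigned long offset;       /* where the object sits within that space */
	struct vma *next;
};

struct gem_object { struct vma *vma_list; };

/* Replaces a single obj->gtt_offset field: look the offset up per VM. */
static int obj_offset(struct gem_object *obj, struct address_space *vm,
		      unsigned long *out)
{
	struct vma *v;

	for (v = obj->vma_list; v; v = v->next) {
		if (v->vm == vm) {
			*out = v->offset;
			return 0;
		}
	}
	return -1;	/* object is not bound in this address space */
}

int main(void)
{
	struct address_space ggtt = { "ggtt" }, ppgtt = { "ppgtt" };
	struct vma in_ppgtt = { &ppgtt, 0x20000, NULL };
	struct vma in_ggtt = { &ggtt, 0x10000, &in_ppgtt };
	struct gem_object obj = { &in_ggtt };
	unsigned long off;

	if (obj_offset(&obj, &ppgtt, &off) == 0)
		printf("offset in %s: 0x%lx\n", ppgtt.name, off);
	return 0;
}
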
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 5101ab6869b4..212f6d8c35ec 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -28,8 +28,12 @@
28#include "i915_trace.h" 28#include "i915_trace.h"
29#include "intel_drv.h" 29#include "intel_drv.h"
30 30
31#define GEN6_PPGTT_PD_ENTRIES 512
32#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
33
31/* PPGTT stuff */ 34/* PPGTT stuff */
32#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) 35#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
36#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
33 37
34#define GEN6_PDE_VALID (1 << 0) 38#define GEN6_PDE_VALID (1 << 0)
35/* gen6+ has bit 11-4 for physical addr bit 39-32 */ 39/* gen6+ has bit 11-4 for physical addr bit 39-32 */
@@ -39,19 +43,50 @@
39#define GEN6_PTE_UNCACHED (1 << 1) 43#define GEN6_PTE_UNCACHED (1 << 1)
40#define HSW_PTE_UNCACHED (0) 44#define HSW_PTE_UNCACHED (0)
41#define GEN6_PTE_CACHE_LLC (2 << 1) 45#define GEN6_PTE_CACHE_LLC (2 << 1)
42#define GEN6_PTE_CACHE_LLC_MLC (3 << 1) 46#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
43#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) 47#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
48#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
44 49
45static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev, 50/* Cacheability Control is a 4-bit value. The low three bits are stored in *
46 dma_addr_t addr, 51 * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
47 enum i915_cache_level level) 52 */
53#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
54 (((bits) & 0x8) << (11 - 3)))
55#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
56#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
57#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
58#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
59
60static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
61 enum i915_cache_level level)
48{ 62{
49 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 63 gen6_gtt_pte_t pte = GEN6_PTE_VALID;
50 pte |= GEN6_PTE_ADDR_ENCODE(addr); 64 pte |= GEN6_PTE_ADDR_ENCODE(addr);
51 65
52 switch (level) { 66 switch (level) {
53 case I915_CACHE_LLC_MLC: 67 case I915_CACHE_L3_LLC:
54 pte |= GEN6_PTE_CACHE_LLC_MLC; 68 case I915_CACHE_LLC:
69 pte |= GEN6_PTE_CACHE_LLC;
70 break;
71 case I915_CACHE_NONE:
72 pte |= GEN6_PTE_UNCACHED;
73 break;
74 default:
75 WARN_ON(1);
76 }
77
78 return pte;
79}
80
81static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
82 enum i915_cache_level level)
83{
84 gen6_gtt_pte_t pte = GEN6_PTE_VALID;
85 pte |= GEN6_PTE_ADDR_ENCODE(addr);
86
87 switch (level) {
88 case I915_CACHE_L3_LLC:
89 pte |= GEN7_PTE_CACHE_L3_LLC;
55 break; 90 break;
56 case I915_CACHE_LLC: 91 case I915_CACHE_LLC:
57 pte |= GEN6_PTE_CACHE_LLC; 92 pte |= GEN6_PTE_CACHE_LLC;
@@ -60,7 +95,7 @@ static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
60 pte |= GEN6_PTE_UNCACHED; 95 pte |= GEN6_PTE_UNCACHED;
61 break; 96 break;
62 default: 97 default:
63 BUG(); 98 WARN_ON(1);
64 } 99 }
65 100
66 return pte; 101 return pte;
@@ -69,8 +104,7 @@ static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
69#define BYT_PTE_WRITEABLE (1 << 1) 104#define BYT_PTE_WRITEABLE (1 << 1)
70#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2) 105#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
71 106
72static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev, 107static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
73 dma_addr_t addr,
74 enum i915_cache_level level) 108 enum i915_cache_level level)
75{ 109{
76 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 110 gen6_gtt_pte_t pte = GEN6_PTE_VALID;
@@ -87,22 +121,41 @@ static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
87 return pte; 121 return pte;
88} 122}
89 123
90static gen6_gtt_pte_t hsw_pte_encode(struct drm_device *dev, 124static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
91 dma_addr_t addr,
92 enum i915_cache_level level) 125 enum i915_cache_level level)
93{ 126{
94 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 127 gen6_gtt_pte_t pte = GEN6_PTE_VALID;
95 pte |= GEN6_PTE_ADDR_ENCODE(addr); 128 pte |= HSW_PTE_ADDR_ENCODE(addr);
96 129
97 if (level != I915_CACHE_NONE) 130 if (level != I915_CACHE_NONE)
98 pte |= GEN6_PTE_CACHE_LLC; 131 pte |= HSW_WB_LLC_AGE3;
132
133 return pte;
134}
135
136static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
137 enum i915_cache_level level)
138{
139 gen6_gtt_pte_t pte = GEN6_PTE_VALID;
140 pte |= HSW_PTE_ADDR_ENCODE(addr);
141
142 switch (level) {
143 case I915_CACHE_NONE:
144 break;
145 case I915_CACHE_WT:
146 pte |= HSW_WT_ELLC_LLC_AGE0;
147 break;
148 default:
149 pte |= HSW_WB_ELLC_LLC_AGE0;
150 break;
151 }
99 152
100 return pte; 153 return pte;
101} 154}
102 155
103static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt) 156static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
104{ 157{
105 struct drm_i915_private *dev_priv = ppgtt->dev->dev_private; 158 struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
106 gen6_gtt_pte_t __iomem *pd_addr; 159 gen6_gtt_pte_t __iomem *pd_addr;
107 uint32_t pd_entry; 160 uint32_t pd_entry;
108 int i; 161 int i;
@@ -181,18 +234,18 @@ static int gen6_ppgtt_enable(struct drm_device *dev)
181} 234}
182 235
183/* PPGTT support for Sandybridge/Gen6 and later */ 236/* PPGTT support for Sandybridge/Gen6 and later */
184static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, 237static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
185 unsigned first_entry, 238 unsigned first_entry,
186 unsigned num_entries) 239 unsigned num_entries)
187{ 240{
241 struct i915_hw_ppgtt *ppgtt =
242 container_of(vm, struct i915_hw_ppgtt, base);
188 gen6_gtt_pte_t *pt_vaddr, scratch_pte; 243 gen6_gtt_pte_t *pt_vaddr, scratch_pte;
189 unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; 244 unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
190 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; 245 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
191 unsigned last_pte, i; 246 unsigned last_pte, i;
192 247
193 scratch_pte = ppgtt->pte_encode(ppgtt->dev, 248 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
194 ppgtt->scratch_page_dma_addr,
195 I915_CACHE_LLC);
196 249
197 while (num_entries) { 250 while (num_entries) {
198 last_pte = first_pte + num_entries; 251 last_pte = first_pte + num_entries;
@@ -212,11 +265,13 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
212 } 265 }
213} 266}
214 267
215static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt, 268static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
216 struct sg_table *pages, 269 struct sg_table *pages,
217 unsigned first_entry, 270 unsigned first_entry,
218 enum i915_cache_level cache_level) 271 enum i915_cache_level cache_level)
219{ 272{
273 struct i915_hw_ppgtt *ppgtt =
274 container_of(vm, struct i915_hw_ppgtt, base);
220 gen6_gtt_pte_t *pt_vaddr; 275 gen6_gtt_pte_t *pt_vaddr;
221 unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; 276 unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
222 unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES; 277 unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
@@ -227,8 +282,7 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
227 dma_addr_t page_addr; 282 dma_addr_t page_addr;
228 283
229 page_addr = sg_page_iter_dma_address(&sg_iter); 284 page_addr = sg_page_iter_dma_address(&sg_iter);
230 pt_vaddr[act_pte] = ppgtt->pte_encode(ppgtt->dev, page_addr, 285 pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level);
231 cache_level);
232 if (++act_pte == I915_PPGTT_PT_ENTRIES) { 286 if (++act_pte == I915_PPGTT_PT_ENTRIES) {
233 kunmap_atomic(pt_vaddr); 287 kunmap_atomic(pt_vaddr);
234 act_pt++; 288 act_pt++;
@@ -240,13 +294,17 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
240 kunmap_atomic(pt_vaddr); 294 kunmap_atomic(pt_vaddr);
241} 295}
242 296
243static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt) 297static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
244{ 298{
299 struct i915_hw_ppgtt *ppgtt =
300 container_of(vm, struct i915_hw_ppgtt, base);
245 int i; 301 int i;
246 302
303 drm_mm_takedown(&ppgtt->base.mm);
304
247 if (ppgtt->pt_dma_addr) { 305 if (ppgtt->pt_dma_addr) {
248 for (i = 0; i < ppgtt->num_pd_entries; i++) 306 for (i = 0; i < ppgtt->num_pd_entries; i++)
249 pci_unmap_page(ppgtt->dev->pdev, 307 pci_unmap_page(ppgtt->base.dev->pdev,
250 ppgtt->pt_dma_addr[i], 308 ppgtt->pt_dma_addr[i],
251 4096, PCI_DMA_BIDIRECTIONAL); 309 4096, PCI_DMA_BIDIRECTIONAL);
252 } 310 }
@@ -260,7 +318,7 @@ static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
260 318
261static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) 319static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
262{ 320{
263 struct drm_device *dev = ppgtt->dev; 321 struct drm_device *dev = ppgtt->base.dev;
264 struct drm_i915_private *dev_priv = dev->dev_private; 322 struct drm_i915_private *dev_priv = dev->dev_private;
265 unsigned first_pd_entry_in_global_pt; 323 unsigned first_pd_entry_in_global_pt;
266 int i; 324 int i;
@@ -271,18 +329,13 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
271 * now. */ 329 * now. */
272 first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt); 330 first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
273 331
274 if (IS_HASWELL(dev)) { 332 ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
275 ppgtt->pte_encode = hsw_pte_encode; 333 ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
276 } else if (IS_VALLEYVIEW(dev)) {
277 ppgtt->pte_encode = byt_pte_encode;
278 } else {
279 ppgtt->pte_encode = gen6_pte_encode;
280 }
281 ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
282 ppgtt->enable = gen6_ppgtt_enable; 334 ppgtt->enable = gen6_ppgtt_enable;
283 ppgtt->clear_range = gen6_ppgtt_clear_range; 335 ppgtt->base.clear_range = gen6_ppgtt_clear_range;
284 ppgtt->insert_entries = gen6_ppgtt_insert_entries; 336 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
285 ppgtt->cleanup = gen6_ppgtt_cleanup; 337 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
338 ppgtt->base.scratch = dev_priv->gtt.base.scratch;
286 ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, 339 ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
287 GFP_KERNEL); 340 GFP_KERNEL);
288 if (!ppgtt->pt_pages) 341 if (!ppgtt->pt_pages)
@@ -313,8 +366,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
313 ppgtt->pt_dma_addr[i] = pt_addr; 366 ppgtt->pt_dma_addr[i] = pt_addr;
314 } 367 }
315 368
316 ppgtt->clear_range(ppgtt, 0, 369 ppgtt->base.clear_range(&ppgtt->base, 0,
317 ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); 370 ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES);
318 371
319 ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t); 372 ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
320 373
@@ -347,8 +400,7 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
347 if (!ppgtt) 400 if (!ppgtt)
348 return -ENOMEM; 401 return -ENOMEM;
349 402
350 ppgtt->dev = dev; 403 ppgtt->base.dev = dev;
351 ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
352 404
353 if (INTEL_INFO(dev)->gen < 8) 405 if (INTEL_INFO(dev)->gen < 8)
354 ret = gen6_ppgtt_init(ppgtt); 406 ret = gen6_ppgtt_init(ppgtt);
@@ -357,8 +409,11 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
357 409
358 if (ret) 410 if (ret)
359 kfree(ppgtt); 411 kfree(ppgtt);
360 else 412 else {
361 dev_priv->mm.aliasing_ppgtt = ppgtt; 413 dev_priv->mm.aliasing_ppgtt = ppgtt;
414 drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
415 ppgtt->base.total);
416 }
362 417
363 return ret; 418 return ret;
364} 419}
@@ -371,7 +426,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
371 if (!ppgtt) 426 if (!ppgtt)
372 return; 427 return;
373 428
374 ppgtt->cleanup(ppgtt); 429 ppgtt->base.cleanup(&ppgtt->base);
375 dev_priv->mm.aliasing_ppgtt = NULL; 430 dev_priv->mm.aliasing_ppgtt = NULL;
376} 431}
377 432
@@ -379,17 +434,17 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
379 struct drm_i915_gem_object *obj, 434 struct drm_i915_gem_object *obj,
380 enum i915_cache_level cache_level) 435 enum i915_cache_level cache_level)
381{ 436{
382 ppgtt->insert_entries(ppgtt, obj->pages, 437 ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
383 obj->gtt_space->start >> PAGE_SHIFT, 438 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
384 cache_level); 439 cache_level);
385} 440}
386 441
387void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, 442void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
388 struct drm_i915_gem_object *obj) 443 struct drm_i915_gem_object *obj)
389{ 444{
390 ppgtt->clear_range(ppgtt, 445 ppgtt->base.clear_range(&ppgtt->base,
391 obj->gtt_space->start >> PAGE_SHIFT, 446 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
392 obj->base.size >> PAGE_SHIFT); 447 obj->base.size >> PAGE_SHIFT);
393} 448}
394 449
395extern int intel_iommu_gfx_mapped; 450extern int intel_iommu_gfx_mapped;
@@ -436,11 +491,12 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
436 struct drm_i915_gem_object *obj; 491 struct drm_i915_gem_object *obj;
437 492
438 /* First fill our portion of the GTT with scratch pages */ 493 /* First fill our portion of the GTT with scratch pages */
439 dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE, 494 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
440 dev_priv->gtt.total / PAGE_SIZE); 495 dev_priv->gtt.base.start / PAGE_SIZE,
496 dev_priv->gtt.base.total / PAGE_SIZE);
441 497
442 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 498 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
443 i915_gem_clflush_object(obj); 499 i915_gem_clflush_object(obj, obj->pin_display);
444 i915_gem_gtt_bind_object(obj, obj->cache_level); 500 i915_gem_gtt_bind_object(obj, obj->cache_level);
445 } 501 }
446 502
@@ -466,12 +522,12 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
466 * within the global GTT as well as accessible by the GPU through the GMADR 522 * within the global GTT as well as accessible by the GPU through the GMADR
467 * mapped BAR (dev_priv->mm.gtt->gtt). 523 * mapped BAR (dev_priv->mm.gtt->gtt).
468 */ 524 */
469static void gen6_ggtt_insert_entries(struct drm_device *dev, 525static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
470 struct sg_table *st, 526 struct sg_table *st,
471 unsigned int first_entry, 527 unsigned int first_entry,
472 enum i915_cache_level level) 528 enum i915_cache_level level)
473{ 529{
474 struct drm_i915_private *dev_priv = dev->dev_private; 530 struct drm_i915_private *dev_priv = vm->dev->dev_private;
475 gen6_gtt_pte_t __iomem *gtt_entries = 531 gen6_gtt_pte_t __iomem *gtt_entries =
476 (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; 532 (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
477 int i = 0; 533 int i = 0;
@@ -480,8 +536,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
480 536
481 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { 537 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
482 addr = sg_page_iter_dma_address(&sg_iter); 538 addr = sg_page_iter_dma_address(&sg_iter);
483 iowrite32(dev_priv->gtt.pte_encode(dev, addr, level), 539 iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]);
484 &gtt_entries[i]);
485 i++; 540 i++;
486 } 541 }
487 542
@@ -492,8 +547,8 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
492 * hardware should work, we must keep this posting read for paranoia. 547 * hardware should work, we must keep this posting read for paranoia.
493 */ 548 */
494 if (i != 0) 549 if (i != 0)
495 WARN_ON(readl(&gtt_entries[i-1]) 550 WARN_ON(readl(&gtt_entries[i-1]) !=
496 != dev_priv->gtt.pte_encode(dev, addr, level)); 551 vm->pte_encode(addr, level));
497 552
498 /* This next bit makes the above posting read even more important. We 553 /* This next bit makes the above posting read even more important. We
499 * want to flush the TLBs only after we're certain all the PTE updates 554 * want to flush the TLBs only after we're certain all the PTE updates
@@ -503,11 +558,11 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
503 POSTING_READ(GFX_FLSH_CNTL_GEN6); 558 POSTING_READ(GFX_FLSH_CNTL_GEN6);
504} 559}
505 560
506static void gen6_ggtt_clear_range(struct drm_device *dev, 561static void gen6_ggtt_clear_range(struct i915_address_space *vm,
507 unsigned int first_entry, 562 unsigned int first_entry,
508 unsigned int num_entries) 563 unsigned int num_entries)
509{ 564{
510 struct drm_i915_private *dev_priv = dev->dev_private; 565 struct drm_i915_private *dev_priv = vm->dev->dev_private;
511 gen6_gtt_pte_t scratch_pte, __iomem *gtt_base = 566 gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
512 (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; 567 (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
513 const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; 568 const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
@@ -518,16 +573,14 @@ static void gen6_ggtt_clear_range(struct drm_device *dev,
518 first_entry, num_entries, max_entries)) 573 first_entry, num_entries, max_entries))
519 num_entries = max_entries; 574 num_entries = max_entries;
520 575
521 scratch_pte = dev_priv->gtt.pte_encode(dev, 576 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
522 dev_priv->gtt.scratch_page_dma,
523 I915_CACHE_LLC);
524 for (i = 0; i < num_entries; i++) 577 for (i = 0; i < num_entries; i++)
525 iowrite32(scratch_pte, &gtt_base[i]); 578 iowrite32(scratch_pte, &gtt_base[i]);
526 readl(gtt_base); 579 readl(gtt_base);
527} 580}
528 581
529 582
530static void i915_ggtt_insert_entries(struct drm_device *dev, 583static void i915_ggtt_insert_entries(struct i915_address_space *vm,
531 struct sg_table *st, 584 struct sg_table *st,
532 unsigned int pg_start, 585 unsigned int pg_start,
533 enum i915_cache_level cache_level) 586 enum i915_cache_level cache_level)
@@ -539,7 +592,7 @@ static void i915_ggtt_insert_entries(struct drm_device *dev,
539 592
540} 593}
541 594
542static void i915_ggtt_clear_range(struct drm_device *dev, 595static void i915_ggtt_clear_range(struct i915_address_space *vm,
543 unsigned int first_entry, 596 unsigned int first_entry,
544 unsigned int num_entries) 597 unsigned int num_entries)
545{ 598{
@@ -552,10 +605,11 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
552{ 605{
553 struct drm_device *dev = obj->base.dev; 606 struct drm_device *dev = obj->base.dev;
554 struct drm_i915_private *dev_priv = dev->dev_private; 607 struct drm_i915_private *dev_priv = dev->dev_private;
608 const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
555 609
556 dev_priv->gtt.gtt_insert_entries(dev, obj->pages, 610 dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
557 obj->gtt_space->start >> PAGE_SHIFT, 611 entry,
558 cache_level); 612 cache_level);
559 613
560 obj->has_global_gtt_mapping = 1; 614 obj->has_global_gtt_mapping = 1;
561} 615}
@@ -564,10 +618,11 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
564{ 618{
565 struct drm_device *dev = obj->base.dev; 619 struct drm_device *dev = obj->base.dev;
566 struct drm_i915_private *dev_priv = dev->dev_private; 620 struct drm_i915_private *dev_priv = dev->dev_private;
621 const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
567 622
568 dev_priv->gtt.gtt_clear_range(obj->base.dev, 623 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
569 obj->gtt_space->start >> PAGE_SHIFT, 624 entry,
570 obj->base.size >> PAGE_SHIFT); 625 obj->base.size >> PAGE_SHIFT);
571 626
572 obj->has_global_gtt_mapping = 0; 627 obj->has_global_gtt_mapping = 0;
573} 628}
@@ -618,7 +673,8 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
618 * aperture. One page should be enough to keep any prefetching inside 673 * aperture. One page should be enough to keep any prefetching inside
619 * of the aperture. 674 * of the aperture.
620 */ 675 */
621 drm_i915_private_t *dev_priv = dev->dev_private; 676 struct drm_i915_private *dev_priv = dev->dev_private;
677 struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
622 struct drm_mm_node *entry; 678 struct drm_mm_node *entry;
623 struct drm_i915_gem_object *obj; 679 struct drm_i915_gem_object *obj;
624 unsigned long hole_start, hole_end; 680 unsigned long hole_start, hole_end;
@@ -626,37 +682,38 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
626 BUG_ON(mappable_end > end); 682 BUG_ON(mappable_end > end);
627 683
628 /* Subtract the guard page ... */ 684 /* Subtract the guard page ... */
629 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE); 685 drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
630 if (!HAS_LLC(dev)) 686 if (!HAS_LLC(dev))
631 dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust; 687 dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
632 688
633 /* Mark any preallocated objects as occupied */ 689 /* Mark any preallocated objects as occupied */
634 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 690 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
635 DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n", 691 struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
636 obj->gtt_offset, obj->base.size); 692 int ret;
637 693 DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
638 BUG_ON(obj->gtt_space != I915_GTT_RESERVED); 694 i915_gem_obj_ggtt_offset(obj), obj->base.size);
639 obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space, 695
640 obj->gtt_offset, 696 WARN_ON(i915_gem_obj_ggtt_bound(obj));
641 obj->base.size, 697 ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
642 false); 698 if (ret)
699 DRM_DEBUG_KMS("Reservation failed\n");
643 obj->has_global_gtt_mapping = 1; 700 obj->has_global_gtt_mapping = 1;
701 list_add(&vma->vma_link, &obj->vma_list);
644 } 702 }
645 703
646 dev_priv->gtt.start = start; 704 dev_priv->gtt.base.start = start;
647 dev_priv->gtt.total = end - start; 705 dev_priv->gtt.base.total = end - start;
648 706
649 /* Clear any non-preallocated blocks */ 707 /* Clear any non-preallocated blocks */
650 drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space, 708 drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
651 hole_start, hole_end) { 709 const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
652 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", 710 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
653 hole_start, hole_end); 711 hole_start, hole_end);
654 dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE, 712 ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count);
655 (hole_end-hole_start) / PAGE_SIZE);
656 } 713 }
657 714
658 /* And finally clear the reserved guard page */ 715 /* And finally clear the reserved guard page */
659 dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1); 716 ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1);
660} 717}
661 718
662static bool 719static bool
@@ -679,7 +736,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
679 struct drm_i915_private *dev_priv = dev->dev_private; 736 struct drm_i915_private *dev_priv = dev->dev_private;
680 unsigned long gtt_size, mappable_size; 737 unsigned long gtt_size, mappable_size;
681 738
682 gtt_size = dev_priv->gtt.total; 739 gtt_size = dev_priv->gtt.base.total;
683 mappable_size = dev_priv->gtt.mappable_end; 740 mappable_size = dev_priv->gtt.mappable_end;
684 741
685 if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { 742 if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
@@ -688,7 +745,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
688 if (INTEL_INFO(dev)->gen <= 7) { 745 if (INTEL_INFO(dev)->gen <= 7) {
689 /* PPGTT pdes are stolen from global gtt ptes, so shrink the 746 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
690 * aperture accordingly when using aliasing ppgtt. */ 747 * aperture accordingly when using aliasing ppgtt. */
691 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; 748 gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
692 } 749 }
693 750
694 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); 751 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
@@ -698,8 +755,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
698 return; 755 return;
699 756
700 DRM_ERROR("Aliased PPGTT setup failed %d\n", ret); 757 DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
701 drm_mm_takedown(&dev_priv->mm.gtt_space); 758 drm_mm_takedown(&dev_priv->gtt.base.mm);
702 gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE; 759 gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
703 } 760 }
704 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); 761 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
705} 762}
@@ -724,8 +781,8 @@ static int setup_scratch_page(struct drm_device *dev)
724#else 781#else
725 dma_addr = page_to_phys(page); 782 dma_addr = page_to_phys(page);
726#endif 783#endif
727 dev_priv->gtt.scratch_page = page; 784 dev_priv->gtt.base.scratch.page = page;
728 dev_priv->gtt.scratch_page_dma = dma_addr; 785 dev_priv->gtt.base.scratch.addr = dma_addr;
729 786
730 return 0; 787 return 0;
731} 788}
@@ -733,11 +790,13 @@ static int setup_scratch_page(struct drm_device *dev)
733static void teardown_scratch_page(struct drm_device *dev) 790static void teardown_scratch_page(struct drm_device *dev)
734{ 791{
735 struct drm_i915_private *dev_priv = dev->dev_private; 792 struct drm_i915_private *dev_priv = dev->dev_private;
736 set_pages_wb(dev_priv->gtt.scratch_page, 1); 793 struct page *page = dev_priv->gtt.base.scratch.page;
737 pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma, 794
795 set_pages_wb(page, 1);
796 pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
738 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 797 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
739 put_page(dev_priv->gtt.scratch_page); 798 put_page(page);
740 __free_page(dev_priv->gtt.scratch_page); 799 __free_page(page);
741} 800}
742 801
743static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) 802static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -800,17 +859,18 @@ static int gen6_gmch_probe(struct drm_device *dev,
800 if (ret) 859 if (ret)
801 DRM_ERROR("Scratch setup failed\n"); 860 DRM_ERROR("Scratch setup failed\n");
802 861
803 dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range; 862 dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
804 dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries; 863 dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
805 864
806 return ret; 865 return ret;
807} 866}
808 867
809static void gen6_gmch_remove(struct drm_device *dev) 868static void gen6_gmch_remove(struct i915_address_space *vm)
810{ 869{
811 struct drm_i915_private *dev_priv = dev->dev_private; 870
812 iounmap(dev_priv->gtt.gsm); 871 struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
813 teardown_scratch_page(dev_priv->dev); 872 iounmap(gtt->gsm);
873 teardown_scratch_page(vm->dev);
814} 874}
815 875
816static int i915_gmch_probe(struct drm_device *dev, 876static int i915_gmch_probe(struct drm_device *dev,
@@ -831,13 +891,13 @@ static int i915_gmch_probe(struct drm_device *dev,
831 intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end); 891 intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
832 892
833 dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev); 893 dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
834 dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range; 894 dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
835 dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries; 895 dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
836 896
837 return 0; 897 return 0;
838} 898}
839 899
840static void i915_gmch_remove(struct drm_device *dev) 900static void i915_gmch_remove(struct i915_address_space *vm)
841{ 901{
842 intel_gmch_remove(); 902 intel_gmch_remove();
843} 903}
@@ -849,34 +909,35 @@ int i915_gem_gtt_init(struct drm_device *dev)
849 int ret; 909 int ret;
850 910
851 if (INTEL_INFO(dev)->gen <= 5) { 911 if (INTEL_INFO(dev)->gen <= 5) {
852 dev_priv->gtt.gtt_probe = i915_gmch_probe; 912 gtt->gtt_probe = i915_gmch_probe;
853 dev_priv->gtt.gtt_remove = i915_gmch_remove; 913 gtt->base.cleanup = i915_gmch_remove;
854 } else { 914 } else {
855 dev_priv->gtt.gtt_probe = gen6_gmch_probe; 915 gtt->gtt_probe = gen6_gmch_probe;
856 dev_priv->gtt.gtt_remove = gen6_gmch_remove; 916 gtt->base.cleanup = gen6_gmch_remove;
857 if (IS_HASWELL(dev)) { 917 if (IS_HASWELL(dev) && dev_priv->ellc_size)
858 dev_priv->gtt.pte_encode = hsw_pte_encode; 918 gtt->base.pte_encode = iris_pte_encode;
859 } else if (IS_VALLEYVIEW(dev)) { 919 else if (IS_HASWELL(dev))
860 dev_priv->gtt.pte_encode = byt_pte_encode; 920 gtt->base.pte_encode = hsw_pte_encode;
861 } else { 921 else if (IS_VALLEYVIEW(dev))
862 dev_priv->gtt.pte_encode = gen6_pte_encode; 922 gtt->base.pte_encode = byt_pte_encode;
863 } 923 else if (INTEL_INFO(dev)->gen >= 7)
924 gtt->base.pte_encode = ivb_pte_encode;
925 else
926 gtt->base.pte_encode = snb_pte_encode;
864 } 927 }
865 928
866 ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total, 929 ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
867 &dev_priv->gtt.stolen_size, 930 &gtt->mappable_base, &gtt->mappable_end);
868 &gtt->mappable_base,
869 &gtt->mappable_end);
870 if (ret) 931 if (ret)
871 return ret; 932 return ret;
872 933
934 gtt->base.dev = dev;
935
873 /* GMADR is the PCI mmio aperture into the global GTT. */ 936 /* GMADR is the PCI mmio aperture into the global GTT. */
874 DRM_INFO("Memory usable by graphics device = %zdM\n", 937 DRM_INFO("Memory usable by graphics device = %zdM\n",
875 dev_priv->gtt.total >> 20); 938 gtt->base.total >> 20);
876 DRM_DEBUG_DRIVER("GMADR size = %ldM\n", 939 DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
877 dev_priv->gtt.mappable_end >> 20); 940 DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
878 DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
879 dev_priv->gtt.stolen_size >> 20);
880 941
881 return 0; 942 return 0;
882} 943}
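
The i915_gem_gtt.c changes above fold the GGTT and PPGTT operations into a common struct i915_address_space whose clear_range, insert_entries and pte_encode hooks are chosen once at probe time per platform, and they add the Haswell cacheability-control encoding described in the comment (a 4-bit value split across PTE bits 3:1 and bit 11). A hedged, stand-alone sketch of both ideas, with illustrative names only, could look roughly like this:

/*
 * Sketch only, not the driver's code: a per-VM pte_encode callback plus the
 * Haswell-style cacheability packing. Address encoding is deliberately toy.
 */
#include <stdio.h>
#include <stdint.h>

#define PTE_VALID		(1u << 0)
/* Mirrors the shape of HSW_CACHEABILITY_CONTROL(): low 3 bits -> PTE[3:1],
 * bit 3 of the value -> PTE[11]. */
#define CACHEABILITY(bits)	((((bits) & 0x7u) << 1) | (((bits) & 0x8u) << (11 - 3)))

enum cache_level { CACHE_NONE, CACHE_LLC };

typedef uint32_t (*pte_encode_fn)(uint64_t addr, enum cache_level level);

static uint32_t toy_addr_bits(uint64_t addr)
{
	return (uint32_t)(addr & ~0xfffull);	/* toy: page-aligned low bits only */
}

static uint32_t snb_encode(uint64_t addr, enum cache_level level)
{
	uint32_t pte = PTE_VALID | toy_addr_bits(addr);

	pte |= (level == CACHE_LLC) ? (2u << 1) : (1u << 1);	/* LLC vs uncached */
	return pte;
}

static uint32_t hsw_encode(uint64_t addr, enum cache_level level)
{
	uint32_t pte = PTE_VALID | toy_addr_bits(addr);

	if (level != CACHE_NONE)
		pte |= CACHEABILITY(0x2);	/* write-back, LLC, age 3 */
	return pte;
}

struct address_space { pte_encode_fn pte_encode; };

int main(void)
{
	struct address_space vm;
	int is_haswell = 1;	/* pretend probe found a Haswell part */

	vm.pte_encode = is_haswell ? hsw_encode : snb_encode;

	/* 0xb = write-back, eLLC+LLC, age 0 in the hunk above. */
	printf("CACHEABILITY(0xb) = 0x%x\n", CACHEABILITY(0xb));
	printf("pte = 0x%x\n", vm.pte_encode(0x1000, CACHE_LLC));
	return 0;
}

Running this prints CACHEABILITY(0xb) = 0x806, which is the same bit pattern the HSW_WB_ELLC_LLC_AGE0 define in the hunk above expands to.
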
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 982d4732cecf..9969d10b80f5 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -45,49 +45,48 @@
45static unsigned long i915_stolen_to_physical(struct drm_device *dev) 45static unsigned long i915_stolen_to_physical(struct drm_device *dev)
46{ 46{
47 struct drm_i915_private *dev_priv = dev->dev_private; 47 struct drm_i915_private *dev_priv = dev->dev_private;
48 struct pci_dev *pdev = dev_priv->bridge_dev; 48 struct resource *r;
49 u32 base; 49 u32 base;
50 50
51 /* On the machines I have tested the Graphics Base of Stolen Memory 51 /* Almost universally we can find the Graphics Base of Stolen Memory
52 * is unreliable, so on those compute the base by subtracting the 52 * at offset 0x5c in the igfx configuration space. On a few (desktop)
53 * stolen memory from the Top of Low Usable DRAM which is where the 53 * machines this is also mirrored in the bridge device at different
54 * BIOS places the graphics stolen memory. 54 * locations, or in the MCHBAR. On gen2, the layout is again slightly
55 * different with the Graphics Segment immediately following Top of
56 * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
57 * reported by 865g, so we just use the top of memory as determined
58 * by the e820 probe.
55 * 59 *
56 * On gen2, the layout is slightly different with the Graphics Segment 60 * XXX However gen2 requires an unavailable symbol.
57 * immediately following Top of Memory (or Top of Usable DRAM). Note
58 * it appears that TOUD is only reported by 865g, so we just use the
59 * top of memory as determined by the e820 probe.
60 *
61 * XXX gen2 requires an unavailable symbol and 945gm fails with
62 * its value of TOLUD.
63 */ 61 */
64 base = 0; 62 base = 0;
65 if (IS_VALLEYVIEW(dev)) { 63 if (INTEL_INFO(dev)->gen >= 3) {
64 /* Read Graphics Base of Stolen Memory directly */
66 pci_read_config_dword(dev->pdev, 0x5c, &base); 65 pci_read_config_dword(dev->pdev, 0x5c, &base);
67 base &= ~((1<<20) - 1); 66 base &= ~((1<<20) - 1);
68 } else if (INTEL_INFO(dev)->gen >= 6) { 67 } else { /* GEN2 */
69 /* Read Base Data of Stolen Memory Register (BDSM) directly.
70 * Note that there is also a MCHBAR mirror at 0x1080c0 or
71 * we could use device 2:0x5c instead.
72 */
73 pci_read_config_dword(pdev, 0xB0, &base);
74 base &= ~4095; /* lower bits used for locking register */
75 } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
76 /* Read Graphics Base of Stolen Memory directly */
77 pci_read_config_dword(pdev, 0xA4, &base);
78#if 0 68#if 0
79 } else if (IS_GEN3(dev)) {
80 u8 val;
81 /* Stolen is immediately below Top of Low Usable DRAM */
82 pci_read_config_byte(pdev, 0x9c, &val);
83 base = val >> 3 << 27;
84 base -= dev_priv->mm.gtt->stolen_size;
85 } else {
86 /* Stolen is immediately above Top of Memory */ 69 /* Stolen is immediately above Top of Memory */
87 base = max_low_pfn_mapped << PAGE_SHIFT; 70 base = max_low_pfn_mapped << PAGE_SHIFT;
88#endif 71#endif
89 } 72 }
90 73
74 if (base == 0)
75 return 0;
76
77 /* Verify that nothing else uses this physical address. Stolen
78 * memory should be reserved by the BIOS and hidden from the
79 * kernel. So if the region is already marked as busy, something
80 * is seriously wrong.
81 */
82 r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
83 "Graphics Stolen Memory");
84 if (r == NULL) {
85 DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
86 base, base + (uint32_t)dev_priv->gtt.stolen_size);
87 base = 0;
88 }
89
91 return base; 90 return base;
92} 91}
93 92
@@ -95,32 +94,37 @@ static int i915_setup_compression(struct drm_device *dev, int size)
95{ 94{
96 struct drm_i915_private *dev_priv = dev->dev_private; 95 struct drm_i915_private *dev_priv = dev->dev_private;
97 struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); 96 struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
97 int ret;
98 98
99 /* Try to over-allocate to reduce reallocations and fragmentation */ 99 compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
100 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
101 size <<= 1, 4096, 0);
102 if (!compressed_fb) 100 if (!compressed_fb)
103 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, 101 goto err_llb;
104 size >>= 1, 4096, 0); 102
105 if (compressed_fb) 103 /* Try to over-allocate to reduce reallocations and fragmentation */
106 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); 104 ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
107 if (!compressed_fb) 105 size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
108 goto err; 106 if (ret)
107 ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
108 size >>= 1, 4096,
109 DRM_MM_SEARCH_DEFAULT);
110 if (ret)
111 goto err_llb;
109 112
110 if (HAS_PCH_SPLIT(dev)) 113 if (HAS_PCH_SPLIT(dev))
111 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); 114 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
112 else if (IS_GM45(dev)) { 115 else if (IS_GM45(dev)) {
113 I915_WRITE(DPFC_CB_BASE, compressed_fb->start); 116 I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
114 } else { 117 } else {
115 compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen, 118 compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
116 4096, 4096, 0);
117 if (compressed_llb)
118 compressed_llb = drm_mm_get_block(compressed_llb,
119 4096, 4096);
120 if (!compressed_llb) 119 if (!compressed_llb)
121 goto err_fb; 120 goto err_fb;
122 121
123 dev_priv->compressed_llb = compressed_llb; 122 ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
123 4096, 4096, DRM_MM_SEARCH_DEFAULT);
124 if (ret)
125 goto err_fb;
126
127 dev_priv->fbc.compressed_llb = compressed_llb;
124 128
125 I915_WRITE(FBC_CFB_BASE, 129 I915_WRITE(FBC_CFB_BASE,
126 dev_priv->mm.stolen_base + compressed_fb->start); 130 dev_priv->mm.stolen_base + compressed_fb->start);
@@ -128,8 +132,8 @@ static int i915_setup_compression(struct drm_device *dev, int size)
128 dev_priv->mm.stolen_base + compressed_llb->start); 132 dev_priv->mm.stolen_base + compressed_llb->start);
129 } 133 }
130 134
131 dev_priv->compressed_fb = compressed_fb; 135 dev_priv->fbc.compressed_fb = compressed_fb;
132 dev_priv->cfb_size = size; 136 dev_priv->fbc.size = size;
133 137
134 DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n", 138 DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
135 size); 139 size);
@@ -137,8 +141,10 @@ static int i915_setup_compression(struct drm_device *dev, int size)
137 return 0; 141 return 0;
138 142
139err_fb: 143err_fb:
140 drm_mm_put_block(compressed_fb); 144 kfree(compressed_llb);
141err: 145 drm_mm_remove_node(compressed_fb);
146err_llb:
147 kfree(compressed_fb);
142 pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); 148 pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
143 return -ENOSPC; 149 return -ENOSPC;
144} 150}
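
The i915_setup_compression() hunk above also tracks a drm_mm calling-convention change: instead of drm_mm_search_free() plus drm_mm_get_block() returning an allocator-owned node, the caller now kzalloc()s the node itself, hands it to drm_mm_insert_node(), and tears it down with drm_mm_remove_node() plus kfree(). The toy allocator below only mimics that convention under assumed names; it is not the real drm_mm API.

/*
 * Sketch only: caller-owned nodes placed by the allocator, as in the new
 * drm_mm_insert_node() style. The "mm" here is a trivial bump allocator.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

struct mm_node { unsigned long start, size; };
struct mm { unsigned long cursor, end; };

/* Toy stand-in for drm_mm_insert_node(): place a node the caller allocated. */
static int mm_insert_node(struct mm *mm, struct mm_node *node, unsigned long size)
{
	if (mm->cursor + size > mm->end)
		return -ENOSPC;
	node->start = mm->cursor;
	node->size = size;
	mm->cursor += size;
	return 0;
}

/* Toy stand-in for drm_mm_remove_node(): the caller still frees the node. */
static void mm_remove_node(struct mm *mm, struct mm_node *node)
{
	(void)mm;			/* toy: nothing to unlink */
	memset(node, 0, sizeof(*node));
}

int main(void)
{
	struct mm stolen = { 0, 32ul * 1024 * 1024 };	/* pretend 32M of stolen */
	struct mm_node *compressed_fb;
	int ret;

	compressed_fb = calloc(1, sizeof(*compressed_fb));
	if (!compressed_fb)
		return 1;

	ret = mm_insert_node(&stolen, compressed_fb, 4ul * 1024 * 1024);
	if (ret) {
		free(compressed_fb);	/* on failure the caller frees its node */
		return 1;
	}
	printf("compressed_fb at 0x%lx (+0x%lx)\n",
	       compressed_fb->start, compressed_fb->size);

	mm_remove_node(&stolen, compressed_fb);
	free(compressed_fb);
	return 0;
}
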
@@ -150,7 +156,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
150 if (!drm_mm_initialized(&dev_priv->mm.stolen)) 156 if (!drm_mm_initialized(&dev_priv->mm.stolen))
151 return -ENODEV; 157 return -ENODEV;
152 158
153 if (size < dev_priv->cfb_size) 159 if (size < dev_priv->fbc.size)
154 return 0; 160 return 0;
155 161
156 /* Release any current block */ 162 /* Release any current block */
@@ -163,16 +169,20 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
163{ 169{
164 struct drm_i915_private *dev_priv = dev->dev_private; 170 struct drm_i915_private *dev_priv = dev->dev_private;
165 171
166 if (dev_priv->cfb_size == 0) 172 if (dev_priv->fbc.size == 0)
167 return; 173 return;
168 174
169 if (dev_priv->compressed_fb) 175 if (dev_priv->fbc.compressed_fb) {
170 drm_mm_put_block(dev_priv->compressed_fb); 176 drm_mm_remove_node(dev_priv->fbc.compressed_fb);
177 kfree(dev_priv->fbc.compressed_fb);
178 }
171 179
172 if (dev_priv->compressed_llb) 180 if (dev_priv->fbc.compressed_llb) {
173 drm_mm_put_block(dev_priv->compressed_llb); 181 drm_mm_remove_node(dev_priv->fbc.compressed_llb);
182 kfree(dev_priv->fbc.compressed_llb);
183 }
174 184
175 dev_priv->cfb_size = 0; 185 dev_priv->fbc.size = 0;
176} 186}
177 187
178void i915_gem_cleanup_stolen(struct drm_device *dev) 188void i915_gem_cleanup_stolen(struct drm_device *dev)
@@ -201,6 +211,9 @@ int i915_gem_init_stolen(struct drm_device *dev)
201 if (IS_VALLEYVIEW(dev)) 211 if (IS_VALLEYVIEW(dev))
202 bios_reserved = 1024*1024; /* top 1M on VLV/BYT */ 212 bios_reserved = 1024*1024; /* top 1M on VLV/BYT */
203 213
214 if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
215 return 0;
216
204 /* Basic memrange allocator for stolen space */ 217 /* Basic memrange allocator for stolen space */
205 drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size - 218 drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
206 bios_reserved); 219 bios_reserved);
@@ -271,9 +284,7 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
271 if (obj == NULL) 284 if (obj == NULL)
272 return NULL; 285 return NULL;
273 286
274 if (drm_gem_private_object_init(dev, &obj->base, stolen->size)) 287 drm_gem_private_object_init(dev, &obj->base, stolen->size);
275 goto cleanup;
276
277 i915_gem_object_init(obj, &i915_gem_object_stolen_ops); 288 i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
278 289
279 obj->pages = i915_pages_create_for_stolen(dev, 290 obj->pages = i915_pages_create_for_stolen(dev,
@@ -285,9 +296,8 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
285 i915_gem_object_pin_pages(obj); 296 i915_gem_object_pin_pages(obj);
286 obj->stolen = stolen; 297 obj->stolen = stolen;
287 298
288 obj->base.write_domain = I915_GEM_DOMAIN_GTT; 299 obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
289 obj->base.read_domains = I915_GEM_DOMAIN_GTT; 300 obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
290 obj->cache_level = I915_CACHE_NONE;
291 301
292 return obj; 302 return obj;
293 303
@@ -302,6 +312,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
302 struct drm_i915_private *dev_priv = dev->dev_private; 312 struct drm_i915_private *dev_priv = dev->dev_private;
303 struct drm_i915_gem_object *obj; 313 struct drm_i915_gem_object *obj;
304 struct drm_mm_node *stolen; 314 struct drm_mm_node *stolen;
315 int ret;
305 316
306 if (!drm_mm_initialized(&dev_priv->mm.stolen)) 317 if (!drm_mm_initialized(&dev_priv->mm.stolen))
307 return NULL; 318 return NULL;
@@ -310,17 +321,23 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
310 if (size == 0) 321 if (size == 0)
311 return NULL; 322 return NULL;
312 323
313 stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0); 324 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
314 if (stolen) 325 if (!stolen)
315 stolen = drm_mm_get_block(stolen, size, 4096); 326 return NULL;
316 if (stolen == NULL) 327
328 ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
329 4096, DRM_MM_SEARCH_DEFAULT);
330 if (ret) {
331 kfree(stolen);
317 return NULL; 332 return NULL;
333 }
318 334
319 obj = _i915_gem_object_create_stolen(dev, stolen); 335 obj = _i915_gem_object_create_stolen(dev, stolen);
320 if (obj) 336 if (obj)
321 return obj; 337 return obj;
322 338
323 drm_mm_put_block(stolen); 339 drm_mm_remove_node(stolen);
340 kfree(stolen);
324 return NULL; 341 return NULL;
325} 342}
326 343
@@ -331,8 +348,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
331 u32 size) 348 u32 size)
332{ 349{
333 struct drm_i915_private *dev_priv = dev->dev_private; 350 struct drm_i915_private *dev_priv = dev->dev_private;
351 struct i915_address_space *ggtt = &dev_priv->gtt.base;
334 struct drm_i915_gem_object *obj; 352 struct drm_i915_gem_object *obj;
335 struct drm_mm_node *stolen; 353 struct drm_mm_node *stolen;
354 struct i915_vma *vma;
355 int ret;
336 356
337 if (!drm_mm_initialized(&dev_priv->mm.stolen)) 357 if (!drm_mm_initialized(&dev_priv->mm.stolen))
338 return NULL; 358 return NULL;
@@ -347,56 +367,74 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
347 if (WARN_ON(size == 0)) 367 if (WARN_ON(size == 0))
348 return NULL; 368 return NULL;
349 369
350 stolen = drm_mm_create_block(&dev_priv->mm.stolen, 370 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
351 stolen_offset, size, 371 if (!stolen)
352 false); 372 return NULL;
353 if (stolen == NULL) { 373
374 stolen->start = stolen_offset;
375 stolen->size = size;
376 ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
377 if (ret) {
354 DRM_DEBUG_KMS("failed to allocate stolen space\n"); 378 DRM_DEBUG_KMS("failed to allocate stolen space\n");
379 kfree(stolen);
355 return NULL; 380 return NULL;
356 } 381 }
357 382
358 obj = _i915_gem_object_create_stolen(dev, stolen); 383 obj = _i915_gem_object_create_stolen(dev, stolen);
359 if (obj == NULL) { 384 if (obj == NULL) {
360 DRM_DEBUG_KMS("failed to allocate stolen object\n"); 385 DRM_DEBUG_KMS("failed to allocate stolen object\n");
361 drm_mm_put_block(stolen); 386 drm_mm_remove_node(stolen);
387 kfree(stolen);
362 return NULL; 388 return NULL;
363 } 389 }
364 390
365 /* Some objects just need physical mem from stolen space */ 391 /* Some objects just need physical mem from stolen space */
366 if (gtt_offset == -1) 392 if (gtt_offset == I915_GTT_OFFSET_NONE)
367 return obj; 393 return obj;
368 394
395 vma = i915_gem_vma_create(obj, ggtt);
396 if (IS_ERR(vma)) {
397 ret = PTR_ERR(vma);
398 goto err_out;
399 }
400
369 /* To simplify the initialisation sequence between KMS and GTT, 401 /* To simplify the initialisation sequence between KMS and GTT,
370 * we allow construction of the stolen object prior to 402 * we allow construction of the stolen object prior to
371 * setting up the GTT space. The actual reservation will occur 403 * setting up the GTT space. The actual reservation will occur
372 * later. 404 * later.
373 */ 405 */
374 if (drm_mm_initialized(&dev_priv->mm.gtt_space)) { 406 vma->node.start = gtt_offset;
375 obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space, 407 vma->node.size = size;
376 gtt_offset, size, 408 if (drm_mm_initialized(&ggtt->mm)) {
377 false); 409 ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
378 if (obj->gtt_space == NULL) { 410 if (ret) {
379 DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); 411 DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
380 drm_gem_object_unreference(&obj->base); 412 goto err_vma;
381 return NULL;
382 } 413 }
383 } else 414 }
384 obj->gtt_space = I915_GTT_RESERVED;
385 415
386 obj->gtt_offset = gtt_offset;
387 obj->has_global_gtt_mapping = 1; 416 obj->has_global_gtt_mapping = 1;
388 417
389 list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); 418 list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
390 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 419 list_add_tail(&vma->mm_list, &ggtt->inactive_list);
391 420
392 return obj; 421 return obj;
422
423err_vma:
424 i915_gem_vma_destroy(vma);
425err_out:
426 drm_mm_remove_node(stolen);
427 kfree(stolen);
428 drm_gem_object_unreference(&obj->base);
429 return NULL;
393} 430}
394 431
395void 432void
396i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) 433i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
397{ 434{
398 if (obj->stolen) { 435 if (obj->stolen) {
399 drm_mm_put_block(obj->stolen); 436 drm_mm_remove_node(obj->stolen);
437 kfree(obj->stolen);
400 obj->stolen = NULL; 438 obj->stolen = NULL;
401 } 439 }
402} 440}
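
The stolen-memory hunks above move i915 off the old drm_mm block API (drm_mm_search_free()/drm_mm_get_block()/drm_mm_put_block()) and onto caller-owned struct drm_mm_node objects driven through drm_mm_insert_node(), drm_mm_reserve_node() and drm_mm_remove_node(). A minimal sketch of the two allocation patterns, using only the calls visible in this diff (helper names are hypothetical, error reporting trimmed):

	/* Search-and-insert: let the allocator pick a 4 KiB-aligned range. */
	static struct drm_mm_node *stolen_alloc_sketch(struct drm_i915_private *dev_priv,
						       u32 size)
	{
		struct drm_mm_node *node;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);	/* node storage is owned by the caller */
		if (!node)
			return NULL;

		ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, size,
					 4096, DRM_MM_SEARCH_DEFAULT);
		if (ret) {
			kfree(node);				/* never inserted, so nothing to remove */
			return NULL;
		}
		return node;
	}

	/* Reserve-at-offset: claim a fixed range, as the preallocated-object path does. */
	static struct drm_mm_node *stolen_reserve_sketch(struct drm_i915_private *dev_priv,
							 u32 offset, u32 size)
	{
		struct drm_mm_node *node;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return NULL;

		node->start = offset;
		node->size = size;
		if (drm_mm_reserve_node(&dev_priv->mm.stolen, node)) {
			kfree(node);
			return NULL;
		}
		return node;
	}

	/* Either way, release order matters: remove the node before freeing it. */
	static void stolen_release_sketch(struct drm_mm_node *node)
	{
		drm_mm_remove_node(node);
		kfree(node);
	}

The key difference from the old API is ownership: the node's storage now comes from the caller, so every error path has to pair drm_mm_remove_node() with kfree(), exactly as the reworked err_fb/err_llb labels above do.
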
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 537545be69db..032e9ef9c896 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -268,18 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
268 return true; 268 return true;
269 269
270 if (INTEL_INFO(obj->base.dev)->gen == 3) { 270 if (INTEL_INFO(obj->base.dev)->gen == 3) {
271 if (obj->gtt_offset & ~I915_FENCE_START_MASK) 271 if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
272 return false; 272 return false;
273 } else { 273 } else {
274 if (obj->gtt_offset & ~I830_FENCE_START_MASK) 274 if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
275 return false; 275 return false;
276 } 276 }
277 277
278 size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode); 278 size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
279 if (obj->gtt_space->size != size) 279 if (i915_gem_obj_ggtt_size(obj) != size)
280 return false; 280 return false;
281 281
282 if (obj->gtt_offset & (size - 1)) 282 if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
283 return false; 283 return false;
284 284
285 return true; 285 return true;
@@ -359,18 +359,19 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
359 */ 359 */
360 360
361 obj->map_and_fenceable = 361 obj->map_and_fenceable =
362 obj->gtt_space == NULL || 362 !i915_gem_obj_ggtt_bound(obj) ||
363 (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end && 363 (i915_gem_obj_ggtt_offset(obj) +
364 obj->base.size <= dev_priv->gtt.mappable_end &&
364 i915_gem_object_fence_ok(obj, args->tiling_mode)); 365 i915_gem_object_fence_ok(obj, args->tiling_mode));
365 366
366 /* Rebind if we need a change of alignment */ 367 /* Rebind if we need a change of alignment */
367 if (!obj->map_and_fenceable) { 368 if (!obj->map_and_fenceable) {
368 u32 unfenced_alignment = 369 u32 unfenced_align =
369 i915_gem_get_gtt_alignment(dev, obj->base.size, 370 i915_gem_get_gtt_alignment(dev, obj->base.size,
370 args->tiling_mode, 371 args->tiling_mode,
371 false); 372 false);
372 if (obj->gtt_offset & (unfenced_alignment - 1)) 373 if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1))
373 ret = i915_gem_object_unbind(obj); 374 ret = i915_gem_object_ggtt_unbind(obj);
374 } 375 }
375 376
376 if (ret == 0) { 377 if (ret == 0) {
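
The tiling hunk above replaces direct obj->gtt_offset / obj->gtt_space reads with the new GGTT accessors. Restated as a stand-alone predicate (hypothetical helper name, logic copied from the hunk), the map-and-fenceable test becomes:

	static bool map_and_fenceable_sketch(struct drm_i915_private *dev_priv,
					     struct drm_i915_gem_object *obj,
					     int tiling_mode)
	{
		/* An unbound object can always be rebound somewhere suitable later. */
		if (!i915_gem_obj_ggtt_bound(obj))
			return true;

		/* A bound object must sit entirely inside the mappable aperture and
		 * still satisfy the fence alignment/size rules for this tiling mode.
		 */
		return i915_gem_obj_ggtt_offset(obj) + obj->base.size <=
				dev_priv->gtt.mappable_end &&
		       i915_gem_object_fence_ok(obj, tiling_mode);
	}
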
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
new file mode 100644
index 000000000000..558e568d5b45
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -0,0 +1,1019 @@
1/*
2 * Copyright (c) 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 * Mika Kuoppala <mika.kuoppala@intel.com>
27 *
28 */
29
30#include <generated/utsrelease.h>
31#include "i915_drv.h"
32
33static const char *yesno(int v)
34{
35 return v ? "yes" : "no";
36}
37
38static const char *ring_str(int ring)
39{
40 switch (ring) {
41 case RCS: return "render";
42 case VCS: return "bsd";
43 case BCS: return "blt";
44 case VECS: return "vebox";
45 default: return "";
46 }
47}
48
49static const char *pin_flag(int pinned)
50{
51 if (pinned > 0)
52 return " P";
53 else if (pinned < 0)
54 return " p";
55 else
56 return "";
57}
58
59static const char *tiling_flag(int tiling)
60{
61 switch (tiling) {
62 default:
63 case I915_TILING_NONE: return "";
64 case I915_TILING_X: return " X";
65 case I915_TILING_Y: return " Y";
66 }
67}
68
69static const char *dirty_flag(int dirty)
70{
71 return dirty ? " dirty" : "";
72}
73
74static const char *purgeable_flag(int purgeable)
75{
76 return purgeable ? " purgeable" : "";
77}
78
79static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
80{
81
82 if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
83 e->err = -ENOSPC;
84 return false;
85 }
86
87 if (e->bytes == e->size - 1 || e->err)
88 return false;
89
90 return true;
91}
92
93static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
94 unsigned len)
95{
96 if (e->pos + len <= e->start) {
97 e->pos += len;
98 return false;
99 }
100
101 /* First vsnprintf needs to fit in its entirety for memmove */
102 if (len >= e->size) {
103 e->err = -EIO;
104 return false;
105 }
106
107 return true;
108}
109
110static void __i915_error_advance(struct drm_i915_error_state_buf *e,
111 unsigned len)
112{
 113 /* If this is the first printf in this window, adjust it so that
114 * start position matches start of the buffer
115 */
116
117 if (e->pos < e->start) {
118 const size_t off = e->start - e->pos;
119
120 /* Should not happen but be paranoid */
121 if (off > len || e->bytes) {
122 e->err = -EIO;
123 return;
124 }
125
126 memmove(e->buf, e->buf + off, len - off);
127 e->bytes = len - off;
128 e->pos = e->start;
129 return;
130 }
131
132 e->bytes += len;
133 e->pos += len;
134}
135
136static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
137 const char *f, va_list args)
138{
139 unsigned len;
140
141 if (!__i915_error_ok(e))
142 return;
143
 144 /* Seek the first printf that hits the start position */
145 if (e->pos < e->start) {
146 len = vsnprintf(NULL, 0, f, args);
147 if (!__i915_error_seek(e, len))
148 return;
149 }
150
151 len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
152 if (len >= e->size - e->bytes)
153 len = e->size - e->bytes - 1;
154
155 __i915_error_advance(e, len);
156}
157
158static void i915_error_puts(struct drm_i915_error_state_buf *e,
159 const char *str)
160{
161 unsigned len;
162
163 if (!__i915_error_ok(e))
164 return;
165
166 len = strlen(str);
167
 168 /* Seek the first printf that hits the start position */
169 if (e->pos < e->start) {
170 if (!__i915_error_seek(e, len))
171 return;
172 }
173
174 if (len >= e->size - e->bytes)
175 len = e->size - e->bytes - 1;
176 memcpy(e->buf + e->bytes, str, len);
177
178 __i915_error_advance(e, len);
179}
180
181#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
182#define err_puts(e, s) i915_error_puts(e, s)

183
184static void print_error_buffers(struct drm_i915_error_state_buf *m,
185 const char *name,
186 struct drm_i915_error_buffer *err,
187 int count)
188{
189 err_printf(m, "%s [%d]:\n", name, count);
190
191 while (count--) {
192 err_printf(m, " %08x %8u %02x %02x %x %x",
193 err->gtt_offset,
194 err->size,
195 err->read_domains,
196 err->write_domain,
197 err->rseqno, err->wseqno);
198 err_puts(m, pin_flag(err->pinned));
199 err_puts(m, tiling_flag(err->tiling));
200 err_puts(m, dirty_flag(err->dirty));
201 err_puts(m, purgeable_flag(err->purgeable));
202 err_puts(m, err->ring != -1 ? " " : "");
203 err_puts(m, ring_str(err->ring));
204 err_puts(m, i915_cache_level_str(err->cache_level));
205
206 if (err->name)
207 err_printf(m, " (name: %d)", err->name);
208 if (err->fence_reg != I915_FENCE_REG_NONE)
209 err_printf(m, " (fence: %d)", err->fence_reg);
210
211 err_puts(m, "\n");
212 err++;
213 }
214}
215
216static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
217 struct drm_device *dev,
218 struct drm_i915_error_state *error,
219 unsigned ring)
220{
221 BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
222 err_printf(m, "%s command stream:\n", ring_str(ring));
223 err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
224 err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
225 err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
226 err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
227 err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
228 err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
229 err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
230 if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
231 err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
232
233 if (INTEL_INFO(dev)->gen >= 4)
234 err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
235 err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
236 err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
237 if (INTEL_INFO(dev)->gen >= 6) {
238 err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
239 err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
240 err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
241 error->semaphore_mboxes[ring][0],
242 error->semaphore_seqno[ring][0]);
243 err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
244 error->semaphore_mboxes[ring][1],
245 error->semaphore_seqno[ring][1]);
246 if (HAS_VEBOX(dev)) {
247 err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
248 error->semaphore_mboxes[ring][2],
249 error->semaphore_seqno[ring][2]);
250 }
251 }
252 err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
253 err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
254 err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
255 err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
256}
257
258void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
259{
260 va_list args;
261
262 va_start(args, f);
263 i915_error_vprintf(e, f, args);
264 va_end(args);
265}
266
267int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
268 const struct i915_error_state_file_priv *error_priv)
269{
270 struct drm_device *dev = error_priv->dev;
271 drm_i915_private_t *dev_priv = dev->dev_private;
272 struct drm_i915_error_state *error = error_priv->error;
273 struct intel_ring_buffer *ring;
274 int i, j, page, offset, elt;
275
276 if (!error) {
277 err_printf(m, "no error state collected\n");
278 goto out;
279 }
280
281 err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
282 error->time.tv_usec);
283 err_printf(m, "Kernel: " UTS_RELEASE "\n");
284 err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
285 err_printf(m, "EIR: 0x%08x\n", error->eir);
286 err_printf(m, "IER: 0x%08x\n", error->ier);
287 err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
288 err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
289 err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
290 err_printf(m, "CCID: 0x%08x\n", error->ccid);
291
292 for (i = 0; i < dev_priv->num_fence_regs; i++)
293 err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
294
295 for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
296 err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
297 error->extra_instdone[i]);
298
299 if (INTEL_INFO(dev)->gen >= 6) {
300 err_printf(m, "ERROR: 0x%08x\n", error->error);
301 err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
302 }
303
304 if (INTEL_INFO(dev)->gen == 7)
305 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
306
307 for_each_ring(ring, dev_priv, i)
308 i915_ring_error_state(m, dev, error, i);
309
310 if (error->active_bo)
311 print_error_buffers(m, "Active",
312 error->active_bo[0],
313 error->active_bo_count[0]);
314
315 if (error->pinned_bo)
316 print_error_buffers(m, "Pinned",
317 error->pinned_bo[0],
318 error->pinned_bo_count[0]);
319
320 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
321 struct drm_i915_error_object *obj;
322
323 if ((obj = error->ring[i].batchbuffer)) {
324 err_printf(m, "%s --- gtt_offset = 0x%08x\n",
325 dev_priv->ring[i].name,
326 obj->gtt_offset);
327 offset = 0;
328 for (page = 0; page < obj->page_count; page++) {
329 for (elt = 0; elt < PAGE_SIZE/4; elt++) {
330 err_printf(m, "%08x : %08x\n", offset,
331 obj->pages[page][elt]);
332 offset += 4;
333 }
334 }
335 }
336
337 if (error->ring[i].num_requests) {
338 err_printf(m, "%s --- %d requests\n",
339 dev_priv->ring[i].name,
340 error->ring[i].num_requests);
341 for (j = 0; j < error->ring[i].num_requests; j++) {
342 err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
343 error->ring[i].requests[j].seqno,
344 error->ring[i].requests[j].jiffies,
345 error->ring[i].requests[j].tail);
346 }
347 }
348
349 if ((obj = error->ring[i].ringbuffer)) {
350 err_printf(m, "%s --- ringbuffer = 0x%08x\n",
351 dev_priv->ring[i].name,
352 obj->gtt_offset);
353 offset = 0;
354 for (page = 0; page < obj->page_count; page++) {
355 for (elt = 0; elt < PAGE_SIZE/4; elt++) {
356 err_printf(m, "%08x : %08x\n",
357 offset,
358 obj->pages[page][elt]);
359 offset += 4;
360 }
361 }
362 }
363
364 obj = error->ring[i].ctx;
365 if (obj) {
366 err_printf(m, "%s --- HW Context = 0x%08x\n",
367 dev_priv->ring[i].name,
368 obj->gtt_offset);
369 offset = 0;
370 for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
371 err_printf(m, "[%04x] %08x %08x %08x %08x\n",
372 offset,
373 obj->pages[0][elt],
374 obj->pages[0][elt+1],
375 obj->pages[0][elt+2],
376 obj->pages[0][elt+3]);
377 offset += 16;
378 }
379 }
380 }
381
382 if (error->overlay)
383 intel_overlay_print_error_state(m, error->overlay);
384
385 if (error->display)
386 intel_display_print_error_state(m, dev, error->display);
387
388out:
389 if (m->bytes == 0 && m->err)
390 return m->err;
391
392 return 0;
393}
394
395int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
396 size_t count, loff_t pos)
397{
398 memset(ebuf, 0, sizeof(*ebuf));
399
400 /* We need to have enough room to store any i915_error_state printf
401 * so that we can move it to start position.
402 */
403 ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
404 ebuf->buf = kmalloc(ebuf->size,
405 GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
406
407 if (ebuf->buf == NULL) {
408 ebuf->size = PAGE_SIZE;
409 ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
410 }
411
412 if (ebuf->buf == NULL) {
413 ebuf->size = 128;
414 ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
415 }
416
417 if (ebuf->buf == NULL)
418 return -ENOMEM;
419
420 ebuf->start = pos;
421
422 return 0;
423}
424
425static void i915_error_object_free(struct drm_i915_error_object *obj)
426{
427 int page;
428
429 if (obj == NULL)
430 return;
431
432 for (page = 0; page < obj->page_count; page++)
433 kfree(obj->pages[page]);
434
435 kfree(obj);
436}
437
438static void i915_error_state_free(struct kref *error_ref)
439{
440 struct drm_i915_error_state *error = container_of(error_ref,
441 typeof(*error), ref);
442 int i;
443
444 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
445 i915_error_object_free(error->ring[i].batchbuffer);
446 i915_error_object_free(error->ring[i].ringbuffer);
447 i915_error_object_free(error->ring[i].ctx);
448 kfree(error->ring[i].requests);
449 }
450
451 kfree(error->active_bo);
452 kfree(error->overlay);
453 kfree(error->display);
454 kfree(error);
455}
456
457static struct drm_i915_error_object *
458i915_error_object_create_sized(struct drm_i915_private *dev_priv,
459 struct drm_i915_gem_object *src,
460 const int num_pages)
461{
462 struct drm_i915_error_object *dst;
463 int i;
464 u32 reloc_offset;
465
466 if (src == NULL || src->pages == NULL)
467 return NULL;
468
469 dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
470 if (dst == NULL)
471 return NULL;
472
473 reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
474 for (i = 0; i < num_pages; i++) {
475 unsigned long flags;
476 void *d;
477
478 d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
479 if (d == NULL)
480 goto unwind;
481
482 local_irq_save(flags);
483 if (reloc_offset < dev_priv->gtt.mappable_end &&
484 src->has_global_gtt_mapping) {
485 void __iomem *s;
486
487 /* Simply ignore tiling or any overlapping fence.
488 * It's part of the error state, and this hopefully
489 * captures what the GPU read.
490 */
491
492 s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
493 reloc_offset);
494 memcpy_fromio(d, s, PAGE_SIZE);
495 io_mapping_unmap_atomic(s);
496 } else if (src->stolen) {
497 unsigned long offset;
498
499 offset = dev_priv->mm.stolen_base;
500 offset += src->stolen->start;
501 offset += i << PAGE_SHIFT;
502
503 memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
504 } else {
505 struct page *page;
506 void *s;
507
508 page = i915_gem_object_get_page(src, i);
509
510 drm_clflush_pages(&page, 1);
511
512 s = kmap_atomic(page);
513 memcpy(d, s, PAGE_SIZE);
514 kunmap_atomic(s);
515
516 drm_clflush_pages(&page, 1);
517 }
518 local_irq_restore(flags);
519
520 dst->pages[i] = d;
521
522 reloc_offset += PAGE_SIZE;
523 }
524 dst->page_count = num_pages;
525
526 return dst;
527
528unwind:
529 while (i--)
530 kfree(dst->pages[i]);
531 kfree(dst);
532 return NULL;
533}
534#define i915_error_object_create(dev_priv, src) \
535 i915_error_object_create_sized((dev_priv), (src), \
536 (src)->base.size>>PAGE_SHIFT)
537
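
i915_error_object_create() above snapshots every page of an object, while the _sized variant lets a caller copy only part of one. A sketch of how the ring-record code below pairs them (hypothetical helper, mirroring i915_gem_record_rings() and i915_gem_record_active_context()):

	static void snapshot_ring_sketch(struct drm_i915_private *dev_priv,
					 struct intel_ring_buffer *ring,
					 struct drm_i915_gem_object *ctx_obj,
					 struct drm_i915_error_ring *ering)
	{
		/* Whole ring buffer: the page count is derived from the object size. */
		ering->ringbuffer = i915_error_object_create(dev_priv, ring->obj);

		/* HW context: only the first page is interesting for post-mortem analysis. */
		ering->ctx = i915_error_object_create_sized(dev_priv, ctx_obj, 1);
	}
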
538static void capture_bo(struct drm_i915_error_buffer *err,
539 struct drm_i915_gem_object *obj)
540{
541 err->size = obj->base.size;
542 err->name = obj->base.name;
543 err->rseqno = obj->last_read_seqno;
544 err->wseqno = obj->last_write_seqno;
545 err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
546 err->read_domains = obj->base.read_domains;
547 err->write_domain = obj->base.write_domain;
548 err->fence_reg = obj->fence_reg;
549 err->pinned = 0;
550 if (obj->pin_count > 0)
551 err->pinned = 1;
552 if (obj->user_pin_count > 0)
553 err->pinned = -1;
554 err->tiling = obj->tiling_mode;
555 err->dirty = obj->dirty;
556 err->purgeable = obj->madv != I915_MADV_WILLNEED;
557 err->ring = obj->ring ? obj->ring->id : -1;
558 err->cache_level = obj->cache_level;
559}
560
561static u32 capture_active_bo(struct drm_i915_error_buffer *err,
562 int count, struct list_head *head)
563{
564 struct i915_vma *vma;
565 int i = 0;
566
567 list_for_each_entry(vma, head, mm_list) {
568 capture_bo(err++, vma->obj);
569 if (++i == count)
570 break;
571 }
572
573 return i;
574}
575
576static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
577 int count, struct list_head *head)
578{
579 struct drm_i915_gem_object *obj;
580 int i = 0;
581
582 list_for_each_entry(obj, head, global_list) {
583 if (obj->pin_count == 0)
584 continue;
585
586 capture_bo(err++, obj);
587 if (++i == count)
588 break;
589 }
590
591 return i;
592}
593
594static void i915_gem_record_fences(struct drm_device *dev,
595 struct drm_i915_error_state *error)
596{
597 struct drm_i915_private *dev_priv = dev->dev_private;
598 int i;
599
600 /* Fences */
601 switch (INTEL_INFO(dev)->gen) {
602 case 7:
603 case 6:
604 for (i = 0; i < dev_priv->num_fence_regs; i++)
605 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
606 break;
607 case 5:
608 case 4:
609 for (i = 0; i < 16; i++)
610 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
611 break;
612 case 3:
613 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
614 for (i = 0; i < 8; i++)
615 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
616 case 2:
617 for (i = 0; i < 8; i++)
618 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
619 break;
620
621 default:
622 BUG();
623 }
624}
625
626static struct drm_i915_error_object *
627i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
628 struct intel_ring_buffer *ring)
629{
630 struct i915_address_space *vm;
631 struct i915_vma *vma;
632 struct drm_i915_gem_object *obj;
633 u32 seqno;
634
635 if (!ring->get_seqno)
636 return NULL;
637
638 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
639 u32 acthd = I915_READ(ACTHD);
640
641 if (WARN_ON(ring->id != RCS))
642 return NULL;
643
644 obj = ring->private;
645 if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
646 acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
647 return i915_error_object_create(dev_priv, obj);
648 }
649
650 seqno = ring->get_seqno(ring, false);
651 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
652 list_for_each_entry(vma, &vm->active_list, mm_list) {
653 obj = vma->obj;
654 if (obj->ring != ring)
655 continue;
656
657 if (i915_seqno_passed(seqno, obj->last_read_seqno))
658 continue;
659
660 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
661 continue;
662
663 /* We need to copy these to an anonymous buffer as the simplest
664 * method to avoid being overwritten by userspace.
665 */
666 return i915_error_object_create(dev_priv, obj);
667 }
668 }
669
670 return NULL;
671}
672
673static void i915_record_ring_state(struct drm_device *dev,
674 struct drm_i915_error_state *error,
675 struct intel_ring_buffer *ring)
676{
677 struct drm_i915_private *dev_priv = dev->dev_private;
678
679 if (INTEL_INFO(dev)->gen >= 6) {
680 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
681 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
682 error->semaphore_mboxes[ring->id][0]
683 = I915_READ(RING_SYNC_0(ring->mmio_base));
684 error->semaphore_mboxes[ring->id][1]
685 = I915_READ(RING_SYNC_1(ring->mmio_base));
686 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
687 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
688 }
689
690 if (HAS_VEBOX(dev)) {
691 error->semaphore_mboxes[ring->id][2] =
692 I915_READ(RING_SYNC_2(ring->mmio_base));
693 error->semaphore_seqno[ring->id][2] = ring->sync_seqno[2];
694 }
695
696 if (INTEL_INFO(dev)->gen >= 4) {
697 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
698 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
699 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
700 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
701 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
702 if (ring->id == RCS)
703 error->bbaddr = I915_READ64(BB_ADDR);
704 } else {
705 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
706 error->ipeir[ring->id] = I915_READ(IPEIR);
707 error->ipehr[ring->id] = I915_READ(IPEHR);
708 error->instdone[ring->id] = I915_READ(INSTDONE);
709 }
710
711 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
712 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
713 error->seqno[ring->id] = ring->get_seqno(ring, false);
714 error->acthd[ring->id] = intel_ring_get_active_head(ring);
715 error->head[ring->id] = I915_READ_HEAD(ring);
716 error->tail[ring->id] = I915_READ_TAIL(ring);
717 error->ctl[ring->id] = I915_READ_CTL(ring);
718
719 error->cpu_ring_head[ring->id] = ring->head;
720 error->cpu_ring_tail[ring->id] = ring->tail;
721}
722
723
724static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
725 struct drm_i915_error_state *error,
726 struct drm_i915_error_ring *ering)
727{
728 struct drm_i915_private *dev_priv = ring->dev->dev_private;
729 struct drm_i915_gem_object *obj;
730
731 /* Currently render ring is the only HW context user */
732 if (ring->id != RCS || !error->ccid)
733 return;
734
735 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
736 if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
737 ering->ctx = i915_error_object_create_sized(dev_priv,
738 obj, 1);
739 break;
740 }
741 }
742}
743
744static void i915_gem_record_rings(struct drm_device *dev,
745 struct drm_i915_error_state *error)
746{
747 struct drm_i915_private *dev_priv = dev->dev_private;
748 struct intel_ring_buffer *ring;
749 struct drm_i915_gem_request *request;
750 int i, count;
751
752 for_each_ring(ring, dev_priv, i) {
753 i915_record_ring_state(dev, error, ring);
754
755 error->ring[i].batchbuffer =
756 i915_error_first_batchbuffer(dev_priv, ring);
757
758 error->ring[i].ringbuffer =
759 i915_error_object_create(dev_priv, ring->obj);
760
761
762 i915_gem_record_active_context(ring, error, &error->ring[i]);
763
764 count = 0;
765 list_for_each_entry(request, &ring->request_list, list)
766 count++;
767
768 error->ring[i].num_requests = count;
769 error->ring[i].requests =
770 kmalloc(count*sizeof(struct drm_i915_error_request),
771 GFP_ATOMIC);
772 if (error->ring[i].requests == NULL) {
773 error->ring[i].num_requests = 0;
774 continue;
775 }
776
777 count = 0;
778 list_for_each_entry(request, &ring->request_list, list) {
779 struct drm_i915_error_request *erq;
780
781 erq = &error->ring[i].requests[count++];
782 erq->seqno = request->seqno;
783 erq->jiffies = request->emitted_jiffies;
784 erq->tail = request->tail;
785 }
786 }
787}
788
789/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
790 * VM.
791 */
792static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
793 struct drm_i915_error_state *error,
794 struct i915_address_space *vm,
795 const int ndx)
796{
797 struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
798 struct drm_i915_gem_object *obj;
799 struct i915_vma *vma;
800 int i;
801
802 i = 0;
803 list_for_each_entry(vma, &vm->active_list, mm_list)
804 i++;
805 error->active_bo_count[ndx] = i;
806 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
807 if (obj->pin_count)
808 i++;
809 error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
810
811 if (i) {
812 active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC);
813 if (active_bo)
814 pinned_bo = active_bo + error->active_bo_count[ndx];
815 }
816
817 if (active_bo)
818 error->active_bo_count[ndx] =
819 capture_active_bo(active_bo,
820 error->active_bo_count[ndx],
821 &vm->active_list);
822
823 if (pinned_bo)
824 error->pinned_bo_count[ndx] =
825 capture_pinned_bo(pinned_bo,
826 error->pinned_bo_count[ndx],
827 &dev_priv->mm.bound_list);
828 error->active_bo[ndx] = active_bo;
829 error->pinned_bo[ndx] = pinned_bo;
830}
831
832static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
833 struct drm_i915_error_state *error)
834{
835 struct i915_address_space *vm;
836 int cnt = 0, i = 0;
837
838 list_for_each_entry(vm, &dev_priv->vm_list, global_link)
839 cnt++;
840
841 if (WARN(cnt > 1, "Multiple VMs not yet supported\n"))
842 cnt = 1;
843
844 vm = &dev_priv->gtt.base;
845
846 error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
847 error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
848 error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
849 GFP_ATOMIC);
850 error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
851 GFP_ATOMIC);
852
853 list_for_each_entry(vm, &dev_priv->vm_list, global_link)
854 i915_gem_capture_vm(dev_priv, error, vm, i++);
855}
856
857/**
858 * i915_capture_error_state - capture an error record for later analysis
859 * @dev: drm device
860 *
861 * Should be called when an error is detected (either a hang or an error
862 * interrupt) to capture error state from the time of the error. Fills
863 * out a structure which becomes available in debugfs for user level tools
864 * to pick up.
865 */
866void i915_capture_error_state(struct drm_device *dev)
867{
868 struct drm_i915_private *dev_priv = dev->dev_private;
869 struct drm_i915_error_state *error;
870 unsigned long flags;
871 int pipe;
872
873 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
874 error = dev_priv->gpu_error.first_error;
875 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
876 if (error)
877 return;
878
879 /* Account for pipe specific data like PIPE*STAT */
880 error = kzalloc(sizeof(*error), GFP_ATOMIC);
881 if (!error) {
882 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
883 return;
884 }
885
886 DRM_INFO("capturing error event; look for more information in "
887 "/sys/class/drm/card%d/error\n", dev->primary->index);
888
889 kref_init(&error->ref);
890 error->eir = I915_READ(EIR);
891 error->pgtbl_er = I915_READ(PGTBL_ER);
892 if (HAS_HW_CONTEXTS(dev))
893 error->ccid = I915_READ(CCID);
894
895 if (HAS_PCH_SPLIT(dev))
896 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
897 else if (IS_VALLEYVIEW(dev))
898 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
899 else if (IS_GEN2(dev))
900 error->ier = I915_READ16(IER);
901 else
902 error->ier = I915_READ(IER);
903
904 if (INTEL_INFO(dev)->gen >= 6)
905 error->derrmr = I915_READ(DERRMR);
906
907 if (IS_VALLEYVIEW(dev))
908 error->forcewake = I915_READ(FORCEWAKE_VLV);
909 else if (INTEL_INFO(dev)->gen >= 7)
910 error->forcewake = I915_READ(FORCEWAKE_MT);
911 else if (INTEL_INFO(dev)->gen == 6)
912 error->forcewake = I915_READ(FORCEWAKE);
913
914 if (!HAS_PCH_SPLIT(dev))
915 for_each_pipe(pipe)
916 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
917
918 if (INTEL_INFO(dev)->gen >= 6) {
919 error->error = I915_READ(ERROR_GEN6);
920 error->done_reg = I915_READ(DONE_REG);
921 }
922
923 if (INTEL_INFO(dev)->gen == 7)
924 error->err_int = I915_READ(GEN7_ERR_INT);
925
926 i915_get_extra_instdone(dev, error->extra_instdone);
927
928 i915_gem_capture_buffers(dev_priv, error);
929 i915_gem_record_fences(dev, error);
930 i915_gem_record_rings(dev, error);
931
932 do_gettimeofday(&error->time);
933
934 error->overlay = intel_overlay_capture_error_state(dev);
935 error->display = intel_display_capture_error_state(dev);
936
937 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
938 if (dev_priv->gpu_error.first_error == NULL) {
939 dev_priv->gpu_error.first_error = error;
940 error = NULL;
941 }
942 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
943
944 if (error)
945 i915_error_state_free(&error->ref);
946}
947
948void i915_error_state_get(struct drm_device *dev,
949 struct i915_error_state_file_priv *error_priv)
950{
951 struct drm_i915_private *dev_priv = dev->dev_private;
952 unsigned long flags;
953
954 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
955 error_priv->error = dev_priv->gpu_error.first_error;
956 if (error_priv->error)
957 kref_get(&error_priv->error->ref);
958 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
959
960}
961
962void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
963{
964 if (error_priv->error)
965 kref_put(&error_priv->error->ref, i915_error_state_free);
966}
967
968void i915_destroy_error_state(struct drm_device *dev)
969{
970 struct drm_i915_private *dev_priv = dev->dev_private;
971 struct drm_i915_error_state *error;
972 unsigned long flags;
973
974 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
975 error = dev_priv->gpu_error.first_error;
976 dev_priv->gpu_error.first_error = NULL;
977 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
978
979 if (error)
980 kref_put(&error->ref, i915_error_state_free);
981}
982
983const char *i915_cache_level_str(int type)
984{
985 switch (type) {
986 case I915_CACHE_NONE: return " uncached";
987 case I915_CACHE_LLC: return " snooped or LLC";
988 case I915_CACHE_L3_LLC: return " L3+LLC";
989 default: return "";
990 }
991}
992
993/* NB: please notice the memset */
994void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
995{
996 struct drm_i915_private *dev_priv = dev->dev_private;
997 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
998
999 switch (INTEL_INFO(dev)->gen) {
1000 case 2:
1001 case 3:
1002 instdone[0] = I915_READ(INSTDONE);
1003 break;
1004 case 4:
1005 case 5:
1006 case 6:
1007 instdone[0] = I915_READ(INSTDONE_I965);
1008 instdone[1] = I915_READ(INSTDONE1);
1009 break;
1010 default:
1011 WARN_ONCE(1, "Unsupported platform\n");
1012 case 7:
1013 instdone[0] = I915_READ(GEN7_INSTDONE_1);
1014 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1015 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
1016 instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
1017 break;
1018 }
1019}
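
Everything in this new file feeds one consumer model: capture under the gpu_error spinlock, hold the record via a kref, and format it through the windowed drm_i915_error_state_buf so userspace can read a potentially very large dump in chunks (only bytes at or after the requested position are kept). A sketch of how a debugfs-style read handler might drive that API (hypothetical function; the real caller lives in i915_debugfs.c, outside this diff):

	static ssize_t error_state_read_sketch(struct drm_device *dev, char __user *ubuf,
					       size_t count, loff_t pos)
	{
		struct i915_error_state_file_priv error_priv = { .dev = dev };
		struct drm_i915_error_state_buf error_str;
		ssize_t ret;

		/* Size the window to this read: only bytes at/after |pos| are kept. */
		ret = i915_error_state_buf_init(&error_str, count, pos);
		if (ret)
			return ret;

		i915_error_state_get(dev, &error_priv);		/* take a kref on the record */

		ret = i915_error_state_to_str(&error_str, &error_priv);
		if (ret)
			goto out;

		ret = error_str.bytes;				/* bytes formatted into the window */
		if (copy_to_user(ubuf, error_str.buf, ret))
			ret = -EFAULT;

	out:
		i915_error_state_put(&error_priv);		/* drop the kref */
		kfree(error_str.buf);
		return ret;
	}
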
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3d92a7cef154..a03b445ceb5f 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -85,6 +85,12 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
85{ 85{
86 assert_spin_locked(&dev_priv->irq_lock); 86 assert_spin_locked(&dev_priv->irq_lock);
87 87
88 if (dev_priv->pc8.irqs_disabled) {
89 WARN(1, "IRQs disabled\n");
90 dev_priv->pc8.regsave.deimr &= ~mask;
91 return;
92 }
93
88 if ((dev_priv->irq_mask & mask) != 0) { 94 if ((dev_priv->irq_mask & mask) != 0) {
89 dev_priv->irq_mask &= ~mask; 95 dev_priv->irq_mask &= ~mask;
90 I915_WRITE(DEIMR, dev_priv->irq_mask); 96 I915_WRITE(DEIMR, dev_priv->irq_mask);
@@ -97,6 +103,12 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
97{ 103{
98 assert_spin_locked(&dev_priv->irq_lock); 104 assert_spin_locked(&dev_priv->irq_lock);
99 105
106 if (dev_priv->pc8.irqs_disabled) {
107 WARN(1, "IRQs disabled\n");
108 dev_priv->pc8.regsave.deimr |= mask;
109 return;
110 }
111
100 if ((dev_priv->irq_mask & mask) != mask) { 112 if ((dev_priv->irq_mask & mask) != mask) {
101 dev_priv->irq_mask |= mask; 113 dev_priv->irq_mask |= mask;
102 I915_WRITE(DEIMR, dev_priv->irq_mask); 114 I915_WRITE(DEIMR, dev_priv->irq_mask);
@@ -104,6 +116,85 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
104 } 116 }
105} 117}
106 118
119/**
120 * ilk_update_gt_irq - update GTIMR
121 * @dev_priv: driver private
122 * @interrupt_mask: mask of interrupt bits to update
123 * @enabled_irq_mask: mask of interrupt bits to enable
124 */
125static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
126 uint32_t interrupt_mask,
127 uint32_t enabled_irq_mask)
128{
129 assert_spin_locked(&dev_priv->irq_lock);
130
131 if (dev_priv->pc8.irqs_disabled) {
132 WARN(1, "IRQs disabled\n");
133 dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
134 dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
135 interrupt_mask);
136 return;
137 }
138
139 dev_priv->gt_irq_mask &= ~interrupt_mask;
140 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
141 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
142 POSTING_READ(GTIMR);
143}
144
145void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
146{
147 ilk_update_gt_irq(dev_priv, mask, mask);
148}
149
150void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
151{
152 ilk_update_gt_irq(dev_priv, mask, 0);
153}
154
155/**
156 * snb_update_pm_irq - update GEN6_PMIMR
157 * @dev_priv: driver private
158 * @interrupt_mask: mask of interrupt bits to update
159 * @enabled_irq_mask: mask of interrupt bits to enable
160 */
161static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
162 uint32_t interrupt_mask,
163 uint32_t enabled_irq_mask)
164{
165 uint32_t new_val;
166
167 assert_spin_locked(&dev_priv->irq_lock);
168
169 if (dev_priv->pc8.irqs_disabled) {
170 WARN(1, "IRQs disabled\n");
171 dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
172 dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
173 interrupt_mask);
174 return;
175 }
176
177 new_val = dev_priv->pm_irq_mask;
178 new_val &= ~interrupt_mask;
179 new_val |= (~enabled_irq_mask & interrupt_mask);
180
181 if (new_val != dev_priv->pm_irq_mask) {
182 dev_priv->pm_irq_mask = new_val;
183 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
184 POSTING_READ(GEN6_PMIMR);
185 }
186}
187
188void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
189{
190 snb_update_pm_irq(dev_priv, mask, mask);
191}
192
193void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
194{
195 snb_update_pm_irq(dev_priv, mask, 0);
196}
197
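
ilk_update_gt_irq() and snb_update_pm_irq() centralize all GTIMR/GEN6_PMIMR updates behind the irq spinlock so the pc8 code can transparently redirect them into regsave while package C8 has interrupts disabled. Callers only need the lock plus the enable/disable wrappers, as the reworked gen6_pm_rps_work() further down shows; a minimal sketch (hypothetical helper name):

	static void unmask_rps_events_sketch(struct drm_i915_private *dev_priv)
	{
		/* Must hold the irq lock: the helper asserts it and may redirect
		 * the write into pc8.regsave instead of touching the hardware.
		 */
		spin_lock_irq(&dev_priv->irq_lock);
		snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
		spin_unlock_irq(&dev_priv->irq_lock);
	}
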
107static bool ivb_can_enable_err_int(struct drm_device *dev) 198static bool ivb_can_enable_err_int(struct drm_device *dev)
108{ 199{
109 struct drm_i915_private *dev_priv = dev->dev_private; 200 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -128,6 +219,8 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
128 enum pipe pipe; 219 enum pipe pipe;
129 struct intel_crtc *crtc; 220 struct intel_crtc *crtc;
130 221
222 assert_spin_locked(&dev_priv->irq_lock);
223
131 for_each_pipe(pipe) { 224 for_each_pipe(pipe) {
132 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 225 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
133 226
@@ -152,38 +245,75 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
152} 245}
153 246
154static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, 247static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
155 bool enable) 248 enum pipe pipe, bool enable)
156{ 249{
157 struct drm_i915_private *dev_priv = dev->dev_private; 250 struct drm_i915_private *dev_priv = dev->dev_private;
158
159 if (enable) { 251 if (enable) {
252 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
253
160 if (!ivb_can_enable_err_int(dev)) 254 if (!ivb_can_enable_err_int(dev))
161 return; 255 return;
162 256
163 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
164 ERR_INT_FIFO_UNDERRUN_B |
165 ERR_INT_FIFO_UNDERRUN_C);
166
167 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 257 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
168 } else { 258 } else {
259 bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);
260
261 /* Change the state _after_ we've read out the current one. */
169 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 262 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
263
264 if (!was_enabled &&
265 (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
266 DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
267 pipe_name(pipe));
268 }
170 } 269 }
171} 270}
172 271
173static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc, 272/**
273 * ibx_display_interrupt_update - update SDEIMR
274 * @dev_priv: driver private
275 * @interrupt_mask: mask of interrupt bits to update
276 * @enabled_irq_mask: mask of interrupt bits to enable
277 */
278static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
279 uint32_t interrupt_mask,
280 uint32_t enabled_irq_mask)
281{
282 uint32_t sdeimr = I915_READ(SDEIMR);
283 sdeimr &= ~interrupt_mask;
284 sdeimr |= (~enabled_irq_mask & interrupt_mask);
285
286 assert_spin_locked(&dev_priv->irq_lock);
287
288 if (dev_priv->pc8.irqs_disabled &&
289 (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
290 WARN(1, "IRQs disabled\n");
291 dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
292 dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
293 interrupt_mask);
294 return;
295 }
296
297 I915_WRITE(SDEIMR, sdeimr);
298 POSTING_READ(SDEIMR);
299}
300#define ibx_enable_display_interrupt(dev_priv, bits) \
301 ibx_display_interrupt_update((dev_priv), (bits), (bits))
302#define ibx_disable_display_interrupt(dev_priv, bits) \
303 ibx_display_interrupt_update((dev_priv), (bits), 0)
304
305static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
306 enum transcoder pch_transcoder,
174 bool enable) 307 bool enable)
175{ 308{
176 struct drm_device *dev = crtc->base.dev;
177 struct drm_i915_private *dev_priv = dev->dev_private; 309 struct drm_i915_private *dev_priv = dev->dev_private;
178 uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER : 310 uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
179 SDE_TRANSB_FIFO_UNDER; 311 SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
180 312
181 if (enable) 313 if (enable)
182 I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit); 314 ibx_enable_display_interrupt(dev_priv, bit);
183 else 315 else
184 I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit); 316 ibx_disable_display_interrupt(dev_priv, bit);
185
186 POSTING_READ(SDEIMR);
187} 317}
188 318
189static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, 319static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -193,19 +323,26 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
193 struct drm_i915_private *dev_priv = dev->dev_private; 323 struct drm_i915_private *dev_priv = dev->dev_private;
194 324
195 if (enable) { 325 if (enable) {
326 I915_WRITE(SERR_INT,
327 SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
328
196 if (!cpt_can_enable_serr_int(dev)) 329 if (!cpt_can_enable_serr_int(dev))
197 return; 330 return;
198 331
199 I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN | 332 ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
200 SERR_INT_TRANS_B_FIFO_UNDERRUN |
201 SERR_INT_TRANS_C_FIFO_UNDERRUN);
202
203 I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
204 } else { 333 } else {
205 I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT); 334 uint32_t tmp = I915_READ(SERR_INT);
206 } 335 bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);
207 336
208 POSTING_READ(SDEIMR); 337 /* Change the state _after_ we've read out the current one. */
338 ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
339
340 if (!was_enabled &&
341 (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
342 DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
343 transcoder_name(pch_transcoder));
344 }
345 }
209} 346}
210 347
211/** 348/**
@@ -243,7 +380,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
243 if (IS_GEN5(dev) || IS_GEN6(dev)) 380 if (IS_GEN5(dev) || IS_GEN6(dev))
244 ironlake_set_fifo_underrun_reporting(dev, pipe, enable); 381 ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
245 else if (IS_GEN7(dev)) 382 else if (IS_GEN7(dev))
246 ivybridge_set_fifo_underrun_reporting(dev, enable); 383 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
247 384
248done: 385done:
249 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 386 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -269,29 +406,19 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
269 bool enable) 406 bool enable)
270{ 407{
271 struct drm_i915_private *dev_priv = dev->dev_private; 408 struct drm_i915_private *dev_priv = dev->dev_private;
272 enum pipe p; 409 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
273 struct drm_crtc *crtc; 410 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
274 struct intel_crtc *intel_crtc;
275 unsigned long flags; 411 unsigned long flags;
276 bool ret; 412 bool ret;
277 413
278 if (HAS_PCH_LPT(dev)) { 414 /*
279 crtc = NULL; 415 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
280 for_each_pipe(p) { 416 * has only one pch transcoder A that all pipes can use. To avoid racy
281 struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p]; 417 * pch transcoder -> pipe lookups from interrupt code simply store the
282 if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) { 418 * underrun statistics in crtc A. Since we never expose this anywhere
283 crtc = c; 419 * nor use it outside of the fifo underrun code here using the "wrong"
284 break; 420 * crtc on LPT won't cause issues.
285 } 421 */
286 }
287 if (!crtc) {
288 DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
289 return false;
290 }
291 } else {
292 crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
293 }
294 intel_crtc = to_intel_crtc(crtc);
295 422
296 spin_lock_irqsave(&dev_priv->irq_lock, flags); 423 spin_lock_irqsave(&dev_priv->irq_lock, flags);
297 424
@@ -303,7 +430,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
303 intel_crtc->pch_fifo_underrun_disabled = !enable; 430 intel_crtc->pch_fifo_underrun_disabled = !enable;
304 431
305 if (HAS_PCH_IBX(dev)) 432 if (HAS_PCH_IBX(dev))
306 ibx_set_fifo_underrun_reporting(intel_crtc, enable); 433 ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
307 else 434 else
308 cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable); 435 cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
309 436
@@ -319,6 +446,8 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
319 u32 reg = PIPESTAT(pipe); 446 u32 reg = PIPESTAT(pipe);
320 u32 pipestat = I915_READ(reg) & 0x7fff0000; 447 u32 pipestat = I915_READ(reg) & 0x7fff0000;
321 448
449 assert_spin_locked(&dev_priv->irq_lock);
450
322 if ((pipestat & mask) == mask) 451 if ((pipestat & mask) == mask)
323 return; 452 return;
324 453
@@ -334,6 +463,8 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
334 u32 reg = PIPESTAT(pipe); 463 u32 reg = PIPESTAT(pipe);
335 u32 pipestat = I915_READ(reg) & 0x7fff0000; 464 u32 pipestat = I915_READ(reg) & 0x7fff0000;
336 465
466 assert_spin_locked(&dev_priv->irq_lock);
467
337 if ((pipestat & mask) == 0) 468 if ((pipestat & mask) == 0)
338 return; 469 return;
339 470
@@ -625,14 +756,13 @@ static void i915_hotplug_work_func(struct work_struct *work)
625 drm_kms_helper_hotplug_event(dev); 756 drm_kms_helper_hotplug_event(dev);
626} 757}
627 758
628static void ironlake_handle_rps_change(struct drm_device *dev) 759static void ironlake_rps_change_irq_handler(struct drm_device *dev)
629{ 760{
630 drm_i915_private_t *dev_priv = dev->dev_private; 761 drm_i915_private_t *dev_priv = dev->dev_private;
631 u32 busy_up, busy_down, max_avg, min_avg; 762 u32 busy_up, busy_down, max_avg, min_avg;
632 u8 new_delay; 763 u8 new_delay;
633 unsigned long flags;
634 764
635 spin_lock_irqsave(&mchdev_lock, flags); 765 spin_lock(&mchdev_lock);
636 766
637 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 767 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
638 768
@@ -660,7 +790,7 @@ static void ironlake_handle_rps_change(struct drm_device *dev)
660 if (ironlake_set_drps(dev, new_delay)) 790 if (ironlake_set_drps(dev, new_delay))
661 dev_priv->ips.cur_delay = new_delay; 791 dev_priv->ips.cur_delay = new_delay;
662 792
663 spin_unlock_irqrestore(&mchdev_lock, flags); 793 spin_unlock(&mchdev_lock);
664 794
665 return; 795 return;
666} 796}
@@ -668,34 +798,31 @@ static void ironlake_handle_rps_change(struct drm_device *dev)
668static void notify_ring(struct drm_device *dev, 798static void notify_ring(struct drm_device *dev,
669 struct intel_ring_buffer *ring) 799 struct intel_ring_buffer *ring)
670{ 800{
671 struct drm_i915_private *dev_priv = dev->dev_private;
672
673 if (ring->obj == NULL) 801 if (ring->obj == NULL)
674 return; 802 return;
675 803
676 trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); 804 trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
677 805
678 wake_up_all(&ring->irq_queue); 806 wake_up_all(&ring->irq_queue);
679 if (i915_enable_hangcheck) { 807 i915_queue_hangcheck(dev);
680 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
681 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
682 }
683} 808}
684 809
685static void gen6_pm_rps_work(struct work_struct *work) 810static void gen6_pm_rps_work(struct work_struct *work)
686{ 811{
687 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 812 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
688 rps.work); 813 rps.work);
689 u32 pm_iir, pm_imr; 814 u32 pm_iir;
690 u8 new_delay; 815 u8 new_delay;
691 816
692 spin_lock_irq(&dev_priv->rps.lock); 817 spin_lock_irq(&dev_priv->irq_lock);
693 pm_iir = dev_priv->rps.pm_iir; 818 pm_iir = dev_priv->rps.pm_iir;
694 dev_priv->rps.pm_iir = 0; 819 dev_priv->rps.pm_iir = 0;
695 pm_imr = I915_READ(GEN6_PMIMR);
696 /* Make sure not to corrupt PMIMR state used by ringbuffer code */ 820 /* Make sure not to corrupt PMIMR state used by ringbuffer code */
697 I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS); 821 snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
698 spin_unlock_irq(&dev_priv->rps.lock); 822 spin_unlock_irq(&dev_priv->irq_lock);
823
824 /* Make sure we didn't queue anything we're not going to process. */
825 WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);
699 826
700 if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0) 827 if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
701 return; 828 return;
@@ -781,13 +908,12 @@ static void ivybridge_parity_work(struct work_struct *work)
781 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 908 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
782 909
783 spin_lock_irqsave(&dev_priv->irq_lock, flags); 910 spin_lock_irqsave(&dev_priv->irq_lock, flags);
784 dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 911 ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
785 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
786 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 912 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
787 913
788 mutex_unlock(&dev_priv->dev->struct_mutex); 914 mutex_unlock(&dev_priv->dev->struct_mutex);
789 915
790 parity_event[0] = "L3_PARITY_ERROR=1"; 916 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
791 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 917 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
792 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 918 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
793 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 919 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
@@ -804,22 +930,31 @@ static void ivybridge_parity_work(struct work_struct *work)
804 kfree(parity_event[1]); 930 kfree(parity_event[1]);
805} 931}
806 932
807static void ivybridge_handle_parity_error(struct drm_device *dev) 933static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
808{ 934{
809 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 935 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
810 unsigned long flags;
811 936
812 if (!HAS_L3_GPU_CACHE(dev)) 937 if (!HAS_L3_GPU_CACHE(dev))
813 return; 938 return;
814 939
815 spin_lock_irqsave(&dev_priv->irq_lock, flags); 940 spin_lock(&dev_priv->irq_lock);
816 dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 941 ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
817 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 942 spin_unlock(&dev_priv->irq_lock);
818 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
819 943
820 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 944 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
821} 945}
822 946
947static void ilk_gt_irq_handler(struct drm_device *dev,
948 struct drm_i915_private *dev_priv,
949 u32 gt_iir)
950{
951 if (gt_iir &
952 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
953 notify_ring(dev, &dev_priv->ring[RCS]);
954 if (gt_iir & ILK_BSD_USER_INTERRUPT)
955 notify_ring(dev, &dev_priv->ring[VCS]);
956}
957
823static void snb_gt_irq_handler(struct drm_device *dev, 958static void snb_gt_irq_handler(struct drm_device *dev,
824 struct drm_i915_private *dev_priv, 959 struct drm_i915_private *dev_priv,
825 u32 gt_iir) 960 u32 gt_iir)
@@ -841,32 +976,7 @@ static void snb_gt_irq_handler(struct drm_device *dev,
841 } 976 }
842 977
843 if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 978 if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
844 ivybridge_handle_parity_error(dev); 979 ivybridge_parity_error_irq_handler(dev);
845}
846
847/* Legacy way of handling PM interrupts */
848static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
849 u32 pm_iir)
850{
851 unsigned long flags;
852
853 /*
854 * IIR bits should never already be set because IMR should
855 * prevent an interrupt from being shown in IIR. The warning
856 * displays a case where we've unsafely cleared
857 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
858 * type is not a problem, it indicates a problem in the logic.
859 *
860 * The mask bit in IMR is cleared by dev_priv->rps.work.
861 */
862
863 spin_lock_irqsave(&dev_priv->rps.lock, flags);
864 dev_priv->rps.pm_iir |= pm_iir;
865 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
866 POSTING_READ(GEN6_PMIMR);
867 spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
868
869 queue_work(dev_priv->wq, &dev_priv->rps.work);
870} 980}
871 981
872#define HPD_STORM_DETECT_PERIOD 1000 982#define HPD_STORM_DETECT_PERIOD 1000
@@ -886,6 +996,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
886 spin_lock(&dev_priv->irq_lock); 996 spin_lock(&dev_priv->irq_lock);
887 for (i = 1; i < HPD_NUM_PINS; i++) { 997 for (i = 1; i < HPD_NUM_PINS; i++) {
888 998
999 WARN(((hpd[i] & hotplug_trigger) &&
1000 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
1001 "Received HPD interrupt although disabled\n");
1002
889 if (!(hpd[i] & hotplug_trigger) || 1003 if (!(hpd[i] & hotplug_trigger) ||
890 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) 1004 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
891 continue; 1005 continue;
@@ -896,6 +1010,7 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
896 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { 1010 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
897 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; 1011 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
898 dev_priv->hpd_stats[i].hpd_cnt = 0; 1012 dev_priv->hpd_stats[i].hpd_cnt = 0;
1013 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
899 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { 1014 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
900 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; 1015 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
901 dev_priv->hpd_event_bits &= ~(1 << i); 1016 dev_priv->hpd_event_bits &= ~(1 << i);
@@ -903,6 +1018,8 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
903 storm_detected = true; 1018 storm_detected = true;
904 } else { 1019 } else {
905 dev_priv->hpd_stats[i].hpd_cnt++; 1020 dev_priv->hpd_stats[i].hpd_cnt++;
1021 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
1022 dev_priv->hpd_stats[i].hpd_cnt);
906 } 1023 }
907 } 1024 }
908 1025
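The hotplug loop above is a per-pin storm detector: an interrupt either restarts the counting window when more than HPD_STORM_DETECT_PERIOD has elapsed since the window began, trips the storm path once the count exceeds HPD_STORM_THRESHOLD (the pin is then marked HPD_MARK_DISABLED and its event bit dropped), or simply increments the counter, with the new WARN and DRM_DEBUG_KMS lines making each outcome visible. A standalone sketch of the same policy follows, using a monotonic clock instead of jiffies; the struct, function name and threshold value are invented placeholders, not the driver's.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define STORM_DETECT_PERIOD_MS 1000     /* plays the HPD_STORM_DETECT_PERIOD role */
#define STORM_THRESHOLD        5        /* placeholder threshold                  */

struct hpd_pin {
        long long window_start_ms;      /* models hpd_last_jiffies   */
        int cnt;                        /* models hpd_cnt            */
        bool disabled;                  /* models HPD_MARK_DISABLED  */
};

static long long now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Returns true when this interrupt pushes the pin over the storm limit. */
static bool hpd_irq_storm_detect(struct hpd_pin *pin)
{
        long long now = now_ms();

        if (now - pin->window_start_ms > STORM_DETECT_PERIOD_MS) {
                /* Window expired: restart counting from this event. */
                pin->window_start_ms = now;
                pin->cnt = 0;
        } else if (pin->cnt > STORM_THRESHOLD) {
                /* Too many interrupts inside one window: mask the pin. */
                pin->disabled = true;
                return true;
        } else {
                pin->cnt++;
        }
        return false;
}

int main(void)
{
        struct hpd_pin pin = { .window_start_ms = now_ms() };

        for (int i = 0; i < 10; i++) {
                if (hpd_irq_storm_detect(&pin)) {
                        printf("storm detected after %d interrupts\n", i + 1);
                        break;
                }
        }
        return 0;
}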
@@ -928,28 +1045,21 @@ static void dp_aux_irq_handler(struct drm_device *dev)
928 wake_up_all(&dev_priv->gmbus_wait_queue); 1045 wake_up_all(&dev_priv->gmbus_wait_queue);
929} 1046}
930 1047
931/* Unlike gen6_queue_rps_work() from which this function is originally derived, 1048/* The RPS events need forcewake, so we add them to a work queue and mask their
932 * we must be able to deal with other PM interrupts. This is complicated because 1049 * IMR bits until the work is done. Other interrupts can be processed without
933 * of the way in which we use the masks to defer the RPS work (which for 1050 * the work queue. */
934 * posterity is necessary because of forcewake). 1051static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
935 */
936static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
937 u32 pm_iir)
938{ 1052{
939 unsigned long flags; 1053 if (pm_iir & GEN6_PM_RPS_EVENTS) {
1054 spin_lock(&dev_priv->irq_lock);
1055 dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
1056 snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
1057 spin_unlock(&dev_priv->irq_lock);
940 1058
941 spin_lock_irqsave(&dev_priv->rps.lock, flags);
942 dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
943 if (dev_priv->rps.pm_iir) {
944 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
945 /* never want to mask useful interrupts. (also posting read) */
946 WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
947 /* TODO: if queue_work is slow, move it out of the spinlock */
948 queue_work(dev_priv->wq, &dev_priv->rps.work); 1059 queue_work(dev_priv->wq, &dev_priv->rps.work);
949 } 1060 }
950 spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
951 1061
952 if (pm_iir & ~GEN6_PM_RPS_EVENTS) { 1062 if (HAS_VEBOX(dev_priv->dev)) {
953 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1063 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
954 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); 1064 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
955 1065
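The rewritten comment above states the scheme plainly: RPS events need forcewake, so the hard interrupt handler only records which bits fired, masks them via snb_disable_pm_irq() so they cannot retrigger, and queues dev_priv->rps.work; gen6_pm_rps_work() later snapshots and clears rps.pm_iir under the same irq_lock, unmasks the events with snb_enable_pm_irq(), and only then does the slow processing. A small userspace model of that top-half/bottom-half handshake is sketched below: a pthread mutex stands in for dev_priv->irq_lock, two plain words stand in for the pending accumulator and the PMIMR mask, and the EVENT_* names are invented.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define EVENT_RPS_UP   (1u << 0)        /* stand-ins for GEN6_PM_RPS_EVENTS bits */
#define EVENT_RPS_DOWN (1u << 1)
#define EVENT_RPS_MASK (EVENT_RPS_UP | EVENT_RPS_DOWN)

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t pending;                /* models dev_priv->rps.pm_iir */
static uint32_t masked;                 /* models the PMIMR mask bits  */

/* "Top half": must stay short, runs in interrupt context. */
static void rps_irq(uint32_t iir)
{
        pthread_mutex_lock(&irq_lock);
        pending |= iir & EVENT_RPS_MASK;   /* remember what fired          */
        masked  |= iir & EVENT_RPS_MASK;   /* mask it so it cannot re-fire */
        pthread_mutex_unlock(&irq_lock);
        /* queue_work(dev_priv->wq, ...) would happen here */
}

/* "Bottom half": the deferred work item, free to sleep and take forcewake. */
static void rps_work(void)
{
        uint32_t iir;

        pthread_mutex_lock(&irq_lock);
        iir = pending;                     /* snapshot ...                 */
        pending = 0;                       /* ... and clear under the lock */
        masked &= ~EVENT_RPS_MASK;         /* re-enable the events         */
        pthread_mutex_unlock(&irq_lock);

        if (iir & EVENT_RPS_UP)
                printf("raise GPU frequency\n");
        if (iir & EVENT_RPS_DOWN)
                printf("lower GPU frequency\n");
}

int main(void)
{
        rps_irq(EVENT_RPS_UP);
        rps_work();
        return 0;
}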
@@ -1028,8 +1138,8 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1028 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1138 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1029 gmbus_irq_handler(dev); 1139 gmbus_irq_handler(dev);
1030 1140
1031 if (pm_iir & GEN6_PM_RPS_EVENTS) 1141 if (pm_iir)
1032 gen6_queue_rps_work(dev_priv, pm_iir); 1142 gen6_rps_irq_handler(dev_priv, pm_iir);
1033 1143
1034 I915_WRITE(GTIIR, gt_iir); 1144 I915_WRITE(GTIIR, gt_iir);
1035 I915_WRITE(GEN6_PMIIR, pm_iir); 1145 I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -1179,27 +1289,112 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1179 cpt_serr_int_handler(dev); 1289 cpt_serr_int_handler(dev);
1180} 1290}
1181 1291
1182static irqreturn_t ivybridge_irq_handler(int irq, void *arg) 1292static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1293{
1294 struct drm_i915_private *dev_priv = dev->dev_private;
1295
1296 if (de_iir & DE_AUX_CHANNEL_A)
1297 dp_aux_irq_handler(dev);
1298
1299 if (de_iir & DE_GSE)
1300 intel_opregion_asle_intr(dev);
1301
1302 if (de_iir & DE_PIPEA_VBLANK)
1303 drm_handle_vblank(dev, 0);
1304
1305 if (de_iir & DE_PIPEB_VBLANK)
1306 drm_handle_vblank(dev, 1);
1307
1308 if (de_iir & DE_POISON)
1309 DRM_ERROR("Poison interrupt\n");
1310
1311 if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
1312 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
1313 DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
1314
1315 if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
1316 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
1317 DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
1318
1319 if (de_iir & DE_PLANEA_FLIP_DONE) {
1320 intel_prepare_page_flip(dev, 0);
1321 intel_finish_page_flip_plane(dev, 0);
1322 }
1323
1324 if (de_iir & DE_PLANEB_FLIP_DONE) {
1325 intel_prepare_page_flip(dev, 1);
1326 intel_finish_page_flip_plane(dev, 1);
1327 }
1328
1329 /* check event from PCH */
1330 if (de_iir & DE_PCH_EVENT) {
1331 u32 pch_iir = I915_READ(SDEIIR);
1332
1333 if (HAS_PCH_CPT(dev))
1334 cpt_irq_handler(dev, pch_iir);
1335 else
1336 ibx_irq_handler(dev, pch_iir);
1337
1338 /* should clear PCH hotplug event before clear CPU irq */
1339 I915_WRITE(SDEIIR, pch_iir);
1340 }
1341
1342 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1343 ironlake_rps_change_irq_handler(dev);
1344}
1345
1346static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1347{
1348 struct drm_i915_private *dev_priv = dev->dev_private;
1349 int i;
1350
1351 if (de_iir & DE_ERR_INT_IVB)
1352 ivb_err_int_handler(dev);
1353
1354 if (de_iir & DE_AUX_CHANNEL_A_IVB)
1355 dp_aux_irq_handler(dev);
1356
1357 if (de_iir & DE_GSE_IVB)
1358 intel_opregion_asle_intr(dev);
1359
1360 for (i = 0; i < 3; i++) {
1361 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
1362 drm_handle_vblank(dev, i);
1363 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
1364 intel_prepare_page_flip(dev, i);
1365 intel_finish_page_flip_plane(dev, i);
1366 }
1367 }
1368
1369 /* check event from PCH */
1370 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1371 u32 pch_iir = I915_READ(SDEIIR);
1372
1373 cpt_irq_handler(dev, pch_iir);
1374
1375 /* clear PCH hotplug event before clear CPU irq */
1376 I915_WRITE(SDEIIR, pch_iir);
1377 }
1378}
1379
1380static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1183{ 1381{
1184 struct drm_device *dev = (struct drm_device *) arg; 1382 struct drm_device *dev = (struct drm_device *) arg;
1185 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1383 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1186 u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0; 1384 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1187 irqreturn_t ret = IRQ_NONE; 1385 irqreturn_t ret = IRQ_NONE;
1188 int i; 1386 bool err_int_reenable = false;
1189 1387
1190 atomic_inc(&dev_priv->irq_received); 1388 atomic_inc(&dev_priv->irq_received);
1191 1389
1192 /* We get interrupts on unclaimed registers, so check for this before we 1390 /* We get interrupts on unclaimed registers, so check for this before we
1193 * do any I915_{READ,WRITE}. */ 1391 * do any I915_{READ,WRITE}. */
1194 if (IS_HASWELL(dev) && 1392 intel_uncore_check_errors(dev);
1195 (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
1196 DRM_ERROR("Unclaimed register before interrupt\n");
1197 I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
1198 }
1199 1393
1200 /* disable master interrupt before clearing iir */ 1394 /* disable master interrupt before clearing iir */
1201 de_ier = I915_READ(DEIER); 1395 de_ier = I915_READ(DEIER);
1202 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 1396 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1397 POSTING_READ(DEIER);
1203 1398
1204 /* Disable south interrupts. We'll only write to SDEIIR once, so further 1399 /* Disable south interrupts. We'll only write to SDEIIR once, so further
1205 * interrupts will be stored on its back queue, and then we'll be 1400
@@ -1217,62 +1412,42 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
1217 * handler. */ 1412 * handler. */
1218 if (IS_HASWELL(dev)) { 1413 if (IS_HASWELL(dev)) {
1219 spin_lock(&dev_priv->irq_lock); 1414 spin_lock(&dev_priv->irq_lock);
1220 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 1415 err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
1416 if (err_int_reenable)
1417 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
1221 spin_unlock(&dev_priv->irq_lock); 1418 spin_unlock(&dev_priv->irq_lock);
1222 } 1419 }
1223 1420
1224 gt_iir = I915_READ(GTIIR); 1421 gt_iir = I915_READ(GTIIR);
1225 if (gt_iir) { 1422 if (gt_iir) {
1226 snb_gt_irq_handler(dev, dev_priv, gt_iir); 1423 if (INTEL_INFO(dev)->gen >= 6)
1424 snb_gt_irq_handler(dev, dev_priv, gt_iir);
1425 else
1426 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
1227 I915_WRITE(GTIIR, gt_iir); 1427 I915_WRITE(GTIIR, gt_iir);
1228 ret = IRQ_HANDLED; 1428 ret = IRQ_HANDLED;
1229 } 1429 }
1230 1430
1231 de_iir = I915_READ(DEIIR); 1431 de_iir = I915_READ(DEIIR);
1232 if (de_iir) { 1432 if (de_iir) {
1233 if (de_iir & DE_ERR_INT_IVB) 1433 if (INTEL_INFO(dev)->gen >= 7)
1234 ivb_err_int_handler(dev); 1434 ivb_display_irq_handler(dev, de_iir);
1235 1435 else
1236 if (de_iir & DE_AUX_CHANNEL_A_IVB) 1436 ilk_display_irq_handler(dev, de_iir);
1237 dp_aux_irq_handler(dev);
1238
1239 if (de_iir & DE_GSE_IVB)
1240 intel_opregion_asle_intr(dev);
1241
1242 for (i = 0; i < 3; i++) {
1243 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
1244 drm_handle_vblank(dev, i);
1245 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
1246 intel_prepare_page_flip(dev, i);
1247 intel_finish_page_flip_plane(dev, i);
1248 }
1249 }
1250
1251 /* check event from PCH */
1252 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1253 u32 pch_iir = I915_READ(SDEIIR);
1254
1255 cpt_irq_handler(dev, pch_iir);
1256
1257 /* clear PCH hotplug event before clear CPU irq */
1258 I915_WRITE(SDEIIR, pch_iir);
1259 }
1260
1261 I915_WRITE(DEIIR, de_iir); 1437 I915_WRITE(DEIIR, de_iir);
1262 ret = IRQ_HANDLED; 1438 ret = IRQ_HANDLED;
1263 } 1439 }
1264 1440
1265 pm_iir = I915_READ(GEN6_PMIIR); 1441 if (INTEL_INFO(dev)->gen >= 6) {
1266 if (pm_iir) { 1442 u32 pm_iir = I915_READ(GEN6_PMIIR);
1267 if (IS_HASWELL(dev)) 1443 if (pm_iir) {
1268 hsw_pm_irq_handler(dev_priv, pm_iir); 1444 gen6_rps_irq_handler(dev_priv, pm_iir);
1269 else if (pm_iir & GEN6_PM_RPS_EVENTS) 1445 I915_WRITE(GEN6_PMIIR, pm_iir);
1270 gen6_queue_rps_work(dev_priv, pm_iir); 1446 ret = IRQ_HANDLED;
1271 I915_WRITE(GEN6_PMIIR, pm_iir); 1447 }
1272 ret = IRQ_HANDLED;
1273 } 1448 }
1274 1449
1275 if (IS_HASWELL(dev)) { 1450 if (err_int_reenable) {
1276 spin_lock(&dev_priv->irq_lock); 1451 spin_lock(&dev_priv->irq_lock);
1277 if (ivb_can_enable_err_int(dev)) 1452 if (ivb_can_enable_err_int(dev))
1278 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 1453 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
@@ -1289,119 +1464,6 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
1289 return ret; 1464 return ret;
1290} 1465}
1291 1466
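The merged handler above follows the choreography the comments describe: gate the master bit in DEIER (with a posting read), park the south interrupts by zeroing SDEIER so SDEIIR is written only once, then read each IIR, dispatch on it, write the value back to ack it, and finally restore DEIER/SDEIER so anything that latched meanwhile retriggers the line. What follows is only a toy model of that drain-while-gated pattern with fake in-memory "registers"; it collapses the several IER/IIR pairs into one and is not the driver's code.

#include <stdint.h>
#include <stdio.h>

#define MASTER_IRQ_CONTROL (1u << 31)   /* models DE_MASTER_IRQ_CONTROL */

/* Fake "registers": real hardware keeps latching events into iir even
 * while the master enable in ier is cleared. */
static uint32_t ier = MASTER_IRQ_CONTROL | 0x3;
static uint32_t iir;

static void handle_events(uint32_t bits)
{
        printf("handling events 0x%x\n", (unsigned)bits);
}

static int irq_handler(void)
{
        uint32_t saved_ier = ier;
        uint32_t snapshot;
        int handled = 0;

        /* 1. Gate the master enable so nothing new reaches the CPU while
         *    the identity register is drained. */
        ier = saved_ier & ~MASTER_IRQ_CONTROL;

        /* 2. Snapshot the pending bits, act on the snapshot, then ack by
         *    writing it back (modelled here as clearing what was seen). */
        snapshot = iir;
        if (snapshot) {
                handle_events(snapshot);
                iir &= ~snapshot;
                handled = 1;
        }

        /* 3. Restore the master enable; anything that latched in between
         *    is still pending and will assert the line again. */
        ier = saved_ier;
        return handled;
}

int main(void)
{
        iir = 0x1;                      /* pretend one display event fired */
        printf("irq handled: %d\n", irq_handler());
        return 0;
}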
1292static void ilk_gt_irq_handler(struct drm_device *dev,
1293 struct drm_i915_private *dev_priv,
1294 u32 gt_iir)
1295{
1296 if (gt_iir &
1297 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1298 notify_ring(dev, &dev_priv->ring[RCS]);
1299 if (gt_iir & ILK_BSD_USER_INTERRUPT)
1300 notify_ring(dev, &dev_priv->ring[VCS]);
1301}
1302
1303static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1304{
1305 struct drm_device *dev = (struct drm_device *) arg;
1306 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1307 int ret = IRQ_NONE;
1308 u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
1309
1310 atomic_inc(&dev_priv->irq_received);
1311
1312 /* disable master interrupt before clearing iir */
1313 de_ier = I915_READ(DEIER);
1314 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1315 POSTING_READ(DEIER);
1316
1317 /* Disable south interrupts. We'll only write to SDEIIR once, so further
1318 * interrupts will be stored on its back queue, and then we'll be
1319 * able to process them after we restore SDEIER (as soon as we restore
1320 * it, we'll get an interrupt if SDEIIR still has something to process
1321 * due to its back queue). */
1322 sde_ier = I915_READ(SDEIER);
1323 I915_WRITE(SDEIER, 0);
1324 POSTING_READ(SDEIER);
1325
1326 de_iir = I915_READ(DEIIR);
1327 gt_iir = I915_READ(GTIIR);
1328 pm_iir = I915_READ(GEN6_PMIIR);
1329
1330 if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
1331 goto done;
1332
1333 ret = IRQ_HANDLED;
1334
1335 if (IS_GEN5(dev))
1336 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
1337 else
1338 snb_gt_irq_handler(dev, dev_priv, gt_iir);
1339
1340 if (de_iir & DE_AUX_CHANNEL_A)
1341 dp_aux_irq_handler(dev);
1342
1343 if (de_iir & DE_GSE)
1344 intel_opregion_asle_intr(dev);
1345
1346 if (de_iir & DE_PIPEA_VBLANK)
1347 drm_handle_vblank(dev, 0);
1348
1349 if (de_iir & DE_PIPEB_VBLANK)
1350 drm_handle_vblank(dev, 1);
1351
1352 if (de_iir & DE_POISON)
1353 DRM_ERROR("Poison interrupt\n");
1354
1355 if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
1356 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
1357 DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
1358
1359 if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
1360 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
1361 DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
1362
1363 if (de_iir & DE_PLANEA_FLIP_DONE) {
1364 intel_prepare_page_flip(dev, 0);
1365 intel_finish_page_flip_plane(dev, 0);
1366 }
1367
1368 if (de_iir & DE_PLANEB_FLIP_DONE) {
1369 intel_prepare_page_flip(dev, 1);
1370 intel_finish_page_flip_plane(dev, 1);
1371 }
1372
1373 /* check event from PCH */
1374 if (de_iir & DE_PCH_EVENT) {
1375 u32 pch_iir = I915_READ(SDEIIR);
1376
1377 if (HAS_PCH_CPT(dev))
1378 cpt_irq_handler(dev, pch_iir);
1379 else
1380 ibx_irq_handler(dev, pch_iir);
1381
1382 /* should clear PCH hotplug event before clear CPU irq */
1383 I915_WRITE(SDEIIR, pch_iir);
1384 }
1385
1386 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1387 ironlake_handle_rps_change(dev);
1388
1389 if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
1390 gen6_queue_rps_work(dev_priv, pm_iir);
1391
1392 I915_WRITE(GTIIR, gt_iir);
1393 I915_WRITE(DEIIR, de_iir);
1394 I915_WRITE(GEN6_PMIIR, pm_iir);
1395
1396done:
1397 I915_WRITE(DEIER, de_ier);
1398 POSTING_READ(DEIER);
1399 I915_WRITE(SDEIER, sde_ier);
1400 POSTING_READ(SDEIER);
1401
1402 return ret;
1403}
1404
1405/** 1467/**
1406 * i915_error_work_func - do process context error handling work 1468 * i915_error_work_func - do process context error handling work
1407 * @work: work struct 1469 * @work: work struct
@@ -1417,9 +1479,9 @@ static void i915_error_work_func(struct work_struct *work)
1417 gpu_error); 1479 gpu_error);
1418 struct drm_device *dev = dev_priv->dev; 1480 struct drm_device *dev = dev_priv->dev;
1419 struct intel_ring_buffer *ring; 1481 struct intel_ring_buffer *ring;
1420 char *error_event[] = { "ERROR=1", NULL }; 1482 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1421 char *reset_event[] = { "RESET=1", NULL }; 1483 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1422 char *reset_done_event[] = { "ERROR=0", NULL }; 1484 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
1423 int i, ret; 1485 int i, ret;
1424 1486
1425 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 1487 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
@@ -1470,535 +1532,6 @@ static void i915_error_work_func(struct work_struct *work)
1470 } 1532 }
1471} 1533}
1472 1534
1473/* NB: please notice the memset */
1474static void i915_get_extra_instdone(struct drm_device *dev,
1475 uint32_t *instdone)
1476{
1477 struct drm_i915_private *dev_priv = dev->dev_private;
1478 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
1479
1480 switch(INTEL_INFO(dev)->gen) {
1481 case 2:
1482 case 3:
1483 instdone[0] = I915_READ(INSTDONE);
1484 break;
1485 case 4:
1486 case 5:
1487 case 6:
1488 instdone[0] = I915_READ(INSTDONE_I965);
1489 instdone[1] = I915_READ(INSTDONE1);
1490 break;
1491 default:
1492 WARN_ONCE(1, "Unsupported platform\n");
1493 case 7:
1494 instdone[0] = I915_READ(GEN7_INSTDONE_1);
1495 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1496 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
1497 instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
1498 break;
1499 }
1500}
1501
1502#ifdef CONFIG_DEBUG_FS
1503static struct drm_i915_error_object *
1504i915_error_object_create_sized(struct drm_i915_private *dev_priv,
1505 struct drm_i915_gem_object *src,
1506 const int num_pages)
1507{
1508 struct drm_i915_error_object *dst;
1509 int i;
1510 u32 reloc_offset;
1511
1512 if (src == NULL || src->pages == NULL)
1513 return NULL;
1514
1515 dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
1516 if (dst == NULL)
1517 return NULL;
1518
1519 reloc_offset = src->gtt_offset;
1520 for (i = 0; i < num_pages; i++) {
1521 unsigned long flags;
1522 void *d;
1523
1524 d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
1525 if (d == NULL)
1526 goto unwind;
1527
1528 local_irq_save(flags);
1529 if (reloc_offset < dev_priv->gtt.mappable_end &&
1530 src->has_global_gtt_mapping) {
1531 void __iomem *s;
1532
1533 /* Simply ignore tiling or any overlapping fence.
1534 * It's part of the error state, and this hopefully
1535 * captures what the GPU read.
1536 */
1537
1538 s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
1539 reloc_offset);
1540 memcpy_fromio(d, s, PAGE_SIZE);
1541 io_mapping_unmap_atomic(s);
1542 } else if (src->stolen) {
1543 unsigned long offset;
1544
1545 offset = dev_priv->mm.stolen_base;
1546 offset += src->stolen->start;
1547 offset += i << PAGE_SHIFT;
1548
1549 memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
1550 } else {
1551 struct page *page;
1552 void *s;
1553
1554 page = i915_gem_object_get_page(src, i);
1555
1556 drm_clflush_pages(&page, 1);
1557
1558 s = kmap_atomic(page);
1559 memcpy(d, s, PAGE_SIZE);
1560 kunmap_atomic(s);
1561
1562 drm_clflush_pages(&page, 1);
1563 }
1564 local_irq_restore(flags);
1565
1566 dst->pages[i] = d;
1567
1568 reloc_offset += PAGE_SIZE;
1569 }
1570 dst->page_count = num_pages;
1571 dst->gtt_offset = src->gtt_offset;
1572
1573 return dst;
1574
1575unwind:
1576 while (i--)
1577 kfree(dst->pages[i]);
1578 kfree(dst);
1579 return NULL;
1580}
1581#define i915_error_object_create(dev_priv, src) \
1582 i915_error_object_create_sized((dev_priv), (src), \
1583 (src)->base.size>>PAGE_SHIFT)
1584
1585static void
1586i915_error_object_free(struct drm_i915_error_object *obj)
1587{
1588 int page;
1589
1590 if (obj == NULL)
1591 return;
1592
1593 for (page = 0; page < obj->page_count; page++)
1594 kfree(obj->pages[page]);
1595
1596 kfree(obj);
1597}
1598
1599void
1600i915_error_state_free(struct kref *error_ref)
1601{
1602 struct drm_i915_error_state *error = container_of(error_ref,
1603 typeof(*error), ref);
1604 int i;
1605
1606 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
1607 i915_error_object_free(error->ring[i].batchbuffer);
1608 i915_error_object_free(error->ring[i].ringbuffer);
1609 i915_error_object_free(error->ring[i].ctx);
1610 kfree(error->ring[i].requests);
1611 }
1612
1613 kfree(error->active_bo);
1614 kfree(error->overlay);
1615 kfree(error->display);
1616 kfree(error);
1617}
1618static void capture_bo(struct drm_i915_error_buffer *err,
1619 struct drm_i915_gem_object *obj)
1620{
1621 err->size = obj->base.size;
1622 err->name = obj->base.name;
1623 err->rseqno = obj->last_read_seqno;
1624 err->wseqno = obj->last_write_seqno;
1625 err->gtt_offset = obj->gtt_offset;
1626 err->read_domains = obj->base.read_domains;
1627 err->write_domain = obj->base.write_domain;
1628 err->fence_reg = obj->fence_reg;
1629 err->pinned = 0;
1630 if (obj->pin_count > 0)
1631 err->pinned = 1;
1632 if (obj->user_pin_count > 0)
1633 err->pinned = -1;
1634 err->tiling = obj->tiling_mode;
1635 err->dirty = obj->dirty;
1636 err->purgeable = obj->madv != I915_MADV_WILLNEED;
1637 err->ring = obj->ring ? obj->ring->id : -1;
1638 err->cache_level = obj->cache_level;
1639}
1640
1641static u32 capture_active_bo(struct drm_i915_error_buffer *err,
1642 int count, struct list_head *head)
1643{
1644 struct drm_i915_gem_object *obj;
1645 int i = 0;
1646
1647 list_for_each_entry(obj, head, mm_list) {
1648 capture_bo(err++, obj);
1649 if (++i == count)
1650 break;
1651 }
1652
1653 return i;
1654}
1655
1656static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
1657 int count, struct list_head *head)
1658{
1659 struct drm_i915_gem_object *obj;
1660 int i = 0;
1661
1662 list_for_each_entry(obj, head, global_list) {
1663 if (obj->pin_count == 0)
1664 continue;
1665
1666 capture_bo(err++, obj);
1667 if (++i == count)
1668 break;
1669 }
1670
1671 return i;
1672}
1673
1674static void i915_gem_record_fences(struct drm_device *dev,
1675 struct drm_i915_error_state *error)
1676{
1677 struct drm_i915_private *dev_priv = dev->dev_private;
1678 int i;
1679
1680 /* Fences */
1681 switch (INTEL_INFO(dev)->gen) {
1682 case 7:
1683 case 6:
1684 for (i = 0; i < dev_priv->num_fence_regs; i++)
1685 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
1686 break;
1687 case 5:
1688 case 4:
1689 for (i = 0; i < 16; i++)
1690 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
1691 break;
1692 case 3:
1693 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
1694 for (i = 0; i < 8; i++)
1695 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
1696 case 2:
1697 for (i = 0; i < 8; i++)
1698 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
1699 break;
1700
1701 default:
1702 BUG();
1703 }
1704}
1705
1706static struct drm_i915_error_object *
1707i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1708 struct intel_ring_buffer *ring)
1709{
1710 struct drm_i915_gem_object *obj;
1711 u32 seqno;
1712
1713 if (!ring->get_seqno)
1714 return NULL;
1715
1716 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
1717 u32 acthd = I915_READ(ACTHD);
1718
1719 if (WARN_ON(ring->id != RCS))
1720 return NULL;
1721
1722 obj = ring->private;
1723 if (acthd >= obj->gtt_offset &&
1724 acthd < obj->gtt_offset + obj->base.size)
1725 return i915_error_object_create(dev_priv, obj);
1726 }
1727
1728 seqno = ring->get_seqno(ring, false);
1729 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1730 if (obj->ring != ring)
1731 continue;
1732
1733 if (i915_seqno_passed(seqno, obj->last_read_seqno))
1734 continue;
1735
1736 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
1737 continue;
1738
1739 /* We need to copy these to an anonymous buffer as the simplest
1740 * method to avoid being overwritten by userspace.
1741 */
1742 return i915_error_object_create(dev_priv, obj);
1743 }
1744
1745 return NULL;
1746}
1747
1748static void i915_record_ring_state(struct drm_device *dev,
1749 struct drm_i915_error_state *error,
1750 struct intel_ring_buffer *ring)
1751{
1752 struct drm_i915_private *dev_priv = dev->dev_private;
1753
1754 if (INTEL_INFO(dev)->gen >= 6) {
1755 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
1756 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
1757 error->semaphore_mboxes[ring->id][0]
1758 = I915_READ(RING_SYNC_0(ring->mmio_base));
1759 error->semaphore_mboxes[ring->id][1]
1760 = I915_READ(RING_SYNC_1(ring->mmio_base));
1761 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
1762 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
1763 }
1764
1765 if (INTEL_INFO(dev)->gen >= 4) {
1766 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
1767 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
1768 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
1769 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
1770 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
1771 if (ring->id == RCS)
1772 error->bbaddr = I915_READ64(BB_ADDR);
1773 } else {
1774 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
1775 error->ipeir[ring->id] = I915_READ(IPEIR);
1776 error->ipehr[ring->id] = I915_READ(IPEHR);
1777 error->instdone[ring->id] = I915_READ(INSTDONE);
1778 }
1779
1780 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
1781 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
1782 error->seqno[ring->id] = ring->get_seqno(ring, false);
1783 error->acthd[ring->id] = intel_ring_get_active_head(ring);
1784 error->head[ring->id] = I915_READ_HEAD(ring);
1785 error->tail[ring->id] = I915_READ_TAIL(ring);
1786 error->ctl[ring->id] = I915_READ_CTL(ring);
1787
1788 error->cpu_ring_head[ring->id] = ring->head;
1789 error->cpu_ring_tail[ring->id] = ring->tail;
1790}
1791
1792
1793static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
1794 struct drm_i915_error_state *error,
1795 struct drm_i915_error_ring *ering)
1796{
1797 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1798 struct drm_i915_gem_object *obj;
1799
1800 /* Currently render ring is the only HW context user */
1801 if (ring->id != RCS || !error->ccid)
1802 return;
1803
1804 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
1805 if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
1806 ering->ctx = i915_error_object_create_sized(dev_priv,
1807 obj, 1);
1808 }
1809 }
1810}
1811
1812static void i915_gem_record_rings(struct drm_device *dev,
1813 struct drm_i915_error_state *error)
1814{
1815 struct drm_i915_private *dev_priv = dev->dev_private;
1816 struct intel_ring_buffer *ring;
1817 struct drm_i915_gem_request *request;
1818 int i, count;
1819
1820 for_each_ring(ring, dev_priv, i) {
1821 i915_record_ring_state(dev, error, ring);
1822
1823 error->ring[i].batchbuffer =
1824 i915_error_first_batchbuffer(dev_priv, ring);
1825
1826 error->ring[i].ringbuffer =
1827 i915_error_object_create(dev_priv, ring->obj);
1828
1829
1830 i915_gem_record_active_context(ring, error, &error->ring[i]);
1831
1832 count = 0;
1833 list_for_each_entry(request, &ring->request_list, list)
1834 count++;
1835
1836 error->ring[i].num_requests = count;
1837 error->ring[i].requests =
1838 kmalloc(count*sizeof(struct drm_i915_error_request),
1839 GFP_ATOMIC);
1840 if (error->ring[i].requests == NULL) {
1841 error->ring[i].num_requests = 0;
1842 continue;
1843 }
1844
1845 count = 0;
1846 list_for_each_entry(request, &ring->request_list, list) {
1847 struct drm_i915_error_request *erq;
1848
1849 erq = &error->ring[i].requests[count++];
1850 erq->seqno = request->seqno;
1851 erq->jiffies = request->emitted_jiffies;
1852 erq->tail = request->tail;
1853 }
1854 }
1855}
1856
1857/**
1858 * i915_capture_error_state - capture an error record for later analysis
1859 * @dev: drm device
1860 *
1861 * Should be called when an error is detected (either a hang or an error
1862 * interrupt) to capture error state from the time of the error. Fills
1863 * out a structure which becomes available in debugfs for user level tools
1864 * to pick up.
1865 */
1866static void i915_capture_error_state(struct drm_device *dev)
1867{
1868 struct drm_i915_private *dev_priv = dev->dev_private;
1869 struct drm_i915_gem_object *obj;
1870 struct drm_i915_error_state *error;
1871 unsigned long flags;
1872 int i, pipe;
1873
1874 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1875 error = dev_priv->gpu_error.first_error;
1876 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1877 if (error)
1878 return;
1879
1880 /* Account for pipe specific data like PIPE*STAT */
1881 error = kzalloc(sizeof(*error), GFP_ATOMIC);
1882 if (!error) {
1883 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1884 return;
1885 }
1886
1887 DRM_INFO("capturing error event; look for more information in "
1888 "/sys/kernel/debug/dri/%d/i915_error_state\n",
1889 dev->primary->index);
1890
1891 kref_init(&error->ref);
1892 error->eir = I915_READ(EIR);
1893 error->pgtbl_er = I915_READ(PGTBL_ER);
1894 if (HAS_HW_CONTEXTS(dev))
1895 error->ccid = I915_READ(CCID);
1896
1897 if (HAS_PCH_SPLIT(dev))
1898 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1899 else if (IS_VALLEYVIEW(dev))
1900 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1901 else if (IS_GEN2(dev))
1902 error->ier = I915_READ16(IER);
1903 else
1904 error->ier = I915_READ(IER);
1905
1906 if (INTEL_INFO(dev)->gen >= 6)
1907 error->derrmr = I915_READ(DERRMR);
1908
1909 if (IS_VALLEYVIEW(dev))
1910 error->forcewake = I915_READ(FORCEWAKE_VLV);
1911 else if (INTEL_INFO(dev)->gen >= 7)
1912 error->forcewake = I915_READ(FORCEWAKE_MT);
1913 else if (INTEL_INFO(dev)->gen == 6)
1914 error->forcewake = I915_READ(FORCEWAKE);
1915
1916 if (!HAS_PCH_SPLIT(dev))
1917 for_each_pipe(pipe)
1918 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1919
1920 if (INTEL_INFO(dev)->gen >= 6) {
1921 error->error = I915_READ(ERROR_GEN6);
1922 error->done_reg = I915_READ(DONE_REG);
1923 }
1924
1925 if (INTEL_INFO(dev)->gen == 7)
1926 error->err_int = I915_READ(GEN7_ERR_INT);
1927
1928 i915_get_extra_instdone(dev, error->extra_instdone);
1929
1930 i915_gem_record_fences(dev, error);
1931 i915_gem_record_rings(dev, error);
1932
1933 /* Record buffers on the active and pinned lists. */
1934 error->active_bo = NULL;
1935 error->pinned_bo = NULL;
1936
1937 i = 0;
1938 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1939 i++;
1940 error->active_bo_count = i;
1941 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1942 if (obj->pin_count)
1943 i++;
1944 error->pinned_bo_count = i - error->active_bo_count;
1945
1946 error->active_bo = NULL;
1947 error->pinned_bo = NULL;
1948 if (i) {
1949 error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
1950 GFP_ATOMIC);
1951 if (error->active_bo)
1952 error->pinned_bo =
1953 error->active_bo + error->active_bo_count;
1954 }
1955
1956 if (error->active_bo)
1957 error->active_bo_count =
1958 capture_active_bo(error->active_bo,
1959 error->active_bo_count,
1960 &dev_priv->mm.active_list);
1961
1962 if (error->pinned_bo)
1963 error->pinned_bo_count =
1964 capture_pinned_bo(error->pinned_bo,
1965 error->pinned_bo_count,
1966 &dev_priv->mm.bound_list);
1967
1968 do_gettimeofday(&error->time);
1969
1970 error->overlay = intel_overlay_capture_error_state(dev);
1971 error->display = intel_display_capture_error_state(dev);
1972
1973 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1974 if (dev_priv->gpu_error.first_error == NULL) {
1975 dev_priv->gpu_error.first_error = error;
1976 error = NULL;
1977 }
1978 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1979
1980 if (error)
1981 i915_error_state_free(&error->ref);
1982}
1983
1984void i915_destroy_error_state(struct drm_device *dev)
1985{
1986 struct drm_i915_private *dev_priv = dev->dev_private;
1987 struct drm_i915_error_state *error;
1988 unsigned long flags;
1989
1990 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1991 error = dev_priv->gpu_error.first_error;
1992 dev_priv->gpu_error.first_error = NULL;
1993 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1994
1995 if (error)
1996 kref_put(&error->ref, i915_error_state_free);
1997}
1998#else
1999#define i915_capture_error_state(x)
2000#endif
2001
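The error-capture block removed above relies on a count-then-copy idiom for the active and pinned buffer lists: one pass counts the entries, an array of exactly that size is allocated with GFP_ATOMIC (giving up quietly on failure), and a second pass fills at most that many records even if the list has changed in the meantime -- note the "if (++i == count) break;" in capture_active_bo() and capture_pinned_bo(). A self-contained sketch of the same idiom over an ordinary linked list; the struct and field names below are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

struct obj {                            /* stands in for a GEM object   */
        int id;
        size_t size;
        struct obj *next;
};

struct obj_record {                     /* stands in for the error copy */
        int id;
        size_t size;
};

/* First pass: count list entries; the list may keep changing afterwards. */
static int count_objs(const struct obj *head)
{
        int n = 0;

        for (; head; head = head->next)
                n++;
        return n;
}

/* Second pass: fill at most 'count' records, even if the list has grown. */
static int capture_objs(struct obj_record *rec, int count, const struct obj *head)
{
        int i = 0;

        for (; head && i < count; head = head->next) {
                rec[i].id = head->id;
                rec[i].size = head->size;
                i++;
        }
        return i;                       /* may be short if the list shrank */
}

int main(void)
{
        struct obj c = { 3, 4096, NULL };
        struct obj b = { 2, 8192, &c };
        struct obj a = { 1, 4096, &b };
        int count = count_objs(&a);
        struct obj_record *rec = calloc(count, sizeof(*rec));

        if (!rec)                       /* like the GFP_ATOMIC path: bail out */
                return 1;
        count = capture_objs(rec, count, &a);
        printf("captured %d objects\n", count);
        free(rec);
        return 0;
}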
2002static void i915_report_and_clear_eir(struct drm_device *dev) 1535static void i915_report_and_clear_eir(struct drm_device *dev)
2003{ 1536{
2004 struct drm_i915_private *dev_priv = dev->dev_private; 1537 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2155,10 +1688,10 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in
2155 if (INTEL_INFO(dev)->gen >= 4) { 1688 if (INTEL_INFO(dev)->gen >= 4) {
2156 int dspsurf = DSPSURF(intel_crtc->plane); 1689 int dspsurf = DSPSURF(intel_crtc->plane);
2157 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 1690 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
2158 obj->gtt_offset; 1691 i915_gem_obj_ggtt_offset(obj);
2159 } else { 1692 } else {
2160 int dspaddr = DSPADDR(intel_crtc->plane); 1693 int dspaddr = DSPADDR(intel_crtc->plane);
2161 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + 1694 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
2162 crtc->y * crtc->fb->pitches[0] + 1695 crtc->y * crtc->fb->pitches[0] +
2163 crtc->x * crtc->fb->bits_per_pixel/8); 1696 crtc->x * crtc->fb->bits_per_pixel/8);
2164 } 1697 }
@@ -2202,29 +1735,14 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2202{ 1735{
2203 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1736 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2204 unsigned long irqflags; 1737 unsigned long irqflags;
1738 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
1739 DE_PIPE_VBLANK_ILK(pipe);
2205 1740
2206 if (!i915_pipe_enabled(dev, pipe)) 1741 if (!i915_pipe_enabled(dev, pipe))
2207 return -EINVAL; 1742 return -EINVAL;
2208 1743
2209 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1744 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2210 ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 1745 ironlake_enable_display_irq(dev_priv, bit);
2211 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
2212 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2213
2214 return 0;
2215}
2216
2217static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
2218{
2219 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2220 unsigned long irqflags;
2221
2222 if (!i915_pipe_enabled(dev, pipe))
2223 return -EINVAL;
2224
2225 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2226 ironlake_enable_display_irq(dev_priv,
2227 DE_PIPEA_VBLANK_IVB << (5 * pipe));
2228 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1746 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2229 1747
2230 return 0; 1748 return 0;
@@ -2275,21 +1793,11 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2275{ 1793{
2276 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1794 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2277 unsigned long irqflags; 1795 unsigned long irqflags;
1796 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
1797 DE_PIPE_VBLANK_ILK(pipe);
2278 1798
2279 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1799 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2280 ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 1800 ironlake_disable_display_irq(dev_priv, bit);
2281 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
2282 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2283}
2284
2285static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
2286{
2287 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2288 unsigned long irqflags;
2289
2290 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2291 ironlake_disable_display_irq(dev_priv,
2292 DE_PIPEA_VBLANK_IVB << (pipe * 5));
2293 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1801 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2294} 1802}
2295 1803
@@ -2392,10 +1900,10 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
2392 u32 tmp; 1900 u32 tmp;
2393 1901
2394 if (ring->hangcheck.acthd != acthd) 1902 if (ring->hangcheck.acthd != acthd)
2395 return active; 1903 return HANGCHECK_ACTIVE;
2396 1904
2397 if (IS_GEN2(dev)) 1905 if (IS_GEN2(dev))
2398 return hung; 1906 return HANGCHECK_HUNG;
2399 1907
2400 /* Is the chip hanging on a WAIT_FOR_EVENT? 1908 /* Is the chip hanging on a WAIT_FOR_EVENT?
2401 * If so we can simply poke the RB_WAIT bit 1909 * If so we can simply poke the RB_WAIT bit
@@ -2407,24 +1915,24 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
2407 DRM_ERROR("Kicking stuck wait on %s\n", 1915 DRM_ERROR("Kicking stuck wait on %s\n",
2408 ring->name); 1916 ring->name);
2409 I915_WRITE_CTL(ring, tmp); 1917 I915_WRITE_CTL(ring, tmp);
2410 return kick; 1918 return HANGCHECK_KICK;
2411 } 1919 }
2412 1920
2413 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 1921 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2414 switch (semaphore_passed(ring)) { 1922 switch (semaphore_passed(ring)) {
2415 default: 1923 default:
2416 return hung; 1924 return HANGCHECK_HUNG;
2417 case 1: 1925 case 1:
2418 DRM_ERROR("Kicking stuck semaphore on %s\n", 1926 DRM_ERROR("Kicking stuck semaphore on %s\n",
2419 ring->name); 1927 ring->name);
2420 I915_WRITE_CTL(ring, tmp); 1928 I915_WRITE_CTL(ring, tmp);
2421 return kick; 1929 return HANGCHECK_KICK;
2422 case 0: 1930 case 0:
2423 return wait; 1931 return HANGCHECK_WAIT;
2424 } 1932 }
2425 } 1933 }
2426 1934
2427 return hung; 1935 return HANGCHECK_HUNG;
2428} 1936}
2429 1937
2430/** 1938/**
@@ -2435,7 +1943,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
2435 * we kick the ring. If we see no progress on three subsequent calls 1943 * we kick the ring. If we see no progress on three subsequent calls
2436 * we assume chip is wedged and try to fix it by resetting the chip. 1944 * we assume chip is wedged and try to fix it by resetting the chip.
2437 */ 1945 */
2438void i915_hangcheck_elapsed(unsigned long data) 1946static void i915_hangcheck_elapsed(unsigned long data)
2439{ 1947{
2440 struct drm_device *dev = (struct drm_device *)data; 1948 struct drm_device *dev = (struct drm_device *)data;
2441 drm_i915_private_t *dev_priv = dev->dev_private; 1949 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2471,8 +1979,6 @@ void i915_hangcheck_elapsed(unsigned long data)
2471 } else 1979 } else
2472 busy = false; 1980 busy = false;
2473 } else { 1981 } else {
2474 int score;
2475
2476 /* We always increment the hangcheck score 1982 /* We always increment the hangcheck score
2477 * if the ring is busy and still processing 1983 * if the ring is busy and still processing
2478 * the same request, so that no single request 1984 * the same request, so that no single request
@@ -2492,21 +1998,19 @@ void i915_hangcheck_elapsed(unsigned long data)
2492 acthd); 1998 acthd);
2493 1999
2494 switch (ring->hangcheck.action) { 2000 switch (ring->hangcheck.action) {
2495 case wait: 2001 case HANGCHECK_WAIT:
2496 score = 0;
2497 break; 2002 break;
2498 case active: 2003 case HANGCHECK_ACTIVE:
2499 score = BUSY; 2004 ring->hangcheck.score += BUSY;
2500 break; 2005 break;
2501 case kick: 2006 case HANGCHECK_KICK:
2502 score = KICK; 2007 ring->hangcheck.score += KICK;
2503 break; 2008 break;
2504 case hung: 2009 case HANGCHECK_HUNG:
2505 score = HUNG; 2010 ring->hangcheck.score += HUNG;
2506 stuck[i] = true; 2011 stuck[i] = true;
2507 break; 2012 break;
2508 } 2013 }
2509 ring->hangcheck.score += score;
2510 } 2014 }
2511 } else { 2015 } else {
2512 /* Gradually reduce the count so that we catch DoS 2016 /* Gradually reduce the count so that we catch DoS
@@ -2536,9 +2040,17 @@ void i915_hangcheck_elapsed(unsigned long data)
2536 if (busy_count) 2040 if (busy_count)
2537 /* Reset timer case chip hangs without another request 2041 /* Reset timer case chip hangs without another request
2538 * being added */ 2042 * being added */
2539 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 2043 i915_queue_hangcheck(dev);
2540 round_jiffies_up(jiffies + 2044}
2541 DRM_I915_HANGCHECK_JIFFIES)); 2045
2046void i915_queue_hangcheck(struct drm_device *dev)
2047{
2048 struct drm_i915_private *dev_priv = dev->dev_private;
2049 if (!i915_enable_hangcheck)
2050 return;
2051
2052 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2053 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
2542} 2054}
2543 2055
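Taken together, the two hunks above turn the per-ring hangcheck classification (HANGCHECK_WAIT / ACTIVE / KICK / HUNG) into a score accumulated directly on ring->hangcheck.score: waiting adds nothing, a busy ring adds BUSY, a kicked ring adds KICK, and a hung ring adds HUNG while also being marked stuck; elsewhere the score is gradually reduced so isolated stalls do not add up, and i915_queue_hangcheck() re-arms the timer (rounded up with round_jiffies_up()) only when i915_enable_hangcheck is set. Below is a compact model of just the scoring side; the weights and the firing threshold are placeholders, not the driver's BUSY/KICK/HUNG constants.

#include <stdbool.h>
#include <stdio.h>

enum hangcheck_action {                 /* mirrors HANGCHECK_WAIT/ACTIVE/KICK/HUNG */
        HC_WAIT,
        HC_ACTIVE,
        HC_KICK,
        HC_HUNG,
};

/* Placeholder weights -- the driver defines its own BUSY/KICK/HUNG values. */
enum { SCORE_BUSY = 1, SCORE_KICK = 5, SCORE_HUNG = 20, FIRE_THRESHOLD = 30 };

struct ring_hangcheck {
        int score;
        bool stuck;
};

static void hangcheck_account(struct ring_hangcheck *hc, enum hangcheck_action act)
{
        switch (act) {
        case HC_WAIT:                   /* waiting on an event: no penalty */
                break;
        case HC_ACTIVE:
                hc->score += SCORE_BUSY;
                break;
        case HC_KICK:
                hc->score += SCORE_KICK;
                break;
        case HC_HUNG:
                hc->score += SCORE_HUNG;
                hc->stuck = true;
                break;
        }
}

int main(void)
{
        struct ring_hangcheck rcs = { 0, false };
        int i;

        /* Three hangcheck periods with no progress and a dead semaphore. */
        for (i = 0; i < 3; i++)
                hangcheck_account(&rcs, HC_HUNG);

        if (rcs.score >= FIRE_THRESHOLD)
                printf("ring hung (score %d), would capture state and reset\n",
                       rcs.score);
        return 0;
}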
2544static void ibx_irq_preinstall(struct drm_device *dev) 2056static void ibx_irq_preinstall(struct drm_device *dev)
@@ -2560,31 +2072,26 @@ static void ibx_irq_preinstall(struct drm_device *dev)
2560 POSTING_READ(SDEIER); 2072 POSTING_READ(SDEIER);
2561} 2073}
2562 2074
2563/* drm_dma.h hooks 2075static void gen5_gt_irq_preinstall(struct drm_device *dev)
2564*/
2565static void ironlake_irq_preinstall(struct drm_device *dev)
2566{ 2076{
2567 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2077 struct drm_i915_private *dev_priv = dev->dev_private;
2568
2569 atomic_set(&dev_priv->irq_received, 0);
2570
2571 I915_WRITE(HWSTAM, 0xeffe);
2572
2573 /* XXX hotplug from PCH */
2574
2575 I915_WRITE(DEIMR, 0xffffffff);
2576 I915_WRITE(DEIER, 0x0);
2577 POSTING_READ(DEIER);
2578 2078
2579 /* and GT */ 2079 /* and GT */
2580 I915_WRITE(GTIMR, 0xffffffff); 2080 I915_WRITE(GTIMR, 0xffffffff);
2581 I915_WRITE(GTIER, 0x0); 2081 I915_WRITE(GTIER, 0x0);
2582 POSTING_READ(GTIER); 2082 POSTING_READ(GTIER);
2583 2083
2584 ibx_irq_preinstall(dev); 2084 if (INTEL_INFO(dev)->gen >= 6) {
2085 /* and PM */
2086 I915_WRITE(GEN6_PMIMR, 0xffffffff);
2087 I915_WRITE(GEN6_PMIER, 0x0);
2088 POSTING_READ(GEN6_PMIER);
2089 }
2585} 2090}
2586 2091
2587static void ivybridge_irq_preinstall(struct drm_device *dev) 2092/* drm_dma.h hooks
2093*/
2094static void ironlake_irq_preinstall(struct drm_device *dev)
2588{ 2095{
2589 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2096 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2590 2097
@@ -2592,21 +2099,11 @@ static void ivybridge_irq_preinstall(struct drm_device *dev)
2592 2099
2593 I915_WRITE(HWSTAM, 0xeffe); 2100 I915_WRITE(HWSTAM, 0xeffe);
2594 2101
2595 /* XXX hotplug from PCH */
2596
2597 I915_WRITE(DEIMR, 0xffffffff); 2102 I915_WRITE(DEIMR, 0xffffffff);
2598 I915_WRITE(DEIER, 0x0); 2103 I915_WRITE(DEIER, 0x0);
2599 POSTING_READ(DEIER); 2104 POSTING_READ(DEIER);
2600 2105
2601 /* and GT */ 2106 gen5_gt_irq_preinstall(dev);
2602 I915_WRITE(GTIMR, 0xffffffff);
2603 I915_WRITE(GTIER, 0x0);
2604 POSTING_READ(GTIER);
2605
2606 /* Power management */
2607 I915_WRITE(GEN6_PMIMR, 0xffffffff);
2608 I915_WRITE(GEN6_PMIER, 0x0);
2609 POSTING_READ(GEN6_PMIER);
2610 2107
2611 ibx_irq_preinstall(dev); 2108 ibx_irq_preinstall(dev);
2612} 2109}
@@ -2627,9 +2124,8 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
2627 /* and GT */ 2124 /* and GT */
2628 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2125 I915_WRITE(GTIIR, I915_READ(GTIIR));
2629 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2126 I915_WRITE(GTIIR, I915_READ(GTIIR));
2630 I915_WRITE(GTIMR, 0xffffffff); 2127
2631 I915_WRITE(GTIER, 0x0); 2128 gen5_gt_irq_preinstall(dev);
2632 POSTING_READ(GTIER);
2633 2129
2634 I915_WRITE(DPINVGTT, 0xff); 2130 I915_WRITE(DPINVGTT, 0xff);
2635 2131
@@ -2648,22 +2144,21 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
2648 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2144 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2649 struct drm_mode_config *mode_config = &dev->mode_config; 2145 struct drm_mode_config *mode_config = &dev->mode_config;
2650 struct intel_encoder *intel_encoder; 2146 struct intel_encoder *intel_encoder;
2651 u32 mask = ~I915_READ(SDEIMR); 2147 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
2652 u32 hotplug;
2653 2148
2654 if (HAS_PCH_IBX(dev)) { 2149 if (HAS_PCH_IBX(dev)) {
2655 mask &= ~SDE_HOTPLUG_MASK; 2150 hotplug_irqs = SDE_HOTPLUG_MASK;
2656 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2151 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2657 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2152 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2658 mask |= hpd_ibx[intel_encoder->hpd_pin]; 2153 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
2659 } else { 2154 } else {
2660 mask &= ~SDE_HOTPLUG_MASK_CPT; 2155 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
2661 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2156 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2662 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2157 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2663 mask |= hpd_cpt[intel_encoder->hpd_pin]; 2158 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
2664 } 2159 }
2665 2160
2666 I915_WRITE(SDEIMR, ~mask); 2161 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
2667 2162
2668 /* 2163 /*
2669 * Enable digital hotplug on the PCH, and configure the DP short pulse 2164 * Enable digital hotplug on the PCH, and configure the DP short pulse
@@ -2700,123 +2195,103 @@ static void ibx_irq_postinstall(struct drm_device *dev)
2700 I915_WRITE(SDEIMR, ~mask); 2195 I915_WRITE(SDEIMR, ~mask);
2701} 2196}
2702 2197
2703static int ironlake_irq_postinstall(struct drm_device *dev) 2198static void gen5_gt_irq_postinstall(struct drm_device *dev)
2704{ 2199{
2705 unsigned long irqflags; 2200 struct drm_i915_private *dev_priv = dev->dev_private;
2706 2201 u32 pm_irqs, gt_irqs;
2707 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2708 /* enable kind of interrupts always enabled */
2709 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2710 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2711 DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
2712 DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
2713 u32 gt_irqs;
2714
2715 dev_priv->irq_mask = ~display_mask;
2716 2202
2717 /* should always can generate irq */ 2203 pm_irqs = gt_irqs = 0;
2718 I915_WRITE(DEIIR, I915_READ(DEIIR));
2719 I915_WRITE(DEIMR, dev_priv->irq_mask);
2720 I915_WRITE(DEIER, display_mask |
2721 DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT);
2722 POSTING_READ(DEIER);
2723 2204
2724 dev_priv->gt_irq_mask = ~0; 2205 dev_priv->gt_irq_mask = ~0;
2206 if (HAS_L3_GPU_CACHE(dev)) {
2207 /* L3 parity interrupt is always unmasked. */
2208 dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2209 gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2210 }
2725 2211
2726 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2212 gt_irqs |= GT_RENDER_USER_INTERRUPT;
2727 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2213 if (IS_GEN5(dev)) {
2728
2729 gt_irqs = GT_RENDER_USER_INTERRUPT;
2730
2731 if (IS_GEN6(dev))
2732 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
2733 else
2734 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 2214 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
2735 ILK_BSD_USER_INTERRUPT; 2215 ILK_BSD_USER_INTERRUPT;
2216 } else {
2217 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
2218 }
2736 2219
2220 I915_WRITE(GTIIR, I915_READ(GTIIR));
2221 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2737 I915_WRITE(GTIER, gt_irqs); 2222 I915_WRITE(GTIER, gt_irqs);
2738 POSTING_READ(GTIER); 2223 POSTING_READ(GTIER);
2739 2224
2740 ibx_irq_postinstall(dev); 2225 if (INTEL_INFO(dev)->gen >= 6) {
2226 pm_irqs |= GEN6_PM_RPS_EVENTS;
2741 2227
2742 if (IS_IRONLAKE_M(dev)) { 2228 if (HAS_VEBOX(dev))
2743 /* Enable PCU event interrupts 2229 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
2744 *
2745 * spinlocking not required here for correctness since interrupt
2746 * setup is guaranteed to run in single-threaded context. But we
2747 * need it to make the assert_spin_locked happy. */
2748 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2749 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
2750 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2751 }
2752 2230
2753 return 0; 2231 dev_priv->pm_irq_mask = 0xffffffff;
2232 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2233 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
2234 I915_WRITE(GEN6_PMIER, pm_irqs);
2235 POSTING_READ(GEN6_PMIER);
2236 }
2754} 2237}
2755 2238
2756static int ivybridge_irq_postinstall(struct drm_device *dev) 2239static int ironlake_irq_postinstall(struct drm_device *dev)
2757{ 2240{
2241 unsigned long irqflags;
2758 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2242 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2759 /* enable kind of interrupts always enabled */ 2243 u32 display_mask, extra_mask;
2760 u32 display_mask = 2244
2761 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | 2245 if (INTEL_INFO(dev)->gen >= 7) {
2762 DE_PLANEC_FLIP_DONE_IVB | 2246 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
2763 DE_PLANEB_FLIP_DONE_IVB | 2247 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
2764 DE_PLANEA_FLIP_DONE_IVB | 2248 DE_PLANEB_FLIP_DONE_IVB |
2765 DE_AUX_CHANNEL_A_IVB | 2249 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
2766 DE_ERR_INT_IVB; 2250 DE_ERR_INT_IVB);
2767 u32 pm_irqs = GEN6_PM_RPS_EVENTS; 2251 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
2768 u32 gt_irqs; 2252 DE_PIPEA_VBLANK_IVB);
2253
2254 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2255 } else {
2256 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2257 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2258 DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
2259 DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
2260 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
2261 }
2769 2262
2770 dev_priv->irq_mask = ~display_mask; 2263 dev_priv->irq_mask = ~display_mask;
2771 2264
2772 /* should always can generate irq */ 2265 /* should always can generate irq */
2773 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2774 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2266 I915_WRITE(DEIIR, I915_READ(DEIIR));
2775 I915_WRITE(DEIMR, dev_priv->irq_mask); 2267 I915_WRITE(DEIMR, dev_priv->irq_mask);
2776 I915_WRITE(DEIER, 2268 I915_WRITE(DEIER, display_mask | extra_mask);
2777 display_mask |
2778 DE_PIPEC_VBLANK_IVB |
2779 DE_PIPEB_VBLANK_IVB |
2780 DE_PIPEA_VBLANK_IVB);
2781 POSTING_READ(DEIER); 2269 POSTING_READ(DEIER);
2782 2270
2783 dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2271 gen5_gt_irq_postinstall(dev);
2784
2785 I915_WRITE(GTIIR, I915_READ(GTIIR));
2786 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2787
2788 gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
2789 GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2790 I915_WRITE(GTIER, gt_irqs);
2791 POSTING_READ(GTIER);
2792
2793 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2794 if (HAS_VEBOX(dev))
2795 pm_irqs |= PM_VEBOX_USER_INTERRUPT |
2796 PM_VEBOX_CS_ERROR_INTERRUPT;
2797
2798 /* Our enable/disable rps functions may touch these registers so
2799 * make sure to set a known state for only the non-RPS bits.
2800 * The RMW is extra paranoia since this should be called after being set
2801 * to a known state in preinstall.
2802 * */
2803 I915_WRITE(GEN6_PMIMR,
2804 (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs);
2805 I915_WRITE(GEN6_PMIER,
2806 (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs);
2807 POSTING_READ(GEN6_PMIER);
2808 2272
2809 ibx_irq_postinstall(dev); 2273 ibx_irq_postinstall(dev);
2810 2274
2275 if (IS_IRONLAKE_M(dev)) {
2276 /* Enable PCU event interrupts
2277 *
2278 * spinlocking not required here for correctness since interrupt
2279 * setup is guaranteed to run in single-threaded context. But we
2280 * need it to make the assert_spin_locked happy. */
2281 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2282 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
2283 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2284 }
2285
2811 return 0; 2286 return 0;
2812} 2287}
2813 2288
2814static int valleyview_irq_postinstall(struct drm_device *dev) 2289static int valleyview_irq_postinstall(struct drm_device *dev)
2815{ 2290{
2816 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2291 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2817 u32 gt_irqs;
2818 u32 enable_mask; 2292 u32 enable_mask;
2819 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 2293 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
2294 unsigned long irqflags;
2820 2295
2821 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 2296 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2822 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2297 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
@@ -2842,20 +2317,18 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2842 I915_WRITE(PIPESTAT(1), 0xffff); 2317 I915_WRITE(PIPESTAT(1), 0xffff);
2843 POSTING_READ(VLV_IER); 2318 POSTING_READ(VLV_IER);
2844 2319
2320 /* Interrupt setup is already guaranteed to be single-threaded, this is
2321 * just to make the assert_spin_locked check happy. */
2322 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2845 i915_enable_pipestat(dev_priv, 0, pipestat_enable); 2323 i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2846 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 2324 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2847 i915_enable_pipestat(dev_priv, 1, pipestat_enable); 2325 i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2326 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2848 2327
2849 I915_WRITE(VLV_IIR, 0xffffffff); 2328 I915_WRITE(VLV_IIR, 0xffffffff);
2850 I915_WRITE(VLV_IIR, 0xffffffff); 2329 I915_WRITE(VLV_IIR, 0xffffffff);
2851 2330
2852 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2331 gen5_gt_irq_postinstall(dev);
2853 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2854
2855 gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
2856 GT_BLT_USER_INTERRUPT;
2857 I915_WRITE(GTIER, gt_irqs);
2858 POSTING_READ(GTIER);
2859 2332
2860 /* ack & enable invalid PTE error interrupts */ 2333 /* ack & enable invalid PTE error interrupts */
2861#if 0 /* FIXME: add support to irq handler for checking these bits */ 2334#if 0 /* FIXME: add support to irq handler for checking these bits */
@@ -3001,7 +2474,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3001 u16 iir, new_iir; 2474 u16 iir, new_iir;
3002 u32 pipe_stats[2]; 2475 u32 pipe_stats[2];
3003 unsigned long irqflags; 2476 unsigned long irqflags;
3004 int irq_received;
3005 int pipe; 2477 int pipe;
3006 u16 flip_mask = 2478 u16 flip_mask =
3007 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2479 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -3035,7 +2507,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3035 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2507 DRM_DEBUG_DRIVER("pipe %c underrun\n",
3036 pipe_name(pipe)); 2508 pipe_name(pipe));
3037 I915_WRITE(reg, pipe_stats[pipe]); 2509 I915_WRITE(reg, pipe_stats[pipe]);
3038 irq_received = 1;
3039 } 2510 }
3040 } 2511 }
3041 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2512 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -3323,6 +2794,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
3323 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2794 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3324 u32 enable_mask; 2795 u32 enable_mask;
3325 u32 error_mask; 2796 u32 error_mask;
2797 unsigned long irqflags;
3326 2798
3327 /* Unmask the interrupts that we always want on. */ 2799 /* Unmask the interrupts that we always want on. */
3328 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 2800 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
@@ -3341,7 +2813,11 @@ static int i965_irq_postinstall(struct drm_device *dev)
3341 if (IS_G4X(dev)) 2813 if (IS_G4X(dev))
3342 enable_mask |= I915_BSD_USER_INTERRUPT; 2814 enable_mask |= I915_BSD_USER_INTERRUPT;
3343 2815
2816 /* Interrupt setup is already guaranteed to be single-threaded, this is
2817 * just to make the assert_spin_locked check happy. */
2818 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3344 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 2819 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2820 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3345 2821
3346 /* 2822 /*
3347 * Enable some error detection, note the instruction error mask 2823 * Enable some error detection, note the instruction error mask
@@ -3616,15 +3092,6 @@ void intel_irq_init(struct drm_device *dev)
3616 dev->driver->enable_vblank = valleyview_enable_vblank; 3092 dev->driver->enable_vblank = valleyview_enable_vblank;
3617 dev->driver->disable_vblank = valleyview_disable_vblank; 3093 dev->driver->disable_vblank = valleyview_disable_vblank;
3618 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3094 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3619 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
3620 /* Share uninstall handlers with ILK/SNB */
3621 dev->driver->irq_handler = ivybridge_irq_handler;
3622 dev->driver->irq_preinstall = ivybridge_irq_preinstall;
3623 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
3624 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3625 dev->driver->enable_vblank = ivybridge_enable_vblank;
3626 dev->driver->disable_vblank = ivybridge_disable_vblank;
3627 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3628 } else if (HAS_PCH_SPLIT(dev)) { 3095 } else if (HAS_PCH_SPLIT(dev)) {
3629 dev->driver->irq_handler = ironlake_irq_handler; 3096 dev->driver->irq_handler = ironlake_irq_handler;
3630 dev->driver->irq_preinstall = ironlake_irq_preinstall; 3097 dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -3683,3 +3150,67 @@ void intel_hpd_init(struct drm_device *dev)
3683 dev_priv->display.hpd_irq_setup(dev); 3150 dev_priv->display.hpd_irq_setup(dev);
3684 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3151 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3685} 3152}
3153
3154/* Disable interrupts so we can allow Package C8+. */
3155void hsw_pc8_disable_interrupts(struct drm_device *dev)
3156{
3157 struct drm_i915_private *dev_priv = dev->dev_private;
3158 unsigned long irqflags;
3159
3160 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3161
3162 dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
3163 dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
3164 dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
3165 dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
3166 dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
3167
3168 ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
3169 ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
3170 ilk_disable_gt_irq(dev_priv, 0xffffffff);
3171 snb_disable_pm_irq(dev_priv, 0xffffffff);
3172
3173 dev_priv->pc8.irqs_disabled = true;
3174
3175 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3176}
3177
3178/* Restore interrupts so we can recover from Package C8+. */
3179void hsw_pc8_restore_interrupts(struct drm_device *dev)
3180{
3181 struct drm_i915_private *dev_priv = dev->dev_private;
3182 unsigned long irqflags;
3183 uint32_t val, expected;
3184
3185 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3186
3187 val = I915_READ(DEIMR);
3188 expected = ~DE_PCH_EVENT_IVB;
3189 WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
3190
3191 val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
3192 expected = ~SDE_HOTPLUG_MASK_CPT;
3193 WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
3194 val, expected);
3195
3196 val = I915_READ(GTIMR);
3197 expected = 0xffffffff;
3198 WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
3199
3200 val = I915_READ(GEN6_PMIMR);
3201 expected = 0xffffffff;
3202 WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
3203 expected);
3204
3205 dev_priv->pc8.irqs_disabled = false;
3206
3207 ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
3208 ibx_enable_display_interrupt(dev_priv,
3209 ~dev_priv->pc8.regsave.sdeimr &
3210 ~SDE_HOTPLUG_MASK_CPT);
3211 ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
3212 snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
3213 I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
3214
3215 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3216}
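
The two helpers added above are meant to be used as a strict pair around Package C8 entry and exit: the disable path saves DEIMR/SDEIMR/GTIMR/GTIER/GEN6_PMIMR into pc8.regsave and masks everything except the PCH event and hotplug bits, while the restore path warns if anything touched those masks in between and then re-applies the saved values. A minimal sketch of the expected calling pattern follows; pc8_enter()/pc8_exit() are placeholder names for illustration, not the actual entry points added elsewhere in this series.

static void pc8_enter(struct drm_device *dev)
{
	/* quiesce interrupts before the hardware is allowed into PC8+ */
	hsw_pc8_disable_interrupts(dev);
	/* ... gate clocks, allow LCPLL power-down, etc. ... */
}

static void pc8_exit(struct drm_device *dev)
{
	/* ... ungate clocks, restore LCPLL ... */
	/* re-applies DEIMR/SDEIMR/GTIMR/GTIER/GEN6_PMIMR from pc8.regsave */
	hsw_pc8_restore_interrupts(dev);
}
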
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 342f1f336168..b6a58f720f9a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -61,6 +61,12 @@
61#define GC_LOW_FREQUENCY_ENABLE (1 << 7) 61#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
62#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) 62#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
63#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4) 63#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4)
64#define GC_DISPLAY_CLOCK_267_MHZ_PNV (0 << 4)
65#define GC_DISPLAY_CLOCK_333_MHZ_PNV (1 << 4)
66#define GC_DISPLAY_CLOCK_444_MHZ_PNV (2 << 4)
67#define GC_DISPLAY_CLOCK_200_MHZ_PNV (5 << 4)
68#define GC_DISPLAY_CLOCK_133_MHZ_PNV (6 << 4)
69#define GC_DISPLAY_CLOCK_167_MHZ_PNV (7 << 4)
64#define GC_DISPLAY_CLOCK_MASK (7 << 4) 70#define GC_DISPLAY_CLOCK_MASK (7 << 4)
65#define GM45_GC_RENDER_CLOCK_MASK (0xf << 0) 71#define GM45_GC_RENDER_CLOCK_MASK (0xf << 0)
66#define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0) 72#define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0)
@@ -363,6 +369,7 @@
363#define PUNIT_REG_GPU_LFM 0xd3 369#define PUNIT_REG_GPU_LFM 0xd3
364#define PUNIT_REG_GPU_FREQ_REQ 0xd4 370#define PUNIT_REG_GPU_FREQ_REQ 0xd4
365#define PUNIT_REG_GPU_FREQ_STS 0xd8 371#define PUNIT_REG_GPU_FREQ_STS 0xd8
372#define GENFREQSTATUS (1<<0)
366#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc 373#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
367 374
368#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */ 375#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
@@ -680,6 +687,7 @@
680#define ERR_INT_FIFO_UNDERRUN_C (1<<6) 687#define ERR_INT_FIFO_UNDERRUN_C (1<<6)
681#define ERR_INT_FIFO_UNDERRUN_B (1<<3) 688#define ERR_INT_FIFO_UNDERRUN_B (1<<3)
682#define ERR_INT_FIFO_UNDERRUN_A (1<<0) 689#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
690#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
683 691
684#define FPGA_DBG 0x42300 692#define FPGA_DBG 0x42300
685#define FPGA_DBG_RM_NOCLAIM (1<<31) 693#define FPGA_DBG_RM_NOCLAIM (1<<31)
@@ -1127,7 +1135,8 @@
1127#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018) 1135#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018)
1128#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B) 1136#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
1129#define DPLL_VCO_ENABLE (1 << 31) 1137#define DPLL_VCO_ENABLE (1 << 31)
1130#define DPLL_DVO_HIGH_SPEED (1 << 30) 1138#define DPLL_SDVO_HIGH_SPEED (1 << 30)
1139#define DPLL_DVO_2X_MODE (1 << 30)
1131#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30) 1140#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
1132#define DPLL_SYNCLOCK_ENABLE (1 << 29) 1141#define DPLL_SYNCLOCK_ENABLE (1 << 29)
1133#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29) 1142#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29)
@@ -1440,6 +1449,8 @@
1440#define MCH_SSKPD_WM0_MASK 0x3f 1449#define MCH_SSKPD_WM0_MASK 0x3f
1441#define MCH_SSKPD_WM0_VAL 0xc 1450#define MCH_SSKPD_WM0_VAL 0xc
1442 1451
1452#define MCH_SECP_NRG_STTS (MCHBAR_MIRROR_BASE_SNB + 0x592c)
1453
1443/* Clocking configuration register */ 1454/* Clocking configuration register */
1444#define CLKCFG 0x10c00 1455#define CLKCFG 0x10c00
1445#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */ 1456#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
@@ -1696,15 +1707,26 @@
1696 */ 1707 */
1697#define CCID 0x2180 1708#define CCID 0x2180
1698#define CCID_EN (1<<0) 1709#define CCID_EN (1<<0)
1710/*
1711 * Notes on SNB/IVB/VLV context size:
1712 * - Power context is saved elsewhere (LLC or stolen)
1713 * - Ring/execlist context is saved on SNB, not on IVB
1714 * - Extended context size already includes render context size
1715 * - We always need to follow the extended context size.
1716 * SNB BSpec has comments indicating that we should use the
1717 * render context size instead if execlists are disabled, but
1718 * based on empirical testing that's just nonsense.
1719 * - Pipelined/VF state is saved on SNB/IVB respectively
1720 * - GT1 size just indicates how much of render context
1721 * doesn't need saving on GT1
1722 */
1699#define CXT_SIZE 0x21a0 1723#define CXT_SIZE 0x21a0
1700#define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f) 1724#define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f)
1701#define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f) 1725#define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f)
1702#define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f) 1726#define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f)
1703#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f) 1727#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f)
1704#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f) 1728#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f)
1705#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_POWER_SIZE(cxt_reg) + \ 1729#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \
1706 GEN6_CXT_RING_SIZE(cxt_reg) + \
1707 GEN6_CXT_RENDER_SIZE(cxt_reg) + \
1708 GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \ 1730 GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
1709 GEN6_CXT_PIPELINE_SIZE(cxt_reg)) 1731 GEN6_CXT_PIPELINE_SIZE(cxt_reg))
1710#define GEN7_CXT_SIZE 0x21a8 1732#define GEN7_CXT_SIZE 0x21a8
@@ -1714,11 +1736,7 @@
1714#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f) 1736#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f)
1715#define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7) 1737#define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7)
1716#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f) 1738#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f)
1717#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_POWER_SIZE(ctx_reg) + \ 1739#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
1718 GEN7_CXT_RING_SIZE(ctx_reg) + \
1719 GEN7_CXT_RENDER_SIZE(ctx_reg) + \
1720 GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
1721 GEN7_CXT_GT1_SIZE(ctx_reg) + \
1722 GEN7_CXT_VFSTATE_SIZE(ctx_reg)) 1740 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
1723/* Haswell does have the CXT_SIZE register however it does not appear to be 1741/* Haswell does have the CXT_SIZE register however it does not appear to be
1724 * valid. Now, docs explain in dwords what is in the context object. The full 1742 * valid. Now, docs explain in dwords what is in the context object. The full
@@ -1778,6 +1796,71 @@
1778#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) 1796#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
1779#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B) 1797#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
1780 1798
1799/* HSW eDP PSR registers */
1800#define EDP_PSR_CTL 0x64800
1801#define EDP_PSR_ENABLE (1<<31)
1802#define EDP_PSR_LINK_DISABLE (0<<27)
1803#define EDP_PSR_LINK_STANDBY (1<<27)
1804#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25)
1805#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0<<25)
1806#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1<<25)
1807#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2<<25)
1808#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3<<25)
1809#define EDP_PSR_MAX_SLEEP_TIME_SHIFT 20
1810#define EDP_PSR_SKIP_AUX_EXIT (1<<12)
1811#define EDP_PSR_TP1_TP2_SEL (0<<11)
1812#define EDP_PSR_TP1_TP3_SEL (1<<11)
1813#define EDP_PSR_TP2_TP3_TIME_500us (0<<8)
1814#define EDP_PSR_TP2_TP3_TIME_100us (1<<8)
1815#define EDP_PSR_TP2_TP3_TIME_2500us (2<<8)
1816#define EDP_PSR_TP2_TP3_TIME_0us (3<<8)
1817#define EDP_PSR_TP1_TIME_500us (0<<4)
1818#define EDP_PSR_TP1_TIME_100us (1<<4)
1819#define EDP_PSR_TP1_TIME_2500us (2<<4)
1820#define EDP_PSR_TP1_TIME_0us (3<<4)
1821#define EDP_PSR_IDLE_FRAME_SHIFT 0
1822
1823#define EDP_PSR_AUX_CTL 0x64810
1824#define EDP_PSR_AUX_DATA1 0x64814
1825#define EDP_PSR_DPCD_COMMAND 0x80060000
1826#define EDP_PSR_AUX_DATA2 0x64818
1827#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24)
1828#define EDP_PSR_AUX_DATA3 0x6481c
1829#define EDP_PSR_AUX_DATA4 0x64820
1830#define EDP_PSR_AUX_DATA5 0x64824
1831
1832#define EDP_PSR_STATUS_CTL 0x64840
1833#define EDP_PSR_STATUS_STATE_MASK (7<<29)
1834#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
1835#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
1836#define EDP_PSR_STATUS_STATE_SRDENT (2<<29)
1837#define EDP_PSR_STATUS_STATE_BUFOFF (3<<29)
1838#define EDP_PSR_STATUS_STATE_BUFON (4<<29)
1839#define EDP_PSR_STATUS_STATE_AUXACK (5<<29)
1840#define EDP_PSR_STATUS_STATE_SRDOFFACK (6<<29)
1841#define EDP_PSR_STATUS_LINK_MASK (3<<26)
1842#define EDP_PSR_STATUS_LINK_FULL_OFF (0<<26)
1843#define EDP_PSR_STATUS_LINK_FULL_ON (1<<26)
1844#define EDP_PSR_STATUS_LINK_STANDBY (2<<26)
1845#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20
1846#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK 0x1f
1847#define EDP_PSR_STATUS_COUNT_SHIFT 16
1848#define EDP_PSR_STATUS_COUNT_MASK 0xf
1849#define EDP_PSR_STATUS_AUX_ERROR (1<<15)
1850#define EDP_PSR_STATUS_AUX_SENDING (1<<12)
1851#define EDP_PSR_STATUS_SENDING_IDLE (1<<9)
1852#define EDP_PSR_STATUS_SENDING_TP2_TP3 (1<<8)
1853#define EDP_PSR_STATUS_SENDING_TP1 (1<<4)
1854#define EDP_PSR_STATUS_IDLE_MASK 0xf
1855
1856#define EDP_PSR_PERF_CNT 0x64844
1857#define EDP_PSR_PERF_CNT_MASK 0xffffff
1858
1859#define EDP_PSR_DEBUG_CTL 0x64860
1860#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
1861#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
1862#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
1863
1781/* VGA port control */ 1864/* VGA port control */
1782#define ADPA 0x61100 1865#define ADPA 0x61100
1783#define PCH_ADPA 0xe1100 1866#define PCH_ADPA 0xe1100
@@ -2053,6 +2136,7 @@
2053 * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte 2136 * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
2054 * of the infoframe structure specified by CEA-861. */ 2137 * of the infoframe structure specified by CEA-861. */
2055#define VIDEO_DIP_DATA_SIZE 32 2138#define VIDEO_DIP_DATA_SIZE 32
2139#define VIDEO_DIP_VSC_DATA_SIZE 36
2056#define VIDEO_DIP_CTL 0x61170 2140#define VIDEO_DIP_CTL 0x61170
2057/* Pre HSW: */ 2141/* Pre HSW: */
2058#define VIDEO_DIP_ENABLE (1 << 31) 2142#define VIDEO_DIP_ENABLE (1 << 31)
@@ -2200,6 +2284,8 @@
2200#define BLC_PWM_CPU_CTL2 0x48250 2284#define BLC_PWM_CPU_CTL2 0x48250
2201#define BLC_PWM_CPU_CTL 0x48254 2285#define BLC_PWM_CPU_CTL 0x48254
2202 2286
2287#define HSW_BLC_PWM2_CTL 0x48350
2288
2203/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is 2289/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
2204 * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */ 2290 * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
2205#define BLC_PWM_PCH_CTL1 0xc8250 2291#define BLC_PWM_PCH_CTL1 0xc8250
@@ -2208,6 +2294,12 @@
2208#define BLM_PCH_POLARITY (1 << 29) 2294#define BLM_PCH_POLARITY (1 << 29)
2209#define BLC_PWM_PCH_CTL2 0xc8254 2295#define BLC_PWM_PCH_CTL2 0xc8254
2210 2296
2297#define UTIL_PIN_CTL 0x48400
2298#define UTIL_PIN_ENABLE (1 << 31)
2299
2300#define PCH_GTC_CTL 0xe7000
2301#define PCH_GTC_ENABLE (1 << 31)
2302
2211/* TV port control */ 2303/* TV port control */
2212#define TV_CTL 0x68000 2304#define TV_CTL 0x68000
2213/** Enables the TV encoder */ 2305/** Enables the TV encoder */
@@ -3121,9 +3213,6 @@
3121#define MLTR_WM2_SHIFT 8 3213#define MLTR_WM2_SHIFT 8
3122/* the unit of memory self-refresh latency time is 0.5us */ 3214/* the unit of memory self-refresh latency time is 0.5us */
3123#define ILK_SRLT_MASK 0x3f 3215#define ILK_SRLT_MASK 0x3f
3124#define ILK_LATENCY(shift) (I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK)
3125#define ILK_READ_WM1_LATENCY() ILK_LATENCY(MLTR_WM1_SHIFT)
3126#define ILK_READ_WM2_LATENCY() ILK_LATENCY(MLTR_WM2_SHIFT)
3127 3216
3128/* define the fifo size on Ironlake */ 3217/* define the fifo size on Ironlake */
3129#define ILK_DISPLAY_FIFO 128 3218#define ILK_DISPLAY_FIFO 128
@@ -3170,12 +3259,6 @@
3170#define SSKPD_WM2_SHIFT 16 3259#define SSKPD_WM2_SHIFT 16
3171#define SSKPD_WM3_SHIFT 24 3260#define SSKPD_WM3_SHIFT 24
3172 3261
3173#define SNB_LATENCY(shift) (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK)
3174#define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT)
3175#define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT)
3176#define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT)
3177#define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT)
3178
3179/* 3262/*
3180 * The two pipe frame counter registers are not synchronized, so 3263 * The two pipe frame counter registers are not synchronized, so
3181 * reading a stable value is somewhat tricky. The following code 3264 * reading a stable value is somewhat tricky. The following code
@@ -3726,6 +3809,9 @@
3726#define DE_PLANEA_FLIP_DONE_IVB (1<<3) 3809#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
3727#define DE_PIPEA_VBLANK_IVB (1<<0) 3810#define DE_PIPEA_VBLANK_IVB (1<<0)
3728 3811
3812#define DE_PIPE_VBLANK_ILK(pipe) (1 << ((pipe * 8) + 7))
3813#define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5))
3814
3729#define VLV_MASTER_IER 0x4400c /* Gunit master IER */ 3815#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
3730#define MASTER_INTERRUPT_ENABLE (1<<31) 3816#define MASTER_INTERRUPT_ENABLE (1<<31)
3731 3817
@@ -3888,6 +3974,7 @@
3888#define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6) 3974#define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6)
3889#define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3) 3975#define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3)
3890#define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0) 3976#define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0)
3977#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
3891 3978
3892/* digital port hotplug */ 3979/* digital port hotplug */
3893#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */ 3980#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
@@ -4081,6 +4168,8 @@
4081 _TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B) 4168 _TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
4082#define HSW_TVIDEO_DIP_AVI_DATA(trans) \ 4169#define HSW_TVIDEO_DIP_AVI_DATA(trans) \
4083 _TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B) 4170 _TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
4171#define HSW_TVIDEO_DIP_VS_DATA(trans) \
4172 _TRANSCODER(trans, HSW_VIDEO_DIP_VS_DATA_A, HSW_VIDEO_DIP_VS_DATA_B)
4084#define HSW_TVIDEO_DIP_SPD_DATA(trans) \ 4173#define HSW_TVIDEO_DIP_SPD_DATA(trans) \
4085 _TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B) 4174 _TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
4086#define HSW_TVIDEO_DIP_GCP(trans) \ 4175#define HSW_TVIDEO_DIP_GCP(trans) \
@@ -4088,6 +4177,13 @@
4088#define HSW_TVIDEO_DIP_VSC_DATA(trans) \ 4177#define HSW_TVIDEO_DIP_VSC_DATA(trans) \
4089 _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B) 4178 _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B)
4090 4179
4180#define HSW_STEREO_3D_CTL_A 0x70020
4181#define S3D_ENABLE (1<<31)
4182#define HSW_STEREO_3D_CTL_B 0x71020
4183
4184#define HSW_STEREO_3D_CTL(trans) \
4185 _TRANSCODER(trans, HSW_STEREO_3D_CTL_A, HSW_STEREO_3D_CTL_A)
4186
4091#define _PCH_TRANS_HTOTAL_B 0xe1000 4187#define _PCH_TRANS_HTOTAL_B 0xe1000
4092#define _PCH_TRANS_HBLANK_B 0xe1004 4188#define _PCH_TRANS_HBLANK_B 0xe1004
4093#define _PCH_TRANS_HSYNC_B 0xe1008 4189#define _PCH_TRANS_HSYNC_B 0xe1008
@@ -4476,6 +4572,10 @@
4476#define GT_FIFO_FREE_ENTRIES 0x120008 4572#define GT_FIFO_FREE_ENTRIES 0x120008
4477#define GT_FIFO_NUM_RESERVED_ENTRIES 20 4573#define GT_FIFO_NUM_RESERVED_ENTRIES 20
4478 4574
4575#define HSW_IDICR 0x9008
4576#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
4577#define HSW_EDRAM_PRESENT 0x120010
4578
4479#define GEN6_UCGCTL1 0x9400 4579#define GEN6_UCGCTL1 0x9400
4480# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5) 4580# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
4481# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7) 4581# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
@@ -4744,8 +4844,8 @@
4744#define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */ 4844#define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */
4745#define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */ 4845#define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */
4746#define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */ 4846#define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */
4747#define HSW_PWR_WELL_ENABLE (1<<31) 4847#define HSW_PWR_WELL_ENABLE_REQUEST (1<<31)
4748#define HSW_PWR_WELL_STATE (1<<30) 4848#define HSW_PWR_WELL_STATE_ENABLED (1<<30)
4749#define HSW_PWR_WELL_CTL5 0x45410 4849#define HSW_PWR_WELL_CTL5 0x45410
4750#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31) 4850#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
4751#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20) 4851#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
@@ -4866,7 +4966,8 @@
4866#define SBI_SSCAUXDIV6 0x0610 4966#define SBI_SSCAUXDIV6 0x0610
4867#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4) 4967#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
4868#define SBI_DBUFF0 0x2a00 4968#define SBI_DBUFF0 0x2a00
4869#define SBI_DBUFF0_ENABLE (1<<0) 4969#define SBI_GEN0 0x1f00
4970#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1<<0)
4870 4971
4871/* LPT PIXCLK_GATE */ 4972/* LPT PIXCLK_GATE */
4872#define PIXCLK_GATE 0xC6020 4973#define PIXCLK_GATE 0xC6020
@@ -4932,7 +5033,14 @@
4932#define LCPLL_CLK_FREQ_450 (0<<26) 5033#define LCPLL_CLK_FREQ_450 (0<<26)
4933#define LCPLL_CD_CLOCK_DISABLE (1<<25) 5034#define LCPLL_CD_CLOCK_DISABLE (1<<25)
4934#define LCPLL_CD2X_CLOCK_DISABLE (1<<23) 5035#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
5036#define LCPLL_POWER_DOWN_ALLOW (1<<22)
4935#define LCPLL_CD_SOURCE_FCLK (1<<21) 5037#define LCPLL_CD_SOURCE_FCLK (1<<21)
5038#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
5039
5040#define D_COMP (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
5041#define D_COMP_RCOMP_IN_PROGRESS (1<<9)
5042#define D_COMP_COMP_FORCE (1<<8)
5043#define D_COMP_COMP_DISABLE (1<<0)
4936 5044
4937/* Pipe WM_LINETIME - watermark line time */ 5045/* Pipe WM_LINETIME - watermark line time */
4938#define PIPE_WM_LINETIME_A 0x45270 5046#define PIPE_WM_LINETIME_A 0x45270
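
Among the additions above, ERR_INT_FIFO_UNDERRUN(pipe) and SERR_INT_TRANS_FIFO_UNDERRUN(pipe) parameterize the previously hard-coded per-pipe/per-transcoder underrun bits (A = bit 0, B = bit 3, C = bit 6). A standalone sanity check of the bit arithmetic, as a userspace sketch rather than driver code:

#include <assert.h>

/* same layout as the new macros in the hunk above: 1 << (pipe * 3) */
#define FIFO_UNDERRUN_BIT(pipe)	(1 << ((pipe) * 3))

int main(void)
{
	assert(FIFO_UNDERRUN_BIT(0) == (1 << 0));	/* pipe/transcoder A */
	assert(FIFO_UNDERRUN_BIT(1) == (1 << 3));	/* pipe/transcoder B */
	assert(FIFO_UNDERRUN_BIT(2) == (1 << 6));	/* pipe/transcoder C */
	return 0;
}
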
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 6875b5654c63..a777e7f3b0df 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -409,6 +409,71 @@ static const struct attribute *gen6_attrs[] = {
409 NULL, 409 NULL,
410}; 410};
411 411
412static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
413 struct bin_attribute *attr, char *buf,
414 loff_t off, size_t count)
415{
416
417 struct device *kdev = container_of(kobj, struct device, kobj);
418 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
419 struct drm_device *dev = minor->dev;
420 struct i915_error_state_file_priv error_priv;
421 struct drm_i915_error_state_buf error_str;
422 ssize_t ret_count = 0;
423 int ret;
424
425 memset(&error_priv, 0, sizeof(error_priv));
426
427 ret = i915_error_state_buf_init(&error_str, count, off);
428 if (ret)
429 return ret;
430
431 error_priv.dev = dev;
432 i915_error_state_get(dev, &error_priv);
433
434 ret = i915_error_state_to_str(&error_str, &error_priv);
435 if (ret)
436 goto out;
437
438 ret_count = count < error_str.bytes ? count : error_str.bytes;
439
440 memcpy(buf, error_str.buf, ret_count);
441out:
442 i915_error_state_put(&error_priv);
443 i915_error_state_buf_release(&error_str);
444
445 return ret ?: ret_count;
446}
447
448static ssize_t error_state_write(struct file *file, struct kobject *kobj,
449 struct bin_attribute *attr, char *buf,
450 loff_t off, size_t count)
451{
452 struct device *kdev = container_of(kobj, struct device, kobj);
453 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
454 struct drm_device *dev = minor->dev;
455 int ret;
456
457 DRM_DEBUG_DRIVER("Resetting error state\n");
458
459 ret = mutex_lock_interruptible(&dev->struct_mutex);
460 if (ret)
461 return ret;
462
463 i915_destroy_error_state(dev);
464 mutex_unlock(&dev->struct_mutex);
465
466 return count;
467}
468
469static struct bin_attribute error_state_attr = {
470 .attr.name = "error",
471 .attr.mode = S_IRUSR | S_IWUSR,
472 .size = 0,
473 .read = error_state_read,
474 .write = error_state_write,
475};
476
412void i915_setup_sysfs(struct drm_device *dev) 477void i915_setup_sysfs(struct drm_device *dev)
413{ 478{
414 int ret; 479 int ret;
@@ -432,10 +497,16 @@ void i915_setup_sysfs(struct drm_device *dev)
432 if (ret) 497 if (ret)
433 DRM_ERROR("gen6 sysfs setup failed\n"); 498 DRM_ERROR("gen6 sysfs setup failed\n");
434 } 499 }
500
501 ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
502 &error_state_attr);
503 if (ret)
504 DRM_ERROR("error_state sysfs setup failed\n");
435} 505}
436 506
437void i915_teardown_sysfs(struct drm_device *dev) 507void i915_teardown_sysfs(struct drm_device *dev)
438{ 508{
509 sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
439 sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs); 510 sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
440 device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); 511 device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
441#ifdef CONFIG_PM 512#ifdef CONFIG_PM
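
The new "error" bin attribute is registered on the primary DRM minor, so the captured GPU error state becomes readable and resettable from userspace through sysfs; on a single-GPU system that is typically /sys/class/drm/card0/error, though the exact path depends on the minor number. A small hedged usage example (the path is an assumption for the first card):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	/* path assumed for the first GPU; adjust the card number as needed */
	int fd = open("/sys/class/drm/card0/error", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* error_state_read() serializes the last captured error state */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	/* any write resets the state (error_state_write ignores the data) */
	write(fd, "1", 1);
	close(fd);
	return 0;
}
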
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 3db4a6817713..e2c5ee6f6194 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -33,47 +33,52 @@ TRACE_EVENT(i915_gem_object_create,
33 TP_printk("obj=%p, size=%u", __entry->obj, __entry->size) 33 TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
34); 34);
35 35
36TRACE_EVENT(i915_gem_object_bind, 36TRACE_EVENT(i915_vma_bind,
37 TP_PROTO(struct drm_i915_gem_object *obj, bool mappable), 37 TP_PROTO(struct i915_vma *vma, bool mappable),
38 TP_ARGS(obj, mappable), 38 TP_ARGS(vma, mappable),
39 39
40 TP_STRUCT__entry( 40 TP_STRUCT__entry(
41 __field(struct drm_i915_gem_object *, obj) 41 __field(struct drm_i915_gem_object *, obj)
42 __field(struct i915_address_space *, vm)
42 __field(u32, offset) 43 __field(u32, offset)
43 __field(u32, size) 44 __field(u32, size)
44 __field(bool, mappable) 45 __field(bool, mappable)
45 ), 46 ),
46 47
47 TP_fast_assign( 48 TP_fast_assign(
48 __entry->obj = obj; 49 __entry->obj = vma->obj;
49 __entry->offset = obj->gtt_space->start; 50 __entry->vm = vma->vm;
50 __entry->size = obj->gtt_space->size; 51 __entry->offset = vma->node.start;
52 __entry->size = vma->node.size;
51 __entry->mappable = mappable; 53 __entry->mappable = mappable;
52 ), 54 ),
53 55
54 TP_printk("obj=%p, offset=%08x size=%x%s", 56 TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
55 __entry->obj, __entry->offset, __entry->size, 57 __entry->obj, __entry->offset, __entry->size,
56 __entry->mappable ? ", mappable" : "") 58 __entry->mappable ? ", mappable" : "",
59 __entry->vm)
57); 60);
58 61
59TRACE_EVENT(i915_gem_object_unbind, 62TRACE_EVENT(i915_vma_unbind,
60 TP_PROTO(struct drm_i915_gem_object *obj), 63 TP_PROTO(struct i915_vma *vma),
61 TP_ARGS(obj), 64 TP_ARGS(vma),
62 65
63 TP_STRUCT__entry( 66 TP_STRUCT__entry(
64 __field(struct drm_i915_gem_object *, obj) 67 __field(struct drm_i915_gem_object *, obj)
68 __field(struct i915_address_space *, vm)
65 __field(u32, offset) 69 __field(u32, offset)
66 __field(u32, size) 70 __field(u32, size)
67 ), 71 ),
68 72
69 TP_fast_assign( 73 TP_fast_assign(
70 __entry->obj = obj; 74 __entry->obj = vma->obj;
71 __entry->offset = obj->gtt_space->start; 75 __entry->vm = vma->vm;
72 __entry->size = obj->gtt_space->size; 76 __entry->offset = vma->node.start;
77 __entry->size = vma->node.size;
73 ), 78 ),
74 79
75 TP_printk("obj=%p, offset=%08x size=%x", 80 TP_printk("obj=%p, offset=%08x size=%x vm=%p",
76 __entry->obj, __entry->offset, __entry->size) 81 __entry->obj, __entry->offset, __entry->size, __entry->vm)
77); 82);
78 83
79TRACE_EVENT(i915_gem_object_change_domain, 84TRACE_EVENT(i915_gem_object_change_domain,
@@ -406,10 +411,12 @@ TRACE_EVENT(i915_flip_complete,
406 TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj) 411 TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
407); 412);
408 413
409TRACE_EVENT(i915_reg_rw, 414TRACE_EVENT_CONDITION(i915_reg_rw,
410 TP_PROTO(bool write, u32 reg, u64 val, int len), 415 TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace),
416
417 TP_ARGS(write, reg, val, len, trace),
411 418
412 TP_ARGS(write, reg, val, len), 419 TP_CONDITION(trace),
413 420
414 TP_STRUCT__entry( 421 TP_STRUCT__entry(
415 __field(u64, val) 422 __field(u64, val)
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index bcbbaea2a78e..57fe1ae32a0d 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -28,7 +28,7 @@ static const u8 intel_dsm_guid[] = {
28 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c 28 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
29}; 29};
30 30
31static int intel_dsm(acpi_handle handle, int func, int arg) 31static int intel_dsm(acpi_handle handle, int func)
32{ 32{
33 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; 33 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
34 struct acpi_object_list input; 34 struct acpi_object_list input;
@@ -46,8 +46,9 @@ static int intel_dsm(acpi_handle handle, int func, int arg)
46 params[1].integer.value = INTEL_DSM_REVISION_ID; 46 params[1].integer.value = INTEL_DSM_REVISION_ID;
47 params[2].type = ACPI_TYPE_INTEGER; 47 params[2].type = ACPI_TYPE_INTEGER;
48 params[2].integer.value = func; 48 params[2].integer.value = func;
49 params[3].type = ACPI_TYPE_INTEGER; 49 params[3].type = ACPI_TYPE_PACKAGE;
50 params[3].integer.value = arg; 50 params[3].package.count = 0;
51 params[3].package.elements = NULL;
51 52
52 ret = acpi_evaluate_object(handle, "_DSM", &input, &output); 53 ret = acpi_evaluate_object(handle, "_DSM", &input, &output);
53 if (ret) { 54 if (ret) {
@@ -151,8 +152,9 @@ static void intel_dsm_platform_mux_info(void)
151 params[1].integer.value = INTEL_DSM_REVISION_ID; 152 params[1].integer.value = INTEL_DSM_REVISION_ID;
152 params[2].type = ACPI_TYPE_INTEGER; 153 params[2].type = ACPI_TYPE_INTEGER;
153 params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO; 154 params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO;
154 params[3].type = ACPI_TYPE_INTEGER; 155 params[3].type = ACPI_TYPE_PACKAGE;
155 params[3].integer.value = 0; 156 params[3].package.count = 0;
157 params[3].package.elements = NULL;
156 158
157 ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input, 159 ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input,
158 &output); 160 &output);
@@ -205,7 +207,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev)
205 return false; 207 return false;
206 } 208 }
207 209
208 ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0); 210 ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS);
209 if (ret < 0) { 211 if (ret < 0) {
210 DRM_DEBUG_KMS("failed to get supported _DSM functions\n"); 212 DRM_DEBUG_KMS("failed to get supported _DSM functions\n");
211 return false; 213 return false;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 3acec8c48166..b5a3875f22c7 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -52,15 +52,14 @@ struct intel_crt {
52 u32 adpa_reg; 52 u32 adpa_reg;
53}; 53};
54 54
55static struct intel_crt *intel_attached_crt(struct drm_connector *connector) 55static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
56{ 56{
57 return container_of(intel_attached_encoder(connector), 57 return container_of(encoder, struct intel_crt, base);
58 struct intel_crt, base);
59} 58}
60 59
61static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder) 60static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
62{ 61{
63 return container_of(encoder, struct intel_crt, base); 62 return intel_encoder_to_crt(intel_attached_encoder(connector));
64} 63}
65 64
66static bool intel_crt_get_hw_state(struct intel_encoder *encoder, 65static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
@@ -238,17 +237,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
238 return true; 237 return true;
239} 238}
240 239
241static void intel_crt_mode_set(struct drm_encoder *encoder, 240static void intel_crt_mode_set(struct intel_encoder *encoder)
242 struct drm_display_mode *mode,
243 struct drm_display_mode *adjusted_mode)
244{ 241{
245 242
246 struct drm_device *dev = encoder->dev; 243 struct drm_device *dev = encoder->base.dev;
247 struct drm_crtc *crtc = encoder->crtc; 244 struct intel_crt *crt = intel_encoder_to_crt(encoder);
248 struct intel_crt *crt = 245 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
249 intel_encoder_to_crt(to_intel_encoder(encoder));
250 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
251 struct drm_i915_private *dev_priv = dev->dev_private; 246 struct drm_i915_private *dev_priv = dev->dev_private;
247 struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
252 u32 adpa; 248 u32 adpa;
253 249
254 if (HAS_PCH_SPLIT(dev)) 250 if (HAS_PCH_SPLIT(dev))
@@ -265,14 +261,14 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
265 if (HAS_PCH_LPT(dev)) 261 if (HAS_PCH_LPT(dev))
266 ; /* Those bits don't exist here */ 262 ; /* Those bits don't exist here */
267 else if (HAS_PCH_CPT(dev)) 263 else if (HAS_PCH_CPT(dev))
268 adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); 264 adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
269 else if (intel_crtc->pipe == 0) 265 else if (crtc->pipe == 0)
270 adpa |= ADPA_PIPE_A_SELECT; 266 adpa |= ADPA_PIPE_A_SELECT;
271 else 267 else
272 adpa |= ADPA_PIPE_B_SELECT; 268 adpa |= ADPA_PIPE_B_SELECT;
273 269
274 if (!HAS_PCH_SPLIT(dev)) 270 if (!HAS_PCH_SPLIT(dev))
275 I915_WRITE(BCLRPAT(intel_crtc->pipe), 0); 271 I915_WRITE(BCLRPAT(crtc->pipe), 0);
276 272
277 I915_WRITE(crt->adpa_reg, adpa); 273 I915_WRITE(crt->adpa_reg, adpa);
278} 274}
@@ -613,6 +609,10 @@ intel_crt_detect(struct drm_connector *connector, bool force)
613 enum drm_connector_status status; 609 enum drm_connector_status status;
614 struct intel_load_detect_pipe tmp; 610 struct intel_load_detect_pipe tmp;
615 611
612 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
613 connector->base.id, drm_get_connector_name(connector),
614 force);
615
616 if (I915_HAS_HOTPLUG(dev)) { 616 if (I915_HAS_HOTPLUG(dev)) {
617 /* We can not rely on the HPD pin always being correctly wired 617 /* We can not rely on the HPD pin always being correctly wired
618 * up, for example many KVM do not pass it through, and so 618 * up, for example many KVM do not pass it through, and so
@@ -707,10 +707,6 @@ static void intel_crt_reset(struct drm_connector *connector)
707 * Routines for controlling stuff on the analog port 707 * Routines for controlling stuff on the analog port
708 */ 708 */
709 709
710static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
711 .mode_set = intel_crt_mode_set,
712};
713
714static const struct drm_connector_funcs intel_crt_connector_funcs = { 710static const struct drm_connector_funcs intel_crt_connector_funcs = {
715 .reset = intel_crt_reset, 711 .reset = intel_crt_reset,
716 .dpms = intel_crt_dpms, 712 .dpms = intel_crt_dpms,
@@ -800,6 +796,7 @@ void intel_crt_init(struct drm_device *dev)
800 crt->adpa_reg = ADPA; 796 crt->adpa_reg = ADPA;
801 797
802 crt->base.compute_config = intel_crt_compute_config; 798 crt->base.compute_config = intel_crt_compute_config;
799 crt->base.mode_set = intel_crt_mode_set;
803 crt->base.disable = intel_disable_crt; 800 crt->base.disable = intel_disable_crt;
804 crt->base.enable = intel_enable_crt; 801 crt->base.enable = intel_enable_crt;
805 crt->base.get_config = intel_crt_get_config; 802 crt->base.get_config = intel_crt_get_config;
@@ -811,7 +808,6 @@ void intel_crt_init(struct drm_device *dev)
811 crt->base.get_hw_state = intel_crt_get_hw_state; 808 crt->base.get_hw_state = intel_crt_get_hw_state;
812 intel_connector->get_hw_state = intel_connector_get_hw_state; 809 intel_connector->get_hw_state = intel_connector_get_hw_state;
813 810
814 drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
815 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); 811 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
816 812
817 drm_sysfs_connector_add(connector); 813 drm_sysfs_connector_add(connector);
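
The CRT conversion above (and the DDI one below) follow the same pattern used throughout this series: the drm_encoder_helper_funcs ->mode_set hook, which received mode and adjusted_mode as arguments, is replaced by an intel_encoder ->mode_set callback that takes only the encoder and looks up the adjusted mode through the attached CRTC's pipe config. A minimal sketch of the new shape, using a hypothetical "foo" encoder rather than anything from this patch:

/* sketch of the converted callback; "foo" is a placeholder encoder */
static void foo_mode_set(struct intel_encoder *encoder)
{
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;

	/* program the port from adjusted_mode, crtc->pipe, etc. */
}

static void foo_encoder_init(struct intel_encoder *intel_encoder)
{
	/* replaces drm_encoder_helper_add() with per-encoder wiring */
	intel_encoder->mode_set = foo_mode_set;
}
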
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b042ee5c4070..63aca49d11a8 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -84,25 +84,17 @@ static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
84 * in either FDI or DP modes only, as HDMI connections will work with both 84 * in either FDI or DP modes only, as HDMI connections will work with both
85 * of those 85 * of those
86 */ 86 */
87static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, 87static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
88 bool use_fdi_mode)
89{ 88{
90 struct drm_i915_private *dev_priv = dev->dev_private; 89 struct drm_i915_private *dev_priv = dev->dev_private;
91 u32 reg; 90 u32 reg;
92 int i; 91 int i;
93 const u32 *ddi_translations = ((use_fdi_mode) ? 92 const u32 *ddi_translations = (port == PORT_E) ?
94 hsw_ddi_translations_fdi : 93 hsw_ddi_translations_fdi :
95 hsw_ddi_translations_dp); 94 hsw_ddi_translations_dp;
96 95
97 DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n", 96 for (i = 0, reg = DDI_BUF_TRANS(port);
98 port_name(port), 97 i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
99 use_fdi_mode ? "FDI" : "DP");
100
101 WARN((use_fdi_mode && (port != PORT_E)),
102 "Programming port %c in FDI mode, this probably will not work.\n",
103 port_name(port));
104
105 for (i=0, reg=DDI_BUF_TRANS(port); i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
106 I915_WRITE(reg, ddi_translations[i]); 98 I915_WRITE(reg, ddi_translations[i]);
107 reg += 4; 99 reg += 4;
108 } 100 }
@@ -118,14 +110,8 @@ void intel_prepare_ddi(struct drm_device *dev)
118 if (!HAS_DDI(dev)) 110 if (!HAS_DDI(dev))
119 return; 111 return;
120 112
121 for (port = PORT_A; port < PORT_E; port++) 113 for (port = PORT_A; port <= PORT_E; port++)
122 intel_prepare_ddi_buffers(dev, port, false); 114 intel_prepare_ddi_buffers(dev, port);
123
124 /* DDI E is the suggested one to work in FDI mode, so program is as such
125 * by default. It will have to be re-programmed in case a digital DP
126 * output will be detected on it
127 */
128 intel_prepare_ddi_buffers(dev, PORT_E, true);
129} 115}
130 116
131static const long hsw_ddi_buf_ctl_values[] = { 117static const long hsw_ddi_buf_ctl_values[] = {
@@ -281,25 +267,22 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
281 DRM_ERROR("FDI link training failed!\n"); 267 DRM_ERROR("FDI link training failed!\n");
282} 268}
283 269
284static void intel_ddi_mode_set(struct drm_encoder *encoder, 270static void intel_ddi_mode_set(struct intel_encoder *encoder)
285 struct drm_display_mode *mode,
286 struct drm_display_mode *adjusted_mode)
287{ 271{
288 struct drm_crtc *crtc = encoder->crtc; 272 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
289 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 273 int port = intel_ddi_get_encoder_port(encoder);
290 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 274 int pipe = crtc->pipe;
291 int port = intel_ddi_get_encoder_port(intel_encoder); 275 int type = encoder->type;
292 int pipe = intel_crtc->pipe; 276 struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
293 int type = intel_encoder->type;
294 277
295 DRM_DEBUG_KMS("Preparing DDI mode on port %c, pipe %c\n", 278 DRM_DEBUG_KMS("Preparing DDI mode on port %c, pipe %c\n",
296 port_name(port), pipe_name(pipe)); 279 port_name(port), pipe_name(pipe));
297 280
298 intel_crtc->eld_vld = false; 281 crtc->eld_vld = false;
299 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { 282 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
300 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 283 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
301 struct intel_digital_port *intel_dig_port = 284 struct intel_digital_port *intel_dig_port =
302 enc_to_dig_port(encoder); 285 enc_to_dig_port(&encoder->base);
303 286
304 intel_dp->DP = intel_dig_port->saved_port_bits | 287 intel_dp->DP = intel_dig_port->saved_port_bits |
305 DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; 288 DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
@@ -307,17 +290,17 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
307 290
308 if (intel_dp->has_audio) { 291 if (intel_dp->has_audio) {
309 DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n", 292 DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
310 pipe_name(intel_crtc->pipe)); 293 pipe_name(crtc->pipe));
311 294
312 /* write eld */ 295 /* write eld */
313 DRM_DEBUG_DRIVER("DP audio: write eld information\n"); 296 DRM_DEBUG_DRIVER("DP audio: write eld information\n");
314 intel_write_eld(encoder, adjusted_mode); 297 intel_write_eld(&encoder->base, adjusted_mode);
315 } 298 }
316 299
317 intel_dp_init_link_config(intel_dp); 300 intel_dp_init_link_config(intel_dp);
318 301
319 } else if (type == INTEL_OUTPUT_HDMI) { 302 } else if (type == INTEL_OUTPUT_HDMI) {
320 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 303 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
321 304
322 if (intel_hdmi->has_audio) { 305 if (intel_hdmi->has_audio) {
323 /* Proper support for digital audio needs a new logic 306 /* Proper support for digital audio needs a new logic
@@ -325,14 +308,14 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
325 * patch bombing. 308 * patch bombing.
326 */ 309 */
327 DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n", 310 DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
328 pipe_name(intel_crtc->pipe)); 311 pipe_name(crtc->pipe));
329 312
330 /* write eld */ 313 /* write eld */
331 DRM_DEBUG_DRIVER("HDMI audio: write eld information\n"); 314 DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
332 intel_write_eld(encoder, adjusted_mode); 315 intel_write_eld(&encoder->base, adjusted_mode);
333 } 316 }
334 317
335 intel_hdmi->set_infoframes(encoder, adjusted_mode); 318 intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
336 } 319 }
337} 320}
338 321
@@ -1118,6 +1101,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
1118 intel_dp_stop_link_train(intel_dp); 1101 intel_dp_stop_link_train(intel_dp);
1119 1102
1120 ironlake_edp_backlight_on(intel_dp); 1103 ironlake_edp_backlight_on(intel_dp);
1104 intel_edp_psr_enable(intel_dp);
1121 } 1105 }
1122 1106
1123 if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) { 1107 if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
@@ -1148,16 +1132,20 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
1148 if (type == INTEL_OUTPUT_EDP) { 1132 if (type == INTEL_OUTPUT_EDP) {
1149 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1133 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1150 1134
1135 intel_edp_psr_disable(intel_dp);
1151 ironlake_edp_backlight_off(intel_dp); 1136 ironlake_edp_backlight_off(intel_dp);
1152 } 1137 }
1153} 1138}
1154 1139
1155int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) 1140int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
1156{ 1141{
1157 if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) 1142 uint32_t lcpll = I915_READ(LCPLL_CTL);
1143
1144 if (lcpll & LCPLL_CD_SOURCE_FCLK)
1145 return 800000;
1146 else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
1158 return 450000; 1147 return 450000;
1159 else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) == 1148 else if ((lcpll & LCPLL_CLK_FREQ_MASK) == LCPLL_CLK_FREQ_450)
1160 LCPLL_CLK_FREQ_450)
1161 return 450000; 1149 return 450000;
1162 else if (IS_ULT(dev_priv->dev)) 1150 else if (IS_ULT(dev_priv->dev))
1163 return 337500; 1151 return 337500;
@@ -1309,10 +1297,6 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
1309 .destroy = intel_ddi_destroy, 1297 .destroy = intel_ddi_destroy,
1310}; 1298};
1311 1299
1312static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
1313 .mode_set = intel_ddi_mode_set,
1314};
1315
1316void intel_ddi_init(struct drm_device *dev, enum port port) 1300void intel_ddi_init(struct drm_device *dev, enum port port)
1317{ 1301{
1318 struct drm_i915_private *dev_priv = dev->dev_private; 1302 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1337,9 +1321,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
1337 1321
1338 drm_encoder_init(dev, encoder, &intel_ddi_funcs, 1322 drm_encoder_init(dev, encoder, &intel_ddi_funcs,
1339 DRM_MODE_ENCODER_TMDS); 1323 DRM_MODE_ENCODER_TMDS);
1340 drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
1341 1324
1342 intel_encoder->compute_config = intel_ddi_compute_config; 1325 intel_encoder->compute_config = intel_ddi_compute_config;
1326 intel_encoder->mode_set = intel_ddi_mode_set;
1343 intel_encoder->enable = intel_enable_ddi; 1327 intel_encoder->enable = intel_enable_ddi;
1344 intel_encoder->pre_enable = intel_ddi_pre_enable; 1328 intel_encoder->pre_enable = intel_ddi_pre_enable;
1345 intel_encoder->disable = intel_disable_ddi; 1329 intel_encoder->disable = intel_disable_ddi;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index be79f477a38f..38452d82ac7d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -45,6 +45,15 @@ bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
45static void intel_increase_pllclock(struct drm_crtc *crtc); 45static void intel_increase_pllclock(struct drm_crtc *crtc);
46static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 46static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
47 47
48static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
49 struct intel_crtc_config *pipe_config);
50static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
51 struct intel_crtc_config *pipe_config);
52
53static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
54 int x, int y, struct drm_framebuffer *old_fb);
55
56
48typedef struct { 57typedef struct {
49 int min, max; 58 int min, max;
50} intel_range_t; 59} intel_range_t;
@@ -54,7 +63,6 @@ typedef struct {
54 int p2_slow, p2_fast; 63 int p2_slow, p2_fast;
55} intel_p2_t; 64} intel_p2_t;
56 65
57#define INTEL_P2_NUM 2
58typedef struct intel_limit intel_limit_t; 66typedef struct intel_limit intel_limit_t;
59struct intel_limit { 67struct intel_limit {
60 intel_range_t dot, vco, n, m, m1, m2, p, p1; 68 intel_range_t dot, vco, n, m, m1, m2, p, p1;
@@ -84,7 +92,7 @@ intel_fdi_link_freq(struct drm_device *dev)
84 return 27; 92 return 27;
85} 93}
86 94
87static const intel_limit_t intel_limits_i8xx_dvo = { 95static const intel_limit_t intel_limits_i8xx_dac = {
88 .dot = { .min = 25000, .max = 350000 }, 96 .dot = { .min = 25000, .max = 350000 },
89 .vco = { .min = 930000, .max = 1400000 }, 97 .vco = { .min = 930000, .max = 1400000 },
90 .n = { .min = 3, .max = 16 }, 98 .n = { .min = 3, .max = 16 },
@@ -97,6 +105,19 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
97 .p2_slow = 4, .p2_fast = 2 }, 105 .p2_slow = 4, .p2_fast = 2 },
98}; 106};
99 107
108static const intel_limit_t intel_limits_i8xx_dvo = {
109 .dot = { .min = 25000, .max = 350000 },
110 .vco = { .min = 930000, .max = 1400000 },
111 .n = { .min = 3, .max = 16 },
112 .m = { .min = 96, .max = 140 },
113 .m1 = { .min = 18, .max = 26 },
114 .m2 = { .min = 6, .max = 16 },
115 .p = { .min = 4, .max = 128 },
116 .p1 = { .min = 2, .max = 33 },
117 .p2 = { .dot_limit = 165000,
118 .p2_slow = 4, .p2_fast = 4 },
119};
120
100static const intel_limit_t intel_limits_i8xx_lvds = { 121static const intel_limit_t intel_limits_i8xx_lvds = {
101 .dot = { .min = 25000, .max = 350000 }, 122 .dot = { .min = 25000, .max = 350000 },
102 .vco = { .min = 930000, .max = 1400000 }, 123 .vco = { .min = 930000, .max = 1400000 },
@@ -405,8 +426,10 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
405 } else { 426 } else {
406 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 427 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
407 limit = &intel_limits_i8xx_lvds; 428 limit = &intel_limits_i8xx_lvds;
408 else 429 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
409 limit = &intel_limits_i8xx_dvo; 430 limit = &intel_limits_i8xx_dvo;
431 else
432 limit = &intel_limits_i8xx_dac;
410 } 433 }
411 return limit; 434 return limit;
412} 435}
@@ -667,7 +690,7 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
667{ 690{
668 u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2; 691 u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
669 u32 m, n, fastclk; 692 u32 m, n, fastclk;
670 u32 updrate, minupdate, fracbits, p; 693 u32 updrate, minupdate, p;
671 unsigned long bestppm, ppm, absppm; 694 unsigned long bestppm, ppm, absppm;
672 int dotclk, flag; 695 int dotclk, flag;
673 696
@@ -678,7 +701,6 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
678 fastclk = dotclk / (2*100); 701 fastclk = dotclk / (2*100);
679 updrate = 0; 702 updrate = 0;
680 minupdate = 19200; 703 minupdate = 19200;
681 fracbits = 1;
682 n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0; 704 n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
683 bestm1 = bestm2 = bestp1 = bestp2 = 0; 705 bestm1 = bestm2 = bestp1 = bestp2 = 0;
684 706
@@ -892,8 +914,8 @@ static const char *state_string(bool enabled)
892} 914}
893 915
894/* Only for pre-ILK configs */ 916/* Only for pre-ILK configs */
895static void assert_pll(struct drm_i915_private *dev_priv, 917void assert_pll(struct drm_i915_private *dev_priv,
896 enum pipe pipe, bool state) 918 enum pipe pipe, bool state)
897{ 919{
898 int reg; 920 int reg;
899 u32 val; 921 u32 val;
@@ -906,10 +928,8 @@ static void assert_pll(struct drm_i915_private *dev_priv,
906 "PLL state assertion failure (expected %s, current %s)\n", 928 "PLL state assertion failure (expected %s, current %s)\n",
907 state_string(state), state_string(cur_state)); 929 state_string(state), state_string(cur_state));
908} 930}
909#define assert_pll_enabled(d, p) assert_pll(d, p, true)
910#define assert_pll_disabled(d, p) assert_pll(d, p, false)
911 931
912static struct intel_shared_dpll * 932struct intel_shared_dpll *
913intel_crtc_to_shared_dpll(struct intel_crtc *crtc) 933intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
914{ 934{
915 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 935 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
@@ -921,9 +941,9 @@ intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
921} 941}
922 942
923/* For ILK+ */ 943/* For ILK+ */
924static void assert_shared_dpll(struct drm_i915_private *dev_priv, 944void assert_shared_dpll(struct drm_i915_private *dev_priv,
925 struct intel_shared_dpll *pll, 945 struct intel_shared_dpll *pll,
926 bool state) 946 bool state)
927{ 947{
928 bool cur_state; 948 bool cur_state;
929 struct intel_dpll_hw_state hw_state; 949 struct intel_dpll_hw_state hw_state;
@@ -942,8 +962,6 @@ static void assert_shared_dpll(struct drm_i915_private *dev_priv,
942 "%s assertion failure (expected %s, current %s)\n", 962 "%s assertion failure (expected %s, current %s)\n",
943 pll->name, state_string(state), state_string(cur_state)); 963 pll->name, state_string(state), state_string(cur_state));
944} 964}
945#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
946#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
947 965
948static void assert_fdi_tx(struct drm_i915_private *dev_priv, 966static void assert_fdi_tx(struct drm_i915_private *dev_priv,
949 enum pipe pipe, bool state) 967 enum pipe pipe, bool state)
@@ -1007,15 +1025,19 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1007 WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); 1025 WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1008} 1026}
1009 1027
1010static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv, 1028void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1011 enum pipe pipe) 1029 enum pipe pipe, bool state)
1012{ 1030{
1013 int reg; 1031 int reg;
1014 u32 val; 1032 u32 val;
1033 bool cur_state;
1015 1034
1016 reg = FDI_RX_CTL(pipe); 1035 reg = FDI_RX_CTL(pipe);
1017 val = I915_READ(reg); 1036 val = I915_READ(reg);
1018 WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); 1037 cur_state = !!(val & FDI_RX_PLL_ENABLE);
1038 WARN(cur_state != state,
1039 "FDI RX PLL assertion failure (expected %s, current %s)\n",
1040 state_string(state), state_string(cur_state));
1019} 1041}
1020 1042
1021static void assert_panel_unlocked(struct drm_i915_private *dev_priv, 1043static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
@@ -1111,7 +1133,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1111 } 1133 }
1112 1134
1113 /* Need to check both planes against the pipe */ 1135 /* Need to check both planes against the pipe */
1114 for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) { 1136 for_each_pipe(i) {
1115 reg = DSPCNTR(i); 1137 reg = DSPCNTR(i);
1116 val = I915_READ(reg); 1138 val = I915_READ(reg);
1117 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> 1139 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
@@ -1301,51 +1323,92 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1301 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); 1323 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1302} 1324}
1303 1325
1304/** 1326static void vlv_enable_pll(struct intel_crtc *crtc)
1305 * intel_enable_pll - enable a PLL
1306 * @dev_priv: i915 private structure
1307 * @pipe: pipe PLL to enable
1308 *
1309 * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to
1310 * make sure the PLL reg is writable first though, since the panel write
1311 * protect mechanism may be enabled.
1312 *
1313 * Note! This is for pre-ILK only.
1314 *
1315 * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
1316 */
1317static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1318{ 1327{
1319 int reg; 1328 struct drm_device *dev = crtc->base.dev;
1320 u32 val; 1329 struct drm_i915_private *dev_priv = dev->dev_private;
1330 int reg = DPLL(crtc->pipe);
1331 u32 dpll = crtc->config.dpll_hw_state.dpll;
1321 1332
1322 assert_pipe_disabled(dev_priv, pipe); 1333 assert_pipe_disabled(dev_priv, crtc->pipe);
1323 1334
1324 /* No really, not for ILK+ */ 1335 /* No really, not for ILK+ */
1325 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5); 1336 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
1326 1337
1327 /* PLL is protected by panel, make sure we can write it */ 1338 /* PLL is protected by panel, make sure we can write it */
1328 if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) 1339 if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1329 assert_panel_unlocked(dev_priv, pipe); 1340 assert_panel_unlocked(dev_priv, crtc->pipe);
1330 1341
1331 reg = DPLL(pipe); 1342 I915_WRITE(reg, dpll);
1332 val = I915_READ(reg); 1343 POSTING_READ(reg);
1333 val |= DPLL_VCO_ENABLE; 1344 udelay(150);
1345
1346 if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1347 DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
1348
1349 I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
1350 POSTING_READ(DPLL_MD(crtc->pipe));
1334 1351
1335 /* We do this three times for luck */ 1352 /* We do this three times for luck */
1336 I915_WRITE(reg, val); 1353 I915_WRITE(reg, dpll);
1337 POSTING_READ(reg); 1354 POSTING_READ(reg);
1338 udelay(150); /* wait for warmup */ 1355 udelay(150); /* wait for warmup */
1339 I915_WRITE(reg, val); 1356 I915_WRITE(reg, dpll);
1340 POSTING_READ(reg); 1357 POSTING_READ(reg);
1341 udelay(150); /* wait for warmup */ 1358 udelay(150); /* wait for warmup */
1342 I915_WRITE(reg, val); 1359 I915_WRITE(reg, dpll);
1360 POSTING_READ(reg);
1361 udelay(150); /* wait for warmup */
1362}
1363
1364static void i9xx_enable_pll(struct intel_crtc *crtc)
1365{
1366 struct drm_device *dev = crtc->base.dev;
1367 struct drm_i915_private *dev_priv = dev->dev_private;
1368 int reg = DPLL(crtc->pipe);
1369 u32 dpll = crtc->config.dpll_hw_state.dpll;
1370
1371 assert_pipe_disabled(dev_priv, crtc->pipe);
1372
1373 /* No really, not for ILK+ */
1374 BUG_ON(dev_priv->info->gen >= 5);
1375
1376 /* PLL is protected by panel, make sure we can write it */
1377 if (IS_MOBILE(dev) && !IS_I830(dev))
1378 assert_panel_unlocked(dev_priv, crtc->pipe);
1379
1380 I915_WRITE(reg, dpll);
1381
1382 /* Wait for the clocks to stabilize. */
1383 POSTING_READ(reg);
1384 udelay(150);
1385
1386 if (INTEL_INFO(dev)->gen >= 4) {
1387 I915_WRITE(DPLL_MD(crtc->pipe),
1388 crtc->config.dpll_hw_state.dpll_md);
1389 } else {
1390 /* The pixel multiplier can only be updated once the
1391 * DPLL is enabled and the clocks are stable.
1392 *
1393 * So write it again.
1394 */
1395 I915_WRITE(reg, dpll);
1396 }
1397
1398 /* We do this three times for luck */
1399 I915_WRITE(reg, dpll);
1400 POSTING_READ(reg);
1401 udelay(150); /* wait for warmup */
1402 I915_WRITE(reg, dpll);
1403 POSTING_READ(reg);
1404 udelay(150); /* wait for warmup */
1405 I915_WRITE(reg, dpll);
1343 POSTING_READ(reg); 1406 POSTING_READ(reg);
1344 udelay(150); /* wait for warmup */ 1407 udelay(150); /* wait for warmup */
1345} 1408}
1346 1409
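These hunks replace intel_enable_pll() with a Valleyview variant that polls the DPLL lock bit and an i9xx variant, both of which now program the value precomputed in crtc->config.dpll_hw_state. Below is a simplified, compilable model of that enable sequence; the register, the lock bit and the instantly-locking hardware are all fake, and udelay() is a no-op, so this only illustrates the write-then-poll-then-rewrite shape of the code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t dpll_reg;                      /* stand-in for the DPLL register */
#define DPLL_VCO_ENABLE (1u << 31)
#define DPLL_LOCK       (1u << 15)             /* placeholder for the lock status bit */

static void reg_write(uint32_t val) { dpll_reg = val | DPLL_LOCK; /* pretend HW locks instantly */ }
static uint32_t reg_read(void)      { return dpll_reg; }
static void udelay(unsigned us)     { (void)us; }  /* no-op outside the kernel */

/* Write the precomputed DPLL value, poll for lock, then rewrite for warmup. */
static int enable_pll(uint32_t dpll)
{
    int tries;

    reg_write(dpll);
    udelay(150);

    for (tries = 0; tries < 1000; tries++) {
        if ((reg_read() & DPLL_LOCK) == DPLL_LOCK)
            break;
        udelay(1);
    }
    if (tries == 1000) {
        fprintf(stderr, "DPLL failed to lock\n");
        return -1;
    }

    /* The driver then rewrites the value a few times ("for luck"), with a
     * posting read and a delay after each write so the clocks stabilise. */
    reg_write(dpll);
    udelay(150);
    reg_write(dpll);
    udelay(150);
    return 0;
}

int main(void)
{
    return enable_pll(DPLL_VCO_ENABLE);
}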
1347/** 1410/**
1348 * intel_disable_pll - disable a PLL 1411 * i9xx_disable_pll - disable a PLL
1349 * @dev_priv: i915 private structure 1412 * @dev_priv: i915 private structure
1350 * @pipe: pipe PLL to disable 1413 * @pipe: pipe PLL to disable
1351 * 1414 *
@@ -1353,11 +1416,8 @@ static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1353 * 1416 *
1354 * Note! This is for pre-ILK only. 1417 * Note! This is for pre-ILK only.
1355 */ 1418 */
1356static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) 1419static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1357{ 1420{
1358 int reg;
1359 u32 val;
1360
1361 /* Don't disable pipe A or pipe A PLLs if needed */ 1421 /* Don't disable pipe A or pipe A PLLs if needed */
1362 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) 1422 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1363 return; 1423 return;
@@ -1365,11 +1425,8 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1365 /* Make sure the pipe isn't still relying on us */ 1425 /* Make sure the pipe isn't still relying on us */
1366 assert_pipe_disabled(dev_priv, pipe); 1426 assert_pipe_disabled(dev_priv, pipe);
1367 1427
1368 reg = DPLL(pipe); 1428 I915_WRITE(DPLL(pipe), 0);
1369 val = I915_READ(reg); 1429 POSTING_READ(DPLL(pipe));
1370 val &= ~DPLL_VCO_ENABLE;
1371 I915_WRITE(reg, val);
1372 POSTING_READ(reg);
1373} 1430}
1374 1431
1375void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port) 1432void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
@@ -1819,7 +1876,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
1819 return 0; 1876 return 0;
1820 1877
1821err_unpin: 1878err_unpin:
1822 i915_gem_object_unpin(obj); 1879 i915_gem_object_unpin_from_display_plane(obj);
1823err_interruptible: 1880err_interruptible:
1824 dev_priv->mm.interruptible = true; 1881 dev_priv->mm.interruptible = true;
1825 return ret; 1882 return ret;
@@ -1828,7 +1885,7 @@ err_interruptible:
1828void intel_unpin_fb_obj(struct drm_i915_gem_object *obj) 1885void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
1829{ 1886{
1830 i915_gem_object_unpin_fence(obj); 1887 i915_gem_object_unpin_fence(obj);
1831 i915_gem_object_unpin(obj); 1888 i915_gem_object_unpin_from_display_plane(obj);
1832} 1889}
1833 1890
1834/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel 1891/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
@@ -1942,16 +1999,17 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1942 intel_crtc->dspaddr_offset = linear_offset; 1999 intel_crtc->dspaddr_offset = linear_offset;
1943 } 2000 }
1944 2001
1945 DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", 2002 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
1946 obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); 2003 i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2004 fb->pitches[0]);
1947 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 2005 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
1948 if (INTEL_INFO(dev)->gen >= 4) { 2006 if (INTEL_INFO(dev)->gen >= 4) {
1949 I915_MODIFY_DISPBASE(DSPSURF(plane), 2007 I915_MODIFY_DISPBASE(DSPSURF(plane),
1950 obj->gtt_offset + intel_crtc->dspaddr_offset); 2008 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
1951 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); 2009 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
1952 I915_WRITE(DSPLINOFF(plane), linear_offset); 2010 I915_WRITE(DSPLINOFF(plane), linear_offset);
1953 } else 2011 } else
1954 I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset); 2012 I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
1955 POSTING_READ(reg); 2013 POSTING_READ(reg);
1956 2014
1957 return 0; 2015 return 0;
@@ -2031,11 +2089,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
2031 fb->pitches[0]); 2089 fb->pitches[0]);
2032 linear_offset -= intel_crtc->dspaddr_offset; 2090 linear_offset -= intel_crtc->dspaddr_offset;
2033 2091
2034 DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", 2092 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2035 obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); 2093 i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2094 fb->pitches[0]);
2036 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 2095 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2037 I915_MODIFY_DISPBASE(DSPSURF(plane), 2096 I915_MODIFY_DISPBASE(DSPSURF(plane),
2038 obj->gtt_offset + intel_crtc->dspaddr_offset); 2097 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2039 if (IS_HASWELL(dev)) { 2098 if (IS_HASWELL(dev)) {
2040 I915_WRITE(DSPOFFSET(plane), (y << 16) | x); 2099 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2041 } else { 2100 } else {
@@ -2183,6 +2242,20 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2183 return ret; 2242 return ret;
2184 } 2243 }
2185 2244
2245 /* Update pipe size and adjust fitter if needed */
2246 if (i915_fastboot) {
2247 I915_WRITE(PIPESRC(intel_crtc->pipe),
2248 ((crtc->mode.hdisplay - 1) << 16) |
2249 (crtc->mode.vdisplay - 1));
2250 if (!intel_crtc->config.pch_pfit.size &&
2251 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2252 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2253 I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
2254 I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
2255 I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
2256 }
2257 }
2258
2186 ret = dev_priv->display.update_plane(crtc, fb, x, y); 2259 ret = dev_priv->display.update_plane(crtc, fb, x, y);
2187 if (ret) { 2260 if (ret) {
2188 intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj); 2261 intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
@@ -2203,6 +2276,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2203 } 2276 }
2204 2277
2205 intel_update_fbc(dev); 2278 intel_update_fbc(dev);
2279 intel_edp_psr_update(dev);
2206 mutex_unlock(&dev->struct_mutex); 2280 mutex_unlock(&dev->struct_mutex);
2207 2281
2208 intel_crtc_update_sarea_pos(crtc, x, y); 2282 intel_crtc_update_sarea_pos(crtc, x, y);
@@ -2523,7 +2597,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2523 struct drm_i915_private *dev_priv = dev->dev_private; 2597 struct drm_i915_private *dev_priv = dev->dev_private;
2524 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2598 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2525 int pipe = intel_crtc->pipe; 2599 int pipe = intel_crtc->pipe;
2526 u32 reg, temp, i; 2600 u32 reg, temp, i, j;
2527 2601
2528 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit 2602 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2529 for train result */ 2603 for train result */
@@ -2539,97 +2613,99 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2539 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", 2613 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
2540 I915_READ(FDI_RX_IIR(pipe))); 2614 I915_READ(FDI_RX_IIR(pipe)));
2541 2615
2542 /* enable CPU FDI TX and PCH FDI RX */ 2616 /* Try each vswing and preemphasis setting twice before moving on */
2543 reg = FDI_TX_CTL(pipe); 2617 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
2544 temp = I915_READ(reg); 2618 /* disable first in case we need to retry */
2545 temp &= ~FDI_DP_PORT_WIDTH_MASK; 2619 reg = FDI_TX_CTL(pipe);
2546 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes); 2620 temp = I915_READ(reg);
2547 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); 2621 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2548 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; 2622 temp &= ~FDI_TX_ENABLE;
2549 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2623 I915_WRITE(reg, temp);
2550 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2551 temp |= FDI_COMPOSITE_SYNC;
2552 I915_WRITE(reg, temp | FDI_TX_ENABLE);
2553
2554 I915_WRITE(FDI_RX_MISC(pipe),
2555 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2556
2557 reg = FDI_RX_CTL(pipe);
2558 temp = I915_READ(reg);
2559 temp &= ~FDI_LINK_TRAIN_AUTO;
2560 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2561 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2562 temp |= FDI_COMPOSITE_SYNC;
2563 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2564 2624
2565 POSTING_READ(reg); 2625 reg = FDI_RX_CTL(pipe);
2566 udelay(150); 2626 temp = I915_READ(reg);
2627 temp &= ~FDI_LINK_TRAIN_AUTO;
2628 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2629 temp &= ~FDI_RX_ENABLE;
2630 I915_WRITE(reg, temp);
2567 2631
2568 for (i = 0; i < 4; i++) { 2632 /* enable CPU FDI TX and PCH FDI RX */
2569 reg = FDI_TX_CTL(pipe); 2633 reg = FDI_TX_CTL(pipe);
2570 temp = I915_READ(reg); 2634 temp = I915_READ(reg);
2635 temp &= ~FDI_DP_PORT_WIDTH_MASK;
2636 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2637 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2571 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2638 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2572 temp |= snb_b_fdi_train_param[i]; 2639 temp |= snb_b_fdi_train_param[j/2];
2573 I915_WRITE(reg, temp); 2640 temp |= FDI_COMPOSITE_SYNC;
2641 I915_WRITE(reg, temp | FDI_TX_ENABLE);
2574 2642
2575 POSTING_READ(reg); 2643 I915_WRITE(FDI_RX_MISC(pipe),
2576 udelay(500); 2644 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2577 2645
2578 reg = FDI_RX_IIR(pipe); 2646 reg = FDI_RX_CTL(pipe);
2579 temp = I915_READ(reg); 2647 temp = I915_READ(reg);
2580 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2648 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2581 2649 temp |= FDI_COMPOSITE_SYNC;
2582 if (temp & FDI_RX_BIT_LOCK || 2650 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2583 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2584 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2585 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
2586 break;
2587 }
2588 }
2589 if (i == 4)
2590 DRM_ERROR("FDI train 1 fail!\n");
2591 2651
2592 /* Train 2 */ 2652 POSTING_READ(reg);
2593 reg = FDI_TX_CTL(pipe); 2653 udelay(1); /* should be 0.5us */
2594 temp = I915_READ(reg);
2595 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2596 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2597 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2598 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2599 I915_WRITE(reg, temp);
2600 2654
2601 reg = FDI_RX_CTL(pipe); 2655 for (i = 0; i < 4; i++) {
2602 temp = I915_READ(reg); 2656 reg = FDI_RX_IIR(pipe);
2603 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2657 temp = I915_READ(reg);
2604 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 2658 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2605 I915_WRITE(reg, temp);
2606 2659
2607 POSTING_READ(reg); 2660 if (temp & FDI_RX_BIT_LOCK ||
2608 udelay(150); 2661 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2662 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2663 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
2664 i);
2665 break;
2666 }
2667 udelay(1); /* should be 0.5us */
2668 }
2669 if (i == 4) {
2670 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
2671 continue;
2672 }
2609 2673
2610 for (i = 0; i < 4; i++) { 2674 /* Train 2 */
2611 reg = FDI_TX_CTL(pipe); 2675 reg = FDI_TX_CTL(pipe);
2612 temp = I915_READ(reg); 2676 temp = I915_READ(reg);
2613 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2677 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2614 temp |= snb_b_fdi_train_param[i]; 2678 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2679 I915_WRITE(reg, temp);
2680
2681 reg = FDI_RX_CTL(pipe);
2682 temp = I915_READ(reg);
2683 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2684 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2615 I915_WRITE(reg, temp); 2685 I915_WRITE(reg, temp);
2616 2686
2617 POSTING_READ(reg); 2687 POSTING_READ(reg);
2618 udelay(500); 2688 udelay(2); /* should be 1.5us */
2619 2689
2620 reg = FDI_RX_IIR(pipe); 2690 for (i = 0; i < 4; i++) {
2621 temp = I915_READ(reg); 2691 reg = FDI_RX_IIR(pipe);
2622 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2692 temp = I915_READ(reg);
2693 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2623 2694
2624 if (temp & FDI_RX_SYMBOL_LOCK) { 2695 if (temp & FDI_RX_SYMBOL_LOCK ||
2625 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 2696 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
2626 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i); 2697 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2627 break; 2698 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
2699 i);
2700 goto train_done;
2701 }
2702 udelay(2); /* should be 1.5us */
2628 } 2703 }
2704 if (i == 4)
2705 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
2629 } 2706 }
2630 if (i == 4)
2631 DRM_ERROR("FDI train 2 fail!\n");
2632 2707
2708train_done:
2633 DRM_DEBUG_KMS("FDI train done.\n"); 2709 DRM_DEBUG_KMS("FDI train done.\n");
2634} 2710}
2635 2711
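The rewritten ivb_manual_fdi_link_train() tries every vswing/pre-emphasis entry twice, disabling FDI TX and RX before each retry, and jumps to train_done as soon as symbol lock is reached instead of erroring out after a single attempt. The control flow is easier to see in a small standalone model where the lock conditions are faked:

#include <stdbool.h>
#include <stdio.h>

#define N_TRAIN_PARAMS 4          /* mirrors ARRAY_SIZE(snb_b_fdi_train_param) */

static bool bit_lock(int vswing)    { return vswing >= 1; }  /* fake: locks from level 1 */
static bool symbol_lock(int vswing) { return vswing >= 2; }  /* fake: locks from level 2 */

static bool fdi_link_train(void)
{
    int i, j;

    /* Try each vswing/pre-emphasis setting twice before moving on. */
    for (j = 0; j < N_TRAIN_PARAMS * 2; j++) {
        int vswing = j / 2;

        /* real code: disable TX/RX first in case this is a retry, then
         * re-enable with training pattern 1 and the new drive settings */

        for (i = 0; i < 4; i++)            /* poll for bit lock */
            if (bit_lock(vswing))
                break;
        if (i == 4) {
            printf("train 1 fail on vswing %d\n", vswing);
            continue;                      /* move on to the next drive setting */
        }

        /* real code: switch TX/RX to training pattern 2 */
        for (i = 0; i < 4; i++)            /* poll for symbol lock */
            if (symbol_lock(vswing))
                goto train_done;
        printf("train 2 fail on vswing %d\n", vswing);
    }
    return false;

train_done:
    printf("FDI train done\n");
    return true;
}

int main(void)
{
    return fdi_link_train() ? 0 : 1;
}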
@@ -2927,15 +3003,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2927 /* For PCH output, training FDI link */ 3003 /* For PCH output, training FDI link */
2928 dev_priv->display.fdi_link_train(crtc); 3004 dev_priv->display.fdi_link_train(crtc);
2929 3005
2930 /* XXX: pch pll's can be enabled any time before we enable the PCH 3006 /* We need to program the right clock selection before writing the pixel
2931 * transcoder, and we actually should do this to not upset any PCH 3007 * mutliplier into the DPLL. */
2932 * transcoder that already use the clock when we share it.
2933 *
2934 * Note that enable_shared_dpll tries to do the right thing, but
2935 * get_shared_dpll unconditionally resets the pll - we need that to have
2936 * the right LVDS enable sequence. */
2937 ironlake_enable_shared_dpll(intel_crtc);
2938
2939 if (HAS_PCH_CPT(dev)) { 3008 if (HAS_PCH_CPT(dev)) {
2940 u32 sel; 3009 u32 sel;
2941 3010
@@ -2949,6 +3018,15 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2949 I915_WRITE(PCH_DPLL_SEL, temp); 3018 I915_WRITE(PCH_DPLL_SEL, temp);
2950 } 3019 }
2951 3020
3021 /* XXX: pch pll's can be enabled any time before we enable the PCH
3022 * transcoder, and we actually should do this to not upset any PCH
3023 * transcoder that already uses the clock when we share it.
3024 *
3025 * Note that enable_shared_dpll tries to do the right thing, but
3026 * get_shared_dpll unconditionally resets the pll - we need that to have
3027 * the right LVDS enable sequence. */
3028 ironlake_enable_shared_dpll(intel_crtc);
3029
2952 /* set transcoder timing, panel must allow it */ 3030 /* set transcoder timing, panel must allow it */
2953 assert_panel_unlocked(dev_priv, pipe); 3031 assert_panel_unlocked(dev_priv, pipe);
2954 ironlake_pch_transcoder_set_timings(intel_crtc, pipe); 3032 ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
@@ -3031,7 +3109,7 @@ static void intel_put_shared_dpll(struct intel_crtc *crtc)
3031 crtc->config.shared_dpll = DPLL_ID_PRIVATE; 3109 crtc->config.shared_dpll = DPLL_ID_PRIVATE;
3032} 3110}
3033 3111
3034static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, u32 dpll, u32 fp) 3112static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3035{ 3113{
3036 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 3114 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3037 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 3115 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
@@ -3045,7 +3123,7 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
3045 3123
3046 if (HAS_PCH_IBX(dev_priv->dev)) { 3124 if (HAS_PCH_IBX(dev_priv->dev)) {
3047 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ 3125 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
3048 i = crtc->pipe; 3126 i = (enum intel_dpll_id) crtc->pipe;
3049 pll = &dev_priv->shared_dplls[i]; 3127 pll = &dev_priv->shared_dplls[i];
3050 3128
3051 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", 3129 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
@@ -3061,8 +3139,8 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
3061 if (pll->refcount == 0) 3139 if (pll->refcount == 0)
3062 continue; 3140 continue;
3063 3141
3064 if (dpll == (I915_READ(PCH_DPLL(pll->id)) & 0x7fffffff) && 3142 if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
3065 fp == I915_READ(PCH_FP0(pll->id))) { 3143 sizeof(pll->hw_state)) == 0) {
3066 DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, active %d)\n", 3144 DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, active %d)\n",
3067 crtc->base.base.id, 3145 crtc->base.base.id,
3068 pll->name, pll->refcount, pll->active); 3146 pll->name, pll->refcount, pll->active);
@@ -3096,13 +3174,7 @@ found:
3096 WARN_ON(pll->on); 3174 WARN_ON(pll->on);
3097 assert_shared_dpll_disabled(dev_priv, pll); 3175 assert_shared_dpll_disabled(dev_priv, pll);
3098 3176
3099 /* Wait for the clocks to stabilize before rewriting the regs */ 3177 pll->mode_set(dev_priv, pll);
3100 I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE);
3101 POSTING_READ(PCH_DPLL(pll->id));
3102 udelay(150);
3103
3104 I915_WRITE(PCH_FP0(pll->id), fp);
3105 I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE);
3106 } 3178 }
3107 pll->refcount++; 3179 pll->refcount++;
3108 3180
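With the desired PLL configuration now carried in crtc->config.dpll_hw_state, intel_get_shared_dpll() can decide whether an already-active PCH DPLL is shareable by memcmp()ing the wanted state against each PLL's cached hw_state instead of re-reading PCH_DPLL/PCH_FP0, and it defers the actual programming to pll->mode_set(). A small self-contained model of that matching and refcounting logic, with a simplified field set and the programming reduced to a struct copy:

#include <stdio.h>
#include <string.h>

struct dpll_hw_state { unsigned dpll, dpll_md, fp0, fp1; };

struct shared_dpll {
    const char *name;
    int refcount;
    struct dpll_hw_state hw_state;
};

/* Return an in-use PLL whose programmed state matches @want, or a free one. */
static struct shared_dpll *get_shared_dpll(struct shared_dpll *plls, int n,
                                           const struct dpll_hw_state *want)
{
    int i;

    for (i = 0; i < n; i++)                       /* reuse a matching, busy PLL */
        if (plls[i].refcount &&
            memcmp(want, &plls[i].hw_state, sizeof(*want)) == 0)
            goto found;

    for (i = 0; i < n; i++)                       /* otherwise take a free one */
        if (plls[i].refcount == 0) {
            plls[i].hw_state = *want;             /* the driver calls pll->mode_set() here */
            goto found;
        }
    return NULL;

found:
    plls[i].refcount++;
    printf("using %s (refcount %d)\n", plls[i].name, plls[i].refcount);
    return &plls[i];
}

int main(void)
{
    struct shared_dpll plls[2] = { { "PCH DPLL A", 0, {0} }, { "PCH DPLL B", 0, {0} } };
    struct dpll_hw_state want = { .dpll = 0x80000001, .fp0 = 0x20 };

    get_shared_dpll(plls, 2, &want);   /* allocates PLL A */
    get_shared_dpll(plls, 2, &want);   /* shares PLL A, refcount becomes 2 */
    return 0;
}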
@@ -3174,7 +3246,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3174 struct intel_encoder *encoder; 3246 struct intel_encoder *encoder;
3175 int pipe = intel_crtc->pipe; 3247 int pipe = intel_crtc->pipe;
3176 int plane = intel_crtc->plane; 3248 int plane = intel_crtc->plane;
3177 u32 temp;
3178 3249
3179 WARN_ON(!crtc->enabled); 3250 WARN_ON(!crtc->enabled);
3180 3251
@@ -3188,12 +3259,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3188 3259
3189 intel_update_watermarks(dev); 3260 intel_update_watermarks(dev);
3190 3261
3191 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 3262 for_each_encoder_on_crtc(dev, crtc, encoder)
3192 temp = I915_READ(PCH_LVDS); 3263 if (encoder->pre_enable)
3193 if ((temp & LVDS_PORT_EN) == 0) 3264 encoder->pre_enable(encoder);
3194 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
3195 }
3196
3197 3265
3198 if (intel_crtc->config.has_pch_encoder) { 3266 if (intel_crtc->config.has_pch_encoder) {
3199 /* Note: FDI PLL enabling _must_ be done before we enable the 3267 /* Note: FDI PLL enabling _must_ be done before we enable the
@@ -3205,10 +3273,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3205 assert_fdi_rx_disabled(dev_priv, pipe); 3273 assert_fdi_rx_disabled(dev_priv, pipe);
3206 } 3274 }
3207 3275
3208 for_each_encoder_on_crtc(dev, crtc, encoder)
3209 if (encoder->pre_enable)
3210 encoder->pre_enable(encoder);
3211
3212 ironlake_pfit_enable(intel_crtc); 3276 ironlake_pfit_enable(intel_crtc);
3213 3277
3214 /* 3278 /*
@@ -3389,7 +3453,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3389 intel_crtc_wait_for_pending_flips(crtc); 3453 intel_crtc_wait_for_pending_flips(crtc);
3390 drm_vblank_off(dev, pipe); 3454 drm_vblank_off(dev, pipe);
3391 3455
3392 if (dev_priv->cfb_plane == plane) 3456 if (dev_priv->fbc.plane == plane)
3393 intel_disable_fbc(dev); 3457 intel_disable_fbc(dev);
3394 3458
3395 intel_crtc_update_cursor(crtc, false); 3459 intel_crtc_update_cursor(crtc, false);
@@ -3462,7 +3526,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
3462 drm_vblank_off(dev, pipe); 3526 drm_vblank_off(dev, pipe);
3463 3527
3464 /* FBC must be disabled before disabling the plane on HSW. */ 3528 /* FBC must be disabled before disabling the plane on HSW. */
3465 if (dev_priv->cfb_plane == plane) 3529 if (dev_priv->fbc.plane == plane)
3466 intel_disable_fbc(dev); 3530 intel_disable_fbc(dev);
3467 3531
3468 hsw_disable_ips(intel_crtc); 3532 hsw_disable_ips(intel_crtc);
@@ -3593,22 +3657,16 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
3593 intel_crtc->active = true; 3657 intel_crtc->active = true;
3594 intel_update_watermarks(dev); 3658 intel_update_watermarks(dev);
3595 3659
3596 mutex_lock(&dev_priv->dpio_lock);
3597
3598 for_each_encoder_on_crtc(dev, crtc, encoder) 3660 for_each_encoder_on_crtc(dev, crtc, encoder)
3599 if (encoder->pre_pll_enable) 3661 if (encoder->pre_pll_enable)
3600 encoder->pre_pll_enable(encoder); 3662 encoder->pre_pll_enable(encoder);
3601 3663
3602 intel_enable_pll(dev_priv, pipe); 3664 vlv_enable_pll(intel_crtc);
3603 3665
3604 for_each_encoder_on_crtc(dev, crtc, encoder) 3666 for_each_encoder_on_crtc(dev, crtc, encoder)
3605 if (encoder->pre_enable) 3667 if (encoder->pre_enable)
3606 encoder->pre_enable(encoder); 3668 encoder->pre_enable(encoder);
3607 3669
3608 /* VLV wants encoder enabling _before_ the pipe is up. */
3609 for_each_encoder_on_crtc(dev, crtc, encoder)
3610 encoder->enable(encoder);
3611
3612 i9xx_pfit_enable(intel_crtc); 3670 i9xx_pfit_enable(intel_crtc);
3613 3671
3614 intel_crtc_load_lut(crtc); 3672 intel_crtc_load_lut(crtc);
@@ -3620,7 +3678,8 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
3620 3678
3621 intel_update_fbc(dev); 3679 intel_update_fbc(dev);
3622 3680
3623 mutex_unlock(&dev_priv->dpio_lock); 3681 for_each_encoder_on_crtc(dev, crtc, encoder)
3682 encoder->enable(encoder);
3624} 3683}
3625 3684
3626static void i9xx_crtc_enable(struct drm_crtc *crtc) 3685static void i9xx_crtc_enable(struct drm_crtc *crtc)
@@ -3640,12 +3699,12 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
3640 intel_crtc->active = true; 3699 intel_crtc->active = true;
3641 intel_update_watermarks(dev); 3700 intel_update_watermarks(dev);
3642 3701
3643 intel_enable_pll(dev_priv, pipe);
3644
3645 for_each_encoder_on_crtc(dev, crtc, encoder) 3702 for_each_encoder_on_crtc(dev, crtc, encoder)
3646 if (encoder->pre_enable) 3703 if (encoder->pre_enable)
3647 encoder->pre_enable(encoder); 3704 encoder->pre_enable(encoder);
3648 3705
3706 i9xx_enable_pll(intel_crtc);
3707
3649 i9xx_pfit_enable(intel_crtc); 3708 i9xx_pfit_enable(intel_crtc);
3650 3709
3651 intel_crtc_load_lut(crtc); 3710 intel_crtc_load_lut(crtc);
@@ -3701,7 +3760,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
3701 intel_crtc_wait_for_pending_flips(crtc); 3760 intel_crtc_wait_for_pending_flips(crtc);
3702 drm_vblank_off(dev, pipe); 3761 drm_vblank_off(dev, pipe);
3703 3762
3704 if (dev_priv->cfb_plane == plane) 3763 if (dev_priv->fbc.plane == plane)
3705 intel_disable_fbc(dev); 3764 intel_disable_fbc(dev);
3706 3765
3707 intel_crtc_dpms_overlay(intel_crtc, false); 3766 intel_crtc_dpms_overlay(intel_crtc, false);
@@ -3717,7 +3776,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
3717 if (encoder->post_disable) 3776 if (encoder->post_disable)
3718 encoder->post_disable(encoder); 3777 encoder->post_disable(encoder);
3719 3778
3720 intel_disable_pll(dev_priv, pipe); 3779 i9xx_disable_pll(dev_priv, pipe);
3721 3780
3722 intel_crtc->active = false; 3781 intel_crtc->active = false;
3723 intel_update_fbc(dev); 3782 intel_update_fbc(dev);
@@ -3817,16 +3876,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
3817 } 3876 }
3818} 3877}
3819 3878
3820void intel_modeset_disable(struct drm_device *dev)
3821{
3822 struct drm_crtc *crtc;
3823
3824 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3825 if (crtc->enabled)
3826 intel_crtc_disable(crtc);
3827 }
3828}
3829
3830void intel_encoder_destroy(struct drm_encoder *encoder) 3879void intel_encoder_destroy(struct drm_encoder *encoder)
3831{ 3880{
3832 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 3881 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
@@ -3835,10 +3884,10 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
3835 kfree(intel_encoder); 3884 kfree(intel_encoder);
3836} 3885}
3837 3886
3838/* Simple dpms helper for encodres with just one connector, no cloning and only 3887/* Simple dpms helper for encoders with just one connector, no cloning and only
3839 * one kind of off state. It clamps all !ON modes to fully OFF and changes the 3888 * one kind of off state. It clamps all !ON modes to fully OFF and changes the
3840 * state of the entire output pipe. */ 3889 * state of the entire output pipe. */
3841void intel_encoder_dpms(struct intel_encoder *encoder, int mode) 3890static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
3842{ 3891{
3843 if (mode == DRM_MODE_DPMS_ON) { 3892 if (mode == DRM_MODE_DPMS_ON) {
3844 encoder->connectors_active = true; 3893 encoder->connectors_active = true;
@@ -4032,7 +4081,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc,
4032{ 4081{
4033 pipe_config->ips_enabled = i915_enable_ips && 4082 pipe_config->ips_enabled = i915_enable_ips &&
4034 hsw_crtc_supports_ips(crtc) && 4083 hsw_crtc_supports_ips(crtc) &&
4035 pipe_config->pipe_bpp == 24; 4084 pipe_config->pipe_bpp <= 24;
4036} 4085}
4037 4086
4038static int intel_crtc_compute_config(struct intel_crtc *crtc, 4087static int intel_crtc_compute_config(struct intel_crtc *crtc,
@@ -4048,12 +4097,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
4048 return -EINVAL; 4097 return -EINVAL;
4049 } 4098 }
4050 4099
4051 /* All interlaced capable intel hw wants timings in frames. Note though
4052 * that intel_lvds_mode_fixup does some funny tricks with the crtc
4053 * timings, so we need to be careful not to clobber these.*/
4054 if (!pipe_config->timings_set)
4055 drm_mode_set_crtcinfo(adjusted_mode, 0);
4056
4057 /* Cantiga+ cannot handle modes with a hsync front porch of 0. 4100 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
4058 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 4101 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
4059 */ 4102 */
@@ -4103,6 +4146,30 @@ static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
4103 return 200000; 4146 return 200000;
4104} 4147}
4105 4148
4149static int pnv_get_display_clock_speed(struct drm_device *dev)
4150{
4151 u16 gcfgc = 0;
4152
4153 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
4154
4155 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
4156 case GC_DISPLAY_CLOCK_267_MHZ_PNV:
4157 return 267000;
4158 case GC_DISPLAY_CLOCK_333_MHZ_PNV:
4159 return 333000;
4160 case GC_DISPLAY_CLOCK_444_MHZ_PNV:
4161 return 444000;
4162 case GC_DISPLAY_CLOCK_200_MHZ_PNV:
4163 return 200000;
4164 default:
4165 DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
4166 case GC_DISPLAY_CLOCK_133_MHZ_PNV:
4167 return 133000;
4168 case GC_DISPLAY_CLOCK_167_MHZ_PNV:
4169 return 167000;
4170 }
4171}
4172
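The new pnv_get_display_clock_speed() decodes the display core clock from the GCFGC PCI config word; note that the default case intentionally logs the unknown encoding and then falls through to the 133 MHz return. A standalone model of the same decode, with placeholder encodings instead of the real GC_DISPLAY_CLOCK_* values, shows that fall-through:

#include <stdint.h>
#include <stdio.h>

/* placeholder encodings; the real values live in i915_reg.h */
enum { CLK_133 = 0, CLK_167, CLK_200, CLK_267, CLK_333, CLK_444 };
#define GC_DISPLAY_CLOCK_MASK 0x7

/* Decode the display core clock (kHz) from a GCFGC-style config word. */
static int pnv_display_clock_khz(uint16_t gcfgc)
{
    switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
    case CLK_267: return 267000;
    case CLK_333: return 333000;
    case CLK_444: return 444000;
    case CLK_200: return 200000;
    default:
        fprintf(stderr, "Unknown pnv display core clock 0x%04x\n", gcfgc);
        /* fall through: treat unknown encodings as the slowest clock */
    case CLK_133: return 133000;
    case CLK_167: return 167000;
    }
}

int main(void)
{
    printf("%d kHz\n", pnv_display_clock_khz(CLK_333));
    return 0;
}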
4106static int i915gm_get_display_clock_speed(struct drm_device *dev) 4173static int i915gm_get_display_clock_speed(struct drm_device *dev)
4107{ 4174{
4108 u16 gcfgc = 0; 4175 u16 gcfgc = 0;
@@ -4266,14 +4333,17 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
4266 } 4333 }
4267 4334
4268 I915_WRITE(FP0(pipe), fp); 4335 I915_WRITE(FP0(pipe), fp);
4336 crtc->config.dpll_hw_state.fp0 = fp;
4269 4337
4270 crtc->lowfreq_avail = false; 4338 crtc->lowfreq_avail = false;
4271 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 4339 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
4272 reduced_clock && i915_powersave) { 4340 reduced_clock && i915_powersave) {
4273 I915_WRITE(FP1(pipe), fp2); 4341 I915_WRITE(FP1(pipe), fp2);
4342 crtc->config.dpll_hw_state.fp1 = fp2;
4274 crtc->lowfreq_avail = true; 4343 crtc->lowfreq_avail = true;
4275 } else { 4344 } else {
4276 I915_WRITE(FP1(pipe), fp); 4345 I915_WRITE(FP1(pipe), fp);
4346 crtc->config.dpll_hw_state.fp1 = fp;
4277 } 4347 }
4278} 4348}
4279 4349
@@ -4351,17 +4421,13 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4351{ 4421{
4352 struct drm_device *dev = crtc->base.dev; 4422 struct drm_device *dev = crtc->base.dev;
4353 struct drm_i915_private *dev_priv = dev->dev_private; 4423 struct drm_i915_private *dev_priv = dev->dev_private;
4354 struct intel_encoder *encoder;
4355 int pipe = crtc->pipe; 4424 int pipe = crtc->pipe;
4356 u32 dpll, mdiv; 4425 u32 dpll, mdiv;
4357 u32 bestn, bestm1, bestm2, bestp1, bestp2; 4426 u32 bestn, bestm1, bestm2, bestp1, bestp2;
4358 bool is_hdmi;
4359 u32 coreclk, reg_val, dpll_md; 4427 u32 coreclk, reg_val, dpll_md;
4360 4428
4361 mutex_lock(&dev_priv->dpio_lock); 4429 mutex_lock(&dev_priv->dpio_lock);
4362 4430
4363 is_hdmi = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
4364
4365 bestn = crtc->config.dpll.n; 4431 bestn = crtc->config.dpll.n;
4366 bestm1 = crtc->config.dpll.m1; 4432 bestm1 = crtc->config.dpll.m1;
4367 bestm2 = crtc->config.dpll.m2; 4433 bestm2 = crtc->config.dpll.m2;
@@ -4407,7 +4473,7 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4407 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) || 4473 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
4408 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) 4474 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
4409 vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe), 4475 vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
4410 0x005f0021); 4476 0x009f0003);
4411 else 4477 else
4412 vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe), 4478 vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
4413 0x00d0000f); 4479 0x00d0000f);
@@ -4440,10 +4506,6 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4440 4506
4441 vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000); 4507 vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000);
4442 4508
4443 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
4444 if (encoder->pre_pll_enable)
4445 encoder->pre_pll_enable(encoder);
4446
4447 /* Enable DPIO clock input */ 4509 /* Enable DPIO clock input */
4448 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | 4510 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
4449 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; 4511 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
@@ -4451,17 +4513,11 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4451 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 4513 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
4452 4514
4453 dpll |= DPLL_VCO_ENABLE; 4515 dpll |= DPLL_VCO_ENABLE;
4454 I915_WRITE(DPLL(pipe), dpll); 4516 crtc->config.dpll_hw_state.dpll = dpll;
4455 POSTING_READ(DPLL(pipe));
4456 udelay(150);
4457
4458 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
4459 DRM_ERROR("DPLL %d failed to lock\n", pipe);
4460 4517
4461 dpll_md = (crtc->config.pixel_multiplier - 1) 4518 dpll_md = (crtc->config.pixel_multiplier - 1)
4462 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 4519 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
4463 I915_WRITE(DPLL_MD(pipe), dpll_md); 4520 crtc->config.dpll_hw_state.dpll_md = dpll_md;
4464 POSTING_READ(DPLL_MD(pipe));
4465 4521
4466 if (crtc->config.has_dp_encoder) 4522 if (crtc->config.has_dp_encoder)
4467 intel_dp_set_m_n(crtc); 4523 intel_dp_set_m_n(crtc);
@@ -4475,8 +4531,6 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
4475{ 4531{
4476 struct drm_device *dev = crtc->base.dev; 4532 struct drm_device *dev = crtc->base.dev;
4477 struct drm_i915_private *dev_priv = dev->dev_private; 4533 struct drm_i915_private *dev_priv = dev->dev_private;
4478 struct intel_encoder *encoder;
4479 int pipe = crtc->pipe;
4480 u32 dpll; 4534 u32 dpll;
4481 bool is_sdvo; 4535 bool is_sdvo;
4482 struct dpll *clock = &crtc->config.dpll; 4536 struct dpll *clock = &crtc->config.dpll;
@@ -4499,10 +4553,10 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
4499 } 4553 }
4500 4554
4501 if (is_sdvo) 4555 if (is_sdvo)
4502 dpll |= DPLL_DVO_HIGH_SPEED; 4556 dpll |= DPLL_SDVO_HIGH_SPEED;
4503 4557
4504 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) 4558 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
4505 dpll |= DPLL_DVO_HIGH_SPEED; 4559 dpll |= DPLL_SDVO_HIGH_SPEED;
4506 4560
4507 /* compute bitmask from p1 value */ 4561 /* compute bitmask from p1 value */
4508 if (IS_PINEVIEW(dev)) 4562 if (IS_PINEVIEW(dev))
@@ -4538,35 +4592,16 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
4538 dpll |= PLL_REF_INPUT_DREFCLK; 4592 dpll |= PLL_REF_INPUT_DREFCLK;
4539 4593
4540 dpll |= DPLL_VCO_ENABLE; 4594 dpll |= DPLL_VCO_ENABLE;
4541 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); 4595 crtc->config.dpll_hw_state.dpll = dpll;
4542 POSTING_READ(DPLL(pipe));
4543 udelay(150);
4544
4545 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
4546 if (encoder->pre_pll_enable)
4547 encoder->pre_pll_enable(encoder);
4548
4549 if (crtc->config.has_dp_encoder)
4550 intel_dp_set_m_n(crtc);
4551
4552 I915_WRITE(DPLL(pipe), dpll);
4553
4554 /* Wait for the clocks to stabilize. */
4555 POSTING_READ(DPLL(pipe));
4556 udelay(150);
4557 4596
4558 if (INTEL_INFO(dev)->gen >= 4) { 4597 if (INTEL_INFO(dev)->gen >= 4) {
4559 u32 dpll_md = (crtc->config.pixel_multiplier - 1) 4598 u32 dpll_md = (crtc->config.pixel_multiplier - 1)
4560 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 4599 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
4561 I915_WRITE(DPLL_MD(pipe), dpll_md); 4600 crtc->config.dpll_hw_state.dpll_md = dpll_md;
4562 } else {
4563 /* The pixel multiplier can only be updated once the
4564 * DPLL is enabled and the clocks are stable.
4565 *
4566 * So write it again.
4567 */
4568 I915_WRITE(DPLL(pipe), dpll);
4569 } 4601 }
4602
4603 if (crtc->config.has_dp_encoder)
4604 intel_dp_set_m_n(crtc);
4570} 4605}
4571 4606
4572static void i8xx_update_pll(struct intel_crtc *crtc, 4607static void i8xx_update_pll(struct intel_crtc *crtc,
@@ -4575,8 +4610,6 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
4575{ 4610{
4576 struct drm_device *dev = crtc->base.dev; 4611 struct drm_device *dev = crtc->base.dev;
4577 struct drm_i915_private *dev_priv = dev->dev_private; 4612 struct drm_i915_private *dev_priv = dev->dev_private;
4578 struct intel_encoder *encoder;
4579 int pipe = crtc->pipe;
4580 u32 dpll; 4613 u32 dpll;
4581 struct dpll *clock = &crtc->config.dpll; 4614 struct dpll *clock = &crtc->config.dpll;
4582 4615
@@ -4595,6 +4628,9 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
4595 dpll |= PLL_P2_DIVIDE_BY_4; 4628 dpll |= PLL_P2_DIVIDE_BY_4;
4596 } 4629 }
4597 4630
4631 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
4632 dpll |= DPLL_DVO_2X_MODE;
4633
4598 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 4634 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
4599 intel_panel_use_ssc(dev_priv) && num_connectors < 2) 4635 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4600 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 4636 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
@@ -4602,26 +4638,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
4602 dpll |= PLL_REF_INPUT_DREFCLK; 4638 dpll |= PLL_REF_INPUT_DREFCLK;
4603 4639
4604 dpll |= DPLL_VCO_ENABLE; 4640 dpll |= DPLL_VCO_ENABLE;
4605 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); 4641 crtc->config.dpll_hw_state.dpll = dpll;
4606 POSTING_READ(DPLL(pipe));
4607 udelay(150);
4608
4609 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
4610 if (encoder->pre_pll_enable)
4611 encoder->pre_pll_enable(encoder);
4612
4613 I915_WRITE(DPLL(pipe), dpll);
4614
4615 /* Wait for the clocks to stabilize. */
4616 POSTING_READ(DPLL(pipe));
4617 udelay(150);
4618
4619 /* The pixel multiplier can only be updated once the
4620 * DPLL is enabled and the clocks are stable.
4621 *
4622 * So write it again.
4623 */
4624 I915_WRITE(DPLL(pipe), dpll);
4625} 4642}
4626 4643
4627static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) 4644static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
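Together with the earlier enable-path changes, these hunks make vlv_update_pll(), i9xx_update_pll() and i8xx_update_pll() pure state computation: the DPLL, DPLL_MD and FP values are stored in crtc->config.dpll_hw_state and the register writes happen later, in the enable functions. A toy illustration of that compute-then-program split, with invented divider math and the hardware writes reduced to printf():

#include <stdint.h>
#include <stdio.h>

struct dpll_hw_state { uint32_t dpll, dpll_md, fp0, fp1; };

#define DPLL_VCO_ENABLE (1u << 31)

/* mode-set time: derive the register values, touch no hardware */
static void compute_pll(struct dpll_hw_state *st, int p1, int pixel_mult)
{
    st->fp0     = 0x20u | (uint32_t)p1;           /* toy divider encoding */
    st->fp1     = st->fp0;
    st->dpll    = DPLL_VCO_ENABLE | (1u << (p1 - 1));
    st->dpll_md = (uint32_t)(pixel_mult - 1) << 8;
}

/* enable time: program the precomputed state (register writes stubbed) */
static void program_pll(const struct dpll_hw_state *st)
{
    printf("write FP0     = %#x\n", (unsigned)st->fp0);
    printf("write DPLL    = %#x\n", (unsigned)st->dpll);
    printf("write DPLL_MD = %#x\n", (unsigned)st->dpll_md);
}

int main(void)
{
    struct dpll_hw_state st;

    compute_pll(&st, 2, 1);
    program_pll(&st);
    return 0;
}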
@@ -4727,6 +4744,27 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
4727 pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1; 4744 pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1;
4728} 4745}
4729 4746
4747static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
4748 struct intel_crtc_config *pipe_config)
4749{
4750 struct drm_crtc *crtc = &intel_crtc->base;
4751
4752 crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
4753 crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal;
4754 crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
4755 crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
4756
4757 crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
4758 crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal;
4759 crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
4760 crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
4761
4762 crtc->mode.flags = pipe_config->adjusted_mode.flags;
4763
4764 crtc->mode.clock = pipe_config->adjusted_mode.clock;
4765 crtc->mode.flags |= pipe_config->adjusted_mode.flags;
4766}
4767
4730static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) 4768static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
4731{ 4769{
4732 struct drm_device *dev = intel_crtc->base.dev; 4770 struct drm_device *dev = intel_crtc->base.dev;
@@ -4939,7 +4977,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
4939 struct drm_i915_private *dev_priv = dev->dev_private; 4977 struct drm_i915_private *dev_priv = dev->dev_private;
4940 uint32_t tmp; 4978 uint32_t tmp;
4941 4979
4942 pipe_config->cpu_transcoder = crtc->pipe; 4980 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
4943 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 4981 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
4944 4982
4945 tmp = I915_READ(PIPECONF(crtc->pipe)); 4983 tmp = I915_READ(PIPECONF(crtc->pipe));
@@ -4955,6 +4993,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
4955 pipe_config->pixel_multiplier = 4993 pipe_config->pixel_multiplier =
4956 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 4994 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
4957 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 4995 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
4996 pipe_config->dpll_hw_state.dpll_md = tmp;
4958 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { 4997 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
4959 tmp = I915_READ(DPLL(crtc->pipe)); 4998 tmp = I915_READ(DPLL(crtc->pipe));
4960 pipe_config->pixel_multiplier = 4999 pipe_config->pixel_multiplier =
@@ -4966,6 +5005,16 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
4966 * function. */ 5005 * function. */
4967 pipe_config->pixel_multiplier = 1; 5006 pipe_config->pixel_multiplier = 1;
4968 } 5007 }
5008 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
5009 if (!IS_VALLEYVIEW(dev)) {
5010 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
5011 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
5012 } else {
5013 /* Mask out read-only status bits. */
5014 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
5015 DPLL_PORTC_READY_MASK |
5016 DPLL_PORTB_READY_MASK);
5017 }
4969 5018
4970 return true; 5019 return true;
4971} 5020}
@@ -5119,74 +5168,37 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
5119 BUG_ON(val != final); 5168 BUG_ON(val != final);
5120} 5169}
5121 5170
5122/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */ 5171static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
5123static void lpt_init_pch_refclk(struct drm_device *dev)
5124{ 5172{
5125 struct drm_i915_private *dev_priv = dev->dev_private; 5173 uint32_t tmp;
5126 struct drm_mode_config *mode_config = &dev->mode_config;
5127 struct intel_encoder *encoder;
5128 bool has_vga = false;
5129 bool is_sdv = false;
5130 u32 tmp;
5131
5132 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5133 switch (encoder->type) {
5134 case INTEL_OUTPUT_ANALOG:
5135 has_vga = true;
5136 break;
5137 }
5138 }
5139
5140 if (!has_vga)
5141 return;
5142
5143 mutex_lock(&dev_priv->dpio_lock);
5144
5145 /* XXX: Rip out SDV support once Haswell ships for real. */
5146 if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
5147 is_sdv = true;
5148
5149 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5150 tmp &= ~SBI_SSCCTL_DISABLE;
5151 tmp |= SBI_SSCCTL_PATHALT;
5152 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5153 5174
5154 udelay(24); 5175 tmp = I915_READ(SOUTH_CHICKEN2);
5176 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
5177 I915_WRITE(SOUTH_CHICKEN2, tmp);
5155 5178
5156 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 5179 if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
5157 tmp &= ~SBI_SSCCTL_PATHALT; 5180 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
5158 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 5181 DRM_ERROR("FDI mPHY reset assert timeout\n");
5159 5182
5160 if (!is_sdv) { 5183 tmp = I915_READ(SOUTH_CHICKEN2);
5161 tmp = I915_READ(SOUTH_CHICKEN2); 5184 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
5162 tmp |= FDI_MPHY_IOSFSB_RESET_CTL; 5185 I915_WRITE(SOUTH_CHICKEN2, tmp);
5163 I915_WRITE(SOUTH_CHICKEN2, tmp);
5164 5186
5165 if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) & 5187 if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
5166 FDI_MPHY_IOSFSB_RESET_STATUS, 100)) 5188 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
5167 DRM_ERROR("FDI mPHY reset assert timeout\n"); 5189 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
5168 5190}
5169 tmp = I915_READ(SOUTH_CHICKEN2);
5170 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
5171 I915_WRITE(SOUTH_CHICKEN2, tmp);
5172 5191
5173 if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) & 5192/* WaMPhyProgramming:hsw */
5174 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 5193static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
5175 100)) 5194{
5176 DRM_ERROR("FDI mPHY reset de-assert timeout\n"); 5195 uint32_t tmp;
5177 }
5178 5196
5179 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); 5197 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
5180 tmp &= ~(0xFF << 24); 5198 tmp &= ~(0xFF << 24);
5181 tmp |= (0x12 << 24); 5199 tmp |= (0x12 << 24);
5182 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); 5200 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
5183 5201
5184 if (is_sdv) {
5185 tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
5186 tmp |= 0x7FFF;
5187 intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
5188 }
5189
5190 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); 5202 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
5191 tmp |= (1 << 11); 5203 tmp |= (1 << 11);
5192 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); 5204 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
@@ -5195,24 +5207,6 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
5195 tmp |= (1 << 11); 5207 tmp |= (1 << 11);
5196 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); 5208 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
5197 5209
5198 if (is_sdv) {
5199 tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
5200 tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
5201 intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
5202
5203 tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
5204 tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
5205 intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
5206
5207 tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
5208 tmp |= (0x3F << 8);
5209 intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
5210
5211 tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
5212 tmp |= (0x3F << 8);
5213 intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
5214 }
5215
5216 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); 5210 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
5217 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 5211 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5218 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); 5212 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
@@ -5221,17 +5215,15 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
5221 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 5215 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5222 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); 5216 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
5223 5217
5224 if (!is_sdv) { 5218 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
5225 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); 5219 tmp &= ~(7 << 13);
5226 tmp &= ~(7 << 13); 5220 tmp |= (5 << 13);
5227 tmp |= (5 << 13); 5221 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
5228 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
5229 5222
5230 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); 5223 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
5231 tmp &= ~(7 << 13); 5224 tmp &= ~(7 << 13);
5232 tmp |= (5 << 13); 5225 tmp |= (5 << 13);
5233 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); 5226 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
5234 }
5235 5227
5236 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); 5228 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
5237 tmp &= ~0xFF; 5229 tmp &= ~0xFF;
@@ -5253,34 +5245,120 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
5253 tmp |= (0x1C << 16); 5245 tmp |= (0x1C << 16);
5254 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); 5246 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
5255 5247
5256 if (!is_sdv) { 5248 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
5257 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); 5249 tmp |= (1 << 27);
5258 tmp |= (1 << 27); 5250 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
5259 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); 5251
5252 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
5253 tmp |= (1 << 27);
5254 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
5255
5256 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
5257 tmp &= ~(0xF << 28);
5258 tmp |= (4 << 28);
5259 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
5260
5261 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
5262 tmp &= ~(0xF << 28);
5263 tmp |= (4 << 28);
5264 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
5265}
5266
5267/* Implements 3 different sequences from BSpec chapter "Display iCLK
5268 * Programming" based on the parameters passed:
5269 * - Sequence to enable CLKOUT_DP
5270 * - Sequence to enable CLKOUT_DP without spread
5271 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
5272 */
5273static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
5274 bool with_fdi)
5275{
5276 struct drm_i915_private *dev_priv = dev->dev_private;
5277 uint32_t reg, tmp;
5278
5279 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
5280 with_spread = true;
5281 if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
5282 with_fdi, "LP PCH doesn't have FDI\n"))
5283 with_fdi = false;
5284
5285 mutex_lock(&dev_priv->dpio_lock);
5286
5287 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5288 tmp &= ~SBI_SSCCTL_DISABLE;
5289 tmp |= SBI_SSCCTL_PATHALT;
5290 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5260 5291
5261 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); 5292 udelay(24);
5262 tmp |= (1 << 27);
5263 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
5264 5293
5265 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); 5294 if (with_spread) {
5266 tmp &= ~(0xF << 28); 5295 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5267 tmp |= (4 << 28); 5296 tmp &= ~SBI_SSCCTL_PATHALT;
5268 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); 5297 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5269 5298
5270 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); 5299 if (with_fdi) {
5271 tmp &= ~(0xF << 28); 5300 lpt_reset_fdi_mphy(dev_priv);
5272 tmp |= (4 << 28); 5301 lpt_program_fdi_mphy(dev_priv);
5273 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); 5302 }
5274 } 5303 }
5275 5304
5276 /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */ 5305 reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
5277 tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK); 5306 SBI_GEN0 : SBI_DBUFF0;
5278 tmp |= SBI_DBUFF0_ENABLE; 5307 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5279 intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK); 5308 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5309 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5310
5311 mutex_unlock(&dev_priv->dpio_lock);
5312}
5313
5314/* Sequence to disable CLKOUT_DP */
5315static void lpt_disable_clkout_dp(struct drm_device *dev)
5316{
5317 struct drm_i915_private *dev_priv = dev->dev_private;
5318 uint32_t reg, tmp;
5319
5320 mutex_lock(&dev_priv->dpio_lock);
5321
5322 reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
5323 SBI_GEN0 : SBI_DBUFF0;
5324 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5325 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5326 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5327
5328 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5329 if (!(tmp & SBI_SSCCTL_DISABLE)) {
5330 if (!(tmp & SBI_SSCCTL_PATHALT)) {
5331 tmp |= SBI_SSCCTL_PATHALT;
5332 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5333 udelay(32);
5334 }
5335 tmp |= SBI_SSCCTL_DISABLE;
5336 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5337 }
5280 5338
5281 mutex_unlock(&dev_priv->dpio_lock); 5339 mutex_unlock(&dev_priv->dpio_lock);
5282} 5340}
5283 5341
5342static void lpt_init_pch_refclk(struct drm_device *dev)
5343{
5344 struct drm_mode_config *mode_config = &dev->mode_config;
5345 struct intel_encoder *encoder;
5346 bool has_vga = false;
5347
5348 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5349 switch (encoder->type) {
5350 case INTEL_OUTPUT_ANALOG:
5351 has_vga = true;
5352 break;
5353 }
5354 }
5355
5356 if (has_vga)
5357 lpt_enable_clkout_dp(dev, true, true);
5358 else
5359 lpt_disable_clkout_dp(dev);
5360}
5361
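The LPT refclk code is now split into lpt_enable_clkout_dp(), which takes with_spread/with_fdi flags and performs the corresponding BSpec sequence, and lpt_disable_clkout_dp(); lpt_init_pch_refclk() simply picks one of the two depending on whether a VGA encoder is present. The following standalone model keeps only the flag sanity check and the dispatch, with the SBI register work replaced by prints:

#include <stdbool.h>
#include <stdio.h>

/* Model of the three BSpec sequences selected by the two flags. */
static void enable_clkout_dp(bool with_spread, bool with_fdi)
{
    if (with_fdi && !with_spread) {
        fprintf(stderr, "FDI requires downspread\n");
        with_spread = true;                /* same fix-up the driver applies */
    }

    puts("ungate CLKOUT_DP (SSCCTL)");
    if (with_spread) {
        puts("drop PATHALT: enable downspread");
        if (with_fdi)
            puts("reset and program the FDI mPHY");
    }
    puts("enable the CLKOUT_DP buffer");
}

static void disable_clkout_dp(void)
{
    puts("disable the CLKOUT_DP buffer, set PATHALT, gate SSC");
}

static void init_pch_refclk(bool has_vga)
{
    if (has_vga)
        enable_clkout_dp(true, true);      /* spread plus FDI, as for analog VGA */
    else
        disable_clkout_dp();
}

int main(void)
{
    init_pch_refclk(true);
    init_pch_refclk(false);
    return 0;
}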
5284/* 5362/*
5285 * Initialize reference clocks when the driver loads 5363 * Initialize reference clocks when the driver loads
5286 */ 5364 */
@@ -5610,9 +5688,9 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
5610 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 5688 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
5611 5689
5612 if (is_sdvo) 5690 if (is_sdvo)
5613 dpll |= DPLL_DVO_HIGH_SPEED; 5691 dpll |= DPLL_SDVO_HIGH_SPEED;
5614 if (intel_crtc->config.has_dp_encoder) 5692 if (intel_crtc->config.has_dp_encoder)
5615 dpll |= DPLL_DVO_HIGH_SPEED; 5693 dpll |= DPLL_SDVO_HIGH_SPEED;
5616 5694
5617 /* compute bitmask from p1 value */ 5695 /* compute bitmask from p1 value */
5618 dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 5696 dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
@@ -5708,7 +5786,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5708 else 5786 else
5709 intel_crtc->config.dpll_hw_state.fp1 = fp; 5787 intel_crtc->config.dpll_hw_state.fp1 = fp;
5710 5788
5711 pll = intel_get_shared_dpll(intel_crtc, dpll, fp); 5789 pll = intel_get_shared_dpll(intel_crtc);
5712 if (pll == NULL) { 5790 if (pll == NULL) {
5713 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", 5791 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
5714 pipe_name(pipe)); 5792 pipe_name(pipe));
@@ -5720,10 +5798,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5720 if (intel_crtc->config.has_dp_encoder) 5798 if (intel_crtc->config.has_dp_encoder)
5721 intel_dp_set_m_n(intel_crtc); 5799 intel_dp_set_m_n(intel_crtc);
5722 5800
5723 for_each_encoder_on_crtc(dev, crtc, encoder)
5724 if (encoder->pre_pll_enable)
5725 encoder->pre_pll_enable(encoder);
5726
5727 if (is_lvds && has_reduced_clock && i915_powersave) 5801 if (is_lvds && has_reduced_clock && i915_powersave)
5728 intel_crtc->lowfreq_avail = true; 5802 intel_crtc->lowfreq_avail = true;
5729 else 5803 else
@@ -5732,23 +5806,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5732 if (intel_crtc->config.has_pch_encoder) { 5806 if (intel_crtc->config.has_pch_encoder) {
5733 pll = intel_crtc_to_shared_dpll(intel_crtc); 5807 pll = intel_crtc_to_shared_dpll(intel_crtc);
5734 5808
5735 I915_WRITE(PCH_DPLL(pll->id), dpll);
5736
5737 /* Wait for the clocks to stabilize. */
5738 POSTING_READ(PCH_DPLL(pll->id));
5739 udelay(150);
5740
5741 /* The pixel multiplier can only be updated once the
5742 * DPLL is enabled and the clocks are stable.
5743 *
5744 * So write it again.
5745 */
5746 I915_WRITE(PCH_DPLL(pll->id), dpll);
5747
5748 if (has_reduced_clock)
5749 I915_WRITE(PCH_FP1(pll->id), fp2);
5750 else
5751 I915_WRITE(PCH_FP1(pll->id), fp);
5752 } 5809 }
5753 5810
5754 intel_set_pipe_timings(intel_crtc); 5811 intel_set_pipe_timings(intel_crtc);
@@ -5820,7 +5877,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5820 struct drm_i915_private *dev_priv = dev->dev_private; 5877 struct drm_i915_private *dev_priv = dev->dev_private;
5821 uint32_t tmp; 5878 uint32_t tmp;
5822 5879
5823 pipe_config->cpu_transcoder = crtc->pipe; 5880 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5824 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 5881 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
5825 5882
5826 tmp = I915_READ(PIPECONF(crtc->pipe)); 5883 tmp = I915_READ(PIPECONF(crtc->pipe));
@@ -5838,12 +5895,9 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5838 5895
5839 ironlake_get_fdi_m_n_config(crtc, pipe_config); 5896 ironlake_get_fdi_m_n_config(crtc, pipe_config);
5840 5897
5841 /* XXX: Can't properly read out the pch dpll pixel multiplier
5842 * since we don't have state tracking for pch clocks yet. */
5843 pipe_config->pixel_multiplier = 1;
5844
5845 if (HAS_PCH_IBX(dev_priv->dev)) { 5898 if (HAS_PCH_IBX(dev_priv->dev)) {
5846 pipe_config->shared_dpll = crtc->pipe; 5899 pipe_config->shared_dpll =
5900 (enum intel_dpll_id) crtc->pipe;
5847 } else { 5901 } else {
5848 tmp = I915_READ(PCH_DPLL_SEL); 5902 tmp = I915_READ(PCH_DPLL_SEL);
5849 if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) 5903 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
@@ -5856,6 +5910,11 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5856 5910
5857 WARN_ON(!pll->get_hw_state(dev_priv, pll, 5911 WARN_ON(!pll->get_hw_state(dev_priv, pll,
5858 &pipe_config->dpll_hw_state)); 5912 &pipe_config->dpll_hw_state));
5913
5914 tmp = pipe_config->dpll_hw_state.dpll;
5915 pipe_config->pixel_multiplier =
5916 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
5917 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
5859 } else { 5918 } else {
5860 pipe_config->pixel_multiplier = 1; 5919 pipe_config->pixel_multiplier = 1;
5861 } 5920 }
@@ -5867,6 +5926,305 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5867 return true; 5926 return true;
5868} 5927}
5869 5928
5929static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
5930{
5931 struct drm_device *dev = dev_priv->dev;
5932 struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
5933 struct intel_crtc *crtc;
5934 unsigned long irqflags;
5935 uint32_t val;
5936
5937 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
5938 WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
5939 pipe_name(crtc->pipe));
5940
5941 WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
5942 WARN(plls->spll_refcount, "SPLL enabled\n");
5943 WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
5944 WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
5945 WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
5946 WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
5947 "CPU PWM1 enabled\n");
5948 WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
5949 "CPU PWM2 enabled\n");
5950 WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
5951 "PCH PWM1 enabled\n");
5952 WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
5953 "Utility pin enabled\n");
5954 WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
5955
5956 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
5957 val = I915_READ(DEIMR);
5958 WARN((val & ~DE_PCH_EVENT_IVB) != val,
5959 "Unexpected DEIMR bits enabled: 0x%x\n", val);
5960 val = I915_READ(SDEIMR);
5961 WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
5962 "Unexpected SDEIMR bits enabled: 0x%x\n", val);
5963 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
5964}
5965
5966/*
5967 * This function implements pieces of two sequences from BSpec:
5968 * - Sequence for display software to disable LCPLL
5969 * - Sequence for display software to allow package C8+
5970 * The steps implemented here are just the steps that actually touch the LCPLL
5971 * register. Callers should take care of disabling all the display engine
5972 * functions, doing the mode unset, fixing interrupts, etc.
5973 */
5974void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
5975 bool switch_to_fclk, bool allow_power_down)
5976{
5977 uint32_t val;
5978
5979 assert_can_disable_lcpll(dev_priv);
5980
5981 val = I915_READ(LCPLL_CTL);
5982
5983 if (switch_to_fclk) {
5984 val |= LCPLL_CD_SOURCE_FCLK;
5985 I915_WRITE(LCPLL_CTL, val);
5986
5987 if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
5988 LCPLL_CD_SOURCE_FCLK_DONE, 1))
5989 DRM_ERROR("Switching to FCLK failed\n");
5990
5991 val = I915_READ(LCPLL_CTL);
5992 }
5993
5994 val |= LCPLL_PLL_DISABLE;
5995 I915_WRITE(LCPLL_CTL, val);
5996 POSTING_READ(LCPLL_CTL);
5997
5998 if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
5999 DRM_ERROR("LCPLL still locked\n");
6000
6001 val = I915_READ(D_COMP);
6002 val |= D_COMP_COMP_DISABLE;
6003 I915_WRITE(D_COMP, val);
6004 POSTING_READ(D_COMP);
6005 ndelay(100);
6006
6007 if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
6008 DRM_ERROR("D_COMP RCOMP still in progress\n");
6009
6010 if (allow_power_down) {
6011 val = I915_READ(LCPLL_CTL);
6012 val |= LCPLL_POWER_DOWN_ALLOW;
6013 I915_WRITE(LCPLL_CTL, val);
6014 POSTING_READ(LCPLL_CTL);
6015 }
6016}
6017
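The disable path above keeps repeating one idiom: set a control bit, flush the write with a posting read, then poll a status bit under a bounded wait (wait_for(), wait_for_atomic_us()). A minimal sketch of that idiom, using simplified stand-in helpers rather than the driver's actual macros, could look like:

    #include <linux/types.h>
    #include <linux/io.h>
    #include <linux/delay.h>
    #include <linux/errno.h>

    /* Illustrative only: poll @reg until the bits in @mask read back as zero,
     * or give up after roughly @timeout_us microseconds.  The driver uses its
     * wait_for()/wait_for_atomic_us() macros for this; the shape is the same. */
    static int poll_bits_clear(void __iomem *reg, u32 mask, unsigned int timeout_us)
    {
            while (timeout_us--) {
                    if ((readl(reg) & mask) == 0)
                            return 0;       /* condition met in time */
                    udelay(1);
            }
            return -ETIMEDOUT;              /* caller typically logs a DRM_ERROR */
    }

The posting read before the wait matters: it pushes the preceding write out to the hardware so the poll observes the device's reaction rather than a buffered value.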
6018/*
6019 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
6020 * source.
6021 */
6022void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
6023{
6024 uint32_t val;
6025
6026 val = I915_READ(LCPLL_CTL);
6027
6028 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
6029 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
6030 return;
6031
6032 /* Make sure we're not on PC8 state before disabling PC8, otherwise
6033 * we'll hang the machine! */
6034 dev_priv->uncore.funcs.force_wake_get(dev_priv);
6035
6036 if (val & LCPLL_POWER_DOWN_ALLOW) {
6037 val &= ~LCPLL_POWER_DOWN_ALLOW;
6038 I915_WRITE(LCPLL_CTL, val);
6039 POSTING_READ(LCPLL_CTL);
6040 }
6041
6042 val = I915_READ(D_COMP);
6043 val |= D_COMP_COMP_FORCE;
6044 val &= ~D_COMP_COMP_DISABLE;
6045 I915_WRITE(D_COMP, val);
6046 POSTING_READ(D_COMP);
6047
6048 val = I915_READ(LCPLL_CTL);
6049 val &= ~LCPLL_PLL_DISABLE;
6050 I915_WRITE(LCPLL_CTL, val);
6051
6052 if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
6053 DRM_ERROR("LCPLL not locked yet\n");
6054
6055 if (val & LCPLL_CD_SOURCE_FCLK) {
6056 val = I915_READ(LCPLL_CTL);
6057 val &= ~LCPLL_CD_SOURCE_FCLK;
6058 I915_WRITE(LCPLL_CTL, val);
6059
6060 if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
6061 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
6062 DRM_ERROR("Switching back to LCPLL failed\n");
6063 }
6064
6065 dev_priv->uncore.funcs.force_wake_put(dev_priv);
6066}
6067
6068void hsw_enable_pc8_work(struct work_struct *__work)
6069{
6070 struct drm_i915_private *dev_priv =
6071 container_of(to_delayed_work(__work), struct drm_i915_private,
6072 pc8.enable_work);
6073 struct drm_device *dev = dev_priv->dev;
6074 uint32_t val;
6075
6076 if (dev_priv->pc8.enabled)
6077 return;
6078
6079 DRM_DEBUG_KMS("Enabling package C8+\n");
6080
6081 dev_priv->pc8.enabled = true;
6082
6083 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
6084 val = I915_READ(SOUTH_DSPCLK_GATE_D);
6085 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
6086 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
6087 }
6088
6089 lpt_disable_clkout_dp(dev);
6090 hsw_pc8_disable_interrupts(dev);
6091 hsw_disable_lcpll(dev_priv, true, true);
6092}
6093
6094static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
6095{
6096 WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
6097 WARN(dev_priv->pc8.disable_count < 1,
6098 "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
6099
6100 dev_priv->pc8.disable_count--;
6101 if (dev_priv->pc8.disable_count != 0)
6102 return;
6103
6104 schedule_delayed_work(&dev_priv->pc8.enable_work,
6105 msecs_to_jiffies(i915_pc8_timeout));
6106}
6107
6108static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
6109{
6110 struct drm_device *dev = dev_priv->dev;
6111 uint32_t val;
6112
6113 WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
6114 WARN(dev_priv->pc8.disable_count < 0,
6115 "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
6116
6117 dev_priv->pc8.disable_count++;
6118 if (dev_priv->pc8.disable_count != 1)
6119 return;
6120
6121 cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
6122 if (!dev_priv->pc8.enabled)
6123 return;
6124
6125 DRM_DEBUG_KMS("Disabling package C8+\n");
6126
6127 hsw_restore_lcpll(dev_priv);
6128 hsw_pc8_restore_interrupts(dev);
6129 lpt_init_pch_refclk(dev);
6130
6131 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
6132 val = I915_READ(SOUTH_DSPCLK_GATE_D);
6133 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
6134 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
6135 }
6136
6137 intel_prepare_ddi(dev);
6138 i915_gem_init_swizzling(dev);
6139 mutex_lock(&dev_priv->rps.hw_lock);
6140 gen6_update_ring_freq(dev);
6141 mutex_unlock(&dev_priv->rps.hw_lock);
6142 dev_priv->pc8.enabled = false;
6143}
6144
6145void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
6146{
6147 mutex_lock(&dev_priv->pc8.lock);
6148 __hsw_enable_package_c8(dev_priv);
6149 mutex_unlock(&dev_priv->pc8.lock);
6150}
6151
6152void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
6153{
6154 mutex_lock(&dev_priv->pc8.lock);
6155 __hsw_disable_package_c8(dev_priv);
6156 mutex_unlock(&dev_priv->pc8.lock);
6157}
6158
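The two wrappers above implement a get/put-style gate: the first disable request cancels any pending "enter PC8" work and leaves PC8 synchronously, and only when the last requester releases it is the delayed enable re-armed. A stripped-down sketch of the pattern (struct and function names here are made up for illustration, and the timeout value is arbitrary):

    #include <linux/mutex.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct pc8_gate {
            struct mutex lock;
            int disable_count;                      /* >0 means PC8 must stay off */
            struct delayed_work enable_work;        /* handler must not take ->lock */
    };

    /* Analogous to hsw_disable_package_c8(): block PC8 immediately. */
    static void pc8_gate_block(struct pc8_gate *g)
    {
            mutex_lock(&g->lock);
            if (g->disable_count++ == 0)
                    cancel_delayed_work_sync(&g->enable_work);
            mutex_unlock(&g->lock);
    }

    /* Analogous to hsw_enable_package_c8(): allow PC8 again after a grace period. */
    static void pc8_gate_allow(struct pc8_gate *g)
    {
            mutex_lock(&g->lock);
            if (--g->disable_count == 0)
                    schedule_delayed_work(&g->enable_work,
                                          msecs_to_jiffies(5000)); /* illustrative timeout */
            mutex_unlock(&g->lock);
    }

Deferring the re-enable behind a delayed work item keeps short bursts of display or GPU activity from bouncing the package in and out of C8 on every frame.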
6159static bool hsw_can_enable_package_c8(struct drm_i915_private *dev_priv)
6160{
6161 struct drm_device *dev = dev_priv->dev;
6162 struct intel_crtc *crtc;
6163 uint32_t val;
6164
6165 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
6166 if (crtc->base.enabled)
6167 return false;
6168
6169 /* This case is still possible since we have the i915.disable_power_well
6170 * parameter and also the KVMr or something else might be requesting the
6171 * power well. */
6172 val = I915_READ(HSW_PWR_WELL_DRIVER);
6173 if (val != 0) {
6174 DRM_DEBUG_KMS("Not enabling PC8: power well on\n");
6175 return false;
6176 }
6177
6178 return true;
6179}
6180
6181/* Since we're called from modeset_global_resources there's no way to
6182 * symmetrically increase and decrease the refcount, so we use
6183 * dev_priv->pc8.requirements_met to track whether we already have the refcount
6184 * or not.
6185 */
6186static void hsw_update_package_c8(struct drm_device *dev)
6187{
6188 struct drm_i915_private *dev_priv = dev->dev_private;
6189 bool allow;
6190
6191 if (!i915_enable_pc8)
6192 return;
6193
6194 mutex_lock(&dev_priv->pc8.lock);
6195
6196 allow = hsw_can_enable_package_c8(dev_priv);
6197
6198 if (allow == dev_priv->pc8.requirements_met)
6199 goto done;
6200
6201 dev_priv->pc8.requirements_met = allow;
6202
6203 if (allow)
6204 __hsw_enable_package_c8(dev_priv);
6205 else
6206 __hsw_disable_package_c8(dev_priv);
6207
6208done:
6209 mutex_unlock(&dev_priv->pc8.lock);
6210}
6211
6212static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
6213{
6214 if (!dev_priv->pc8.gpu_idle) {
6215 dev_priv->pc8.gpu_idle = true;
6216 hsw_enable_package_c8(dev_priv);
6217 }
6218}
6219
6220static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
6221{
6222 if (dev_priv->pc8.gpu_idle) {
6223 dev_priv->pc8.gpu_idle = false;
6224 hsw_disable_package_c8(dev_priv);
6225 }
6226}
6227
5870static void haswell_modeset_global_resources(struct drm_device *dev) 6228static void haswell_modeset_global_resources(struct drm_device *dev)
5871{ 6229{
5872 bool enable = false; 6230 bool enable = false;
@@ -5882,6 +6240,8 @@ static void haswell_modeset_global_resources(struct drm_device *dev)
5882 } 6240 }
5883 6241
5884 intel_set_power_well(dev, enable); 6242 intel_set_power_well(dev, enable);
6243
6244 hsw_update_package_c8(dev);
5885} 6245}
5886 6246
5887static int haswell_crtc_mode_set(struct drm_crtc *crtc, 6247static int haswell_crtc_mode_set(struct drm_crtc *crtc,
@@ -5935,7 +6295,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
5935 enum intel_display_power_domain pfit_domain; 6295 enum intel_display_power_domain pfit_domain;
5936 uint32_t tmp; 6296 uint32_t tmp;
5937 6297
5938 pipe_config->cpu_transcoder = crtc->pipe; 6298 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5939 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 6299 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
5940 6300
5941 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); 6301 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
@@ -6005,11 +6365,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
6005{ 6365{
6006 struct drm_device *dev = crtc->dev; 6366 struct drm_device *dev = crtc->dev;
6007 struct drm_i915_private *dev_priv = dev->dev_private; 6367 struct drm_i915_private *dev_priv = dev->dev_private;
6008 struct drm_encoder_helper_funcs *encoder_funcs;
6009 struct intel_encoder *encoder; 6368 struct intel_encoder *encoder;
6010 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6369 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6011 struct drm_display_mode *adjusted_mode =
6012 &intel_crtc->config.adjusted_mode;
6013 struct drm_display_mode *mode = &intel_crtc->config.requested_mode; 6370 struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
6014 int pipe = intel_crtc->pipe; 6371 int pipe = intel_crtc->pipe;
6015 int ret; 6372 int ret;
@@ -6028,12 +6385,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
6028 encoder->base.base.id, 6385 encoder->base.base.id,
6029 drm_get_encoder_name(&encoder->base), 6386 drm_get_encoder_name(&encoder->base),
6030 mode->base.id, mode->name); 6387 mode->base.id, mode->name);
6031 if (encoder->mode_set) { 6388 encoder->mode_set(encoder);
6032 encoder->mode_set(encoder);
6033 } else {
6034 encoder_funcs = encoder->base.helper_private;
6035 encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
6036 }
6037 } 6389 }
6038 6390
6039 return 0; 6391 return 0;
@@ -6548,7 +6900,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
6548 goto fail_unpin; 6900 goto fail_unpin;
6549 } 6901 }
6550 6902
6551 addr = obj->gtt_offset; 6903 addr = i915_gem_obj_ggtt_offset(obj);
6552 } else { 6904 } else {
6553 int align = IS_I830(dev) ? 16 * 1024 : 256; 6905 int align = IS_I830(dev) ? 16 * 1024 : 256;
6554 ret = i915_gem_attach_phys_object(dev, obj, 6906 ret = i915_gem_attach_phys_object(dev, obj,
@@ -6570,7 +6922,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
6570 if (intel_crtc->cursor_bo != obj) 6922 if (intel_crtc->cursor_bo != obj)
6571 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); 6923 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
6572 } else 6924 } else
6573 i915_gem_object_unpin(intel_crtc->cursor_bo); 6925 i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
6574 drm_gem_object_unreference(&intel_crtc->cursor_bo->base); 6926 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
6575 } 6927 }
6576 6928
@@ -6585,7 +6937,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
6585 6937
6586 return 0; 6938 return 0;
6587fail_unpin: 6939fail_unpin:
6588 i915_gem_object_unpin(obj); 6940 i915_gem_object_unpin_from_display_plane(obj);
6589fail_locked: 6941fail_locked:
6590 mutex_unlock(&dev->struct_mutex); 6942 mutex_unlock(&dev->struct_mutex);
6591fail: 6943fail:
@@ -6875,11 +7227,12 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
6875} 7227}
6876 7228
6877/* Returns the clock of the currently programmed mode of the given pipe. */ 7229/* Returns the clock of the currently programmed mode of the given pipe. */
6878static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) 7230static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7231 struct intel_crtc_config *pipe_config)
6879{ 7232{
7233 struct drm_device *dev = crtc->base.dev;
6880 struct drm_i915_private *dev_priv = dev->dev_private; 7234 struct drm_i915_private *dev_priv = dev->dev_private;
6881 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7235 int pipe = pipe_config->cpu_transcoder;
6882 int pipe = intel_crtc->pipe;
6883 u32 dpll = I915_READ(DPLL(pipe)); 7236 u32 dpll = I915_READ(DPLL(pipe));
6884 u32 fp; 7237 u32 fp;
6885 intel_clock_t clock; 7238 intel_clock_t clock;
@@ -6918,7 +7271,8 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
6918 default: 7271 default:
6919 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " 7272 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
6920 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 7273 "mode\n", (int)(dpll & DPLL_MODE_MASK));
6921 return 0; 7274 pipe_config->adjusted_mode.clock = 0;
7275 return;
6922 } 7276 }
6923 7277
6924 if (IS_PINEVIEW(dev)) 7278 if (IS_PINEVIEW(dev))
@@ -6955,12 +7309,55 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
6955 } 7309 }
6956 } 7310 }
6957 7311
6958 /* XXX: It would be nice to validate the clocks, but we can't reuse 7312 pipe_config->adjusted_mode.clock = clock.dot *
6959 * i830PllIsValid() because it relies on the xf86_config connector 7313 pipe_config->pixel_multiplier;
6960 * configuration being accurate, which it isn't necessarily. 7314}
7315
7316static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
7317 struct intel_crtc_config *pipe_config)
7318{
7319 struct drm_device *dev = crtc->base.dev;
7320 struct drm_i915_private *dev_priv = dev->dev_private;
7321 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7322 int link_freq, repeat;
7323 u64 clock;
7324 u32 link_m, link_n;
7325
7326 repeat = pipe_config->pixel_multiplier;
7327
7328 /*
7329 * The calculation for the data clock is:
7330 * pixel_clock = ((m/n)*(link_clock * nr_lanes * repeat))/bpp
7331 * But we want to avoid losing precison if possible, so:
7332 * pixel_clock = ((m * link_clock * nr_lanes * repeat)/(n*bpp))
7333 *
7334 * and the link clock is simpler:
7335 * link_clock = (m * link_clock * repeat) / n
7336 */
7337
7338 /*
7339 * We need to get the FDI or DP link clock here to derive
7340 * the M/N dividers.
7341 *
7342 * For FDI, we read it from the BIOS or use a fixed 2.7GHz.
7343 * For DP, it's either 1.62GHz or 2.7GHz.
7344 * We do our calculations in 10*MHz since we don't need much precision.
6961 */ 7345 */
7346 if (pipe_config->has_pch_encoder)
7347 link_freq = intel_fdi_link_freq(dev) * 10000;
7348 else
7349 link_freq = pipe_config->port_clock;
6962 7350
6963 return clock.dot; 7351 link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder));
7352 link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder));
7353
7354 if (!link_m || !link_n)
7355 return;
7356
7357 clock = ((u64)link_m * (u64)link_freq * (u64)repeat);
7358 do_div(clock, link_n);
7359
7360 pipe_config->adjusted_mode.clock = clock;
6964} 7361}
6965 7362
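A rough worked example of the readout above, with purely illustrative register values: for a common 1080p60 timing (148,500 kHz pixel clock) carried over a 2.7 GHz-class DP link (port_clock of 270,000) at pixel multiplier 1, the hardware might hold link_m around 0x46666 and link_n at 0x80000; the 64-bit multiply-then-divide yields 0x46666 * 270000 / 0x80000, which is approximately 148,500, recovering the programmed pixel clock without touching the DPLL dividers at all.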
6966/** Returns the currently programmed mode of the given pipe. */ 7363/** Returns the currently programmed mode of the given pipe. */
@@ -6971,6 +7368,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
6971 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7368 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6972 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; 7369 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
6973 struct drm_display_mode *mode; 7370 struct drm_display_mode *mode;
7371 struct intel_crtc_config pipe_config;
6974 int htot = I915_READ(HTOTAL(cpu_transcoder)); 7372 int htot = I915_READ(HTOTAL(cpu_transcoder));
6975 int hsync = I915_READ(HSYNC(cpu_transcoder)); 7373 int hsync = I915_READ(HSYNC(cpu_transcoder));
6976 int vtot = I915_READ(VTOTAL(cpu_transcoder)); 7374 int vtot = I915_READ(VTOTAL(cpu_transcoder));
@@ -6980,7 +7378,18 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
6980 if (!mode) 7378 if (!mode)
6981 return NULL; 7379 return NULL;
6982 7380
6983 mode->clock = intel_crtc_clock_get(dev, crtc); 7381 /*
7382 * Construct a pipe_config sufficient for getting the clock info
7383 * back out of crtc_clock_get.
7384 *
7385 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
7386 * to use a real value here instead.
7387 */
7388 pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
7389 pipe_config.pixel_multiplier = 1;
7390 i9xx_crtc_clock_get(intel_crtc, &pipe_config);
7391
7392 mode->clock = pipe_config.adjusted_mode.clock;
6984 mode->hdisplay = (htot & 0xffff) + 1; 7393 mode->hdisplay = (htot & 0xffff) + 1;
6985 mode->htotal = ((htot & 0xffff0000) >> 16) + 1; 7394 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
6986 mode->hsync_start = (hsync & 0xffff) + 1; 7395 mode->hsync_start = (hsync & 0xffff) + 1;
@@ -7064,13 +7473,19 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
7064 7473
7065void intel_mark_busy(struct drm_device *dev) 7474void intel_mark_busy(struct drm_device *dev)
7066{ 7475{
7067 i915_update_gfx_val(dev->dev_private); 7476 struct drm_i915_private *dev_priv = dev->dev_private;
7477
7478 hsw_package_c8_gpu_busy(dev_priv);
7479 i915_update_gfx_val(dev_priv);
7068} 7480}
7069 7481
7070void intel_mark_idle(struct drm_device *dev) 7482void intel_mark_idle(struct drm_device *dev)
7071{ 7483{
7484 struct drm_i915_private *dev_priv = dev->dev_private;
7072 struct drm_crtc *crtc; 7485 struct drm_crtc *crtc;
7073 7486
7487 hsw_package_c8_gpu_idle(dev_priv);
7488
7074 if (!i915_powersave) 7489 if (!i915_powersave)
7075 return; 7490 return;
7076 7491
@@ -7235,7 +7650,8 @@ inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
7235static int intel_gen2_queue_flip(struct drm_device *dev, 7650static int intel_gen2_queue_flip(struct drm_device *dev,
7236 struct drm_crtc *crtc, 7651 struct drm_crtc *crtc,
7237 struct drm_framebuffer *fb, 7652 struct drm_framebuffer *fb,
7238 struct drm_i915_gem_object *obj) 7653 struct drm_i915_gem_object *obj,
7654 uint32_t flags)
7239{ 7655{
7240 struct drm_i915_private *dev_priv = dev->dev_private; 7656 struct drm_i915_private *dev_priv = dev->dev_private;
7241 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7657 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7263,7 +7679,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
7263 intel_ring_emit(ring, MI_DISPLAY_FLIP | 7679 intel_ring_emit(ring, MI_DISPLAY_FLIP |
7264 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 7680 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7265 intel_ring_emit(ring, fb->pitches[0]); 7681 intel_ring_emit(ring, fb->pitches[0]);
7266 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); 7682 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
7267 intel_ring_emit(ring, 0); /* aux display base address, unused */ 7683 intel_ring_emit(ring, 0); /* aux display base address, unused */
7268 7684
7269 intel_mark_page_flip_active(intel_crtc); 7685 intel_mark_page_flip_active(intel_crtc);
@@ -7279,7 +7695,8 @@ err:
7279static int intel_gen3_queue_flip(struct drm_device *dev, 7695static int intel_gen3_queue_flip(struct drm_device *dev,
7280 struct drm_crtc *crtc, 7696 struct drm_crtc *crtc,
7281 struct drm_framebuffer *fb, 7697 struct drm_framebuffer *fb,
7282 struct drm_i915_gem_object *obj) 7698 struct drm_i915_gem_object *obj,
7699 uint32_t flags)
7283{ 7700{
7284 struct drm_i915_private *dev_priv = dev->dev_private; 7701 struct drm_i915_private *dev_priv = dev->dev_private;
7285 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7702 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7304,7 +7721,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
7304 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | 7721 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
7305 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 7722 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7306 intel_ring_emit(ring, fb->pitches[0]); 7723 intel_ring_emit(ring, fb->pitches[0]);
7307 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); 7724 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
7308 intel_ring_emit(ring, MI_NOOP); 7725 intel_ring_emit(ring, MI_NOOP);
7309 7726
7310 intel_mark_page_flip_active(intel_crtc); 7727 intel_mark_page_flip_active(intel_crtc);
@@ -7320,7 +7737,8 @@ err:
7320static int intel_gen4_queue_flip(struct drm_device *dev, 7737static int intel_gen4_queue_flip(struct drm_device *dev,
7321 struct drm_crtc *crtc, 7738 struct drm_crtc *crtc,
7322 struct drm_framebuffer *fb, 7739 struct drm_framebuffer *fb,
7323 struct drm_i915_gem_object *obj) 7740 struct drm_i915_gem_object *obj,
7741 uint32_t flags)
7324{ 7742{
7325 struct drm_i915_private *dev_priv = dev->dev_private; 7743 struct drm_i915_private *dev_priv = dev->dev_private;
7326 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7744 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7344,7 +7762,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
7344 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 7762 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7345 intel_ring_emit(ring, fb->pitches[0]); 7763 intel_ring_emit(ring, fb->pitches[0]);
7346 intel_ring_emit(ring, 7764 intel_ring_emit(ring,
7347 (obj->gtt_offset + intel_crtc->dspaddr_offset) | 7765 (i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
7348 obj->tiling_mode); 7766 obj->tiling_mode);
7349 7767
7350 /* XXX Enabling the panel-fitter across page-flip is so far 7768 /* XXX Enabling the panel-fitter across page-flip is so far
@@ -7368,7 +7786,8 @@ err:
7368static int intel_gen6_queue_flip(struct drm_device *dev, 7786static int intel_gen6_queue_flip(struct drm_device *dev,
7369 struct drm_crtc *crtc, 7787 struct drm_crtc *crtc,
7370 struct drm_framebuffer *fb, 7788 struct drm_framebuffer *fb,
7371 struct drm_i915_gem_object *obj) 7789 struct drm_i915_gem_object *obj,
7790 uint32_t flags)
7372{ 7791{
7373 struct drm_i915_private *dev_priv = dev->dev_private; 7792 struct drm_i915_private *dev_priv = dev->dev_private;
7374 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7793 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7387,7 +7806,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
7387 intel_ring_emit(ring, MI_DISPLAY_FLIP | 7806 intel_ring_emit(ring, MI_DISPLAY_FLIP |
7388 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 7807 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7389 intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode); 7808 intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
7390 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); 7809 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
7391 7810
7392 /* Contrary to the suggestions in the documentation, 7811 /* Contrary to the suggestions in the documentation,
7393 * "Enable Panel Fitter" does not seem to be required when page 7812 * "Enable Panel Fitter" does not seem to be required when page
@@ -7418,7 +7837,8 @@ err:
7418static int intel_gen7_queue_flip(struct drm_device *dev, 7837static int intel_gen7_queue_flip(struct drm_device *dev,
7419 struct drm_crtc *crtc, 7838 struct drm_crtc *crtc,
7420 struct drm_framebuffer *fb, 7839 struct drm_framebuffer *fb,
7421 struct drm_i915_gem_object *obj) 7840 struct drm_i915_gem_object *obj,
7841 uint32_t flags)
7422{ 7842{
7423 struct drm_i915_private *dev_priv = dev->dev_private; 7843 struct drm_i915_private *dev_priv = dev->dev_private;
7424 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7844 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7452,7 +7872,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
7452 7872
7453 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); 7873 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
7454 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); 7874 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
7455 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); 7875 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
7456 intel_ring_emit(ring, (MI_NOOP)); 7876 intel_ring_emit(ring, (MI_NOOP));
7457 7877
7458 intel_mark_page_flip_active(intel_crtc); 7878 intel_mark_page_flip_active(intel_crtc);
@@ -7468,14 +7888,16 @@ err:
7468static int intel_default_queue_flip(struct drm_device *dev, 7888static int intel_default_queue_flip(struct drm_device *dev,
7469 struct drm_crtc *crtc, 7889 struct drm_crtc *crtc,
7470 struct drm_framebuffer *fb, 7890 struct drm_framebuffer *fb,
7471 struct drm_i915_gem_object *obj) 7891 struct drm_i915_gem_object *obj,
7892 uint32_t flags)
7472{ 7893{
7473 return -ENODEV; 7894 return -ENODEV;
7474} 7895}
7475 7896
7476static int intel_crtc_page_flip(struct drm_crtc *crtc, 7897static int intel_crtc_page_flip(struct drm_crtc *crtc,
7477 struct drm_framebuffer *fb, 7898 struct drm_framebuffer *fb,
7478 struct drm_pending_vblank_event *event) 7899 struct drm_pending_vblank_event *event,
7900 uint32_t page_flip_flags)
7479{ 7901{
7480 struct drm_device *dev = crtc->dev; 7902 struct drm_device *dev = crtc->dev;
7481 struct drm_i915_private *dev_priv = dev->dev_private; 7903 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7545,7 +7967,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7545 atomic_inc(&intel_crtc->unpin_work_count); 7967 atomic_inc(&intel_crtc->unpin_work_count);
7546 intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); 7968 intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
7547 7969
7548 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); 7970 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, page_flip_flags);
7549 if (ret) 7971 if (ret)
7550 goto cleanup_pending; 7972 goto cleanup_pending;
7551 7973
@@ -7789,7 +8211,6 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
7789 struct drm_display_mode *mode) 8211 struct drm_display_mode *mode)
7790{ 8212{
7791 struct drm_device *dev = crtc->dev; 8213 struct drm_device *dev = crtc->dev;
7792 struct drm_encoder_helper_funcs *encoder_funcs;
7793 struct intel_encoder *encoder; 8214 struct intel_encoder *encoder;
7794 struct intel_crtc_config *pipe_config; 8215 struct intel_crtc_config *pipe_config;
7795 int plane_bpp, ret = -EINVAL; 8216 int plane_bpp, ret = -EINVAL;
@@ -7806,9 +8227,23 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
7806 8227
7807 drm_mode_copy(&pipe_config->adjusted_mode, mode); 8228 drm_mode_copy(&pipe_config->adjusted_mode, mode);
7808 drm_mode_copy(&pipe_config->requested_mode, mode); 8229 drm_mode_copy(&pipe_config->requested_mode, mode);
7809 pipe_config->cpu_transcoder = to_intel_crtc(crtc)->pipe; 8230 pipe_config->cpu_transcoder =
8231 (enum transcoder) to_intel_crtc(crtc)->pipe;
7810 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 8232 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
7811 8233
8234 /*
8235 * Sanitize sync polarity flags based on requested ones. If neither
8236 * positive nor negative polarity is requested, treat this as meaning
8237 * negative polarity.
8238 */
8239 if (!(pipe_config->adjusted_mode.flags &
8240 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
8241 pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
8242
8243 if (!(pipe_config->adjusted_mode.flags &
8244 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
8245 pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
8246
7812 /* Compute a starting value for pipe_config->pipe_bpp taking the source 8247 /* Compute a starting value for pipe_config->pipe_bpp taking the source
7813 * plane pixel format and any sink constraints into account. Returns the 8248 * plane pixel format and any sink constraints into account. Returns the
7814 * source plane bpp so that dithering can be selected on mismatches 8249 * source plane bpp so that dithering can be selected on mismatches
@@ -7823,6 +8258,9 @@ encoder_retry:
7823 pipe_config->port_clock = 0; 8258 pipe_config->port_clock = 0;
7824 pipe_config->pixel_multiplier = 1; 8259 pipe_config->pixel_multiplier = 1;
7825 8260
8261 /* Fill in default crtc timings, allow encoders to overwrite them. */
8262 drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0);
8263
7826 /* Pass our mode to the connectors and the CRTC to give them a chance to 8264 /* Pass our mode to the connectors and the CRTC to give them a chance to
7827 * adjust it according to limitations or connector properties, and also 8265 * adjust it according to limitations or connector properties, and also
7828 * a chance to reject the mode entirely. 8266 * a chance to reject the mode entirely.
@@ -7833,20 +8271,8 @@ encoder_retry:
7833 if (&encoder->new_crtc->base != crtc) 8271 if (&encoder->new_crtc->base != crtc)
7834 continue; 8272 continue;
7835 8273
7836 if (encoder->compute_config) { 8274 if (!(encoder->compute_config(encoder, pipe_config))) {
7837 if (!(encoder->compute_config(encoder, pipe_config))) { 8275 DRM_DEBUG_KMS("Encoder config failure\n");
7838 DRM_DEBUG_KMS("Encoder config failure\n");
7839 goto fail;
7840 }
7841
7842 continue;
7843 }
7844
7845 encoder_funcs = encoder->base.helper_private;
7846 if (!(encoder_funcs->mode_fixup(&encoder->base,
7847 &pipe_config->requested_mode,
7848 &pipe_config->adjusted_mode))) {
7849 DRM_DEBUG_KMS("Encoder fixup failed\n");
7850 goto fail; 8276 goto fail;
7851 } 8277 }
7852 } 8278 }
@@ -8041,6 +8467,28 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
8041 8467
8042} 8468}
8043 8469
8470static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur,
8471 struct intel_crtc_config *new)
8472{
8473 int clock1, clock2, diff;
8474
8475 clock1 = cur->adjusted_mode.clock;
8476 clock2 = new->adjusted_mode.clock;
8477
8478 if (clock1 == clock2)
8479 return true;
8480
8481 if (!clock1 || !clock2)
8482 return false;
8483
8484 diff = abs(clock1 - clock2);
8485
8486 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
8487 return true;
8488
8489 return false;
8490}
8491
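Ignoring integer truncation, the inequality above reduces to |clock1 - clock2| < 5% of (clock1 + clock2), i.e. the two clocks must agree to within roughly 10% of their average. For example, 100,000 vs 110,000 kHz still passes (the 10,000 difference is about 4.8% of 210,000) while 100,000 vs 112,000 kHz fails (about 5.7%).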
8044#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \ 8492#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
8045 list_for_each_entry((intel_crtc), \ 8493 list_for_each_entry((intel_crtc), \
8046 &(dev)->mode_config.crtc_list, \ 8494 &(dev)->mode_config.crtc_list, \
@@ -8072,7 +8520,7 @@ intel_pipe_config_compare(struct drm_device *dev,
8072 8520
8073#define PIPE_CONF_CHECK_FLAGS(name, mask) \ 8521#define PIPE_CONF_CHECK_FLAGS(name, mask) \
8074 if ((current_config->name ^ pipe_config->name) & (mask)) { \ 8522 if ((current_config->name ^ pipe_config->name) & (mask)) { \
8075 DRM_ERROR("mismatch in " #name " " \ 8523 DRM_ERROR("mismatch in " #name "(" #mask ") " \
8076 "(expected %i, found %i)\n", \ 8524 "(expected %i, found %i)\n", \
8077 current_config->name & (mask), \ 8525 current_config->name & (mask), \
8078 pipe_config->name & (mask)); \ 8526 pipe_config->name & (mask)); \
@@ -8106,8 +8554,7 @@ intel_pipe_config_compare(struct drm_device *dev,
8106 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start); 8554 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
8107 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end); 8555 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
8108 8556
8109 if (!HAS_PCH_SPLIT(dev)) 8557 PIPE_CONF_CHECK_I(pixel_multiplier);
8110 PIPE_CONF_CHECK_I(pixel_multiplier);
8111 8558
8112 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, 8559 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
8113 DRM_MODE_FLAG_INTERLACE); 8560 DRM_MODE_FLAG_INTERLACE);
@@ -8138,6 +8585,7 @@ intel_pipe_config_compare(struct drm_device *dev,
8138 8585
8139 PIPE_CONF_CHECK_I(shared_dpll); 8586 PIPE_CONF_CHECK_I(shared_dpll);
8140 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 8587 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
8588 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
8141 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 8589 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
8142 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 8590 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
8143 8591
@@ -8146,6 +8594,15 @@ intel_pipe_config_compare(struct drm_device *dev,
8146#undef PIPE_CONF_CHECK_FLAGS 8594#undef PIPE_CONF_CHECK_FLAGS
8147#undef PIPE_CONF_QUIRK 8595#undef PIPE_CONF_QUIRK
8148 8596
8597 if (!IS_HASWELL(dev)) {
8598 if (!intel_fuzzy_clock_check(current_config, pipe_config)) {
8599 DRM_ERROR("mismatch in clock (expected %d, found %d)\n",
8600 current_config->adjusted_mode.clock,
8601 pipe_config->adjusted_mode.clock);
8602 return false;
8603 }
8604 }
8605
8149 return true; 8606 return true;
8150} 8607}
8151 8608
@@ -8277,6 +8734,9 @@ check_crtc_state(struct drm_device *dev)
8277 encoder->get_config(encoder, &pipe_config); 8734 encoder->get_config(encoder, &pipe_config);
8278 } 8735 }
8279 8736
8737 if (dev_priv->display.get_clock)
8738 dev_priv->display.get_clock(crtc, &pipe_config);
8739
8280 WARN(crtc->active != active, 8740 WARN(crtc->active != active,
8281 "crtc active state doesn't match with hw state " 8741 "crtc active state doesn't match with hw state "
8282 "(expected %i, found %i)\n", crtc->active, active); 8742 "(expected %i, found %i)\n", crtc->active, active);
@@ -8454,9 +8914,9 @@ out:
8454 return ret; 8914 return ret;
8455} 8915}
8456 8916
8457int intel_set_mode(struct drm_crtc *crtc, 8917static int intel_set_mode(struct drm_crtc *crtc,
8458 struct drm_display_mode *mode, 8918 struct drm_display_mode *mode,
8459 int x, int y, struct drm_framebuffer *fb) 8919 int x, int y, struct drm_framebuffer *fb)
8460{ 8920{
8461 int ret; 8921 int ret;
8462 8922
@@ -8573,8 +9033,16 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
8573 } else if (set->crtc->fb != set->fb) { 9033 } else if (set->crtc->fb != set->fb) {
8574 /* If we have no fb then treat it as a full mode set */ 9034 /* If we have no fb then treat it as a full mode set */
8575 if (set->crtc->fb == NULL) { 9035 if (set->crtc->fb == NULL) {
8576 DRM_DEBUG_KMS("crtc has no fb, full mode set\n"); 9036 struct intel_crtc *intel_crtc =
8577 config->mode_changed = true; 9037 to_intel_crtc(set->crtc);
9038
9039 if (intel_crtc->active && i915_fastboot) {
9040 DRM_DEBUG_KMS("crtc has no fb, will flip\n");
9041 config->fb_changed = true;
9042 } else {
9043 DRM_DEBUG_KMS("inactive crtc, full mode set\n");
9044 config->mode_changed = true;
9045 }
8578 } else if (set->fb == NULL) { 9046 } else if (set->fb == NULL) {
8579 config->mode_changed = true; 9047 config->mode_changed = true;
8580 } else if (set->fb->pixel_format != 9048 } else if (set->fb->pixel_format !=
@@ -8594,6 +9062,9 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
8594 drm_mode_debug_printmodeline(set->mode); 9062 drm_mode_debug_printmodeline(set->mode);
8595 config->mode_changed = true; 9063 config->mode_changed = true;
8596 } 9064 }
9065
9066 DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
9067 set->crtc->base.id, config->mode_changed, config->fb_changed);
8597} 9068}
8598 9069
8599static int 9070static int
@@ -8604,14 +9075,13 @@ intel_modeset_stage_output_state(struct drm_device *dev,
8604 struct drm_crtc *new_crtc; 9075 struct drm_crtc *new_crtc;
8605 struct intel_connector *connector; 9076 struct intel_connector *connector;
8606 struct intel_encoder *encoder; 9077 struct intel_encoder *encoder;
8607 int count, ro; 9078 int ro;
8608 9079
8609 /* The upper layers ensure that we either disable a crtc or have a list 9080 /* The upper layers ensure that we either disable a crtc or have a list
8610 * of connectors. For paranoia, double-check this. */ 9081 * of connectors. For paranoia, double-check this. */
8611 WARN_ON(!set->fb && (set->num_connectors != 0)); 9082 WARN_ON(!set->fb && (set->num_connectors != 0));
8612 WARN_ON(set->fb && (set->num_connectors == 0)); 9083 WARN_ON(set->fb && (set->num_connectors == 0));
8613 9084
8614 count = 0;
8615 list_for_each_entry(connector, &dev->mode_config.connector_list, 9085 list_for_each_entry(connector, &dev->mode_config.connector_list,
8616 base.head) { 9086 base.head) {
8617 /* Otherwise traverse passed in connector list and get encoders 9087 /* Otherwise traverse passed in connector list and get encoders
@@ -8645,7 +9115,6 @@ intel_modeset_stage_output_state(struct drm_device *dev,
8645 /* connector->new_encoder is now updated for all connectors. */ 9115 /* connector->new_encoder is now updated for all connectors. */
8646 9116
8647 /* Update crtc of enabled connectors. */ 9117 /* Update crtc of enabled connectors. */
8648 count = 0;
8649 list_for_each_entry(connector, &dev->mode_config.connector_list, 9118 list_for_each_entry(connector, &dev->mode_config.connector_list,
8650 base.head) { 9119 base.head) {
8651 if (!connector->new_encoder) 9120 if (!connector->new_encoder)
@@ -8804,19 +9273,32 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
8804 return val & DPLL_VCO_ENABLE; 9273 return val & DPLL_VCO_ENABLE;
8805} 9274}
8806 9275
9276static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
9277 struct intel_shared_dpll *pll)
9278{
9279 I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
9280 I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
9281}
9282
8807static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, 9283static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
8808 struct intel_shared_dpll *pll) 9284 struct intel_shared_dpll *pll)
8809{ 9285{
8810 uint32_t reg, val;
8811
8812 /* PCH refclock must be enabled first */ 9286 /* PCH refclock must be enabled first */
8813 assert_pch_refclk_enabled(dev_priv); 9287 assert_pch_refclk_enabled(dev_priv);
8814 9288
8815 reg = PCH_DPLL(pll->id); 9289 I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
8816 val = I915_READ(reg); 9290
8817 val |= DPLL_VCO_ENABLE; 9291 /* Wait for the clocks to stabilize. */
8818 I915_WRITE(reg, val); 9292 POSTING_READ(PCH_DPLL(pll->id));
8819 POSTING_READ(reg); 9293 udelay(150);
9294
9295 /* The pixel multiplier can only be updated once the
9296 * DPLL is enabled and the clocks are stable.
9297 *
9298 * So write it again.
9299 */
9300 I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
9301 POSTING_READ(PCH_DPLL(pll->id));
8820 udelay(200); 9302 udelay(200);
8821} 9303}
8822 9304
@@ -8825,7 +9307,6 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
8825{ 9307{
8826 struct drm_device *dev = dev_priv->dev; 9308 struct drm_device *dev = dev_priv->dev;
8827 struct intel_crtc *crtc; 9309 struct intel_crtc *crtc;
8828 uint32_t reg, val;
8829 9310
8830 /* Make sure no transcoder is still depending on us. */ 9311 /* Make sure no transcoder is still depending on us. */
8831 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 9312 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
@@ -8833,11 +9314,8 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
8833 assert_pch_transcoder_disabled(dev_priv, crtc->pipe); 9314 assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
8834 } 9315 }
8835 9316
8836 reg = PCH_DPLL(pll->id); 9317 I915_WRITE(PCH_DPLL(pll->id), 0);
8837 val = I915_READ(reg); 9318 POSTING_READ(PCH_DPLL(pll->id));
8838 val &= ~DPLL_VCO_ENABLE;
8839 I915_WRITE(reg, val);
8840 POSTING_READ(reg);
8841 udelay(200); 9319 udelay(200);
8842} 9320}
8843 9321
@@ -8856,6 +9334,7 @@ static void ibx_pch_dpll_init(struct drm_device *dev)
8856 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 9334 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8857 dev_priv->shared_dplls[i].id = i; 9335 dev_priv->shared_dplls[i].id = i;
8858 dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i]; 9336 dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
9337 dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
8859 dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable; 9338 dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
8860 dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable; 9339 dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
8861 dev_priv->shared_dplls[i].get_hw_state = 9340 dev_priv->shared_dplls[i].get_hw_state =
@@ -9035,8 +9514,13 @@ static void intel_setup_outputs(struct drm_device *dev)
9035 intel_dp_init(dev, PCH_DP_D, PORT_D); 9514 intel_dp_init(dev, PCH_DP_D, PORT_D);
9036 } else if (IS_VALLEYVIEW(dev)) { 9515 } else if (IS_VALLEYVIEW(dev)) {
9037 /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */ 9516 /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
9038 if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED) 9517 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
9039 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C); 9518 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
9519 PORT_C);
9520 if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
9521 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C,
9522 PORT_C);
9523 }
9040 9524
9041 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) { 9525 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
9042 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB, 9526 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
@@ -9096,13 +9580,17 @@ static void intel_setup_outputs(struct drm_device *dev)
9096 drm_helper_move_panel_connectors_to_head(dev); 9580 drm_helper_move_panel_connectors_to_head(dev);
9097} 9581}
9098 9582
9583void intel_framebuffer_fini(struct intel_framebuffer *fb)
9584{
9585 drm_framebuffer_cleanup(&fb->base);
9586 drm_gem_object_unreference_unlocked(&fb->obj->base);
9587}
9588
9099static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 9589static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
9100{ 9590{
9101 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 9591 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
9102 9592
9103 drm_framebuffer_cleanup(fb); 9593 intel_framebuffer_fini(intel_fb);
9104 drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
9105
9106 kfree(intel_fb); 9594 kfree(intel_fb);
9107} 9595}
9108 9596
@@ -9272,6 +9760,7 @@ static void intel_init_display(struct drm_device *dev)
9272 dev_priv->display.update_plane = ironlake_update_plane; 9760 dev_priv->display.update_plane = ironlake_update_plane;
9273 } else if (HAS_PCH_SPLIT(dev)) { 9761 } else if (HAS_PCH_SPLIT(dev)) {
9274 dev_priv->display.get_pipe_config = ironlake_get_pipe_config; 9762 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
9763 dev_priv->display.get_clock = ironlake_crtc_clock_get;
9275 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; 9764 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
9276 dev_priv->display.crtc_enable = ironlake_crtc_enable; 9765 dev_priv->display.crtc_enable = ironlake_crtc_enable;
9277 dev_priv->display.crtc_disable = ironlake_crtc_disable; 9766 dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -9279,6 +9768,7 @@ static void intel_init_display(struct drm_device *dev)
9279 dev_priv->display.update_plane = ironlake_update_plane; 9768 dev_priv->display.update_plane = ironlake_update_plane;
9280 } else if (IS_VALLEYVIEW(dev)) { 9769 } else if (IS_VALLEYVIEW(dev)) {
9281 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 9770 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
9771 dev_priv->display.get_clock = i9xx_crtc_clock_get;
9282 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 9772 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
9283 dev_priv->display.crtc_enable = valleyview_crtc_enable; 9773 dev_priv->display.crtc_enable = valleyview_crtc_enable;
9284 dev_priv->display.crtc_disable = i9xx_crtc_disable; 9774 dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9286,6 +9776,7 @@ static void intel_init_display(struct drm_device *dev)
9286 dev_priv->display.update_plane = i9xx_update_plane; 9776 dev_priv->display.update_plane = i9xx_update_plane;
9287 } else { 9777 } else {
9288 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 9778 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
9779 dev_priv->display.get_clock = i9xx_crtc_clock_get;
9289 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 9780 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
9290 dev_priv->display.crtc_enable = i9xx_crtc_enable; 9781 dev_priv->display.crtc_enable = i9xx_crtc_enable;
9291 dev_priv->display.crtc_disable = i9xx_crtc_disable; 9782 dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9303,9 +9794,12 @@ static void intel_init_display(struct drm_device *dev)
9303 else if (IS_I915G(dev)) 9794 else if (IS_I915G(dev))
9304 dev_priv->display.get_display_clock_speed = 9795 dev_priv->display.get_display_clock_speed =
9305 i915_get_display_clock_speed; 9796 i915_get_display_clock_speed;
9306 else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev)) 9797 else if (IS_I945GM(dev) || IS_845G(dev))
9307 dev_priv->display.get_display_clock_speed = 9798 dev_priv->display.get_display_clock_speed =
9308 i9xx_misc_get_display_clock_speed; 9799 i9xx_misc_get_display_clock_speed;
9800 else if (IS_PINEVIEW(dev))
9801 dev_priv->display.get_display_clock_speed =
9802 pnv_get_display_clock_speed;
9309 else if (IS_I915GM(dev)) 9803 else if (IS_I915GM(dev))
9310 dev_priv->display.get_display_clock_speed = 9804 dev_priv->display.get_display_clock_speed =
9311 i915gm_get_display_clock_speed; 9805 i915gm_get_display_clock_speed;
@@ -9586,7 +10080,7 @@ void intel_modeset_init(struct drm_device *dev)
9586 INTEL_INFO(dev)->num_pipes, 10080 INTEL_INFO(dev)->num_pipes,
9587 INTEL_INFO(dev)->num_pipes > 1 ? "s" : ""); 10081 INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
9588 10082
9589 for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) { 10083 for_each_pipe(i) {
9590 intel_crtc_init(dev, i); 10084 intel_crtc_init(dev, i);
9591 for (j = 0; j < dev_priv->num_plane; j++) { 10085 for (j = 0; j < dev_priv->num_plane; j++) {
9592 ret = intel_plane_init(dev, i, j); 10086 ret = intel_plane_init(dev, i, j);
@@ -9792,6 +10286,17 @@ void i915_redisable_vga(struct drm_device *dev)
9792 struct drm_i915_private *dev_priv = dev->dev_private; 10286 struct drm_i915_private *dev_priv = dev->dev_private;
9793 u32 vga_reg = i915_vgacntrl_reg(dev); 10287 u32 vga_reg = i915_vgacntrl_reg(dev);
9794 10288
10289 /* This function can be called both from intel_modeset_setup_hw_state or
10290 * at a very early point in our resume sequence, where the power well
10291 * structures are not yet restored. Since this function is at a very
10292 * paranoid "someone might have enabled VGA while we were not looking"
10293 * level, just check if the power well is enabled instead of trying to
10294 * follow the "don't touch the power well if we don't need it" policy
10295 * the rest of the driver uses. */
10296 if (HAS_POWER_WELL(dev) &&
10297 (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
10298 return;
10299
9795 if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { 10300 if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
9796 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 10301 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
9797 i915_disable_vga(dev); 10302 i915_disable_vga(dev);
@@ -9862,6 +10367,15 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
9862 pipe); 10367 pipe);
9863 } 10368 }
9864 10369
10370 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
10371 base.head) {
10372 if (!crtc->active)
10373 continue;
10374 if (dev_priv->display.get_clock)
10375 dev_priv->display.get_clock(crtc,
10376 &crtc->config);
10377 }
10378
9865 list_for_each_entry(connector, &dev->mode_config.connector_list, 10379 list_for_each_entry(connector, &dev->mode_config.connector_list,
9866 base.head) { 10380 base.head) {
9867 if (connector->get_hw_state(connector)) { 10381 if (connector->get_hw_state(connector)) {
@@ -9893,6 +10407,22 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
9893 10407
9894 intel_modeset_readout_hw_state(dev); 10408 intel_modeset_readout_hw_state(dev);
9895 10409
10410 /*
10411 * Now that we have the config, copy it to each CRTC struct
10412 * Note that this could go away if we move to using crtc_config
10413 * checking everywhere.
10414 */
10415 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
10416 base.head) {
10417 if (crtc->active && i915_fastboot) {
10418 intel_crtc_mode_from_pipe_config(crtc, &crtc->config);
10419
10420 DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
10421 crtc->base.base.id);
10422 drm_mode_debug_printmodeline(&crtc->base.mode);
10423 }
10424 }
10425
9896 /* HW state is read out, now we need to sanitize this mess. */ 10426 /* HW state is read out, now we need to sanitize this mess. */
9897 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 10427 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9898 base.head) { 10428 base.head) {
@@ -9955,7 +10485,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
9955{ 10485{
9956 struct drm_i915_private *dev_priv = dev->dev_private; 10486 struct drm_i915_private *dev_priv = dev->dev_private;
9957 struct drm_crtc *crtc; 10487 struct drm_crtc *crtc;
9958 struct intel_crtc *intel_crtc;
9959 10488
9960 /* 10489 /*
9961 * Interrupts and polling as the first thing to avoid creating havoc. 10490 * Interrupts and polling as the first thing to avoid creating havoc.
@@ -9979,7 +10508,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
9979 if (!crtc->fb) 10508 if (!crtc->fb)
9980 continue; 10509 continue;
9981 10510
9982 intel_crtc = to_intel_crtc(crtc);
9983 intel_increase_pllclock(crtc); 10511 intel_increase_pllclock(crtc);
9984 } 10512 }
9985 10513
@@ -10035,9 +10563,6 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
10035 return 0; 10563 return 0;
10036} 10564}
10037 10565
10038#ifdef CONFIG_DEBUG_FS
10039#include <linux/seq_file.h>
10040
10041struct intel_display_error_state { 10566struct intel_display_error_state {
10042 10567
10043 u32 power_well_driver; 10568 u32 power_well_driver;
@@ -10151,8 +10676,7 @@ intel_display_capture_error_state(struct drm_device *dev)
10151 * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to 10676 * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to
10152 * prevent the next I915_WRITE from detecting it and printing an error 10677 * prevent the next I915_WRITE from detecting it and printing an error
10153 * message. */ 10678 * message. */
10154 if (HAS_POWER_WELL(dev)) 10679 intel_uncore_clear_errors(dev);
10155 I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
10156 10680
10157 return error; 10681 return error;
10158} 10682}
@@ -10209,4 +10733,3 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
10209 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); 10733 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
10210 } 10734 }
10211} 10735}
10212#endif
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 26e162bb3a51..2151d13772b8 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -276,29 +276,13 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
276 return status; 276 return status;
277} 277}
278 278
279static int 279static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp,
280intel_dp_aux_ch(struct intel_dp *intel_dp, 280 int index)
281 uint8_t *send, int send_bytes,
282 uint8_t *recv, int recv_size)
283{ 281{
284 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 282 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
285 struct drm_device *dev = intel_dig_port->base.base.dev; 283 struct drm_device *dev = intel_dig_port->base.base.dev;
286 struct drm_i915_private *dev_priv = dev->dev_private; 284 struct drm_i915_private *dev_priv = dev->dev_private;
287 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
288 uint32_t ch_data = ch_ctl + 4;
289 int i, ret, recv_bytes;
290 uint32_t status;
291 uint32_t aux_clock_divider;
292 int try, precharge;
293 bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
294 285
295 /* dp aux is extremely sensitive to irq latency, hence request the
296 * lowest possible wakeup latency and so prevent the cpu from going into
297 * deep sleep states.
298 */
299 pm_qos_update_request(&dev_priv->pm_qos, 0);
300
301 intel_dp_check_edp(intel_dp);
302 /* The clock divider is based off the hrawclk, 286 /* The clock divider is based off the hrawclk,
303 * and would like to run at 2MHz. So, take the 287 * and would like to run at 2MHz. So, take the
304 * hrawclk value and divide by 2 and use that 288 * hrawclk value and divide by 2 and use that
@@ -307,29 +291,61 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
307 * clock divider. 291 * clock divider.
308 */ 292 */
309 if (IS_VALLEYVIEW(dev)) { 293 if (IS_VALLEYVIEW(dev)) {
310 aux_clock_divider = 100; 294 return index ? 0 : 100;
311 } else if (intel_dig_port->port == PORT_A) { 295 } else if (intel_dig_port->port == PORT_A) {
296 if (index)
297 return 0;
312 if (HAS_DDI(dev)) 298 if (HAS_DDI(dev))
313 aux_clock_divider = DIV_ROUND_CLOSEST( 299 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
314 intel_ddi_get_cdclk_freq(dev_priv), 2000);
315 else if (IS_GEN6(dev) || IS_GEN7(dev)) 300 else if (IS_GEN6(dev) || IS_GEN7(dev))
316 aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */ 301 return 200; /* SNB & IVB eDP input clock at 400Mhz */
317 else 302 else
318 aux_clock_divider = 225; /* eDP input clock at 450Mhz */ 303 return 225; /* eDP input clock at 450Mhz */
319 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 304 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
320 /* Workaround for non-ULT HSW */ 305 /* Workaround for non-ULT HSW */
321 aux_clock_divider = 74; 306 switch (index) {
307 case 0: return 63;
308 case 1: return 72;
309 default: return 0;
310 }
322 } else if (HAS_PCH_SPLIT(dev)) { 311 } else if (HAS_PCH_SPLIT(dev)) {
323 aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2); 312 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
324 } else { 313 } else {
325 aux_clock_divider = intel_hrawclk(dev) / 2; 314 return index ? 0 : intel_hrawclk(dev) / 2;
326 } 315 }
316}
317
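Returning 0 once the index runs past the platform's last valid divider is what gives the rewritten transfer loop below, while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))), a natural termination condition: non-ULT HSW gets two candidate dividers (63, then 72) before the AUX transaction is abandoned, while every other platform offers exactly one.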
318static int
319intel_dp_aux_ch(struct intel_dp *intel_dp,
320 uint8_t *send, int send_bytes,
321 uint8_t *recv, int recv_size)
322{
323 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
324 struct drm_device *dev = intel_dig_port->base.base.dev;
325 struct drm_i915_private *dev_priv = dev->dev_private;
326 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
327 uint32_t ch_data = ch_ctl + 4;
328 uint32_t aux_clock_divider;
329 int i, ret, recv_bytes;
330 uint32_t status;
331 int try, precharge, clock = 0;
332 bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
333
334 /* dp aux is extremely sensitive to irq latency, hence request the
335 * lowest possible wakeup latency and so prevent the cpu from going into
336 * deep sleep states.
337 */
338 pm_qos_update_request(&dev_priv->pm_qos, 0);
339
340 intel_dp_check_edp(intel_dp);
327 341
328 if (IS_GEN6(dev)) 342 if (IS_GEN6(dev))
329 precharge = 3; 343 precharge = 3;
330 else 344 else
331 precharge = 5; 345 precharge = 5;
332 346
347 intel_aux_display_runtime_get(dev_priv);
348
333 /* Try to wait for any previous AUX channel activity */ 349 /* Try to wait for any previous AUX channel activity */
334 for (try = 0; try < 3; try++) { 350 for (try = 0; try < 3; try++) {
335 status = I915_READ_NOTRACE(ch_ctl); 351 status = I915_READ_NOTRACE(ch_ctl);
@@ -345,37 +361,41 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
345 goto out; 361 goto out;
346 } 362 }
347 363
348 /* Must try at least 3 times according to DP spec */ 364 while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
349 for (try = 0; try < 5; try++) { 365 /* Must try at least 3 times according to DP spec */
350 /* Load the send data into the aux channel data registers */ 366 for (try = 0; try < 5; try++) {
351 for (i = 0; i < send_bytes; i += 4) 367 /* Load the send data into the aux channel data registers */
352 I915_WRITE(ch_data + i, 368 for (i = 0; i < send_bytes; i += 4)
353 pack_aux(send + i, send_bytes - i)); 369 I915_WRITE(ch_data + i,
354 370 pack_aux(send + i, send_bytes - i));
355 /* Send the command and wait for it to complete */ 371
356 I915_WRITE(ch_ctl, 372 /* Send the command and wait for it to complete */
357 DP_AUX_CH_CTL_SEND_BUSY | 373 I915_WRITE(ch_ctl,
358 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | 374 DP_AUX_CH_CTL_SEND_BUSY |
359 DP_AUX_CH_CTL_TIME_OUT_400us | 375 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
360 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 376 DP_AUX_CH_CTL_TIME_OUT_400us |
361 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 377 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
362 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | 378 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
363 DP_AUX_CH_CTL_DONE | 379 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
364 DP_AUX_CH_CTL_TIME_OUT_ERROR | 380 DP_AUX_CH_CTL_DONE |
365 DP_AUX_CH_CTL_RECEIVE_ERROR); 381 DP_AUX_CH_CTL_TIME_OUT_ERROR |
366 382 DP_AUX_CH_CTL_RECEIVE_ERROR);
367 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); 383
368 384 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
369 /* Clear done status and any errors */ 385
370 I915_WRITE(ch_ctl, 386 /* Clear done status and any errors */
371 status | 387 I915_WRITE(ch_ctl,
372 DP_AUX_CH_CTL_DONE | 388 status |
373 DP_AUX_CH_CTL_TIME_OUT_ERROR | 389 DP_AUX_CH_CTL_DONE |
374 DP_AUX_CH_CTL_RECEIVE_ERROR); 390 DP_AUX_CH_CTL_TIME_OUT_ERROR |
375 391 DP_AUX_CH_CTL_RECEIVE_ERROR);
376 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR | 392
377 DP_AUX_CH_CTL_RECEIVE_ERROR)) 393 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
378 continue; 394 DP_AUX_CH_CTL_RECEIVE_ERROR))
395 continue;
396 if (status & DP_AUX_CH_CTL_DONE)
397 break;
398 }
379 if (status & DP_AUX_CH_CTL_DONE) 399 if (status & DP_AUX_CH_CTL_DONE)
380 break; 400 break;
381 } 401 }
@@ -416,6 +436,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
416 ret = recv_bytes; 436 ret = recv_bytes;
417out: 437out:
418 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); 438 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
439 intel_aux_display_runtime_put(dev_priv);
419 440
420 return ret; 441 return ret;
421} 442}
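
As an aside, the restructured intel_dp_aux_ch() above now retries the whole transfer loop once per clock divider reported by get_aux_clock_divider(), stopping when the getter returns 0; within each divider it still makes up to five attempts, since the DP spec requires at least three. A minimal standalone sketch of that control flow, with get_divider() and try_transfer() as hypothetical stand-ins rather than the real i915 helpers:

/*
 * Walk the available clock dividers (index 0, 1, ... until the getter
 * returns 0) and retry the transfer up to five times per divider.
 */
#include <stdbool.h>
#include <stdio.h>

static int get_divider(int index)
{
        /* e.g. a non-ULT HSW part: 63 first, 72 as a fallback */
        static const int dividers[] = { 63, 72 };

        return index < 2 ? dividers[index] : 0;
}

static bool try_transfer(int divider, int attempt)
{
        /* pretend the transfer only succeeds with the second divider */
        return divider == 72 && attempt == 0;
}

int main(void)
{
        int divider, clock = 0;

        while ((divider = get_divider(clock++))) {
                for (int try = 0; try < 5; try++) {
                        if (try_transfer(divider, try)) {
                                printf("done with divider %d (attempt %d)\n",
                                       divider, try + 1);
                                return 0;
                        }
                }
        }

        fprintf(stderr, "AUX transfer failed on all dividers\n");
        return 1;
}
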
@@ -710,8 +731,11 @@ intel_dp_compute_config(struct intel_encoder *encoder,
710 /* Walk through all bpp values. Luckily they're all nicely spaced with 2 731 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
711 * bpc in between. */ 732 * bpc in between. */
712 bpp = pipe_config->pipe_bpp; 733 bpp = pipe_config->pipe_bpp;
713 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) 734 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) {
735 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
736 dev_priv->vbt.edp_bpp);
714 bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp); 737 bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);
738 }
715 739
716 for (; bpp >= 6*3; bpp -= 2*3) { 740 for (; bpp >= 6*3; bpp -= 2*3) {
717 mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); 741 mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
@@ -812,15 +836,14 @@ static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
812 udelay(500); 836 udelay(500);
813} 837}
814 838
815static void 839static void intel_dp_mode_set(struct intel_encoder *encoder)
816intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
817 struct drm_display_mode *adjusted_mode)
818{ 840{
819 struct drm_device *dev = encoder->dev; 841 struct drm_device *dev = encoder->base.dev;
820 struct drm_i915_private *dev_priv = dev->dev_private; 842 struct drm_i915_private *dev_priv = dev->dev_private;
821 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 843 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
822 enum port port = dp_to_dig_port(intel_dp)->port; 844 enum port port = dp_to_dig_port(intel_dp)->port;
823 struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); 845 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
846 struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
824 847
825 /* 848 /*
826 * There are four kinds of DP registers: 849 * There are four kinds of DP registers:
@@ -852,7 +875,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
852 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", 875 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
853 pipe_name(crtc->pipe)); 876 pipe_name(crtc->pipe));
854 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 877 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
855 intel_write_eld(encoder, adjusted_mode); 878 intel_write_eld(&encoder->base, adjusted_mode);
856 } 879 }
857 880
858 intel_dp_init_link_config(intel_dp); 881 intel_dp_init_link_config(intel_dp);
@@ -1360,6 +1383,275 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
1360 } 1383 }
1361 1384
1362 pipe_config->adjusted_mode.flags |= flags; 1385 pipe_config->adjusted_mode.flags |= flags;
1386
1387 if (dp_to_dig_port(intel_dp)->port == PORT_A) {
1388 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
1389 pipe_config->port_clock = 162000;
1390 else
1391 pipe_config->port_clock = 270000;
1392 }
1393}
1394
1395static bool is_edp_psr(struct intel_dp *intel_dp)
1396{
1397 return is_edp(intel_dp) &&
1398 intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
1399}
1400
1401static bool intel_edp_is_psr_enabled(struct drm_device *dev)
1402{
1403 struct drm_i915_private *dev_priv = dev->dev_private;
1404
1405 if (!IS_HASWELL(dev))
1406 return false;
1407
1408 return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
1409}
1410
1411static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
1412 struct edp_vsc_psr *vsc_psr)
1413{
1414 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1415 struct drm_device *dev = dig_port->base.base.dev;
1416 struct drm_i915_private *dev_priv = dev->dev_private;
1417 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1418 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
1419 u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
1420 uint32_t *data = (uint32_t *) vsc_psr;
1421 unsigned int i;
1422
 1423 /* As per BSpec (Pipe Video Data Island Packet), we need to disable
 1424 the video DIP being updated before programming the video DIP data
 1425 buffer registers for the DIP being updated. */
1426 I915_WRITE(ctl_reg, 0);
1427 POSTING_READ(ctl_reg);
1428
1429 for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
1430 if (i < sizeof(struct edp_vsc_psr))
1431 I915_WRITE(data_reg + i, *data++);
1432 else
1433 I915_WRITE(data_reg + i, 0);
1434 }
1435
1436 I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
1437 POSTING_READ(ctl_reg);
1438}
1439
1440static void intel_edp_psr_setup(struct intel_dp *intel_dp)
1441{
1442 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1443 struct drm_i915_private *dev_priv = dev->dev_private;
1444 struct edp_vsc_psr psr_vsc;
1445
1446 if (intel_dp->psr_setup_done)
1447 return;
1448
 1449 /* Prepare VSC packet as per eDP 1.3 spec, Table 3.10 */
1450 memset(&psr_vsc, 0, sizeof(psr_vsc));
1451 psr_vsc.sdp_header.HB0 = 0;
1452 psr_vsc.sdp_header.HB1 = 0x7;
1453 psr_vsc.sdp_header.HB2 = 0x2;
1454 psr_vsc.sdp_header.HB3 = 0x8;
1455 intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
1456
1457 /* Avoid continuous PSR exit by masking memup and hpd */
1458 I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
1459 EDP_PSR_DEBUG_MASK_HPD);
1460
1461 intel_dp->psr_setup_done = true;
1462}
1463
1464static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
1465{
1466 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1467 struct drm_i915_private *dev_priv = dev->dev_private;
1468 uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0);
1469 int precharge = 0x3;
1470 int msg_size = 5; /* Header(4) + Message(1) */
1471
1472 /* Enable PSR in sink */
1473 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
1474 intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
1475 DP_PSR_ENABLE &
1476 ~DP_PSR_MAIN_LINK_ACTIVE);
1477 else
1478 intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
1479 DP_PSR_ENABLE |
1480 DP_PSR_MAIN_LINK_ACTIVE);
1481
1482 /* Setup AUX registers */
1483 I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND);
1484 I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION);
1485 I915_WRITE(EDP_PSR_AUX_CTL,
1486 DP_AUX_CH_CTL_TIME_OUT_400us |
1487 (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1488 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
1489 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
1490}
1491
1492static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
1493{
1494 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1495 struct drm_i915_private *dev_priv = dev->dev_private;
1496 uint32_t max_sleep_time = 0x1f;
1497 uint32_t idle_frames = 1;
1498 uint32_t val = 0x0;
1499
1500 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
1501 val |= EDP_PSR_LINK_STANDBY;
1502 val |= EDP_PSR_TP2_TP3_TIME_0us;
1503 val |= EDP_PSR_TP1_TIME_0us;
1504 val |= EDP_PSR_SKIP_AUX_EXIT;
1505 } else
1506 val |= EDP_PSR_LINK_DISABLE;
1507
1508 I915_WRITE(EDP_PSR_CTL, val |
1509 EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES |
1510 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
1511 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
1512 EDP_PSR_ENABLE);
1513}
1514
1515static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1516{
1517 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1518 struct drm_device *dev = dig_port->base.base.dev;
1519 struct drm_i915_private *dev_priv = dev->dev_private;
1520 struct drm_crtc *crtc = dig_port->base.base.crtc;
1521 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1522 struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
1523 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
1524
1525 if (!IS_HASWELL(dev)) {
1526 DRM_DEBUG_KMS("PSR not supported on this platform\n");
1527 dev_priv->no_psr_reason = PSR_NO_SOURCE;
1528 return false;
1529 }
1530
1531 if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
1532 (dig_port->port != PORT_A)) {
1533 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
1534 dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA;
1535 return false;
1536 }
1537
1538 if (!is_edp_psr(intel_dp)) {
1539 DRM_DEBUG_KMS("PSR not supported by this panel\n");
1540 dev_priv->no_psr_reason = PSR_NO_SINK;
1541 return false;
1542 }
1543
1544 if (!i915_enable_psr) {
 1545 DRM_DEBUG_KMS("PSR disabled by flag\n");
1546 dev_priv->no_psr_reason = PSR_MODULE_PARAM;
1547 return false;
1548 }
1549
1550 crtc = dig_port->base.base.crtc;
1551 if (crtc == NULL) {
1552 DRM_DEBUG_KMS("crtc not active for PSR\n");
1553 dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
1554 return false;
1555 }
1556
1557 intel_crtc = to_intel_crtc(crtc);
1558 if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) {
1559 DRM_DEBUG_KMS("crtc not active for PSR\n");
1560 dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
1561 return false;
1562 }
1563
1564 obj = to_intel_framebuffer(crtc->fb)->obj;
1565 if (obj->tiling_mode != I915_TILING_X ||
1566 obj->fence_reg == I915_FENCE_REG_NONE) {
1567 DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
1568 dev_priv->no_psr_reason = PSR_NOT_TILED;
1569 return false;
1570 }
1571
1572 if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
1573 DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
1574 dev_priv->no_psr_reason = PSR_SPRITE_ENABLED;
1575 return false;
1576 }
1577
1578 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
1579 S3D_ENABLE) {
1580 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
1581 dev_priv->no_psr_reason = PSR_S3D_ENABLED;
1582 return false;
1583 }
1584
1585 if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
1586 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
1587 dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED;
1588 return false;
1589 }
1590
1591 return true;
1592}
1593
1594static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
1595{
1596 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1597
1598 if (!intel_edp_psr_match_conditions(intel_dp) ||
1599 intel_edp_is_psr_enabled(dev))
1600 return;
1601
1602 /* Setup PSR once */
1603 intel_edp_psr_setup(intel_dp);
1604
1605 /* Enable PSR on the panel */
1606 intel_edp_psr_enable_sink(intel_dp);
1607
1608 /* Enable PSR on the host */
1609 intel_edp_psr_enable_source(intel_dp);
1610}
1611
1612void intel_edp_psr_enable(struct intel_dp *intel_dp)
1613{
1614 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1615
1616 if (intel_edp_psr_match_conditions(intel_dp) &&
1617 !intel_edp_is_psr_enabled(dev))
1618 intel_edp_psr_do_enable(intel_dp);
1619}
1620
1621void intel_edp_psr_disable(struct intel_dp *intel_dp)
1622{
1623 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1624 struct drm_i915_private *dev_priv = dev->dev_private;
1625
1626 if (!intel_edp_is_psr_enabled(dev))
1627 return;
1628
1629 I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
1630
1631 /* Wait till PSR is idle */
1632 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
1633 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
1634 DRM_ERROR("Timed out waiting for PSR Idle State\n");
1635}
1636
1637void intel_edp_psr_update(struct drm_device *dev)
1638{
1639 struct intel_encoder *encoder;
1640 struct intel_dp *intel_dp = NULL;
1641
1642 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
1643 if (encoder->type == INTEL_OUTPUT_EDP) {
1644 intel_dp = enc_to_intel_dp(&encoder->base);
1645
1646 if (!is_edp_psr(intel_dp))
1647 return;
1648
1649 if (!intel_edp_psr_match_conditions(intel_dp))
1650 intel_edp_psr_disable(intel_dp);
1651 else
1652 if (!intel_edp_is_psr_enabled(dev))
1653 intel_edp_psr_do_enable(intel_dp);
1654 }
1363} 1655}
1364 1656
1365static void intel_disable_dp(struct intel_encoder *encoder) 1657static void intel_disable_dp(struct intel_encoder *encoder)
@@ -1411,47 +1703,50 @@ static void intel_enable_dp(struct intel_encoder *encoder)
1411 intel_dp_complete_link_train(intel_dp); 1703 intel_dp_complete_link_train(intel_dp);
1412 intel_dp_stop_link_train(intel_dp); 1704 intel_dp_stop_link_train(intel_dp);
1413 ironlake_edp_backlight_on(intel_dp); 1705 ironlake_edp_backlight_on(intel_dp);
1706}
1414 1707
1415 if (IS_VALLEYVIEW(dev)) { 1708static void vlv_enable_dp(struct intel_encoder *encoder)
1416 struct intel_digital_port *dport = 1709{
1417 enc_to_dig_port(&encoder->base);
1418 int channel = vlv_dport_to_channel(dport);
1419
1420 vlv_wait_port_ready(dev_priv, channel);
1421 }
1422} 1710}
1423 1711
1424static void intel_pre_enable_dp(struct intel_encoder *encoder) 1712static void intel_pre_enable_dp(struct intel_encoder *encoder)
1425{ 1713{
1426 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1714 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1427 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 1715 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
1716
1717 if (dport->port == PORT_A)
1718 ironlake_edp_pll_on(intel_dp);
1719}
1720
1721static void vlv_pre_enable_dp(struct intel_encoder *encoder)
1722{
1723 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1724 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
1428 struct drm_device *dev = encoder->base.dev; 1725 struct drm_device *dev = encoder->base.dev;
1429 struct drm_i915_private *dev_priv = dev->dev_private; 1726 struct drm_i915_private *dev_priv = dev->dev_private;
1727 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
1728 int port = vlv_dport_to_channel(dport);
1729 int pipe = intel_crtc->pipe;
1730 u32 val;
1430 1731
1431 if (dport->port == PORT_A && !IS_VALLEYVIEW(dev)) 1732 mutex_lock(&dev_priv->dpio_lock);
1432 ironlake_edp_pll_on(intel_dp);
1433 1733
1434 if (IS_VALLEYVIEW(dev)) { 1734 val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
1435 struct intel_crtc *intel_crtc = 1735 val = 0;
1436 to_intel_crtc(encoder->base.crtc); 1736 if (pipe)
1437 int port = vlv_dport_to_channel(dport); 1737 val |= (1<<21);
1438 int pipe = intel_crtc->pipe; 1738 else
1439 u32 val; 1739 val &= ~(1<<21);
1440 1740 val |= 0x001000c4;
1441 val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port)); 1741 vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
1442 val = 0; 1742 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
1443 if (pipe) 1743 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
1444 val |= (1<<21);
1445 else
1446 val &= ~(1<<21);
1447 val |= 0x001000c4;
1448 vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
1449 1744
1450 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 1745 mutex_unlock(&dev_priv->dpio_lock);
1451 0x00760018); 1746
1452 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 1747 intel_enable_dp(encoder);
1453 0x00400888); 1748
1454 } 1749 vlv_wait_port_ready(dev_priv, port);
1455} 1750}
1456 1751
1457static void intel_dp_pre_pll_enable(struct intel_encoder *encoder) 1752static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
@@ -1465,6 +1760,7 @@ static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
1465 return; 1760 return;
1466 1761
1467 /* Program Tx lane resets to default */ 1762 /* Program Tx lane resets to default */
1763 mutex_lock(&dev_priv->dpio_lock);
1468 vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 1764 vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
1469 DPIO_PCS_TX_LANE2_RESET | 1765 DPIO_PCS_TX_LANE2_RESET |
1470 DPIO_PCS_TX_LANE1_RESET); 1766 DPIO_PCS_TX_LANE1_RESET);
@@ -1478,6 +1774,7 @@ static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
1478 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00); 1774 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
1479 vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500); 1775 vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
1480 vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000); 1776 vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
1777 mutex_unlock(&dev_priv->dpio_lock);
1481} 1778}
1482 1779
1483/* 1780/*
@@ -1689,6 +1986,7 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
1689 return 0; 1986 return 0;
1690 } 1987 }
1691 1988
1989 mutex_lock(&dev_priv->dpio_lock);
1692 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000); 1990 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
1693 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value); 1991 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
1694 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port), 1992 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
@@ -1697,6 +1995,7 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
1697 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000); 1995 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
1698 vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value); 1996 vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
1699 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000); 1997 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
1998 mutex_unlock(&dev_priv->dpio_lock);
1700 1999
1701 return 0; 2000 return 0;
1702} 2001}
@@ -2030,7 +2329,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
2030 struct drm_device *dev = encoder->dev; 2329 struct drm_device *dev = encoder->dev;
2031 int i; 2330 int i;
2032 uint8_t voltage; 2331 uint8_t voltage;
2033 bool clock_recovery = false;
2034 int voltage_tries, loop_tries; 2332 int voltage_tries, loop_tries;
2035 uint32_t DP = intel_dp->DP; 2333 uint32_t DP = intel_dp->DP;
2036 2334
@@ -2048,7 +2346,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
2048 voltage = 0xff; 2346 voltage = 0xff;
2049 voltage_tries = 0; 2347 voltage_tries = 0;
2050 loop_tries = 0; 2348 loop_tries = 0;
2051 clock_recovery = false;
2052 for (;;) { 2349 for (;;) {
2053 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 2350 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
2054 uint8_t link_status[DP_LINK_STATUS_SIZE]; 2351 uint8_t link_status[DP_LINK_STATUS_SIZE];
@@ -2069,7 +2366,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
2069 2366
2070 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { 2367 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
2071 DRM_DEBUG_KMS("clock recovery OK\n"); 2368 DRM_DEBUG_KMS("clock recovery OK\n");
2072 clock_recovery = true;
2073 break; 2369 break;
2074 } 2370 }
2075 2371
@@ -2275,6 +2571,13 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
2275 if (intel_dp->dpcd[DP_DPCD_REV] == 0) 2571 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
2276 return false; /* DPCD not present */ 2572 return false; /* DPCD not present */
2277 2573
2574 /* Check if the panel supports PSR */
2575 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
2576 intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
2577 intel_dp->psr_dpcd,
2578 sizeof(intel_dp->psr_dpcd));
2579 if (is_edp_psr(intel_dp))
2580 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
2278 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 2581 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2279 DP_DWN_STRM_PORT_PRESENT)) 2582 DP_DWN_STRM_PORT_PRESENT))
2280 return true; /* native DP sink */ 2583 return true; /* native DP sink */
@@ -2542,6 +2845,9 @@ intel_dp_detect(struct drm_connector *connector, bool force)
2542 enum drm_connector_status status; 2845 enum drm_connector_status status;
2543 struct edid *edid = NULL; 2846 struct edid *edid = NULL;
2544 2847
2848 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
2849 connector->base.id, drm_get_connector_name(connector));
2850
2545 intel_dp->has_audio = false; 2851 intel_dp->has_audio = false;
2546 2852
2547 if (HAS_PCH_SPLIT(dev)) 2853 if (HAS_PCH_SPLIT(dev))
@@ -2735,10 +3041,6 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
2735 kfree(intel_dig_port); 3041 kfree(intel_dig_port);
2736} 3042}
2737 3043
2738static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
2739 .mode_set = intel_dp_mode_set,
2740};
2741
2742static const struct drm_connector_funcs intel_dp_connector_funcs = { 3044static const struct drm_connector_funcs intel_dp_connector_funcs = {
2743 .dpms = intel_connector_dpms, 3045 .dpms = intel_connector_dpms,
2744 .detect = intel_dp_detect, 3046 .detect = intel_dp_detect,
@@ -3166,6 +3468,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
3166 WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n", 3468 WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
3167 error, port_name(port)); 3469 error, port_name(port));
3168 3470
3471 intel_dp->psr_setup_done = false;
3472
3169 if (!intel_edp_init_connector(intel_dp, intel_connector)) { 3473 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
3170 i2c_del_adapter(&intel_dp->adapter); 3474 i2c_del_adapter(&intel_dp->adapter);
3171 if (is_edp(intel_dp)) { 3475 if (is_edp(intel_dp)) {
@@ -3216,17 +3520,21 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
3216 3520
3217 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, 3521 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
3218 DRM_MODE_ENCODER_TMDS); 3522 DRM_MODE_ENCODER_TMDS);
3219 drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
3220 3523
3221 intel_encoder->compute_config = intel_dp_compute_config; 3524 intel_encoder->compute_config = intel_dp_compute_config;
3222 intel_encoder->enable = intel_enable_dp; 3525 intel_encoder->mode_set = intel_dp_mode_set;
3223 intel_encoder->pre_enable = intel_pre_enable_dp;
3224 intel_encoder->disable = intel_disable_dp; 3526 intel_encoder->disable = intel_disable_dp;
3225 intel_encoder->post_disable = intel_post_disable_dp; 3527 intel_encoder->post_disable = intel_post_disable_dp;
3226 intel_encoder->get_hw_state = intel_dp_get_hw_state; 3528 intel_encoder->get_hw_state = intel_dp_get_hw_state;
3227 intel_encoder->get_config = intel_dp_get_config; 3529 intel_encoder->get_config = intel_dp_get_config;
3228 if (IS_VALLEYVIEW(dev)) 3530 if (IS_VALLEYVIEW(dev)) {
3229 intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable; 3531 intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
3532 intel_encoder->pre_enable = vlv_pre_enable_dp;
3533 intel_encoder->enable = vlv_enable_dp;
3534 } else {
3535 intel_encoder->pre_enable = intel_pre_enable_dp;
3536 intel_encoder->enable = intel_enable_dp;
3537 }
3230 3538
3231 intel_dig_port->port = port; 3539 intel_dig_port->port = port;
3232 intel_dig_port->dp.output_reg = output_reg; 3540 intel_dig_port->dp.output_reg = output_reg;
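
The PSR support added to intel_dp.c above always enables in the same order: check the source and sink conditions, do the one-time VSC/debug-mask setup, enable PSR in the sink over AUX, then enable it in the source. A minimal standalone sketch of that sequencing, using a simplified placeholder struct instead of the real intel_dp and dev_priv state:

#include <stdbool.h>
#include <stdio.h>

struct psr_state {
        bool setup_done;
        bool enabled;
        bool sink_supports_psr;         /* from the PSR DPCD block */
        bool edp_on_port_a;             /* HSW ties PSR to DDI A (eDP) */
};

static bool psr_match_conditions(const struct psr_state *s)
{
        return s->sink_supports_psr && s->edp_on_port_a;
}

static void psr_setup_once(struct psr_state *s)
{
        if (s->setup_done)
                return;
        /* the driver writes the VSC SDP header and debug masks here */
        s->setup_done = true;
}

static void psr_enable_sink(struct psr_state *s)
{
        (void)s;                /* stands in for the DPCD write over AUX */
}

static void psr_enable_source(struct psr_state *s)
{
        s->enabled = true;      /* stands in for programming EDP_PSR_CTL */
}

static void psr_do_enable(struct psr_state *s)
{
        if (!psr_match_conditions(s) || s->enabled)
                return;

        psr_setup_once(s);
        psr_enable_sink(s);
        psr_enable_source(s);
}

int main(void)
{
        struct psr_state state = {
                .sink_supports_psr = true,
                .edp_on_port_a = true,
        };

        psr_do_enable(&state);
        printf("PSR %s\n", state.enabled ? "enabled" : "not enabled");
        return 0;
}
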
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index b7d6e09456ce..176080822a74 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -26,6 +26,7 @@
26#define __INTEL_DRV_H__ 26#define __INTEL_DRV_H__
27 27
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/hdmi.h>
29#include <drm/i915_drm.h> 30#include <drm/i915_drm.h>
30#include "i915_drv.h" 31#include "i915_drv.h"
31#include <drm/drm_crtc.h> 32#include <drm/drm_crtc.h>
@@ -208,10 +209,6 @@ struct intel_crtc_config {
208 209
209 struct drm_display_mode requested_mode; 210 struct drm_display_mode requested_mode;
210 struct drm_display_mode adjusted_mode; 211 struct drm_display_mode adjusted_mode;
211 /* This flag must be set by the encoder's compute_config callback if it
212 * changes the crtc timings in the mode to prevent the crtc fixup from
213 * overwriting them. Currently only lvds needs that. */
214 bool timings_set;
215 /* Whether to set up the PCH/FDI. Note that we never allow sharing 212 /* Whether to set up the PCH/FDI. Note that we never allow sharing
216 * between pch encoders and cpu encoders. */ 213 * between pch encoders and cpu encoders. */
217 bool has_pch_encoder; 214 bool has_pch_encoder;
@@ -334,6 +331,13 @@ struct intel_crtc {
334 bool pch_fifo_underrun_disabled; 331 bool pch_fifo_underrun_disabled;
335}; 332};
336 333
334struct intel_plane_wm_parameters {
335 uint32_t horiz_pixels;
336 uint8_t bytes_per_pixel;
337 bool enabled;
338 bool scaled;
339};
340
337struct intel_plane { 341struct intel_plane {
338 struct drm_plane base; 342 struct drm_plane base;
339 int plane; 343 int plane;
@@ -352,20 +356,18 @@ struct intel_plane {
352 * as the other pieces of the struct may not reflect the values we want 356 * as the other pieces of the struct may not reflect the values we want
353 * for the watermark calculations. Currently only Haswell uses this. 357 * for the watermark calculations. Currently only Haswell uses this.
354 */ 358 */
355 struct { 359 struct intel_plane_wm_parameters wm;
356 bool enable;
357 uint8_t bytes_per_pixel;
358 uint32_t horiz_pixels;
359 } wm;
360 360
361 void (*update_plane)(struct drm_plane *plane, 361 void (*update_plane)(struct drm_plane *plane,
362 struct drm_crtc *crtc,
362 struct drm_framebuffer *fb, 363 struct drm_framebuffer *fb,
363 struct drm_i915_gem_object *obj, 364 struct drm_i915_gem_object *obj,
364 int crtc_x, int crtc_y, 365 int crtc_x, int crtc_y,
365 unsigned int crtc_w, unsigned int crtc_h, 366 unsigned int crtc_w, unsigned int crtc_h,
366 uint32_t x, uint32_t y, 367 uint32_t x, uint32_t y,
367 uint32_t src_w, uint32_t src_h); 368 uint32_t src_w, uint32_t src_h);
368 void (*disable_plane)(struct drm_plane *plane); 369 void (*disable_plane)(struct drm_plane *plane,
370 struct drm_crtc *crtc);
369 int (*update_colorkey)(struct drm_plane *plane, 371 int (*update_colorkey)(struct drm_plane *plane,
370 struct drm_intel_sprite_colorkey *key); 372 struct drm_intel_sprite_colorkey *key);
371 void (*get_colorkey)(struct drm_plane *plane, 373 void (*get_colorkey)(struct drm_plane *plane,
@@ -397,66 +399,6 @@ struct cxsr_latency {
397#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) 399#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
398#define to_intel_plane(x) container_of(x, struct intel_plane, base) 400#define to_intel_plane(x) container_of(x, struct intel_plane, base)
399 401
400#define DIP_HEADER_SIZE 5
401
402#define DIP_TYPE_AVI 0x82
403#define DIP_VERSION_AVI 0x2
404#define DIP_LEN_AVI 13
405#define DIP_AVI_PR_1 0
406#define DIP_AVI_PR_2 1
407#define DIP_AVI_RGB_QUANT_RANGE_DEFAULT (0 << 2)
408#define DIP_AVI_RGB_QUANT_RANGE_LIMITED (1 << 2)
409#define DIP_AVI_RGB_QUANT_RANGE_FULL (2 << 2)
410
411#define DIP_TYPE_SPD 0x83
412#define DIP_VERSION_SPD 0x1
413#define DIP_LEN_SPD 25
414#define DIP_SPD_UNKNOWN 0
415#define DIP_SPD_DSTB 0x1
416#define DIP_SPD_DVDP 0x2
417#define DIP_SPD_DVHS 0x3
418#define DIP_SPD_HDDVR 0x4
419#define DIP_SPD_DVC 0x5
420#define DIP_SPD_DSC 0x6
421#define DIP_SPD_VCD 0x7
422#define DIP_SPD_GAME 0x8
423#define DIP_SPD_PC 0x9
424#define DIP_SPD_BD 0xa
425#define DIP_SPD_SCD 0xb
426
427struct dip_infoframe {
428 uint8_t type; /* HB0 */
429 uint8_t ver; /* HB1 */
430 uint8_t len; /* HB2 - body len, not including checksum */
431 uint8_t ecc; /* Header ECC */
432 uint8_t checksum; /* PB0 */
433 union {
434 struct {
435 /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */
436 uint8_t Y_A_B_S;
437 /* PB2 - C 7:6, M 5:4, R 3:0 */
438 uint8_t C_M_R;
439 /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */
440 uint8_t ITC_EC_Q_SC;
441 /* PB4 - VIC 6:0 */
442 uint8_t VIC;
443 /* PB5 - YQ 7:6, CN 5:4, PR 3:0 */
444 uint8_t YQ_CN_PR;
445 /* PB6 to PB13 */
446 uint16_t top_bar_end;
447 uint16_t bottom_bar_start;
448 uint16_t left_bar_end;
449 uint16_t right_bar_start;
450 } __attribute__ ((packed)) avi;
451 struct {
452 uint8_t vn[8];
453 uint8_t pd[16];
454 uint8_t sdi;
455 } __attribute__ ((packed)) spd;
456 uint8_t payload[27];
457 } __attribute__ ((packed)) body;
458} __attribute__((packed));
459
460struct intel_hdmi { 402struct intel_hdmi {
461 u32 hdmi_reg; 403 u32 hdmi_reg;
462 int ddc_bus; 404 int ddc_bus;
@@ -467,7 +409,8 @@ struct intel_hdmi {
467 enum hdmi_force_audio force_audio; 409 enum hdmi_force_audio force_audio;
468 bool rgb_quant_range_selectable; 410 bool rgb_quant_range_selectable;
469 void (*write_infoframe)(struct drm_encoder *encoder, 411 void (*write_infoframe)(struct drm_encoder *encoder,
470 struct dip_infoframe *frame); 412 enum hdmi_infoframe_type type,
413 const uint8_t *frame, ssize_t len);
471 void (*set_infoframes)(struct drm_encoder *encoder, 414 void (*set_infoframes)(struct drm_encoder *encoder,
472 struct drm_display_mode *adjusted_mode); 415 struct drm_display_mode *adjusted_mode);
473}; 416};
@@ -487,6 +430,7 @@ struct intel_dp {
487 uint8_t link_bw; 430 uint8_t link_bw;
488 uint8_t lane_count; 431 uint8_t lane_count;
489 uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; 432 uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
433 uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
490 uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; 434 uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
491 struct i2c_adapter adapter; 435 struct i2c_adapter adapter;
492 struct i2c_algo_dp_aux_data algo; 436 struct i2c_algo_dp_aux_data algo;
@@ -498,6 +442,7 @@ struct intel_dp {
498 int backlight_off_delay; 442 int backlight_off_delay;
499 struct delayed_work panel_vdd_work; 443 struct delayed_work panel_vdd_work;
500 bool want_panel_vdd; 444 bool want_panel_vdd;
445 bool psr_setup_done;
501 struct intel_connector *attached_connector; 446 struct intel_connector *attached_connector;
502}; 447};
503 448
@@ -549,13 +494,6 @@ struct intel_unpin_work {
549 bool enable_stall_check; 494 bool enable_stall_check;
550}; 495};
551 496
552struct intel_fbc_work {
553 struct delayed_work work;
554 struct drm_crtc *crtc;
555 struct drm_framebuffer *fb;
556 int interval;
557};
558
559int intel_pch_rawclk(struct drm_device *dev); 497int intel_pch_rawclk(struct drm_device *dev);
560 498
561int intel_connector_update_modes(struct drm_connector *connector, 499int intel_connector_update_modes(struct drm_connector *connector,
@@ -574,7 +512,6 @@ extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
574extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); 512extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
575extern bool intel_hdmi_compute_config(struct intel_encoder *encoder, 513extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
576 struct intel_crtc_config *pipe_config); 514 struct intel_crtc_config *pipe_config);
577extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
578extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, 515extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
579 bool is_sdvob); 516 bool is_sdvob);
580extern void intel_dvo_init(struct drm_device *dev); 517extern void intel_dvo_init(struct drm_device *dev);
@@ -639,14 +576,10 @@ struct intel_set_config {
639 bool mode_changed; 576 bool mode_changed;
640}; 577};
641 578
642extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
643 int x, int y, struct drm_framebuffer *old_fb);
644extern void intel_modeset_disable(struct drm_device *dev);
645extern void intel_crtc_restore_mode(struct drm_crtc *crtc); 579extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
646extern void intel_crtc_load_lut(struct drm_crtc *crtc); 580extern void intel_crtc_load_lut(struct drm_crtc *crtc);
647extern void intel_crtc_update_dpms(struct drm_crtc *crtc); 581extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
648extern void intel_encoder_destroy(struct drm_encoder *encoder); 582extern void intel_encoder_destroy(struct drm_encoder *encoder);
649extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
650extern void intel_connector_dpms(struct drm_connector *, int mode); 583extern void intel_connector_dpms(struct drm_connector *, int mode);
651extern bool intel_connector_get_hw_state(struct intel_connector *connector); 584extern bool intel_connector_get_hw_state(struct intel_connector *connector);
652extern void intel_modeset_check_state(struct drm_device *dev); 585extern void intel_modeset_check_state(struct drm_device *dev);
@@ -712,12 +645,10 @@ extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
712extern void intel_release_load_detect_pipe(struct drm_connector *connector, 645extern void intel_release_load_detect_pipe(struct drm_connector *connector,
713 struct intel_load_detect_pipe *old); 646 struct intel_load_detect_pipe *old);
714 647
715extern void intelfb_restore(void);
716extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 648extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
717 u16 blue, int regno); 649 u16 blue, int regno);
718extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 650extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
719 u16 *blue, int regno); 651 u16 *blue, int regno);
720extern void intel_enable_clock_gating(struct drm_device *dev);
721 652
722extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, 653extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
723 struct drm_i915_gem_object *obj, 654 struct drm_i915_gem_object *obj,
@@ -728,6 +659,7 @@ extern int intel_framebuffer_init(struct drm_device *dev,
728 struct intel_framebuffer *ifb, 659 struct intel_framebuffer *ifb,
729 struct drm_mode_fb_cmd2 *mode_cmd, 660 struct drm_mode_fb_cmd2 *mode_cmd,
730 struct drm_i915_gem_object *obj); 661 struct drm_i915_gem_object *obj);
662extern void intel_framebuffer_fini(struct intel_framebuffer *fb);
731extern int intel_fbdev_init(struct drm_device *dev); 663extern int intel_fbdev_init(struct drm_device *dev);
732extern void intel_fbdev_initial_config(struct drm_device *dev); 664extern void intel_fbdev_initial_config(struct drm_device *dev);
733extern void intel_fbdev_fini(struct drm_device *dev); 665extern void intel_fbdev_fini(struct drm_device *dev);
@@ -747,6 +679,22 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
747extern void intel_fb_output_poll_changed(struct drm_device *dev); 679extern void intel_fb_output_poll_changed(struct drm_device *dev);
748extern void intel_fb_restore_mode(struct drm_device *dev); 680extern void intel_fb_restore_mode(struct drm_device *dev);
749 681
682struct intel_shared_dpll *
683intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
684
685void assert_shared_dpll(struct drm_i915_private *dev_priv,
686 struct intel_shared_dpll *pll,
687 bool state);
688#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
689#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
690void assert_pll(struct drm_i915_private *dev_priv,
691 enum pipe pipe, bool state);
692#define assert_pll_enabled(d, p) assert_pll(d, p, true)
693#define assert_pll_disabled(d, p) assert_pll(d, p, false)
694void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
695 enum pipe pipe, bool state);
696#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
697#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
750extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, 698extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
751 bool state); 699 bool state);
752#define assert_pipe_enabled(d, p) assert_pipe(d, p, true) 700#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
@@ -762,9 +710,10 @@ extern void intel_ddi_init(struct drm_device *dev, enum port port);
762 710
763/* For use by IVB LP watermark workaround in intel_sprite.c */ 711/* For use by IVB LP watermark workaround in intel_sprite.c */
764extern void intel_update_watermarks(struct drm_device *dev); 712extern void intel_update_watermarks(struct drm_device *dev);
765extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, 713extern void intel_update_sprite_watermarks(struct drm_plane *plane,
766 uint32_t sprite_width, 714 struct drm_crtc *crtc,
767 int pixel_size, bool enable); 715 uint32_t sprite_width, int pixel_size,
716 bool enabled, bool scaled);
768 717
769extern unsigned long intel_gen4_compute_page_offset(int *x, int *y, 718extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
770 unsigned int tiling_mode, 719 unsigned int tiling_mode,
@@ -780,7 +729,6 @@ extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
780extern void intel_init_pm(struct drm_device *dev); 729extern void intel_init_pm(struct drm_device *dev);
781/* FBC */ 730/* FBC */
782extern bool intel_fbc_enabled(struct drm_device *dev); 731extern bool intel_fbc_enabled(struct drm_device *dev);
783extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
784extern void intel_update_fbc(struct drm_device *dev); 732extern void intel_update_fbc(struct drm_device *dev);
785/* IPS */ 733/* IPS */
786extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); 734extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
@@ -796,8 +744,8 @@ extern void intel_init_power_well(struct drm_device *dev);
796extern void intel_set_power_well(struct drm_device *dev, bool enable); 744extern void intel_set_power_well(struct drm_device *dev, bool enable);
797extern void intel_enable_gt_powersave(struct drm_device *dev); 745extern void intel_enable_gt_powersave(struct drm_device *dev);
798extern void intel_disable_gt_powersave(struct drm_device *dev); 746extern void intel_disable_gt_powersave(struct drm_device *dev);
799extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
800extern void ironlake_teardown_rc6(struct drm_device *dev); 747extern void ironlake_teardown_rc6(struct drm_device *dev);
748void gen6_update_ring_freq(struct drm_device *dev);
801 749
802extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder, 750extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
803 enum pipe *pipe); 751 enum pipe *pipe);
@@ -825,4 +773,24 @@ extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
825 enum transcoder pch_transcoder, 773 enum transcoder pch_transcoder,
826 bool enable); 774 bool enable);
827 775
776extern void intel_edp_psr_enable(struct intel_dp *intel_dp);
777extern void intel_edp_psr_disable(struct intel_dp *intel_dp);
778extern void intel_edp_psr_update(struct drm_device *dev);
779extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
780 bool switch_to_fclk, bool allow_power_down);
781extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv);
782extern void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
783extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv,
784 uint32_t mask);
785extern void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
786extern void snb_disable_pm_irq(struct drm_i915_private *dev_priv,
787 uint32_t mask);
788extern void hsw_enable_pc8_work(struct work_struct *__work);
789extern void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
790extern void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
791extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
792extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
793extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
794extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
795
828#endif /* __INTEL_DRV_H__ */ 796#endif /* __INTEL_DRV_H__ */
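
The assert_shared_dpll/assert_pll/assert_fdi_rx_pll declarations added above all follow the same pattern: one function takes the expected boolean state, and the _enabled/_disabled spellings are thin macros over it. A minimal standalone sketch of that pattern, with check_pll_state() as a placeholder for the real register read:

#include <stdbool.h>
#include <stdio.h>

static bool pll_hw_state = true;        /* pretend hardware state */

static bool check_pll_state(int pipe)
{
        (void)pipe;
        return pll_hw_state;
}

static void assert_pll(int pipe, bool state)
{
        bool cur = check_pll_state(pipe);

        if (cur != state)
                fprintf(stderr, "PLL state mismatch on pipe %d (expected %s, got %s)\n",
                        pipe, state ? "on" : "off", cur ? "on" : "off");
}

#define assert_pll_enabled(p)  assert_pll(p, true)
#define assert_pll_disabled(p) assert_pll(p, false)

int main(void)
{
        assert_pll_enabled(0);          /* matches, stays quiet */
        assert_pll_disabled(0);         /* mismatches, prints a warning */
        return 0;
}
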
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index eb2020eb2b7e..406303b509c1 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -100,15 +100,14 @@ struct intel_dvo {
100 bool panel_wants_dither; 100 bool panel_wants_dither;
101}; 101};
102 102
103static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder) 103static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder)
104{ 104{
105 return container_of(encoder, struct intel_dvo, base.base); 105 return container_of(encoder, struct intel_dvo, base);
106} 106}
107 107
108static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector) 108static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
109{ 109{
110 return container_of(intel_attached_encoder(connector), 110 return enc_to_dvo(intel_attached_encoder(connector));
111 struct intel_dvo, base);
112} 111}
113 112
114static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) 113static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
@@ -123,7 +122,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
123{ 122{
124 struct drm_device *dev = encoder->base.dev; 123 struct drm_device *dev = encoder->base.dev;
125 struct drm_i915_private *dev_priv = dev->dev_private; 124 struct drm_i915_private *dev_priv = dev->dev_private;
126 struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); 125 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
127 u32 tmp; 126 u32 tmp;
128 127
129 tmp = I915_READ(intel_dvo->dev.dvo_reg); 128 tmp = I915_READ(intel_dvo->dev.dvo_reg);
@@ -140,7 +139,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
140 struct intel_crtc_config *pipe_config) 139 struct intel_crtc_config *pipe_config)
141{ 140{
142 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 141 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
143 struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); 142 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
144 u32 tmp, flags = 0; 143 u32 tmp, flags = 0;
145 144
146 tmp = I915_READ(intel_dvo->dev.dvo_reg); 145 tmp = I915_READ(intel_dvo->dev.dvo_reg);
@@ -159,7 +158,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
159static void intel_disable_dvo(struct intel_encoder *encoder) 158static void intel_disable_dvo(struct intel_encoder *encoder)
160{ 159{
161 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 160 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
162 struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); 161 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
163 u32 dvo_reg = intel_dvo->dev.dvo_reg; 162 u32 dvo_reg = intel_dvo->dev.dvo_reg;
164 u32 temp = I915_READ(dvo_reg); 163 u32 temp = I915_READ(dvo_reg);
165 164
@@ -171,7 +170,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder)
171static void intel_enable_dvo(struct intel_encoder *encoder) 170static void intel_enable_dvo(struct intel_encoder *encoder)
172{ 171{
173 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 172 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
174 struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); 173 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
175 u32 dvo_reg = intel_dvo->dev.dvo_reg; 174 u32 dvo_reg = intel_dvo->dev.dvo_reg;
176 u32 temp = I915_READ(dvo_reg); 175 u32 temp = I915_READ(dvo_reg);
177 176
@@ -241,11 +240,11 @@ static int intel_dvo_mode_valid(struct drm_connector *connector,
241 return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode); 240 return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
242} 241}
243 242
244static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, 243static bool intel_dvo_compute_config(struct intel_encoder *encoder,
245 const struct drm_display_mode *mode, 244 struct intel_crtc_config *pipe_config)
246 struct drm_display_mode *adjusted_mode)
247{ 245{
248 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); 246 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
247 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
249 248
250 /* If we have timings from the BIOS for the panel, put them in 249 /* If we have timings from the BIOS for the panel, put them in
251 * to the adjusted mode. The CRTC will be set up for this mode, 250 * to the adjusted mode. The CRTC will be set up for this mode,
@@ -267,23 +266,23 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
267 } 266 }
268 267
269 if (intel_dvo->dev.dev_ops->mode_fixup) 268 if (intel_dvo->dev.dev_ops->mode_fixup)
270 return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode); 269 return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev,
270 &pipe_config->requested_mode,
271 adjusted_mode);
271 272
272 return true; 273 return true;
273} 274}
274 275
275static void intel_dvo_mode_set(struct drm_encoder *encoder, 276static void intel_dvo_mode_set(struct intel_encoder *encoder)
276 struct drm_display_mode *mode,
277 struct drm_display_mode *adjusted_mode)
278{ 277{
279 struct drm_device *dev = encoder->dev; 278 struct drm_device *dev = encoder->base.dev;
280 struct drm_i915_private *dev_priv = dev->dev_private; 279 struct drm_i915_private *dev_priv = dev->dev_private;
281 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 280 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
282 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); 281 struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
283 int pipe = intel_crtc->pipe; 282 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
283 int pipe = crtc->pipe;
284 u32 dvo_val; 284 u32 dvo_val;
285 u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg; 285 u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
286 int dpll_reg = DPLL(pipe);
287 286
288 switch (dvo_reg) { 287 switch (dvo_reg) {
289 case DVOA: 288 case DVOA:
@@ -298,7 +297,9 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
298 break; 297 break;
299 } 298 }
300 299
301 intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode); 300 intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
301 &crtc->config.requested_mode,
302 adjusted_mode);
302 303
303 /* Save the data order, since I don't know what it should be set to. */ 304 /* Save the data order, since I don't know what it should be set to. */
304 dvo_val = I915_READ(dvo_reg) & 305 dvo_val = I915_READ(dvo_reg) &
@@ -314,8 +315,6 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
314 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 315 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
315 dvo_val |= DVO_VSYNC_ACTIVE_HIGH; 316 dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
316 317
317 I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED);
318
319 /*I915_WRITE(DVOB_SRCDIM, 318 /*I915_WRITE(DVOB_SRCDIM,
320 (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | 319 (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
321 (adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/ 320 (adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
@@ -335,6 +334,8 @@ static enum drm_connector_status
335intel_dvo_detect(struct drm_connector *connector, bool force) 334intel_dvo_detect(struct drm_connector *connector, bool force)
336{ 335{
337 struct intel_dvo *intel_dvo = intel_attached_dvo(connector); 336 struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
337 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
338 connector->base.id, drm_get_connector_name(connector));
338 return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev); 339 return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
339} 340}
340 341
@@ -372,11 +373,6 @@ static void intel_dvo_destroy(struct drm_connector *connector)
372 kfree(connector); 373 kfree(connector);
373} 374}
374 375
375static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
376 .mode_fixup = intel_dvo_mode_fixup,
377 .mode_set = intel_dvo_mode_set,
378};
379
380static const struct drm_connector_funcs intel_dvo_connector_funcs = { 376static const struct drm_connector_funcs intel_dvo_connector_funcs = {
381 .dpms = intel_dvo_dpms, 377 .dpms = intel_dvo_dpms,
382 .detect = intel_dvo_detect, 378 .detect = intel_dvo_detect,
@@ -392,7 +388,7 @@ static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs
392 388
393static void intel_dvo_enc_destroy(struct drm_encoder *encoder) 389static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
394{ 390{
395 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); 391 struct intel_dvo *intel_dvo = enc_to_dvo(to_intel_encoder(encoder));
396 392
397 if (intel_dvo->dev.dev_ops->destroy) 393 if (intel_dvo->dev.dev_ops->destroy)
398 intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev); 394 intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
@@ -471,6 +467,8 @@ void intel_dvo_init(struct drm_device *dev)
471 intel_encoder->enable = intel_enable_dvo; 467 intel_encoder->enable = intel_enable_dvo;
472 intel_encoder->get_hw_state = intel_dvo_get_hw_state; 468 intel_encoder->get_hw_state = intel_dvo_get_hw_state;
473 intel_encoder->get_config = intel_dvo_get_config; 469 intel_encoder->get_config = intel_dvo_get_config;
470 intel_encoder->compute_config = intel_dvo_compute_config;
471 intel_encoder->mode_set = intel_dvo_mode_set;
474 intel_connector->get_hw_state = intel_dvo_connector_get_hw_state; 472 intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
475 473
476 /* Now, try to find a controller */ 474 /* Now, try to find a controller */
@@ -537,9 +535,6 @@ void intel_dvo_init(struct drm_device *dev)
537 connector->interlace_allowed = false; 535 connector->interlace_allowed = false;
538 connector->doublescan_allowed = false; 536 connector->doublescan_allowed = false;
539 537
540 drm_encoder_helper_add(&intel_encoder->base,
541 &intel_dvo_helper_funcs);
542
543 intel_connector_attach_encoder(intel_connector, intel_encoder); 538 intel_connector_attach_encoder(intel_connector, intel_encoder);
544 if (dvo->type == INTEL_DVO_CHIP_LVDS) { 539 if (dvo->type == INTEL_DVO_CHIP_LVDS) {
545 /* For our LVDS chipsets, we should hopefully be able 540 /* For our LVDS chipsets, we should hopefully be able
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index dff669e2387f..bc2100007b21 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -139,11 +139,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
139 info->apertures->ranges[0].base = dev->mode_config.fb_base; 139 info->apertures->ranges[0].base = dev->mode_config.fb_base;
140 info->apertures->ranges[0].size = dev_priv->gtt.mappable_end; 140 info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
141 141
142 info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; 142 info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
143 info->fix.smem_len = size; 143 info->fix.smem_len = size;
144 144
145 info->screen_base = 145 info->screen_base =
146 ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset, 146 ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
147 size); 147 size);
148 if (!info->screen_base) { 148 if (!info->screen_base) {
149 ret = -ENOSPC; 149 ret = -ENOSPC;
@@ -166,9 +166,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
166 166
167 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ 167 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
168 168
169 DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", 169 DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
170 fb->width, fb->height, 170 fb->width, fb->height,
171 obj->gtt_offset, obj); 171 i915_gem_obj_ggtt_offset(obj), obj);
172 172
173 173
174 mutex_unlock(&dev->struct_mutex); 174 mutex_unlock(&dev->struct_mutex);
@@ -193,26 +193,21 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
193static void intel_fbdev_destroy(struct drm_device *dev, 193static void intel_fbdev_destroy(struct drm_device *dev,
194 struct intel_fbdev *ifbdev) 194 struct intel_fbdev *ifbdev)
195{ 195{
196 struct fb_info *info;
197 struct intel_framebuffer *ifb = &ifbdev->ifb;
198
199 if (ifbdev->helper.fbdev) { 196 if (ifbdev->helper.fbdev) {
200 info = ifbdev->helper.fbdev; 197 struct fb_info *info = ifbdev->helper.fbdev;
198
201 unregister_framebuffer(info); 199 unregister_framebuffer(info);
202 iounmap(info->screen_base); 200 iounmap(info->screen_base);
203 if (info->cmap.len) 201 if (info->cmap.len)
204 fb_dealloc_cmap(&info->cmap); 202 fb_dealloc_cmap(&info->cmap);
203
205 framebuffer_release(info); 204 framebuffer_release(info);
206 } 205 }
207 206
208 drm_fb_helper_fini(&ifbdev->helper); 207 drm_fb_helper_fini(&ifbdev->helper);
209 208
210 drm_framebuffer_unregister_private(&ifb->base); 209 drm_framebuffer_unregister_private(&ifbdev->ifb.base);
211 drm_framebuffer_cleanup(&ifb->base); 210 intel_framebuffer_fini(&ifbdev->ifb);
212 if (ifb->obj) {
213 drm_gem_object_unreference_unlocked(&ifb->obj->base);
214 ifb->obj = NULL;
215 }
216} 211}
217 212
218int intel_fbdev_init(struct drm_device *dev) 213int intel_fbdev_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2fd3fd5b943e..4148cc85bf7f 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -29,6 +29,7 @@
29#include <linux/i2c.h> 29#include <linux/i2c.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/hdmi.h>
32#include <drm/drmP.h> 33#include <drm/drmP.h>
33#include <drm/drm_crtc.h> 34#include <drm/drm_crtc.h>
34#include <drm/drm_edid.h> 35#include <drm/drm_edid.h>
@@ -66,89 +67,83 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
66 return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base); 67 return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
67} 68}
68 69
69void intel_dip_infoframe_csum(struct dip_infoframe *frame) 70static u32 g4x_infoframe_index(enum hdmi_infoframe_type type)
70{ 71{
71 uint8_t *data = (uint8_t *)frame; 72 switch (type) {
72 uint8_t sum = 0; 73 case HDMI_INFOFRAME_TYPE_AVI:
73 unsigned i;
74
75 frame->checksum = 0;
76 frame->ecc = 0;
77
78 for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++)
79 sum += data[i];
80
81 frame->checksum = 0x100 - sum;
82}
83
84static u32 g4x_infoframe_index(struct dip_infoframe *frame)
85{
86 switch (frame->type) {
87 case DIP_TYPE_AVI:
88 return VIDEO_DIP_SELECT_AVI; 74 return VIDEO_DIP_SELECT_AVI;
89 case DIP_TYPE_SPD: 75 case HDMI_INFOFRAME_TYPE_SPD:
90 return VIDEO_DIP_SELECT_SPD; 76 return VIDEO_DIP_SELECT_SPD;
77 case HDMI_INFOFRAME_TYPE_VENDOR:
78 return VIDEO_DIP_SELECT_VENDOR;
91 default: 79 default:
92 DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); 80 DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
93 return 0; 81 return 0;
94 } 82 }
95} 83}
96 84
97static u32 g4x_infoframe_enable(struct dip_infoframe *frame) 85static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type)
98{ 86{
99 switch (frame->type) { 87 switch (type) {
100 case DIP_TYPE_AVI: 88 case HDMI_INFOFRAME_TYPE_AVI:
101 return VIDEO_DIP_ENABLE_AVI; 89 return VIDEO_DIP_ENABLE_AVI;
102 case DIP_TYPE_SPD: 90 case HDMI_INFOFRAME_TYPE_SPD:
103 return VIDEO_DIP_ENABLE_SPD; 91 return VIDEO_DIP_ENABLE_SPD;
92 case HDMI_INFOFRAME_TYPE_VENDOR:
93 return VIDEO_DIP_ENABLE_VENDOR;
104 default: 94 default:
105 DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); 95 DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
106 return 0; 96 return 0;
107 } 97 }
108} 98}
109 99
110static u32 hsw_infoframe_enable(struct dip_infoframe *frame) 100static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
111{ 101{
112 switch (frame->type) { 102 switch (type) {
113 case DIP_TYPE_AVI: 103 case HDMI_INFOFRAME_TYPE_AVI:
114 return VIDEO_DIP_ENABLE_AVI_HSW; 104 return VIDEO_DIP_ENABLE_AVI_HSW;
115 case DIP_TYPE_SPD: 105 case HDMI_INFOFRAME_TYPE_SPD:
116 return VIDEO_DIP_ENABLE_SPD_HSW; 106 return VIDEO_DIP_ENABLE_SPD_HSW;
107 case HDMI_INFOFRAME_TYPE_VENDOR:
108 return VIDEO_DIP_ENABLE_VS_HSW;
117 default: 109 default:
118 DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); 110 DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
119 return 0; 111 return 0;
120 } 112 }
121} 113}
122 114
123static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame, 115static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
124 enum transcoder cpu_transcoder) 116 enum transcoder cpu_transcoder)
125{ 117{
126 switch (frame->type) { 118 switch (type) {
127 case DIP_TYPE_AVI: 119 case HDMI_INFOFRAME_TYPE_AVI:
128 return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder); 120 return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder);
129 case DIP_TYPE_SPD: 121 case HDMI_INFOFRAME_TYPE_SPD:
130 return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder); 122 return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder);
123 case HDMI_INFOFRAME_TYPE_VENDOR:
124 return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder);
131 default: 125 default:
132 DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); 126 DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
133 return 0; 127 return 0;
134 } 128 }
135} 129}
136 130
137static void g4x_write_infoframe(struct drm_encoder *encoder, 131static void g4x_write_infoframe(struct drm_encoder *encoder,
138 struct dip_infoframe *frame) 132 enum hdmi_infoframe_type type,
133 const uint8_t *frame, ssize_t len)
139{ 134{
140 uint32_t *data = (uint32_t *)frame; 135 uint32_t *data = (uint32_t *)frame;
141 struct drm_device *dev = encoder->dev; 136 struct drm_device *dev = encoder->dev;
142 struct drm_i915_private *dev_priv = dev->dev_private; 137 struct drm_i915_private *dev_priv = dev->dev_private;
143 u32 val = I915_READ(VIDEO_DIP_CTL); 138 u32 val = I915_READ(VIDEO_DIP_CTL);
144 unsigned i, len = DIP_HEADER_SIZE + frame->len; 139 int i;
145 140
146 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); 141 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
147 142
148 val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ 143 val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
149 val |= g4x_infoframe_index(frame); 144 val |= g4x_infoframe_index(type);
150 145
151 val &= ~g4x_infoframe_enable(frame); 146 val &= ~g4x_infoframe_enable(type);
152 147
153 I915_WRITE(VIDEO_DIP_CTL, val); 148 I915_WRITE(VIDEO_DIP_CTL, val);
154 149
@@ -162,7 +157,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
162 I915_WRITE(VIDEO_DIP_DATA, 0); 157 I915_WRITE(VIDEO_DIP_DATA, 0);
163 mmiowb(); 158 mmiowb();
164 159
165 val |= g4x_infoframe_enable(frame); 160 val |= g4x_infoframe_enable(type);
166 val &= ~VIDEO_DIP_FREQ_MASK; 161 val &= ~VIDEO_DIP_FREQ_MASK;
167 val |= VIDEO_DIP_FREQ_VSYNC; 162 val |= VIDEO_DIP_FREQ_VSYNC;
168 163
@@ -171,22 +166,22 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
171} 166}
172 167
173static void ibx_write_infoframe(struct drm_encoder *encoder, 168static void ibx_write_infoframe(struct drm_encoder *encoder,
174 struct dip_infoframe *frame) 169 enum hdmi_infoframe_type type,
170 const uint8_t *frame, ssize_t len)
175{ 171{
176 uint32_t *data = (uint32_t *)frame; 172 uint32_t *data = (uint32_t *)frame;
177 struct drm_device *dev = encoder->dev; 173 struct drm_device *dev = encoder->dev;
178 struct drm_i915_private *dev_priv = dev->dev_private; 174 struct drm_i915_private *dev_priv = dev->dev_private;
179 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 175 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
180 int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 176 int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
181 unsigned i, len = DIP_HEADER_SIZE + frame->len;
182 u32 val = I915_READ(reg); 177 u32 val = I915_READ(reg);
183 178
184 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); 179 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
185 180
186 val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ 181 val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
187 val |= g4x_infoframe_index(frame); 182 val |= g4x_infoframe_index(type);
188 183
189 val &= ~g4x_infoframe_enable(frame); 184 val &= ~g4x_infoframe_enable(type);
190 185
191 I915_WRITE(reg, val); 186 I915_WRITE(reg, val);
192 187
@@ -200,7 +195,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
200 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0); 195 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
201 mmiowb(); 196 mmiowb();
202 197
203 val |= g4x_infoframe_enable(frame); 198 val |= g4x_infoframe_enable(type);
204 val &= ~VIDEO_DIP_FREQ_MASK; 199 val &= ~VIDEO_DIP_FREQ_MASK;
205 val |= VIDEO_DIP_FREQ_VSYNC; 200 val |= VIDEO_DIP_FREQ_VSYNC;
206 201
@@ -209,25 +204,25 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
209} 204}
210 205
211static void cpt_write_infoframe(struct drm_encoder *encoder, 206static void cpt_write_infoframe(struct drm_encoder *encoder,
212 struct dip_infoframe *frame) 207 enum hdmi_infoframe_type type,
208 const uint8_t *frame, ssize_t len)
213{ 209{
214 uint32_t *data = (uint32_t *)frame; 210 uint32_t *data = (uint32_t *)frame;
215 struct drm_device *dev = encoder->dev; 211 struct drm_device *dev = encoder->dev;
216 struct drm_i915_private *dev_priv = dev->dev_private; 212 struct drm_i915_private *dev_priv = dev->dev_private;
217 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 213 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
218 int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 214 int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
219 unsigned i, len = DIP_HEADER_SIZE + frame->len;
220 u32 val = I915_READ(reg); 215 u32 val = I915_READ(reg);
221 216
222 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); 217 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
223 218
224 val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ 219 val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
225 val |= g4x_infoframe_index(frame); 220 val |= g4x_infoframe_index(type);
226 221
227 /* The DIP control register spec says that we need to update the AVI 222 /* The DIP control register spec says that we need to update the AVI
228 * infoframe without clearing its enable bit */ 223 * infoframe without clearing its enable bit */
229 if (frame->type != DIP_TYPE_AVI) 224 if (type != HDMI_INFOFRAME_TYPE_AVI)
230 val &= ~g4x_infoframe_enable(frame); 225 val &= ~g4x_infoframe_enable(type);
231 226
232 I915_WRITE(reg, val); 227 I915_WRITE(reg, val);
233 228
@@ -241,7 +236,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
241 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0); 236 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
242 mmiowb(); 237 mmiowb();
243 238
244 val |= g4x_infoframe_enable(frame); 239 val |= g4x_infoframe_enable(type);
245 val &= ~VIDEO_DIP_FREQ_MASK; 240 val &= ~VIDEO_DIP_FREQ_MASK;
246 val |= VIDEO_DIP_FREQ_VSYNC; 241 val |= VIDEO_DIP_FREQ_VSYNC;
247 242
@@ -250,22 +245,22 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
250} 245}
251 246
252static void vlv_write_infoframe(struct drm_encoder *encoder, 247static void vlv_write_infoframe(struct drm_encoder *encoder,
253 struct dip_infoframe *frame) 248 enum hdmi_infoframe_type type,
249 const uint8_t *frame, ssize_t len)
254{ 250{
255 uint32_t *data = (uint32_t *)frame; 251 uint32_t *data = (uint32_t *)frame;
256 struct drm_device *dev = encoder->dev; 252 struct drm_device *dev = encoder->dev;
257 struct drm_i915_private *dev_priv = dev->dev_private; 253 struct drm_i915_private *dev_priv = dev->dev_private;
258 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 254 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
259 int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); 255 int i, reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
260 unsigned i, len = DIP_HEADER_SIZE + frame->len;
261 u32 val = I915_READ(reg); 256 u32 val = I915_READ(reg);
262 257
263 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); 258 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
264 259
265 val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ 260 val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
266 val |= g4x_infoframe_index(frame); 261 val |= g4x_infoframe_index(type);
267 262
268 val &= ~g4x_infoframe_enable(frame); 263 val &= ~g4x_infoframe_enable(type);
269 264
270 I915_WRITE(reg, val); 265 I915_WRITE(reg, val);
271 266
@@ -279,7 +274,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
279 I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0); 274 I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
280 mmiowb(); 275 mmiowb();
281 276
282 val |= g4x_infoframe_enable(frame); 277 val |= g4x_infoframe_enable(type);
283 val &= ~VIDEO_DIP_FREQ_MASK; 278 val &= ~VIDEO_DIP_FREQ_MASK;
284 val |= VIDEO_DIP_FREQ_VSYNC; 279 val |= VIDEO_DIP_FREQ_VSYNC;
285 280
@@ -288,21 +283,24 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
288} 283}
289 284
290static void hsw_write_infoframe(struct drm_encoder *encoder, 285static void hsw_write_infoframe(struct drm_encoder *encoder,
291 struct dip_infoframe *frame) 286 enum hdmi_infoframe_type type,
287 const uint8_t *frame, ssize_t len)
292{ 288{
293 uint32_t *data = (uint32_t *)frame; 289 uint32_t *data = (uint32_t *)frame;
294 struct drm_device *dev = encoder->dev; 290 struct drm_device *dev = encoder->dev;
295 struct drm_i915_private *dev_priv = dev->dev_private; 291 struct drm_i915_private *dev_priv = dev->dev_private;
296 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 292 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
297 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder); 293 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
298 u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->config.cpu_transcoder); 294 u32 data_reg;
299 unsigned int i, len = DIP_HEADER_SIZE + frame->len; 295 int i;
300 u32 val = I915_READ(ctl_reg); 296 u32 val = I915_READ(ctl_reg);
301 297
298 data_reg = hsw_infoframe_data_reg(type,
299 intel_crtc->config.cpu_transcoder);
302 if (data_reg == 0) 300 if (data_reg == 0)
303 return; 301 return;
304 302
305 val &= ~hsw_infoframe_enable(frame); 303 val &= ~hsw_infoframe_enable(type);
306 I915_WRITE(ctl_reg, val); 304 I915_WRITE(ctl_reg, val);
307 305
308 mmiowb(); 306 mmiowb();
@@ -315,18 +313,48 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
315 I915_WRITE(data_reg + i, 0); 313 I915_WRITE(data_reg + i, 0);
316 mmiowb(); 314 mmiowb();
317 315
318 val |= hsw_infoframe_enable(frame); 316 val |= hsw_infoframe_enable(type);
319 I915_WRITE(ctl_reg, val); 317 I915_WRITE(ctl_reg, val);
320 POSTING_READ(ctl_reg); 318 POSTING_READ(ctl_reg);
321} 319}
322 320
323static void intel_set_infoframe(struct drm_encoder *encoder, 321/*
324 struct dip_infoframe *frame) 322 * The data we write to the DIP data buffer registers is 1 byte bigger than the
323 * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
324 * at 0). It's also a byte used by DisplayPort so the same DIP registers can be
325 * used for both technologies.
326 *
327 * DW0: Reserved/ECC/DP | HB2 | HB1 | HB0
328 * DW1: DB3 | DB2 | DB1 | DB0
329 * DW2: DB7 | DB6 | DB5 | DB4
330 * DW3: ...
331 *
332 * (HB is Header Byte, DB is Data Byte)
333 *
334 * The hdmi pack() functions don't know about that hardware specific hole so we
335 * trick them by giving an offset into the buffer and moving back the header
336 * bytes by one.
337 */
338static void intel_write_infoframe(struct drm_encoder *encoder,
339 union hdmi_infoframe *frame)
325{ 340{
326 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 341 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
342 uint8_t buffer[VIDEO_DIP_DATA_SIZE];
343 ssize_t len;
344
345 /* see comment above for the reason for this offset */
346 len = hdmi_infoframe_pack(frame, buffer + 1, sizeof(buffer) - 1);
347 if (len < 0)
348 return;
327 349
328 intel_dip_infoframe_csum(frame); 350 /* Insert the 'hole' (see big comment above) at position 3 */
329 intel_hdmi->write_infoframe(encoder, frame); 351 buffer[0] = buffer[1];
352 buffer[1] = buffer[2];
353 buffer[2] = buffer[3];
354 buffer[3] = 0;
355 len++;
356
357 intel_hdmi->write_infoframe(encoder, frame->any.type, buffer, len);
330} 358}
331 359
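[Editor's note] The block comment above describes the one-byte reserved/ECC hole the hardware expects at offset 3 of the DIP data buffer. As a standalone illustration of the byte shuffle that intel_write_infoframe() performs after hdmi_infoframe_pack() (a sketch only — the function name and buffer handling below are made up for the example, not driver code):

#include <stdint.h>
#include <stddef.h>

/*
 * Sketch: turn a frame packed at buf + 1 (HB0 HB1 HB2 DB0 DB1 ...) into the
 * DIP register layout (HB0 HB1 HB2 <reserved/ECC> DB0 DB1 ...), mirroring the
 * byte moves done above after hdmi_infoframe_pack().
 */
static size_t dip_insert_reserved_byte(uint8_t *buf, size_t packed_len)
{
	buf[0] = buf[1];	/* HB0 */
	buf[1] = buf[2];	/* HB1 */
	buf[2] = buf[3];	/* HB2 */
	buf[3] = 0;		/* reserved/ECC/DP byte consumed by the hardware */
	return packed_len + 1;	/* result is one byte longer than the packed frame */
}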
332static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, 360static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
@@ -334,40 +362,57 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
334{ 362{
335 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 363 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
336 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 364 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
337 struct dip_infoframe avi_if = { 365 union hdmi_infoframe frame;
338 .type = DIP_TYPE_AVI, 366 int ret;
339 .ver = DIP_VERSION_AVI,
340 .len = DIP_LEN_AVI,
341 };
342 367
343 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 368 ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
344 avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2; 369 adjusted_mode);
370 if (ret < 0) {
371 DRM_ERROR("couldn't fill AVI infoframe\n");
372 return;
373 }
345 374
346 if (intel_hdmi->rgb_quant_range_selectable) { 375 if (intel_hdmi->rgb_quant_range_selectable) {
347 if (intel_crtc->config.limited_color_range) 376 if (intel_crtc->config.limited_color_range)
348 avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED; 377 frame.avi.quantization_range =
378 HDMI_QUANTIZATION_RANGE_LIMITED;
349 else 379 else
350 avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL; 380 frame.avi.quantization_range =
381 HDMI_QUANTIZATION_RANGE_FULL;
351 } 382 }
352 383
353 avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode); 384 intel_write_infoframe(encoder, &frame);
354
355 intel_set_infoframe(encoder, &avi_if);
356} 385}
357 386
358static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) 387static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
359{ 388{
360 struct dip_infoframe spd_if; 389 union hdmi_infoframe frame;
390 int ret;
391
392 ret = hdmi_spd_infoframe_init(&frame.spd, "Intel", "Integrated gfx");
393 if (ret < 0) {
394 DRM_ERROR("couldn't fill SPD infoframe\n");
395 return;
396 }
397
398 frame.spd.sdi = HDMI_SPD_SDI_PC;
399
400 intel_write_infoframe(encoder, &frame);
401}
402
403static void
404intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
405 struct drm_display_mode *adjusted_mode)
406{
407 union hdmi_infoframe frame;
408 int ret;
361 409
362 memset(&spd_if, 0, sizeof(spd_if)); 410 ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
363 spd_if.type = DIP_TYPE_SPD; 411 adjusted_mode);
364 spd_if.ver = DIP_VERSION_SPD; 412 if (ret < 0)
365 spd_if.len = DIP_LEN_SPD; 413 return;
366 strcpy(spd_if.body.spd.vn, "Intel");
367 strcpy(spd_if.body.spd.pd, "Integrated gfx");
368 spd_if.body.spd.sdi = DIP_SPD_PC;
369 414
370 intel_set_infoframe(encoder, &spd_if); 415 intel_write_infoframe(encoder, &frame);
371} 416}
372 417
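[Editor's note] The AVI, SPD and HDMI vendor helpers above all follow the same fill -> pack -> write pipeline introduced by the conversion to the generic infoframe code. A minimal sketch of that flow, using only helpers visible in this hunk; the write_bytes callback is a hypothetical stand-in for the per-platform write_infoframe hook:

#include <linux/hdmi.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>

/* Sketch of the generic fill -> pack -> write flow for an AVI infoframe. */
static void example_send_avi_infoframe(struct drm_display_mode *adjusted_mode,
				       void (*write_bytes)(const u8 *buf,
							   ssize_t len))
{
	union hdmi_infoframe frame;
	u8 buf[32];	/* large enough for header + AVI payload + checksum */
	ssize_t len;

	/* 1) fill the infoframe from the adjusted mode */
	if (drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
						     adjusted_mode) < 0)
		return;

	/* 2) serialise header, payload and checksum into raw bytes */
	len = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
	if (len < 0)
		return;

	/* 3) hand the packed bytes to the platform-specific DIP writer */
	write_bytes(buf, len);
}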
373static void g4x_set_infoframes(struct drm_encoder *encoder, 418static void g4x_set_infoframes(struct drm_encoder *encoder,
@@ -432,6 +477,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
432 477
433 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); 478 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
434 intel_hdmi_set_spd_infoframe(encoder); 479 intel_hdmi_set_spd_infoframe(encoder);
480 intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
435} 481}
436 482
437static void ibx_set_infoframes(struct drm_encoder *encoder, 483static void ibx_set_infoframes(struct drm_encoder *encoder,
@@ -493,6 +539,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
493 539
494 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); 540 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
495 intel_hdmi_set_spd_infoframe(encoder); 541 intel_hdmi_set_spd_infoframe(encoder);
542 intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
496} 543}
497 544
498static void cpt_set_infoframes(struct drm_encoder *encoder, 545static void cpt_set_infoframes(struct drm_encoder *encoder,
@@ -528,6 +575,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
528 575
529 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); 576 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
530 intel_hdmi_set_spd_infoframe(encoder); 577 intel_hdmi_set_spd_infoframe(encoder);
578 intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
531} 579}
532 580
533static void vlv_set_infoframes(struct drm_encoder *encoder, 581static void vlv_set_infoframes(struct drm_encoder *encoder,
@@ -562,6 +610,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
562 610
563 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); 611 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
564 intel_hdmi_set_spd_infoframe(encoder); 612 intel_hdmi_set_spd_infoframe(encoder);
613 intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
565} 614}
566 615
567static void hsw_set_infoframes(struct drm_encoder *encoder, 616static void hsw_set_infoframes(struct drm_encoder *encoder,
@@ -589,16 +638,16 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
589 638
590 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); 639 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
591 intel_hdmi_set_spd_infoframe(encoder); 640 intel_hdmi_set_spd_infoframe(encoder);
641 intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
592} 642}
593 643
594static void intel_hdmi_mode_set(struct drm_encoder *encoder, 644static void intel_hdmi_mode_set(struct intel_encoder *encoder)
595 struct drm_display_mode *mode,
596 struct drm_display_mode *adjusted_mode)
597{ 645{
598 struct drm_device *dev = encoder->dev; 646 struct drm_device *dev = encoder->base.dev;
599 struct drm_i915_private *dev_priv = dev->dev_private; 647 struct drm_i915_private *dev_priv = dev->dev_private;
600 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 648 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
601 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 649 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
650 struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
602 u32 hdmi_val; 651 u32 hdmi_val;
603 652
604 hdmi_val = SDVO_ENCODING_HDMI; 653 hdmi_val = SDVO_ENCODING_HDMI;
@@ -609,7 +658,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
609 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 658 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
610 hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH; 659 hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
611 660
612 if (intel_crtc->config.pipe_bpp > 24) 661 if (crtc->config.pipe_bpp > 24)
613 hdmi_val |= HDMI_COLOR_FORMAT_12bpc; 662 hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
614 else 663 else
615 hdmi_val |= SDVO_COLOR_FORMAT_8bpc; 664 hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
@@ -620,21 +669,21 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
620 669
621 if (intel_hdmi->has_audio) { 670 if (intel_hdmi->has_audio) {
622 DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n", 671 DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
623 pipe_name(intel_crtc->pipe)); 672 pipe_name(crtc->pipe));
624 hdmi_val |= SDVO_AUDIO_ENABLE; 673 hdmi_val |= SDVO_AUDIO_ENABLE;
625 hdmi_val |= HDMI_MODE_SELECT_HDMI; 674 hdmi_val |= HDMI_MODE_SELECT_HDMI;
626 intel_write_eld(encoder, adjusted_mode); 675 intel_write_eld(&encoder->base, adjusted_mode);
627 } 676 }
628 677
629 if (HAS_PCH_CPT(dev)) 678 if (HAS_PCH_CPT(dev))
630 hdmi_val |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe); 679 hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
631 else 680 else
632 hdmi_val |= SDVO_PIPE_SEL(intel_crtc->pipe); 681 hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
633 682
634 I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val); 683 I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
635 POSTING_READ(intel_hdmi->hdmi_reg); 684 POSTING_READ(intel_hdmi->hdmi_reg);
636 685
637 intel_hdmi->set_infoframes(encoder, adjusted_mode); 686 intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
638} 687}
639 688
640static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, 689static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
@@ -719,14 +768,10 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
719 I915_WRITE(intel_hdmi->hdmi_reg, temp); 768 I915_WRITE(intel_hdmi->hdmi_reg, temp);
720 POSTING_READ(intel_hdmi->hdmi_reg); 769 POSTING_READ(intel_hdmi->hdmi_reg);
721 } 770 }
771}
722 772
723 if (IS_VALLEYVIEW(dev)) { 773static void vlv_enable_hdmi(struct intel_encoder *encoder)
724 struct intel_digital_port *dport = 774{
725 enc_to_dig_port(&encoder->base);
726 int channel = vlv_dport_to_channel(dport);
727
728 vlv_wait_port_ready(dev_priv, channel);
729 }
730} 775}
731 776
732static void intel_disable_hdmi(struct intel_encoder *encoder) 777static void intel_disable_hdmi(struct intel_encoder *encoder)
@@ -879,6 +924,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
879 struct edid *edid; 924 struct edid *edid;
880 enum drm_connector_status status = connector_status_disconnected; 925 enum drm_connector_status status = connector_status_disconnected;
881 926
927 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
928 connector->base.id, drm_get_connector_name(connector));
929
882 intel_hdmi->has_hdmi_sink = false; 930 intel_hdmi->has_hdmi_sink = false;
883 intel_hdmi->has_audio = false; 931 intel_hdmi->has_audio = false;
884 intel_hdmi->rgb_quant_range_selectable = false; 932 intel_hdmi->rgb_quant_range_selectable = false;
@@ -1030,6 +1078,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
1030 return; 1078 return;
1031 1079
1032 /* Enable clock channels for this port */ 1080 /* Enable clock channels for this port */
1081 mutex_lock(&dev_priv->dpio_lock);
1033 val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port)); 1082 val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
1034 val = 0; 1083 val = 0;
1035 if (pipe) 1084 if (pipe)
@@ -1060,6 +1109,11 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
1060 0x00760018); 1109 0x00760018);
1061 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 1110 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
1062 0x00400888); 1111 0x00400888);
1112 mutex_unlock(&dev_priv->dpio_lock);
1113
1114 intel_enable_hdmi(encoder);
1115
1116 vlv_wait_port_ready(dev_priv, port);
1063} 1117}
1064 1118
1065static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder) 1119static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
@@ -1073,6 +1127,7 @@ static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1073 return; 1127 return;
1074 1128
1075 /* Program Tx lane resets to default */ 1129 /* Program Tx lane resets to default */
1130 mutex_lock(&dev_priv->dpio_lock);
1076 vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 1131 vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
1077 DPIO_PCS_TX_LANE2_RESET | 1132 DPIO_PCS_TX_LANE2_RESET |
1078 DPIO_PCS_TX_LANE1_RESET); 1133 DPIO_PCS_TX_LANE1_RESET);
@@ -1091,6 +1146,7 @@ static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1091 0x00002000); 1146 0x00002000);
1092 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 1147 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
1093 DPIO_TX_OCALINIT_EN); 1148 DPIO_TX_OCALINIT_EN);
1149 mutex_unlock(&dev_priv->dpio_lock);
1094} 1150}
1095 1151
1096static void intel_hdmi_post_disable(struct intel_encoder *encoder) 1152static void intel_hdmi_post_disable(struct intel_encoder *encoder)
@@ -1113,10 +1169,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
1113 kfree(connector); 1169 kfree(connector);
1114} 1170}
1115 1171
1116static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
1117 .mode_set = intel_hdmi_mode_set,
1118};
1119
1120static const struct drm_connector_funcs intel_hdmi_connector_funcs = { 1172static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
1121 .dpms = intel_connector_dpms, 1173 .dpms = intel_connector_dpms,
1122 .detect = intel_hdmi_detect, 1174 .detect = intel_hdmi_detect,
@@ -1221,7 +1273,6 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
1221{ 1273{
1222 struct intel_digital_port *intel_dig_port; 1274 struct intel_digital_port *intel_dig_port;
1223 struct intel_encoder *intel_encoder; 1275 struct intel_encoder *intel_encoder;
1224 struct drm_encoder *encoder;
1225 struct intel_connector *intel_connector; 1276 struct intel_connector *intel_connector;
1226 1277
1227 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); 1278 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
@@ -1235,21 +1286,22 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
1235 } 1286 }
1236 1287
1237 intel_encoder = &intel_dig_port->base; 1288 intel_encoder = &intel_dig_port->base;
1238 encoder = &intel_encoder->base;
1239 1289
1240 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, 1290 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
1241 DRM_MODE_ENCODER_TMDS); 1291 DRM_MODE_ENCODER_TMDS);
1242 drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
1243 1292
1244 intel_encoder->compute_config = intel_hdmi_compute_config; 1293 intel_encoder->compute_config = intel_hdmi_compute_config;
1245 intel_encoder->enable = intel_enable_hdmi; 1294 intel_encoder->mode_set = intel_hdmi_mode_set;
1246 intel_encoder->disable = intel_disable_hdmi; 1295 intel_encoder->disable = intel_disable_hdmi;
1247 intel_encoder->get_hw_state = intel_hdmi_get_hw_state; 1296 intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
1248 intel_encoder->get_config = intel_hdmi_get_config; 1297 intel_encoder->get_config = intel_hdmi_get_config;
1249 if (IS_VALLEYVIEW(dev)) { 1298 if (IS_VALLEYVIEW(dev)) {
1250 intel_encoder->pre_enable = intel_hdmi_pre_enable;
1251 intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable; 1299 intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable;
1300 intel_encoder->pre_enable = intel_hdmi_pre_enable;
1301 intel_encoder->enable = vlv_enable_hdmi;
1252 intel_encoder->post_disable = intel_hdmi_post_disable; 1302 intel_encoder->post_disable = intel_hdmi_post_disable;
1303 } else {
1304 intel_encoder->enable = intel_enable_hdmi;
1253 } 1305 }
1254 1306
1255 intel_encoder->type = INTEL_OUTPUT_HDMI; 1307 intel_encoder->type = INTEL_OUTPUT_HDMI;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 639fe192997c..d1c1e0f7f262 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -398,6 +398,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
398 int i, reg_offset; 398 int i, reg_offset;
399 int ret = 0; 399 int ret = 0;
400 400
401 intel_aux_display_runtime_get(dev_priv);
401 mutex_lock(&dev_priv->gmbus_mutex); 402 mutex_lock(&dev_priv->gmbus_mutex);
402 403
403 if (bus->force_bit) { 404 if (bus->force_bit) {
@@ -497,6 +498,7 @@ timeout:
497 498
498out: 499out:
499 mutex_unlock(&dev_priv->gmbus_mutex); 500 mutex_unlock(&dev_priv->gmbus_mutex);
501 intel_aux_display_runtime_put(dev_priv);
500 return ret; 502 return ret;
501} 503}
502 504
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 61348eae2f04..4d33278e31fb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -122,17 +122,25 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
122 * This is an exception to the general rule that mode_set doesn't turn 122 * This is an exception to the general rule that mode_set doesn't turn
123 * things on. 123 * things on.
124 */ 124 */
125static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder) 125static void intel_pre_enable_lvds(struct intel_encoder *encoder)
126{ 126{
127 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 127 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
128 struct drm_device *dev = encoder->base.dev; 128 struct drm_device *dev = encoder->base.dev;
129 struct drm_i915_private *dev_priv = dev->dev_private; 129 struct drm_i915_private *dev_priv = dev->dev_private;
130 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 130 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
131 struct drm_display_mode *fixed_mode = 131 struct drm_display_mode *fixed_mode =
132 lvds_encoder->attached_connector->base.panel.fixed_mode; 132 lvds_encoder->attached_connector->base.panel.fixed_mode;
133 int pipe = intel_crtc->pipe; 133 int pipe = crtc->pipe;
134 u32 temp; 134 u32 temp;
135 135
136 if (HAS_PCH_SPLIT(dev)) {
137 assert_fdi_rx_pll_disabled(dev_priv, pipe);
138 assert_shared_dpll_disabled(dev_priv,
139 intel_crtc_to_shared_dpll(crtc));
140 } else {
141 assert_pll_disabled(dev_priv, pipe);
142 }
143
136 temp = I915_READ(lvds_encoder->reg); 144 temp = I915_READ(lvds_encoder->reg);
137 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; 145 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
138 146
@@ -149,7 +157,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
149 157
150 /* set the corresponding LVDS_BORDER bit */ 158 /* set the corresponding LVDS_BORDER bit */
151 temp &= ~LVDS_BORDER_ENABLE; 159 temp &= ~LVDS_BORDER_ENABLE;
152 temp |= intel_crtc->config.gmch_pfit.lvds_border_bits; 160 temp |= crtc->config.gmch_pfit.lvds_border_bits;
153 /* Set the B0-B3 data pairs corresponding to whether we're going to 161 /* Set the B0-B3 data pairs corresponding to whether we're going to
154 * set the DPLLs for dual-channel mode or not. 162 * set the DPLLs for dual-channel mode or not.
155 */ 163 */
@@ -169,8 +177,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
169 if (INTEL_INFO(dev)->gen == 4) { 177 if (INTEL_INFO(dev)->gen == 4) {
170 /* Bspec wording suggests that LVDS port dithering only exists 178 /* Bspec wording suggests that LVDS port dithering only exists
171 * for 18bpp panels. */ 179 * for 18bpp panels. */
172 if (intel_crtc->config.dither && 180 if (crtc->config.dither && crtc->config.pipe_bpp == 18)
173 intel_crtc->config.pipe_bpp == 18)
174 temp |= LVDS_ENABLE_DITHER; 181 temp |= LVDS_ENABLE_DITHER;
175 else 182 else
176 temp &= ~LVDS_ENABLE_DITHER; 183 temp &= ~LVDS_ENABLE_DITHER;
@@ -312,14 +319,12 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
312 return true; 319 return true;
313} 320}
314 321
315static void intel_lvds_mode_set(struct drm_encoder *encoder, 322static void intel_lvds_mode_set(struct intel_encoder *encoder)
316 struct drm_display_mode *mode,
317 struct drm_display_mode *adjusted_mode)
318{ 323{
319 /* 324 /*
320 * The LVDS pin pair will already have been turned on in the 325 * We don't do anything here, the LVDS port is fully set up in the pre
321 * intel_crtc_mode_set since it has a large impact on the DPLL 326 * enable hook - the ordering constraints for enabling the lvds port vs.
322 * settings. 327 * enabling the display pll are too strict.
323 */ 328 */
324} 329}
325 330
@@ -336,6 +341,9 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
336 struct drm_device *dev = connector->dev; 341 struct drm_device *dev = connector->dev;
337 enum drm_connector_status status; 342 enum drm_connector_status status;
338 343
344 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
345 connector->base.id, drm_get_connector_name(connector));
346
339 status = intel_panel_detect(dev); 347 status = intel_panel_detect(dev);
340 if (status != connector_status_unknown) 348 if (status != connector_status_unknown)
341 return status; 349 return status;
@@ -497,10 +505,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
497 return 0; 505 return 0;
498} 506}
499 507
500static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
501 .mode_set = intel_lvds_mode_set,
502};
503
504static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { 508static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
505 .get_modes = intel_lvds_get_modes, 509 .get_modes = intel_lvds_get_modes,
506 .mode_valid = intel_lvds_mode_valid, 510 .mode_valid = intel_lvds_mode_valid,
@@ -959,8 +963,9 @@ void intel_lvds_init(struct drm_device *dev)
959 DRM_MODE_ENCODER_LVDS); 963 DRM_MODE_ENCODER_LVDS);
960 964
961 intel_encoder->enable = intel_enable_lvds; 965 intel_encoder->enable = intel_enable_lvds;
962 intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds; 966 intel_encoder->pre_enable = intel_pre_enable_lvds;
963 intel_encoder->compute_config = intel_lvds_compute_config; 967 intel_encoder->compute_config = intel_lvds_compute_config;
968 intel_encoder->mode_set = intel_lvds_mode_set;
964 intel_encoder->disable = intel_disable_lvds; 969 intel_encoder->disable = intel_disable_lvds;
965 intel_encoder->get_hw_state = intel_lvds_get_hw_state; 970 intel_encoder->get_hw_state = intel_lvds_get_hw_state;
966 intel_encoder->get_config = intel_lvds_get_config; 971 intel_encoder->get_config = intel_lvds_get_config;
@@ -977,7 +982,6 @@ void intel_lvds_init(struct drm_device *dev)
977 else 982 else
978 intel_encoder->crtc_mask = (1 << 1); 983 intel_encoder->crtc_mask = (1 << 1);
979 984
980 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
981 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); 985 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
982 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 986 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
983 connector->interlace_allowed = false; 987 connector->interlace_allowed = false;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a3698812e9c7..ddfd0aefe0c0 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
196 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr; 196 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
197 else 197 else
198 regs = io_mapping_map_wc(dev_priv->gtt.mappable, 198 regs = io_mapping_map_wc(dev_priv->gtt.mappable,
199 overlay->reg_bo->gtt_offset); 199 i915_gem_obj_ggtt_offset(overlay->reg_bo));
200 200
201 return regs; 201 return regs;
202} 202}
@@ -740,7 +740,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
740 swidth = params->src_w; 740 swidth = params->src_w;
741 swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width); 741 swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
742 sheight = params->src_h; 742 sheight = params->src_h;
743 iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y); 743 iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
744 ostride = params->stride_Y; 744 ostride = params->stride_Y;
745 745
746 if (params->format & I915_OVERLAY_YUV_PLANAR) { 746 if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -754,8 +754,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
754 params->src_w/uv_hscale); 754 params->src_w/uv_hscale);
755 swidthsw |= max_t(u32, tmp_U, tmp_V) << 16; 755 swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
756 sheight |= (params->src_h/uv_vscale) << 16; 756 sheight |= (params->src_h/uv_vscale) << 16;
757 iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U); 757 iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
758 iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V); 758 iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
759 ostride |= params->stride_UV << 16; 759 ostride |= params->stride_UV << 16;
760 } 760 }
761 761
@@ -1333,7 +1333,9 @@ void intel_setup_overlay(struct drm_device *dev)
1333 1333
1334 overlay->dev = dev; 1334 overlay->dev = dev;
1335 1335
1336 reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE); 1336 reg_bo = NULL;
1337 if (!OVERLAY_NEEDS_PHYSICAL(dev))
1338 reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
1337 if (reg_bo == NULL) 1339 if (reg_bo == NULL)
1338 reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); 1340 reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
1339 if (reg_bo == NULL) 1341 if (reg_bo == NULL)
@@ -1350,12 +1352,12 @@ void intel_setup_overlay(struct drm_device *dev)
1350 } 1352 }
1351 overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; 1353 overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
1352 } else { 1354 } else {
1353 ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false); 1355 ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, true, false);
1354 if (ret) { 1356 if (ret) {
1355 DRM_ERROR("failed to pin overlay register bo\n"); 1357 DRM_ERROR("failed to pin overlay register bo\n");
1356 goto out_free_bo; 1358 goto out_free_bo;
1357 } 1359 }
1358 overlay->flip_addr = reg_bo->gtt_offset; 1360 overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);
1359 1361
1360 ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); 1362 ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
1361 if (ret) { 1363 if (ret) {
@@ -1412,9 +1414,6 @@ void intel_cleanup_overlay(struct drm_device *dev)
1412 kfree(dev_priv->overlay); 1414 kfree(dev_priv->overlay);
1413} 1415}
1414 1416
1415#ifdef CONFIG_DEBUG_FS
1416#include <linux/seq_file.h>
1417
1418struct intel_overlay_error_state { 1417struct intel_overlay_error_state {
1419 struct overlay_registers regs; 1418 struct overlay_registers regs;
1420 unsigned long base; 1419 unsigned long base;
@@ -1435,7 +1434,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1435 overlay->reg_bo->phys_obj->handle->vaddr; 1434 overlay->reg_bo->phys_obj->handle->vaddr;
1436 else 1435 else
1437 regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, 1436 regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
1438 overlay->reg_bo->gtt_offset); 1437 i915_gem_obj_ggtt_offset(overlay->reg_bo));
1439 1438
1440 return regs; 1439 return regs;
1441} 1440}
@@ -1468,7 +1467,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
1468 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1467 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
1469 error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr; 1468 error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
1470 else 1469 else
1471 error->base = overlay->reg_bo->gtt_offset; 1470 error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
1472 1471
1473 regs = intel_overlay_map_regs_atomic(overlay); 1472 regs = intel_overlay_map_regs_atomic(overlay);
1474 if (!regs) 1473 if (!regs)
@@ -1537,4 +1536,3 @@ intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
1537 P(UVSCALEV); 1536 P(UVSCALEV);
1538#undef P 1537#undef P
1539} 1538}
1540#endif
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 5950888ae1d0..a43c33bc4a35 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -194,9 +194,6 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
194 adjusted_mode->vdisplay == mode->vdisplay) 194 adjusted_mode->vdisplay == mode->vdisplay)
195 goto out; 195 goto out;
196 196
197 drm_mode_set_crtcinfo(adjusted_mode, 0);
198 pipe_config->timings_set = true;
199
200 switch (fitting_mode) { 197 switch (fitting_mode) {
201 case DRM_MODE_SCALE_CENTER: 198 case DRM_MODE_SCALE_CENTER:
202 /* 199 /*
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index b0e4a0bd1313..46056820d1d2 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -30,8 +30,7 @@
30#include "intel_drv.h" 30#include "intel_drv.h"
31#include "../../../platform/x86/intel_ips.h" 31#include "../../../platform/x86/intel_ips.h"
32#include <linux/module.h> 32#include <linux/module.h>
33 33#include <drm/i915_powerwell.h>
34#define FORCEWAKE_ACK_TIMEOUT_MS 2
35 34
36/* FBC, or Frame Buffer Compression, is a technique employed to compress the 35/* FBC, or Frame Buffer Compression, is a technique employed to compress the
37 * framebuffer contents in-memory, aiming at reducing the required bandwidth 36 * framebuffer contents in-memory, aiming at reducing the required bandwidth
@@ -86,7 +85,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
86 int plane, i; 85 int plane, i;
87 u32 fbc_ctl, fbc_ctl2; 86 u32 fbc_ctl, fbc_ctl2;
88 87
89 cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; 88 cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
90 if (fb->pitches[0] < cfb_pitch) 89 if (fb->pitches[0] < cfb_pitch)
91 cfb_pitch = fb->pitches[0]; 90 cfb_pitch = fb->pitches[0];
92 91
@@ -217,7 +216,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
217 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | 216 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
218 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); 217 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
219 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); 218 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
220 I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID); 219 I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
221 /* enable it... */ 220 /* enable it... */
222 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); 221 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
223 222
@@ -274,7 +273,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
274 struct drm_i915_gem_object *obj = intel_fb->obj; 273 struct drm_i915_gem_object *obj = intel_fb->obj;
275 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 274 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
276 275
277 I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset); 276 I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
278 277
279 I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X | 278 I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
280 IVB_DPFC_CTL_FENCE_EN | 279 IVB_DPFC_CTL_FENCE_EN |
@@ -325,7 +324,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
325 struct drm_i915_private *dev_priv = dev->dev_private; 324 struct drm_i915_private *dev_priv = dev->dev_private;
326 325
327 mutex_lock(&dev->struct_mutex); 326 mutex_lock(&dev->struct_mutex);
328 if (work == dev_priv->fbc_work) { 327 if (work == dev_priv->fbc.fbc_work) {
329 /* Double check that we haven't switched fb without cancelling 328 /* Double check that we haven't switched fb without cancelling
330 * the prior work. 329 * the prior work.
331 */ 330 */
@@ -333,12 +332,12 @@ static void intel_fbc_work_fn(struct work_struct *__work)
333 dev_priv->display.enable_fbc(work->crtc, 332 dev_priv->display.enable_fbc(work->crtc,
334 work->interval); 333 work->interval);
335 334
336 dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane; 335 dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
337 dev_priv->cfb_fb = work->crtc->fb->base.id; 336 dev_priv->fbc.fb_id = work->crtc->fb->base.id;
338 dev_priv->cfb_y = work->crtc->y; 337 dev_priv->fbc.y = work->crtc->y;
339 } 338 }
340 339
341 dev_priv->fbc_work = NULL; 340 dev_priv->fbc.fbc_work = NULL;
342 } 341 }
343 mutex_unlock(&dev->struct_mutex); 342 mutex_unlock(&dev->struct_mutex);
344 343
@@ -347,28 +346,28 @@ static void intel_fbc_work_fn(struct work_struct *__work)
347 346
348static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv) 347static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
349{ 348{
350 if (dev_priv->fbc_work == NULL) 349 if (dev_priv->fbc.fbc_work == NULL)
351 return; 350 return;
352 351
353 DRM_DEBUG_KMS("cancelling pending FBC enable\n"); 352 DRM_DEBUG_KMS("cancelling pending FBC enable\n");
354 353
355 /* Synchronisation is provided by struct_mutex and checking of 354 /* Synchronisation is provided by struct_mutex and checking of
356 * dev_priv->fbc_work, so we can perform the cancellation 355 * dev_priv->fbc.fbc_work, so we can perform the cancellation
357 * entirely asynchronously. 356 * entirely asynchronously.
358 */ 357 */
359 if (cancel_delayed_work(&dev_priv->fbc_work->work)) 358 if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
360 /* tasklet was killed before being run, clean up */ 359 /* tasklet was killed before being run, clean up */
361 kfree(dev_priv->fbc_work); 360 kfree(dev_priv->fbc.fbc_work);
362 361
363 /* Mark the work as no longer wanted so that if it does 362 /* Mark the work as no longer wanted so that if it does
364 * wake-up (because the work was already running and waiting 363 * wake-up (because the work was already running and waiting
365 * for our mutex), it will discover that it is no longer 364 * for our mutex), it will discover that it is no longer
366 * necessary to run. 365 * necessary to run.
367 */ 366 */
368 dev_priv->fbc_work = NULL; 367 dev_priv->fbc.fbc_work = NULL;
369} 368}
370 369
371void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 370static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
372{ 371{
373 struct intel_fbc_work *work; 372 struct intel_fbc_work *work;
374 struct drm_device *dev = crtc->dev; 373 struct drm_device *dev = crtc->dev;
@@ -381,6 +380,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
381 380
382 work = kzalloc(sizeof *work, GFP_KERNEL); 381 work = kzalloc(sizeof *work, GFP_KERNEL);
383 if (work == NULL) { 382 if (work == NULL) {
383 DRM_ERROR("Failed to allocate FBC work structure\n");
384 dev_priv->display.enable_fbc(crtc, interval); 384 dev_priv->display.enable_fbc(crtc, interval);
385 return; 385 return;
386 } 386 }
@@ -390,9 +390,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
390 work->interval = interval; 390 work->interval = interval;
391 INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn); 391 INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
392 392
393 dev_priv->fbc_work = work; 393 dev_priv->fbc.fbc_work = work;
394
395 DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
396 394
397 /* Delay the actual enabling to let pageflipping cease and the 395 /* Delay the actual enabling to let pageflipping cease and the
398 * display to settle before starting the compression. Note that 396 * display to settle before starting the compression. Note that
@@ -404,6 +402,8 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
404 * following the termination of the page-flipping sequence 402 * following the termination of the page-flipping sequence
405 * and indeed performing the enable as a co-routine and not 403 * and indeed performing the enable as a co-routine and not
406 * waiting synchronously upon the vblank. 404 * waiting synchronously upon the vblank.
405 *
406 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
407 */ 407 */
408 schedule_delayed_work(&work->work, msecs_to_jiffies(50)); 408 schedule_delayed_work(&work->work, msecs_to_jiffies(50));
409} 409}
@@ -418,7 +418,17 @@ void intel_disable_fbc(struct drm_device *dev)
418 return; 418 return;
419 419
420 dev_priv->display.disable_fbc(dev); 420 dev_priv->display.disable_fbc(dev);
421 dev_priv->cfb_plane = -1; 421 dev_priv->fbc.plane = -1;
422}
423
424static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
425 enum no_fbc_reason reason)
426{
427 if (dev_priv->fbc.no_fbc_reason == reason)
428 return false;
429
430 dev_priv->fbc.no_fbc_reason = reason;
431 return true;
422} 432}
423 433
424/** 434/**
@@ -448,14 +458,18 @@ void intel_update_fbc(struct drm_device *dev)
448 struct drm_framebuffer *fb; 458 struct drm_framebuffer *fb;
449 struct intel_framebuffer *intel_fb; 459 struct intel_framebuffer *intel_fb;
450 struct drm_i915_gem_object *obj; 460 struct drm_i915_gem_object *obj;
451 int enable_fbc;
452 unsigned int max_hdisplay, max_vdisplay; 461 unsigned int max_hdisplay, max_vdisplay;
453 462
454 if (!i915_powersave) 463 if (!I915_HAS_FBC(dev)) {
464 set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
455 return; 465 return;
466 }
456 467
457 if (!I915_HAS_FBC(dev)) 468 if (!i915_powersave) {
469 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
470 DRM_DEBUG_KMS("fbc disabled per module param\n");
458 return; 471 return;
472 }
459 473
460 /* 474 /*
461 * If FBC is already on, we just have to verify that we can 475 * If FBC is already on, we just have to verify that we can
@@ -470,8 +484,8 @@ void intel_update_fbc(struct drm_device *dev)
470 if (intel_crtc_active(tmp_crtc) && 484 if (intel_crtc_active(tmp_crtc) &&
471 !to_intel_crtc(tmp_crtc)->primary_disabled) { 485 !to_intel_crtc(tmp_crtc)->primary_disabled) {
472 if (crtc) { 486 if (crtc) {
473 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); 487 if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
474 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; 488 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
475 goto out_disable; 489 goto out_disable;
476 } 490 }
477 crtc = tmp_crtc; 491 crtc = tmp_crtc;
@@ -479,8 +493,8 @@ void intel_update_fbc(struct drm_device *dev)
479 } 493 }
480 494
481 if (!crtc || crtc->fb == NULL) { 495 if (!crtc || crtc->fb == NULL) {
482 DRM_DEBUG_KMS("no output, disabling\n"); 496 if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
483 dev_priv->no_fbc_reason = FBC_NO_OUTPUT; 497 DRM_DEBUG_KMS("no output, disabling\n");
484 goto out_disable; 498 goto out_disable;
485 } 499 }
486 500
@@ -489,23 +503,22 @@ void intel_update_fbc(struct drm_device *dev)
489 intel_fb = to_intel_framebuffer(fb); 503 intel_fb = to_intel_framebuffer(fb);
490 obj = intel_fb->obj; 504 obj = intel_fb->obj;
491 505
492 enable_fbc = i915_enable_fbc; 506 if (i915_enable_fbc < 0 &&
493 if (enable_fbc < 0) { 507 INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
494 DRM_DEBUG_KMS("fbc set to per-chip default\n"); 508 if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
495 enable_fbc = 1; 509 DRM_DEBUG_KMS("disabled per chip default\n");
496 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) 510 goto out_disable;
497 enable_fbc = 0;
498 } 511 }
499 if (!enable_fbc) { 512 if (!i915_enable_fbc) {
500 DRM_DEBUG_KMS("fbc disabled per module param\n"); 513 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
501 dev_priv->no_fbc_reason = FBC_MODULE_PARAM; 514 DRM_DEBUG_KMS("fbc disabled per module param\n");
502 goto out_disable; 515 goto out_disable;
503 } 516 }
504 if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || 517 if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
505 (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { 518 (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
506 DRM_DEBUG_KMS("mode incompatible with compression, " 519 if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
507 "disabling\n"); 520 DRM_DEBUG_KMS("mode incompatible with compression, "
508 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; 521 "disabling\n");
509 goto out_disable; 522 goto out_disable;
510 } 523 }
511 524
@@ -518,14 +531,14 @@ void intel_update_fbc(struct drm_device *dev)
518 } 531 }
519 if ((crtc->mode.hdisplay > max_hdisplay) || 532 if ((crtc->mode.hdisplay > max_hdisplay) ||
520 (crtc->mode.vdisplay > max_vdisplay)) { 533 (crtc->mode.vdisplay > max_vdisplay)) {
521 DRM_DEBUG_KMS("mode too large for compression, disabling\n"); 534 if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
522 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; 535 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
523 goto out_disable; 536 goto out_disable;
524 } 537 }
525 if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) && 538 if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
526 intel_crtc->plane != 0) { 539 intel_crtc->plane != 0) {
527 DRM_DEBUG_KMS("plane not 0, disabling compression\n"); 540 if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
528 dev_priv->no_fbc_reason = FBC_BAD_PLANE; 541 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
529 goto out_disable; 542 goto out_disable;
530 } 543 }
531 544
@@ -534,8 +547,8 @@ void intel_update_fbc(struct drm_device *dev)
534 */ 547 */
535 if (obj->tiling_mode != I915_TILING_X || 548 if (obj->tiling_mode != I915_TILING_X ||
536 obj->fence_reg == I915_FENCE_REG_NONE) { 549 obj->fence_reg == I915_FENCE_REG_NONE) {
537 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n"); 550 if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
538 dev_priv->no_fbc_reason = FBC_NOT_TILED; 551 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
539 goto out_disable; 552 goto out_disable;
540 } 553 }
541 554
@@ -544,8 +557,8 @@ void intel_update_fbc(struct drm_device *dev)
544 goto out_disable; 557 goto out_disable;
545 558
546 if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) { 559 if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
547 DRM_DEBUG_KMS("framebuffer too large, disabling compression\n"); 560 if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
548 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; 561 DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
549 goto out_disable; 562 goto out_disable;
550 } 563 }
551 564
@@ -554,9 +567,9 @@ void intel_update_fbc(struct drm_device *dev)
554 * cannot be unpinned (and have its GTT offset and fence revoked) 567 * cannot be unpinned (and have its GTT offset and fence revoked)
555 * without first being decoupled from the scanout and FBC disabled. 568 * without first being decoupled from the scanout and FBC disabled.
556 */ 569 */
557 if (dev_priv->cfb_plane == intel_crtc->plane && 570 if (dev_priv->fbc.plane == intel_crtc->plane &&
558 dev_priv->cfb_fb == fb->base.id && 571 dev_priv->fbc.fb_id == fb->base.id &&
559 dev_priv->cfb_y == crtc->y) 572 dev_priv->fbc.y == crtc->y)
560 return; 573 return;
561 574
562 if (intel_fbc_enabled(dev)) { 575 if (intel_fbc_enabled(dev)) {
@@ -588,6 +601,7 @@ void intel_update_fbc(struct drm_device *dev)
588 } 601 }
589 602
590 intel_enable_fbc(crtc, 500); 603 intel_enable_fbc(crtc, 500);
604 dev_priv->fbc.no_fbc_reason = FBC_OK;
591 return; 605 return;
592 606
593out_disable: 607out_disable:
@@ -1666,9 +1680,6 @@ static void i830_update_wm(struct drm_device *dev)
1666 I915_WRITE(FW_BLC, fwater_lo); 1680 I915_WRITE(FW_BLC, fwater_lo);
1667} 1681}
1668 1682
1669#define ILK_LP0_PLANE_LATENCY 700
1670#define ILK_LP0_CURSOR_LATENCY 1300
1671
1672/* 1683/*
1673 * Check the wm result. 1684 * Check the wm result.
1674 * 1685 *
@@ -1783,9 +1794,9 @@ static void ironlake_update_wm(struct drm_device *dev)
1783 enabled = 0; 1794 enabled = 0;
1784 if (g4x_compute_wm0(dev, PIPE_A, 1795 if (g4x_compute_wm0(dev, PIPE_A,
1785 &ironlake_display_wm_info, 1796 &ironlake_display_wm_info,
1786 ILK_LP0_PLANE_LATENCY, 1797 dev_priv->wm.pri_latency[0] * 100,
1787 &ironlake_cursor_wm_info, 1798 &ironlake_cursor_wm_info,
1788 ILK_LP0_CURSOR_LATENCY, 1799 dev_priv->wm.cur_latency[0] * 100,
1789 &plane_wm, &cursor_wm)) { 1800 &plane_wm, &cursor_wm)) {
1790 I915_WRITE(WM0_PIPEA_ILK, 1801 I915_WRITE(WM0_PIPEA_ILK,
1791 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 1802 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
@@ -1797,9 +1808,9 @@ static void ironlake_update_wm(struct drm_device *dev)
1797 1808
1798 if (g4x_compute_wm0(dev, PIPE_B, 1809 if (g4x_compute_wm0(dev, PIPE_B,
1799 &ironlake_display_wm_info, 1810 &ironlake_display_wm_info,
1800 ILK_LP0_PLANE_LATENCY, 1811 dev_priv->wm.pri_latency[0] * 100,
1801 &ironlake_cursor_wm_info, 1812 &ironlake_cursor_wm_info,
1802 ILK_LP0_CURSOR_LATENCY, 1813 dev_priv->wm.cur_latency[0] * 100,
1803 &plane_wm, &cursor_wm)) { 1814 &plane_wm, &cursor_wm)) {
1804 I915_WRITE(WM0_PIPEB_ILK, 1815 I915_WRITE(WM0_PIPEB_ILK,
1805 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 1816 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
@@ -1823,7 +1834,7 @@ static void ironlake_update_wm(struct drm_device *dev)
1823 1834
1824 /* WM1 */ 1835 /* WM1 */
1825 if (!ironlake_compute_srwm(dev, 1, enabled, 1836 if (!ironlake_compute_srwm(dev, 1, enabled,
1826 ILK_READ_WM1_LATENCY() * 500, 1837 dev_priv->wm.pri_latency[1] * 500,
1827 &ironlake_display_srwm_info, 1838 &ironlake_display_srwm_info,
1828 &ironlake_cursor_srwm_info, 1839 &ironlake_cursor_srwm_info,
1829 &fbc_wm, &plane_wm, &cursor_wm)) 1840 &fbc_wm, &plane_wm, &cursor_wm))
@@ -1831,14 +1842,14 @@ static void ironlake_update_wm(struct drm_device *dev)
1831 1842
1832 I915_WRITE(WM1_LP_ILK, 1843 I915_WRITE(WM1_LP_ILK,
1833 WM1_LP_SR_EN | 1844 WM1_LP_SR_EN |
1834 (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | 1845 (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
1835 (fbc_wm << WM1_LP_FBC_SHIFT) | 1846 (fbc_wm << WM1_LP_FBC_SHIFT) |
1836 (plane_wm << WM1_LP_SR_SHIFT) | 1847 (plane_wm << WM1_LP_SR_SHIFT) |
1837 cursor_wm); 1848 cursor_wm);
1838 1849
1839 /* WM2 */ 1850 /* WM2 */
1840 if (!ironlake_compute_srwm(dev, 2, enabled, 1851 if (!ironlake_compute_srwm(dev, 2, enabled,
1841 ILK_READ_WM2_LATENCY() * 500, 1852 dev_priv->wm.pri_latency[2] * 500,
1842 &ironlake_display_srwm_info, 1853 &ironlake_display_srwm_info,
1843 &ironlake_cursor_srwm_info, 1854 &ironlake_cursor_srwm_info,
1844 &fbc_wm, &plane_wm, &cursor_wm)) 1855 &fbc_wm, &plane_wm, &cursor_wm))
@@ -1846,7 +1857,7 @@ static void ironlake_update_wm(struct drm_device *dev)
1846 1857
1847 I915_WRITE(WM2_LP_ILK, 1858 I915_WRITE(WM2_LP_ILK,
1848 WM2_LP_EN | 1859 WM2_LP_EN |
1849 (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | 1860 (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
1850 (fbc_wm << WM1_LP_FBC_SHIFT) | 1861 (fbc_wm << WM1_LP_FBC_SHIFT) |
1851 (plane_wm << WM1_LP_SR_SHIFT) | 1862 (plane_wm << WM1_LP_SR_SHIFT) |
1852 cursor_wm); 1863 cursor_wm);
@@ -1860,7 +1871,7 @@ static void ironlake_update_wm(struct drm_device *dev)
1860static void sandybridge_update_wm(struct drm_device *dev) 1871static void sandybridge_update_wm(struct drm_device *dev)
1861{ 1872{
1862 struct drm_i915_private *dev_priv = dev->dev_private; 1873 struct drm_i915_private *dev_priv = dev->dev_private;
1863 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ 1874 int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
1864 u32 val; 1875 u32 val;
1865 int fbc_wm, plane_wm, cursor_wm; 1876 int fbc_wm, plane_wm, cursor_wm;
1866 unsigned int enabled; 1877 unsigned int enabled;
@@ -1915,7 +1926,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
1915 1926
1916 /* WM1 */ 1927 /* WM1 */
1917 if (!ironlake_compute_srwm(dev, 1, enabled, 1928 if (!ironlake_compute_srwm(dev, 1, enabled,
1918 SNB_READ_WM1_LATENCY() * 500, 1929 dev_priv->wm.pri_latency[1] * 500,
1919 &sandybridge_display_srwm_info, 1930 &sandybridge_display_srwm_info,
1920 &sandybridge_cursor_srwm_info, 1931 &sandybridge_cursor_srwm_info,
1921 &fbc_wm, &plane_wm, &cursor_wm)) 1932 &fbc_wm, &plane_wm, &cursor_wm))
@@ -1923,14 +1934,14 @@ static void sandybridge_update_wm(struct drm_device *dev)
1923 1934
1924 I915_WRITE(WM1_LP_ILK, 1935 I915_WRITE(WM1_LP_ILK,
1925 WM1_LP_SR_EN | 1936 WM1_LP_SR_EN |
1926 (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | 1937 (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
1927 (fbc_wm << WM1_LP_FBC_SHIFT) | 1938 (fbc_wm << WM1_LP_FBC_SHIFT) |
1928 (plane_wm << WM1_LP_SR_SHIFT) | 1939 (plane_wm << WM1_LP_SR_SHIFT) |
1929 cursor_wm); 1940 cursor_wm);
1930 1941
1931 /* WM2 */ 1942 /* WM2 */
1932 if (!ironlake_compute_srwm(dev, 2, enabled, 1943 if (!ironlake_compute_srwm(dev, 2, enabled,
1933 SNB_READ_WM2_LATENCY() * 500, 1944 dev_priv->wm.pri_latency[2] * 500,
1934 &sandybridge_display_srwm_info, 1945 &sandybridge_display_srwm_info,
1935 &sandybridge_cursor_srwm_info, 1946 &sandybridge_cursor_srwm_info,
1936 &fbc_wm, &plane_wm, &cursor_wm)) 1947 &fbc_wm, &plane_wm, &cursor_wm))
@@ -1938,14 +1949,14 @@ static void sandybridge_update_wm(struct drm_device *dev)
1938 1949
1939 I915_WRITE(WM2_LP_ILK, 1950 I915_WRITE(WM2_LP_ILK,
1940 WM2_LP_EN | 1951 WM2_LP_EN |
1941 (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | 1952 (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
1942 (fbc_wm << WM1_LP_FBC_SHIFT) | 1953 (fbc_wm << WM1_LP_FBC_SHIFT) |
1943 (plane_wm << WM1_LP_SR_SHIFT) | 1954 (plane_wm << WM1_LP_SR_SHIFT) |
1944 cursor_wm); 1955 cursor_wm);
1945 1956
1946 /* WM3 */ 1957 /* WM3 */
1947 if (!ironlake_compute_srwm(dev, 3, enabled, 1958 if (!ironlake_compute_srwm(dev, 3, enabled,
1948 SNB_READ_WM3_LATENCY() * 500, 1959 dev_priv->wm.pri_latency[3] * 500,
1949 &sandybridge_display_srwm_info, 1960 &sandybridge_display_srwm_info,
1950 &sandybridge_cursor_srwm_info, 1961 &sandybridge_cursor_srwm_info,
1951 &fbc_wm, &plane_wm, &cursor_wm)) 1962 &fbc_wm, &plane_wm, &cursor_wm))
@@ -1953,7 +1964,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
1953 1964
1954 I915_WRITE(WM3_LP_ILK, 1965 I915_WRITE(WM3_LP_ILK,
1955 WM3_LP_EN | 1966 WM3_LP_EN |
1956 (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | 1967 (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
1957 (fbc_wm << WM1_LP_FBC_SHIFT) | 1968 (fbc_wm << WM1_LP_FBC_SHIFT) |
1958 (plane_wm << WM1_LP_SR_SHIFT) | 1969 (plane_wm << WM1_LP_SR_SHIFT) |
1959 cursor_wm); 1970 cursor_wm);
@@ -1962,7 +1973,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
1962static void ivybridge_update_wm(struct drm_device *dev) 1973static void ivybridge_update_wm(struct drm_device *dev)
1963{ 1974{
1964 struct drm_i915_private *dev_priv = dev->dev_private; 1975 struct drm_i915_private *dev_priv = dev->dev_private;
1965 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ 1976 int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
1966 u32 val; 1977 u32 val;
1967 int fbc_wm, plane_wm, cursor_wm; 1978 int fbc_wm, plane_wm, cursor_wm;
1968 int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm; 1979 int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
@@ -2032,7 +2043,7 @@ static void ivybridge_update_wm(struct drm_device *dev)
2032 2043
2033 /* WM1 */ 2044 /* WM1 */
2034 if (!ironlake_compute_srwm(dev, 1, enabled, 2045 if (!ironlake_compute_srwm(dev, 1, enabled,
2035 SNB_READ_WM1_LATENCY() * 500, 2046 dev_priv->wm.pri_latency[1] * 500,
2036 &sandybridge_display_srwm_info, 2047 &sandybridge_display_srwm_info,
2037 &sandybridge_cursor_srwm_info, 2048 &sandybridge_cursor_srwm_info,
2038 &fbc_wm, &plane_wm, &cursor_wm)) 2049 &fbc_wm, &plane_wm, &cursor_wm))
@@ -2040,14 +2051,14 @@ static void ivybridge_update_wm(struct drm_device *dev)
2040 2051
2041 I915_WRITE(WM1_LP_ILK, 2052 I915_WRITE(WM1_LP_ILK,
2042 WM1_LP_SR_EN | 2053 WM1_LP_SR_EN |
2043 (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | 2054 (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
2044 (fbc_wm << WM1_LP_FBC_SHIFT) | 2055 (fbc_wm << WM1_LP_FBC_SHIFT) |
2045 (plane_wm << WM1_LP_SR_SHIFT) | 2056 (plane_wm << WM1_LP_SR_SHIFT) |
2046 cursor_wm); 2057 cursor_wm);
2047 2058
2048 /* WM2 */ 2059 /* WM2 */
2049 if (!ironlake_compute_srwm(dev, 2, enabled, 2060 if (!ironlake_compute_srwm(dev, 2, enabled,
2050 SNB_READ_WM2_LATENCY() * 500, 2061 dev_priv->wm.pri_latency[2] * 500,
2051 &sandybridge_display_srwm_info, 2062 &sandybridge_display_srwm_info,
2052 &sandybridge_cursor_srwm_info, 2063 &sandybridge_cursor_srwm_info,
2053 &fbc_wm, &plane_wm, &cursor_wm)) 2064 &fbc_wm, &plane_wm, &cursor_wm))
@@ -2055,19 +2066,19 @@ static void ivybridge_update_wm(struct drm_device *dev)
2055 2066
2056 I915_WRITE(WM2_LP_ILK, 2067 I915_WRITE(WM2_LP_ILK,
2057 WM2_LP_EN | 2068 WM2_LP_EN |
2058 (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | 2069 (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
2059 (fbc_wm << WM1_LP_FBC_SHIFT) | 2070 (fbc_wm << WM1_LP_FBC_SHIFT) |
2060 (plane_wm << WM1_LP_SR_SHIFT) | 2071 (plane_wm << WM1_LP_SR_SHIFT) |
2061 cursor_wm); 2072 cursor_wm);
2062 2073
2063 /* WM3, note we have to correct the cursor latency */ 2074 /* WM3, note we have to correct the cursor latency */
2064 if (!ironlake_compute_srwm(dev, 3, enabled, 2075 if (!ironlake_compute_srwm(dev, 3, enabled,
2065 SNB_READ_WM3_LATENCY() * 500, 2076 dev_priv->wm.pri_latency[3] * 500,
2066 &sandybridge_display_srwm_info, 2077 &sandybridge_display_srwm_info,
2067 &sandybridge_cursor_srwm_info, 2078 &sandybridge_cursor_srwm_info,
2068 &fbc_wm, &plane_wm, &ignore_cursor_wm) || 2079 &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
2069 !ironlake_compute_srwm(dev, 3, enabled, 2080 !ironlake_compute_srwm(dev, 3, enabled,
2070 2 * SNB_READ_WM3_LATENCY() * 500, 2081 dev_priv->wm.cur_latency[3] * 500,
2071 &sandybridge_display_srwm_info, 2082 &sandybridge_display_srwm_info,
2072 &sandybridge_cursor_srwm_info, 2083 &sandybridge_cursor_srwm_info,
2073 &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm)) 2084 &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
@@ -2075,14 +2086,14 @@ static void ivybridge_update_wm(struct drm_device *dev)
2075 2086
2076 I915_WRITE(WM3_LP_ILK, 2087 I915_WRITE(WM3_LP_ILK,
2077 WM3_LP_EN | 2088 WM3_LP_EN |
2078 (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | 2089 (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
2079 (fbc_wm << WM1_LP_FBC_SHIFT) | 2090 (fbc_wm << WM1_LP_FBC_SHIFT) |
2080 (plane_wm << WM1_LP_SR_SHIFT) | 2091 (plane_wm << WM1_LP_SR_SHIFT) |
2081 cursor_wm); 2092 cursor_wm);
2082} 2093}
2083 2094
2084static uint32_t hsw_wm_get_pixel_rate(struct drm_device *dev, 2095static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
2085 struct drm_crtc *crtc) 2096 struct drm_crtc *crtc)
2086{ 2097{
2087 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2098 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2088 uint32_t pixel_rate, pfit_size; 2099 uint32_t pixel_rate, pfit_size;
@@ -2112,30 +2123,38 @@ static uint32_t hsw_wm_get_pixel_rate(struct drm_device *dev,
2112 return pixel_rate; 2123 return pixel_rate;
2113} 2124}
2114 2125
2115static uint32_t hsw_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel, 2126/* latency must be in 0.1us units. */
2127static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
2116 uint32_t latency) 2128 uint32_t latency)
2117{ 2129{
2118 uint64_t ret; 2130 uint64_t ret;
2119 2131
2132 if (WARN(latency == 0, "Latency value missing\n"))
2133 return UINT_MAX;
2134
2120 ret = (uint64_t) pixel_rate * bytes_per_pixel * latency; 2135 ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
2121 ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2; 2136 ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
2122 2137
2123 return ret; 2138 return ret;
2124} 2139}
2125 2140
2126static uint32_t hsw_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, 2141/* latency must be in 0.1us units. */
2142static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
2127 uint32_t horiz_pixels, uint8_t bytes_per_pixel, 2143 uint32_t horiz_pixels, uint8_t bytes_per_pixel,
2128 uint32_t latency) 2144 uint32_t latency)
2129{ 2145{
2130 uint32_t ret; 2146 uint32_t ret;
2131 2147
2148 if (WARN(latency == 0, "Latency value missing\n"))
2149 return UINT_MAX;
2150
2132 ret = (latency * pixel_rate) / (pipe_htotal * 10000); 2151 ret = (latency * pixel_rate) / (pipe_htotal * 10000);
2133 ret = (ret + 1) * horiz_pixels * bytes_per_pixel; 2152 ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
2134 ret = DIV_ROUND_UP(ret, 64) + 2; 2153 ret = DIV_ROUND_UP(ret, 64) + 2;
2135 return ret; 2154 return ret;
2136} 2155}
2137 2156
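ilk_wm_method1() and ilk_wm_method2() are the two watermark formulas used for every plane type below, and the new WARN guards make a missing latency tuning value fail safe by returning UINT_MAX. The standalone sketch here reproduces the arithmetic for experimentation; it assumes pixel_rate is in kHz (as produced by ilk_pipe_pixel_rate()) and latency is in the 0.1 us units the comments above require, and the sample numbers are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Standalone copy of the two watermark formulas, latency in 0.1 us units. */
static uint32_t wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
                           uint32_t latency)
{
    uint64_t ret = (uint64_t)pixel_rate * bytes_per_pixel * latency;

    /* DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2 */
    return (uint32_t)((ret + 64 * 10000 - 1) / (64 * 10000)) + 2;
}

static uint32_t wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
                           uint32_t horiz_pixels, uint8_t bytes_per_pixel,
                           uint32_t latency)
{
    uint32_t ret = (latency * pixel_rate) / (pipe_htotal * 10000);

    ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
    return (ret + 63) / 64 + 2;   /* DIV_ROUND_UP(ret, 64) + 2 */
}

int main(void)
{
    /* e.g. a 148.5 MHz (1080p) pipe, 32bpp, 0.7 us WM0 latency */
    printf("method1 = %u\n", wm_method1(148500, 4, 7));
    printf("method2 = %u\n", wm_method2(148500, 2200, 1920, 4, 7));
    return 0;
}

With these inputs method1 evaluates to 9 and method2 to 122.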
2138static uint32_t hsw_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels, 2157static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
2139 uint8_t bytes_per_pixel) 2158 uint8_t bytes_per_pixel)
2140{ 2159{
2141 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2; 2160 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
@@ -2143,15 +2162,11 @@ static uint32_t hsw_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
2143 2162
2144struct hsw_pipe_wm_parameters { 2163struct hsw_pipe_wm_parameters {
2145 bool active; 2164 bool active;
2146 bool sprite_enabled;
2147 uint8_t pri_bytes_per_pixel;
2148 uint8_t spr_bytes_per_pixel;
2149 uint8_t cur_bytes_per_pixel;
2150 uint32_t pri_horiz_pixels;
2151 uint32_t spr_horiz_pixels;
2152 uint32_t cur_horiz_pixels;
2153 uint32_t pipe_htotal; 2165 uint32_t pipe_htotal;
2154 uint32_t pixel_rate; 2166 uint32_t pixel_rate;
2167 struct intel_plane_wm_parameters pri;
2168 struct intel_plane_wm_parameters spr;
2169 struct intel_plane_wm_parameters cur;
2155}; 2170};
2156 2171
2157struct hsw_wm_maximums { 2172struct hsw_wm_maximums {
@@ -2161,15 +2176,6 @@ struct hsw_wm_maximums {
2161 uint16_t fbc; 2176 uint16_t fbc;
2162}; 2177};
2163 2178
2164struct hsw_lp_wm_result {
2165 bool enable;
2166 bool fbc_enable;
2167 uint32_t pri_val;
2168 uint32_t spr_val;
2169 uint32_t cur_val;
2170 uint32_t fbc_val;
2171};
2172
2173struct hsw_wm_values { 2179struct hsw_wm_values {
2174 uint32_t wm_pipe[3]; 2180 uint32_t wm_pipe[3];
2175 uint32_t wm_lp[3]; 2181 uint32_t wm_lp[3];
@@ -2178,128 +2184,289 @@ struct hsw_wm_values {
2178 bool enable_fbc_wm; 2184 bool enable_fbc_wm;
2179}; 2185};
2180 2186
2181enum hsw_data_buf_partitioning { 2187/* used in computing the new watermarks state */
2182 HSW_DATA_BUF_PART_1_2, 2188struct intel_wm_config {
2183 HSW_DATA_BUF_PART_5_6, 2189 unsigned int num_pipes_active;
2190 bool sprites_enabled;
2191 bool sprites_scaled;
2192 bool fbc_wm_enabled;
2184}; 2193};
2185 2194
2186/* For both WM_PIPE and WM_LP. */ 2195/*
2187static uint32_t hsw_compute_pri_wm(struct hsw_pipe_wm_parameters *params, 2196 * For both WM_PIPE and WM_LP.
2197 * mem_value must be in 0.1us units.
2198 */
2199static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
2188 uint32_t mem_value, 2200 uint32_t mem_value,
2189 bool is_lp) 2201 bool is_lp)
2190{ 2202{
2191 uint32_t method1, method2; 2203 uint32_t method1, method2;
2192 2204
2193 /* TODO: for now, assume the primary plane is always enabled. */ 2205 if (!params->active || !params->pri.enabled)
2194 if (!params->active)
2195 return 0; 2206 return 0;
2196 2207
2197 method1 = hsw_wm_method1(params->pixel_rate, 2208 method1 = ilk_wm_method1(params->pixel_rate,
2198 params->pri_bytes_per_pixel, 2209 params->pri.bytes_per_pixel,
2199 mem_value); 2210 mem_value);
2200 2211
2201 if (!is_lp) 2212 if (!is_lp)
2202 return method1; 2213 return method1;
2203 2214
2204 method2 = hsw_wm_method2(params->pixel_rate, 2215 method2 = ilk_wm_method2(params->pixel_rate,
2205 params->pipe_htotal, 2216 params->pipe_htotal,
2206 params->pri_horiz_pixels, 2217 params->pri.horiz_pixels,
2207 params->pri_bytes_per_pixel, 2218 params->pri.bytes_per_pixel,
2208 mem_value); 2219 mem_value);
2209 2220
2210 return min(method1, method2); 2221 return min(method1, method2);
2211} 2222}
2212 2223
2213/* For both WM_PIPE and WM_LP. */ 2224/*
2214static uint32_t hsw_compute_spr_wm(struct hsw_pipe_wm_parameters *params, 2225 * For both WM_PIPE and WM_LP.
2226 * mem_value must be in 0.1us units.
2227 */
2228static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
2215 uint32_t mem_value) 2229 uint32_t mem_value)
2216{ 2230{
2217 uint32_t method1, method2; 2231 uint32_t method1, method2;
2218 2232
2219 if (!params->active || !params->sprite_enabled) 2233 if (!params->active || !params->spr.enabled)
2220 return 0; 2234 return 0;
2221 2235
2222 method1 = hsw_wm_method1(params->pixel_rate, 2236 method1 = ilk_wm_method1(params->pixel_rate,
2223 params->spr_bytes_per_pixel, 2237 params->spr.bytes_per_pixel,
2224 mem_value); 2238 mem_value);
2225 method2 = hsw_wm_method2(params->pixel_rate, 2239 method2 = ilk_wm_method2(params->pixel_rate,
2226 params->pipe_htotal, 2240 params->pipe_htotal,
2227 params->spr_horiz_pixels, 2241 params->spr.horiz_pixels,
2228 params->spr_bytes_per_pixel, 2242 params->spr.bytes_per_pixel,
2229 mem_value); 2243 mem_value);
2230 return min(method1, method2); 2244 return min(method1, method2);
2231} 2245}
2232 2246
2233/* For both WM_PIPE and WM_LP. */ 2247/*
2234static uint32_t hsw_compute_cur_wm(struct hsw_pipe_wm_parameters *params, 2248 * For both WM_PIPE and WM_LP.
2249 * mem_value must be in 0.1us units.
2250 */
2251static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
2235 uint32_t mem_value) 2252 uint32_t mem_value)
2236{ 2253{
2237 if (!params->active) 2254 if (!params->active || !params->cur.enabled)
2238 return 0; 2255 return 0;
2239 2256
2240 return hsw_wm_method2(params->pixel_rate, 2257 return ilk_wm_method2(params->pixel_rate,
2241 params->pipe_htotal, 2258 params->pipe_htotal,
2242 params->cur_horiz_pixels, 2259 params->cur.horiz_pixels,
2243 params->cur_bytes_per_pixel, 2260 params->cur.bytes_per_pixel,
2244 mem_value); 2261 mem_value);
2245} 2262}
2246 2263
2247/* Only for WM_LP. */ 2264/* Only for WM_LP. */
2248static uint32_t hsw_compute_fbc_wm(struct hsw_pipe_wm_parameters *params, 2265static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
2249 uint32_t pri_val, 2266 uint32_t pri_val)
2250 uint32_t mem_value)
2251{ 2267{
2252 if (!params->active) 2268 if (!params->active || !params->pri.enabled)
2253 return 0; 2269 return 0;
2254 2270
2255 return hsw_wm_fbc(pri_val, 2271 return ilk_wm_fbc(pri_val,
2256 params->pri_horiz_pixels, 2272 params->pri.horiz_pixels,
2257 params->pri_bytes_per_pixel); 2273 params->pri.bytes_per_pixel);
2258} 2274}
2259 2275
2260static bool hsw_compute_lp_wm(uint32_t mem_value, struct hsw_wm_maximums *max, 2276static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
2261 struct hsw_pipe_wm_parameters *params,
2262 struct hsw_lp_wm_result *result)
2263{ 2277{
2264 enum pipe pipe; 2278 if (INTEL_INFO(dev)->gen >= 7)
2265 uint32_t pri_val[3], spr_val[3], cur_val[3], fbc_val[3]; 2279 return 768;
2280 else
2281 return 512;
2282}
2266 2283
2267 for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) { 2284/* Calculate the maximum primary/sprite plane watermark */
2268 struct hsw_pipe_wm_parameters *p = &params[pipe]; 2285static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
2286 int level,
2287 const struct intel_wm_config *config,
2288 enum intel_ddb_partitioning ddb_partitioning,
2289 bool is_sprite)
2290{
2291 unsigned int fifo_size = ilk_display_fifo_size(dev);
2292 unsigned int max;
2269 2293
2270 pri_val[pipe] = hsw_compute_pri_wm(p, mem_value, true); 2294 /* if sprites aren't enabled, sprites get nothing */
2271 spr_val[pipe] = hsw_compute_spr_wm(p, mem_value); 2295 if (is_sprite && !config->sprites_enabled)
2272 cur_val[pipe] = hsw_compute_cur_wm(p, mem_value); 2296 return 0;
2273 fbc_val[pipe] = hsw_compute_fbc_wm(p, pri_val[pipe], mem_value);
2274 }
2275 2297
2276 result->pri_val = max3(pri_val[0], pri_val[1], pri_val[2]); 2298 /* HSW allows LP1+ watermarks even with multiple pipes */
2277 result->spr_val = max3(spr_val[0], spr_val[1], spr_val[2]); 2299 if (level == 0 || config->num_pipes_active > 1) {
2278 result->cur_val = max3(cur_val[0], cur_val[1], cur_val[2]); 2300 fifo_size /= INTEL_INFO(dev)->num_pipes;
2279 result->fbc_val = max3(fbc_val[0], fbc_val[1], fbc_val[2]);
2280 2301
2281 if (result->fbc_val > max->fbc) { 2302 /*
2282 result->fbc_enable = false; 2303 * For some reason the non self refresh
2283 result->fbc_val = 0; 2304 * FIFO size is only half of the self
2284 } else { 2305 * refresh FIFO size on ILK/SNB.
2285 result->fbc_enable = true; 2306 */
2307 if (INTEL_INFO(dev)->gen <= 6)
2308 fifo_size /= 2;
2309 }
2310
2311 if (config->sprites_enabled) {
2312 /* level 0 is always calculated with 1:1 split */
2313 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
2314 if (is_sprite)
2315 fifo_size *= 5;
2316 fifo_size /= 6;
2317 } else {
2318 fifo_size /= 2;
2319 }
2286 } 2320 }
2287 2321
2322 /* clamp to max that the registers can hold */
2323 if (INTEL_INFO(dev)->gen >= 7)
2324 /* IVB/HSW primary/sprite plane watermarks */
2325 max = level == 0 ? 127 : 1023;
2326 else if (!is_sprite)
2327 /* ILK/SNB primary plane watermarks */
2328 max = level == 0 ? 127 : 511;
2329 else
2330 /* ILK/SNB sprite plane watermarks */
2331 max = level == 0 ? 63 : 255;
2332
2333 return min(fifo_size, max);
2334}
2335
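For reference, ilk_plane_wm_max() reproduces the limits the old hsw_compute_wm_parameters() hard-codes further down. With a single active Haswell pipe (ilk_display_fifo_size() = 768) and sprites enabled, the 1:1 split gives primary and sprite 768 / 2 = 384 each, while the 5/6 partitioning gives the sprite 768 * 5 / 6 = 640 and the primary 768 / 6 = 128, matching the 384/384 and 128/640 values deleted from the old table. The register clamp (for example 1023 for LP1+ primary/sprite watermarks on IVB/HSW) is then applied on top via min().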
2336/* Calculate the maximum cursor plane watermark */
2337static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
2338 int level,
2339 const struct intel_wm_config *config)
2340{
2341 /* HSW LP1+ watermarks w/ multiple pipes */
2342 if (level > 0 && config->num_pipes_active > 1)
2343 return 64;
2344
2345 /* otherwise just report max that registers can hold */
2346 if (INTEL_INFO(dev)->gen >= 7)
2347 return level == 0 ? 63 : 255;
2348 else
2349 return level == 0 ? 31 : 63;
2350}
2351
2352/* Calculate the maximum FBC watermark */
2353static unsigned int ilk_fbc_wm_max(void)
2354{
2355 /* max that registers can hold */
2356 return 15;
2357}
2358
2359static void ilk_wm_max(struct drm_device *dev,
2360 int level,
2361 const struct intel_wm_config *config,
2362 enum intel_ddb_partitioning ddb_partitioning,
2363 struct hsw_wm_maximums *max)
2364{
2365 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
2366 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
2367 max->cur = ilk_cursor_wm_max(dev, level, config);
2368 max->fbc = ilk_fbc_wm_max();
2369}
2370
2371static bool ilk_check_wm(int level,
2372 const struct hsw_wm_maximums *max,
2373 struct intel_wm_level *result)
2374{
2375 bool ret;
2376
2377 /* already determined to be invalid? */
2378 if (!result->enable)
2379 return false;
2380
2288 result->enable = result->pri_val <= max->pri && 2381 result->enable = result->pri_val <= max->pri &&
2289 result->spr_val <= max->spr && 2382 result->spr_val <= max->spr &&
2290 result->cur_val <= max->cur; 2383 result->cur_val <= max->cur;
2291 return result->enable; 2384
2385 ret = result->enable;
2386
2387 /*
2388 * HACK until we can pre-compute everything,
2389 * and thus fail gracefully if LP0 watermarks
2390 * are exceeded...
2391 */
2392 if (level == 0 && !result->enable) {
2393 if (result->pri_val > max->pri)
2394 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
2395 level, result->pri_val, max->pri);
2396 if (result->spr_val > max->spr)
2397 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2398 level, result->spr_val, max->spr);
2399 if (result->cur_val > max->cur)
2400 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2401 level, result->cur_val, max->cur);
2402
2403 result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
2404 result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
2405 result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
2406 result->enable = true;
2407 }
2408
2409 DRM_DEBUG_KMS("WM%d: %sabled\n", level, result->enable ? "en" : "dis");
2410
2411 return ret;
2412}
2413
2414static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
2415 int level,
2416 struct hsw_pipe_wm_parameters *p,
2417 struct intel_wm_level *result)
2418{
2419 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
2420 uint16_t spr_latency = dev_priv->wm.spr_latency[level];
2421 uint16_t cur_latency = dev_priv->wm.cur_latency[level];
2422
2423 /* WM1+ latency values stored in 0.5us units */
2424 if (level > 0) {
2425 pri_latency *= 5;
2426 spr_latency *= 5;
2427 cur_latency *= 5;
2428 }
2429
2430 result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
2431 result->spr_val = ilk_compute_spr_wm(p, spr_latency);
2432 result->cur_val = ilk_compute_cur_wm(p, cur_latency);
2433 result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
2434 result->enable = true;
2435}
2436
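ilk_compute_wm_level() is also where the mixed latency units get reconciled: WM0 latencies are already in the 0.1 us units the ilk_compute_*_wm() helpers expect, while the WM1+ values are kept in 0.5 us units, so multiplying them by 5 converts them in place. For example a WM1 latency of 4 read from SSKPD means 2.0 us and is passed down as 20.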
2437static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv,
2438 int level, struct hsw_wm_maximums *max,
2439 struct hsw_pipe_wm_parameters *params,
2440 struct intel_wm_level *result)
2441{
2442 enum pipe pipe;
2443 struct intel_wm_level res[3];
2444
2445 for (pipe = PIPE_A; pipe <= PIPE_C; pipe++)
2446 ilk_compute_wm_level(dev_priv, level, &params[pipe], &res[pipe]);
2447
2448 result->pri_val = max3(res[0].pri_val, res[1].pri_val, res[2].pri_val);
2449 result->spr_val = max3(res[0].spr_val, res[1].spr_val, res[2].spr_val);
2450 result->cur_val = max3(res[0].cur_val, res[1].cur_val, res[2].cur_val);
2451 result->fbc_val = max3(res[0].fbc_val, res[1].fbc_val, res[2].fbc_val);
2452 result->enable = true;
2453
2454 return ilk_check_wm(level, max, result);
2292} 2455}
2293 2456
2294static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv, 2457static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
2295 uint32_t mem_value, enum pipe pipe, 2458 enum pipe pipe,
2296 struct hsw_pipe_wm_parameters *params) 2459 struct hsw_pipe_wm_parameters *params)
2297{ 2460{
2298 uint32_t pri_val, cur_val, spr_val; 2461 uint32_t pri_val, cur_val, spr_val;
2462 /* WM0 latency values stored in 0.1us units */
2463 uint16_t pri_latency = dev_priv->wm.pri_latency[0];
2464 uint16_t spr_latency = dev_priv->wm.spr_latency[0];
2465 uint16_t cur_latency = dev_priv->wm.cur_latency[0];
2299 2466
2300 pri_val = hsw_compute_pri_wm(params, mem_value, false); 2467 pri_val = ilk_compute_pri_wm(params, pri_latency, false);
2301 spr_val = hsw_compute_spr_wm(params, mem_value); 2468 spr_val = ilk_compute_spr_wm(params, spr_latency);
2302 cur_val = hsw_compute_cur_wm(params, mem_value); 2469 cur_val = ilk_compute_cur_wm(params, cur_latency);
2303 2470
2304 WARN(pri_val > 127, 2471 WARN(pri_val > 127,
2305 "Primary WM error, mode not supported for pipe %c\n", 2472 "Primary WM error, mode not supported for pipe %c\n",
@@ -2338,27 +2505,116 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2338 PIPE_WM_LINETIME_TIME(linetime); 2505 PIPE_WM_LINETIME_TIME(linetime);
2339} 2506}
2340 2507
2508static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
2509{
2510 struct drm_i915_private *dev_priv = dev->dev_private;
2511
2512 if (IS_HASWELL(dev)) {
2513 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2514
2515 wm[0] = (sskpd >> 56) & 0xFF;
2516 if (wm[0] == 0)
2517 wm[0] = sskpd & 0xF;
2518 wm[1] = (sskpd >> 4) & 0xFF;
2519 wm[2] = (sskpd >> 12) & 0xFF;
2520 wm[3] = (sskpd >> 20) & 0x1FF;
2521 wm[4] = (sskpd >> 32) & 0x1FF;
2522 } else if (INTEL_INFO(dev)->gen >= 6) {
2523 uint32_t sskpd = I915_READ(MCH_SSKPD);
2524
2525 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2526 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2527 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2528 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2529 } else if (INTEL_INFO(dev)->gen >= 5) {
2530 uint32_t mltr = I915_READ(MLTR_ILK);
2531
2532 /* ILK primary LP0 latency is 700 ns */
2533 wm[0] = 7;
2534 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2535 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2536 }
2537}
2538
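intel_read_wm_latency() centralises the memory latency readout: Haswell pulls five fields out of the 64-bit MCH_SSKPD register, SNB/IVB read four packed fields from the 32-bit version, and Ironlake falls back to MLTR plus the fixed 700 ns LP0 value. A small standalone helper for decoding a dumped Haswell SSKPD value by hand, using the same shifts and masks as above (the sample register value is made up purely for illustration):

#include <stdint.h>
#include <stdio.h>

/* Decode the five Haswell latency fields from a 64-bit SSKPD value. */
static void decode_hsw_sskpd(uint64_t sskpd, uint16_t wm[5])
{
    wm[0] = (sskpd >> 56) & 0xFF;
    if (wm[0] == 0)
        wm[0] = sskpd & 0xF;   /* fall back to the legacy 4-bit field */
    wm[1] = (sskpd >> 4) & 0xFF;
    wm[2] = (sskpd >> 12) & 0xFF;
    wm[3] = (sskpd >> 20) & 0x1FF;
    wm[4] = (sskpd >> 32) & 0x1FF;
}

int main(void)
{
    uint16_t wm[5];

    decode_hsw_sskpd(0x0200002040200804ULL, wm);
    for (int i = 0; i < 5; i++)
        printf("WM%d raw latency: %u\n", i, wm[i]);
    return 0;
}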
2539static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2540{
2541 /* ILK sprite LP0 latency is 1300 ns */
2542 if (INTEL_INFO(dev)->gen == 5)
2543 wm[0] = 13;
2544}
2545
2546static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2547{
2548 /* ILK cursor LP0 latency is 1300 ns */
2549 if (INTEL_INFO(dev)->gen == 5)
2550 wm[0] = 13;
2551
2552 /* WaDoubleCursorLP3Latency:ivb */
2553 if (IS_IVYBRIDGE(dev))
2554 wm[3] *= 2;
2555}
2556
2557static void intel_print_wm_latency(struct drm_device *dev,
2558 const char *name,
2559 const uint16_t wm[5])
2560{
2561 int level, max_level;
2562
2563 /* how many WM levels are we expecting */
2564 if (IS_HASWELL(dev))
2565 max_level = 4;
2566 else if (INTEL_INFO(dev)->gen >= 6)
2567 max_level = 3;
2568 else
2569 max_level = 2;
2570
2571 for (level = 0; level <= max_level; level++) {
2572 unsigned int latency = wm[level];
2573
2574 if (latency == 0) {
2575 DRM_ERROR("%s WM%d latency not provided\n",
2576 name, level);
2577 continue;
2578 }
2579
2580 /* WM1+ latency values in 0.5us units */
2581 if (level > 0)
2582 latency *= 5;
2583
2584 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2585 name, level, wm[level],
2586 latency / 10, latency % 10);
2587 }
2588}
2589
2590static void intel_setup_wm_latency(struct drm_device *dev)
2591{
2592 struct drm_i915_private *dev_priv = dev->dev_private;
2593
2594 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2595
2596 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2597 sizeof(dev_priv->wm.pri_latency));
2598 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2599 sizeof(dev_priv->wm.pri_latency));
2600
2601 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2602 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
2603
2604 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2605 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2606 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2607}
2608
2341static void hsw_compute_wm_parameters(struct drm_device *dev, 2609static void hsw_compute_wm_parameters(struct drm_device *dev,
2342 struct hsw_pipe_wm_parameters *params, 2610 struct hsw_pipe_wm_parameters *params,
2343 uint32_t *wm,
2344 struct hsw_wm_maximums *lp_max_1_2, 2611 struct hsw_wm_maximums *lp_max_1_2,
2345 struct hsw_wm_maximums *lp_max_5_6) 2612 struct hsw_wm_maximums *lp_max_5_6)
2346{ 2613{
2347 struct drm_i915_private *dev_priv = dev->dev_private;
2348 struct drm_crtc *crtc; 2614 struct drm_crtc *crtc;
2349 struct drm_plane *plane; 2615 struct drm_plane *plane;
2350 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2351 enum pipe pipe; 2616 enum pipe pipe;
2352 int pipes_active = 0, sprites_enabled = 0; 2617 struct intel_wm_config config = {};
2353
2354 if ((sskpd >> 56) & 0xFF)
2355 wm[0] = (sskpd >> 56) & 0xFF;
2356 else
2357 wm[0] = sskpd & 0xF;
2358 wm[1] = ((sskpd >> 4) & 0xFF) * 5;
2359 wm[2] = ((sskpd >> 12) & 0xFF) * 5;
2360 wm[3] = ((sskpd >> 20) & 0x1FF) * 5;
2361 wm[4] = ((sskpd >> 32) & 0x1FF) * 5;
2362 2618
2363 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 2619 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2364 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2620 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -2371,15 +2627,18 @@ static void hsw_compute_wm_parameters(struct drm_device *dev,
2371 if (!p->active) 2627 if (!p->active)
2372 continue; 2628 continue;
2373 2629
2374 pipes_active++; 2630 config.num_pipes_active++;
2375 2631
2376 p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal; 2632 p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
2377 p->pixel_rate = hsw_wm_get_pixel_rate(dev, crtc); 2633 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2378 p->pri_bytes_per_pixel = crtc->fb->bits_per_pixel / 8; 2634 p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
2379 p->cur_bytes_per_pixel = 4; 2635 p->cur.bytes_per_pixel = 4;
2380 p->pri_horiz_pixels = 2636 p->pri.horiz_pixels =
2381 intel_crtc->config.requested_mode.hdisplay; 2637 intel_crtc->config.requested_mode.hdisplay;
2382 p->cur_horiz_pixels = 64; 2638 p->cur.horiz_pixels = 64;
2639 /* TODO: for now, assume primary and cursor planes are always enabled. */
2640 p->pri.enabled = true;
2641 p->cur.enabled = true;
2383 } 2642 }
2384 2643
2385 list_for_each_entry(plane, &dev->mode_config.plane_list, head) { 2644 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
@@ -2389,59 +2648,53 @@ static void hsw_compute_wm_parameters(struct drm_device *dev,
2389 pipe = intel_plane->pipe; 2648 pipe = intel_plane->pipe;
2390 p = &params[pipe]; 2649 p = &params[pipe];
2391 2650
2392 p->sprite_enabled = intel_plane->wm.enable; 2651 p->spr = intel_plane->wm;
2393 p->spr_bytes_per_pixel = intel_plane->wm.bytes_per_pixel;
2394 p->spr_horiz_pixels = intel_plane->wm.horiz_pixels;
2395 2652
2396 if (p->sprite_enabled) 2653 config.sprites_enabled |= p->spr.enabled;
2397 sprites_enabled++; 2654 config.sprites_scaled |= p->spr.scaled;
2398 } 2655 }
2399 2656
2400 if (pipes_active > 1) { 2657 ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, lp_max_1_2);
2401 lp_max_1_2->pri = lp_max_5_6->pri = sprites_enabled ? 128 : 256; 2658
2402 lp_max_1_2->spr = lp_max_5_6->spr = 128; 2659 /* 5/6 split only in single pipe config on IVB+ */
2403 lp_max_1_2->cur = lp_max_5_6->cur = 64; 2660 if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active <= 1)
2404 } else { 2661 ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, lp_max_5_6);
2405 lp_max_1_2->pri = sprites_enabled ? 384 : 768; 2662 else
2406 lp_max_5_6->pri = sprites_enabled ? 128 : 768; 2663 *lp_max_5_6 = *lp_max_1_2;
2407 lp_max_1_2->spr = 384;
2408 lp_max_5_6->spr = 640;
2409 lp_max_1_2->cur = lp_max_5_6->cur = 255;
2410 }
2411 lp_max_1_2->fbc = lp_max_5_6->fbc = 15;
2412} 2664}
2413 2665
2414static void hsw_compute_wm_results(struct drm_device *dev, 2666static void hsw_compute_wm_results(struct drm_device *dev,
2415 struct hsw_pipe_wm_parameters *params, 2667 struct hsw_pipe_wm_parameters *params,
2416 uint32_t *wm,
2417 struct hsw_wm_maximums *lp_maximums, 2668 struct hsw_wm_maximums *lp_maximums,
2418 struct hsw_wm_values *results) 2669 struct hsw_wm_values *results)
2419{ 2670{
2420 struct drm_i915_private *dev_priv = dev->dev_private; 2671 struct drm_i915_private *dev_priv = dev->dev_private;
2421 struct drm_crtc *crtc; 2672 struct drm_crtc *crtc;
2422 struct hsw_lp_wm_result lp_results[4] = {}; 2673 struct intel_wm_level lp_results[4] = {};
2423 enum pipe pipe; 2674 enum pipe pipe;
2424 int level, max_level, wm_lp; 2675 int level, max_level, wm_lp;
2425 2676
2426 for (level = 1; level <= 4; level++) 2677 for (level = 1; level <= 4; level++)
2427 if (!hsw_compute_lp_wm(wm[level], lp_maximums, params, 2678 if (!hsw_compute_lp_wm(dev_priv, level,
2679 lp_maximums, params,
2428 &lp_results[level - 1])) 2680 &lp_results[level - 1]))
2429 break; 2681 break;
2430 max_level = level - 1; 2682 max_level = level - 1;
2431 2683
2684 memset(results, 0, sizeof(*results));
2685
2432 /* The spec says it is preferred to disable FBC WMs instead of disabling 2686 /* The spec says it is preferred to disable FBC WMs instead of disabling
2433 * a WM level. */ 2687 * a WM level. */
2434 results->enable_fbc_wm = true; 2688 results->enable_fbc_wm = true;
2435 for (level = 1; level <= max_level; level++) { 2689 for (level = 1; level <= max_level; level++) {
2436 if (!lp_results[level - 1].fbc_enable) { 2690 if (lp_results[level - 1].fbc_val > lp_maximums->fbc) {
2437 results->enable_fbc_wm = false; 2691 results->enable_fbc_wm = false;
2438 break; 2692 lp_results[level - 1].fbc_val = 0;
2439 } 2693 }
2440 } 2694 }
2441 2695
2442 memset(results, 0, sizeof(*results));
2443 for (wm_lp = 1; wm_lp <= 3; wm_lp++) { 2696 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2444 const struct hsw_lp_wm_result *r; 2697 const struct intel_wm_level *r;
2445 2698
2446 level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp; 2699 level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp;
2447 if (level > max_level) 2700 if (level > max_level)
@@ -2456,8 +2709,7 @@ static void hsw_compute_wm_results(struct drm_device *dev,
2456 } 2709 }
2457 2710
2458 for_each_pipe(pipe) 2711 for_each_pipe(pipe)
2459 results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, wm[0], 2712 results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, pipe,
2460 pipe,
2461 &params[pipe]); 2713 &params[pipe]);
2462 2714
2463 for_each_pipe(pipe) { 2715 for_each_pipe(pipe) {
@@ -2468,8 +2720,8 @@ static void hsw_compute_wm_results(struct drm_device *dev,
2468 2720
2469/* Find the result with the highest level enabled. Check for enable_fbc_wm in 2721/* Find the result with the highest level enabled. Check for enable_fbc_wm in
2470 * case both are at the same level. Prefer r1 in case they're the same. */ 2722 * case both are at the same level. Prefer r1 in case they're the same. */
2471struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1, 2723static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
2472 struct hsw_wm_values *r2) 2724 struct hsw_wm_values *r2)
2473{ 2725{
2474 int i, val_r1 = 0, val_r2 = 0; 2726 int i, val_r1 = 0, val_r2 = 0;
2475 2727
@@ -2498,11 +2750,11 @@ struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
2498 */ 2750 */
2499static void hsw_write_wm_values(struct drm_i915_private *dev_priv, 2751static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
2500 struct hsw_wm_values *results, 2752 struct hsw_wm_values *results,
2501 enum hsw_data_buf_partitioning partitioning) 2753 enum intel_ddb_partitioning partitioning)
2502{ 2754{
2503 struct hsw_wm_values previous; 2755 struct hsw_wm_values previous;
2504 uint32_t val; 2756 uint32_t val;
2505 enum hsw_data_buf_partitioning prev_partitioning; 2757 enum intel_ddb_partitioning prev_partitioning;
2506 bool prev_enable_fbc_wm; 2758 bool prev_enable_fbc_wm;
2507 2759
2508 previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK); 2760 previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
@@ -2519,7 +2771,7 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
2519 previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C)); 2771 previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
2520 2772
2521 prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? 2773 prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
2522 HSW_DATA_BUF_PART_5_6 : HSW_DATA_BUF_PART_1_2; 2774 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
2523 2775
2524 prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS); 2776 prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
2525 2777
@@ -2558,7 +2810,7 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
2558 2810
2559 if (prev_partitioning != partitioning) { 2811 if (prev_partitioning != partitioning) {
2560 val = I915_READ(WM_MISC); 2812 val = I915_READ(WM_MISC);
2561 if (partitioning == HSW_DATA_BUF_PART_1_2) 2813 if (partitioning == INTEL_DDB_PART_1_2)
2562 val &= ~WM_MISC_DATA_PARTITION_5_6; 2814 val &= ~WM_MISC_DATA_PARTITION_5_6;
2563 else 2815 else
2564 val |= WM_MISC_DATA_PARTITION_5_6; 2816 val |= WM_MISC_DATA_PARTITION_5_6;
@@ -2595,44 +2847,39 @@ static void haswell_update_wm(struct drm_device *dev)
2595 struct hsw_wm_maximums lp_max_1_2, lp_max_5_6; 2847 struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
2596 struct hsw_pipe_wm_parameters params[3]; 2848 struct hsw_pipe_wm_parameters params[3];
2597 struct hsw_wm_values results_1_2, results_5_6, *best_results; 2849 struct hsw_wm_values results_1_2, results_5_6, *best_results;
2598 uint32_t wm[5]; 2850 enum intel_ddb_partitioning partitioning;
2599 enum hsw_data_buf_partitioning partitioning;
2600 2851
2601 hsw_compute_wm_parameters(dev, params, wm, &lp_max_1_2, &lp_max_5_6); 2852 hsw_compute_wm_parameters(dev, params, &lp_max_1_2, &lp_max_5_6);
2602 2853
2603 hsw_compute_wm_results(dev, params, wm, &lp_max_1_2, &results_1_2); 2854 hsw_compute_wm_results(dev, params,
2855 &lp_max_1_2, &results_1_2);
2604 if (lp_max_1_2.pri != lp_max_5_6.pri) { 2856 if (lp_max_1_2.pri != lp_max_5_6.pri) {
2605 hsw_compute_wm_results(dev, params, wm, &lp_max_5_6, 2857 hsw_compute_wm_results(dev, params,
2606 &results_5_6); 2858 &lp_max_5_6, &results_5_6);
2607 best_results = hsw_find_best_result(&results_1_2, &results_5_6); 2859 best_results = hsw_find_best_result(&results_1_2, &results_5_6);
2608 } else { 2860 } else {
2609 best_results = &results_1_2; 2861 best_results = &results_1_2;
2610 } 2862 }
2611 2863
2612 partitioning = (best_results == &results_1_2) ? 2864 partitioning = (best_results == &results_1_2) ?
2613 HSW_DATA_BUF_PART_1_2 : HSW_DATA_BUF_PART_5_6; 2865 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
2614 2866
2615 hsw_write_wm_values(dev_priv, best_results, partitioning); 2867 hsw_write_wm_values(dev_priv, best_results, partitioning);
2616} 2868}
2617 2869
2618static void haswell_update_sprite_wm(struct drm_device *dev, int pipe, 2870static void haswell_update_sprite_wm(struct drm_plane *plane,
2871 struct drm_crtc *crtc,
2619 uint32_t sprite_width, int pixel_size, 2872 uint32_t sprite_width, int pixel_size,
2620 bool enable) 2873 bool enabled, bool scaled)
2621{ 2874{
2622 struct drm_plane *plane; 2875 struct intel_plane *intel_plane = to_intel_plane(plane);
2623 2876
2624 list_for_each_entry(plane, &dev->mode_config.plane_list, head) { 2877 intel_plane->wm.enabled = enabled;
2625 struct intel_plane *intel_plane = to_intel_plane(plane); 2878 intel_plane->wm.scaled = scaled;
2626 2879 intel_plane->wm.horiz_pixels = sprite_width;
2627 if (intel_plane->pipe == pipe) { 2880 intel_plane->wm.bytes_per_pixel = pixel_size;
2628 intel_plane->wm.enable = enable;
2629 intel_plane->wm.horiz_pixels = sprite_width + 1;
2630 intel_plane->wm.bytes_per_pixel = pixel_size;
2631 break;
2632 }
2633 }
2634 2881
2635 haswell_update_wm(dev); 2882 haswell_update_wm(plane->dev);
2636} 2883}
2637 2884
2638static bool 2885static bool
@@ -2711,17 +2958,20 @@ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
2711 return *sprite_wm > 0x3ff ? false : true; 2958 return *sprite_wm > 0x3ff ? false : true;
2712} 2959}
2713 2960
2714static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, 2961static void sandybridge_update_sprite_wm(struct drm_plane *plane,
2962 struct drm_crtc *crtc,
2715 uint32_t sprite_width, int pixel_size, 2963 uint32_t sprite_width, int pixel_size,
2716 bool enable) 2964 bool enabled, bool scaled)
2717{ 2965{
2966 struct drm_device *dev = plane->dev;
2718 struct drm_i915_private *dev_priv = dev->dev_private; 2967 struct drm_i915_private *dev_priv = dev->dev_private;
2719 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ 2968 int pipe = to_intel_plane(plane)->pipe;
2969 int latency = dev_priv->wm.spr_latency[0] * 100; /* In unit 0.1us */
2720 u32 val; 2970 u32 val;
2721 int sprite_wm, reg; 2971 int sprite_wm, reg;
2722 int ret; 2972 int ret;
2723 2973
2724 if (!enable) 2974 if (!enabled)
2725 return; 2975 return;
2726 2976
2727 switch (pipe) { 2977 switch (pipe) {
@@ -2756,7 +3006,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
2756 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, 3006 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2757 pixel_size, 3007 pixel_size,
2758 &sandybridge_display_srwm_info, 3008 &sandybridge_display_srwm_info,
2759 SNB_READ_WM1_LATENCY() * 500, 3009 dev_priv->wm.spr_latency[1] * 500,
2760 &sprite_wm); 3010 &sprite_wm);
2761 if (!ret) { 3011 if (!ret) {
2762 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n", 3012 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
@@ -2772,7 +3022,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
2772 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, 3022 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2773 pixel_size, 3023 pixel_size,
2774 &sandybridge_display_srwm_info, 3024 &sandybridge_display_srwm_info,
2775 SNB_READ_WM2_LATENCY() * 500, 3025 dev_priv->wm.spr_latency[2] * 500,
2776 &sprite_wm); 3026 &sprite_wm);
2777 if (!ret) { 3027 if (!ret) {
2778 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n", 3028 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
@@ -2784,7 +3034,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
2784 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, 3034 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2785 pixel_size, 3035 pixel_size,
2786 &sandybridge_display_srwm_info, 3036 &sandybridge_display_srwm_info,
2787 SNB_READ_WM3_LATENCY() * 500, 3037 dev_priv->wm.spr_latency[3] * 500,
2788 &sprite_wm); 3038 &sprite_wm);
2789 if (!ret) { 3039 if (!ret) {
2790 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n", 3040 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
@@ -2834,15 +3084,16 @@ void intel_update_watermarks(struct drm_device *dev)
2834 dev_priv->display.update_wm(dev); 3084 dev_priv->display.update_wm(dev);
2835} 3085}
2836 3086
2837void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, 3087void intel_update_sprite_watermarks(struct drm_plane *plane,
3088 struct drm_crtc *crtc,
2838 uint32_t sprite_width, int pixel_size, 3089 uint32_t sprite_width, int pixel_size,
2839 bool enable) 3090 bool enabled, bool scaled)
2840{ 3091{
2841 struct drm_i915_private *dev_priv = dev->dev_private; 3092 struct drm_i915_private *dev_priv = plane->dev->dev_private;
2842 3093
2843 if (dev_priv->display.update_sprite_wm) 3094 if (dev_priv->display.update_sprite_wm)
2844 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width, 3095 dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
2845 pixel_size, enable); 3096 pixel_size, enabled, scaled);
2846} 3097}
2847 3098
2848static struct drm_i915_gem_object * 3099static struct drm_i915_gem_object *
@@ -2859,7 +3110,7 @@ intel_alloc_context_page(struct drm_device *dev)
2859 return NULL; 3110 return NULL;
2860 } 3111 }
2861 3112
2862 ret = i915_gem_object_pin(ctx, 4096, true, false); 3113 ret = i915_gem_obj_ggtt_pin(ctx, 4096, true, false);
2863 if (ret) { 3114 if (ret) {
2864 DRM_ERROR("failed to pin power context: %d\n", ret); 3115 DRM_ERROR("failed to pin power context: %d\n", ret);
2865 goto err_unref; 3116 goto err_unref;
@@ -3076,19 +3327,12 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
3076 */ 3327 */
3077static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv) 3328static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
3078{ 3329{
3079 unsigned long timeout = jiffies + msecs_to_jiffies(10);
3080 u32 pval; 3330 u32 pval;
3081 3331
3082 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3332 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3083 3333
3084 do { 3334 if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
3085 pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 3335 DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
3086 if (time_after(jiffies, timeout)) {
3087 DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
3088 break;
3089 }
3090 udelay(10);
3091 } while (pval & 1);
3092 3336
3093 pval >>= 8; 3337 pval >>= 8;
3094 3338
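The open-coded jiffies/udelay loop in vlv_update_rps_cur_delay() is replaced with the driver's wait_for() helper, which keeps re-reading the Punit frequency status until GENFREQSTATUS clears or the 10 ms budget (matching the old msecs_to_jiffies(10) deadline) runs out, and returns non-zero on timeout. A rough standalone sketch of that poll-until-timeout pattern, not the kernel macro itself:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Poll cond(ctx) until it returns true or timeout_ms elapses.
 * Returns 0 on success, -1 on timeout; a rough stand-in for wait_for(). */
static int wait_for_cond(bool (*cond)(void *ctx), void *ctx,
                         unsigned int timeout_ms)
{
    struct timespec start, now;

    clock_gettime(CLOCK_MONOTONIC, &start);
    for (;;) {
        if (cond(ctx))
            return 0;
        clock_gettime(CLOCK_MONOTONIC, &now);
        long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000L +
                          (now.tv_nsec - start.tv_nsec) / 1000000L;
        if (elapsed_ms >= (long)timeout_ms)
            return -1;
        /* the kernel version sleeps or relaxes the CPU between reads */
    }
}

/* Example: a fake "register" whose busy bit clears after a few polls. */
static bool busy_bit_clear(void *ctx)
{
    int *countdown = ctx;
    return --(*countdown) <= 0;
}

int main(void)
{
    int countdown = 5;

    if (wait_for_cond(busy_bit_clear, &countdown, 10))
        printf("timed out waiting for Punit\n");
    else
        printf("status cleared\n");
    return 0;
}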
@@ -3129,13 +3373,10 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
3129 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val)); 3373 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
3130} 3374}
3131 3375
3132 3376static void gen6_disable_rps_interrupts(struct drm_device *dev)
3133static void gen6_disable_rps(struct drm_device *dev)
3134{ 3377{
3135 struct drm_i915_private *dev_priv = dev->dev_private; 3378 struct drm_i915_private *dev_priv = dev->dev_private;
3136 3379
3137 I915_WRITE(GEN6_RC_CONTROL, 0);
3138 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
3139 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); 3380 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3140 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS); 3381 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS);
3141 /* Complete PM interrupt masking here doesn't race with the rps work 3382 /* Complete PM interrupt masking here doesn't race with the rps work
@@ -3143,30 +3384,30 @@ static void gen6_disable_rps(struct drm_device *dev)
3143 * register (PMIMR) to mask PM interrupts. The only risk is in leaving 3384 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
3144 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ 3385 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
3145 3386
3146 spin_lock_irq(&dev_priv->rps.lock); 3387 spin_lock_irq(&dev_priv->irq_lock);
3147 dev_priv->rps.pm_iir = 0; 3388 dev_priv->rps.pm_iir = 0;
3148 spin_unlock_irq(&dev_priv->rps.lock); 3389 spin_unlock_irq(&dev_priv->irq_lock);
3149 3390
3150 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); 3391 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
3151} 3392}
3152 3393
3153static void valleyview_disable_rps(struct drm_device *dev) 3394static void gen6_disable_rps(struct drm_device *dev)
3154{ 3395{
3155 struct drm_i915_private *dev_priv = dev->dev_private; 3396 struct drm_i915_private *dev_priv = dev->dev_private;
3156 3397
3157 I915_WRITE(GEN6_RC_CONTROL, 0); 3398 I915_WRITE(GEN6_RC_CONTROL, 0);
3158 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); 3399 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
3159 I915_WRITE(GEN6_PMIER, 0);
3160 /* Complete PM interrupt masking here doesn't race with the rps work
3161 * item again unmasking PM interrupts because that is using a different
3162 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
3163 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
3164 3400
3165 spin_lock_irq(&dev_priv->rps.lock); 3401 gen6_disable_rps_interrupts(dev);
3166 dev_priv->rps.pm_iir = 0; 3402}
3167 spin_unlock_irq(&dev_priv->rps.lock);
3168 3403
3169 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 3404static void valleyview_disable_rps(struct drm_device *dev)
3405{
3406 struct drm_i915_private *dev_priv = dev->dev_private;
3407
3408 I915_WRITE(GEN6_RC_CONTROL, 0);
3409
3410 gen6_disable_rps_interrupts(dev);
3170 3411
3171 if (dev_priv->vlv_pctx) { 3412 if (dev_priv->vlv_pctx) {
3172 drm_gem_object_unreference(&dev_priv->vlv_pctx->base); 3413 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
@@ -3176,6 +3417,10 @@ static void valleyview_disable_rps(struct drm_device *dev)
3176 3417
3177int intel_enable_rc6(const struct drm_device *dev) 3418int intel_enable_rc6(const struct drm_device *dev)
3178{ 3419{
3420 /* No RC6 before Ironlake */
3421 if (INTEL_INFO(dev)->gen < 5)
3422 return 0;
3423
3179 /* Respect the kernel parameter if it is set */ 3424 /* Respect the kernel parameter if it is set */
3180 if (i915_enable_rc6 >= 0) 3425 if (i915_enable_rc6 >= 0)
3181 return i915_enable_rc6; 3426 return i915_enable_rc6;
@@ -3199,6 +3444,19 @@ int intel_enable_rc6(const struct drm_device *dev)
3199 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); 3444 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
3200} 3445}
3201 3446
3447static void gen6_enable_rps_interrupts(struct drm_device *dev)
3448{
3449 struct drm_i915_private *dev_priv = dev->dev_private;
3450
3451 spin_lock_irq(&dev_priv->irq_lock);
3452 WARN_ON(dev_priv->rps.pm_iir);
3453 snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
3454 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
3455 spin_unlock_irq(&dev_priv->irq_lock);
3456 /* only unmask PM interrupts we need. Mask all others. */
3457 I915_WRITE(GEN6_PMINTRMSK, ~GEN6_PM_RPS_EVENTS);
3458}
3459
3202static void gen6_enable_rps(struct drm_device *dev) 3460static void gen6_enable_rps(struct drm_device *dev)
3203{ 3461{
3204 struct drm_i915_private *dev_priv = dev->dev_private; 3462 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3250,7 +3508,10 @@ static void gen6_enable_rps(struct drm_device *dev)
3250 3508
3251 I915_WRITE(GEN6_RC_SLEEP, 0); 3509 I915_WRITE(GEN6_RC_SLEEP, 0);
3252 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); 3510 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
3253 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); 3511 if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev))
3512 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
3513 else
3514 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
3254 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000); 3515 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
3255 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ 3516 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
3256 3517
@@ -3327,17 +3588,7 @@ static void gen6_enable_rps(struct drm_device *dev)
3327 3588
3328 gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); 3589 gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
3329 3590
3330 /* requires MSI enabled */ 3591 gen6_enable_rps_interrupts(dev);
3331 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS);
3332 spin_lock_irq(&dev_priv->rps.lock);
3333 /* FIXME: Our interrupt enabling sequence is bonghits.
3334 * dev_priv->rps.pm_iir really should be 0 here. */
3335 dev_priv->rps.pm_iir = 0;
3336 I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
3337 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
3338 spin_unlock_irq(&dev_priv->rps.lock);
3339 /* unmask all PM interrupts */
3340 I915_WRITE(GEN6_PMINTRMSK, 0);
3341 3592
3342 rc6vids = 0; 3593 rc6vids = 0;
3343 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 3594 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
@@ -3356,7 +3607,7 @@ static void gen6_enable_rps(struct drm_device *dev)
3356 gen6_gt_force_wake_put(dev_priv); 3607 gen6_gt_force_wake_put(dev_priv);
3357} 3608}
3358 3609
3359static void gen6_update_ring_freq(struct drm_device *dev) 3610void gen6_update_ring_freq(struct drm_device *dev)
3360{ 3611{
3361 struct drm_i915_private *dev_priv = dev->dev_private; 3612 struct drm_i915_private *dev_priv = dev->dev_private;
3362 int min_freq = 15; 3613 int min_freq = 15;
@@ -3482,7 +3733,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
3482 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base; 3733 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
3483 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev, 3734 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
3484 pcbr_offset, 3735 pcbr_offset,
3485 -1, 3736 I915_GTT_OFFSET_NONE,
3486 pctx_size); 3737 pctx_size);
3487 goto out; 3738 goto out;
3488 } 3739 }
@@ -3607,14 +3858,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
3607 3858
3608 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); 3859 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
3609 3860
3610 /* requires MSI enabled */ 3861 gen6_enable_rps_interrupts(dev);
3611 I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS);
3612 spin_lock_irq(&dev_priv->rps.lock);
3613 WARN_ON(dev_priv->rps.pm_iir != 0);
3614 I915_WRITE(GEN6_PMIMR, 0);
3615 spin_unlock_irq(&dev_priv->rps.lock);
3616 /* enable all PM interrupts */
3617 I915_WRITE(GEN6_PMINTRMSK, 0);
3618 3862
3619 gen6_gt_force_wake_put(dev_priv); 3863 gen6_gt_force_wake_put(dev_priv);
3620} 3864}
@@ -3708,7 +3952,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
3708 3952
3709 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); 3953 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
3710 intel_ring_emit(ring, MI_SET_CONTEXT); 3954 intel_ring_emit(ring, MI_SET_CONTEXT);
3711 intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset | 3955 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
3712 MI_MM_SPACE_GTT | 3956 MI_MM_SPACE_GTT |
3713 MI_SAVE_EXT_STATE_EN | 3957 MI_SAVE_EXT_STATE_EN |
3714 MI_RESTORE_EXT_STATE_EN | 3958 MI_RESTORE_EXT_STATE_EN |
@@ -3731,7 +3975,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
3731 return; 3975 return;
3732 } 3976 }
3733 3977
3734 I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN); 3978 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
3735 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 3979 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
3736} 3980}
3737 3981
@@ -4429,7 +4673,10 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
4429 struct drm_i915_private *dev_priv = dev->dev_private; 4673 struct drm_i915_private *dev_priv = dev->dev_private;
4430 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 4674 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
4431 4675
4432 /* Required for FBC */ 4676 /*
4677 * Required for FBC
4678 * WaFbcDisableDpfcClockGating:ilk
4679 */
4433 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE | 4680 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
4434 ILK_DPFCUNIT_CLOCK_GATE_DISABLE | 4681 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
4435 ILK_DPFDUNIT_CLOCK_GATE_ENABLE; 4682 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
@@ -4466,6 +4713,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
4466 * The bit 7,8,9 of 0x42020. 4713 * The bit 7,8,9 of 0x42020.
4467 */ 4714 */
4468 if (IS_IRONLAKE_M(dev)) { 4715 if (IS_IRONLAKE_M(dev)) {
4716 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
4469 I915_WRITE(ILK_DISPLAY_CHICKEN1, 4717 I915_WRITE(ILK_DISPLAY_CHICKEN1,
4470 I915_READ(ILK_DISPLAY_CHICKEN1) | 4718 I915_READ(ILK_DISPLAY_CHICKEN1) |
4471 ILK_FBCQ_DIS); 4719 ILK_FBCQ_DIS);
@@ -4602,6 +4850,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
4602 * The bit5 and bit7 of 0x42020 4850 * The bit5 and bit7 of 0x42020
4603 * The bit14 of 0x70180 4851 * The bit14 of 0x70180
4604 * The bit14 of 0x71180 4852 * The bit14 of 0x71180
4853 *
4854 * WaFbcAsynchFlipDisableFbcQueue:snb
4605 */ 4855 */
4606 I915_WRITE(ILK_DISPLAY_CHICKEN1, 4856 I915_WRITE(ILK_DISPLAY_CHICKEN1,
4607 I915_READ(ILK_DISPLAY_CHICKEN1) | 4857 I915_READ(ILK_DISPLAY_CHICKEN1) |
@@ -4614,10 +4864,6 @@ static void gen6_init_clock_gating(struct drm_device *dev)
4614 ILK_DPARBUNIT_CLOCK_GATE_ENABLE | 4864 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
4615 ILK_DPFDUNIT_CLOCK_GATE_ENABLE); 4865 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
4616 4866
4617 /* WaMbcDriverBootEnable:snb */
4618 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
4619 GEN6_MBCTL_ENABLE_BOOT_FETCH);
4620
4621 g4x_disable_trickle_feed(dev); 4867 g4x_disable_trickle_feed(dev);
4622 4868
4623 /* The default value should be 0x200 according to docs, but the two 4869 /* The default value should be 0x200 according to docs, but the two
@@ -4713,10 +4959,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
4713 I915_WRITE(CACHE_MODE_1, 4959 I915_WRITE(CACHE_MODE_1,
4714 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 4960 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
4715 4961
4716 /* WaMbcDriverBootEnable:hsw */
4717 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
4718 GEN6_MBCTL_ENABLE_BOOT_FETCH);
4719
4720 /* WaSwitchSolVfFArbitrationPriority:hsw */ 4962 /* WaSwitchSolVfFArbitrationPriority:hsw */
4721 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 4963 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
4722 4964
@@ -4800,10 +5042,6 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
4800 5042
4801 g4x_disable_trickle_feed(dev); 5043 g4x_disable_trickle_feed(dev);
4802 5044
4803 /* WaMbcDriverBootEnable:ivb */
4804 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
4805 GEN6_MBCTL_ENABLE_BOOT_FETCH);
4806
4807 /* WaVSRefCountFullforceMissDisable:ivb */ 5045 /* WaVSRefCountFullforceMissDisable:ivb */
4808 gen7_setup_fixed_func_scheduler(dev_priv); 5046 gen7_setup_fixed_func_scheduler(dev_priv);
4809 5047
@@ -4863,11 +5101,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
4863 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 5101 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
4864 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 5102 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
4865 5103
4866 /* WaMbcDriverBootEnable:vlv */
4867 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
4868 GEN6_MBCTL_ENABLE_BOOT_FETCH);
4869
4870
4871 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock 5104 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
4872 * gating disable must be set. Failure to set it results in 5105 * gating disable must be set. Failure to set it results in
4873 * flickering pixels due to Z write ordering failures after 5106 * flickering pixels due to Z write ordering failures after
@@ -5035,7 +5268,7 @@ bool intel_display_power_enabled(struct drm_device *dev,
5035 case POWER_DOMAIN_TRANSCODER_B: 5268 case POWER_DOMAIN_TRANSCODER_B:
5036 case POWER_DOMAIN_TRANSCODER_C: 5269 case POWER_DOMAIN_TRANSCODER_C:
5037 return I915_READ(HSW_PWR_WELL_DRIVER) == 5270 return I915_READ(HSW_PWR_WELL_DRIVER) ==
5038 (HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE); 5271 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
5039 default: 5272 default:
5040 BUG(); 5273 BUG();
5041 } 5274 }
@@ -5048,17 +5281,18 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
5048 uint32_t tmp; 5281 uint32_t tmp;
5049 5282
5050 tmp = I915_READ(HSW_PWR_WELL_DRIVER); 5283 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
5051 is_enabled = tmp & HSW_PWR_WELL_STATE; 5284 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
5052 enable_requested = tmp & HSW_PWR_WELL_ENABLE; 5285 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
5053 5286
5054 if (enable) { 5287 if (enable) {
5055 if (!enable_requested) 5288 if (!enable_requested)
5056 I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE); 5289 I915_WRITE(HSW_PWR_WELL_DRIVER,
5290 HSW_PWR_WELL_ENABLE_REQUEST);
5057 5291
5058 if (!is_enabled) { 5292 if (!is_enabled) {
5059 DRM_DEBUG_KMS("Enabling power well\n"); 5293 DRM_DEBUG_KMS("Enabling power well\n");
5060 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) & 5294 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
5061 HSW_PWR_WELL_STATE), 20)) 5295 HSW_PWR_WELL_STATE_ENABLED), 20))
5062 DRM_ERROR("Timeout enabling power well\n"); 5296 DRM_ERROR("Timeout enabling power well\n");
5063 } 5297 }
5064 } else { 5298 } else {
@@ -5178,10 +5412,21 @@ void intel_init_power_well(struct drm_device *dev)
5178 5412
5179 /* We're taking over the BIOS, so clear any requests made by it since 5413 /* We're taking over the BIOS, so clear any requests made by it since
5180 * the driver is in charge now. */ 5414 * the driver is in charge now. */
5181 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE) 5415 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
5182 I915_WRITE(HSW_PWR_WELL_BIOS, 0); 5416 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
5183} 5417}
5184 5418
5419/* Disables PC8 so we can use the GMBUS and DP AUX interrupts. */
5420void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
5421{
5422 hsw_disable_package_c8(dev_priv);
5423}
5424
5425void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
5426{
5427 hsw_enable_package_c8(dev_priv);
5428}
5429
5185/* Set up chip specific power management-related functions */ 5430/* Set up chip specific power management-related functions */
5186void intel_init_pm(struct drm_device *dev) 5431void intel_init_pm(struct drm_device *dev)
5187{ 5432{
@@ -5217,8 +5462,12 @@ void intel_init_pm(struct drm_device *dev)
5217 5462
5218 /* For FIFO watermark updates */ 5463 /* For FIFO watermark updates */
5219 if (HAS_PCH_SPLIT(dev)) { 5464 if (HAS_PCH_SPLIT(dev)) {
5465 intel_setup_wm_latency(dev);
5466
5220 if (IS_GEN5(dev)) { 5467 if (IS_GEN5(dev)) {
5221 if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) 5468 if (dev_priv->wm.pri_latency[1] &&
5469 dev_priv->wm.spr_latency[1] &&
5470 dev_priv->wm.cur_latency[1])
5222 dev_priv->display.update_wm = ironlake_update_wm; 5471 dev_priv->display.update_wm = ironlake_update_wm;
5223 else { 5472 else {
5224 DRM_DEBUG_KMS("Failed to get proper latency. " 5473 DRM_DEBUG_KMS("Failed to get proper latency. "
@@ -5227,7 +5476,9 @@ void intel_init_pm(struct drm_device *dev)
5227 } 5476 }
5228 dev_priv->display.init_clock_gating = ironlake_init_clock_gating; 5477 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
5229 } else if (IS_GEN6(dev)) { 5478 } else if (IS_GEN6(dev)) {
5230 if (SNB_READ_WM0_LATENCY()) { 5479 if (dev_priv->wm.pri_latency[0] &&
5480 dev_priv->wm.spr_latency[0] &&
5481 dev_priv->wm.cur_latency[0]) {
5231 dev_priv->display.update_wm = sandybridge_update_wm; 5482 dev_priv->display.update_wm = sandybridge_update_wm;
5232 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; 5483 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
5233 } else { 5484 } else {
@@ -5237,7 +5488,9 @@ void intel_init_pm(struct drm_device *dev)
5237 } 5488 }
5238 dev_priv->display.init_clock_gating = gen6_init_clock_gating; 5489 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
5239 } else if (IS_IVYBRIDGE(dev)) { 5490 } else if (IS_IVYBRIDGE(dev)) {
5240 if (SNB_READ_WM0_LATENCY()) { 5491 if (dev_priv->wm.pri_latency[0] &&
5492 dev_priv->wm.spr_latency[0] &&
5493 dev_priv->wm.cur_latency[0]) {
5241 dev_priv->display.update_wm = ivybridge_update_wm; 5494 dev_priv->display.update_wm = ivybridge_update_wm;
5242 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; 5495 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
5243 } else { 5496 } else {
@@ -5247,7 +5500,9 @@ void intel_init_pm(struct drm_device *dev)
5247 } 5500 }
5248 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; 5501 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
5249 } else if (IS_HASWELL(dev)) { 5502 } else if (IS_HASWELL(dev)) {
5250 if (I915_READ64(MCH_SSKPD)) { 5503 if (dev_priv->wm.pri_latency[0] &&
5504 dev_priv->wm.spr_latency[0] &&
5505 dev_priv->wm.cur_latency[0]) {
5251 dev_priv->display.update_wm = haswell_update_wm; 5506 dev_priv->display.update_wm = haswell_update_wm;
5252 dev_priv->display.update_sprite_wm = 5507 dev_priv->display.update_sprite_wm =
5253 haswell_update_sprite_wm; 5508 haswell_update_sprite_wm;
@@ -5310,260 +5565,6 @@ void intel_init_pm(struct drm_device *dev)
5310 } 5565 }
5311} 5566}
5312 5567
5313static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
5314{
5315 u32 gt_thread_status_mask;
5316
5317 if (IS_HASWELL(dev_priv->dev))
5318 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
5319 else
5320 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
5321
5322 /* w/a for a sporadic read returning 0 by waiting for the GT
5323 * thread to wake up.
5324 */
5325 if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
5326 DRM_ERROR("GT thread status wait timed out\n");
5327}
5328
5329static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
5330{
5331 I915_WRITE_NOTRACE(FORCEWAKE, 0);
5332 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
5333}
5334
5335static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
5336{
5337 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0,
5338 FORCEWAKE_ACK_TIMEOUT_MS))
5339 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
5340
5341 I915_WRITE_NOTRACE(FORCEWAKE, 1);
5342 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
5343
5344 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1),
5345 FORCEWAKE_ACK_TIMEOUT_MS))
5346 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
5347
5348 /* WaRsForcewakeWaitTC0:snb */
5349 __gen6_gt_wait_for_thread_c0(dev_priv);
5350}
5351
5352static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
5353{
5354 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
5355 /* something from same cacheline, but !FORCEWAKE_MT */
5356 POSTING_READ(ECOBUS);
5357}
5358
5359static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
5360{
5361 u32 forcewake_ack;
5362
5363 if (IS_HASWELL(dev_priv->dev))
5364 forcewake_ack = FORCEWAKE_ACK_HSW;
5365 else
5366 forcewake_ack = FORCEWAKE_MT_ACK;
5367
5368 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0,
5369 FORCEWAKE_ACK_TIMEOUT_MS))
5370 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
5371
5372 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
5373 /* something from same cacheline, but !FORCEWAKE_MT */
5374 POSTING_READ(ECOBUS);
5375
5376 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL),
5377 FORCEWAKE_ACK_TIMEOUT_MS))
5378 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
5379
5380 /* WaRsForcewakeWaitTC0:ivb,hsw */
5381 __gen6_gt_wait_for_thread_c0(dev_priv);
5382}
5383
5384/*
5385 * Generally this is called implicitly by the register read function. However,
5386 * if some sequence requires the GT to not power down then this function should
5387 * be called at the beginning of the sequence followed by a call to
5388 * gen6_gt_force_wake_put() at the end of the sequence.
5389 */
5390void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
5391{
5392 unsigned long irqflags;
5393
5394 spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
5395 if (dev_priv->forcewake_count++ == 0)
5396 dev_priv->gt.force_wake_get(dev_priv);
5397 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
5398}
5399
5400void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
5401{
5402 u32 gtfifodbg;
5403 gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
5404 if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
5405 "MMIO read or write has been dropped %x\n", gtfifodbg))
5406 I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
5407}
5408
5409static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
5410{
5411 I915_WRITE_NOTRACE(FORCEWAKE, 0);
5412 /* something from same cacheline, but !FORCEWAKE */
5413 POSTING_READ(ECOBUS);
5414 gen6_gt_check_fifodbg(dev_priv);
5415}
5416
5417static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
5418{
5419 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
5420 /* something from same cacheline, but !FORCEWAKE_MT */
5421 POSTING_READ(ECOBUS);
5422 gen6_gt_check_fifodbg(dev_priv);
5423}
5424
5425/*
5426 * see gen6_gt_force_wake_get()
5427 */
5428void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
5429{
5430 unsigned long irqflags;
5431
5432 spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
5433 if (--dev_priv->forcewake_count == 0)
5434 dev_priv->gt.force_wake_put(dev_priv);
5435 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
5436}
5437
5438int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
5439{
5440 int ret = 0;
5441
5442 if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
5443 int loop = 500;
5444 u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
5445 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
5446 udelay(10);
5447 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
5448 }
5449 if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
5450 ++ret;
5451 dev_priv->gt_fifo_count = fifo;
5452 }
5453 dev_priv->gt_fifo_count--;
5454
5455 return ret;
5456}
5457
5458static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
5459{
5460 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
5461 /* something from same cacheline, but !FORCEWAKE_VLV */
5462 POSTING_READ(FORCEWAKE_ACK_VLV);
5463}
5464
5465static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
5466{
5467 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
5468 FORCEWAKE_ACK_TIMEOUT_MS))
5469 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
5470
5471 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
5472 I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
5473 _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
5474
5475 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
5476 FORCEWAKE_ACK_TIMEOUT_MS))
5477 DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
5478
5479 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) &
5480 FORCEWAKE_KERNEL),
5481 FORCEWAKE_ACK_TIMEOUT_MS))
5482 DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
5483
5484 /* WaRsForcewakeWaitTC0:vlv */
5485 __gen6_gt_wait_for_thread_c0(dev_priv);
5486}
5487
5488static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
5489{
5490 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
5491 I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
5492 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
5493 /* The below doubles as a POSTING_READ */
5494 gen6_gt_check_fifodbg(dev_priv);
5495}
5496
5497void intel_gt_sanitize(struct drm_device *dev)
5498{
5499 struct drm_i915_private *dev_priv = dev->dev_private;
5500
5501 if (IS_VALLEYVIEW(dev)) {
5502 vlv_force_wake_reset(dev_priv);
5503 } else if (INTEL_INFO(dev)->gen >= 6) {
5504 __gen6_gt_force_wake_reset(dev_priv);
5505 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
5506 __gen6_gt_force_wake_mt_reset(dev_priv);
5507 }
5508
5509 /* BIOS often leaves RC6 enabled, but disable it for hw init */
5510 if (INTEL_INFO(dev)->gen >= 6)
5511 intel_disable_gt_powersave(dev);
5512}
5513
5514void intel_gt_init(struct drm_device *dev)
5515{
5516 struct drm_i915_private *dev_priv = dev->dev_private;
5517
5518 if (IS_VALLEYVIEW(dev)) {
5519 dev_priv->gt.force_wake_get = vlv_force_wake_get;
5520 dev_priv->gt.force_wake_put = vlv_force_wake_put;
5521 } else if (IS_HASWELL(dev)) {
5522 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
5523 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
5524 } else if (IS_IVYBRIDGE(dev)) {
5525 u32 ecobus;
5526
5527 /* IVB configs may use multi-threaded forcewake */
5528
5529 /* A small trick here - if the bios hasn't configured
5530 * MT forcewake, and if the device is in RC6, then
5531 * force_wake_mt_get will not wake the device and the
5532 * ECOBUS read will return zero. Which will be
5533 * (correctly) interpreted by the test below as MT
5534 * forcewake being disabled.
5535 */
5536 mutex_lock(&dev->struct_mutex);
5537 __gen6_gt_force_wake_mt_get(dev_priv);
5538 ecobus = I915_READ_NOTRACE(ECOBUS);
5539 __gen6_gt_force_wake_mt_put(dev_priv);
5540 mutex_unlock(&dev->struct_mutex);
5541
5542 if (ecobus & FORCEWAKE_MT_ENABLE) {
5543 dev_priv->gt.force_wake_get =
5544 __gen6_gt_force_wake_mt_get;
5545 dev_priv->gt.force_wake_put =
5546 __gen6_gt_force_wake_mt_put;
5547 } else {
5548 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
5549 DRM_INFO("when using vblank-synced partial screen updates.\n");
5550 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
5551 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
5552 }
5553 } else if (IS_GEN6(dev)) {
5554 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
5555 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
5556 }
5557}
5558
5559void intel_pm_init(struct drm_device *dev)
5560{
5561 struct drm_i915_private *dev_priv = dev->dev_private;
5562
5563 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
5564 intel_gen6_powersave_work);
5565}
5566
5567int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val) 5568int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
5568{ 5569{
5569 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 5570 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -5666,3 +5667,11 @@ int vlv_freq_opcode(int ddr_freq, int val)
5666 return val; 5667 return val;
5667} 5668}
5668 5669
5670void intel_pm_init(struct drm_device *dev)
5671{
5672 struct drm_i915_private *dev_priv = dev->dev_private;
5673
5674 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
5675 intel_gen6_powersave_work);
5676}
5677
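The large block removed above (forcewake, GT FIFO and gt.force_wake_* setup) is not dropped: the same logic reappears in the new intel_uncore.c file added further down in this patch. The pattern it carries over is a spinlock-protected reference count, so nested callers keep the GT awake until the last one releases it. A minimal sketch of that pattern, using only the names visible in the removed hunk (simplified; locking and callbacks as shown above):

void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
        if (dev_priv->forcewake_count++ == 0)
                dev_priv->gt.force_wake_get(dev_priv);  /* first user wakes the GT */
        spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}

void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
        if (--dev_priv->forcewake_count == 0)
                dev_priv->gt.force_wake_put(dev_priv);  /* last user lets it sleep */
        spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
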
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 079ef0129e74..f05cceac5a52 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -440,14 +440,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
440 * registers with the above sequence (the readback of the HEAD registers 440 * registers with the above sequence (the readback of the HEAD registers
441 * also enforces ordering), otherwise the hw might lose the new ring 441 * also enforces ordering), otherwise the hw might lose the new ring
442 * register values. */ 442 * register values. */
443 I915_WRITE_START(ring, obj->gtt_offset); 443 I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
444 I915_WRITE_CTL(ring, 444 I915_WRITE_CTL(ring,
445 ((ring->size - PAGE_SIZE) & RING_NR_PAGES) 445 ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
446 | RING_VALID); 446 | RING_VALID);
447 447
448 /* If the head is still not zero, the ring is dead */ 448 /* If the head is still not zero, the ring is dead */
449 if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 && 449 if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
450 I915_READ_START(ring) == obj->gtt_offset && 450 I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
451 (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) { 451 (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
452 DRM_ERROR("%s initialization failed " 452 DRM_ERROR("%s initialization failed "
453 "ctl %08x head %08x tail %08x start %08x\n", 453 "ctl %08x head %08x tail %08x start %08x\n",
@@ -501,11 +501,11 @@ init_pipe_control(struct intel_ring_buffer *ring)
501 501
502 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 502 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
503 503
504 ret = i915_gem_object_pin(obj, 4096, true, false); 504 ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
505 if (ret) 505 if (ret)
506 goto err_unref; 506 goto err_unref;
507 507
508 pc->gtt_offset = obj->gtt_offset; 508 pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
509 pc->cpu_page = kmap(sg_page(obj->pages->sgl)); 509 pc->cpu_page = kmap(sg_page(obj->pages->sgl));
510 if (pc->cpu_page == NULL) { 510 if (pc->cpu_page == NULL) {
511 ret = -ENOMEM; 511 ret = -ENOMEM;
@@ -836,11 +836,8 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
836 return false; 836 return false;
837 837
838 spin_lock_irqsave(&dev_priv->irq_lock, flags); 838 spin_lock_irqsave(&dev_priv->irq_lock, flags);
839 if (ring->irq_refcount.gt++ == 0) { 839 if (ring->irq_refcount++ == 0)
840 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; 840 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
841 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
842 POSTING_READ(GTIMR);
843 }
844 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 841 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
845 842
846 return true; 843 return true;
@@ -854,11 +851,8 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
854 unsigned long flags; 851 unsigned long flags;
855 852
856 spin_lock_irqsave(&dev_priv->irq_lock, flags); 853 spin_lock_irqsave(&dev_priv->irq_lock, flags);
857 if (--ring->irq_refcount.gt == 0) { 854 if (--ring->irq_refcount == 0)
858 dev_priv->gt_irq_mask |= ring->irq_enable_mask; 855 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
859 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
860 POSTING_READ(GTIMR);
861 }
862 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 856 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
863} 857}
864 858
@@ -873,7 +867,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
873 return false; 867 return false;
874 868
875 spin_lock_irqsave(&dev_priv->irq_lock, flags); 869 spin_lock_irqsave(&dev_priv->irq_lock, flags);
876 if (ring->irq_refcount.gt++ == 0) { 870 if (ring->irq_refcount++ == 0) {
877 dev_priv->irq_mask &= ~ring->irq_enable_mask; 871 dev_priv->irq_mask &= ~ring->irq_enable_mask;
878 I915_WRITE(IMR, dev_priv->irq_mask); 872 I915_WRITE(IMR, dev_priv->irq_mask);
879 POSTING_READ(IMR); 873 POSTING_READ(IMR);
@@ -891,7 +885,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
891 unsigned long flags; 885 unsigned long flags;
892 886
893 spin_lock_irqsave(&dev_priv->irq_lock, flags); 887 spin_lock_irqsave(&dev_priv->irq_lock, flags);
894 if (--ring->irq_refcount.gt == 0) { 888 if (--ring->irq_refcount == 0) {
895 dev_priv->irq_mask |= ring->irq_enable_mask; 889 dev_priv->irq_mask |= ring->irq_enable_mask;
896 I915_WRITE(IMR, dev_priv->irq_mask); 890 I915_WRITE(IMR, dev_priv->irq_mask);
897 POSTING_READ(IMR); 891 POSTING_READ(IMR);
@@ -910,7 +904,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
910 return false; 904 return false;
911 905
912 spin_lock_irqsave(&dev_priv->irq_lock, flags); 906 spin_lock_irqsave(&dev_priv->irq_lock, flags);
913 if (ring->irq_refcount.gt++ == 0) { 907 if (ring->irq_refcount++ == 0) {
914 dev_priv->irq_mask &= ~ring->irq_enable_mask; 908 dev_priv->irq_mask &= ~ring->irq_enable_mask;
915 I915_WRITE16(IMR, dev_priv->irq_mask); 909 I915_WRITE16(IMR, dev_priv->irq_mask);
916 POSTING_READ16(IMR); 910 POSTING_READ16(IMR);
@@ -928,7 +922,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
928 unsigned long flags; 922 unsigned long flags;
929 923
930 spin_lock_irqsave(&dev_priv->irq_lock, flags); 924 spin_lock_irqsave(&dev_priv->irq_lock, flags);
931 if (--ring->irq_refcount.gt == 0) { 925 if (--ring->irq_refcount == 0) {
932 dev_priv->irq_mask |= ring->irq_enable_mask; 926 dev_priv->irq_mask |= ring->irq_enable_mask;
933 I915_WRITE16(IMR, dev_priv->irq_mask); 927 I915_WRITE16(IMR, dev_priv->irq_mask);
934 POSTING_READ16(IMR); 928 POSTING_READ16(IMR);
@@ -1033,16 +1027,14 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
1033 gen6_gt_force_wake_get(dev_priv); 1027 gen6_gt_force_wake_get(dev_priv);
1034 1028
1035 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1029 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1036 if (ring->irq_refcount.gt++ == 0) { 1030 if (ring->irq_refcount++ == 0) {
1037 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) 1031 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
1038 I915_WRITE_IMR(ring, 1032 I915_WRITE_IMR(ring,
1039 ~(ring->irq_enable_mask | 1033 ~(ring->irq_enable_mask |
1040 GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); 1034 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
1041 else 1035 else
1042 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 1036 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1043 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; 1037 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
1044 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1045 POSTING_READ(GTIMR);
1046 } 1038 }
1047 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1039 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1048 1040
@@ -1057,15 +1049,13 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
1057 unsigned long flags; 1049 unsigned long flags;
1058 1050
1059 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1051 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1060 if (--ring->irq_refcount.gt == 0) { 1052 if (--ring->irq_refcount == 0) {
1061 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) 1053 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
1062 I915_WRITE_IMR(ring, 1054 I915_WRITE_IMR(ring,
1063 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); 1055 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
1064 else 1056 else
1065 I915_WRITE_IMR(ring, ~0); 1057 I915_WRITE_IMR(ring, ~0);
1066 dev_priv->gt_irq_mask |= ring->irq_enable_mask; 1058 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
1067 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1068 POSTING_READ(GTIMR);
1069 } 1059 }
1070 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1060 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1071 1061
@@ -1082,14 +1072,12 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring)
1082 if (!dev->irq_enabled) 1072 if (!dev->irq_enabled)
1083 return false; 1073 return false;
1084 1074
1085 spin_lock_irqsave(&dev_priv->rps.lock, flags); 1075 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1086 if (ring->irq_refcount.pm++ == 0) { 1076 if (ring->irq_refcount++ == 0) {
1087 u32 pm_imr = I915_READ(GEN6_PMIMR);
1088 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 1077 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1089 I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask); 1078 snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
1090 POSTING_READ(GEN6_PMIMR);
1091 } 1079 }
1092 spin_unlock_irqrestore(&dev_priv->rps.lock, flags); 1080 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1093 1081
1094 return true; 1082 return true;
1095} 1083}
@@ -1104,14 +1092,12 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
1104 if (!dev->irq_enabled) 1092 if (!dev->irq_enabled)
1105 return; 1093 return;
1106 1094
1107 spin_lock_irqsave(&dev_priv->rps.lock, flags); 1095 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1108 if (--ring->irq_refcount.pm == 0) { 1096 if (--ring->irq_refcount == 0) {
1109 u32 pm_imr = I915_READ(GEN6_PMIMR);
1110 I915_WRITE_IMR(ring, ~0); 1097 I915_WRITE_IMR(ring, ~0);
1111 I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask); 1098 snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
1112 POSTING_READ(GEN6_PMIMR);
1113 } 1099 }
1114 spin_unlock_irqrestore(&dev_priv->rps.lock, flags); 1100 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1115} 1101}
1116 1102
1117static int 1103static int
@@ -1156,7 +1142,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
1156 intel_ring_advance(ring); 1142 intel_ring_advance(ring);
1157 } else { 1143 } else {
1158 struct drm_i915_gem_object *obj = ring->private; 1144 struct drm_i915_gem_object *obj = ring->private;
1159 u32 cs_offset = obj->gtt_offset; 1145 u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
1160 1146
1161 if (len > I830_BATCH_LIMIT) 1147 if (len > I830_BATCH_LIMIT)
1162 return -ENOSPC; 1148 return -ENOSPC;
@@ -1236,12 +1222,12 @@ static int init_status_page(struct intel_ring_buffer *ring)
1236 1222
1237 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 1223 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1238 1224
1239 ret = i915_gem_object_pin(obj, 4096, true, false); 1225 ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
1240 if (ret != 0) { 1226 if (ret != 0) {
1241 goto err_unref; 1227 goto err_unref;
1242 } 1228 }
1243 1229
1244 ring->status_page.gfx_addr = obj->gtt_offset; 1230 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
1245 ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); 1231 ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
1246 if (ring->status_page.page_addr == NULL) { 1232 if (ring->status_page.page_addr == NULL) {
1247 ret = -ENOMEM; 1233 ret = -ENOMEM;
@@ -1319,7 +1305,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1319 1305
1320 ring->obj = obj; 1306 ring->obj = obj;
1321 1307
1322 ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false); 1308 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
1323 if (ret) 1309 if (ret)
1324 goto err_unref; 1310 goto err_unref;
1325 1311
@@ -1328,7 +1314,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1328 goto err_unpin; 1314 goto err_unpin;
1329 1315
1330 ring->virtual_start = 1316 ring->virtual_start =
1331 ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset, 1317 ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
1332 ring->size); 1318 ring->size);
1333 if (ring->virtual_start == NULL) { 1319 if (ring->virtual_start == NULL) {
1334 DRM_ERROR("Failed to map ringbuffer.\n"); 1320 DRM_ERROR("Failed to map ringbuffer.\n");
@@ -1606,6 +1592,8 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1606 if (INTEL_INFO(ring->dev)->gen >= 6) { 1592 if (INTEL_INFO(ring->dev)->gen >= 6) {
1607 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); 1593 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
1608 I915_WRITE(RING_SYNC_1(ring->mmio_base), 0); 1594 I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
1595 if (HAS_VEBOX(ring->dev))
1596 I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
1609 } 1597 }
1610 1598
1611 ring->set_seqno(ring, seqno); 1599 ring->set_seqno(ring, seqno);
@@ -1840,7 +1828,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1840 return -ENOMEM; 1828 return -ENOMEM;
1841 } 1829 }
1842 1830
1843 ret = i915_gem_object_pin(obj, 0, true, false); 1831 ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
1844 if (ret != 0) { 1832 if (ret != 0) {
1845 drm_gem_object_unreference(&obj->base); 1833 drm_gem_object_unreference(&obj->base);
1846 DRM_ERROR("Failed to ping batch bo\n"); 1834 DRM_ERROR("Failed to ping batch bo\n");
@@ -2020,8 +2008,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
2020 ring->add_request = gen6_add_request; 2008 ring->add_request = gen6_add_request;
2021 ring->get_seqno = gen6_ring_get_seqno; 2009 ring->get_seqno = gen6_ring_get_seqno;
2022 ring->set_seqno = ring_set_seqno; 2010 ring->set_seqno = ring_set_seqno;
2023 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT | 2011 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2024 PM_VEBOX_CS_ERROR_INTERRUPT;
2025 ring->irq_get = hsw_vebox_get_irq; 2012 ring->irq_get = hsw_vebox_get_irq;
2026 ring->irq_put = hsw_vebox_put_irq; 2013 ring->irq_put = hsw_vebox_put_irq;
2027 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2014 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
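The ring interrupt paths above all collapse to the same shape: one irq_refcount per ring, taken and released under dev_priv->irq_lock, with the IMR bookkeeping delegated to helpers (ilk_enable_gt_irq/ilk_disable_gt_irq for the GT rings, snb_enable_pm_irq/snb_disable_pm_irq for the VEBOX PM interrupt). A condensed sketch of that shape; the function names here are illustrative and the helpers are defined elsewhere in the patch set, not in this section:

static bool ring_get_irq(struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0)          /* first waiter unmasks the interrupt */
                ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        return true;
}

static void ring_put_irq(struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0)          /* last waiter masks it again */
                ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
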
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 799f04c9da45..432ad5311ba6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -33,11 +33,12 @@ struct intel_hw_status_page {
33#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base)) 33#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
34#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) 34#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
35 35
36#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base)) 36enum intel_ring_hangcheck_action {
37#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base)) 37 HANGCHECK_WAIT,
38#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base)) 38 HANGCHECK_ACTIVE,
39 39 HANGCHECK_KICK,
40enum intel_ring_hangcheck_action { wait, active, kick, hung }; 40 HANGCHECK_HUNG,
41};
41 42
42struct intel_ring_hangcheck { 43struct intel_ring_hangcheck {
43 bool deadlock; 44 bool deadlock;
@@ -78,10 +79,7 @@ struct intel_ring_buffer {
78 */ 79 */
79 u32 last_retired_head; 80 u32 last_retired_head;
80 81
81 struct { 82 unsigned irq_refcount; /* protected by dev_priv->irq_lock */
82 u32 gt; /* protected by dev_priv->irq_lock */
83 u32 pm; /* protected by dev_priv->rps.lock (sucks) */
84 } irq_refcount;
85 u32 irq_enable_mask; /* bitmask to enable ring interrupt */ 83 u32 irq_enable_mask; /* bitmask to enable ring interrupt */
86 u32 trace_irq_seqno; 84 u32 trace_irq_seqno;
87 u32 sync_seqno[I915_NUM_RINGS-1]; 85 u32 sync_seqno[I915_NUM_RINGS-1];
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 2628d5622449..317e058fb3cf 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -202,15 +202,14 @@ struct intel_sdvo_connector {
202 u32 cur_dot_crawl, max_dot_crawl; 202 u32 cur_dot_crawl, max_dot_crawl;
203}; 203};
204 204
205static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder) 205static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder)
206{ 206{
207 return container_of(encoder, struct intel_sdvo, base.base); 207 return container_of(encoder, struct intel_sdvo, base);
208} 208}
209 209
210static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector) 210static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
211{ 211{
212 return container_of(intel_attached_encoder(connector), 212 return to_sdvo(intel_attached_encoder(connector));
213 struct intel_sdvo, base);
214} 213}
215 214
216static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector) 215static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
@@ -539,7 +538,8 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
539 &status)) 538 &status))
540 goto log_fail; 539 goto log_fail;
541 540
542 while (status == SDVO_CMD_STATUS_PENDING && --retry) { 541 while ((status == SDVO_CMD_STATUS_PENDING ||
542 status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
543 if (retry < 10) 543 if (retry < 10)
544 msleep(15); 544 msleep(15);
545 else 545 else
@@ -964,30 +964,32 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
964static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, 964static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
965 const struct drm_display_mode *adjusted_mode) 965 const struct drm_display_mode *adjusted_mode)
966{ 966{
967 struct dip_infoframe avi_if = { 967 uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
968 .type = DIP_TYPE_AVI, 968 struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
969 .ver = DIP_VERSION_AVI, 969 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
970 .len = DIP_LEN_AVI, 970 union hdmi_infoframe frame;
971 }; 971 int ret;
972 uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)]; 972 ssize_t len;
973 struct intel_crtc *intel_crtc = to_intel_crtc(intel_sdvo->base.base.crtc); 973
974 ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
975 adjusted_mode);
976 if (ret < 0) {
977 DRM_ERROR("couldn't fill AVI infoframe\n");
978 return false;
979 }
974 980
975 if (intel_sdvo->rgb_quant_range_selectable) { 981 if (intel_sdvo->rgb_quant_range_selectable) {
976 if (intel_crtc->config.limited_color_range) 982 if (intel_crtc->config.limited_color_range)
977 avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED; 983 frame.avi.quantization_range =
984 HDMI_QUANTIZATION_RANGE_LIMITED;
978 else 985 else
979 avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL; 986 frame.avi.quantization_range =
987 HDMI_QUANTIZATION_RANGE_FULL;
980 } 988 }
981 989
982 avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode); 990 len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data));
983 991 if (len < 0)
984 intel_dip_infoframe_csum(&avi_if); 992 return false;
985
986 /* sdvo spec says that the ecc is handled by the hw, and it looks like
987 * we must not send the ecc field, either. */
988 memcpy(sdvo_data, &avi_if, 3);
989 sdvo_data[3] = avi_if.checksum;
990 memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
991 993
992 return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF, 994 return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
993 SDVO_HBUF_TX_VSYNC, 995 SDVO_HBUF_TX_VSYNC,
@@ -1084,7 +1086,7 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
1084static bool intel_sdvo_compute_config(struct intel_encoder *encoder, 1086static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1085 struct intel_crtc_config *pipe_config) 1087 struct intel_crtc_config *pipe_config)
1086{ 1088{
1087 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); 1089 struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
1088 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; 1090 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
1089 struct drm_display_mode *mode = &pipe_config->requested_mode; 1091 struct drm_display_mode *mode = &pipe_config->requested_mode;
1090 1092
@@ -1154,7 +1156,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
1154 struct drm_display_mode *adjusted_mode = 1156 struct drm_display_mode *adjusted_mode =
1155 &intel_crtc->config.adjusted_mode; 1157 &intel_crtc->config.adjusted_mode;
1156 struct drm_display_mode *mode = &intel_crtc->config.requested_mode; 1158 struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
1157 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&intel_encoder->base); 1159 struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
1158 u32 sdvox; 1160 u32 sdvox;
1159 struct intel_sdvo_in_out_map in_out; 1161 struct intel_sdvo_in_out_map in_out;
1160 struct intel_sdvo_dtd input_dtd, output_dtd; 1162 struct intel_sdvo_dtd input_dtd, output_dtd;
@@ -1292,7 +1294,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
1292{ 1294{
1293 struct drm_device *dev = encoder->base.dev; 1295 struct drm_device *dev = encoder->base.dev;
1294 struct drm_i915_private *dev_priv = dev->dev_private; 1296 struct drm_i915_private *dev_priv = dev->dev_private;
1295 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); 1297 struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
1296 u16 active_outputs = 0; 1298 u16 active_outputs = 0;
1297 u32 tmp; 1299 u32 tmp;
1298 1300
@@ -1315,7 +1317,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
1315{ 1317{
1316 struct drm_device *dev = encoder->base.dev; 1318 struct drm_device *dev = encoder->base.dev;
1317 struct drm_i915_private *dev_priv = dev->dev_private; 1319 struct drm_i915_private *dev_priv = dev->dev_private;
1318 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); 1320 struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
1319 struct intel_sdvo_dtd dtd; 1321 struct intel_sdvo_dtd dtd;
1320 int encoder_pixel_multiplier = 0; 1322 int encoder_pixel_multiplier = 0;
1321 u32 flags = 0, sdvox; 1323 u32 flags = 0, sdvox;
@@ -1357,22 +1359,21 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
1357 } 1359 }
1358 1360
1359 /* Cross check the port pixel multiplier with the sdvo encoder state. */ 1361 /* Cross check the port pixel multiplier with the sdvo encoder state. */
1360 intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT, &val, 1); 1362 if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
1361 switch (val) { 1363 &val, 1)) {
1362 case SDVO_CLOCK_RATE_MULT_1X: 1364 switch (val) {
1363 encoder_pixel_multiplier = 1; 1365 case SDVO_CLOCK_RATE_MULT_1X:
1364 break; 1366 encoder_pixel_multiplier = 1;
1365 case SDVO_CLOCK_RATE_MULT_2X: 1367 break;
1366 encoder_pixel_multiplier = 2; 1368 case SDVO_CLOCK_RATE_MULT_2X:
1367 break; 1369 encoder_pixel_multiplier = 2;
1368 case SDVO_CLOCK_RATE_MULT_4X: 1370 break;
1369 encoder_pixel_multiplier = 4; 1371 case SDVO_CLOCK_RATE_MULT_4X:
1370 break; 1372 encoder_pixel_multiplier = 4;
1373 break;
1374 }
1371 } 1375 }
1372 1376
1373 if(HAS_PCH_SPLIT(dev))
1374 return; /* no pixel multiplier readout support yet */
1375
1376 WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier, 1377 WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
1377 "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n", 1378 "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
1378 pipe_config->pixel_multiplier, encoder_pixel_multiplier); 1379 pipe_config->pixel_multiplier, encoder_pixel_multiplier);
@@ -1381,7 +1382,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
1381static void intel_disable_sdvo(struct intel_encoder *encoder) 1382static void intel_disable_sdvo(struct intel_encoder *encoder)
1382{ 1383{
1383 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 1384 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
1384 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); 1385 struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
1385 u32 temp; 1386 u32 temp;
1386 1387
1387 intel_sdvo_set_active_outputs(intel_sdvo, 0); 1388 intel_sdvo_set_active_outputs(intel_sdvo, 0);
@@ -1423,7 +1424,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
1423{ 1424{
1424 struct drm_device *dev = encoder->base.dev; 1425 struct drm_device *dev = encoder->base.dev;
1425 struct drm_i915_private *dev_priv = dev->dev_private; 1426 struct drm_i915_private *dev_priv = dev->dev_private;
1426 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); 1427 struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
1427 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 1428 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
1428 u32 temp; 1429 u32 temp;
1429 bool input1, input2; 1430 bool input1, input2;
@@ -1584,7 +1585,7 @@ static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
1584 1585
1585static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder) 1586static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
1586{ 1587{
1587 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); 1588 struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
1588 1589
1589 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, 1590 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
1590 &intel_sdvo->hotplug_active, 2); 1591 &intel_sdvo->hotplug_active, 2);
@@ -1697,6 +1698,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1697 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); 1698 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
1698 enum drm_connector_status ret; 1699 enum drm_connector_status ret;
1699 1700
1701 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1702 connector->base.id, drm_get_connector_name(connector));
1703
1700 if (!intel_sdvo_get_value(intel_sdvo, 1704 if (!intel_sdvo_get_value(intel_sdvo,
1701 SDVO_CMD_GET_ATTACHED_DISPLAYS, 1705 SDVO_CMD_GET_ATTACHED_DISPLAYS,
1702 &response, 2)) 1706 &response, 2))
@@ -2188,7 +2192,7 @@ static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs
2188 2192
2189static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) 2193static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
2190{ 2194{
2191 struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); 2195 struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder));
2192 2196
2193 if (intel_sdvo->sdvo_lvds_fixed_mode != NULL) 2197 if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
2194 drm_mode_destroy(encoder->dev, 2198 drm_mode_destroy(encoder->dev,
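The SDVO AVI infoframe path above drops the hand-rolled dip_infoframe packing in favour of the shared HDMI infoframe helpers, so type, version, length, CEA VIC and checksum all come from one place. A sketch of the resulting flow using the calls visible in the hunk; the wrapper name and the elided quantization-range tweak are illustrative only:

static bool sdvo_fill_avi_infoframe(struct intel_sdvo *intel_sdvo,
                                    const struct drm_display_mode *adjusted_mode)
{
        uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
        union hdmi_infoframe frame;
        ssize_t len;

        /* fill the AVI infoframe (incl. VIC) from the adjusted mode */
        if (drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, adjusted_mode) < 0)
                return false;

        /* per-encoder tweaks, e.g. frame.avi.quantization_range, go here */

        /* pack header and payload and compute the checksum */
        len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data));
        if (len < 0)
                return false;

        /* sdvo_data is then handed to intel_sdvo_write_infoframe(),
         * exactly as in the hunk above */
        return true;
}
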
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 1fa5612a4572..78b621cdd108 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -38,7 +38,8 @@
38#include "i915_drv.h" 38#include "i915_drv.h"
39 39
40static void 40static void
41vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb, 41vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
42 struct drm_framebuffer *fb,
42 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, 43 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
43 unsigned int crtc_w, unsigned int crtc_h, 44 unsigned int crtc_w, unsigned int crtc_h,
44 uint32_t x, uint32_t y, 45 uint32_t x, uint32_t y,
@@ -108,14 +109,15 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
108 109
109 sprctl |= SP_ENABLE; 110 sprctl |= SP_ENABLE;
110 111
112 intel_update_sprite_watermarks(dplane, crtc, src_w, pixel_size, true,
113 src_w != crtc_w || src_h != crtc_h);
114
111 /* Sizes are 0 based */ 115 /* Sizes are 0 based */
112 src_w--; 116 src_w--;
113 src_h--; 117 src_h--;
114 crtc_w--; 118 crtc_w--;
115 crtc_h--; 119 crtc_h--;
116 120
117 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
118
119 I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]); 121 I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
120 I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x); 122 I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
121 123
@@ -133,13 +135,13 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
133 135
134 I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w); 136 I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
135 I915_WRITE(SPCNTR(pipe, plane), sprctl); 137 I915_WRITE(SPCNTR(pipe, plane), sprctl);
136 I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset + 138 I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
137 sprsurf_offset); 139 sprsurf_offset);
138 POSTING_READ(SPSURF(pipe, plane)); 140 POSTING_READ(SPSURF(pipe, plane));
139} 141}
140 142
141static void 143static void
142vlv_disable_plane(struct drm_plane *dplane) 144vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
143{ 145{
144 struct drm_device *dev = dplane->dev; 146 struct drm_device *dev = dplane->dev;
145 struct drm_i915_private *dev_priv = dev->dev_private; 147 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -152,6 +154,8 @@ vlv_disable_plane(struct drm_plane *dplane)
152 /* Activate double buffered register update */ 154 /* Activate double buffered register update */
153 I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0); 155 I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0);
154 POSTING_READ(SPSURF(pipe, plane)); 156 POSTING_READ(SPSURF(pipe, plane));
157
158 intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
155} 159}
156 160
157static int 161static int
@@ -206,7 +210,8 @@ vlv_get_colorkey(struct drm_plane *dplane,
206} 210}
207 211
208static void 212static void
209ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, 213ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
214 struct drm_framebuffer *fb,
210 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, 215 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
211 unsigned int crtc_w, unsigned int crtc_h, 216 unsigned int crtc_w, unsigned int crtc_h,
212 uint32_t x, uint32_t y, 217 uint32_t x, uint32_t y,
@@ -262,14 +267,15 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
262 if (IS_HASWELL(dev)) 267 if (IS_HASWELL(dev))
263 sprctl |= SPRITE_PIPE_CSC_ENABLE; 268 sprctl |= SPRITE_PIPE_CSC_ENABLE;
264 269
270 intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
271 src_w != crtc_w || src_h != crtc_h);
272
265 /* Sizes are 0 based */ 273 /* Sizes are 0 based */
266 src_w--; 274 src_w--;
267 src_h--; 275 src_h--;
268 crtc_w--; 276 crtc_w--;
269 crtc_h--; 277 crtc_h--;
270 278
271 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
272
273 /* 279 /*
274 * IVB workaround: must disable low power watermarks for at least 280 * IVB workaround: must disable low power watermarks for at least
275 * one frame before enabling scaling. LP watermarks can be re-enabled 281 * one frame before enabling scaling. LP watermarks can be re-enabled
@@ -308,7 +314,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
308 if (intel_plane->can_scale) 314 if (intel_plane->can_scale)
309 I915_WRITE(SPRSCALE(pipe), sprscale); 315 I915_WRITE(SPRSCALE(pipe), sprscale);
310 I915_WRITE(SPRCTL(pipe), sprctl); 316 I915_WRITE(SPRCTL(pipe), sprctl);
311 I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset); 317 I915_MODIFY_DISPBASE(SPRSURF(pipe),
318 i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
312 POSTING_READ(SPRSURF(pipe)); 319 POSTING_READ(SPRSURF(pipe));
313 320
314 /* potentially re-enable LP watermarks */ 321 /* potentially re-enable LP watermarks */
@@ -317,7 +324,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
317} 324}
318 325
319static void 326static void
320ivb_disable_plane(struct drm_plane *plane) 327ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
321{ 328{
322 struct drm_device *dev = plane->dev; 329 struct drm_device *dev = plane->dev;
323 struct drm_i915_private *dev_priv = dev->dev_private; 330 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -335,7 +342,7 @@ ivb_disable_plane(struct drm_plane *plane)
335 342
336 dev_priv->sprite_scaling_enabled &= ~(1 << pipe); 343 dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
337 344
338 intel_update_sprite_watermarks(dev, pipe, 0, 0, false); 345 intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
339 346
340 /* potentially re-enable LP watermarks */ 347 /* potentially re-enable LP watermarks */
341 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled) 348 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
@@ -397,7 +404,8 @@ ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
397} 404}
398 405
399static void 406static void
400ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, 407ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
408 struct drm_framebuffer *fb,
401 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, 409 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
402 unsigned int crtc_w, unsigned int crtc_h, 410 unsigned int crtc_w, unsigned int crtc_h,
403 uint32_t x, uint32_t y, 411 uint32_t x, uint32_t y,
@@ -449,14 +457,15 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
449 dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */ 457 dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
450 dvscntr |= DVS_ENABLE; 458 dvscntr |= DVS_ENABLE;
451 459
460 intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
461 src_w != crtc_w || src_h != crtc_h);
462
452 /* Sizes are 0 based */ 463 /* Sizes are 0 based */
453 src_w--; 464 src_w--;
454 src_h--; 465 src_h--;
455 crtc_w--; 466 crtc_w--;
456 crtc_h--; 467 crtc_h--;
457 468
458 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
459
460 dvsscale = 0; 469 dvsscale = 0;
461 if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h) 470 if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
462 dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; 471 dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
@@ -478,12 +487,13 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
478 I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); 487 I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
479 I915_WRITE(DVSSCALE(pipe), dvsscale); 488 I915_WRITE(DVSSCALE(pipe), dvsscale);
480 I915_WRITE(DVSCNTR(pipe), dvscntr); 489 I915_WRITE(DVSCNTR(pipe), dvscntr);
481 I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset); 490 I915_MODIFY_DISPBASE(DVSSURF(pipe),
491 i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
482 POSTING_READ(DVSSURF(pipe)); 492 POSTING_READ(DVSSURF(pipe));
483} 493}
484 494
485static void 495static void
486ilk_disable_plane(struct drm_plane *plane) 496ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
487{ 497{
488 struct drm_device *dev = plane->dev; 498 struct drm_device *dev = plane->dev;
489 struct drm_i915_private *dev_priv = dev->dev_private; 499 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -496,6 +506,8 @@ ilk_disable_plane(struct drm_plane *plane)
496 /* Flush double buffered register updates */ 506 /* Flush double buffered register updates */
497 I915_MODIFY_DISPBASE(DVSSURF(pipe), 0); 507 I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
498 POSTING_READ(DVSSURF(pipe)); 508 POSTING_READ(DVSSURF(pipe));
509
510 intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
499} 511}
500 512
501static void 513static void
@@ -818,11 +830,11 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
818 intel_enable_primary(crtc); 830 intel_enable_primary(crtc);
819 831
820 if (visible) 832 if (visible)
821 intel_plane->update_plane(plane, fb, obj, 833 intel_plane->update_plane(plane, crtc, fb, obj,
822 crtc_x, crtc_y, crtc_w, crtc_h, 834 crtc_x, crtc_y, crtc_w, crtc_h,
823 src_x, src_y, src_w, src_h); 835 src_x, src_y, src_w, src_h);
824 else 836 else
825 intel_plane->disable_plane(plane); 837 intel_plane->disable_plane(plane, crtc);
826 838
827 if (disable_primary) 839 if (disable_primary)
828 intel_disable_primary(crtc); 840 intel_disable_primary(crtc);
@@ -855,9 +867,14 @@ intel_disable_plane(struct drm_plane *plane)
855 struct intel_plane *intel_plane = to_intel_plane(plane); 867 struct intel_plane *intel_plane = to_intel_plane(plane);
856 int ret = 0; 868 int ret = 0;
857 869
858 if (plane->crtc) 870 if (!plane->fb)
859 intel_enable_primary(plane->crtc); 871 return 0;
860 intel_plane->disable_plane(plane); 872
873 if (WARN_ON(!plane->crtc))
874 return -EINVAL;
875
876 intel_enable_primary(plane->crtc);
877 intel_plane->disable_plane(plane, plane->crtc);
861 878
862 if (!intel_plane->obj) 879 if (!intel_plane->obj)
863 goto out; 880 goto out;
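Two things worth noting in the sprite hunks above: the update_plane/disable_plane hooks now receive the crtc explicitly, and intel_update_sprite_watermarks() is called before the "Sizes are 0 based" decrements, so the watermark code sees the real source width plus a flag saying whether the sprite is scaled. In condensed form (taken from the hunks, surrounding code elided):

        intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
                                       src_w != crtc_w || src_h != crtc_h);

        /* Sizes are 0 based */
        src_w--;
        src_h--;
        crtc_w--;
        crtc_h--;
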
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 39debd80d190..f2c6d7909ae2 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -823,16 +823,14 @@ static const struct tv_mode tv_modes[] = {
823 }, 823 },
824}; 824};
825 825
826static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder) 826static struct intel_tv *enc_to_tv(struct intel_encoder *encoder)
827{ 827{
828 return container_of(encoder, struct intel_tv, base.base); 828 return container_of(encoder, struct intel_tv, base);
829} 829}
830 830
831static struct intel_tv *intel_attached_tv(struct drm_connector *connector) 831static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
832{ 832{
833 return container_of(intel_attached_encoder(connector), 833 return enc_to_tv(intel_attached_encoder(connector));
834 struct intel_tv,
835 base);
836} 834}
837 835
838static bool 836static bool
@@ -908,7 +906,7 @@ static bool
908intel_tv_compute_config(struct intel_encoder *encoder, 906intel_tv_compute_config(struct intel_encoder *encoder,
909 struct intel_crtc_config *pipe_config) 907 struct intel_crtc_config *pipe_config)
910{ 908{
911 struct intel_tv *intel_tv = enc_to_intel_tv(&encoder->base); 909 struct intel_tv *intel_tv = enc_to_tv(encoder);
912 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); 910 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
913 911
914 if (!tv_mode) 912 if (!tv_mode)
@@ -921,15 +919,12 @@ intel_tv_compute_config(struct intel_encoder *encoder,
921 return true; 919 return true;
922} 920}
923 921
924static void 922static void intel_tv_mode_set(struct intel_encoder *encoder)
925intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
926 struct drm_display_mode *adjusted_mode)
927{ 923{
928 struct drm_device *dev = encoder->dev; 924 struct drm_device *dev = encoder->base.dev;
929 struct drm_i915_private *dev_priv = dev->dev_private; 925 struct drm_i915_private *dev_priv = dev->dev_private;
930 struct drm_crtc *crtc = encoder->crtc; 926 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
931 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 927 struct intel_tv *intel_tv = enc_to_tv(encoder);
932 struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
933 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); 928 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
934 u32 tv_ctl; 929 u32 tv_ctl;
935 u32 hctl1, hctl2, hctl3; 930 u32 hctl1, hctl2, hctl3;
@@ -1305,6 +1300,10 @@ intel_tv_detect(struct drm_connector *connector, bool force)
1305 struct intel_tv *intel_tv = intel_attached_tv(connector); 1300 struct intel_tv *intel_tv = intel_attached_tv(connector);
1306 int type; 1301 int type;
1307 1302
1303 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
1304 connector->base.id, drm_get_connector_name(connector),
1305 force);
1306
1308 mode = reported_modes[0]; 1307 mode = reported_modes[0];
1309 1308
1310 if (force) { 1309 if (force) {
@@ -1483,10 +1482,6 @@ out:
1483 return ret; 1482 return ret;
1484} 1483}
1485 1484
1486static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
1487 .mode_set = intel_tv_mode_set,
1488};
1489
1490static const struct drm_connector_funcs intel_tv_connector_funcs = { 1485static const struct drm_connector_funcs intel_tv_connector_funcs = {
1491 .dpms = intel_connector_dpms, 1486 .dpms = intel_connector_dpms,
1492 .detect = intel_tv_detect, 1487 .detect = intel_tv_detect,
@@ -1619,6 +1614,7 @@ intel_tv_init(struct drm_device *dev)
1619 DRM_MODE_ENCODER_TVDAC); 1614 DRM_MODE_ENCODER_TVDAC);
1620 1615
1621 intel_encoder->compute_config = intel_tv_compute_config; 1616 intel_encoder->compute_config = intel_tv_compute_config;
1617 intel_encoder->mode_set = intel_tv_mode_set;
1622 intel_encoder->enable = intel_enable_tv; 1618 intel_encoder->enable = intel_enable_tv;
1623 intel_encoder->disable = intel_disable_tv; 1619 intel_encoder->disable = intel_disable_tv;
1624 intel_encoder->get_hw_state = intel_tv_get_hw_state; 1620 intel_encoder->get_hw_state = intel_tv_get_hw_state;
@@ -1640,7 +1636,6 @@ intel_tv_init(struct drm_device *dev)
1640 1636
1641 intel_tv->tv_format = tv_modes[initial_mode].name; 1637 intel_tv->tv_format = tv_modes[initial_mode].name;
1642 1638
1643 drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs);
1644 drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); 1639 drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
1645 connector->interlace_allowed = false; 1640 connector->interlace_allowed = false;
1646 connector->doublescan_allowed = false; 1641 connector->doublescan_allowed = false;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
new file mode 100644
index 000000000000..8f5bc869c023
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -0,0 +1,595 @@
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#include "i915_drv.h"
25#include "intel_drv.h"
26
27#define FORCEWAKE_ACK_TIMEOUT_MS 2
28
29#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
30#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))
31
32#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
33#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))
34
35#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
36#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))
37
38#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
39#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))
40
41#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
42
43
44static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
45{
46 u32 gt_thread_status_mask;
47
48 if (IS_HASWELL(dev_priv->dev))
49 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
50 else
51 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
52
53 /* w/a for a sporadic read returning 0 by waiting for the GT
54 * thread to wake up.
55 */
56 if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
57 DRM_ERROR("GT thread status wait timed out\n");
58}
59
60static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
61{
62 __raw_i915_write32(dev_priv, FORCEWAKE, 0);
63 /* something from same cacheline, but !FORCEWAKE */
64 __raw_posting_read(dev_priv, ECOBUS);
65}
66
67static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
68{
69 if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
70 FORCEWAKE_ACK_TIMEOUT_MS))
71 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
72
73 __raw_i915_write32(dev_priv, FORCEWAKE, 1);
74 /* something from same cacheline, but !FORCEWAKE */
75 __raw_posting_read(dev_priv, ECOBUS);
76
77 if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
78 FORCEWAKE_ACK_TIMEOUT_MS))
79 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
80
81 /* WaRsForcewakeWaitTC0:snb */
82 __gen6_gt_wait_for_thread_c0(dev_priv);
83}
84
85static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
86{
87 __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
88 /* something from same cacheline, but !FORCEWAKE_MT */
89 __raw_posting_read(dev_priv, ECOBUS);
90}
91
92static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
93{
94 u32 forcewake_ack;
95
96 if (IS_HASWELL(dev_priv->dev))
97 forcewake_ack = FORCEWAKE_ACK_HSW;
98 else
99 forcewake_ack = FORCEWAKE_MT_ACK;
100
101 if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
102 FORCEWAKE_ACK_TIMEOUT_MS))
103 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
104
105 __raw_i915_write32(dev_priv, FORCEWAKE_MT,
106 _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
107 /* something from same cacheline, but !FORCEWAKE_MT */
108 __raw_posting_read(dev_priv, ECOBUS);
109
110 if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
111 FORCEWAKE_ACK_TIMEOUT_MS))
112 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
113
114 /* WaRsForcewakeWaitTC0:ivb,hsw */
115 __gen6_gt_wait_for_thread_c0(dev_priv);
116}
117
118static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
119{
120 u32 gtfifodbg;
121
122 gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
123 if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
124 "MMIO read or write has been dropped %x\n", gtfifodbg))
125 __raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
126}
127
128static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
129{
130 __raw_i915_write32(dev_priv, FORCEWAKE, 0);
131 /* something from same cacheline, but !FORCEWAKE */
132 __raw_posting_read(dev_priv, ECOBUS);
133 gen6_gt_check_fifodbg(dev_priv);
134}
135
136static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
137{
138 __raw_i915_write32(dev_priv, FORCEWAKE_MT,
139 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
140 /* something from same cacheline, but !FORCEWAKE_MT */
141 __raw_posting_read(dev_priv, ECOBUS);
142 gen6_gt_check_fifodbg(dev_priv);
143}
144
145static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
146{
147 int ret = 0;
148
149 if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
150 int loop = 500;
151 u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
152 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
153 udelay(10);
154 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
155 }
156 if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
157 ++ret;
158 dev_priv->uncore.fifo_count = fifo;
159 }
160 dev_priv->uncore.fifo_count--;
161
162 return ret;
163}
164
165static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
166{
167 __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
168 _MASKED_BIT_DISABLE(0xffff));
169 /* something from same cacheline, but !FORCEWAKE_VLV */
170 __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
171}
172
173static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
174{
175 if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
176 FORCEWAKE_ACK_TIMEOUT_MS))
177 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
178
179 __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
180 _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
181 __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
182 _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
183
184 if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
185 FORCEWAKE_ACK_TIMEOUT_MS))
186 DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
187
188 if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) &
189 FORCEWAKE_KERNEL),
190 FORCEWAKE_ACK_TIMEOUT_MS))
191 DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
192
193 /* WaRsForcewakeWaitTC0:vlv */
194 __gen6_gt_wait_for_thread_c0(dev_priv);
195}
196
197static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
198{
199 __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
200 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
201 __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
202 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
203 /* The below doubles as a POSTING_READ */
204 gen6_gt_check_fifodbg(dev_priv);
205}
206
207void intel_uncore_early_sanitize(struct drm_device *dev)
208{
209 struct drm_i915_private *dev_priv = dev->dev_private;
210
211 if (HAS_FPGA_DBG_UNCLAIMED(dev))
212 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
213}
214
215void intel_uncore_init(struct drm_device *dev)
216{
217 struct drm_i915_private *dev_priv = dev->dev_private;
218
219 if (IS_VALLEYVIEW(dev)) {
220 dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
221 dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
222 } else if (IS_HASWELL(dev)) {
223 dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
224 dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
225 } else if (IS_IVYBRIDGE(dev)) {
226 u32 ecobus;
227
228 /* IVB configs may use multi-threaded forcewake */
229
230 /* A small trick here - if the bios hasn't configured
231 * MT forcewake, and if the device is in RC6, then
232 * force_wake_mt_get will not wake the device and the
233 * ECOBUS read will return zero. Which will be
234 * (correctly) interpreted by the test below as MT
235 * forcewake being disabled.
236 */
237 mutex_lock(&dev->struct_mutex);
238 __gen6_gt_force_wake_mt_get(dev_priv);
239 ecobus = __raw_i915_read32(dev_priv, ECOBUS);
240 __gen6_gt_force_wake_mt_put(dev_priv);
241 mutex_unlock(&dev->struct_mutex);
242
243 if (ecobus & FORCEWAKE_MT_ENABLE) {
244 dev_priv->uncore.funcs.force_wake_get =
245 __gen6_gt_force_wake_mt_get;
246 dev_priv->uncore.funcs.force_wake_put =
247 __gen6_gt_force_wake_mt_put;
248 } else {
249 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
250 DRM_INFO("when using vblank-synced partial screen updates.\n");
251 dev_priv->uncore.funcs.force_wake_get =
252 __gen6_gt_force_wake_get;
253 dev_priv->uncore.funcs.force_wake_put =
254 __gen6_gt_force_wake_put;
255 }
256 } else if (IS_GEN6(dev)) {
257 dev_priv->uncore.funcs.force_wake_get =
258 __gen6_gt_force_wake_get;
259 dev_priv->uncore.funcs.force_wake_put =
260 __gen6_gt_force_wake_put;
261 }
262}
263
264void intel_uncore_sanitize(struct drm_device *dev)
265{
266 struct drm_i915_private *dev_priv = dev->dev_private;
267
268 if (IS_VALLEYVIEW(dev)) {
269 vlv_force_wake_reset(dev_priv);
270 } else if (INTEL_INFO(dev)->gen >= 6) {
271 __gen6_gt_force_wake_reset(dev_priv);
272 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
273 __gen6_gt_force_wake_mt_reset(dev_priv);
274 }
275
276 /* BIOS often leaves RC6 enabled, but disable it for hw init */
277 intel_disable_gt_powersave(dev);
278}
279
280/*
281 * Generally this is called implicitly by the register read function. However,
282 * if some sequence requires the GT to not power down then this function should
283 * be called at the beginning of the sequence followed by a call to
284 * gen6_gt_force_wake_put() at the end of the sequence.
285 */
286void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
287{
288 unsigned long irqflags;
289
290 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
291 if (dev_priv->uncore.forcewake_count++ == 0)
292 dev_priv->uncore.funcs.force_wake_get(dev_priv);
293 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
294}
295
296/*
297 * see gen6_gt_force_wake_get()
298 */
299void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
300{
301 unsigned long irqflags;
302
303 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
304 if (--dev_priv->uncore.forcewake_count == 0)
305 dev_priv->uncore.funcs.force_wake_put(dev_priv);
306 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
307}
308
309/* We give fast paths for the really cool registers */
310#define NEEDS_FORCE_WAKE(dev_priv, reg) \
311 ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
312 ((reg) < 0x40000) && \
313 ((reg) != FORCEWAKE))
314
315static void
316ilk_dummy_write(struct drm_i915_private *dev_priv)
317{
318 /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
319 * the chip from rc6 before touching it for real. MI_MODE is masked,
320 * hence harmless to write 0 into. */
321 __raw_i915_write32(dev_priv, MI_MODE, 0);
322}
323
324static void
325hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
326{
327 if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
328 (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
329 DRM_ERROR("Unknown unclaimed register before writing to %x\n",
330 reg);
331 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
332 }
333}
334
335static void
336hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
337{
338 if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
339 (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
340 DRM_ERROR("Unclaimed write to %x\n", reg);
341 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
342 }
343}
344
345#define __i915_read(x) \
346u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
347 unsigned long irqflags; \
348 u##x val = 0; \
349 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
350 if (dev_priv->info->gen == 5) \
351 ilk_dummy_write(dev_priv); \
352 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
353 if (dev_priv->uncore.forcewake_count == 0) \
354 dev_priv->uncore.funcs.force_wake_get(dev_priv); \
355 val = __raw_i915_read##x(dev_priv, reg); \
356 if (dev_priv->uncore.forcewake_count == 0) \
357 dev_priv->uncore.funcs.force_wake_put(dev_priv); \
358 } else { \
359 val = __raw_i915_read##x(dev_priv, reg); \
360 } \
361 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
362 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
363 return val; \
364}
365
366__i915_read(8)
367__i915_read(16)
368__i915_read(32)
369__i915_read(64)
370#undef __i915_read
371
372#define __i915_write(x) \
373void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \
374 unsigned long irqflags; \
375 u32 __fifo_ret = 0; \
376 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
377 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
378 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
379 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
380 } \
381 if (dev_priv->info->gen == 5) \
382 ilk_dummy_write(dev_priv); \
383 hsw_unclaimed_reg_clear(dev_priv, reg); \
384 __raw_i915_write##x(dev_priv, reg, val); \
385 if (unlikely(__fifo_ret)) { \
386 gen6_gt_check_fifodbg(dev_priv); \
387 } \
388 hsw_unclaimed_reg_check(dev_priv, reg); \
389 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
390}
391__i915_write(8)
392__i915_write(16)
393__i915_write(32)
394__i915_write(64)
395#undef __i915_write
396
397static const struct register_whitelist {
398 uint64_t offset;
399 uint32_t size;
400 uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
401} whitelist[] = {
402 { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
403};
404
405int i915_reg_read_ioctl(struct drm_device *dev,
406 void *data, struct drm_file *file)
407{
408 struct drm_i915_private *dev_priv = dev->dev_private;
409 struct drm_i915_reg_read *reg = data;
410 struct register_whitelist const *entry = whitelist;
411 int i;
412
413 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
414 if (entry->offset == reg->offset &&
415 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
416 break;
417 }
418
419 if (i == ARRAY_SIZE(whitelist))
420 return -EINVAL;
421
422 switch (entry->size) {
423 case 8:
424 reg->val = I915_READ64(reg->offset);
425 break;
426 case 4:
427 reg->val = I915_READ(reg->offset);
428 break;
429 case 2:
430 reg->val = I915_READ16(reg->offset);
431 break;
432 case 1:
433 reg->val = I915_READ8(reg->offset);
434 break;
435 default:
436 WARN_ON(1);
437 return -EINVAL;
438 }
439
440 return 0;
441}
442
443static int i8xx_do_reset(struct drm_device *dev)
444{
445 struct drm_i915_private *dev_priv = dev->dev_private;
446
447 if (IS_I85X(dev))
448 return -ENODEV;
449
450 I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
451 POSTING_READ(D_STATE);
452
453 if (IS_I830(dev) || IS_845G(dev)) {
454 I915_WRITE(DEBUG_RESET_I830,
455 DEBUG_RESET_DISPLAY |
456 DEBUG_RESET_RENDER |
457 DEBUG_RESET_FULL);
458 POSTING_READ(DEBUG_RESET_I830);
459 msleep(1);
460
461 I915_WRITE(DEBUG_RESET_I830, 0);
462 POSTING_READ(DEBUG_RESET_I830);
463 }
464
465 msleep(1);
466
467 I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
468 POSTING_READ(D_STATE);
469
470 return 0;
471}
472
473static int i965_reset_complete(struct drm_device *dev)
474{
475 u8 gdrst;
476 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
477 return (gdrst & GRDOM_RESET_ENABLE) == 0;
478}
479
480static int i965_do_reset(struct drm_device *dev)
481{
482 int ret;
483
484 /*
485 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
486 * well as the reset bit (GR/bit 0). Setting the GR bit
487 * triggers the reset; when done, the hardware will clear it.
488 */
489 pci_write_config_byte(dev->pdev, I965_GDRST,
490 GRDOM_RENDER | GRDOM_RESET_ENABLE);
491 ret = wait_for(i965_reset_complete(dev), 500);
492 if (ret)
493 return ret;
494
495 /* We can't reset render&media without also resetting display ... */
496 pci_write_config_byte(dev->pdev, I965_GDRST,
497 GRDOM_MEDIA | GRDOM_RESET_ENABLE);
498
499 ret = wait_for(i965_reset_complete(dev), 500);
500 if (ret)
501 return ret;
502
503 pci_write_config_byte(dev->pdev, I965_GDRST, 0);
504
505 return 0;
506}
507
508static int ironlake_do_reset(struct drm_device *dev)
509{
510 struct drm_i915_private *dev_priv = dev->dev_private;
511 u32 gdrst;
512 int ret;
513
514 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
515 gdrst &= ~GRDOM_MASK;
516 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
517 gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
518 ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
519 if (ret)
520 return ret;
521
522 /* We can't reset render&media without also resetting display ... */
523 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
524 gdrst &= ~GRDOM_MASK;
525 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
526 gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
527 return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
528}
529
530static int gen6_do_reset(struct drm_device *dev)
531{
532 struct drm_i915_private *dev_priv = dev->dev_private;
533 int ret;
534 unsigned long irqflags;
535
536 /* Hold uncore.lock across reset to prevent any register access
537 * with forcewake not set correctly
538 */
539 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
540
541 /* Reset the chip */
542
543 /* GEN6_GDRST is not in the gt power well, no need to check
544 * for fifo space for the write or forcewake the chip for
545 * the read
546 */
547 __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);
548
549 /* Spin waiting for the device to ack the reset request */
550 ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
551
552 /* If reset with a user forcewake, try to restore, otherwise turn it off */
553 if (dev_priv->uncore.forcewake_count)
554 dev_priv->uncore.funcs.force_wake_get(dev_priv);
555 else
556 dev_priv->uncore.funcs.force_wake_put(dev_priv);
557
558 /* Restore fifo count */
559 dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
560
561 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
562 return ret;
563}
564
565int intel_gpu_reset(struct drm_device *dev)
566{
567 switch (INTEL_INFO(dev)->gen) {
568 case 7:
569 case 6: return gen6_do_reset(dev);
570 case 5: return ironlake_do_reset(dev);
571 case 4: return i965_do_reset(dev);
572 case 2: return i8xx_do_reset(dev);
573 default: return -ENODEV;
574 }
575}
576
577void intel_uncore_clear_errors(struct drm_device *dev)
578{
579 struct drm_i915_private *dev_priv = dev->dev_private;
580
581 /* XXX needs spinlock around caller's grouping */
582 if (HAS_FPGA_DBG_UNCLAIMED(dev))
583 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
584}
585
586void intel_uncore_check_errors(struct drm_device *dev)
587{
588 struct drm_i915_private *dev_priv = dev->dev_private;
589
590 if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
591 (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
592 DRM_ERROR("Unclaimed register before interrupt\n");
593 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
594 }
595}
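
The intel_uncore.c helpers above pair a per-device forcewake reference count with uncore.lock: only the 0->1 get and the 1->0 put actually call into the force_wake_get/put vfuncs, and the generated __i915_read/__i915_write accessors take a temporary reference when a register below 0x40000 is touched while the count is zero. Below is a minimal, self-contained userspace sketch of that counting pattern (the kernel uses spin_lock_irqsave rather than a mutex; fake_hw_wake/fake_hw_sleep and the other names exist only in this sketch and are not the i915 API):

/* Illustrative userspace model of the reference-counted forcewake
 * pattern used by intel_uncore.c above.  fake_hw_wake()/fake_hw_sleep()
 * stand in for the real force_wake_get/put vfuncs; all names here are
 * hypothetical. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t uncore_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int forcewake_count;

static void fake_hw_wake(void)  { puts("hw: forcewake asserted"); }
static void fake_hw_sleep(void) { puts("hw: forcewake released"); }

/* Only the 0 -> 1 transition touches the (fake) hardware. */
static void force_wake_get(void)
{
	pthread_mutex_lock(&uncore_lock);
	if (forcewake_count++ == 0)
		fake_hw_wake();
	pthread_mutex_unlock(&uncore_lock);
}

/* Only the 1 -> 0 transition releases it again. */
static void force_wake_put(void)
{
	pthread_mutex_lock(&uncore_lock);
	if (--forcewake_count == 0)
		fake_hw_sleep();
	pthread_mutex_unlock(&uncore_lock);
}

int main(void)
{
	/* A multi-register sequence brackets itself with get/put so the
	 * GT cannot power down in the middle; nested get/put pairs from
	 * individual register accesses become no-ops. */
	force_wake_get();
	force_wake_get();   /* nested: no hardware access */
	force_wake_put();
	force_wake_put();   /* last reference: hardware released */
	return 0;
}
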
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 17d0a637e4fb..6b1a87c8aac5 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -50,7 +50,6 @@ static const struct file_operations mga_driver_fops = {
50 .unlocked_ioctl = drm_ioctl, 50 .unlocked_ioctl = drm_ioctl,
51 .mmap = drm_mmap, 51 .mmap = drm_mmap,
52 .poll = drm_poll, 52 .poll = drm_poll,
53 .fasync = drm_fasync,
54#ifdef CONFIG_COMPAT 53#ifdef CONFIG_COMPAT
55 .compat_ioctl = mga_compat_ioctl, 54 .compat_ioctl = mga_compat_ioctl,
56#endif 55#endif
@@ -59,7 +58,7 @@ static const struct file_operations mga_driver_fops = {
59 58
60static struct drm_driver driver = { 59static struct drm_driver driver = {
61 .driver_features = 60 .driver_features =
62 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | 61 DRIVER_USE_AGP | DRIVER_PCI_DMA |
63 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, 62 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
64 .dev_priv_size = sizeof(drm_mga_buf_priv_t), 63 .dev_priv_size = sizeof(drm_mga_buf_priv_t),
65 .load = mga_driver_load, 64 .load = mga_driver_load,
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index 54558a01969a..ca4bc54ea214 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -149,7 +149,7 @@ typedef struct drm_mga_private {
149 unsigned int agp_size; 149 unsigned int agp_size;
150} drm_mga_private_t; 150} drm_mga_private_t;
151 151
152extern struct drm_ioctl_desc mga_ioctls[]; 152extern const struct drm_ioctl_desc mga_ioctls[];
153extern int mga_max_ioctl; 153extern int mga_max_ioctl;
154 154
155 /* mga_dma.c */ 155 /* mga_dma.c */
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index 9c145143ad0f..37cc2fb4eadd 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -1083,7 +1083,7 @@ file_priv)
1083 return 0; 1083 return 0;
1084} 1084}
1085 1085
1086struct drm_ioctl_desc mga_ioctls[] = { 1086const struct drm_ioctl_desc mga_ioctls[] = {
1087 DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1087 DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1088 DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH), 1088 DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH),
1089 DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH), 1089 DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH),
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 122b571ccc7c..fcce7b2f8011 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -81,7 +81,6 @@ static const struct file_operations mgag200_driver_fops = {
81 .unlocked_ioctl = drm_ioctl, 81 .unlocked_ioctl = drm_ioctl,
82 .mmap = mgag200_mmap, 82 .mmap = mgag200_mmap,
83 .poll = drm_poll, 83 .poll = drm_poll,
84 .fasync = drm_fasync,
85#ifdef CONFIG_COMPAT 84#ifdef CONFIG_COMPAT
86 .compat_ioctl = drm_compat_ioctl, 85 .compat_ioctl = drm_compat_ioctl,
87#endif 86#endif
@@ -89,7 +88,7 @@ static const struct file_operations mgag200_driver_fops = {
89}; 88};
90 89
91static struct drm_driver driver = { 90static struct drm_driver driver = {
92 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_USE_MTRR, 91 .driver_features = DRIVER_GEM | DRIVER_MODESET,
93 .load = mgag200_driver_load, 92 .load = mgag200_driver_load,
94 .unload = mgag200_driver_unload, 93 .unload = mgag200_driver_unload,
95 .fops = &mgag200_driver_fops, 94 .fops = &mgag200_driver_fops,
@@ -104,7 +103,7 @@ static struct drm_driver driver = {
104 .gem_free_object = mgag200_gem_free_object, 103 .gem_free_object = mgag200_gem_free_object,
105 .dumb_create = mgag200_dumb_create, 104 .dumb_create = mgag200_dumb_create,
106 .dumb_map_offset = mgag200_dumb_mmap_offset, 105 .dumb_map_offset = mgag200_dumb_mmap_offset,
107 .dumb_destroy = mgag200_dumb_destroy, 106 .dumb_destroy = drm_gem_dumb_destroy,
108}; 107};
109 108
110static struct pci_driver mgag200_pci_driver = { 109static struct pci_driver mgag200_pci_driver = {
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 12e2499d9352..baaae19332e2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -264,9 +264,6 @@ int mgag200_gem_init_object(struct drm_gem_object *obj);
264int mgag200_dumb_create(struct drm_file *file, 264int mgag200_dumb_create(struct drm_file *file,
265 struct drm_device *dev, 265 struct drm_device *dev,
266 struct drm_mode_create_dumb *args); 266 struct drm_mode_create_dumb *args);
267int mgag200_dumb_destroy(struct drm_file *file,
268 struct drm_device *dev,
269 uint32_t handle);
270void mgag200_gem_free_object(struct drm_gem_object *obj); 267void mgag200_gem_free_object(struct drm_gem_object *obj);
271int 268int
272mgag200_dumb_mmap_offset(struct drm_file *file, 269mgag200_dumb_mmap_offset(struct drm_file *file,
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 9fa5685baee0..0f8b861b10b3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -310,13 +310,6 @@ int mgag200_dumb_create(struct drm_file *file,
310 return 0; 310 return 0;
311} 311}
312 312
313int mgag200_dumb_destroy(struct drm_file *file,
314 struct drm_device *dev,
315 uint32_t handle)
316{
317 return drm_gem_handle_delete(file, handle);
318}
319
320int mgag200_gem_init_object(struct drm_gem_object *obj) 313int mgag200_gem_init_object(struct drm_gem_object *obj)
321{ 314{
322 BUG(); 315 BUG();
@@ -349,7 +342,7 @@ void mgag200_gem_free_object(struct drm_gem_object *obj)
349 342
350static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo) 343static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo)
351{ 344{
352 return bo->bo.addr_space_offset; 345 return drm_vma_node_offset_addr(&bo->bo.vma_node);
353} 346}
354 347
355int 348int
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index d70e4a92773b..07b192fe15c6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -148,7 +148,9 @@ mgag200_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
148 148
149static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) 149static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
150{ 150{
151 return 0; 151 struct mgag200_bo *mgabo = mgag200_bo(bo);
152
153 return drm_vma_node_verify_access(&mgabo->gem.vma_node, filp);
152} 154}
153 155
154static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev, 156static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -321,7 +323,6 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
321 return ret; 323 return ret;
322 } 324 }
323 325
324 mgabo->gem.driver_private = NULL;
325 mgabo->bo.bdev = &mdev->ttm.bdev; 326 mgabo->bo.bdev = &mdev->ttm.bdev;
326 mgabo->bo.bdev->dev_mapping = dev->dev_mapping; 327 mgabo->bo.bdev->dev_mapping = dev->dev_mapping;
327 328
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
new file mode 100644
index 000000000000..a06c19cc56f8
--- /dev/null
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -0,0 +1,34 @@
1
2config DRM_MSM
3 tristate "MSM DRM"
4 depends on DRM
5 depends on ARCH_MSM
6 depends on ARCH_MSM8960
7 select DRM_KMS_HELPER
8 select SHMEM
9 select TMPFS
10 default y
11 help
12 DRM/KMS driver for MSM/snapdragon.
13
14config DRM_MSM_FBDEV
15 bool "Enable legacy fbdev support for MSM modesetting driver"
16 depends on DRM_MSM
17 select FB_SYS_FILLRECT
18 select FB_SYS_COPYAREA
19 select FB_SYS_IMAGEBLIT
20 select FB_SYS_FOPS
21 default y
22 help
23 Choose this option if you have a need for the legacy fbdev
 24	  support.  Note that this support also provides the linux console
25 support on top of the MSM modesetting driver.
26
27config DRM_MSM_REGISTER_LOGGING
28 bool "MSM DRM register logging"
29 depends on DRM_MSM
30 default n
31 help
32 Compile in support for logging register reads/writes in a format
 33	  that can be parsed by the envytools demsm tool.  If enabled, register
34 logging can be switched on via msm.reglog=y module param.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
new file mode 100644
index 000000000000..e17914889e54
--- /dev/null
+++ b/drivers/gpu/drm/msm/Makefile
@@ -0,0 +1,30 @@
1ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm
2ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
3 ccflags-y += -Werror
4endif
5
6msm-y := \
7 adreno/adreno_gpu.o \
8 adreno/a3xx_gpu.o \
9 hdmi/hdmi.o \
10 hdmi/hdmi_bridge.o \
11 hdmi/hdmi_connector.o \
12 hdmi/hdmi_i2c.o \
13 hdmi/hdmi_phy_8960.o \
14 hdmi/hdmi_phy_8x60.o \
15 mdp4/mdp4_crtc.o \
16 mdp4/mdp4_dtv_encoder.o \
17 mdp4/mdp4_format.o \
18 mdp4/mdp4_irq.o \
19 mdp4/mdp4_kms.o \
20 mdp4/mdp4_plane.o \
21 msm_drv.o \
22 msm_fb.o \
23 msm_gem.o \
24 msm_gem_submit.o \
25 msm_gpu.o \
26 msm_ringbuffer.o
27
28msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
29
30obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/NOTES b/drivers/gpu/drm/msm/NOTES
new file mode 100644
index 000000000000..e036f6c1db94
--- /dev/null
+++ b/drivers/gpu/drm/msm/NOTES
@@ -0,0 +1,69 @@
1NOTES about msm drm/kms driver:
2
3In the current snapdragon SoC's, we have (at least) 3 different
4display controller blocks at play:
5 + MDP3 - ?? seems to be what is on geeksphone peak device
6 + MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410)
7 + MDSS - snapdragon 800
8
9(I don't have a completely clear picture on which display controller
10maps to which part #)
11
12Plus a handful of blocks around them for HDMI/DSI/etc output.
13
14And on gpu side of things:
15 + zero, one, or two 2d cores (z180)
16 + and either a2xx or a3xx 3d core.
17
18But, HDMI/DSI/etc blocks seem like they can be shared across multiple
19display controller blocks. And I for sure don't want to have to deal
20with N different kms devices from xf86-video-freedreno. Plus, it
21seems like we can do some clever tricks like use GPU to trigger
22pageflip after rendering completes (ie. have the kms/crtc code build
23up gpu cmdstream to update scanout and write FLUSH register after).
24
25So, the approach is one drm driver, with some modularity. Different
26'struct msm_kms' implementations, depending on display controller.
27And one or more 'struct msm_gpu' for the various different gpu sub-
28modules.
29
30(Second part is not implemented yet.  So far this is just a basic KMS
31driver, and not exposing any custom ioctls to userspace for now.)
32
33The kms module provides the plane, crtc, and encoder objects, and
34loads whatever connectors are appropriate.
35
36For MDP4, the mapping is:
37
38 plane -> PIPE{RGBn,VGn} \
39 crtc -> OVLP{n} + DMA{P,S,E} (??) |-> MDP "device"
40 encoder -> DTV/LCDC/DSI (within MDP4) /
41 connector -> HDMI/DSI/etc --> other device(s)
42
43Since the irq's that drm core mostly cares about are vblank/framedone,
44we'll let msm_mdp4_kms provide the irq install/uninstall/etc functions
45and treat the MDP4 block's irq as "the" irq. Even though the connectors
46may have their own irqs which they install themselves. For this reason
47the display controller is the "master" device.
48
49Each connector probably ends up being a separate device, just for the
50logistics of finding/mapping io region, irq, etc.  Ideally we would
51have a better way than just stashing the platform device in a global
52(ie. like DT super-node.. but I don't have any snapdragon hw yet that
53is using DT).
54
55Note that so far I've not been able to get any docs on the hw, and it
56seems that access to such docs would prevent me from working on the
57freedreno gallium driver. So there may be some mistakes in register
58names (I had to invent a few, since no sufficient hint was given in
59the downstream android fbdev driver), bitfield sizes, etc. My current
60state of understanding the registers is given in the envytools rnndb
61files at:
62
63 https://github.com/freedreno/envytools/tree/master/rnndb
64 (the mdp4/hdmi/dsi directories)
65
66These files are used both for a parser tool (in the same tree) to
67parse logged register reads/writes (both from downstream android fbdev
68driver, and this driver with register logging enabled), as well as to
69generate the register level headers.
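
As a purely hypothetical illustration of the modular split NOTES describes (one drm driver, a pluggable 'struct msm_kms' per display controller block, and one or more 'struct msm_gpu' instances), the fragment below models that split with a small vtable. None of these names are the driver's actual interface; the sketch only shows how MDP3/MDP4/MDSS differences can hide behind a few function pointers while HDMI/DSI connectors and the gpu side stay shared:

/* Hypothetical sketch of a pluggable kms backend; not the msm driver's
 * real interface. */
#include <stdio.h>

struct fake_kms;

struct fake_kms_funcs {
	void (*hw_init)(struct fake_kms *kms);
	void (*enable_vblank)(struct fake_kms *kms, int crtc);
};

struct fake_kms {
	const struct fake_kms_funcs *funcs;
	const char *name;
};

/* One backend per display controller block (MDP4-style shown). */
static void mdp4_hw_init(struct fake_kms *kms)
{
	printf("%s: program OVLP/DMA/PIPE blocks\n", kms->name);
}

static void mdp4_enable_vblank(struct fake_kms *kms, int crtc)
{
	printf("%s: unmask vblank irq for crtc %d\n", kms->name, crtc);
}

static const struct fake_kms_funcs mdp4_funcs = {
	.hw_init = mdp4_hw_init,
	.enable_vblank = mdp4_enable_vblank,
};

int main(void)
{
	/* The core driver only talks to the vtable, so connectors and
	 * the gpu side need not care which display controller is used. */
	struct fake_kms kms = { .funcs = &mdp4_funcs, .name = "mdp4" };

	kms.funcs->hw_init(&kms);
	kms.funcs->enable_vblank(&kms, 0);
	return 0;
}
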
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
new file mode 100644
index 000000000000..35463864b959
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -0,0 +1,1438 @@
1#ifndef A2XX_XML
2#define A2XX_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
17
18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark)
20
21Permission is hereby granted, free of charge, to any person obtaining
22a copy of this software and associated documentation files (the
23"Software"), to deal in the Software without restriction, including
24without limitation the rights to use, copy, modify, merge, publish,
25distribute, sublicense, and/or sell copies of the Software, and to
26permit persons to whom the Software is furnished to do so, subject to
27the following conditions:
28
29The above copyright notice and this permission notice (including the
30next paragraph) shall be included in all copies or substantial
31portions of the Software.
32
33THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
34EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
35MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
36IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
37LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
38OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
39WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40*/
41
42
43enum a2xx_rb_dither_type {
44 DITHER_PIXEL = 0,
45 DITHER_SUBPIXEL = 1,
46};
47
48enum a2xx_colorformatx {
49 COLORX_4_4_4_4 = 0,
50 COLORX_1_5_5_5 = 1,
51 COLORX_5_6_5 = 2,
52 COLORX_8 = 3,
53 COLORX_8_8 = 4,
54 COLORX_8_8_8_8 = 5,
55 COLORX_S8_8_8_8 = 6,
56 COLORX_16_FLOAT = 7,
57 COLORX_16_16_FLOAT = 8,
58 COLORX_16_16_16_16_FLOAT = 9,
59 COLORX_32_FLOAT = 10,
60 COLORX_32_32_FLOAT = 11,
61 COLORX_32_32_32_32_FLOAT = 12,
62 COLORX_2_3_3 = 13,
63 COLORX_8_8_8 = 14,
64};
65
66enum a2xx_sq_surfaceformat {
67 FMT_1_REVERSE = 0,
68 FMT_1 = 1,
69 FMT_8 = 2,
70 FMT_1_5_5_5 = 3,
71 FMT_5_6_5 = 4,
72 FMT_6_5_5 = 5,
73 FMT_8_8_8_8 = 6,
74 FMT_2_10_10_10 = 7,
75 FMT_8_A = 8,
76 FMT_8_B = 9,
77 FMT_8_8 = 10,
78 FMT_Cr_Y1_Cb_Y0 = 11,
79 FMT_Y1_Cr_Y0_Cb = 12,
80 FMT_5_5_5_1 = 13,
81 FMT_8_8_8_8_A = 14,
82 FMT_4_4_4_4 = 15,
83 FMT_10_11_11 = 16,
84 FMT_11_11_10 = 17,
85 FMT_DXT1 = 18,
86 FMT_DXT2_3 = 19,
87 FMT_DXT4_5 = 20,
88 FMT_24_8 = 22,
89 FMT_24_8_FLOAT = 23,
90 FMT_16 = 24,
91 FMT_16_16 = 25,
92 FMT_16_16_16_16 = 26,
93 FMT_16_EXPAND = 27,
94 FMT_16_16_EXPAND = 28,
95 FMT_16_16_16_16_EXPAND = 29,
96 FMT_16_FLOAT = 30,
97 FMT_16_16_FLOAT = 31,
98 FMT_16_16_16_16_FLOAT = 32,
99 FMT_32 = 33,
100 FMT_32_32 = 34,
101 FMT_32_32_32_32 = 35,
102 FMT_32_FLOAT = 36,
103 FMT_32_32_FLOAT = 37,
104 FMT_32_32_32_32_FLOAT = 38,
105 FMT_32_AS_8 = 39,
106 FMT_32_AS_8_8 = 40,
107 FMT_16_MPEG = 41,
108 FMT_16_16_MPEG = 42,
109 FMT_8_INTERLACED = 43,
110 FMT_32_AS_8_INTERLACED = 44,
111 FMT_32_AS_8_8_INTERLACED = 45,
112 FMT_16_INTERLACED = 46,
113 FMT_16_MPEG_INTERLACED = 47,
114 FMT_16_16_MPEG_INTERLACED = 48,
115 FMT_DXN = 49,
116 FMT_8_8_8_8_AS_16_16_16_16 = 50,
117 FMT_DXT1_AS_16_16_16_16 = 51,
118 FMT_DXT2_3_AS_16_16_16_16 = 52,
119 FMT_DXT4_5_AS_16_16_16_16 = 53,
120 FMT_2_10_10_10_AS_16_16_16_16 = 54,
121 FMT_10_11_11_AS_16_16_16_16 = 55,
122 FMT_11_11_10_AS_16_16_16_16 = 56,
123 FMT_32_32_32_FLOAT = 57,
124 FMT_DXT3A = 58,
125 FMT_DXT5A = 59,
126 FMT_CTX1 = 60,
127 FMT_DXT3A_AS_1_1_1_1 = 61,
128};
129
130enum a2xx_sq_ps_vtx_mode {
131 POSITION_1_VECTOR = 0,
132 POSITION_2_VECTORS_UNUSED = 1,
133 POSITION_2_VECTORS_SPRITE = 2,
134 POSITION_2_VECTORS_EDGE = 3,
135 POSITION_2_VECTORS_KILL = 4,
136 POSITION_2_VECTORS_SPRITE_KILL = 5,
137 POSITION_2_VECTORS_EDGE_KILL = 6,
138 MULTIPASS = 7,
139};
140
141enum a2xx_sq_sample_cntl {
142 CENTROIDS_ONLY = 0,
143 CENTERS_ONLY = 1,
144 CENTROIDS_AND_CENTERS = 2,
145};
146
147enum a2xx_dx_clip_space {
148 DXCLIP_OPENGL = 0,
149 DXCLIP_DIRECTX = 1,
150};
151
152enum a2xx_pa_su_sc_polymode {
153 POLY_DISABLED = 0,
154 POLY_DUALMODE = 1,
155};
156
157enum a2xx_rb_edram_mode {
158 EDRAM_NOP = 0,
159 COLOR_DEPTH = 4,
160 DEPTH_ONLY = 5,
161 EDRAM_COPY = 6,
162};
163
164enum a2xx_pa_sc_pattern_bit_order {
165 LITTLE = 0,
166 BIG = 1,
167};
168
169enum a2xx_pa_sc_auto_reset_cntl {
170 NEVER = 0,
171 EACH_PRIMITIVE = 1,
172 EACH_PACKET = 2,
173};
174
175enum a2xx_pa_pixcenter {
176 PIXCENTER_D3D = 0,
177 PIXCENTER_OGL = 1,
178};
179
180enum a2xx_pa_roundmode {
181 TRUNCATE = 0,
182 ROUND = 1,
183 ROUNDTOEVEN = 2,
184 ROUNDTOODD = 3,
185};
186
187enum a2xx_pa_quantmode {
188 ONE_SIXTEENTH = 0,
189 ONE_EIGTH = 1,
190 ONE_QUARTER = 2,
191 ONE_HALF = 3,
192 ONE = 4,
193};
194
195enum a2xx_rb_copy_sample_select {
196 SAMPLE_0 = 0,
197 SAMPLE_1 = 1,
198 SAMPLE_2 = 2,
199 SAMPLE_3 = 3,
200 SAMPLE_01 = 4,
201 SAMPLE_23 = 5,
202 SAMPLE_0123 = 6,
203};
204
205enum sq_tex_clamp {
206 SQ_TEX_WRAP = 0,
207 SQ_TEX_MIRROR = 1,
208 SQ_TEX_CLAMP_LAST_TEXEL = 2,
209 SQ_TEX_MIRROR_ONCE_LAST_TEXEL = 3,
210 SQ_TEX_CLAMP_HALF_BORDER = 4,
211 SQ_TEX_MIRROR_ONCE_HALF_BORDER = 5,
212 SQ_TEX_CLAMP_BORDER = 6,
213 SQ_TEX_MIRROR_ONCE_BORDER = 7,
214};
215
216enum sq_tex_swiz {
217 SQ_TEX_X = 0,
218 SQ_TEX_Y = 1,
219 SQ_TEX_Z = 2,
220 SQ_TEX_W = 3,
221 SQ_TEX_ZERO = 4,
222 SQ_TEX_ONE = 5,
223};
224
225enum sq_tex_filter {
226 SQ_TEX_FILTER_POINT = 0,
227 SQ_TEX_FILTER_BILINEAR = 1,
228 SQ_TEX_FILTER_BICUBIC = 2,
229};
230
231#define REG_A2XX_RBBM_PATCH_RELEASE 0x00000001
232
233#define REG_A2XX_RBBM_CNTL 0x0000003b
234
235#define REG_A2XX_RBBM_SOFT_RESET 0x0000003c
236
237#define REG_A2XX_CP_PFP_UCODE_ADDR 0x000000c0
238
239#define REG_A2XX_CP_PFP_UCODE_DATA 0x000000c1
240
241#define REG_A2XX_RBBM_PERFCOUNTER1_SELECT 0x00000395
242
243#define REG_A2XX_RBBM_PERFCOUNTER1_LO 0x00000397
244
245#define REG_A2XX_RBBM_PERFCOUNTER1_HI 0x00000398
246
247#define REG_A2XX_RBBM_DEBUG 0x0000039b
248
249#define REG_A2XX_RBBM_PM_OVERRIDE1 0x0000039c
250
251#define REG_A2XX_RBBM_PM_OVERRIDE2 0x0000039d
252
253#define REG_A2XX_RBBM_DEBUG_OUT 0x000003a0
254
255#define REG_A2XX_RBBM_DEBUG_CNTL 0x000003a1
256
257#define REG_A2XX_RBBM_READ_ERROR 0x000003b3
258
259#define REG_A2XX_RBBM_INT_CNTL 0x000003b4
260
261#define REG_A2XX_RBBM_INT_STATUS 0x000003b5
262
263#define REG_A2XX_RBBM_INT_ACK 0x000003b6
264
265#define REG_A2XX_MASTER_INT_SIGNAL 0x000003b7
266
267#define REG_A2XX_RBBM_PERIPHID1 0x000003f9
268
269#define REG_A2XX_RBBM_PERIPHID2 0x000003fa
270
271#define REG_A2XX_CP_PERFMON_CNTL 0x00000444
272
273#define REG_A2XX_CP_PERFCOUNTER_SELECT 0x00000445
274
275#define REG_A2XX_CP_PERFCOUNTER_LO 0x00000446
276
277#define REG_A2XX_CP_PERFCOUNTER_HI 0x00000447
278
279#define REG_A2XX_CP_ST_BASE 0x0000044d
280
281#define REG_A2XX_CP_ST_BUFSZ 0x0000044e
282
283#define REG_A2XX_CP_IB1_BASE 0x00000458
284
285#define REG_A2XX_CP_IB1_BUFSZ 0x00000459
286
287#define REG_A2XX_CP_IB2_BASE 0x0000045a
288
289#define REG_A2XX_CP_IB2_BUFSZ 0x0000045b
290
291#define REG_A2XX_CP_STAT 0x0000047f
292
293#define REG_A2XX_RBBM_STATUS 0x000005d0
294#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK 0x0000001f
295#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT 0
296static inline uint32_t A2XX_RBBM_STATUS_CMDFIFO_AVAIL(uint32_t val)
297{
298 return ((val) << A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT) & A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK;
299}
300#define A2XX_RBBM_STATUS_TC_BUSY 0x00000020
301#define A2XX_RBBM_STATUS_HIRQ_PENDING 0x00000100
302#define A2XX_RBBM_STATUS_CPRQ_PENDING 0x00000200
303#define A2XX_RBBM_STATUS_CFRQ_PENDING 0x00000400
304#define A2XX_RBBM_STATUS_PFRQ_PENDING 0x00000800
305#define A2XX_RBBM_STATUS_VGT_BUSY_NO_DMA 0x00001000
306#define A2XX_RBBM_STATUS_RBBM_WU_BUSY 0x00004000
307#define A2XX_RBBM_STATUS_CP_NRT_BUSY 0x00010000
308#define A2XX_RBBM_STATUS_MH_BUSY 0x00040000
309#define A2XX_RBBM_STATUS_MH_COHERENCY_BUSY 0x00080000
310#define A2XX_RBBM_STATUS_SX_BUSY 0x00200000
311#define A2XX_RBBM_STATUS_TPC_BUSY 0x00400000
312#define A2XX_RBBM_STATUS_SC_CNTX_BUSY 0x01000000
313#define A2XX_RBBM_STATUS_PA_BUSY 0x02000000
314#define A2XX_RBBM_STATUS_VGT_BUSY 0x04000000
315#define A2XX_RBBM_STATUS_SQ_CNTX17_BUSY 0x08000000
316#define A2XX_RBBM_STATUS_SQ_CNTX0_BUSY 0x10000000
317#define A2XX_RBBM_STATUS_RB_CNTX_BUSY 0x40000000
318#define A2XX_RBBM_STATUS_GUI_ACTIVE 0x80000000
319
320#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01
321#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
322#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0
323static inline uint32_t A2XX_A220_VSC_BIN_SIZE_WIDTH(uint32_t val)
324{
325 return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT) & A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK;
326}
327#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
328#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT 5
329static inline uint32_t A2XX_A220_VSC_BIN_SIZE_HEIGHT(uint32_t val)
330{
331 return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT) & A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK;
332}
333
334static inline uint32_t REG_A2XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
335
336static inline uint32_t REG_A2XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
337
338static inline uint32_t REG_A2XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; }
339
340static inline uint32_t REG_A2XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
341
342#define REG_A2XX_PC_DEBUG_CNTL 0x00000c38
343
344#define REG_A2XX_PC_DEBUG_DATA 0x00000c39
345
346#define REG_A2XX_PA_SC_VIZ_QUERY_STATUS 0x00000c44
347
348#define REG_A2XX_GRAS_DEBUG_CNTL 0x00000c80
349
350#define REG_A2XX_PA_SU_DEBUG_CNTL 0x00000c80
351
352#define REG_A2XX_GRAS_DEBUG_DATA 0x00000c81
353
354#define REG_A2XX_PA_SU_DEBUG_DATA 0x00000c81
355
356#define REG_A2XX_PA_SU_FACE_DATA 0x00000c86
357
358#define REG_A2XX_SQ_GPR_MANAGEMENT 0x00000d00
359
360#define REG_A2XX_SQ_FLOW_CONTROL 0x00000d01
361
362#define REG_A2XX_SQ_INST_STORE_MANAGMENT 0x00000d02
363
364#define REG_A2XX_SQ_DEBUG_MISC 0x00000d05
365
366#define REG_A2XX_SQ_INT_CNTL 0x00000d34
367
368#define REG_A2XX_SQ_INT_STATUS 0x00000d35
369
370#define REG_A2XX_SQ_INT_ACK 0x00000d36
371
372#define REG_A2XX_SQ_DEBUG_INPUT_FSM 0x00000dae
373
374#define REG_A2XX_SQ_DEBUG_CONST_MGR_FSM 0x00000daf
375
376#define REG_A2XX_SQ_DEBUG_TP_FSM 0x00000db0
377
378#define REG_A2XX_SQ_DEBUG_FSM_ALU_0 0x00000db1
379
380#define REG_A2XX_SQ_DEBUG_FSM_ALU_1 0x00000db2
381
382#define REG_A2XX_SQ_DEBUG_EXP_ALLOC 0x00000db3
383
384#define REG_A2XX_SQ_DEBUG_PTR_BUFF 0x00000db4
385
386#define REG_A2XX_SQ_DEBUG_GPR_VTX 0x00000db5
387
388#define REG_A2XX_SQ_DEBUG_GPR_PIX 0x00000db6
389
390#define REG_A2XX_SQ_DEBUG_TB_STATUS_SEL 0x00000db7
391
392#define REG_A2XX_SQ_DEBUG_VTX_TB_0 0x00000db8
393
394#define REG_A2XX_SQ_DEBUG_VTX_TB_1 0x00000db9
395
396#define REG_A2XX_SQ_DEBUG_VTX_TB_STATUS_REG 0x00000dba
397
398#define REG_A2XX_SQ_DEBUG_VTX_TB_STATE_MEM 0x00000dbb
399
400#define REG_A2XX_SQ_DEBUG_PIX_TB_0 0x00000dbc
401
402#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_0 0x00000dbd
403
404#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_1 0x00000dbe
405
406#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_2 0x00000dbf
407
408#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_3 0x00000dc0
409
410#define REG_A2XX_SQ_DEBUG_PIX_TB_STATE_MEM 0x00000dc1
411
412#define REG_A2XX_TC_CNTL_STATUS 0x00000e00
413#define A2XX_TC_CNTL_STATUS_L2_INVALIDATE 0x00000001
414
415#define REG_A2XX_TP0_CHICKEN 0x00000e1e
416
417#define REG_A2XX_RB_BC_CONTROL 0x00000f01
418#define A2XX_RB_BC_CONTROL_ACCUM_LINEAR_MODE_ENABLE 0x00000001
419#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK 0x00000006
420#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT 1
421static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT(uint32_t val)
422{
423 return ((val) << A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK;
424}
425#define A2XX_RB_BC_CONTROL_DISABLE_EDRAM_CAM 0x00000008
426#define A2XX_RB_BC_CONTROL_DISABLE_EZ_FAST_CONTEXT_SWITCH 0x00000010
427#define A2XX_RB_BC_CONTROL_DISABLE_EZ_NULL_ZCMD_DROP 0x00000020
428#define A2XX_RB_BC_CONTROL_DISABLE_LZ_NULL_ZCMD_DROP 0x00000040
429#define A2XX_RB_BC_CONTROL_ENABLE_AZ_THROTTLE 0x00000080
430#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK 0x00001f00
431#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT 8
432static inline uint32_t A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT(uint32_t val)
433{
434 return ((val) << A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT) & A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK;
435}
436#define A2XX_RB_BC_CONTROL_ENABLE_CRC_UPDATE 0x00004000
437#define A2XX_RB_BC_CONTROL_CRC_MODE 0x00008000
438#define A2XX_RB_BC_CONTROL_DISABLE_SAMPLE_COUNTERS 0x00010000
439#define A2XX_RB_BC_CONTROL_DISABLE_ACCUM 0x00020000
440#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK 0x003c0000
441#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT 18
442static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK(uint32_t val)
443{
444 return ((val) << A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK;
445}
446#define A2XX_RB_BC_CONTROL_LINEAR_PERFORMANCE_ENABLE 0x00400000
447#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK 0x07800000
448#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT 23
449static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT(uint32_t val)
450{
451 return ((val) << A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK;
452}
453#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK 0x18000000
454#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT 27
455static inline uint32_t A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT(uint32_t val)
456{
457 return ((val) << A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK;
458}
459#define A2XX_RB_BC_CONTROL_MEM_EXPORT_LINEAR_MODE_ENABLE 0x20000000
460#define A2XX_RB_BC_CONTROL_CRC_SYSTEM 0x40000000
461#define A2XX_RB_BC_CONTROL_RESERVED6 0x80000000
462
463#define REG_A2XX_RB_EDRAM_INFO 0x00000f02
464
465#define REG_A2XX_RB_DEBUG_CNTL 0x00000f26
466
467#define REG_A2XX_RB_DEBUG_DATA 0x00000f27
468
469#define REG_A2XX_RB_SURFACE_INFO 0x00002000
470
471#define REG_A2XX_RB_COLOR_INFO 0x00002001
472#define A2XX_RB_COLOR_INFO_FORMAT__MASK 0x0000000f
473#define A2XX_RB_COLOR_INFO_FORMAT__SHIFT 0
474static inline uint32_t A2XX_RB_COLOR_INFO_FORMAT(enum a2xx_colorformatx val)
475{
476 return ((val) << A2XX_RB_COLOR_INFO_FORMAT__SHIFT) & A2XX_RB_COLOR_INFO_FORMAT__MASK;
477}
478#define A2XX_RB_COLOR_INFO_ROUND_MODE__MASK 0x00000030
479#define A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT 4
480static inline uint32_t A2XX_RB_COLOR_INFO_ROUND_MODE(uint32_t val)
481{
482 return ((val) << A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT) & A2XX_RB_COLOR_INFO_ROUND_MODE__MASK;
483}
484#define A2XX_RB_COLOR_INFO_LINEAR 0x00000040
485#define A2XX_RB_COLOR_INFO_ENDIAN__MASK 0x00000180
486#define A2XX_RB_COLOR_INFO_ENDIAN__SHIFT 7
487static inline uint32_t A2XX_RB_COLOR_INFO_ENDIAN(uint32_t val)
488{
489 return ((val) << A2XX_RB_COLOR_INFO_ENDIAN__SHIFT) & A2XX_RB_COLOR_INFO_ENDIAN__MASK;
490}
491#define A2XX_RB_COLOR_INFO_SWAP__MASK 0x00000600
492#define A2XX_RB_COLOR_INFO_SWAP__SHIFT 9
493static inline uint32_t A2XX_RB_COLOR_INFO_SWAP(uint32_t val)
494{
495 return ((val) << A2XX_RB_COLOR_INFO_SWAP__SHIFT) & A2XX_RB_COLOR_INFO_SWAP__MASK;
496}
497#define A2XX_RB_COLOR_INFO_BASE__MASK 0xfffff000
498#define A2XX_RB_COLOR_INFO_BASE__SHIFT 12
499static inline uint32_t A2XX_RB_COLOR_INFO_BASE(uint32_t val)
500{
501 return ((val >> 10) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
502}
503
504#define REG_A2XX_RB_DEPTH_INFO 0x00002002
505#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
506#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
507static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
508{
509 return ((val) << A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
510}
511#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff000
512#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
513static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
514{
515 return ((val >> 10) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
516}
517
518#define REG_A2XX_A225_RB_COLOR_INFO3 0x00002005
519
520#define REG_A2XX_COHER_DEST_BASE_0 0x00002006
521
522#define REG_A2XX_PA_SC_SCREEN_SCISSOR_TL 0x0000200e
523#define A2XX_PA_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
524#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
525#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
526static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
527{
528 return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK;
529}
530#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
531#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
532static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
533{
534 return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK;
535}
536
537#define REG_A2XX_PA_SC_SCREEN_SCISSOR_BR 0x0000200f
538#define A2XX_PA_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
539#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
540#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
541static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
542{
543 return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK;
544}
545#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
546#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
547static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
548{
549 return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK;
550}
551
552#define REG_A2XX_PA_SC_WINDOW_OFFSET 0x00002080
553#define A2XX_PA_SC_WINDOW_OFFSET_X__MASK 0x00007fff
554#define A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0
555static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_X(int32_t val)
556{
557 return ((val) << A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_X__MASK;
558}
559#define A2XX_PA_SC_WINDOW_OFFSET_Y__MASK 0x7fff0000
560#define A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16
561static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_Y(int32_t val)
562{
563 return ((val) << A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_Y__MASK;
564}
565#define A2XX_PA_SC_WINDOW_OFFSET_DISABLE 0x80000000
566
567#define REG_A2XX_PA_SC_WINDOW_SCISSOR_TL 0x00002081
568#define A2XX_PA_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
569#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
570#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
571static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
572{
573 return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK;
574}
575#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
576#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
577static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
578{
579 return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK;
580}
581
582#define REG_A2XX_PA_SC_WINDOW_SCISSOR_BR 0x00002082
583#define A2XX_PA_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
584#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
585#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
586static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
587{
588 return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK;
589}
590#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
591#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
592static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
593{
594 return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK;
595}
596
597#define REG_A2XX_UNKNOWN_2010 0x00002010
598
599#define REG_A2XX_VGT_MAX_VTX_INDX 0x00002100
600
601#define REG_A2XX_VGT_MIN_VTX_INDX 0x00002101
602
603#define REG_A2XX_VGT_INDX_OFFSET 0x00002102
604
605#define REG_A2XX_A225_PC_MULTI_PRIM_IB_RESET_INDX 0x00002103
606
607#define REG_A2XX_RB_COLOR_MASK 0x00002104
608#define A2XX_RB_COLOR_MASK_WRITE_RED 0x00000001
609#define A2XX_RB_COLOR_MASK_WRITE_GREEN 0x00000002
610#define A2XX_RB_COLOR_MASK_WRITE_BLUE 0x00000004
611#define A2XX_RB_COLOR_MASK_WRITE_ALPHA 0x00000008
612
613#define REG_A2XX_RB_BLEND_RED 0x00002105
614
615#define REG_A2XX_RB_BLEND_GREEN 0x00002106
616
617#define REG_A2XX_RB_BLEND_BLUE 0x00002107
618
619#define REG_A2XX_RB_BLEND_ALPHA 0x00002108
620
621#define REG_A2XX_RB_FOG_COLOR 0x00002109
622
623#define REG_A2XX_RB_STENCILREFMASK_BF 0x0000210c
624#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
625#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
626static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
627{
628 return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
629}
630#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
631#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
632static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
633{
634 return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
635}
636#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
637#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
638static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
639{
640 return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
641}
642
643#define REG_A2XX_RB_STENCILREFMASK 0x0000210d
644#define A2XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
645#define A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
646static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
647{
648 return ((val) << A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILREF__MASK;
649}
650#define A2XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
651#define A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
652static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
653{
654 return ((val) << A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILMASK__MASK;
655}
656#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
657#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
658static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
659{
660 return ((val) << A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
661}
662
663#define REG_A2XX_RB_ALPHA_REF 0x0000210e
664
665#define REG_A2XX_PA_CL_VPORT_XSCALE 0x0000210f
666#define A2XX_PA_CL_VPORT_XSCALE__MASK 0xffffffff
667#define A2XX_PA_CL_VPORT_XSCALE__SHIFT 0
668static inline uint32_t A2XX_PA_CL_VPORT_XSCALE(float val)
669{
670 return ((fui(val)) << A2XX_PA_CL_VPORT_XSCALE__SHIFT) & A2XX_PA_CL_VPORT_XSCALE__MASK;
671}
672
673#define REG_A2XX_PA_CL_VPORT_XOFFSET 0x00002110
674#define A2XX_PA_CL_VPORT_XOFFSET__MASK 0xffffffff
675#define A2XX_PA_CL_VPORT_XOFFSET__SHIFT 0
676static inline uint32_t A2XX_PA_CL_VPORT_XOFFSET(float val)
677{
678 return ((fui(val)) << A2XX_PA_CL_VPORT_XOFFSET__SHIFT) & A2XX_PA_CL_VPORT_XOFFSET__MASK;
679}
680
681#define REG_A2XX_PA_CL_VPORT_YSCALE 0x00002111
682#define A2XX_PA_CL_VPORT_YSCALE__MASK 0xffffffff
683#define A2XX_PA_CL_VPORT_YSCALE__SHIFT 0
684static inline uint32_t A2XX_PA_CL_VPORT_YSCALE(float val)
685{
686 return ((fui(val)) << A2XX_PA_CL_VPORT_YSCALE__SHIFT) & A2XX_PA_CL_VPORT_YSCALE__MASK;
687}
688
689#define REG_A2XX_PA_CL_VPORT_YOFFSET 0x00002112
690#define A2XX_PA_CL_VPORT_YOFFSET__MASK 0xffffffff
691#define A2XX_PA_CL_VPORT_YOFFSET__SHIFT 0
692static inline uint32_t A2XX_PA_CL_VPORT_YOFFSET(float val)
693{
694 return ((fui(val)) << A2XX_PA_CL_VPORT_YOFFSET__SHIFT) & A2XX_PA_CL_VPORT_YOFFSET__MASK;
695}
696
697#define REG_A2XX_PA_CL_VPORT_ZSCALE 0x00002113
698#define A2XX_PA_CL_VPORT_ZSCALE__MASK 0xffffffff
699#define A2XX_PA_CL_VPORT_ZSCALE__SHIFT 0
700static inline uint32_t A2XX_PA_CL_VPORT_ZSCALE(float val)
701{
702 return ((fui(val)) << A2XX_PA_CL_VPORT_ZSCALE__SHIFT) & A2XX_PA_CL_VPORT_ZSCALE__MASK;
703}
704
705#define REG_A2XX_PA_CL_VPORT_ZOFFSET 0x00002114
706#define A2XX_PA_CL_VPORT_ZOFFSET__MASK 0xffffffff
707#define A2XX_PA_CL_VPORT_ZOFFSET__SHIFT 0
708static inline uint32_t A2XX_PA_CL_VPORT_ZOFFSET(float val)
709{
710 return ((fui(val)) << A2XX_PA_CL_VPORT_ZOFFSET__SHIFT) & A2XX_PA_CL_VPORT_ZOFFSET__MASK;
711}
712
713#define REG_A2XX_SQ_PROGRAM_CNTL 0x00002180
714#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK 0x000000ff
715#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT 0
716static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_REGS(uint32_t val)
717{
718 return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK;
719}
720#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK 0x0000ff00
721#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT 8
722static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_REGS(uint32_t val)
723{
724 return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK;
725}
726#define A2XX_SQ_PROGRAM_CNTL_VS_RESOURCE 0x00010000
727#define A2XX_SQ_PROGRAM_CNTL_PS_RESOURCE 0x00020000
728#define A2XX_SQ_PROGRAM_CNTL_PARAM_GEN 0x00040000
729#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_PIX 0x00080000
730#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK 0x00f00000
731#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT 20
732static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT(uint32_t val)
733{
734 return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK;
735}
736#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK 0x07000000
737#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT 24
738static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE(enum a2xx_sq_ps_vtx_mode val)
739{
740 return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK;
741}
742#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK 0x78000000
743#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT 27
744static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE(uint32_t val)
745{
746 return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK;
747}
748#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_VTX 0x80000000
749
750#define REG_A2XX_SQ_CONTEXT_MISC 0x00002181
751#define A2XX_SQ_CONTEXT_MISC_INST_PRED_OPTIMIZE 0x00000001
752#define A2XX_SQ_CONTEXT_MISC_SC_OUTPUT_SCREEN_XY 0x00000002
753#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK 0x0000000c
754#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT 2
755static inline uint32_t A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL(enum a2xx_sq_sample_cntl val)
756{
757 return ((val) << A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT) & A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK;
758}
759#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK 0x0000ff00
760#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT 8
761static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val)
762{
763 return ((val) << A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT) & A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK;
764}
765#define A2XX_SQ_CONTEXT_MISC_PERFCOUNTER_REF 0x00010000
766#define A2XX_SQ_CONTEXT_MISC_YEILD_OPTIMIZE 0x00020000
767#define A2XX_SQ_CONTEXT_MISC_TX_CACHE_SEL 0x00040000
768
769#define REG_A2XX_SQ_INTERPOLATOR_CNTL 0x00002182
770
771#define REG_A2XX_SQ_WRAPPING_0 0x00002183
772
773#define REG_A2XX_SQ_WRAPPING_1 0x00002184
774
775#define REG_A2XX_SQ_PS_PROGRAM 0x000021f6
776
777#define REG_A2XX_SQ_VS_PROGRAM 0x000021f7
778
779#define REG_A2XX_RB_DEPTHCONTROL 0x00002200
780#define A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE 0x00000001
781#define A2XX_RB_DEPTHCONTROL_Z_ENABLE 0x00000002
782#define A2XX_RB_DEPTHCONTROL_Z_WRITE_ENABLE 0x00000004
783#define A2XX_RB_DEPTHCONTROL_EARLY_Z_ENABLE 0x00000008
784#define A2XX_RB_DEPTHCONTROL_ZFUNC__MASK 0x00000070
785#define A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT 4
786static inline uint32_t A2XX_RB_DEPTHCONTROL_ZFUNC(enum adreno_compare_func val)
787{
788 return ((val) << A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_ZFUNC__MASK;
789}
790#define A2XX_RB_DEPTHCONTROL_BACKFACE_ENABLE 0x00000080
791#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK 0x00000700
792#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT 8
793static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC(enum adreno_compare_func val)
794{
795 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK;
796}
797#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK 0x00003800
798#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT 11
799static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL(enum adreno_stencil_op val)
800{
801 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK;
802}
803#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK 0x0001c000
804#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT 14
805static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS(enum adreno_stencil_op val)
806{
807 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK;
808}
809#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK 0x000e0000
810#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT 17
811static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL(enum adreno_stencil_op val)
812{
813 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK;
814}
815#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK 0x00700000
816#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT 20
817static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF(enum adreno_compare_func val)
818{
819 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK;
820}
821#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK 0x03800000
822#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT 23
823static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF(enum adreno_stencil_op val)
824{
825 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK;
826}
827#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK 0x1c000000
828#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT 26
829static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF(enum adreno_stencil_op val)
830{
831 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK;
832}
833#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK 0xe0000000
834#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT 29
835static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF(enum adreno_stencil_op val)
836{
837 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK;
838}
839
840#define REG_A2XX_RB_BLEND_CONTROL 0x00002201
841#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK 0x0000001f
842#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT 0
843static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND(enum adreno_rb_blend_factor val)
844{
845 return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK;
846}
847#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK 0x000000e0
848#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT 5
849static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum adreno_rb_blend_opcode val)
850{
851 return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK;
852}
853#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK 0x00001f00
854#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT 8
855static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND(enum adreno_rb_blend_factor val)
856{
857 return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK;
858}
859#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK 0x001f0000
860#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT 16
861static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND(enum adreno_rb_blend_factor val)
862{
863 return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK;
864}
865#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK 0x00e00000
866#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT 21
867static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum adreno_rb_blend_opcode val)
868{
869 return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK;
870}
871#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK 0x1f000000
872#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT 24
873static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND(enum adreno_rb_blend_factor val)
874{
875 return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK;
876}
877#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE_ENABLE 0x20000000
878#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE 0x40000000
879
880#define REG_A2XX_RB_COLORCONTROL 0x00002202
881#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK 0x00000007
882#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT 0
883static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_FUNC(enum adreno_compare_func val)
884{
885 return ((val) << A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK;
886}
887#define A2XX_RB_COLORCONTROL_ALPHA_TEST_ENABLE 0x00000008
888#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_ENABLE 0x00000010
889#define A2XX_RB_COLORCONTROL_BLEND_DISABLE 0x00000020
890#define A2XX_RB_COLORCONTROL_VOB_ENABLE 0x00000040
891#define A2XX_RB_COLORCONTROL_VS_EXPORTS_FOG 0x00000080
892#define A2XX_RB_COLORCONTROL_ROP_CODE__MASK 0x00000f00
893#define A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT 8
894static inline uint32_t A2XX_RB_COLORCONTROL_ROP_CODE(uint32_t val)
895{
896 return ((val) << A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT) & A2XX_RB_COLORCONTROL_ROP_CODE__MASK;
897}
898#define A2XX_RB_COLORCONTROL_DITHER_MODE__MASK 0x00003000
899#define A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT 12
900static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_MODE(enum adreno_rb_dither_mode val)
901{
902 return ((val) << A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_MODE__MASK;
903}
904#define A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK 0x0000c000
905#define A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT 14
906static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_TYPE(enum a2xx_rb_dither_type val)
907{
908 return ((val) << A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK;
909}
910#define A2XX_RB_COLORCONTROL_PIXEL_FOG 0x00010000
911#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK 0x03000000
912#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT 24
913static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0(uint32_t val)
914{
915 return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK;
916}
917#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK 0x0c000000
918#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT 26
919static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1(uint32_t val)
920{
921 return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK;
922}
923#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK 0x30000000
924#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT 28
925static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2(uint32_t val)
926{
927 return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK;
928}
929#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK 0xc0000000
930#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT 30
931static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3(uint32_t val)
932{
933 return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK;
934}
935
936#define REG_A2XX_VGT_CURRENT_BIN_ID_MAX 0x00002203
937#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK 0x00000007
938#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT 0
939static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN(uint32_t val)
940{
941 return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK;
942}
943#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK 0x00000038
944#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT 3
945static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_ROW(uint32_t val)
946{
947 return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK;
948}
949#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK 0x000001c0
950#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT 6
951static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK(uint32_t val)
952{
953 return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK;
954}
955
956#define REG_A2XX_PA_CL_CLIP_CNTL 0x00002204
957#define A2XX_PA_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
958#define A2XX_PA_CL_CLIP_CNTL_BOUNDARY_EDGE_FLAG_ENA 0x00040000
959#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK 0x00080000
960#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT 19
961static inline uint32_t A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF(enum a2xx_dx_clip_space val)
962{
963 return ((val) << A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT) & A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK;
964}
965#define A2XX_PA_CL_CLIP_CNTL_DIS_CLIP_ERR_DETECT 0x00100000
966#define A2XX_PA_CL_CLIP_CNTL_VTX_KILL_OR 0x00200000
967#define A2XX_PA_CL_CLIP_CNTL_XY_NAN_RETAIN 0x00400000
968#define A2XX_PA_CL_CLIP_CNTL_Z_NAN_RETAIN 0x00800000
969#define A2XX_PA_CL_CLIP_CNTL_W_NAN_RETAIN 0x01000000
970
971#define REG_A2XX_PA_SU_SC_MODE_CNTL 0x00002205
972#define A2XX_PA_SU_SC_MODE_CNTL_CULL_FRONT 0x00000001
973#define A2XX_PA_SU_SC_MODE_CNTL_CULL_BACK 0x00000002
974#define A2XX_PA_SU_SC_MODE_CNTL_FACE 0x00000004
975#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK 0x00000018
976#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT 3
977static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_POLYMODE(enum a2xx_pa_su_sc_polymode val)
978{
979 return ((val) << A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK;
980}
981#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK 0x000000e0
982#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT 5
983static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
984{
985 return ((val) << A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK;
986}
987#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK 0x00000700
988#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT 8
989static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
990{
991 return ((val) << A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK;
992}
993#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_FRONT_ENABLE 0x00000800
994#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_BACK_ENABLE 0x00001000
995#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_PARA_ENABLE 0x00002000
996#define A2XX_PA_SU_SC_MODE_CNTL_MSAA_ENABLE 0x00008000
997#define A2XX_PA_SU_SC_MODE_CNTL_VTX_WINDOW_OFFSET_ENABLE 0x00010000
998#define A2XX_PA_SU_SC_MODE_CNTL_LINE_STIPPLE_ENABLE 0x00040000
999#define A2XX_PA_SU_SC_MODE_CNTL_PROVOKING_VTX_LAST 0x00080000
1000#define A2XX_PA_SU_SC_MODE_CNTL_PERSP_CORR_DIS 0x00100000
1001#define A2XX_PA_SU_SC_MODE_CNTL_MULTI_PRIM_IB_ENA 0x00200000
1002#define A2XX_PA_SU_SC_MODE_CNTL_QUAD_ORDER_ENABLE 0x00800000
1003#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_ALL_TRI 0x02000000
1004#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_FIRST_TRI_NEW_STATE 0x04000000
1005#define A2XX_PA_SU_SC_MODE_CNTL_CLAMPED_FACENESS 0x10000000
1006#define A2XX_PA_SU_SC_MODE_CNTL_ZERO_AREA_FACENESS 0x20000000
1007#define A2XX_PA_SU_SC_MODE_CNTL_FACE_KILL_ENABLE 0x40000000
1008#define A2XX_PA_SU_SC_MODE_CNTL_FACE_WRITE_ENABLE 0x80000000
1009
1010#define REG_A2XX_PA_CL_VTE_CNTL 0x00002206
1011#define A2XX_PA_CL_VTE_CNTL_VPORT_X_SCALE_ENA 0x00000001
1012#define A2XX_PA_CL_VTE_CNTL_VPORT_X_OFFSET_ENA 0x00000002
1013#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_SCALE_ENA 0x00000004
1014#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_OFFSET_ENA 0x00000008
1015#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_SCALE_ENA 0x00000010
1016#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_OFFSET_ENA 0x00000020
1017#define A2XX_PA_CL_VTE_CNTL_VTX_XY_FMT 0x00000100
1018#define A2XX_PA_CL_VTE_CNTL_VTX_Z_FMT 0x00000200
1019#define A2XX_PA_CL_VTE_CNTL_VTX_W0_FMT 0x00000400
1020#define A2XX_PA_CL_VTE_CNTL_PERFCOUNTER_REF 0x00000800
1021
1022#define REG_A2XX_VGT_CURRENT_BIN_ID_MIN 0x00002207
1023#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK 0x00000007
1024#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT 0
1025static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN(uint32_t val)
1026{
1027 return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK;
1028}
1029#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK 0x00000038
1030#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT 3
1031static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_ROW(uint32_t val)
1032{
1033 return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK;
1034}
1035#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK 0x000001c0
1036#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT 6
1037static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK(uint32_t val)
1038{
1039 return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK;
1040}
1041
1042#define REG_A2XX_RB_MODECONTROL 0x00002208
1043#define A2XX_RB_MODECONTROL_EDRAM_MODE__MASK 0x00000007
1044#define A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT 0
1045static inline uint32_t A2XX_RB_MODECONTROL_EDRAM_MODE(enum a2xx_rb_edram_mode val)
1046{
1047 return ((val) << A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT) & A2XX_RB_MODECONTROL_EDRAM_MODE__MASK;
1048}
1049
1050#define REG_A2XX_A220_RB_LRZ_VSC_CONTROL 0x00002209
1051
1052#define REG_A2XX_RB_SAMPLE_POS 0x0000220a
1053
1054#define REG_A2XX_CLEAR_COLOR 0x0000220b
1055#define A2XX_CLEAR_COLOR_RED__MASK 0x000000ff
1056#define A2XX_CLEAR_COLOR_RED__SHIFT 0
1057static inline uint32_t A2XX_CLEAR_COLOR_RED(uint32_t val)
1058{
1059 return ((val) << A2XX_CLEAR_COLOR_RED__SHIFT) & A2XX_CLEAR_COLOR_RED__MASK;
1060}
1061#define A2XX_CLEAR_COLOR_GREEN__MASK 0x0000ff00
1062#define A2XX_CLEAR_COLOR_GREEN__SHIFT 8
1063static inline uint32_t A2XX_CLEAR_COLOR_GREEN(uint32_t val)
1064{
1065 return ((val) << A2XX_CLEAR_COLOR_GREEN__SHIFT) & A2XX_CLEAR_COLOR_GREEN__MASK;
1066}
1067#define A2XX_CLEAR_COLOR_BLUE__MASK 0x00ff0000
1068#define A2XX_CLEAR_COLOR_BLUE__SHIFT 16
1069static inline uint32_t A2XX_CLEAR_COLOR_BLUE(uint32_t val)
1070{
1071 return ((val) << A2XX_CLEAR_COLOR_BLUE__SHIFT) & A2XX_CLEAR_COLOR_BLUE__MASK;
1072}
1073#define A2XX_CLEAR_COLOR_ALPHA__MASK 0xff000000
1074#define A2XX_CLEAR_COLOR_ALPHA__SHIFT 24
1075static inline uint32_t A2XX_CLEAR_COLOR_ALPHA(uint32_t val)
1076{
1077 return ((val) << A2XX_CLEAR_COLOR_ALPHA__SHIFT) & A2XX_CLEAR_COLOR_ALPHA__MASK;
1078}
1079
1080#define REG_A2XX_A220_GRAS_CONTROL 0x00002210
1081
1082#define REG_A2XX_PA_SU_POINT_SIZE 0x00002280
1083#define A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK 0x0000ffff
1084#define A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT 0
1085static inline uint32_t A2XX_PA_SU_POINT_SIZE_HEIGHT(float val)
1086{
1087 return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK;
1088}
1089#define A2XX_PA_SU_POINT_SIZE_WIDTH__MASK 0xffff0000
1090#define A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT 16
1091static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val)
1092{
1093 return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK;
1094}
1095
1096#define REG_A2XX_PA_SU_POINT_MINMAX 0x00002281
1097#define A2XX_PA_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
1098#define A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT 0
1099static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MIN(float val)
1100{
1101 return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK;
1102}
1103#define A2XX_PA_SU_POINT_MINMAX_MAX__MASK 0xffff0000
1104#define A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT 16
1105static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val)
1106{
1107 return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK;
1108}
1109
1110#define REG_A2XX_PA_SU_LINE_CNTL 0x00002282
1111#define A2XX_PA_SU_LINE_CNTL_WIDTH__MASK 0x0000ffff
1112#define A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT 0
1113static inline uint32_t A2XX_PA_SU_LINE_CNTL_WIDTH(float val)
1114{
1115 return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK;
1116}
1117
1118#define REG_A2XX_PA_SC_LINE_STIPPLE 0x00002283
1119#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK 0x0000ffff
1120#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT 0
1121static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN(uint32_t val)
1122{
1123 return ((val) << A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK;
1124}
1125#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK 0x00ff0000
1126#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT 16
1127static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT(uint32_t val)
1128{
1129 return ((val) << A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK;
1130}
1131#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK 0x10000000
1132#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT 28
1133static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER(enum a2xx_pa_sc_pattern_bit_order val)
1134{
1135 return ((val) << A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK;
1136}
1137#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK 0x60000000
1138#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT 29
1139static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL(enum a2xx_pa_sc_auto_reset_cntl val)
1140{
1141 return ((val) << A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK;
1142}
1143
1144#define REG_A2XX_PA_SC_VIZ_QUERY 0x00002293
1145
1146#define REG_A2XX_VGT_ENHANCE 0x00002294
1147
1148#define REG_A2XX_PA_SC_LINE_CNTL 0x00002300
1149#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK 0x0000ffff
1150#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT 0
1151static inline uint32_t A2XX_PA_SC_LINE_CNTL_BRES_CNTL(uint32_t val)
1152{
1153 return ((val) << A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT) & A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK;
1154}
1155#define A2XX_PA_SC_LINE_CNTL_USE_BRES_CNTL 0x00000100
1156#define A2XX_PA_SC_LINE_CNTL_EXPAND_LINE_WIDTH 0x00000200
1157#define A2XX_PA_SC_LINE_CNTL_LAST_PIXEL 0x00000400
1158
1159#define REG_A2XX_PA_SC_AA_CONFIG 0x00002301
1160
1161#define REG_A2XX_PA_SU_VTX_CNTL 0x00002302
1162#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK 0x00000001
1163#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT 0
1164static inline uint32_t A2XX_PA_SU_VTX_CNTL_PIX_CENTER(enum a2xx_pa_pixcenter val)
1165{
1166 return ((val) << A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT) & A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK;
1167}
1168#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK 0x00000006
1169#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT 1
1170static inline uint32_t A2XX_PA_SU_VTX_CNTL_ROUND_MODE(enum a2xx_pa_roundmode val)
1171{
1172 return ((val) << A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK;
1173}
1174#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK 0x00000380
1175#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT 7
1176static inline uint32_t A2XX_PA_SU_VTX_CNTL_QUANT_MODE(enum a2xx_pa_quantmode val)
1177{
1178 return ((val) << A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK;
1179}
1180
1181#define REG_A2XX_PA_CL_GB_VERT_CLIP_ADJ 0x00002303
1182#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK 0xffffffff
1183#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT 0
1184static inline uint32_t A2XX_PA_CL_GB_VERT_CLIP_ADJ(float val)
1185{
1186 return ((fui(val)) << A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK;
1187}
1188
1189#define REG_A2XX_PA_CL_GB_VERT_DISC_ADJ 0x00002304
1190#define A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK 0xffffffff
1191#define A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT 0
1192static inline uint32_t A2XX_PA_CL_GB_VERT_DISC_ADJ(float val)
1193{
1194 return ((fui(val)) << A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK;
1195}
1196
1197#define REG_A2XX_PA_CL_GB_HORZ_CLIP_ADJ 0x00002305
1198#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK 0xffffffff
1199#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT 0
1200static inline uint32_t A2XX_PA_CL_GB_HORZ_CLIP_ADJ(float val)
1201{
1202 return ((fui(val)) << A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK;
1203}
1204
1205#define REG_A2XX_PA_CL_GB_HORZ_DISC_ADJ 0x00002306
1206#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK 0xffffffff
1207#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT 0
1208static inline uint32_t A2XX_PA_CL_GB_HORZ_DISC_ADJ(float val)
1209{
1210 return ((fui(val)) << A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK;
1211}
1212
1213#define REG_A2XX_SQ_VS_CONST 0x00002307
1214#define A2XX_SQ_VS_CONST_BASE__MASK 0x000001ff
1215#define A2XX_SQ_VS_CONST_BASE__SHIFT 0
1216static inline uint32_t A2XX_SQ_VS_CONST_BASE(uint32_t val)
1217{
1218 return ((val) << A2XX_SQ_VS_CONST_BASE__SHIFT) & A2XX_SQ_VS_CONST_BASE__MASK;
1219}
1220#define A2XX_SQ_VS_CONST_SIZE__MASK 0x001ff000
1221#define A2XX_SQ_VS_CONST_SIZE__SHIFT 12
1222static inline uint32_t A2XX_SQ_VS_CONST_SIZE(uint32_t val)
1223{
1224 return ((val) << A2XX_SQ_VS_CONST_SIZE__SHIFT) & A2XX_SQ_VS_CONST_SIZE__MASK;
1225}
1226
1227#define REG_A2XX_SQ_PS_CONST 0x00002308
1228#define A2XX_SQ_PS_CONST_BASE__MASK 0x000001ff
1229#define A2XX_SQ_PS_CONST_BASE__SHIFT 0
1230static inline uint32_t A2XX_SQ_PS_CONST_BASE(uint32_t val)
1231{
1232 return ((val) << A2XX_SQ_PS_CONST_BASE__SHIFT) & A2XX_SQ_PS_CONST_BASE__MASK;
1233}
1234#define A2XX_SQ_PS_CONST_SIZE__MASK 0x001ff000
1235#define A2XX_SQ_PS_CONST_SIZE__SHIFT 12
1236static inline uint32_t A2XX_SQ_PS_CONST_SIZE(uint32_t val)
1237{
1238 return ((val) << A2XX_SQ_PS_CONST_SIZE__SHIFT) & A2XX_SQ_PS_CONST_SIZE__MASK;
1239}
1240
1241#define REG_A2XX_SQ_DEBUG_MISC_0 0x00002309
1242
1243#define REG_A2XX_SQ_DEBUG_MISC_1 0x0000230a
1244
1245#define REG_A2XX_PA_SC_AA_MASK 0x00002312
1246
1247#define REG_A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL 0x00002316
1248
1249#define REG_A2XX_VGT_OUT_DEALLOC_CNTL 0x00002317
1250
1251#define REG_A2XX_RB_COPY_CONTROL 0x00002318
1252#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK 0x00000007
1253#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT 0
1254static inline uint32_t A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT(enum a2xx_rb_copy_sample_select val)
1255{
1256 return ((val) << A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT) & A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK;
1257}
1258#define A2XX_RB_COPY_CONTROL_DEPTH_CLEAR_ENABLE 0x00000008
1259#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK 0x000000f0
1260#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT 4
1261static inline uint32_t A2XX_RB_COPY_CONTROL_CLEAR_MASK(uint32_t val)
1262{
1263 return ((val) << A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT) & A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK;
1264}
1265
1266#define REG_A2XX_RB_COPY_DEST_BASE 0x00002319
1267
1268#define REG_A2XX_RB_COPY_DEST_PITCH 0x0000231a
1269#define A2XX_RB_COPY_DEST_PITCH__MASK 0xffffffff
1270#define A2XX_RB_COPY_DEST_PITCH__SHIFT 0
1271static inline uint32_t A2XX_RB_COPY_DEST_PITCH(uint32_t val)
1272{
1273 return ((val >> 5) << A2XX_RB_COPY_DEST_PITCH__SHIFT) & A2XX_RB_COPY_DEST_PITCH__MASK;
1274}
1275
1276#define REG_A2XX_RB_COPY_DEST_INFO 0x0000231b
1277#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK 0x00000007
1278#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT 0
1279static inline uint32_t A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN(enum adreno_rb_surface_endian val)
1280{
1281 return ((val) << A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT) & A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK;
1282}
1283#define A2XX_RB_COPY_DEST_INFO_LINEAR 0x00000008
1284#define A2XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000f0
1285#define A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 4
1286static inline uint32_t A2XX_RB_COPY_DEST_INFO_FORMAT(enum a2xx_colorformatx val)
1287{
1288 return ((val) << A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A2XX_RB_COPY_DEST_INFO_FORMAT__MASK;
1289}
1290#define A2XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
1291#define A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
1292static inline uint32_t A2XX_RB_COPY_DEST_INFO_SWAP(uint32_t val)
1293{
1294 return ((val) << A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A2XX_RB_COPY_DEST_INFO_SWAP__MASK;
1295}
1296#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
1297#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
1298static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
1299{
1300 return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
1301}
1302#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK 0x00003000
1303#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT 12
1304static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_TYPE(enum a2xx_rb_dither_type val)
1305{
1306 return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK;
1307}
1308#define A2XX_RB_COPY_DEST_INFO_WRITE_RED 0x00004000
1309#define A2XX_RB_COPY_DEST_INFO_WRITE_GREEN 0x00008000
1310#define A2XX_RB_COPY_DEST_INFO_WRITE_BLUE 0x00010000
1311#define A2XX_RB_COPY_DEST_INFO_WRITE_ALPHA 0x00020000
1312
1313#define REG_A2XX_RB_COPY_DEST_OFFSET 0x0000231c
1314#define A2XX_RB_COPY_DEST_OFFSET_X__MASK 0x00001fff
1315#define A2XX_RB_COPY_DEST_OFFSET_X__SHIFT 0
1316static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_X(uint32_t val)
1317{
1318 return ((val) << A2XX_RB_COPY_DEST_OFFSET_X__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_X__MASK;
1319}
1320#define A2XX_RB_COPY_DEST_OFFSET_Y__MASK 0x03ffe000
1321#define A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT 13
1322static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val)
1323{
1324 return ((val) << A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_Y__MASK;
1325}
1326
1327#define REG_A2XX_RB_DEPTH_CLEAR 0x0000231d
1328
1329#define REG_A2XX_RB_SAMPLE_COUNT_CTL 0x00002324
1330
1331#define REG_A2XX_RB_COLOR_DEST_MASK 0x00002326
1332
1333#define REG_A2XX_A225_GRAS_UCP0X 0x00002340
1334
1335#define REG_A2XX_A225_GRAS_UCP5W 0x00002357
1336
1337#define REG_A2XX_A225_GRAS_UCP_ENABLED 0x00002360
1338
1339#define REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE 0x00002380
1340
1341#define REG_A2XX_PA_SU_POLY_OFFSET_BACK_OFFSET 0x00002383
1342
1343#define REG_A2XX_SQ_CONSTANT_0 0x00004000
1344
1345#define REG_A2XX_SQ_FETCH_0 0x00004800
1346
1347#define REG_A2XX_SQ_CF_BOOLEANS 0x00004900
1348
1349#define REG_A2XX_SQ_CF_LOOP 0x00004908
1350
1351#define REG_A2XX_COHER_SIZE_PM4 0x00000a29
1352
1353#define REG_A2XX_COHER_BASE_PM4 0x00000a2a
1354
1355#define REG_A2XX_COHER_STATUS_PM4 0x00000a2b
1356
1357#define REG_A2XX_SQ_TEX_0 0x00000000
1358#define A2XX_SQ_TEX_0_CLAMP_X__MASK 0x00001c00
1359#define A2XX_SQ_TEX_0_CLAMP_X__SHIFT 10
1360static inline uint32_t A2XX_SQ_TEX_0_CLAMP_X(enum sq_tex_clamp val)
1361{
1362 return ((val) << A2XX_SQ_TEX_0_CLAMP_X__SHIFT) & A2XX_SQ_TEX_0_CLAMP_X__MASK;
1363}
1364#define A2XX_SQ_TEX_0_CLAMP_Y__MASK 0x0000e000
1365#define A2XX_SQ_TEX_0_CLAMP_Y__SHIFT 13
1366static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Y(enum sq_tex_clamp val)
1367{
1368 return ((val) << A2XX_SQ_TEX_0_CLAMP_Y__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Y__MASK;
1369}
1370#define A2XX_SQ_TEX_0_CLAMP_Z__MASK 0x00070000
1371#define A2XX_SQ_TEX_0_CLAMP_Z__SHIFT 16
1372static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Z(enum sq_tex_clamp val)
1373{
1374 return ((val) << A2XX_SQ_TEX_0_CLAMP_Z__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Z__MASK;
1375}
1376#define A2XX_SQ_TEX_0_PITCH__MASK 0xffc00000
1377#define A2XX_SQ_TEX_0_PITCH__SHIFT 22
1378static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val)
1379{
1380 return ((val >> 5) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK;
1381}
1382
1383#define REG_A2XX_SQ_TEX_1 0x00000001
1384
1385#define REG_A2XX_SQ_TEX_2 0x00000002
1386#define A2XX_SQ_TEX_2_WIDTH__MASK 0x00001fff
1387#define A2XX_SQ_TEX_2_WIDTH__SHIFT 0
1388static inline uint32_t A2XX_SQ_TEX_2_WIDTH(uint32_t val)
1389{
1390 return ((val) << A2XX_SQ_TEX_2_WIDTH__SHIFT) & A2XX_SQ_TEX_2_WIDTH__MASK;
1391}
1392#define A2XX_SQ_TEX_2_HEIGHT__MASK 0x03ffe000
1393#define A2XX_SQ_TEX_2_HEIGHT__SHIFT 13
1394static inline uint32_t A2XX_SQ_TEX_2_HEIGHT(uint32_t val)
1395{
1396 return ((val) << A2XX_SQ_TEX_2_HEIGHT__SHIFT) & A2XX_SQ_TEX_2_HEIGHT__MASK;
1397}
1398
1399#define REG_A2XX_SQ_TEX_3 0x00000003
1400#define A2XX_SQ_TEX_3_SWIZ_X__MASK 0x0000000e
1401#define A2XX_SQ_TEX_3_SWIZ_X__SHIFT 1
1402static inline uint32_t A2XX_SQ_TEX_3_SWIZ_X(enum sq_tex_swiz val)
1403{
1404 return ((val) << A2XX_SQ_TEX_3_SWIZ_X__SHIFT) & A2XX_SQ_TEX_3_SWIZ_X__MASK;
1405}
1406#define A2XX_SQ_TEX_3_SWIZ_Y__MASK 0x00000070
1407#define A2XX_SQ_TEX_3_SWIZ_Y__SHIFT 4
1408static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Y(enum sq_tex_swiz val)
1409{
1410 return ((val) << A2XX_SQ_TEX_3_SWIZ_Y__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Y__MASK;
1411}
1412#define A2XX_SQ_TEX_3_SWIZ_Z__MASK 0x00000380
1413#define A2XX_SQ_TEX_3_SWIZ_Z__SHIFT 7
1414static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Z(enum sq_tex_swiz val)
1415{
1416 return ((val) << A2XX_SQ_TEX_3_SWIZ_Z__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Z__MASK;
1417}
1418#define A2XX_SQ_TEX_3_SWIZ_W__MASK 0x00001c00
1419#define A2XX_SQ_TEX_3_SWIZ_W__SHIFT 10
1420static inline uint32_t A2XX_SQ_TEX_3_SWIZ_W(enum sq_tex_swiz val)
1421{
1422 return ((val) << A2XX_SQ_TEX_3_SWIZ_W__SHIFT) & A2XX_SQ_TEX_3_SWIZ_W__MASK;
1423}
1424#define A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK 0x00180000
1425#define A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT 19
1426static inline uint32_t A2XX_SQ_TEX_3_XY_MAG_FILTER(enum sq_tex_filter val)
1427{
1428 return ((val) << A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK;
1429}
1430#define A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK 0x00600000
1431#define A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT 21
1432static inline uint32_t A2XX_SQ_TEX_3_XY_MIN_FILTER(enum sq_tex_filter val)
1433{
1434 return ((val) << A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK;
1435}
1436
1437
1438#endif /* A2XX_XML */
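
The generated helpers above all follow one pattern: each multi-bit field gets a __MASK/__SHIFT pair plus an inline packer, single-bit flags are plain defines, and float-valued fields pass through fui() to take their raw IEEE-754 bits. The standalone sketch below shows how a caller composes these into register values; it is illustrative only, since emit_reg() and the local fui() are made-up stand-ins rather than driver API, and it assumes this a2xx.xml.h plus the companion adreno_common.xml.h (which supplies the shared adreno_* enums used by some packers) are on the include path.

/* Usage sketch, not part of the generated file; everything other than the
 * generated headers themselves is an assumption for illustration. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

/* The float packers above call fui() (float -> raw IEEE-754 bits); it must be
 * visible before the header is pulled in.  The driver ships its own helper,
 * this local copy only keeps the sketch self-contained. */
static inline uint32_t fui(float f)
{
	uint32_t u;
	memcpy(&u, &f, sizeof(u));
	return u;
}

#include "adreno_common.xml.h"	/* shared adreno_* enums (assumed alongside) */
#include "a2xx.xml.h"

/* Stand-in for however register writes are actually emitted. */
static void emit_reg(uint32_t reg, uint32_t val)
{
	printf("reg 0x%04" PRIx32 " <- 0x%08" PRIx32 "\n", reg, val);
}

int main(void)
{
	/* Fields of one register OR together; each packer masks its value
	 * into its own bit range, so stray high bits are dropped. */
	emit_reg(REG_A2XX_PA_SC_WINDOW_OFFSET,
		 A2XX_PA_SC_WINDOW_OFFSET_X(32) |
		 A2XX_PA_SC_WINDOW_OFFSET_Y(16));

	/* Float-valued fields store the raw IEEE-754 bit pattern. */
	emit_reg(REG_A2XX_PA_CL_VPORT_XSCALE,
		 A2XX_PA_CL_VPORT_XSCALE(400.0f));
	return 0;
}

Because each packer masks after shifting, an out-of-range value is silently truncated into its field rather than rejected, which is worth keeping in mind when these helpers are fed computed values.
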
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
new file mode 100644
index 000000000000..d183516067b4
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -0,0 +1,2193 @@
1#ifndef A3XX_XML
2#define A3XX_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
17
18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark)
20
21Permission is hereby granted, free of charge, to any person obtaining
22a copy of this software and associated documentation files (the
23"Software"), to deal in the Software without restriction, including
24without limitation the rights to use, copy, modify, merge, publish,
25distribute, sublicense, and/or sell copies of the Software, and to
26permit persons to whom the Software is furnished to do so, subject to
27the following conditions:
28
29The above copyright notice and this permission notice (including the
30next paragraph) shall be included in all copies or substantial
31portions of the Software.
32
33THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
34EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
35MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
36IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
37LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
38OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
39WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40*/
41
42
43enum a3xx_render_mode {
44 RB_RENDERING_PASS = 0,
45 RB_TILING_PASS = 1,
46 RB_RESOLVE_PASS = 2,
47};
48
49enum a3xx_tile_mode {
50 LINEAR = 0,
51 TILE_32X32 = 2,
52};
53
54enum a3xx_threadmode {
55 MULTI = 0,
56 SINGLE = 1,
57};
58
59enum a3xx_instrbuffermode {
60 BUFFER = 1,
61};
62
63enum a3xx_threadsize {
64 TWO_QUADS = 0,
65 FOUR_QUADS = 1,
66};
67
68enum a3xx_state_block_id {
69 HLSQ_BLOCK_ID_TP_TEX = 2,
70 HLSQ_BLOCK_ID_TP_MIPMAP = 3,
71 HLSQ_BLOCK_ID_SP_VS = 4,
72 HLSQ_BLOCK_ID_SP_FS = 6,
73};
74
75enum a3xx_cache_opcode {
76 INVALIDATE = 1,
77};
78
79enum a3xx_vtx_fmt {
80 VFMT_FLOAT_32 = 0,
81 VFMT_FLOAT_32_32 = 1,
82 VFMT_FLOAT_32_32_32 = 2,
83 VFMT_FLOAT_32_32_32_32 = 3,
84 VFMT_FLOAT_16 = 4,
85 VFMT_FLOAT_16_16 = 5,
86 VFMT_FLOAT_16_16_16 = 6,
87 VFMT_FLOAT_16_16_16_16 = 7,
88 VFMT_FIXED_32 = 8,
89 VFMT_FIXED_32_32 = 9,
90 VFMT_FIXED_32_32_32 = 10,
91 VFMT_FIXED_32_32_32_32 = 11,
92 VFMT_SHORT_16 = 16,
93 VFMT_SHORT_16_16 = 17,
94 VFMT_SHORT_16_16_16 = 18,
95 VFMT_SHORT_16_16_16_16 = 19,
96 VFMT_USHORT_16 = 20,
97 VFMT_USHORT_16_16 = 21,
98 VFMT_USHORT_16_16_16 = 22,
99 VFMT_USHORT_16_16_16_16 = 23,
100 VFMT_NORM_SHORT_16 = 24,
101 VFMT_NORM_SHORT_16_16 = 25,
102 VFMT_NORM_SHORT_16_16_16 = 26,
103 VFMT_NORM_SHORT_16_16_16_16 = 27,
104 VFMT_NORM_USHORT_16 = 28,
105 VFMT_NORM_USHORT_16_16 = 29,
106 VFMT_NORM_USHORT_16_16_16 = 30,
107 VFMT_NORM_USHORT_16_16_16_16 = 31,
108 VFMT_UBYTE_8 = 40,
109 VFMT_UBYTE_8_8 = 41,
110 VFMT_UBYTE_8_8_8 = 42,
111 VFMT_UBYTE_8_8_8_8 = 43,
112 VFMT_NORM_UBYTE_8 = 44,
113 VFMT_NORM_UBYTE_8_8 = 45,
114 VFMT_NORM_UBYTE_8_8_8 = 46,
115 VFMT_NORM_UBYTE_8_8_8_8 = 47,
116 VFMT_BYTE_8 = 48,
117 VFMT_BYTE_8_8 = 49,
118 VFMT_BYTE_8_8_8 = 50,
119 VFMT_BYTE_8_8_8_8 = 51,
120 VFMT_NORM_BYTE_8 = 52,
121 VFMT_NORM_BYTE_8_8 = 53,
122 VFMT_NORM_BYTE_8_8_8 = 54,
123 VFMT_NORM_BYTE_8_8_8_8 = 55,
124 VFMT_UINT_10_10_10_2 = 60,
125 VFMT_NORM_UINT_10_10_10_2 = 61,
126 VFMT_INT_10_10_10_2 = 62,
127 VFMT_NORM_INT_10_10_10_2 = 63,
128};
129
130enum a3xx_tex_fmt {
131 TFMT_NORM_USHORT_565 = 4,
132 TFMT_NORM_USHORT_5551 = 6,
133 TFMT_NORM_USHORT_4444 = 7,
134 TFMT_NORM_UINT_X8Z24 = 10,
135 TFMT_NORM_UINT_NV12_UV_TILED = 17,
136 TFMT_NORM_UINT_NV12_Y_TILED = 19,
137 TFMT_NORM_UINT_NV12_UV = 21,
138 TFMT_NORM_UINT_NV12_Y = 23,
139 TFMT_NORM_UINT_I420_Y = 24,
140 TFMT_NORM_UINT_I420_U = 26,
141 TFMT_NORM_UINT_I420_V = 27,
142 TFMT_NORM_UINT_2_10_10_10 = 41,
143 TFMT_NORM_UINT_A8 = 44,
144 TFMT_NORM_UINT_L8_A8 = 47,
145 TFMT_NORM_UINT_8 = 48,
146 TFMT_NORM_UINT_8_8 = 49,
147 TFMT_NORM_UINT_8_8_8 = 50,
148 TFMT_NORM_UINT_8_8_8_8 = 51,
149 TFMT_FLOAT_16 = 64,
150 TFMT_FLOAT_16_16 = 65,
151 TFMT_FLOAT_16_16_16_16 = 67,
152 TFMT_FLOAT_32 = 84,
153 TFMT_FLOAT_32_32 = 85,
154 TFMT_FLOAT_32_32_32_32 = 87,
155};
156
157enum a3xx_tex_fetchsize {
158 TFETCH_DISABLE = 0,
159 TFETCH_1_BYTE = 1,
160 TFETCH_2_BYTE = 2,
161 TFETCH_4_BYTE = 3,
162 TFETCH_8_BYTE = 4,
163 TFETCH_16_BYTE = 5,
164};
165
166enum a3xx_color_fmt {
167 RB_R8G8B8_UNORM = 4,
168 RB_R8G8B8A8_UNORM = 8,
169 RB_Z16_UNORM = 12,
170 RB_A8_UNORM = 20,
171};
172
173enum a3xx_color_swap {
174 WZYX = 0,
175 WXYZ = 1,
176 ZYXW = 2,
177 XYZW = 3,
178};
179
180enum a3xx_msaa_samples {
181 MSAA_ONE = 0,
182 MSAA_TWO = 1,
183 MSAA_FOUR = 2,
184};
185
186enum a3xx_sp_perfcounter_select {
187 SP_FS_CFLOW_INSTRUCTIONS = 12,
188 SP_FS_FULL_ALU_INSTRUCTIONS = 14,
189 SP0_ICL1_MISSES = 26,
190 SP_ALU_ACTIVE_CYCLES = 29,
191};
192
193enum adreno_rb_copy_control_mode {
194 RB_COPY_RESOLVE = 1,
195 RB_COPY_DEPTH_STENCIL = 5,
196};
197
198enum a3xx_tex_filter {
199 A3XX_TEX_NEAREST = 0,
200 A3XX_TEX_LINEAR = 1,
201};
202
203enum a3xx_tex_clamp {
204 A3XX_TEX_REPEAT = 0,
205 A3XX_TEX_CLAMP_TO_EDGE = 1,
206 A3XX_TEX_MIRROR_REPEAT = 2,
207 A3XX_TEX_CLAMP_NONE = 3,
208};
209
210enum a3xx_tex_swiz {
211 A3XX_TEX_X = 0,
212 A3XX_TEX_Y = 1,
213 A3XX_TEX_Z = 2,
214 A3XX_TEX_W = 3,
215 A3XX_TEX_ZERO = 4,
216 A3XX_TEX_ONE = 5,
217};
218
219enum a3xx_tex_type {
220 A3XX_TEX_1D = 0,
221 A3XX_TEX_2D = 1,
222 A3XX_TEX_CUBE = 2,
223 A3XX_TEX_3D = 3,
224};
225
226#define A3XX_INT0_RBBM_GPU_IDLE 0x00000001
227#define A3XX_INT0_RBBM_AHB_ERROR 0x00000002
228#define A3XX_INT0_RBBM_REG_TIMEOUT 0x00000004
229#define A3XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
230#define A3XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
231#define A3XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00000020
232#define A3XX_INT0_VFD_ERROR 0x00000040
233#define A3XX_INT0_CP_SW_INT 0x00000080
234#define A3XX_INT0_CP_T0_PACKET_IN_IB 0x00000100
235#define A3XX_INT0_CP_OPCODE_ERROR 0x00000200
236#define A3XX_INT0_CP_RESERVED_BIT_ERROR 0x00000400
237#define A3XX_INT0_CP_HW_FAULT 0x00000800
238#define A3XX_INT0_CP_DMA 0x00001000
239#define A3XX_INT0_CP_IB2_INT 0x00002000
240#define A3XX_INT0_CP_IB1_INT 0x00004000
241#define A3XX_INT0_CP_RB_INT 0x00008000
242#define A3XX_INT0_CP_REG_PROTECT_FAULT 0x00010000
243#define A3XX_INT0_CP_RB_DONE_TS 0x00020000
244#define A3XX_INT0_CP_VS_DONE_TS 0x00040000
245#define A3XX_INT0_CP_PS_DONE_TS 0x00080000
246#define A3XX_INT0_CACHE_FLUSH_TS 0x00100000
247#define A3XX_INT0_CP_AHB_ERROR_HALT 0x00200000
248#define A3XX_INT0_MISC_HANG_DETECT 0x01000000
249#define A3XX_INT0_UCHE_OOB_ACCESS 0x02000000
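/* Sketch, not part of the generated header: the single-bit A3XX_INT0_* flags
 * above are meant to be OR-ed into an interrupt mask written to
 * REG_A3XX_RBBM_INT_0_MASK and checked against REG_A3XX_RBBM_INT_0_STATUS,
 * both defined below.  A purely illustrative selection:
 *
 *   #define A3XX_IRQ_MASK_EXAMPLE \
 *           (A3XX_INT0_RBBM_AHB_ERROR | \
 *            A3XX_INT0_CP_HW_FAULT | \
 *            A3XX_INT0_CP_RB_INT | \
 *            A3XX_INT0_CACHE_FLUSH_TS | \
 *            A3XX_INT0_UCHE_OOB_ACCESS)
 *
 * Which bits a driver actually enables is its own policy; the name and the
 * selection here are hypothetical.
 */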
250#define REG_A3XX_RBBM_HW_VERSION 0x00000000
251
252#define REG_A3XX_RBBM_HW_RELEASE 0x00000001
253
254#define REG_A3XX_RBBM_HW_CONFIGURATION 0x00000002
255
256#define REG_A3XX_RBBM_CLOCK_CTL 0x00000010
257
258#define REG_A3XX_RBBM_SP_HYST_CNT 0x00000012
259
260#define REG_A3XX_RBBM_SW_RESET_CMD 0x00000018
261
262#define REG_A3XX_RBBM_AHB_CTL0 0x00000020
263
264#define REG_A3XX_RBBM_AHB_CTL1 0x00000021
265
266#define REG_A3XX_RBBM_AHB_CMD 0x00000022
267
268#define REG_A3XX_RBBM_AHB_ERROR_STATUS 0x00000027
269
270#define REG_A3XX_RBBM_GPR0_CTL 0x0000002e
271
272#define REG_A3XX_RBBM_STATUS 0x00000030
273#define A3XX_RBBM_STATUS_HI_BUSY 0x00000001
274#define A3XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
275#define A3XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
276#define A3XX_RBBM_STATUS_CP_NRT_BUSY 0x00004000
277#define A3XX_RBBM_STATUS_VBIF_BUSY 0x00008000
278#define A3XX_RBBM_STATUS_TSE_BUSY 0x00010000
279#define A3XX_RBBM_STATUS_RAS_BUSY 0x00020000
280#define A3XX_RBBM_STATUS_RB_BUSY 0x00040000
281#define A3XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
282#define A3XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
283#define A3XX_RBBM_STATUS_VFD_BUSY 0x00200000
284#define A3XX_RBBM_STATUS_VPC_BUSY 0x00400000
285#define A3XX_RBBM_STATUS_UCHE_BUSY 0x00800000
286#define A3XX_RBBM_STATUS_SP_BUSY 0x01000000
287#define A3XX_RBBM_STATUS_TPL1_BUSY 0x02000000
288#define A3XX_RBBM_STATUS_MARB_BUSY 0x04000000
289#define A3XX_RBBM_STATUS_VSC_BUSY 0x08000000
290#define A3XX_RBBM_STATUS_ARB_BUSY 0x10000000
291#define A3XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
292#define A3XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000
293#define A3XX_RBBM_STATUS_GPU_BUSY 0x80000000
294
295#define REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x00000033
296
297#define REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x00000050
298
299#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL0 0x00000051
300
301#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL1 0x00000054
302
303#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL2 0x00000057
304
305#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x0000005a
306
307#define REG_A3XX_RBBM_INT_CLEAR_CMD 0x00000061
308
309#define REG_A3XX_RBBM_INT_0_MASK 0x00000063
310
311#define REG_A3XX_RBBM_INT_0_STATUS 0x00000064
312
313#define REG_A3XX_RBBM_PERFCTR_CTL 0x00000080
314
315#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD0 0x00000081
316
317#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD1 0x00000082
318
319#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000084
320
321#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000085
322
323#define REG_A3XX_RBBM_PERFCOUNTER0_SELECT 0x00000086
324
325#define REG_A3XX_RBBM_PERFCOUNTER1_SELECT 0x00000087
326
327#define REG_A3XX_RBBM_GPU_BUSY_MASKED 0x00000088
328
329#define REG_A3XX_RBBM_PERFCTR_CP_0_LO 0x00000090
330
331#define REG_A3XX_RBBM_PERFCTR_CP_0_HI 0x00000091
332
333#define REG_A3XX_RBBM_PERFCTR_RBBM_0_LO 0x00000092
334
335#define REG_A3XX_RBBM_PERFCTR_RBBM_0_HI 0x00000093
336
337#define REG_A3XX_RBBM_PERFCTR_RBBM_1_LO 0x00000094
338
339#define REG_A3XX_RBBM_PERFCTR_RBBM_1_HI 0x00000095
340
341#define REG_A3XX_RBBM_PERFCTR_PC_0_LO 0x00000096
342
343#define REG_A3XX_RBBM_PERFCTR_PC_0_HI 0x00000097
344
345#define REG_A3XX_RBBM_PERFCTR_PC_1_LO 0x00000098
346
347#define REG_A3XX_RBBM_PERFCTR_PC_1_HI 0x00000099
348
349#define REG_A3XX_RBBM_PERFCTR_PC_2_LO 0x0000009a
350
351#define REG_A3XX_RBBM_PERFCTR_PC_2_HI 0x0000009b
352
353#define REG_A3XX_RBBM_PERFCTR_PC_3_LO 0x0000009c
354
355#define REG_A3XX_RBBM_PERFCTR_PC_3_HI 0x0000009d
356
357#define REG_A3XX_RBBM_PERFCTR_VFD_0_LO 0x0000009e
358
359#define REG_A3XX_RBBM_PERFCTR_VFD_0_HI 0x0000009f
360
361#define REG_A3XX_RBBM_PERFCTR_VFD_1_LO 0x000000a0
362
363#define REG_A3XX_RBBM_PERFCTR_VFD_1_HI 0x000000a1
364
365#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_LO 0x000000a2
366
367#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_HI 0x000000a3
368
369#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_LO 0x000000a4
370
371#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_HI 0x000000a5
372
373#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_LO 0x000000a6
374
375#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_HI 0x000000a7
376
377#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_LO 0x000000a8
378
379#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_HI 0x000000a9
380
381#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_LO 0x000000aa
382
383#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_HI 0x000000ab
384
385#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_LO 0x000000ac
386
387#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_HI 0x000000ad
388
389#define REG_A3XX_RBBM_PERFCTR_VPC_0_LO 0x000000ae
390
391#define REG_A3XX_RBBM_PERFCTR_VPC_0_HI 0x000000af
392
393#define REG_A3XX_RBBM_PERFCTR_VPC_1_LO 0x000000b0
394
395#define REG_A3XX_RBBM_PERFCTR_VPC_1_HI 0x000000b1
396
397#define REG_A3XX_RBBM_PERFCTR_TSE_0_LO 0x000000b2
398
399#define REG_A3XX_RBBM_PERFCTR_TSE_0_HI 0x000000b3
400
401#define REG_A3XX_RBBM_PERFCTR_TSE_1_LO 0x000000b4
402
403#define REG_A3XX_RBBM_PERFCTR_TSE_1_HI 0x000000b5
404
405#define REG_A3XX_RBBM_PERFCTR_RAS_0_LO 0x000000b6
406
407#define REG_A3XX_RBBM_PERFCTR_RAS_0_HI 0x000000b7
408
409#define REG_A3XX_RBBM_PERFCTR_RAS_1_LO 0x000000b8
410
411#define REG_A3XX_RBBM_PERFCTR_RAS_1_HI 0x000000b9
412
413#define REG_A3XX_RBBM_PERFCTR_UCHE_0_LO 0x000000ba
414
415#define REG_A3XX_RBBM_PERFCTR_UCHE_0_HI 0x000000bb
416
417#define REG_A3XX_RBBM_PERFCTR_UCHE_1_LO 0x000000bc
418
419#define REG_A3XX_RBBM_PERFCTR_UCHE_1_HI 0x000000bd
420
421#define REG_A3XX_RBBM_PERFCTR_UCHE_2_LO 0x000000be
422
423#define REG_A3XX_RBBM_PERFCTR_UCHE_2_HI 0x000000bf
424
425#define REG_A3XX_RBBM_PERFCTR_UCHE_3_LO 0x000000c0
426
427#define REG_A3XX_RBBM_PERFCTR_UCHE_3_HI 0x000000c1
428
429#define REG_A3XX_RBBM_PERFCTR_UCHE_4_LO 0x000000c2
430
431#define REG_A3XX_RBBM_PERFCTR_UCHE_4_HI 0x000000c3
432
433#define REG_A3XX_RBBM_PERFCTR_UCHE_5_LO 0x000000c4
434
435#define REG_A3XX_RBBM_PERFCTR_UCHE_5_HI 0x000000c5
436
437#define REG_A3XX_RBBM_PERFCTR_TP_0_LO 0x000000c6
438
439#define REG_A3XX_RBBM_PERFCTR_TP_0_HI 0x000000c7
440
441#define REG_A3XX_RBBM_PERFCTR_TP_1_LO 0x000000c8
442
443#define REG_A3XX_RBBM_PERFCTR_TP_1_HI 0x000000c9
444
445#define REG_A3XX_RBBM_PERFCTR_TP_2_LO 0x000000ca
446
447#define REG_A3XX_RBBM_PERFCTR_TP_2_HI 0x000000cb
448
449#define REG_A3XX_RBBM_PERFCTR_TP_3_LO 0x000000cc
450
451#define REG_A3XX_RBBM_PERFCTR_TP_3_HI 0x000000cd
452
453#define REG_A3XX_RBBM_PERFCTR_TP_4_LO 0x000000ce
454
455#define REG_A3XX_RBBM_PERFCTR_TP_4_HI 0x000000cf
456
457#define REG_A3XX_RBBM_PERFCTR_TP_5_LO 0x000000d0
458
459#define REG_A3XX_RBBM_PERFCTR_TP_5_HI 0x000000d1
460
461#define REG_A3XX_RBBM_PERFCTR_SP_0_LO 0x000000d2
462
463#define REG_A3XX_RBBM_PERFCTR_SP_0_HI 0x000000d3
464
465#define REG_A3XX_RBBM_PERFCTR_SP_1_LO 0x000000d4
466
467#define REG_A3XX_RBBM_PERFCTR_SP_1_HI 0x000000d5
468
469#define REG_A3XX_RBBM_PERFCTR_SP_2_LO 0x000000d6
470
471#define REG_A3XX_RBBM_PERFCTR_SP_2_HI 0x000000d7
472
473#define REG_A3XX_RBBM_PERFCTR_SP_3_LO 0x000000d8
474
475#define REG_A3XX_RBBM_PERFCTR_SP_3_HI 0x000000d9
476
477#define REG_A3XX_RBBM_PERFCTR_SP_4_LO 0x000000da
478
479#define REG_A3XX_RBBM_PERFCTR_SP_4_HI 0x000000db
480
481#define REG_A3XX_RBBM_PERFCTR_SP_5_LO 0x000000dc
482
483#define REG_A3XX_RBBM_PERFCTR_SP_5_HI 0x000000dd
484
485#define REG_A3XX_RBBM_PERFCTR_SP_6_LO 0x000000de
486
487#define REG_A3XX_RBBM_PERFCTR_SP_6_HI 0x000000df
488
489#define REG_A3XX_RBBM_PERFCTR_SP_7_LO 0x000000e0
490
491#define REG_A3XX_RBBM_PERFCTR_SP_7_HI 0x000000e1
492
493#define REG_A3XX_RBBM_PERFCTR_RB_0_LO 0x000000e2
494
495#define REG_A3XX_RBBM_PERFCTR_RB_0_HI 0x000000e3
496
497#define REG_A3XX_RBBM_PERFCTR_RB_1_LO 0x000000e4
498
499#define REG_A3XX_RBBM_PERFCTR_RB_1_HI 0x000000e5
500
501#define REG_A3XX_RBBM_PERFCTR_PWR_0_LO 0x000000ea
502
503#define REG_A3XX_RBBM_PERFCTR_PWR_0_HI 0x000000eb
504
505#define REG_A3XX_RBBM_PERFCTR_PWR_1_LO 0x000000ec
506
507#define REG_A3XX_RBBM_PERFCTR_PWR_1_HI 0x000000ed
508
509#define REG_A3XX_RBBM_RBBM_CTL 0x00000100
510
511#define REG_A3XX_RBBM_DEBUG_BUS_CTL 0x00000111
512
513#define REG_A3XX_RBBM_DEBUG_BUS_DATA_STATUS 0x00000112
514
515#define REG_A3XX_CP_PFP_UCODE_ADDR 0x000001c9
516
517#define REG_A3XX_CP_PFP_UCODE_DATA 0x000001ca
518
519#define REG_A3XX_CP_ROQ_ADDR 0x000001cc
520
521#define REG_A3XX_CP_ROQ_DATA 0x000001cd
522
523#define REG_A3XX_CP_MERCIU_ADDR 0x000001d1
524
525#define REG_A3XX_CP_MERCIU_DATA 0x000001d2
526
527#define REG_A3XX_CP_MERCIU_DATA2 0x000001d3
528
529#define REG_A3XX_CP_MEQ_ADDR 0x000001da
530
531#define REG_A3XX_CP_MEQ_DATA 0x000001db
532
533#define REG_A3XX_CP_PERFCOUNTER_SELECT 0x00000445
534
535#define REG_A3XX_CP_HW_FAULT 0x0000045c
536
537#define REG_A3XX_CP_PROTECT_CTRL 0x0000045e
538
539#define REG_A3XX_CP_PROTECT_STATUS 0x0000045f
540
541static inline uint32_t REG_A3XX_CP_PROTECT(uint32_t i0) { return 0x00000460 + 0x1*i0; }
542
543static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460 + 0x1*i0; }
544
545#define REG_A3XX_CP_AHB_FAULT 0x0000054d
546
547#define REG_A3XX_GRAS_CL_CLIP_CNTL 0x00002040
548#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 0x00001000
549#define A3XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
550#define A3XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
551#define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000
552#define A3XX_GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 0x00100000
553#define A3XX_GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 0x00200000
554
555#define REG_A3XX_GRAS_CL_GB_CLIP_ADJ 0x00002044
556#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
557#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT 0
558static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ(uint32_t val)
559{
560 return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK;
561}
562#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK 0x000ffc00
563#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT 10
564static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_VERT(uint32_t val)
565{
566 return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK;
567}
568
569#define REG_A3XX_GRAS_CL_VPORT_XOFFSET 0x00002048
570#define A3XX_GRAS_CL_VPORT_XOFFSET__MASK 0xffffffff
571#define A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT 0
572static inline uint32_t A3XX_GRAS_CL_VPORT_XOFFSET(float val)
573{
574 return ((fui(val)) << A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_XOFFSET__MASK;
575}
576
577#define REG_A3XX_GRAS_CL_VPORT_XSCALE 0x00002049
578#define A3XX_GRAS_CL_VPORT_XSCALE__MASK 0xffffffff
579#define A3XX_GRAS_CL_VPORT_XSCALE__SHIFT 0
580static inline uint32_t A3XX_GRAS_CL_VPORT_XSCALE(float val)
581{
582 return ((fui(val)) << A3XX_GRAS_CL_VPORT_XSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_XSCALE__MASK;
583}
584
585#define REG_A3XX_GRAS_CL_VPORT_YOFFSET 0x0000204a
586#define A3XX_GRAS_CL_VPORT_YOFFSET__MASK 0xffffffff
587#define A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT 0
588static inline uint32_t A3XX_GRAS_CL_VPORT_YOFFSET(float val)
589{
590 return ((fui(val)) << A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_YOFFSET__MASK;
591}
592
593#define REG_A3XX_GRAS_CL_VPORT_YSCALE 0x0000204b
594#define A3XX_GRAS_CL_VPORT_YSCALE__MASK 0xffffffff
595#define A3XX_GRAS_CL_VPORT_YSCALE__SHIFT 0
596static inline uint32_t A3XX_GRAS_CL_VPORT_YSCALE(float val)
597{
598 return ((fui(val)) << A3XX_GRAS_CL_VPORT_YSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_YSCALE__MASK;
599}
600
601#define REG_A3XX_GRAS_CL_VPORT_ZOFFSET 0x0000204c
602#define A3XX_GRAS_CL_VPORT_ZOFFSET__MASK 0xffffffff
603#define A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT 0
604static inline uint32_t A3XX_GRAS_CL_VPORT_ZOFFSET(float val)
605{
606 return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_ZOFFSET__MASK;
607}
608
609#define REG_A3XX_GRAS_CL_VPORT_ZSCALE 0x0000204d
610#define A3XX_GRAS_CL_VPORT_ZSCALE__MASK 0xffffffff
611#define A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT 0
612static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val)
613{
614 return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_ZSCALE__MASK;
615}
616
617#define REG_A3XX_GRAS_SU_POINT_MINMAX 0x00002068
618
619#define REG_A3XX_GRAS_SU_POINT_SIZE 0x00002069
620
621#define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000206c
622#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK 0x00ffffff
623#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT 0
624static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
625{
626 return ((((uint32_t)(val * 40.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
627}
628
629#define REG_A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000206d
630#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
631#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
632static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
633{
634 return ((((uint32_t)(val * 44.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
635}
636
637#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070
638#define A3XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
639#define A3XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
640#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007fc
641#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 2
642static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(uint32_t val)
643{
644 return ((val) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
645}
646#define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
647
648#define REG_A3XX_GRAS_SC_CONTROL 0x00002072
649#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x000000f0
650#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 4
651static inline uint32_t A3XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
652{
653 return ((val) << A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK;
654}
655#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000f00
656#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 8
657static inline uint32_t A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES(enum a3xx_msaa_samples val)
658{
659 return ((val) << A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK;
660}
661#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000
662#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12
663static inline uint32_t A3XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val)
664{
665 return ((val) << A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK;
666}
667
668#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_TL 0x00002074
669#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
670#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
671#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
672static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
673{
674 return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
675}
676#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
677#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
678static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
679{
680 return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
681}
682
683#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_BR 0x00002075
684#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
685#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
686#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
687static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
688{
689 return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
690}
691#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
692#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
693static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
694{
695 return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
696}
697
698#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_TL 0x00002079
699#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
700#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
701#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
702static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
703{
704 return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
705}
706#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
707#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
708static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
709{
710 return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
711}
712
713#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000207a
714#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
715#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
716#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
717static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
718{
719 return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
720}
721#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
722#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
723static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
724{
725 return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
726}
727
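/* Editor's note: illustrative sketch only, not part of the generated header.
 * The scissor registers above pack a top-left / bottom-right pixel rectangle
 * into two 32-bit words via the *_TL_X/Y and *_BR_X/Y helpers.  A minimal
 * example of building the pair for a w x h window anchored at the origin
 * (the inclusive bottom-right convention, i.e. the "- 1", is an assumption
 * based on how such scissor registers are normally programmed; how the two
 * words are ultimately written to the hardware is outside this header):
 */
static inline void example_pack_window_scissor(uint32_t w, uint32_t h,
					       uint32_t *tl, uint32_t *br)
{
	*tl = A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X(0) |
	      A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(0);
	*br = A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X(w - 1) |
	      A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(h - 1);
}
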
728#define REG_A3XX_RB_MODE_CONTROL 0x000020c0
729#define A3XX_RB_MODE_CONTROL_GMEM_BYPASS 0x00000080
730#define A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK 0x00000700
731#define A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT 8
732static inline uint32_t A3XX_RB_MODE_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
733{
734 return ((val) << A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT) & A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK;
735}
736#define A3XX_RB_MODE_CONTROL_MARB_CACHE_SPLIT_MODE 0x00008000
737#define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000
738
739#define REG_A3XX_RB_RENDER_CONTROL 0x000020c1
740#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK 0x00000ff0
741#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4
742static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
743{
744 return ((val >> 5) << A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT) & A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK;
745}
746#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000
747#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000
748#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000
749#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24
750static inline uint32_t A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
751{
752 return ((val) << A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK;
753}
754
755#define REG_A3XX_RB_MSAA_CONTROL 0x000020c2
756#define A3XX_RB_MSAA_CONTROL_DISABLE 0x00000400
757#define A3XX_RB_MSAA_CONTROL_SAMPLES__MASK 0x0000f000
758#define A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT 12
759static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLES(enum a3xx_msaa_samples val)
760{
761 return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLES__MASK;
762}
763#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK 0xffff0000
764#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT 16
765static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(uint32_t val)
766{
767 return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK;
768}
769
770#define REG_A3XX_UNKNOWN_20C3 0x000020c3
771
772static inline uint32_t REG_A3XX_RB_MRT(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
773
774static inline uint32_t REG_A3XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
775#define A3XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
776#define A3XX_RB_MRT_CONTROL_BLEND 0x00000010
777#define A3XX_RB_MRT_CONTROL_BLEND2 0x00000020
778#define A3XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00
779#define A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8
780static inline uint32_t A3XX_RB_MRT_CONTROL_ROP_CODE(uint32_t val)
781{
782 return ((val) << A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A3XX_RB_MRT_CONTROL_ROP_CODE__MASK;
783}
784#define A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK 0x00003000
785#define A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT 12
786static inline uint32_t A3XX_RB_MRT_CONTROL_DITHER_MODE(enum adreno_rb_dither_mode val)
787{
788 return ((val) << A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT) & A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK;
789}
790#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000
791#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24
792static inline uint32_t A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
793{
794 return ((val) << A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
795}
796
797static inline uint32_t REG_A3XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x000020c5 + 0x4*i0; }
798#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000003f
799#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
800static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a3xx_color_fmt val)
801{
802 return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
803}
804#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x000000c0
805#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 6
806static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a3xx_tile_mode val)
807{
808 return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
809}
810#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00000c00
811#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 10
812static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
813{
814 return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
815}
816#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xfffe0000
817#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 17
818static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
819{
820 return ((val >> 5) << A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
821}
822
823static inline uint32_t REG_A3XX_RB_MRT_BUF_BASE(uint32_t i0) { return 0x000020c6 + 0x4*i0; }
824#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK 0xfffffff0
825#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT 4
826static inline uint32_t A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE(uint32_t val)
827{
828 return ((val >> 5) << A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT) & A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK;
829}
830
831static inline uint32_t REG_A3XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020c7 + 0x4*i0; }
832#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
833#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
834static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
835{
836 return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
837}
838#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
839#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
840static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum adreno_rb_blend_opcode val)
841{
842 return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
843}
844#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
845#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
846static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
847{
848 return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
849}
850#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
851#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
852static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
853{
854 return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
855}
856#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
857#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
858static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum adreno_rb_blend_opcode val)
859{
860 return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
861}
862#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
863#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
864static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
865{
866 return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
867}
868#define A3XX_RB_MRT_BLEND_CONTROL_CLAMP_ENABLE 0x20000000
869
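/* Editor's note: illustrative sketch only, not part of the generated header.
 * RB_MRT_CONTROL and RB_MRT_BLEND_CONTROL for render target i are built by
 * OR-ing the helpers above.  The factor/opcode arguments come from
 * enum adreno_rb_blend_factor / adreno_rb_blend_opcode declared earlier in
 * this file; the numeric values below are placeholders for the real
 * enumerators (src-alpha / one-minus-src-alpha / add), not their actual
 * values.  Sketch for alpha blending with all four components written:
 */
static inline void example_pack_mrt_blend(uint32_t *control, uint32_t *blend)
{
	*control = A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE(0xf) |	/* write RGBA */
		   A3XX_RB_MRT_CONTROL_BLEND |
		   A3XX_RB_MRT_CONTROL_BLEND2;
	*blend = A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR((enum adreno_rb_blend_factor)6) |	/* placeholder: SRC_ALPHA */
		 A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE((enum adreno_rb_blend_opcode)0) |	/* placeholder: ADD */
		 A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR((enum adreno_rb_blend_factor)7) |	/* placeholder: ONE_MINUS_SRC_ALPHA */
		 A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR((enum adreno_rb_blend_factor)6) |
		 A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE((enum adreno_rb_blend_opcode)0) |
		 A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR((enum adreno_rb_blend_factor)7) |
		 A3XX_RB_MRT_BLEND_CONTROL_CLAMP_ENABLE;
}
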
870#define REG_A3XX_RB_BLEND_RED 0x000020e4
871#define A3XX_RB_BLEND_RED_UINT__MASK 0x000000ff
872#define A3XX_RB_BLEND_RED_UINT__SHIFT 0
873static inline uint32_t A3XX_RB_BLEND_RED_UINT(uint32_t val)
874{
875 return ((val) << A3XX_RB_BLEND_RED_UINT__SHIFT) & A3XX_RB_BLEND_RED_UINT__MASK;
876}
877#define A3XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
878#define A3XX_RB_BLEND_RED_FLOAT__SHIFT 16
879static inline uint32_t A3XX_RB_BLEND_RED_FLOAT(float val)
880{
881 return ((util_float_to_half(val)) << A3XX_RB_BLEND_RED_FLOAT__SHIFT) & A3XX_RB_BLEND_RED_FLOAT__MASK;
882}
883
884#define REG_A3XX_RB_BLEND_GREEN 0x000020e5
885#define A3XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
886#define A3XX_RB_BLEND_GREEN_UINT__SHIFT 0
887static inline uint32_t A3XX_RB_BLEND_GREEN_UINT(uint32_t val)
888{
889 return ((val) << A3XX_RB_BLEND_GREEN_UINT__SHIFT) & A3XX_RB_BLEND_GREEN_UINT__MASK;
890}
891#define A3XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
892#define A3XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
893static inline uint32_t A3XX_RB_BLEND_GREEN_FLOAT(float val)
894{
895 return ((util_float_to_half(val)) << A3XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A3XX_RB_BLEND_GREEN_FLOAT__MASK;
896}
897
898#define REG_A3XX_RB_BLEND_BLUE 0x000020e6
899#define A3XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
900#define A3XX_RB_BLEND_BLUE_UINT__SHIFT 0
901static inline uint32_t A3XX_RB_BLEND_BLUE_UINT(uint32_t val)
902{
903 return ((val) << A3XX_RB_BLEND_BLUE_UINT__SHIFT) & A3XX_RB_BLEND_BLUE_UINT__MASK;
904}
905#define A3XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
906#define A3XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
907static inline uint32_t A3XX_RB_BLEND_BLUE_FLOAT(float val)
908{
909 return ((util_float_to_half(val)) << A3XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A3XX_RB_BLEND_BLUE_FLOAT__MASK;
910}
911
912#define REG_A3XX_RB_BLEND_ALPHA 0x000020e7
913#define A3XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
914#define A3XX_RB_BLEND_ALPHA_UINT__SHIFT 0
915static inline uint32_t A3XX_RB_BLEND_ALPHA_UINT(uint32_t val)
916{
917 return ((val) << A3XX_RB_BLEND_ALPHA_UINT__SHIFT) & A3XX_RB_BLEND_ALPHA_UINT__MASK;
918}
919#define A3XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
920#define A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
921static inline uint32_t A3XX_RB_BLEND_ALPHA_FLOAT(float val)
922{
923 return ((util_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK;
924}
925
926#define REG_A3XX_UNKNOWN_20E8 0x000020e8
927
928#define REG_A3XX_UNKNOWN_20E9 0x000020e9
929
930#define REG_A3XX_UNKNOWN_20EA 0x000020ea
931
932#define REG_A3XX_UNKNOWN_20EB 0x000020eb
933
934#define REG_A3XX_RB_COPY_CONTROL 0x000020ec
935#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003
936#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT 0
937static inline uint32_t A3XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples val)
938{
939 return ((val) << A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK;
940}
941#define A3XX_RB_COPY_CONTROL_MODE__MASK 0x00000070
942#define A3XX_RB_COPY_CONTROL_MODE__SHIFT 4
943static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val)
944{
945 return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
946}
947#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xfffffc00
948#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 10
949static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
950{
951 return ((val >> 10) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
952}
953
954#define REG_A3XX_RB_COPY_DEST_BASE 0x000020ed
955#define A3XX_RB_COPY_DEST_BASE_BASE__MASK 0xfffffff0
956#define A3XX_RB_COPY_DEST_BASE_BASE__SHIFT 4
957static inline uint32_t A3XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
958{
959 return ((val >> 5) << A3XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A3XX_RB_COPY_DEST_BASE_BASE__MASK;
960}
961
962#define REG_A3XX_RB_COPY_DEST_PITCH 0x000020ee
963#define A3XX_RB_COPY_DEST_PITCH_PITCH__MASK 0xffffffff
964#define A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0
965static inline uint32_t A3XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val)
966{
967 return ((val >> 5) << A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A3XX_RB_COPY_DEST_PITCH_PITCH__MASK;
968}
969
970#define REG_A3XX_RB_COPY_DEST_INFO 0x000020ef
971#define A3XX_RB_COPY_DEST_INFO_TILE__MASK 0x00000003
972#define A3XX_RB_COPY_DEST_INFO_TILE__SHIFT 0
973static inline uint32_t A3XX_RB_COPY_DEST_INFO_TILE(enum a3xx_tile_mode val)
974{
975 return ((val) << A3XX_RB_COPY_DEST_INFO_TILE__SHIFT) & A3XX_RB_COPY_DEST_INFO_TILE__MASK;
976}
977#define A3XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000fc
978#define A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 2
979static inline uint32_t A3XX_RB_COPY_DEST_INFO_FORMAT(enum a3xx_color_fmt val)
980{
981 return ((val) << A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A3XX_RB_COPY_DEST_INFO_FORMAT__MASK;
982}
983#define A3XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
984#define A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
985static inline uint32_t A3XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val)
986{
987 return ((val) << A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A3XX_RB_COPY_DEST_INFO_SWAP__MASK;
988}
989#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000
990#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14
991static inline uint32_t A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val)
992{
993 return ((val) << A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT) & A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK;
994}
995#define A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK 0x001c0000
996#define A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT 18
997static inline uint32_t A3XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endian val)
998{
999 return ((val) << A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT) & A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK;
1000}
1001
1002#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100
1003#define A3XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002
1004#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
1005#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_ENABLE 0x00000008
1006#define A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070
1007#define A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4
1008static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
1009{
1010 return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
1011}
1012#define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
1013#define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
1014
1015#define REG_A3XX_UNKNOWN_2101 0x00002101
1016
1017#define REG_A3XX_RB_DEPTH_INFO 0x00002102
1018#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
1019#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
1020static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
1021{
1022 return ((val) << A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
1023}
1024#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff800
1025#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 11
1026static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
1027{
1028 return ((val >> 10) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
1029}
1030
1031#define REG_A3XX_RB_DEPTH_PITCH 0x00002103
1032#define A3XX_RB_DEPTH_PITCH__MASK 0xffffffff
1033#define A3XX_RB_DEPTH_PITCH__SHIFT 0
1034static inline uint32_t A3XX_RB_DEPTH_PITCH(uint32_t val)
1035{
1036 return ((val >> 3) << A3XX_RB_DEPTH_PITCH__SHIFT) & A3XX_RB_DEPTH_PITCH__MASK;
1037}
1038
1039#define REG_A3XX_RB_STENCIL_CONTROL 0x00002104
1040#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
1041#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000004
1042#define A3XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
1043#define A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
1044static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
1045{
1046 return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC__MASK;
1047}
1048#define A3XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
1049#define A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
1050static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
1051{
1052 return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL__MASK;
1053}
1054#define A3XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
1055#define A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
1056static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
1057{
1058 return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS__MASK;
1059}
1060#define A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
1061#define A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
1062static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
1063{
1064 return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
1065}
1066#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
1067#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
1068static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
1069{
1070 return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
1071}
1072#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
1073#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
1074static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
1075{
1076 return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
1077}
1078#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
1079#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
1080static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
1081{
1082 return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
1083}
1084#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
1085#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
1086static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
1087{
1088 return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
1089}
1090
1091#define REG_A3XX_UNKNOWN_2105 0x00002105
1092
1093#define REG_A3XX_UNKNOWN_2106 0x00002106
1094
1095#define REG_A3XX_UNKNOWN_2107 0x00002107
1096
1097#define REG_A3XX_RB_STENCILREFMASK 0x00002108
1098#define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
1099#define A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
1100static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
1101{
1102 return ((val) << A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILREF__MASK;
1103}
1104#define A3XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
1105#define A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
1106static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
1107{
1108 return ((val) << A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILMASK__MASK;
1109}
1110#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
1111#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
1112static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
1113{
1114 return ((val) << A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
1115}
1116
1117#define REG_A3XX_RB_STENCILREFMASK_BF 0x00002109
1118#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
1119#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
1120static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
1121{
1122 return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
1123}
1124#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
1125#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
1126static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
1127{
1128 return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
1129}
1130#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
1131#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
1132static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
1133{
1134 return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
1135}
1136
1137#define REG_A3XX_PA_SC_WINDOW_OFFSET 0x0000210e
1138#define A3XX_PA_SC_WINDOW_OFFSET_X__MASK 0x0000ffff
1139#define A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0
1140static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_X(uint32_t val)
1141{
1142 return ((val) << A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_X__MASK;
1143}
1144#define A3XX_PA_SC_WINDOW_OFFSET_Y__MASK 0xffff0000
1145#define A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16
1146static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_Y(uint32_t val)
1147{
1148 return ((val) << A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_Y__MASK;
1149}
1150
1151#define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4
1152
1153#define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea
1154
1155#define REG_A3XX_PC_PRIM_VTX_CNTL 0x000021ec
1156#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK 0x0000001f
1157#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT 0
1158static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC(uint32_t val)
1159{
1160 return ((val) << A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK;
1161}
1162#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK 0x000000e0
1163#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT 5
1164static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
1165{
1166 return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK;
1167}
1168#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK 0x00000700
1169#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT 8
1170static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
1171{
1172 return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK;
1173}
1174#define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
1175
1176#define REG_A3XX_PC_RESTART_INDEX 0x000021ed
1177
1178#define REG_A3XX_HLSQ_CONTROL_0_REG 0x00002200
1179#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010
1180#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
1181static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
1182{
1183 return ((val) << A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
1184}
1185#define A3XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040
1186#define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
1187#define A3XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
1188#define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
1189#define A3XX_HLSQ_CONTROL_0_REG_CONSTSWITCHMODE 0x08000000
1190#define A3XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000
1191#define A3XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000
1192#define A3XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000
1193#define A3XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000
1194
1195#define REG_A3XX_HLSQ_CONTROL_1_REG 0x00002201
1196#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x00000040
1197#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6
1198static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
1199{
1200 return ((val) << A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
1201}
1202#define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
1203#define A3XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200
1204
1205#define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202
1206#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
1207#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26
1208static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
1209{
1210 return ((val) << A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK;
1211}
1212
1213#define REG_A3XX_HLSQ_CONTROL_3_REG 0x00002203
1214
1215#define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204
1216#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
1217#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
1218static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
1219{
1220 return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
1221}
1222#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000
1223#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
1224static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
1225{
1226 return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK;
1227}
1228#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
1229#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT 24
1230static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val)
1231{
1232 return ((val) << A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK;
1233}
1234
1235#define REG_A3XX_HLSQ_FS_CONTROL_REG 0x00002205
1236#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
1237#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0
1238static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
1239{
1240 return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
1241}
1242#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000
1243#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
1244static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
1245{
1246 return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK;
1247}
1248#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
1249#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT 24
1250static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val)
1251{
1252 return ((val) << A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK;
1253}
1254
1255#define REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG 0x00002206
1256#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff
1257#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
1258static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
1259{
1260 return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK;
1261}
1262#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000
1263#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
1264static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
1265{
1266 return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK;
1267}
1268
1269#define REG_A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x00002207
1270#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff
1271#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
1272static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
1273{
1274 return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK;
1275}
1276#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000
1277#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
1278static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
1279{
1280 return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK;
1281}
1282
1283#define REG_A3XX_HLSQ_CL_NDRANGE_0_REG 0x0000220a
1284
1285#define REG_A3XX_HLSQ_CL_NDRANGE_1_REG 0x0000220b
1286
1287#define REG_A3XX_HLSQ_CL_NDRANGE_2_REG 0x0000220c
1288
1289#define REG_A3XX_HLSQ_CL_CONTROL_0_REG 0x00002211
1290
1291#define REG_A3XX_HLSQ_CL_CONTROL_1_REG 0x00002212
1292
1293#define REG_A3XX_HLSQ_CL_KERNEL_CONST_REG 0x00002214
1294
1295#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x00002215
1296
1297#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x00002217
1298
1299#define REG_A3XX_HLSQ_CL_WG_OFFSET_REG 0x0000221a
1300
1301#define REG_A3XX_VFD_CONTROL_0 0x00002240
1302#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x0003ffff
1303#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0
1304static inline uint32_t A3XX_VFD_CONTROL_0_TOTALATTRTOVS(uint32_t val)
1305{
1306 return ((val) << A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT) & A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK;
1307}
1308#define A3XX_VFD_CONTROL_0_PACKETSIZE__MASK 0x003c0000
1309#define A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT 18
1310static inline uint32_t A3XX_VFD_CONTROL_0_PACKETSIZE(uint32_t val)
1311{
1312 return ((val) << A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT) & A3XX_VFD_CONTROL_0_PACKETSIZE__MASK;
1313}
1314#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x07c00000
1315#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 22
1316static inline uint32_t A3XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
1317{
1318 return ((val) << A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
1319}
1320#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK 0xf8000000
1321#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT 27
1322static inline uint32_t A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val)
1323{
1324 return ((val) << A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK;
1325}
1326
1327#define REG_A3XX_VFD_CONTROL_1 0x00002241
1328#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000ffff
1329#define A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0
1330static inline uint32_t A3XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
1331{
1332 return ((val) << A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
1333}
1334#define A3XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
1335#define A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
1336static inline uint32_t A3XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
1337{
1338 return ((val) << A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A3XX_VFD_CONTROL_1_REGID4VTX__MASK;
1339}
1340#define A3XX_VFD_CONTROL_1_REGID4INST__MASK 0xff000000
1341#define A3XX_VFD_CONTROL_1_REGID4INST__SHIFT 24
1342static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
1343{
1344 return ((val) << A3XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A3XX_VFD_CONTROL_1_REGID4INST__MASK;
1345}
1346
1347#define REG_A3XX_VFD_INDEX_MIN 0x00002242
1348
1349#define REG_A3XX_VFD_INDEX_MAX 0x00002243
1350
1351#define REG_A3XX_VFD_INSTANCEID_OFFSET 0x00002244
1352
1353#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
1354
1355static inline uint32_t REG_A3XX_VFD_FETCH(uint32_t i0) { return 0x00002246 + 0x2*i0; }
1356
1357static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + 0x2*i0; }
1358#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f
1359#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT 0
1360static inline uint32_t A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val)
1361{
1362 return ((val) << A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK;
1363}
1364#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0001ff80
1365#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7
1366static inline uint32_t A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val)
1367{
1368 return ((val) << A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK;
1369}
1370#define A3XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00020000
1371#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK 0x00fc0000
1372#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT 18
1373static inline uint32_t A3XX_VFD_FETCH_INSTR_0_INDEXCODE(uint32_t val)
1374{
1375 return ((val) << A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK;
1376}
1377#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK 0xff000000
1378#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT 24
1379static inline uint32_t A3XX_VFD_FETCH_INSTR_0_STEPRATE(uint32_t val)
1380{
1381 return ((val) << A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK;
1382}
1383
1384static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x00002247 + 0x2*i0; }
1385
1386static inline uint32_t REG_A3XX_VFD_DECODE(uint32_t i0) { return 0x00002266 + 0x1*i0; }
1387
1388static inline uint32_t REG_A3XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x00002266 + 0x1*i0; }
1389#define A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f
1390#define A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT 0
1391static inline uint32_t A3XX_VFD_DECODE_INSTR_WRITEMASK(uint32_t val)
1392{
1393 return ((val) << A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT) & A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK;
1394}
1395#define A3XX_VFD_DECODE_INSTR_CONSTFILL 0x00000010
1396#define A3XX_VFD_DECODE_INSTR_FORMAT__MASK 0x00000fc0
1397#define A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT 6
1398static inline uint32_t A3XX_VFD_DECODE_INSTR_FORMAT(enum a3xx_vtx_fmt val)
1399{
1400 return ((val) << A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A3XX_VFD_DECODE_INSTR_FORMAT__MASK;
1401}
1402#define A3XX_VFD_DECODE_INSTR_REGID__MASK 0x000ff000
1403#define A3XX_VFD_DECODE_INSTR_REGID__SHIFT 12
1404static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val)
1405{
1406 return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK;
1407}
1408#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000
1409#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24
1410static inline uint32_t A3XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
1411{
1412 return ((val) << A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT) & A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK;
1413}
1414#define A3XX_VFD_DECODE_INSTR_LASTCOMPVALID 0x20000000
1415#define A3XX_VFD_DECODE_INSTR_SWITCHNEXT 0x40000000
1416
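/* Editor's note: illustrative sketch only, not part of the generated header.
 * Each vertex buffer slot i uses a VFD_FETCH_INSTR_0/_1 pair (stride, fetch
 * size and step rate here; the buffer address goes in INSTR_1) plus a
 * VFD_DECODE_INSTR word describing how the fetched bytes are interpreted.
 * A minimal FETCH_INSTR_0 for a tightly packed per-vertex attribute of
 * `size' bytes might look like this; the "size - 1" fetch-size encoding and
 * the step rate of 1 are assumptions, not documented in this header:
 */
static inline uint32_t example_pack_vfd_fetch_instr_0(uint32_t size)
{
	return A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(size - 1) |	/* assumed: bytes minus one */
	       A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(size) |		/* stride == size: tightly packed */
	       A3XX_VFD_FETCH_INSTR_0_STEPRATE(1);		/* assumed: advance every vertex */
}
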
1417#define REG_A3XX_VFD_VS_THREADING_THRESHOLD 0x0000227e
1418#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK 0x0000000f
1419#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT 0
1420static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD(uint32_t val)
1421{
1422 return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK;
1423}
1424#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK 0x0000ff00
1425#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT 8
1426static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT(uint32_t val)
1427{
1428 return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK;
1429}
1430
1431#define REG_A3XX_VPC_ATTR 0x00002280
1432#define A3XX_VPC_ATTR_TOTALATTR__MASK 0x00000fff
1433#define A3XX_VPC_ATTR_TOTALATTR__SHIFT 0
1434static inline uint32_t A3XX_VPC_ATTR_TOTALATTR(uint32_t val)
1435{
1436 return ((val) << A3XX_VPC_ATTR_TOTALATTR__SHIFT) & A3XX_VPC_ATTR_TOTALATTR__MASK;
1437}
1438#define A3XX_VPC_ATTR_THRDASSIGN__MASK 0x0ffff000
1439#define A3XX_VPC_ATTR_THRDASSIGN__SHIFT 12
1440static inline uint32_t A3XX_VPC_ATTR_THRDASSIGN(uint32_t val)
1441{
1442 return ((val) << A3XX_VPC_ATTR_THRDASSIGN__SHIFT) & A3XX_VPC_ATTR_THRDASSIGN__MASK;
1443}
1444#define A3XX_VPC_ATTR_LMSIZE__MASK 0xf0000000
1445#define A3XX_VPC_ATTR_LMSIZE__SHIFT 28
1446static inline uint32_t A3XX_VPC_ATTR_LMSIZE(uint32_t val)
1447{
1448 return ((val) << A3XX_VPC_ATTR_LMSIZE__SHIFT) & A3XX_VPC_ATTR_LMSIZE__MASK;
1449}
1450
1451#define REG_A3XX_VPC_PACK 0x00002281
1452#define A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK 0x0000ff00
1453#define A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT 8
1454static inline uint32_t A3XX_VPC_PACK_NUMFPNONPOSVAR(uint32_t val)
1455{
1456 return ((val) << A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT) & A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK;
1457}
1458#define A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK 0x00ff0000
1459#define A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT 16
1460static inline uint32_t A3XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
1461{
1462 return ((val) << A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK;
1463}
1464
1465static inline uint32_t REG_A3XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002282 + 0x1*i0; }
1466
1467static inline uint32_t REG_A3XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002282 + 0x1*i0; }
1468
1469static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00002286 + 0x1*i0; }
1470
1471static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00002286 + 0x1*i0; }
1472
1473#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_0 0x0000228a
1474
1475#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_1 0x0000228b
1476
1477#define REG_A3XX_SP_SP_CTRL_REG 0x000022c0
1478#define A3XX_SP_SP_CTRL_REG_RESOLVE 0x00010000
1479#define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK 0x000c0000
1480#define A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT 18
1481static inline uint32_t A3XX_SP_SP_CTRL_REG_CONSTMODE(uint32_t val)
1482{
1483 return ((val) << A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK;
1484}
1485#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK 0x00300000
1486#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT 20
1487static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val)
1488{
1489 return ((val) << A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK;
1490}
1491#define A3XX_SP_SP_CTRL_REG_LOMODE__MASK 0x00c00000
1492#define A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT 22
1493static inline uint32_t A3XX_SP_SP_CTRL_REG_LOMODE(uint32_t val)
1494{
1495 return ((val) << A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_LOMODE__MASK;
1496}
1497
1498#define REG_A3XX_SP_VS_CTRL_REG0 0x000022c4
1499#define A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
1500#define A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
1501static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
1502{
1503 return ((val) << A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
1504}
1505#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002
1506#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1
1507static inline uint32_t A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val)
1508{
1509 return ((val) << A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK;
1510}
1511#define A3XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004
1512#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
1513#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
1514static inline uint32_t A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
1515{
1516 return ((val) << A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
1517}
1518#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
1519#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
1520static inline uint32_t A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
1521{
1522 return ((val) << A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
1523}
1524#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
1525#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
1526static inline uint32_t A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
1527{
1528 return ((val) << A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK;
1529}
1530#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
1531#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
1532static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
1533{
1534 return ((val) << A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
1535}
1536#define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
1537#define A3XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000
1538#define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000
1539#define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24
1540static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val)
1541{
1542 return ((val) << A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG0_LENGTH__MASK;
1543}
1544
1545#define REG_A3XX_SP_VS_CTRL_REG1 0x000022c5
1546#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff
1547#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT 0
1548static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTLENGTH(uint32_t val)
1549{
1550 return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK;
1551}
1552#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00
1553#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10
1554static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val)
1555{
1556 return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK;
1557}
1558#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x3f000000
1559#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 24
1560static inline uint32_t A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
1561{
1562 return ((val) << A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK;
1563}
1564
1565#define REG_A3XX_SP_VS_PARAM_REG 0x000022c6
1566#define A3XX_SP_VS_PARAM_REG_POSREGID__MASK 0x000000ff
1567#define A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT 0
1568static inline uint32_t A3XX_SP_VS_PARAM_REG_POSREGID(uint32_t val)
1569{
1570 return ((val) << A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_POSREGID__MASK;
1571}
1572#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK 0x0000ff00
1573#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT 8
1574static inline uint32_t A3XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val)
1575{
1576 return ((val) << A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
1577}
1578#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0xfff00000
1579#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20
1580static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
1581{
1582 return ((val) << A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK;
1583}
1584
1585static inline uint32_t REG_A3XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
1586
1587static inline uint32_t REG_A3XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
1588#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000001ff
1589#define A3XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
1590static inline uint32_t A3XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
1591{
1592 return ((val) << A3XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_A_REGID__MASK;
1593}
1594#define A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00
1595#define A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9
1596static inline uint32_t A3XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
1597{
1598 return ((val) << A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
1599}
1600#define A3XX_SP_VS_OUT_REG_B_REGID__MASK 0x01ff0000
1601#define A3XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
1602static inline uint32_t A3XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
1603{
1604 return ((val) << A3XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_B_REGID__MASK;
1605}
1606#define A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000
1607#define A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25
1608static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
1609{
1610 return ((val) << A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
1611}
1612
1613static inline uint32_t REG_A3XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
1614
1615static inline uint32_t REG_A3XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
1616#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
1617#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
1618static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
1619{
1620 return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
1621}
1622#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
1623#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
1624static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
1625{
1626 return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
1627}
1628#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
1629#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
1630static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
1631{
1632 return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
1633}
1634#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
1635#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
1636static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
1637{
1638 return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
1639}
1640
1641#define REG_A3XX_SP_VS_OBJ_OFFSET_REG 0x000022d4
1642#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
1643#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
1644static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
1645{
1646 return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
1647}
1648#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
1649#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
1650static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
1651{
1652 return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
1653}
1654
1655#define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5
1656
1657#define REG_A3XX_SP_VS_PVT_MEM_CTRL_REG 0x000022d6
1658
1659#define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7
1660
1661#define REG_A3XX_SP_VS_PVT_MEM_SIZE_REG 0x000022d8
1662
1663#define REG_A3XX_SP_VS_LENGTH_REG 0x000022df
1664#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff
1665#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT 0
1666static inline uint32_t A3XX_SP_VS_LENGTH_REG_SHADERLENGTH(uint32_t val)
1667{
1668 return ((val) << A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK;
1669}
1670
1671#define REG_A3XX_SP_FS_CTRL_REG0 0x000022e0
1672#define A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
1673#define A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
1674static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
1675{
1676 return ((val) << A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
1677}
1678#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002
1679#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1
1680static inline uint32_t A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val)
1681{
1682 return ((val) << A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK;
1683}
1684#define A3XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004
1685#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
1686#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
1687static inline uint32_t A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
1688{
1689 return ((val) << A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
1690}
1691#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
1692#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
1693static inline uint32_t A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
1694{
1695 return ((val) << A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
1696}
1697#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
1698#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
1699static inline uint32_t A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
1700{
1701 return ((val) << A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK;
1702}
1703#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
1704#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
1705static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
1706{
1707 return ((val) << A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
1708}
1709#define A3XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000
1710#define A3XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000
1711#define A3XX_SP_FS_CTRL_REG0_LENGTH__MASK 0xff000000
1712#define A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT 24
1713static inline uint32_t A3XX_SP_FS_CTRL_REG0_LENGTH(uint32_t val)
1714{
1715 return ((val) << A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG0_LENGTH__MASK;
1716}
1717
1718#define REG_A3XX_SP_FS_CTRL_REG1 0x000022e1
1719#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff
1720#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT 0
1721static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val)
1722{
1723 return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK;
1724}
1725#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00
1726#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10
1727static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val)
1728{
1729 return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK;
1730}
1731#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x00f00000
1732#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 20
1733static inline uint32_t A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
1734{
1735 return ((val) << A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK;
1736}
1737#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK 0x3f000000
1738#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT 24
1739static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val)
1740{
1741 return ((val) << A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT) & A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK;
1742}
1743
1744#define REG_A3XX_SP_FS_OBJ_OFFSET_REG 0x000022e2
1745#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
1746#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
1747static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
1748{
1749 return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
1750}
1751#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
1752#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
1753static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
1754{
1755 return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
1756}
1757
1758#define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3
1759
1760#define REG_A3XX_SP_FS_PVT_MEM_CTRL_REG 0x000022e4
1761
1762#define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5
1763
1764#define REG_A3XX_SP_FS_PVT_MEM_SIZE_REG 0x000022e6
1765
1766#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_0 0x000022e8
1767
1768#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x000022e9
1769
1770#define REG_A3XX_SP_FS_OUTPUT_REG 0x000022ec
1771
1772static inline uint32_t REG_A3XX_SP_FS_MRT(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
1773
1774static inline uint32_t REG_A3XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
1775#define A3XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff
1776#define A3XX_SP_FS_MRT_REG_REGID__SHIFT 0
1777static inline uint32_t A3XX_SP_FS_MRT_REG_REGID(uint32_t val)
1778{
1779 return ((val) << A3XX_SP_FS_MRT_REG_REGID__SHIFT) & A3XX_SP_FS_MRT_REG_REGID__MASK;
1780}
1781#define A3XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100
1782
1783static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
1784
1785static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT_REG(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
1786#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK 0x0000003f
1787#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT 0
1788static inline uint32_t A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT(enum a3xx_color_fmt val)
1789{
1790 return ((val) << A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT) & A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK;
1791}
1792
1793#define REG_A3XX_SP_FS_LENGTH_REG 0x000022ff
1794#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff
1795#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT 0
1796static inline uint32_t A3XX_SP_FS_LENGTH_REG_SHADERLENGTH(uint32_t val)
1797{
1798 return ((val) << A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK;
1799}
1800
1801#define REG_A3XX_TPL1_TP_VS_TEX_OFFSET 0x00002340
1802#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
1803#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
1804static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val)
1805{
1806 return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK;
1807}
1808#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00
1809#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8
1810static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val)
1811{
1812 return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK;
1813}
1814#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000
1815#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT 16
1816static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
1817{
1818 return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK;
1819}
1820
1821#define REG_A3XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR 0x00002341
1822
1823#define REG_A3XX_TPL1_TP_FS_TEX_OFFSET 0x00002342
1824#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
1825#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
1826static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val)
1827{
1828 return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK;
1829}
1830#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00
1831#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8
1832static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val)
1833{
1834 return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK;
1835}
1836#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000
1837#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT 16
1838static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
1839{
1840 return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK;
1841}
1842
1843#define REG_A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x00002343
1844
1845#define REG_A3XX_VBIF_CLKON 0x00003001
1846
1847#define REG_A3XX_VBIF_FIXED_SORT_EN 0x0000300c
1848
1849#define REG_A3XX_VBIF_FIXED_SORT_SEL0 0x0000300d
1850
1851#define REG_A3XX_VBIF_FIXED_SORT_SEL1 0x0000300e
1852
1853#define REG_A3XX_VBIF_ABIT_SORT 0x0000301c
1854
1855#define REG_A3XX_VBIF_ABIT_SORT_CONF 0x0000301d
1856
1857#define REG_A3XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
1858
1859#define REG_A3XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
1860
1861#define REG_A3XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
1862
1863#define REG_A3XX_VBIF_IN_WR_LIM_CONF0 0x00003030
1864
1865#define REG_A3XX_VBIF_IN_WR_LIM_CONF1 0x00003031
1866
1867#define REG_A3XX_VBIF_OUT_RD_LIM_CONF0 0x00003034
1868
1869#define REG_A3XX_VBIF_OUT_WR_LIM_CONF0 0x00003035
1870
1871#define REG_A3XX_VBIF_DDR_OUT_MAX_BURST 0x00003036
1872
1873#define REG_A3XX_VBIF_ARB_CTL 0x0000303c
1874
1875#define REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
1876
1877#define REG_A3XX_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x00003058
1878
1879#define REG_A3XX_VBIF_OUT_AXI_AOOO_EN 0x0000305e
1880
1881#define REG_A3XX_VBIF_OUT_AXI_AOOO 0x0000305f
1882
1883#define REG_A3XX_VSC_BIN_SIZE 0x00000c01
1884#define A3XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
1885#define A3XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
1886static inline uint32_t A3XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
1887{
1888 return ((val >> 5) << A3XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A3XX_VSC_BIN_SIZE_WIDTH__MASK;
1889}
1890#define A3XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
1891#define A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5
1892static inline uint32_t A3XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
1893{
1894 return ((val >> 5) << A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A3XX_VSC_BIN_SIZE_HEIGHT__MASK;
1895}
1896
1897#define REG_A3XX_VSC_SIZE_ADDRESS 0x00000c02
1898
1899static inline uint32_t REG_A3XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
1900
1901static inline uint32_t REG_A3XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
1902#define A3XX_VSC_PIPE_CONFIG_X__MASK 0x000003ff
1903#define A3XX_VSC_PIPE_CONFIG_X__SHIFT 0
1904static inline uint32_t A3XX_VSC_PIPE_CONFIG_X(uint32_t val)
1905{
1906 return ((val) << A3XX_VSC_PIPE_CONFIG_X__SHIFT) & A3XX_VSC_PIPE_CONFIG_X__MASK;
1907}
1908#define A3XX_VSC_PIPE_CONFIG_Y__MASK 0x000ffc00
1909#define A3XX_VSC_PIPE_CONFIG_Y__SHIFT 10
1910static inline uint32_t A3XX_VSC_PIPE_CONFIG_Y(uint32_t val)
1911{
1912 return ((val) << A3XX_VSC_PIPE_CONFIG_Y__SHIFT) & A3XX_VSC_PIPE_CONFIG_Y__MASK;
1913}
1914#define A3XX_VSC_PIPE_CONFIG_W__MASK 0x00f00000
1915#define A3XX_VSC_PIPE_CONFIG_W__SHIFT 20
1916static inline uint32_t A3XX_VSC_PIPE_CONFIG_W(uint32_t val)
1917{
1918 return ((val) << A3XX_VSC_PIPE_CONFIG_W__SHIFT) & A3XX_VSC_PIPE_CONFIG_W__MASK;
1919}
1920#define A3XX_VSC_PIPE_CONFIG_H__MASK 0x0f000000
1921#define A3XX_VSC_PIPE_CONFIG_H__SHIFT 24
1922static inline uint32_t A3XX_VSC_PIPE_CONFIG_H(uint32_t val)
1923{
1924 return ((val) << A3XX_VSC_PIPE_CONFIG_H__SHIFT) & A3XX_VSC_PIPE_CONFIG_H__MASK;
1925}
1926
1927static inline uint32_t REG_A3XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; }
1928
1929static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
1930
1931#define REG_A3XX_UNKNOWN_0C3D 0x00000c3d
1932
1933#define REG_A3XX_PC_PERFCOUNTER0_SELECT 0x00000c48
1934
1935#define REG_A3XX_PC_PERFCOUNTER1_SELECT 0x00000c49
1936
1937#define REG_A3XX_PC_PERFCOUNTER2_SELECT 0x00000c4a
1938
1939#define REG_A3XX_PC_PERFCOUNTER3_SELECT 0x00000c4b
1940
1941#define REG_A3XX_UNKNOWN_0C81 0x00000c81
1942
1943#define REG_A3XX_GRAS_PERFCOUNTER0_SELECT 0x00000c88
1944
1945#define REG_A3XX_GRAS_PERFCOUNTER1_SELECT 0x00000c89
1946
1947#define REG_A3XX_GRAS_PERFCOUNTER2_SELECT 0x00000c8a
1948
1949#define REG_A3XX_GRAS_PERFCOUNTER3_SELECT 0x00000c8b
1950
1951static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }
1952
1953static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_X(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }
1954
1955static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Y(uint32_t i0) { return 0x00000ca1 + 0x4*i0; }
1956
1957static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Z(uint32_t i0) { return 0x00000ca2 + 0x4*i0; }
1958
1959static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_W(uint32_t i0) { return 0x00000ca3 + 0x4*i0; }
1960
1961#define REG_A3XX_RB_GMEM_BASE_ADDR 0x00000cc0
1962
1963#define REG_A3XX_RB_PERFCOUNTER0_SELECT 0x00000cc6
1964
1965#define REG_A3XX_RB_PERFCOUNTER1_SELECT 0x00000cc7
1966
1967#define REG_A3XX_RB_WINDOW_SIZE 0x00000ce0
1968#define A3XX_RB_WINDOW_SIZE_WIDTH__MASK 0x00003fff
1969#define A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT 0
1970static inline uint32_t A3XX_RB_WINDOW_SIZE_WIDTH(uint32_t val)
1971{
1972 return ((val) << A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT) & A3XX_RB_WINDOW_SIZE_WIDTH__MASK;
1973}
1974#define A3XX_RB_WINDOW_SIZE_HEIGHT__MASK 0x0fffc000
1975#define A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT 14
1976static inline uint32_t A3XX_RB_WINDOW_SIZE_HEIGHT(uint32_t val)
1977{
1978 return ((val) << A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT) & A3XX_RB_WINDOW_SIZE_HEIGHT__MASK;
1979}
1980
1981#define REG_A3XX_HLSQ_PERFCOUNTER0_SELECT 0x00000e00
1982
1983#define REG_A3XX_HLSQ_PERFCOUNTER1_SELECT 0x00000e01
1984
1985#define REG_A3XX_HLSQ_PERFCOUNTER2_SELECT 0x00000e02
1986
1987#define REG_A3XX_HLSQ_PERFCOUNTER3_SELECT 0x00000e03
1988
1989#define REG_A3XX_HLSQ_PERFCOUNTER4_SELECT 0x00000e04
1990
1991#define REG_A3XX_HLSQ_PERFCOUNTER5_SELECT 0x00000e05
1992
1993#define REG_A3XX_UNKNOWN_0E43 0x00000e43
1994
1995#define REG_A3XX_VFD_PERFCOUNTER0_SELECT 0x00000e44
1996
1997#define REG_A3XX_VFD_PERFCOUNTER1_SELECT 0x00000e45
1998
1999#define REG_A3XX_VPC_VPC_DEBUG_RAM_SEL 0x00000e61
2000
2001#define REG_A3XX_VPC_VPC_DEBUG_RAM_READ 0x00000e62
2002
2003#define REG_A3XX_VPC_PERFCOUNTER0_SELECT 0x00000e64
2004
2005#define REG_A3XX_VPC_PERFCOUNTER1_SELECT 0x00000e65
2006
2007#define REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG 0x00000e82
2008
2009#define REG_A3XX_UCHE_PERFCOUNTER0_SELECT 0x00000e84
2010
2011#define REG_A3XX_UCHE_PERFCOUNTER1_SELECT 0x00000e85
2012
2013#define REG_A3XX_UCHE_PERFCOUNTER2_SELECT 0x00000e86
2014
2015#define REG_A3XX_UCHE_PERFCOUNTER3_SELECT 0x00000e87
2016
2017#define REG_A3XX_UCHE_PERFCOUNTER4_SELECT 0x00000e88
2018
2019#define REG_A3XX_UCHE_PERFCOUNTER5_SELECT 0x00000e89
2020
2021#define REG_A3XX_UCHE_CACHE_INVALIDATE0_REG 0x00000ea0
2022#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK 0x0fffffff
2023#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT 0
2024static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR(uint32_t val)
2025{
2026 return ((val) << A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK;
2027}
2028
2029#define REG_A3XX_UCHE_CACHE_INVALIDATE1_REG 0x00000ea1
2030#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK 0x0fffffff
2031#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT 0
2032static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR(uint32_t val)
2033{
2034 return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK;
2035}
2036#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK 0x30000000
2037#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT 28
2038static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_opcode val)
2039{
2040 return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK;
2041}
2042#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE 0x80000000
2043
2044#define REG_A3XX_SP_PERFCOUNTER0_SELECT 0x00000ec4
2045
2046#define REG_A3XX_SP_PERFCOUNTER1_SELECT 0x00000ec5
2047
2048#define REG_A3XX_SP_PERFCOUNTER2_SELECT 0x00000ec6
2049
2050#define REG_A3XX_SP_PERFCOUNTER3_SELECT 0x00000ec7
2051
2052#define REG_A3XX_SP_PERFCOUNTER4_SELECT 0x00000ec8
2053
2054#define REG_A3XX_SP_PERFCOUNTER5_SELECT 0x00000ec9
2055
2056#define REG_A3XX_SP_PERFCOUNTER6_SELECT 0x00000eca
2057
2058#define REG_A3XX_SP_PERFCOUNTER7_SELECT 0x00000ecb
2059
2060#define REG_A3XX_UNKNOWN_0EE0 0x00000ee0
2061
2062#define REG_A3XX_UNKNOWN_0F03 0x00000f03
2063
2064#define REG_A3XX_TP_PERFCOUNTER0_SELECT 0x00000f04
2065
2066#define REG_A3XX_TP_PERFCOUNTER1_SELECT 0x00000f05
2067
2068#define REG_A3XX_TP_PERFCOUNTER2_SELECT 0x00000f06
2069
2070#define REG_A3XX_TP_PERFCOUNTER3_SELECT 0x00000f07
2071
2072#define REG_A3XX_TP_PERFCOUNTER4_SELECT 0x00000f08
2073
2074#define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09
2075
2076#define REG_A3XX_TEX_SAMP_0 0x00000000
2077#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c
2078#define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2
2079static inline uint32_t A3XX_TEX_SAMP_0_XY_MAG(enum a3xx_tex_filter val)
2080{
2081 return ((val) << A3XX_TEX_SAMP_0_XY_MAG__SHIFT) & A3XX_TEX_SAMP_0_XY_MAG__MASK;
2082}
2083#define A3XX_TEX_SAMP_0_XY_MIN__MASK 0x00000030
2084#define A3XX_TEX_SAMP_0_XY_MIN__SHIFT 4
2085static inline uint32_t A3XX_TEX_SAMP_0_XY_MIN(enum a3xx_tex_filter val)
2086{
2087 return ((val) << A3XX_TEX_SAMP_0_XY_MIN__SHIFT) & A3XX_TEX_SAMP_0_XY_MIN__MASK;
2088}
2089#define A3XX_TEX_SAMP_0_WRAP_S__MASK 0x000001c0
2090#define A3XX_TEX_SAMP_0_WRAP_S__SHIFT 6
2091static inline uint32_t A3XX_TEX_SAMP_0_WRAP_S(enum a3xx_tex_clamp val)
2092{
2093 return ((val) << A3XX_TEX_SAMP_0_WRAP_S__SHIFT) & A3XX_TEX_SAMP_0_WRAP_S__MASK;
2094}
2095#define A3XX_TEX_SAMP_0_WRAP_T__MASK 0x00000e00
2096#define A3XX_TEX_SAMP_0_WRAP_T__SHIFT 9
2097static inline uint32_t A3XX_TEX_SAMP_0_WRAP_T(enum a3xx_tex_clamp val)
2098{
2099 return ((val) << A3XX_TEX_SAMP_0_WRAP_T__SHIFT) & A3XX_TEX_SAMP_0_WRAP_T__MASK;
2100}
2101#define A3XX_TEX_SAMP_0_WRAP_R__MASK 0x00007000
2102#define A3XX_TEX_SAMP_0_WRAP_R__SHIFT 12
2103static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val)
2104{
2105 return ((val) << A3XX_TEX_SAMP_0_WRAP_R__SHIFT) & A3XX_TEX_SAMP_0_WRAP_R__MASK;
2106}
2107#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000
2108
2109#define REG_A3XX_TEX_SAMP_1 0x00000001
2110
2111#define REG_A3XX_TEX_CONST_0 0x00000000
2112#define A3XX_TEX_CONST_0_TILED 0x00000001
2113#define A3XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
2114#define A3XX_TEX_CONST_0_SWIZ_X__SHIFT 4
2115static inline uint32_t A3XX_TEX_CONST_0_SWIZ_X(enum a3xx_tex_swiz val)
2116{
2117 return ((val) << A3XX_TEX_CONST_0_SWIZ_X__SHIFT) & A3XX_TEX_CONST_0_SWIZ_X__MASK;
2118}
2119#define A3XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
2120#define A3XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
2121static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Y(enum a3xx_tex_swiz val)
2122{
2123 return ((val) << A3XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Y__MASK;
2124}
2125#define A3XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
2126#define A3XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
2127static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Z(enum a3xx_tex_swiz val)
2128{
2129 return ((val) << A3XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Z__MASK;
2130}
2131#define A3XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
2132#define A3XX_TEX_CONST_0_SWIZ_W__SHIFT 13
2133static inline uint32_t A3XX_TEX_CONST_0_SWIZ_W(enum a3xx_tex_swiz val)
2134{
2135 return ((val) << A3XX_TEX_CONST_0_SWIZ_W__SHIFT) & A3XX_TEX_CONST_0_SWIZ_W__MASK;
2136}
2137#define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000
2138#define A3XX_TEX_CONST_0_FMT__SHIFT 22
2139static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
2140{
2141 return ((val) << A3XX_TEX_CONST_0_FMT__SHIFT) & A3XX_TEX_CONST_0_FMT__MASK;
2142}
2143#define A3XX_TEX_CONST_0_TYPE__MASK 0xc0000000
2144#define A3XX_TEX_CONST_0_TYPE__SHIFT 30
2145static inline uint32_t A3XX_TEX_CONST_0_TYPE(enum a3xx_tex_type val)
2146{
2147 return ((val) << A3XX_TEX_CONST_0_TYPE__SHIFT) & A3XX_TEX_CONST_0_TYPE__MASK;
2148}
2149
2150#define REG_A3XX_TEX_CONST_1 0x00000001
2151#define A3XX_TEX_CONST_1_HEIGHT__MASK 0x00003fff
2152#define A3XX_TEX_CONST_1_HEIGHT__SHIFT 0
2153static inline uint32_t A3XX_TEX_CONST_1_HEIGHT(uint32_t val)
2154{
2155 return ((val) << A3XX_TEX_CONST_1_HEIGHT__SHIFT) & A3XX_TEX_CONST_1_HEIGHT__MASK;
2156}
2157#define A3XX_TEX_CONST_1_WIDTH__MASK 0x0fffc000
2158#define A3XX_TEX_CONST_1_WIDTH__SHIFT 14
2159static inline uint32_t A3XX_TEX_CONST_1_WIDTH(uint32_t val)
2160{
2161 return ((val) << A3XX_TEX_CONST_1_WIDTH__SHIFT) & A3XX_TEX_CONST_1_WIDTH__MASK;
2162}
2163#define A3XX_TEX_CONST_1_FETCHSIZE__MASK 0xf0000000
2164#define A3XX_TEX_CONST_1_FETCHSIZE__SHIFT 28
2165static inline uint32_t A3XX_TEX_CONST_1_FETCHSIZE(enum a3xx_tex_fetchsize val)
2166{
2167 return ((val) << A3XX_TEX_CONST_1_FETCHSIZE__SHIFT) & A3XX_TEX_CONST_1_FETCHSIZE__MASK;
2168}
2169
2170#define REG_A3XX_TEX_CONST_2 0x00000002
2171#define A3XX_TEX_CONST_2_INDX__MASK 0x000000ff
2172#define A3XX_TEX_CONST_2_INDX__SHIFT 0
2173static inline uint32_t A3XX_TEX_CONST_2_INDX(uint32_t val)
2174{
2175 return ((val) << A3XX_TEX_CONST_2_INDX__SHIFT) & A3XX_TEX_CONST_2_INDX__MASK;
2176}
2177#define A3XX_TEX_CONST_2_PITCH__MASK 0x3ffff000
2178#define A3XX_TEX_CONST_2_PITCH__SHIFT 12
2179static inline uint32_t A3XX_TEX_CONST_2_PITCH(uint32_t val)
2180{
2181 return ((val) << A3XX_TEX_CONST_2_PITCH__SHIFT) & A3XX_TEX_CONST_2_PITCH__MASK;
2182}
2183#define A3XX_TEX_CONST_2_SWAP__MASK 0xc0000000
2184#define A3XX_TEX_CONST_2_SWAP__SHIFT 30
2185static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
2186{
2187 return ((val) << A3XX_TEX_CONST_2_SWAP__SHIFT) & A3XX_TEX_CONST_2_SWAP__MASK;
2188}
2189
2190#define REG_A3XX_TEX_CONST_3 0x00000003
2191
2192
2193#endif /* A3XX_XML */
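As an aside (not part of the patch itself): every field in the generated header above follows the same __MASK/__SHIFT convention, with an inline packer that shifts the value into position and masks off anything out of range. A minimal sketch of how such helpers are typically combined when programming a register, using the gpu_write() helper seen in the driver code further down (the pipe index and field values are arbitrary illustrative numbers, not taken from the driver):

	/* illustrative only: pack the X/Y/W/H fields of pipe 0's config register */
	uint32_t cfg = A3XX_VSC_PIPE_CONFIG_X(0) |
		       A3XX_VSC_PIPE_CONFIG_Y(0) |
		       A3XX_VSC_PIPE_CONFIG_W(4) |
		       A3XX_VSC_PIPE_CONFIG_H(4);
	gpu_write(gpu, REG_A3XX_VSC_PIPE_CONFIG(0), cfg);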
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
new file mode 100644
index 000000000000..035bd13dc8bd
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -0,0 +1,502 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "a3xx_gpu.h"
19
20#define A3XX_INT0_MASK \
21 (A3XX_INT0_RBBM_AHB_ERROR | \
22 A3XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
23 A3XX_INT0_CP_T0_PACKET_IN_IB | \
24 A3XX_INT0_CP_OPCODE_ERROR | \
25 A3XX_INT0_CP_RESERVED_BIT_ERROR | \
26 A3XX_INT0_CP_HW_FAULT | \
27 A3XX_INT0_CP_IB1_INT | \
28 A3XX_INT0_CP_IB2_INT | \
29 A3XX_INT0_CP_RB_INT | \
30 A3XX_INT0_CP_REG_PROTECT_FAULT | \
31 A3XX_INT0_CP_AHB_ERROR_HALT | \
32 A3XX_INT0_UCHE_OOB_ACCESS)
33
34static struct platform_device *a3xx_pdev;
35
36static void a3xx_me_init(struct msm_gpu *gpu)
37{
38 struct msm_ringbuffer *ring = gpu->rb;
39
40 OUT_PKT3(ring, CP_ME_INIT, 17);
41 OUT_RING(ring, 0x000003f7);
42 OUT_RING(ring, 0x00000000);
43 OUT_RING(ring, 0x00000000);
44 OUT_RING(ring, 0x00000000);
45 OUT_RING(ring, 0x00000080);
46 OUT_RING(ring, 0x00000100);
47 OUT_RING(ring, 0x00000180);
48 OUT_RING(ring, 0x00006600);
49 OUT_RING(ring, 0x00000150);
50 OUT_RING(ring, 0x0000014e);
51 OUT_RING(ring, 0x00000154);
52 OUT_RING(ring, 0x00000001);
53 OUT_RING(ring, 0x00000000);
54 OUT_RING(ring, 0x00000000);
55 OUT_RING(ring, 0x00000000);
56 OUT_RING(ring, 0x00000000);
57 OUT_RING(ring, 0x00000000);
58
59 gpu->funcs->flush(gpu);
60 gpu->funcs->idle(gpu);
61}
62
63static int a3xx_hw_init(struct msm_gpu *gpu)
64{
65 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
66 uint32_t *ptr, len;
67 int i, ret;
68
69 DBG("%s", gpu->name);
70
71 if (adreno_is_a305(adreno_gpu)) {
72 /* Set up 16 deep read/write request queues: */
73 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
74 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
75 gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
76 gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
77 gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
78 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
79 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
80 /* Enable WR-REQ: */
81 gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
82 /* Set up round robin arbitration between both AXI ports: */
83 gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
84 /* Set up AOOO: */
85 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
86 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
87
88 } else if (adreno_is_a320(adreno_gpu)) {
89 /* Set up 16 deep read/write request queues: */
90 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
91 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
92 gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
93 gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
94 gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
95 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
96 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
97 /* Enable WR-REQ: */
98 gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
99 /* Set up round robin arbitration between both AXI ports: */
100 gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
101 /* Set up AOOO: */
102 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
103 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
104 /* Enable 1K sort: */
105 gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff);
106 gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
107
108 } else if (adreno_is_a330(adreno_gpu)) {
109 /* Set up 16 deep read/write request queues: */
110 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
111 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x18181818);
112 gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x18181818);
113 gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x18181818);
114 gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
115 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
116 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x18181818);
117 /* Enable WR-REQ: */
118 gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
119 /* Set up round robin arbitration between both AXI ports: */
120 gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
121 /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
122 gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
123 /* Set up AOOO: */
124 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000ffff);
125 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0xffffffff);
126 /* Enable 1K sort: */
127 gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001ffff);
128 gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
129		/* Disable VBIF clock gating. This allows AXI to run at a
130		 * higher frequency than the GPU:
131 */
132 gpu_write(gpu, REG_A3XX_VBIF_CLKON, 0x00000001);
133
134 } else {
135 BUG();
136 }
137
138 /* Make all blocks contribute to the GPU BUSY perf counter: */
139 gpu_write(gpu, REG_A3XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);
140
141	/* Tune the hysteresis counters for SP and CP idle detection: */
142 gpu_write(gpu, REG_A3XX_RBBM_SP_HYST_CNT, 0x10);
143 gpu_write(gpu, REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
144
145 /* Enable the RBBM error reporting bits. This lets us get
146 * useful information on failure:
147 */
148 gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL0, 0x00000001);
149
150 /* Enable AHB error reporting: */
151 gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL1, 0xa6ffffff);
152
153 /* Turn on the power counters: */
154 gpu_write(gpu, REG_A3XX_RBBM_RBBM_CTL, 0x00030000);
155
156 /* Turn on hang detection - this spews a lot of useful information
157 * into the RBBM registers on a hang:
158 */
159 gpu_write(gpu, REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL, 0x00010fff);
160
161 /* Enable 64-byte cacheline size. HW Default is 32-byte (0x000000E0): */
162 gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
163
164 /* Enable Clock gating: */
165 gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
166
167 /* Set the OCMEM base address for A330 */
168//TODO:
169// if (adreno_is_a330(adreno_gpu)) {
170// gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
171// (unsigned int)(a3xx_gpu->ocmem_base >> 14));
172// }
173
174 /* Turn on performance counters: */
175 gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01);
176
177	/* Set SP perfcounter 7 to count SP_FS_FULL_ALU_INSTRUCTIONS;
178	 * we will use this to augment our hang detection:
179 */
180 gpu_write(gpu, REG_A3XX_SP_PERFCOUNTER7_SELECT,
181 SP_FS_FULL_ALU_INSTRUCTIONS);
182
183 gpu_write(gpu, REG_A3XX_RBBM_INT_0_MASK, A3XX_INT0_MASK);
184
185 ret = adreno_hw_init(gpu);
186 if (ret)
187 return ret;
188
189 /* setup access protection: */
190 gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);
191
192 /* RBBM registers */
193 gpu_write(gpu, REG_A3XX_CP_PROTECT(0), 0x63000040);
194 gpu_write(gpu, REG_A3XX_CP_PROTECT(1), 0x62000080);
195 gpu_write(gpu, REG_A3XX_CP_PROTECT(2), 0x600000cc);
196 gpu_write(gpu, REG_A3XX_CP_PROTECT(3), 0x60000108);
197 gpu_write(gpu, REG_A3XX_CP_PROTECT(4), 0x64000140);
198 gpu_write(gpu, REG_A3XX_CP_PROTECT(5), 0x66000400);
199
200 /* CP registers */
201 gpu_write(gpu, REG_A3XX_CP_PROTECT(6), 0x65000700);
202 gpu_write(gpu, REG_A3XX_CP_PROTECT(7), 0x610007d8);
203 gpu_write(gpu, REG_A3XX_CP_PROTECT(8), 0x620007e0);
204 gpu_write(gpu, REG_A3XX_CP_PROTECT(9), 0x61001178);
205 gpu_write(gpu, REG_A3XX_CP_PROTECT(10), 0x64001180);
206
207 /* RB registers */
208 gpu_write(gpu, REG_A3XX_CP_PROTECT(11), 0x60003300);
209
210 /* VBIF registers */
211 gpu_write(gpu, REG_A3XX_CP_PROTECT(12), 0x6b00c000);
212
213 /* NOTE: PM4/micro-engine firmware registers look to be the same
214 * for a2xx and a3xx.. we could possibly push that part down to
215 * adreno_gpu base class. Or push both PM4 and PFP but
216 * parameterize the pfp ucode addr/data registers..
217 */
218
219 /* Load PM4: */
220 ptr = (uint32_t *)(adreno_gpu->pm4->data);
221 len = adreno_gpu->pm4->size / 4;
222 DBG("loading PM4 ucode version: %u", ptr[0]);
223
224 gpu_write(gpu, REG_AXXX_CP_DEBUG,
225 AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE |
226 AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
227 gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
228 for (i = 1; i < len; i++)
229 gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
230
231 /* Load PFP: */
232 ptr = (uint32_t *)(adreno_gpu->pfp->data);
233 len = adreno_gpu->pfp->size / 4;
234 DBG("loading PFP ucode version: %u", ptr[0]);
235
236 gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0);
237 for (i = 1; i < len; i++)
238 gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);
239
240 /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
241 if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu))
242 gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
243 AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
244 AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
245 AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14));
246
247
248 /* clear ME_HALT to start micro engine */
249 gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
250
251 a3xx_me_init(gpu);
252
253 return 0;
254}
255
256static void a3xx_destroy(struct msm_gpu *gpu)
257{
258 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
259 struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
260
261 DBG("%s", gpu->name);
262
263 adreno_gpu_cleanup(adreno_gpu);
264 put_device(&a3xx_gpu->pdev->dev);
265 kfree(a3xx_gpu);
266}
267
268static void a3xx_idle(struct msm_gpu *gpu)
269{
270 unsigned long t;
271
272 /* wait for ringbuffer to drain: */
273 adreno_idle(gpu);
274
275 t = jiffies + ADRENO_IDLE_TIMEOUT;
276
277 /* then wait for GPU to finish: */
278 do {
279 uint32_t rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS);
280 if (!(rbbm_status & A3XX_RBBM_STATUS_GPU_BUSY))
281 return;
282 } while(time_before(jiffies, t));
283
284 DRM_ERROR("timeout waiting for %s to idle!\n", gpu->name);
285
286 /* TODO maybe we need to reset GPU here to recover from hang? */
287}
288
289static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
290{
291 uint32_t status;
292
293 status = gpu_read(gpu, REG_A3XX_RBBM_INT_0_STATUS);
294 DBG("%s: %08x", gpu->name, status);
295
296 // TODO
297
298 gpu_write(gpu, REG_A3XX_RBBM_INT_CLEAR_CMD, status);
299
300 msm_gpu_retire(gpu);
301
302 return IRQ_HANDLED;
303}
304
305#ifdef CONFIG_DEBUG_FS
306static const unsigned int a3xx_registers[] = {
307 0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027,
308 0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c,
309 0x0060, 0x006c, 0x0080, 0x0082, 0x0084, 0x0088, 0x0090, 0x00e5,
310 0x00ea, 0x00ed, 0x0100, 0x0100, 0x0110, 0x0123, 0x01c0, 0x01c1,
311 0x01c3, 0x01c5, 0x01c7, 0x01c7, 0x01d5, 0x01d9, 0x01dc, 0x01dd,
312 0x01ea, 0x01ea, 0x01ee, 0x01f1, 0x01f5, 0x01f5, 0x01fc, 0x01ff,
313 0x0440, 0x0440, 0x0443, 0x0443, 0x0445, 0x0445, 0x044d, 0x044f,
314 0x0452, 0x0452, 0x0454, 0x046f, 0x047c, 0x047c, 0x047f, 0x047f,
315 0x0578, 0x057f, 0x0600, 0x0602, 0x0605, 0x0607, 0x060a, 0x060e,
316 0x0612, 0x0614, 0x0c01, 0x0c02, 0x0c06, 0x0c1d, 0x0c3d, 0x0c3f,
317 0x0c48, 0x0c4b, 0x0c80, 0x0c80, 0x0c88, 0x0c8b, 0x0ca0, 0x0cb7,
318 0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5, 0x0e00, 0x0e05,
319 0x0e0c, 0x0e0c, 0x0e22, 0x0e23, 0x0e41, 0x0e45, 0x0e64, 0x0e65,
320 0x0e80, 0x0e82, 0x0e84, 0x0e89, 0x0ea0, 0x0ea1, 0x0ea4, 0x0ea7,
321 0x0ec4, 0x0ecb, 0x0ee0, 0x0ee0, 0x0f00, 0x0f01, 0x0f03, 0x0f09,
322 0x2040, 0x2040, 0x2044, 0x2044, 0x2048, 0x204d, 0x2068, 0x2069,
323 0x206c, 0x206d, 0x2070, 0x2070, 0x2072, 0x2072, 0x2074, 0x2075,
324 0x2079, 0x207a, 0x20c0, 0x20d3, 0x20e4, 0x20ef, 0x2100, 0x2109,
325 0x210c, 0x210c, 0x210e, 0x210e, 0x2110, 0x2111, 0x2114, 0x2115,
326 0x21e4, 0x21e4, 0x21ea, 0x21ea, 0x21ec, 0x21ed, 0x21f0, 0x21f0,
327 0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e,
328 0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
329 0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
330 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356,
331 0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
332 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472,
333 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
334 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
335 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed,
336 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a,
337 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
338 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
339 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749,
340 0x2750, 0x2756, 0x2760, 0x2760, 0x300c, 0x300e, 0x301c, 0x301d,
341 0x302a, 0x302a, 0x302c, 0x302d, 0x3030, 0x3031, 0x3034, 0x3036,
342 0x303c, 0x303c, 0x305e, 0x305f,
343};
344
345static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
346{
347 int i;
348
349 adreno_show(gpu, m);
350 seq_printf(m, "status: %08x\n",
351 gpu_read(gpu, REG_A3XX_RBBM_STATUS));
352
353 /* dump these out in a form that can be parsed by demsm: */
354 seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
355 for (i = 0; i < ARRAY_SIZE(a3xx_registers); i += 2) {
356 uint32_t start = a3xx_registers[i];
357 uint32_t end = a3xx_registers[i+1];
358 uint32_t addr;
359
360 for (addr = start; addr <= end; addr++) {
361 uint32_t val = gpu_read(gpu, addr);
362 seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
363 }
364 }
365}
366#endif
367
368static const struct adreno_gpu_funcs funcs = {
369 .base = {
370 .get_param = adreno_get_param,
371 .hw_init = a3xx_hw_init,
372 .pm_suspend = msm_gpu_pm_suspend,
373 .pm_resume = msm_gpu_pm_resume,
374 .recover = adreno_recover,
375 .last_fence = adreno_last_fence,
376 .submit = adreno_submit,
377 .flush = adreno_flush,
378 .idle = a3xx_idle,
379 .irq = a3xx_irq,
380 .destroy = a3xx_destroy,
381#ifdef CONFIG_DEBUG_FS
382 .show = a3xx_show,
383#endif
384 },
385};
386
387struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
388{
389 struct a3xx_gpu *a3xx_gpu = NULL;
390 struct msm_gpu *gpu;
391 struct platform_device *pdev = a3xx_pdev;
392 struct adreno_platform_config *config;
393 int ret;
394
395 if (!pdev) {
396 dev_err(dev->dev, "no a3xx device\n");
397 ret = -ENXIO;
398 goto fail;
399 }
400
401 config = pdev->dev.platform_data;
402
403 a3xx_gpu = kzalloc(sizeof(*a3xx_gpu), GFP_KERNEL);
404 if (!a3xx_gpu) {
405 ret = -ENOMEM;
406 goto fail;
407 }
408
409 gpu = &a3xx_gpu->base.base;
410
411 get_device(&pdev->dev);
412 a3xx_gpu->pdev = pdev;
413
414 gpu->fast_rate = config->fast_rate;
415 gpu->slow_rate = config->slow_rate;
416 gpu->bus_freq = config->bus_freq;
417
418 DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
419 gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
420
421 ret = adreno_gpu_init(dev, pdev, &a3xx_gpu->base,
422 &funcs, config->rev);
423 if (ret)
424 goto fail;
425
426 return &a3xx_gpu->base.base;
427
428fail:
429 if (a3xx_gpu)
430 a3xx_destroy(&a3xx_gpu->base.base);
431
432 return ERR_PTR(ret);
433}
434
435/*
436 * The a3xx device:
437 */
438
439static int a3xx_probe(struct platform_device *pdev)
440{
441 static struct adreno_platform_config config = {};
442#ifdef CONFIG_OF
443 /* TODO */
444#else
445 uint32_t version = socinfo_get_version();
446 if (cpu_is_apq8064ab()) {
447 config.fast_rate = 450000000;
448 config.slow_rate = 27000000;
449 config.bus_freq = 4;
450 config.rev = ADRENO_REV(3, 2, 1, 0);
451 } else if (cpu_is_apq8064() || cpu_is_msm8960ab()) {
452 config.fast_rate = 400000000;
453 config.slow_rate = 27000000;
454 config.bus_freq = 4;
455
456 if (SOCINFO_VERSION_MAJOR(version) == 2)
457 config.rev = ADRENO_REV(3, 2, 0, 2);
458 else if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
459 (SOCINFO_VERSION_MINOR(version) == 1))
460 config.rev = ADRENO_REV(3, 2, 0, 1);
461 else
462 config.rev = ADRENO_REV(3, 2, 0, 0);
463
464 } else if (cpu_is_msm8930()) {
465 config.fast_rate = 400000000;
466 config.slow_rate = 27000000;
467 config.bus_freq = 3;
468
469 if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
470 (SOCINFO_VERSION_MINOR(version) == 2))
471 config.rev = ADRENO_REV(3, 0, 5, 2);
472 else
473 config.rev = ADRENO_REV(3, 0, 5, 0);
474
475 }
476#endif
477 pdev->dev.platform_data = &config;
478 a3xx_pdev = pdev;
479 return 0;
480}
481
482static int a3xx_remove(struct platform_device *pdev)
483{
484 a3xx_pdev = NULL;
485 return 0;
486}
487
488static struct platform_driver a3xx_driver = {
489 .probe = a3xx_probe,
490 .remove = a3xx_remove,
491 .driver.name = "kgsl-3d0",
492};
493
494void __init a3xx_register(void)
495{
496 platform_driver_register(&a3xx_driver);
497}
498
499void __exit a3xx_unregister(void)
500{
501 platform_driver_unregister(&a3xx_driver);
502}
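For context: the platform driver at the bottom of this file only captures the platform_device and its clock/revision config, while a3xx_gpu_init() above is the entry point the DRM side uses to actually bring the GPU up. The load path that calls it is not part of this hunk, so the sketch below is illustrative only (dev and ret assumed in scope):

	struct msm_gpu *gpu = a3xx_gpu_init(dev);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);		/* no a3xx device, or init failed */

	ret = gpu->funcs->hw_init(gpu);		/* dispatches to a3xx_hw_init() */
	if (ret)
		return ret;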
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
new file mode 100644
index 000000000000..32c398c2d00a
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -0,0 +1,30 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __A3XX_GPU_H__
19#define __A3XX_GPU_H__
20
21#include "adreno_gpu.h"
22#include "a3xx.xml.h"
23
24struct a3xx_gpu {
25 struct adreno_gpu base;
26 struct platform_device *pdev;
27};
28#define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base)
29
30#endif /* __A3XX_GPU_H__ */
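The to_a3xx_gpu() macro is the usual container_of() downcast: struct a3xx_gpu embeds struct adreno_gpu, which in turn embeds the generic struct msm_gpu as its .base member (to_adreno_gpu() comes from adreno_gpu.h, not shown in this hunk). Code that only holds the generic pointer therefore recovers the a3xx-specific state in two hops, the same pattern a3xx_destroy() uses above:

	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);	/* msm_gpu -> adreno_gpu */
	struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);	/* adreno_gpu -> a3xx_gpu */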
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
new file mode 100644
index 000000000000..61979d458ac0
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -0,0 +1,432 @@
1#ifndef ADRENO_COMMON_XML
2#define ADRENO_COMMON_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
17
18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark)
20
21Permission is hereby granted, free of charge, to any person obtaining
22a copy of this software and associated documentation files (the
23"Software"), to deal in the Software without restriction, including
24without limitation the rights to use, copy, modify, merge, publish,
25distribute, sublicense, and/or sell copies of the Software, and to
26permit persons to whom the Software is furnished to do so, subject to
27the following conditions:
28
29The above copyright notice and this permission notice (including the
30next paragraph) shall be included in all copies or substantial
31portions of the Software.
32
33THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
34EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
35MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
36IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
37LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
38OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
39WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40*/
41
42
43enum adreno_pa_su_sc_draw {
44 PC_DRAW_POINTS = 0,
45 PC_DRAW_LINES = 1,
46 PC_DRAW_TRIANGLES = 2,
47};
48
49enum adreno_compare_func {
50 FUNC_NEVER = 0,
51 FUNC_LESS = 1,
52 FUNC_EQUAL = 2,
53 FUNC_LEQUAL = 3,
54 FUNC_GREATER = 4,
55 FUNC_NOTEQUAL = 5,
56 FUNC_GEQUAL = 6,
57 FUNC_ALWAYS = 7,
58};
59
60enum adreno_stencil_op {
61 STENCIL_KEEP = 0,
62 STENCIL_ZERO = 1,
63 STENCIL_REPLACE = 2,
64 STENCIL_INCR_CLAMP = 3,
65 STENCIL_DECR_CLAMP = 4,
66 STENCIL_INVERT = 5,
67 STENCIL_INCR_WRAP = 6,
68 STENCIL_DECR_WRAP = 7,
69};
70
71enum adreno_rb_blend_factor {
72 FACTOR_ZERO = 0,
73 FACTOR_ONE = 1,
74 FACTOR_SRC_COLOR = 4,
75 FACTOR_ONE_MINUS_SRC_COLOR = 5,
76 FACTOR_SRC_ALPHA = 6,
77 FACTOR_ONE_MINUS_SRC_ALPHA = 7,
78 FACTOR_DST_COLOR = 8,
79 FACTOR_ONE_MINUS_DST_COLOR = 9,
80 FACTOR_DST_ALPHA = 10,
81 FACTOR_ONE_MINUS_DST_ALPHA = 11,
82 FACTOR_CONSTANT_COLOR = 12,
83 FACTOR_ONE_MINUS_CONSTANT_COLOR = 13,
84 FACTOR_CONSTANT_ALPHA = 14,
85 FACTOR_ONE_MINUS_CONSTANT_ALPHA = 15,
86 FACTOR_SRC_ALPHA_SATURATE = 16,
87};
88
89enum adreno_rb_blend_opcode {
90 BLEND_DST_PLUS_SRC = 0,
91 BLEND_SRC_MINUS_DST = 1,
92 BLEND_MIN_DST_SRC = 2,
93 BLEND_MAX_DST_SRC = 3,
94 BLEND_DST_MINUS_SRC = 4,
95 BLEND_DST_PLUS_SRC_BIAS = 5,
96};
97
98enum adreno_rb_surface_endian {
99 ENDIAN_NONE = 0,
100 ENDIAN_8IN16 = 1,
101 ENDIAN_8IN32 = 2,
102 ENDIAN_16IN32 = 3,
103 ENDIAN_8IN64 = 4,
104 ENDIAN_8IN128 = 5,
105};
106
107enum adreno_rb_dither_mode {
108 DITHER_DISABLE = 0,
109 DITHER_ALWAYS = 1,
110 DITHER_IF_ALPHA_OFF = 2,
111};
112
113enum adreno_rb_depth_format {
114 DEPTHX_16 = 0,
115 DEPTHX_24_8 = 1,
116};
117
118enum adreno_mmu_clnt_beh {
119 BEH_NEVR = 0,
120 BEH_TRAN_RNG = 1,
121 BEH_TRAN_FLT = 2,
122};
123
124#define REG_AXXX_MH_MMU_CONFIG 0x00000040
125#define AXXX_MH_MMU_CONFIG_MMU_ENABLE 0x00000001
126#define AXXX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE 0x00000002
127#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK 0x00000030
128#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT 4
129static inline uint32_t AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
130{
131 return ((val) << AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK;
132}
133#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK 0x000000c0
134#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT 6
135static inline uint32_t AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
136{
137 return ((val) << AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK;
138}
139#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK 0x00000300
140#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT 8
141static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
142{
143 return ((val) << AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK;
144}
145#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK 0x00000c00
146#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT 10
147static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
148{
149 return ((val) << AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK;
150}
151#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK 0x00003000
152#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT 12
153static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
154{
155 return ((val) << AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK;
156}
157#define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK 0x0000c000
158#define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT 14
159static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
160{
161 return ((val) << AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK;
162}
163#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK 0x00030000
164#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT 16
165static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
166{
167 return ((val) << AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK;
168}
169#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK 0x000c0000
170#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT 18
171static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
172{
173 return ((val) << AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK;
174}
175#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK 0x00300000
176#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT 20
177static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
178{
179 return ((val) << AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK;
180}
181#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK 0x00c00000
182#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT 22
183static inline uint32_t AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
184{
185 return ((val) << AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK;
186}
187#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK 0x03000000
188#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT 24
189static inline uint32_t AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
190{
191 return ((val) << AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK;
192}
193
194#define REG_AXXX_MH_MMU_VA_RANGE 0x00000041
195
196#define REG_AXXX_MH_MMU_PT_BASE 0x00000042
197
198#define REG_AXXX_MH_MMU_PAGE_FAULT 0x00000043
199
200#define REG_AXXX_MH_MMU_TRAN_ERROR 0x00000044
201
202#define REG_AXXX_MH_MMU_INVALIDATE 0x00000045
203
204#define REG_AXXX_MH_MMU_MPU_BASE 0x00000046
205
206#define REG_AXXX_MH_MMU_MPU_END 0x00000047
207
208#define REG_AXXX_CP_RB_BASE 0x000001c0
209
210#define REG_AXXX_CP_RB_CNTL 0x000001c1
211#define AXXX_CP_RB_CNTL_BUFSZ__MASK 0x0000003f
212#define AXXX_CP_RB_CNTL_BUFSZ__SHIFT 0
213static inline uint32_t AXXX_CP_RB_CNTL_BUFSZ(uint32_t val)
214{
215 return ((val) << AXXX_CP_RB_CNTL_BUFSZ__SHIFT) & AXXX_CP_RB_CNTL_BUFSZ__MASK;
216}
217#define AXXX_CP_RB_CNTL_BLKSZ__MASK 0x00003f00
218#define AXXX_CP_RB_CNTL_BLKSZ__SHIFT 8
219static inline uint32_t AXXX_CP_RB_CNTL_BLKSZ(uint32_t val)
220{
221 return ((val) << AXXX_CP_RB_CNTL_BLKSZ__SHIFT) & AXXX_CP_RB_CNTL_BLKSZ__MASK;
222}
223#define AXXX_CP_RB_CNTL_BUF_SWAP__MASK 0x00030000
224#define AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT 16
225static inline uint32_t AXXX_CP_RB_CNTL_BUF_SWAP(uint32_t val)
226{
227 return ((val) << AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT) & AXXX_CP_RB_CNTL_BUF_SWAP__MASK;
228}
229#define AXXX_CP_RB_CNTL_POLL_EN 0x00100000
230#define AXXX_CP_RB_CNTL_NO_UPDATE 0x08000000
231#define AXXX_CP_RB_CNTL_RPTR_WR_EN 0x80000000
232
233#define REG_AXXX_CP_RB_RPTR_ADDR 0x000001c3
234#define AXXX_CP_RB_RPTR_ADDR_SWAP__MASK 0x00000003
235#define AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT 0
236static inline uint32_t AXXX_CP_RB_RPTR_ADDR_SWAP(uint32_t val)
237{
238 return ((val) << AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT) & AXXX_CP_RB_RPTR_ADDR_SWAP__MASK;
239}
240#define AXXX_CP_RB_RPTR_ADDR_ADDR__MASK 0xfffffffc
241#define AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT 2
242static inline uint32_t AXXX_CP_RB_RPTR_ADDR_ADDR(uint32_t val)
243{
244 return ((val >> 2) << AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT) & AXXX_CP_RB_RPTR_ADDR_ADDR__MASK;
245}
246
247#define REG_AXXX_CP_RB_RPTR 0x000001c4
248
249#define REG_AXXX_CP_RB_WPTR 0x000001c5
250
251#define REG_AXXX_CP_RB_WPTR_DELAY 0x000001c6
252
253#define REG_AXXX_CP_RB_RPTR_WR 0x000001c7
254
255#define REG_AXXX_CP_RB_WPTR_BASE 0x000001c8
256
257#define REG_AXXX_CP_QUEUE_THRESHOLDS 0x000001d5
258#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK 0x0000000f
259#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT 0
260static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(uint32_t val)
261{
262 return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK;
263}
264#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK 0x00000f00
265#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT 8
266static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(uint32_t val)
267{
268 return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK;
269}
270#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK 0x000f0000
271#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT 16
272static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(uint32_t val)
273{
274 return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK;
275}
276
277#define REG_AXXX_CP_MEQ_THRESHOLDS 0x000001d6
278
279#define REG_AXXX_CP_CSQ_AVAIL 0x000001d7
280#define AXXX_CP_CSQ_AVAIL_RING__MASK 0x0000007f
281#define AXXX_CP_CSQ_AVAIL_RING__SHIFT 0
282static inline uint32_t AXXX_CP_CSQ_AVAIL_RING(uint32_t val)
283{
284 return ((val) << AXXX_CP_CSQ_AVAIL_RING__SHIFT) & AXXX_CP_CSQ_AVAIL_RING__MASK;
285}
286#define AXXX_CP_CSQ_AVAIL_IB1__MASK 0x00007f00
287#define AXXX_CP_CSQ_AVAIL_IB1__SHIFT 8
288static inline uint32_t AXXX_CP_CSQ_AVAIL_IB1(uint32_t val)
289{
290 return ((val) << AXXX_CP_CSQ_AVAIL_IB1__SHIFT) & AXXX_CP_CSQ_AVAIL_IB1__MASK;
291}
292#define AXXX_CP_CSQ_AVAIL_IB2__MASK 0x007f0000
293#define AXXX_CP_CSQ_AVAIL_IB2__SHIFT 16
294static inline uint32_t AXXX_CP_CSQ_AVAIL_IB2(uint32_t val)
295{
296 return ((val) << AXXX_CP_CSQ_AVAIL_IB2__SHIFT) & AXXX_CP_CSQ_AVAIL_IB2__MASK;
297}
298
299#define REG_AXXX_CP_STQ_AVAIL 0x000001d8
300#define AXXX_CP_STQ_AVAIL_ST__MASK 0x0000007f
301#define AXXX_CP_STQ_AVAIL_ST__SHIFT 0
302static inline uint32_t AXXX_CP_STQ_AVAIL_ST(uint32_t val)
303{
304 return ((val) << AXXX_CP_STQ_AVAIL_ST__SHIFT) & AXXX_CP_STQ_AVAIL_ST__MASK;
305}
306
307#define REG_AXXX_CP_MEQ_AVAIL 0x000001d9
308#define AXXX_CP_MEQ_AVAIL_MEQ__MASK 0x0000001f
309#define AXXX_CP_MEQ_AVAIL_MEQ__SHIFT 0
310static inline uint32_t AXXX_CP_MEQ_AVAIL_MEQ(uint32_t val)
311{
312 return ((val) << AXXX_CP_MEQ_AVAIL_MEQ__SHIFT) & AXXX_CP_MEQ_AVAIL_MEQ__MASK;
313}
314
315#define REG_AXXX_SCRATCH_UMSK 0x000001dc
316#define AXXX_SCRATCH_UMSK_UMSK__MASK 0x000000ff
317#define AXXX_SCRATCH_UMSK_UMSK__SHIFT 0
318static inline uint32_t AXXX_SCRATCH_UMSK_UMSK(uint32_t val)
319{
320 return ((val) << AXXX_SCRATCH_UMSK_UMSK__SHIFT) & AXXX_SCRATCH_UMSK_UMSK__MASK;
321}
322#define AXXX_SCRATCH_UMSK_SWAP__MASK 0x00030000
323#define AXXX_SCRATCH_UMSK_SWAP__SHIFT 16
324static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val)
325{
326 return ((val) << AXXX_SCRATCH_UMSK_SWAP__SHIFT) & AXXX_SCRATCH_UMSK_SWAP__MASK;
327}
328
329#define REG_AXXX_SCRATCH_ADDR 0x000001dd
330
331#define REG_AXXX_CP_ME_RDADDR 0x000001ea
332
333#define REG_AXXX_CP_STATE_DEBUG_INDEX 0x000001ec
334
335#define REG_AXXX_CP_STATE_DEBUG_DATA 0x000001ed
336
337#define REG_AXXX_CP_INT_CNTL 0x000001f2
338
339#define REG_AXXX_CP_INT_STATUS 0x000001f3
340
341#define REG_AXXX_CP_INT_ACK 0x000001f4
342
343#define REG_AXXX_CP_ME_CNTL 0x000001f6
344
345#define REG_AXXX_CP_ME_STATUS 0x000001f7
346
347#define REG_AXXX_CP_ME_RAM_WADDR 0x000001f8
348
349#define REG_AXXX_CP_ME_RAM_RADDR 0x000001f9
350
351#define REG_AXXX_CP_ME_RAM_DATA 0x000001fa
352
353#define REG_AXXX_CP_DEBUG 0x000001fc
354#define AXXX_CP_DEBUG_PREDICATE_DISABLE 0x00800000
355#define AXXX_CP_DEBUG_PROG_END_PTR_ENABLE 0x01000000
356#define AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE 0x02000000
357#define AXXX_CP_DEBUG_PREFETCH_PASS_NOPS 0x04000000
358#define AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE 0x08000000
359#define AXXX_CP_DEBUG_PREFETCH_MATCH_DISABLE 0x10000000
360#define AXXX_CP_DEBUG_SIMPLE_ME_FLOW_CONTROL 0x40000000
361#define AXXX_CP_DEBUG_MIU_WRITE_PACK_DISABLE 0x80000000
362
363#define REG_AXXX_CP_CSQ_RB_STAT 0x000001fd
364#define AXXX_CP_CSQ_RB_STAT_RPTR__MASK 0x0000007f
365#define AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT 0
366static inline uint32_t AXXX_CP_CSQ_RB_STAT_RPTR(uint32_t val)
367{
368 return ((val) << AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_RPTR__MASK;
369}
370#define AXXX_CP_CSQ_RB_STAT_WPTR__MASK 0x007f0000
371#define AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT 16
372static inline uint32_t AXXX_CP_CSQ_RB_STAT_WPTR(uint32_t val)
373{
374 return ((val) << AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_WPTR__MASK;
375}
376
377#define REG_AXXX_CP_CSQ_IB1_STAT 0x000001fe
378#define AXXX_CP_CSQ_IB1_STAT_RPTR__MASK 0x0000007f
379#define AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT 0
380static inline uint32_t AXXX_CP_CSQ_IB1_STAT_RPTR(uint32_t val)
381{
382 return ((val) << AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_RPTR__MASK;
383}
384#define AXXX_CP_CSQ_IB1_STAT_WPTR__MASK 0x007f0000
385#define AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT 16
386static inline uint32_t AXXX_CP_CSQ_IB1_STAT_WPTR(uint32_t val)
387{
388 return ((val) << AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_WPTR__MASK;
389}
390
391#define REG_AXXX_CP_CSQ_IB2_STAT 0x000001ff
392#define AXXX_CP_CSQ_IB2_STAT_RPTR__MASK 0x0000007f
393#define AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT 0
394static inline uint32_t AXXX_CP_CSQ_IB2_STAT_RPTR(uint32_t val)
395{
396 return ((val) << AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_RPTR__MASK;
397}
398#define AXXX_CP_CSQ_IB2_STAT_WPTR__MASK 0x007f0000
399#define AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT 16
400static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
401{
402 return ((val) << AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_WPTR__MASK;
403}
404
405#define REG_AXXX_CP_SCRATCH_REG0 0x00000578
406
407#define REG_AXXX_CP_SCRATCH_REG1 0x00000579
408
409#define REG_AXXX_CP_SCRATCH_REG2 0x0000057a
410
411#define REG_AXXX_CP_SCRATCH_REG3 0x0000057b
412
413#define REG_AXXX_CP_SCRATCH_REG4 0x0000057c
414
415#define REG_AXXX_CP_SCRATCH_REG5 0x0000057d
416
417#define REG_AXXX_CP_SCRATCH_REG6 0x0000057e
418
419#define REG_AXXX_CP_SCRATCH_REG7 0x0000057f
420
421#define REG_AXXX_CP_ME_CF_EVENT_SRC 0x0000060a
422
423#define REG_AXXX_CP_ME_CF_EVENT_ADDR 0x0000060b
424
425#define REG_AXXX_CP_ME_CF_EVENT_DATA 0x0000060c
426
427#define REG_AXXX_CP_ME_NRT_ADDR 0x0000060d
428
429#define REG_AXXX_CP_ME_NRT_DATA 0x0000060e
430
431
432#endif /* ADRENO_COMMON_XML */
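Note that the generated header only provides packing helpers; when a register is read back, fields are extracted by hand with the same __MASK/__SHIFT pairs. A small sketch (the particular read is illustrative, not something this patch does):

	uint32_t stat = gpu_read(gpu, REG_AXXX_CP_CSQ_RB_STAT);
	uint32_t rptr = (stat & AXXX_CP_CSQ_RB_STAT_RPTR__MASK) >> AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT;
	uint32_t wptr = (stat & AXXX_CP_CSQ_RB_STAT_WPTR__MASK) >> AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT;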
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
new file mode 100644
index 000000000000..a60584763b61
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -0,0 +1,370 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "adreno_gpu.h"
19#include "msm_gem.h"
20
21struct adreno_info {
22 struct adreno_rev rev;
23 uint32_t revn;
24 const char *name;
25 const char *pm4fw, *pfpfw;
26 uint32_t gmem;
27};
28
29#define ANY_ID 0xff
30
31static const struct adreno_info gpulist[] = {
32 {
33 .rev = ADRENO_REV(3, 0, 5, ANY_ID),
34 .revn = 305,
35 .name = "A305",
36 .pm4fw = "a300_pm4.fw",
37 .pfpfw = "a300_pfp.fw",
38 .gmem = SZ_256K,
39 }, {
40 .rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
41 .revn = 320,
42 .name = "A320",
43 .pm4fw = "a300_pm4.fw",
44 .pfpfw = "a300_pfp.fw",
45 .gmem = SZ_512K,
46 }, {
47 .rev = ADRENO_REV(3, 3, 0, 0),
48 .revn = 330,
49 .name = "A330",
50 .pm4fw = "a330_pm4.fw",
51 .pfpfw = "a330_pfp.fw",
52 .gmem = SZ_1M,
53 },
54};
55
56#define RB_SIZE SZ_32K
57#define RB_BLKSIZE 16
58
59int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
60{
61 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
62
63 switch (param) {
64 case MSM_PARAM_GPU_ID:
65 *value = adreno_gpu->info->revn;
66 return 0;
67 case MSM_PARAM_GMEM_SIZE:
68 *value = adreno_gpu->info->gmem;
69 return 0;
70 default:
71 DBG("%s: invalid param: %u", gpu->name, param);
72 return -EINVAL;
73 }
74}
75
76#define rbmemptr(adreno_gpu, member) \
77 ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
78
79int adreno_hw_init(struct msm_gpu *gpu)
80{
81 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
82
83 DBG("%s", gpu->name);
84
85 /* Setup REG_CP_RB_CNTL: */
86 gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
87 /* size is log2(quad-words): */
88 AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
89 AXXX_CP_RB_CNTL_BLKSZ(RB_BLKSIZE));
90
91 /* Setup ringbuffer address: */
92 gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
93 gpu_write(gpu, REG_AXXX_CP_RB_RPTR_ADDR, rbmemptr(adreno_gpu, rptr));
94
95 /* Setup scratch/timestamp: */
96 gpu_write(gpu, REG_AXXX_SCRATCH_ADDR, rbmemptr(adreno_gpu, fence));
97
98 gpu_write(gpu, REG_AXXX_SCRATCH_UMSK, 0x1);
99
100 return 0;
101}
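To make the "size is log2(quad-words)" encoding above concrete: assuming the ringbuffer was allocated at the default RB_SIZE (SZ_32K bytes; the allocation itself is not in this hunk), it holds 32768 / 8 = 4096 quad-words, so ilog2() yields 12 and the field written is effectively

	AXXX_CP_RB_CNTL_BUFSZ(ilog2(SZ_32K / 8))	/* == AXXX_CP_RB_CNTL_BUFSZ(12) */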
102
103static uint32_t get_wptr(struct msm_ringbuffer *ring)
104{
105 return ring->cur - ring->start;
106}
107
108uint32_t adreno_last_fence(struct msm_gpu *gpu)
109{
110 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
111 return adreno_gpu->memptrs->fence;
112}
113
114void adreno_recover(struct msm_gpu *gpu)
115{
116 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
117 struct drm_device *dev = gpu->dev;
118 int ret;
119
120 gpu->funcs->pm_suspend(gpu);
121
122 /* reset ringbuffer: */
123 gpu->rb->cur = gpu->rb->start;
124
125 /* reset completed fence seqno, just discard anything pending: */
126 adreno_gpu->memptrs->fence = gpu->submitted_fence;
127
128 gpu->funcs->pm_resume(gpu);
129 ret = gpu->funcs->hw_init(gpu);
130 if (ret) {
131 dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
132 /* hmm, oh well? */
133 }
134}
135
136int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
137 struct msm_file_private *ctx)
138{
139 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
140 struct msm_drm_private *priv = gpu->dev->dev_private;
141 struct msm_ringbuffer *ring = gpu->rb;
142 unsigned i, ibs = 0;
143
144 for (i = 0; i < submit->nr_cmds; i++) {
145 switch (submit->cmd[i].type) {
146 case MSM_SUBMIT_CMD_IB_TARGET_BUF:
147 /* ignore IB-targets */
148 break;
149 case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
150 /* ignore if there has not been a ctx switch: */
151 if (priv->lastctx == ctx)
152 break;
153 case MSM_SUBMIT_CMD_BUF:
154 OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
155 OUT_RING(ring, submit->cmd[i].iova);
156 OUT_RING(ring, submit->cmd[i].size);
157 ibs++;
158 break;
159 }
160 }
161
162	/* on a320, at least, we seem to need to pad things out to an
163	 * even number of qwords to avoid an issue with the CP hanging
164	 * on wrap-around:
165	 */
166 if (ibs % 2)
167 OUT_PKT2(ring);
168
169 OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
170 OUT_RING(ring, submit->fence);
171
172 if (adreno_is_a3xx(adreno_gpu)) {
173 /* Flush HLSQ lazy updates to make sure there is nothing
174 * pending for indirect loads after the timestamp has
175 * passed:
176 */
177 OUT_PKT3(ring, CP_EVENT_WRITE, 1);
178 OUT_RING(ring, HLSQ_FLUSH);
179
180 OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
181 OUT_RING(ring, 0x00000000);
182 }
183
184 OUT_PKT3(ring, CP_EVENT_WRITE, 3);
185 OUT_RING(ring, CACHE_FLUSH_TS);
186 OUT_RING(ring, rbmemptr(adreno_gpu, fence));
187 OUT_RING(ring, submit->fence);
188
189 /* we could maybe be clever and only CP_COND_EXEC the interrupt: */
190 OUT_PKT3(ring, CP_INTERRUPT, 1);
191 OUT_RING(ring, 0x80000000);
192
193#if 0
194 if (adreno_is_a3xx(adreno_gpu)) {
195 /* Dummy set-constant to trigger context rollover */
196 OUT_PKT3(ring, CP_SET_CONSTANT, 2);
197 OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
198 OUT_RING(ring, 0x00000000);
199 }
200#endif
201
202 gpu->funcs->flush(gpu);
203
204 return 0;
205}
206
207void adreno_flush(struct msm_gpu *gpu)
208{
209 uint32_t wptr = get_wptr(gpu->rb);
210
211 /* ensure writes to ringbuffer have hit system memory: */
212 mb();
213
214 gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr);
215}
216
217void adreno_idle(struct msm_gpu *gpu)
218{
219 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
220 uint32_t rptr, wptr = get_wptr(gpu->rb);
221 unsigned long t;
222
223 t = jiffies + ADRENO_IDLE_TIMEOUT;
224
225 /* then wait for CP to drain ringbuffer: */
226 do {
227 rptr = adreno_gpu->memptrs->rptr;
228 if (rptr == wptr)
229 return;
230	} while (time_before(jiffies, t));
231
232 DRM_ERROR("timeout waiting for %s to drain ringbuffer!\n", gpu->name);
233
234 /* TODO maybe we need to reset GPU here to recover from hang? */
235}
236
237#ifdef CONFIG_DEBUG_FS
238void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
239{
240 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
241
242 seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
243 adreno_gpu->info->revn, adreno_gpu->rev.core,
244 adreno_gpu->rev.major, adreno_gpu->rev.minor,
245 adreno_gpu->rev.patchid);
246
247 seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
248 gpu->submitted_fence);
249 seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr);
250 seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
251 seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
252}
253#endif
254
255void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
256{
257 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
258 uint32_t freedwords;
259 do {
260 uint32_t size = gpu->rb->size / 4;
261 uint32_t wptr = get_wptr(gpu->rb);
262 uint32_t rptr = adreno_gpu->memptrs->rptr;
263 freedwords = (rptr + (size - 1) - wptr) % size;
264	} while (freedwords < ndwords);
265}
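
/*
 * Worked example of the free-space computation above (hypothetical pointer
 * values): with a 32K ring, size = 32768 / 4 = 8192 dwords.  If wptr = 100
 * and rptr = 90, then freedwords = (90 + 8191 - 100) % 8192 = 8181, one
 * dword less than the 8182 separating wptr from rptr going forward, so the
 * ring is never reported as completely full.
 */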
266
267static const char *iommu_ports[] = {
268 "gfx3d_user", "gfx3d_priv",
269 "gfx3d1_user", "gfx3d1_priv",
270};
271
272static inline bool _rev_match(uint8_t entry, uint8_t id)
273{
274 return (entry == ANY_ID) || (entry == id);
275}
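
/*
 * Illustration of the wildcard matching above (hypothetical revisions): a
 * probed revision of 3.2.0.2 matches the ADRENO_REV(3, 2, ANY_ID, ANY_ID)
 * entry in gpulist (A320) because minor and patchid are ANY_ID wildcards,
 * while 3.3.0.1 matches no entry and adreno_gpu_init() below fails with
 * -ENXIO.
 */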
276
277int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
278 struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
279 struct adreno_rev rev)
280{
281 int i, ret;
282
283 /* identify gpu: */
284 for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
285 const struct adreno_info *info = &gpulist[i];
286 if (_rev_match(info->rev.core, rev.core) &&
287 _rev_match(info->rev.major, rev.major) &&
288 _rev_match(info->rev.minor, rev.minor) &&
289 _rev_match(info->rev.patchid, rev.patchid)) {
290 gpu->info = info;
291 gpu->revn = info->revn;
292 break;
293 }
294 }
295
296 if (i == ARRAY_SIZE(gpulist)) {
297 dev_err(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
298 rev.core, rev.major, rev.minor, rev.patchid);
299 return -ENXIO;
300 }
301
302 DBG("Found GPU: %s (%u.%u.%u.%u)", gpu->info->name,
303 rev.core, rev.major, rev.minor, rev.patchid);
304
305 gpu->funcs = funcs;
306 gpu->rev = rev;
307
308 ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev);
309 if (ret) {
310 dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
311 gpu->info->pm4fw, ret);
312 return ret;
313 }
314
315 ret = request_firmware(&gpu->pfp, gpu->info->pfpfw, drm->dev);
316 if (ret) {
317 dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
318 gpu->info->pfpfw, ret);
319 return ret;
320 }
321
322 ret = msm_gpu_init(drm, pdev, &gpu->base, &funcs->base,
323 gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
324 RB_SIZE);
325 if (ret)
326 return ret;
327
328 ret = msm_iommu_attach(drm, gpu->base.iommu,
329 iommu_ports, ARRAY_SIZE(iommu_ports));
330 if (ret)
331 return ret;
332
333 gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
334 MSM_BO_UNCACHED);
335 if (IS_ERR(gpu->memptrs_bo)) {
336 ret = PTR_ERR(gpu->memptrs_bo);
337 gpu->memptrs_bo = NULL;
338 dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
339 return ret;
340 }
341
342 gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo);
343 if (!gpu->memptrs) {
344 dev_err(drm->dev, "could not vmap memptrs\n");
345 return -ENOMEM;
346 }
347
348 ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id,
349 &gpu->memptrs_iova);
350 if (ret) {
351 dev_err(drm->dev, "could not map memptrs: %d\n", ret);
352 return ret;
353 }
354
355 return 0;
356}
357
358void adreno_gpu_cleanup(struct adreno_gpu *gpu)
359{
360 if (gpu->memptrs_bo) {
361 if (gpu->memptrs_iova)
362 msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
363 drm_gem_object_unreference(gpu->memptrs_bo);
364 }
365 if (gpu->pm4)
366 release_firmware(gpu->pm4);
367 if (gpu->pfp)
368 release_firmware(gpu->pfp);
369 msm_gpu_cleanup(&gpu->base);
370}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
new file mode 100644
index 000000000000..f73abfba7c22
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -0,0 +1,141 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ADRENO_GPU_H__
19#define __ADRENO_GPU_H__
20
21#include <linux/firmware.h>
22
23#include "msm_gpu.h"
24
25#include "adreno_common.xml.h"
26#include "adreno_pm4.xml.h"
27
28struct adreno_rev {
29 uint8_t core;
30 uint8_t major;
31 uint8_t minor;
32 uint8_t patchid;
33};
34
35#define ADRENO_REV(core, major, minor, patchid) \
36 ((struct adreno_rev){ core, major, minor, patchid })
37
38struct adreno_gpu_funcs {
39 struct msm_gpu_funcs base;
40};
41
42struct adreno_info;
43
44struct adreno_rbmemptrs {
45 volatile uint32_t rptr;
46 volatile uint32_t wptr;
47 volatile uint32_t fence;
48};
49
50struct adreno_gpu {
51 struct msm_gpu base;
52 struct adreno_rev rev;
53 const struct adreno_info *info;
54 uint32_t revn; /* numeric revision name */
55 const struct adreno_gpu_funcs *funcs;
56
57 /* firmware: */
58 const struct firmware *pm4, *pfp;
59
60 /* ringbuffer rptr/wptr: */
61	/* TODO should this be in msm_ringbuffer?  I think it would be
62	 * different for z180.. */
63 struct adreno_rbmemptrs *memptrs;
64 struct drm_gem_object *memptrs_bo;
65 uint32_t memptrs_iova;
66};
67#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
68
69/* platform config data (i.e. from DT, or pdata) */
70struct adreno_platform_config {
71 struct adreno_rev rev;
72 uint32_t fast_rate, slow_rate, bus_freq;
73};
74
75#define ADRENO_IDLE_TIMEOUT (20 * 1000)
76
77static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
78{
79 return (gpu->revn >= 300) && (gpu->revn < 400);
80}
81
82static inline bool adreno_is_a305(struct adreno_gpu *gpu)
83{
84 return gpu->revn == 305;
85}
86
87static inline bool adreno_is_a320(struct adreno_gpu *gpu)
88{
89 return gpu->revn == 320;
90}
91
92static inline bool adreno_is_a330(struct adreno_gpu *gpu)
93{
94 return gpu->revn == 330;
95}
96
97int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
98int adreno_hw_init(struct msm_gpu *gpu);
99uint32_t adreno_last_fence(struct msm_gpu *gpu);
100void adreno_recover(struct msm_gpu *gpu);
101int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
102 struct msm_file_private *ctx);
103void adreno_flush(struct msm_gpu *gpu);
104void adreno_idle(struct msm_gpu *gpu);
105#ifdef CONFIG_DEBUG_FS
106void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
107#endif
108void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords);
109
110int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
111 struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
112 struct adreno_rev rev);
113void adreno_gpu_cleanup(struct adreno_gpu *gpu);
114
115
116/* ringbuffer helpers (the parts that are adreno specific) */
117
118static inline void
119OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
120{
121 adreno_wait_ring(ring->gpu, cnt+1);
122 OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
123}
124
125/* no-op packet: */
126static inline void
127OUT_PKT2(struct msm_ringbuffer *ring)
128{
129 adreno_wait_ring(ring->gpu, 1);
130 OUT_RING(ring, CP_TYPE2_PKT);
131}
132
133static inline void
134OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
135{
136 adreno_wait_ring(ring->gpu, cnt+1);
137 OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
138}
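
/*
 * Worked example of the type-3 header emitted above (values follow from the
 * definitions in adreno_pm4.xml.h): OUT_PKT3(ring, CP_EVENT_WRITE, 3) writes
 * CP_TYPE3_PKT | ((3 - 1) << 16) | (70 << 8) = 0xc0024600, followed by the
 * three payload dwords.
 */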
139
140
141#endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
new file mode 100644
index 000000000000..94c13f418e75
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -0,0 +1,254 @@
1#ifndef ADRENO_PM4_XML
2#define ADRENO_PM4_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
17
18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark)
20
21Permission is hereby granted, free of charge, to any person obtaining
22a copy of this software and associated documentation files (the
23"Software"), to deal in the Software without restriction, including
24without limitation the rights to use, copy, modify, merge, publish,
25distribute, sublicense, and/or sell copies of the Software, and to
26permit persons to whom the Software is furnished to do so, subject to
27the following conditions:
28
29The above copyright notice and this permission notice (including the
30next paragraph) shall be included in all copies or substantial
31portions of the Software.
32
33THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
34EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
35MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
36IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
37LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
38OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
39WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40*/
41
42
43enum vgt_event_type {
44 VS_DEALLOC = 0,
45 PS_DEALLOC = 1,
46 VS_DONE_TS = 2,
47 PS_DONE_TS = 3,
48 CACHE_FLUSH_TS = 4,
49 CONTEXT_DONE = 5,
50 CACHE_FLUSH = 6,
51 HLSQ_FLUSH = 7,
52 VIZQUERY_START = 7,
53 VIZQUERY_END = 8,
54 SC_WAIT_WC = 9,
55 RST_PIX_CNT = 13,
56 RST_VTX_CNT = 14,
57 TILE_FLUSH = 15,
58 CACHE_FLUSH_AND_INV_TS_EVENT = 20,
59 ZPASS_DONE = 21,
60 CACHE_FLUSH_AND_INV_EVENT = 22,
61 PERFCOUNTER_START = 23,
62 PERFCOUNTER_STOP = 24,
63 VS_FETCH_DONE = 27,
64 FACENESS_FLUSH = 28,
65};
66
67enum pc_di_primtype {
68 DI_PT_NONE = 0,
69 DI_PT_POINTLIST = 1,
70 DI_PT_LINELIST = 2,
71 DI_PT_LINESTRIP = 3,
72 DI_PT_TRILIST = 4,
73 DI_PT_TRIFAN = 5,
74 DI_PT_TRISTRIP = 6,
75 DI_PT_RECTLIST = 8,
76 DI_PT_QUADLIST = 13,
77 DI_PT_QUADSTRIP = 14,
78 DI_PT_POLYGON = 15,
79 DI_PT_2D_COPY_RECT_LIST_V0 = 16,
80 DI_PT_2D_COPY_RECT_LIST_V1 = 17,
81 DI_PT_2D_COPY_RECT_LIST_V2 = 18,
82 DI_PT_2D_COPY_RECT_LIST_V3 = 19,
83 DI_PT_2D_FILL_RECT_LIST = 20,
84 DI_PT_2D_LINE_STRIP = 21,
85 DI_PT_2D_TRI_STRIP = 22,
86};
87
88enum pc_di_src_sel {
89 DI_SRC_SEL_DMA = 0,
90 DI_SRC_SEL_IMMEDIATE = 1,
91 DI_SRC_SEL_AUTO_INDEX = 2,
92 DI_SRC_SEL_RESERVED = 3,
93};
94
95enum pc_di_index_size {
96 INDEX_SIZE_IGN = 0,
97 INDEX_SIZE_16_BIT = 0,
98 INDEX_SIZE_32_BIT = 1,
99 INDEX_SIZE_8_BIT = 2,
100 INDEX_SIZE_INVALID = 0,
101};
102
103enum pc_di_vis_cull_mode {
104 IGNORE_VISIBILITY = 0,
105};
106
107enum adreno_pm4_packet_type {
108 CP_TYPE0_PKT = 0,
109 CP_TYPE1_PKT = 0x40000000,
110 CP_TYPE2_PKT = 0x80000000,
111 CP_TYPE3_PKT = 0xc0000000,
112};
113
114enum adreno_pm4_type3_packets {
115 CP_ME_INIT = 72,
116 CP_NOP = 16,
117 CP_INDIRECT_BUFFER = 63,
118 CP_INDIRECT_BUFFER_PFD = 55,
119 CP_WAIT_FOR_IDLE = 38,
120 CP_WAIT_REG_MEM = 60,
121 CP_WAIT_REG_EQ = 82,
122 CP_WAT_REG_GTE = 83,
123 CP_WAIT_UNTIL_READ = 92,
124 CP_WAIT_IB_PFD_COMPLETE = 93,
125 CP_REG_RMW = 33,
126 CP_SET_BIN_DATA = 47,
127 CP_REG_TO_MEM = 62,
128 CP_MEM_WRITE = 61,
129 CP_MEM_WRITE_CNTR = 79,
130 CP_COND_EXEC = 68,
131 CP_COND_WRITE = 69,
132 CP_EVENT_WRITE = 70,
133 CP_EVENT_WRITE_SHD = 88,
134 CP_EVENT_WRITE_CFL = 89,
135 CP_EVENT_WRITE_ZPD = 91,
136 CP_RUN_OPENCL = 49,
137 CP_DRAW_INDX = 34,
138 CP_DRAW_INDX_2 = 54,
139 CP_DRAW_INDX_BIN = 52,
140 CP_DRAW_INDX_2_BIN = 53,
141 CP_VIZ_QUERY = 35,
142 CP_SET_STATE = 37,
143 CP_SET_CONSTANT = 45,
144 CP_IM_LOAD = 39,
145 CP_IM_LOAD_IMMEDIATE = 43,
146 CP_LOAD_CONSTANT_CONTEXT = 46,
147 CP_INVALIDATE_STATE = 59,
148 CP_SET_SHADER_BASES = 74,
149 CP_SET_BIN_MASK = 80,
150 CP_SET_BIN_SELECT = 81,
151 CP_CONTEXT_UPDATE = 94,
152 CP_INTERRUPT = 64,
153 CP_IM_STORE = 44,
154 CP_SET_BIN_BASE_OFFSET = 75,
155 CP_SET_DRAW_INIT_FLAGS = 75,
156 CP_SET_PROTECTED_MODE = 95,
157 CP_LOAD_STATE = 48,
158 CP_COND_INDIRECT_BUFFER_PFE = 58,
159 CP_COND_INDIRECT_BUFFER_PFD = 50,
160 CP_INDIRECT_BUFFER_PFE = 63,
161 CP_SET_BIN = 76,
162};
163
164enum adreno_state_block {
165 SB_VERT_TEX = 0,
166 SB_VERT_MIPADDR = 1,
167 SB_FRAG_TEX = 2,
168 SB_FRAG_MIPADDR = 3,
169 SB_VERT_SHADER = 4,
170 SB_FRAG_SHADER = 6,
171};
172
173enum adreno_state_type {
174 ST_SHADER = 0,
175 ST_CONSTANTS = 1,
176};
177
178enum adreno_state_src {
179 SS_DIRECT = 0,
180 SS_INDIRECT = 4,
181};
182
183#define REG_CP_LOAD_STATE_0 0x00000000
184#define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff
185#define CP_LOAD_STATE_0_DST_OFF__SHIFT 0
186static inline uint32_t CP_LOAD_STATE_0_DST_OFF(uint32_t val)
187{
188 return ((val) << CP_LOAD_STATE_0_DST_OFF__SHIFT) & CP_LOAD_STATE_0_DST_OFF__MASK;
189}
190#define CP_LOAD_STATE_0_STATE_SRC__MASK 0x00070000
191#define CP_LOAD_STATE_0_STATE_SRC__SHIFT 16
192static inline uint32_t CP_LOAD_STATE_0_STATE_SRC(enum adreno_state_src val)
193{
194 return ((val) << CP_LOAD_STATE_0_STATE_SRC__SHIFT) & CP_LOAD_STATE_0_STATE_SRC__MASK;
195}
196#define CP_LOAD_STATE_0_STATE_BLOCK__MASK 0x00380000
197#define CP_LOAD_STATE_0_STATE_BLOCK__SHIFT 19
198static inline uint32_t CP_LOAD_STATE_0_STATE_BLOCK(enum adreno_state_block val)
199{
200 return ((val) << CP_LOAD_STATE_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE_0_STATE_BLOCK__MASK;
201}
202#define CP_LOAD_STATE_0_NUM_UNIT__MASK 0x7fc00000
203#define CP_LOAD_STATE_0_NUM_UNIT__SHIFT 22
204static inline uint32_t CP_LOAD_STATE_0_NUM_UNIT(uint32_t val)
205{
206 return ((val) << CP_LOAD_STATE_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE_0_NUM_UNIT__MASK;
207}
208
209#define REG_CP_LOAD_STATE_1 0x00000001
210#define CP_LOAD_STATE_1_STATE_TYPE__MASK 0x00000003
211#define CP_LOAD_STATE_1_STATE_TYPE__SHIFT 0
212static inline uint32_t CP_LOAD_STATE_1_STATE_TYPE(enum adreno_state_type val)
213{
214 return ((val) << CP_LOAD_STATE_1_STATE_TYPE__SHIFT) & CP_LOAD_STATE_1_STATE_TYPE__MASK;
215}
216#define CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK 0xfffffffc
217#define CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT 2
218static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val)
219{
220 return ((val >> 2) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK;
221}
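
/*
 * The per-field helpers above are meant to be OR'd together to build the two
 * CP_LOAD_STATE header dwords; a sketch with illustrative field values only:
 *
 *	dword0 = CP_LOAD_STATE_0_DST_OFF(0) |
 *		 CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
 *		 CP_LOAD_STATE_0_STATE_BLOCK(SB_VERT_SHADER) |
 *		 CP_LOAD_STATE_0_NUM_UNIT(2);		-> 0x00a00000
 *	dword1 = CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER) |
 *		 CP_LOAD_STATE_1_EXT_SRC_ADDR(0);	-> 0x00000000
 */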
222
223#define REG_CP_SET_BIN_0 0x00000000
224
225#define REG_CP_SET_BIN_1 0x00000001
226#define CP_SET_BIN_1_X1__MASK 0x0000ffff
227#define CP_SET_BIN_1_X1__SHIFT 0
228static inline uint32_t CP_SET_BIN_1_X1(uint32_t val)
229{
230 return ((val) << CP_SET_BIN_1_X1__SHIFT) & CP_SET_BIN_1_X1__MASK;
231}
232#define CP_SET_BIN_1_Y1__MASK 0xffff0000
233#define CP_SET_BIN_1_Y1__SHIFT 16
234static inline uint32_t CP_SET_BIN_1_Y1(uint32_t val)
235{
236 return ((val) << CP_SET_BIN_1_Y1__SHIFT) & CP_SET_BIN_1_Y1__MASK;
237}
238
239#define REG_CP_SET_BIN_2 0x00000002
240#define CP_SET_BIN_2_X2__MASK 0x0000ffff
241#define CP_SET_BIN_2_X2__SHIFT 0
242static inline uint32_t CP_SET_BIN_2_X2(uint32_t val)
243{
244 return ((val) << CP_SET_BIN_2_X2__SHIFT) & CP_SET_BIN_2_X2__MASK;
245}
246#define CP_SET_BIN_2_Y2__MASK 0xffff0000
247#define CP_SET_BIN_2_Y2__SHIFT 16
248static inline uint32_t CP_SET_BIN_2_Y2(uint32_t val)
249{
250 return ((val) << CP_SET_BIN_2_Y2__SHIFT) & CP_SET_BIN_2_Y2__MASK;
251}
252
253
254#endif /* ADRENO_PM4_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
new file mode 100644
index 000000000000..6f8396be431d
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -0,0 +1,502 @@
1#ifndef DSI_XML
2#define DSI_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
45enum dsi_traffic_mode {
46 NON_BURST_SYNCH_PULSE = 0,
47 NON_BURST_SYNCH_EVENT = 1,
48 BURST_MODE = 2,
49};
50
51enum dsi_dst_format {
52 DST_FORMAT_RGB565 = 0,
53 DST_FORMAT_RGB666 = 1,
54 DST_FORMAT_RGB666_LOOSE = 2,
55 DST_FORMAT_RGB888 = 3,
56};
57
58enum dsi_rgb_swap {
59 SWAP_RGB = 0,
60 SWAP_RBG = 1,
61 SWAP_BGR = 2,
62 SWAP_BRG = 3,
63 SWAP_GRB = 4,
64 SWAP_GBR = 5,
65};
66
67enum dsi_cmd_trigger {
68 TRIGGER_NONE = 0,
69 TRIGGER_TE = 2,
70 TRIGGER_SW = 4,
71 TRIGGER_SW_SEOF = 5,
72 TRIGGER_SW_TE = 6,
73};
74
75#define DSI_IRQ_CMD_DMA_DONE 0x00000001
76#define DSI_IRQ_MASK_CMD_DMA_DONE 0x00000002
77#define DSI_IRQ_CMD_MDP_DONE 0x00000100
78#define DSI_IRQ_MASK_CMD_MDP_DONE 0x00000200
79#define DSI_IRQ_VIDEO_DONE 0x00010000
80#define DSI_IRQ_MASK_VIDEO_DONE 0x00020000
81#define DSI_IRQ_ERROR 0x01000000
82#define DSI_IRQ_MASK_ERROR 0x02000000
83#define REG_DSI_CTRL 0x00000000
84#define DSI_CTRL_ENABLE 0x00000001
85#define DSI_CTRL_VID_MODE_EN 0x00000002
86#define DSI_CTRL_CMD_MODE_EN 0x00000004
87#define DSI_CTRL_LANE0 0x00000010
88#define DSI_CTRL_LANE1 0x00000020
89#define DSI_CTRL_LANE2 0x00000040
90#define DSI_CTRL_LANE3 0x00000080
91#define DSI_CTRL_CLK_EN 0x00000100
92#define DSI_CTRL_ECC_CHECK 0x00100000
93#define DSI_CTRL_CRC_CHECK 0x01000000
94
95#define REG_DSI_STATUS0 0x00000004
96#define DSI_STATUS0_CMD_MODE_DMA_BUSY 0x00000002
97#define DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY 0x00000008
98#define DSI_STATUS0_DSI_BUSY 0x00000010
99
100#define REG_DSI_FIFO_STATUS 0x00000008
101
102#define REG_DSI_VID_CFG0 0x0000000c
103#define DSI_VID_CFG0_VIRT_CHANNEL__MASK 0x00000003
104#define DSI_VID_CFG0_VIRT_CHANNEL__SHIFT 0
105static inline uint32_t DSI_VID_CFG0_VIRT_CHANNEL(uint32_t val)
106{
107 return ((val) << DSI_VID_CFG0_VIRT_CHANNEL__SHIFT) & DSI_VID_CFG0_VIRT_CHANNEL__MASK;
108}
109#define DSI_VID_CFG0_DST_FORMAT__MASK 0x00000030
110#define DSI_VID_CFG0_DST_FORMAT__SHIFT 4
111static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_dst_format val)
112{
113 return ((val) << DSI_VID_CFG0_DST_FORMAT__SHIFT) & DSI_VID_CFG0_DST_FORMAT__MASK;
114}
115#define DSI_VID_CFG0_TRAFFIC_MODE__MASK 0x00000300
116#define DSI_VID_CFG0_TRAFFIC_MODE__SHIFT 8
117static inline uint32_t DSI_VID_CFG0_TRAFFIC_MODE(enum dsi_traffic_mode val)
118{
119 return ((val) << DSI_VID_CFG0_TRAFFIC_MODE__SHIFT) & DSI_VID_CFG0_TRAFFIC_MODE__MASK;
120}
121#define DSI_VID_CFG0_BLLP_POWER_STOP 0x00001000
122#define DSI_VID_CFG0_EOF_BLLP_POWER_STOP 0x00008000
123#define DSI_VID_CFG0_HSA_POWER_STOP 0x00010000
124#define DSI_VID_CFG0_HBP_POWER_STOP 0x00100000
125#define DSI_VID_CFG0_HFP_POWER_STOP 0x01000000
126#define DSI_VID_CFG0_PULSE_MODE_HSA_HE 0x10000000
127
128#define REG_DSI_VID_CFG1 0x0000001c
129#define DSI_VID_CFG1_R_SEL 0x00000010
130#define DSI_VID_CFG1_G_SEL 0x00000100
131#define DSI_VID_CFG1_B_SEL 0x00001000
132#define DSI_VID_CFG1_RGB_SWAP__MASK 0x00070000
133#define DSI_VID_CFG1_RGB_SWAP__SHIFT 16
134static inline uint32_t DSI_VID_CFG1_RGB_SWAP(enum dsi_rgb_swap val)
135{
136 return ((val) << DSI_VID_CFG1_RGB_SWAP__SHIFT) & DSI_VID_CFG1_RGB_SWAP__MASK;
137}
138#define DSI_VID_CFG1_INTERLEAVE_MAX__MASK 0x00f00000
139#define DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT 20
140static inline uint32_t DSI_VID_CFG1_INTERLEAVE_MAX(uint32_t val)
141{
142 return ((val) << DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT) & DSI_VID_CFG1_INTERLEAVE_MAX__MASK;
143}
144
145#define REG_DSI_ACTIVE_H 0x00000020
146#define DSI_ACTIVE_H_START__MASK 0x00000fff
147#define DSI_ACTIVE_H_START__SHIFT 0
148static inline uint32_t DSI_ACTIVE_H_START(uint32_t val)
149{
150 return ((val) << DSI_ACTIVE_H_START__SHIFT) & DSI_ACTIVE_H_START__MASK;
151}
152#define DSI_ACTIVE_H_END__MASK 0x0fff0000
153#define DSI_ACTIVE_H_END__SHIFT 16
154static inline uint32_t DSI_ACTIVE_H_END(uint32_t val)
155{
156 return ((val) << DSI_ACTIVE_H_END__SHIFT) & DSI_ACTIVE_H_END__MASK;
157}
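
/*
 * As with the other packed registers in this file, the two helpers above are
 * OR'd into a single REG_DSI_ACTIVE_H value; with illustrative timings of an
 * active region from pixel 40 to pixel 1960:
 *
 *	DSI_ACTIVE_H_START(40) | DSI_ACTIVE_H_END(1960) = 0x07a80028
 */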
158
159#define REG_DSI_ACTIVE_V 0x00000024
160#define DSI_ACTIVE_V_START__MASK 0x00000fff
161#define DSI_ACTIVE_V_START__SHIFT 0
162static inline uint32_t DSI_ACTIVE_V_START(uint32_t val)
163{
164 return ((val) << DSI_ACTIVE_V_START__SHIFT) & DSI_ACTIVE_V_START__MASK;
165}
166#define DSI_ACTIVE_V_END__MASK 0x0fff0000
167#define DSI_ACTIVE_V_END__SHIFT 16
168static inline uint32_t DSI_ACTIVE_V_END(uint32_t val)
169{
170 return ((val) << DSI_ACTIVE_V_END__SHIFT) & DSI_ACTIVE_V_END__MASK;
171}
172
173#define REG_DSI_TOTAL 0x00000028
174#define DSI_TOTAL_H_TOTAL__MASK 0x00000fff
175#define DSI_TOTAL_H_TOTAL__SHIFT 0
176static inline uint32_t DSI_TOTAL_H_TOTAL(uint32_t val)
177{
178 return ((val) << DSI_TOTAL_H_TOTAL__SHIFT) & DSI_TOTAL_H_TOTAL__MASK;
179}
180#define DSI_TOTAL_V_TOTAL__MASK 0x0fff0000
181#define DSI_TOTAL_V_TOTAL__SHIFT 16
182static inline uint32_t DSI_TOTAL_V_TOTAL(uint32_t val)
183{
184 return ((val) << DSI_TOTAL_V_TOTAL__SHIFT) & DSI_TOTAL_V_TOTAL__MASK;
185}
186
187#define REG_DSI_ACTIVE_HSYNC 0x0000002c
188#define DSI_ACTIVE_HSYNC_START__MASK 0x00000fff
189#define DSI_ACTIVE_HSYNC_START__SHIFT 0
190static inline uint32_t DSI_ACTIVE_HSYNC_START(uint32_t val)
191{
192 return ((val) << DSI_ACTIVE_HSYNC_START__SHIFT) & DSI_ACTIVE_HSYNC_START__MASK;
193}
194#define DSI_ACTIVE_HSYNC_END__MASK 0x0fff0000
195#define DSI_ACTIVE_HSYNC_END__SHIFT 16
196static inline uint32_t DSI_ACTIVE_HSYNC_END(uint32_t val)
197{
198 return ((val) << DSI_ACTIVE_HSYNC_END__SHIFT) & DSI_ACTIVE_HSYNC_END__MASK;
199}
200
201#define REG_DSI_ACTIVE_VSYNC 0x00000034
202#define DSI_ACTIVE_VSYNC_START__MASK 0x00000fff
203#define DSI_ACTIVE_VSYNC_START__SHIFT 0
204static inline uint32_t DSI_ACTIVE_VSYNC_START(uint32_t val)
205{
206 return ((val) << DSI_ACTIVE_VSYNC_START__SHIFT) & DSI_ACTIVE_VSYNC_START__MASK;
207}
208#define DSI_ACTIVE_VSYNC_END__MASK 0x0fff0000
209#define DSI_ACTIVE_VSYNC_END__SHIFT 16
210static inline uint32_t DSI_ACTIVE_VSYNC_END(uint32_t val)
211{
212 return ((val) << DSI_ACTIVE_VSYNC_END__SHIFT) & DSI_ACTIVE_VSYNC_END__MASK;
213}
214
215#define REG_DSI_CMD_DMA_CTRL 0x00000038
216#define DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER 0x10000000
217#define DSI_CMD_DMA_CTRL_LOW_POWER 0x04000000
218
219#define REG_DSI_CMD_CFG0 0x0000003c
220
221#define REG_DSI_CMD_CFG1 0x00000040
222
223#define REG_DSI_DMA_BASE 0x00000044
224
225#define REG_DSI_DMA_LEN 0x00000048
226
227#define REG_DSI_ACK_ERR_STATUS 0x00000064
228
229static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; }
230
231static inline uint32_t REG_DSI_RDBK_DATA(uint32_t i0) { return 0x00000068 + 0x4*i0; }
232
233#define REG_DSI_TRIG_CTRL 0x00000080
234#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK 0x0000000f
235#define DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT 0
236static inline uint32_t DSI_TRIG_CTRL_DMA_TRIGGER(enum dsi_cmd_trigger val)
237{
238 return ((val) << DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT) & DSI_TRIG_CTRL_DMA_TRIGGER__MASK;
239}
240#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK 0x000000f0
241#define DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT 4
242static inline uint32_t DSI_TRIG_CTRL_MDP_TRIGGER(enum dsi_cmd_trigger val)
243{
244 return ((val) << DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT) & DSI_TRIG_CTRL_MDP_TRIGGER__MASK;
245}
246#define DSI_TRIG_CTRL_STREAM 0x00000100
247#define DSI_TRIG_CTRL_TE 0x80000000
248
249#define REG_DSI_TRIG_DMA 0x0000008c
250
251#define REG_DSI_DLN0_PHY_ERR 0x000000b0
252
253#define REG_DSI_TIMEOUT_STATUS 0x000000bc
254
255#define REG_DSI_CLKOUT_TIMING_CTRL 0x000000c0
256#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK 0x0000003f
257#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT 0
258static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(uint32_t val)
259{
260 return ((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK;
261}
262#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK 0x00003f00
263#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT 8
264static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
265{
266 return ((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK;
267}
268
269#define REG_DSI_EOT_PACKET_CTRL 0x000000c8
270#define DSI_EOT_PACKET_CTRL_TX_EOT_APPEND 0x00000001
271#define DSI_EOT_PACKET_CTRL_RX_EOT_IGNORE 0x00000010
272
273#define REG_DSI_LANE_SWAP_CTRL 0x000000ac
274
275#define REG_DSI_ERR_INT_MASK0 0x00000108
276
277#define REG_DSI_INTR_CTRL 0x0000010c
278
279#define REG_DSI_RESET 0x00000114
280
281#define REG_DSI_CLK_CTRL 0x00000118
282
283#define REG_DSI_PHY_RESET 0x00000128
284
285#define REG_DSI_PHY_PLL_CTRL_0 0x00000200
286#define DSI_PHY_PLL_CTRL_0_ENABLE 0x00000001
287
288#define REG_DSI_PHY_PLL_CTRL_1 0x00000204
289
290#define REG_DSI_PHY_PLL_CTRL_2 0x00000208
291
292#define REG_DSI_PHY_PLL_CTRL_3 0x0000020c
293
294#define REG_DSI_PHY_PLL_CTRL_4 0x00000210
295
296#define REG_DSI_PHY_PLL_CTRL_5 0x00000214
297
298#define REG_DSI_PHY_PLL_CTRL_6 0x00000218
299
300#define REG_DSI_PHY_PLL_CTRL_7 0x0000021c
301
302#define REG_DSI_PHY_PLL_CTRL_8 0x00000220
303
304#define REG_DSI_PHY_PLL_CTRL_9 0x00000224
305
306#define REG_DSI_PHY_PLL_CTRL_10 0x00000228
307
308#define REG_DSI_PHY_PLL_CTRL_11 0x0000022c
309
310#define REG_DSI_PHY_PLL_CTRL_12 0x00000230
311
312#define REG_DSI_PHY_PLL_CTRL_13 0x00000234
313
314#define REG_DSI_PHY_PLL_CTRL_14 0x00000238
315
316#define REG_DSI_PHY_PLL_CTRL_15 0x0000023c
317
318#define REG_DSI_PHY_PLL_CTRL_16 0x00000240
319
320#define REG_DSI_PHY_PLL_CTRL_17 0x00000244
321
322#define REG_DSI_PHY_PLL_CTRL_18 0x00000248
323
324#define REG_DSI_PHY_PLL_CTRL_19 0x0000024c
325
326#define REG_DSI_PHY_PLL_CTRL_20 0x00000250
327
328#define REG_DSI_PHY_PLL_STATUS 0x00000280
329#define DSI_PHY_PLL_STATUS_PLL_BUSY 0x00000001
330
331#define REG_DSI_8x60_PHY_TPA_CTRL_1 0x00000258
332
333#define REG_DSI_8x60_PHY_TPA_CTRL_2 0x0000025c
334
335#define REG_DSI_8x60_PHY_TIMING_CTRL_0 0x00000260
336
337#define REG_DSI_8x60_PHY_TIMING_CTRL_1 0x00000264
338
339#define REG_DSI_8x60_PHY_TIMING_CTRL_2 0x00000268
340
341#define REG_DSI_8x60_PHY_TIMING_CTRL_3 0x0000026c
342
343#define REG_DSI_8x60_PHY_TIMING_CTRL_4 0x00000270
344
345#define REG_DSI_8x60_PHY_TIMING_CTRL_5 0x00000274
346
347#define REG_DSI_8x60_PHY_TIMING_CTRL_6 0x00000278
348
349#define REG_DSI_8x60_PHY_TIMING_CTRL_7 0x0000027c
350
351#define REG_DSI_8x60_PHY_TIMING_CTRL_8 0x00000280
352
353#define REG_DSI_8x60_PHY_TIMING_CTRL_9 0x00000284
354
355#define REG_DSI_8x60_PHY_TIMING_CTRL_10 0x00000288
356
357#define REG_DSI_8x60_PHY_TIMING_CTRL_11 0x0000028c
358
359#define REG_DSI_8x60_PHY_CTRL_0 0x00000290
360
361#define REG_DSI_8x60_PHY_CTRL_1 0x00000294
362
363#define REG_DSI_8x60_PHY_CTRL_2 0x00000298
364
365#define REG_DSI_8x60_PHY_CTRL_3 0x0000029c
366
367#define REG_DSI_8x60_PHY_STRENGTH_0 0x000002a0
368
369#define REG_DSI_8x60_PHY_STRENGTH_1 0x000002a4
370
371#define REG_DSI_8x60_PHY_STRENGTH_2 0x000002a8
372
373#define REG_DSI_8x60_PHY_STRENGTH_3 0x000002ac
374
375#define REG_DSI_8x60_PHY_REGULATOR_CTRL_0 0x000002cc
376
377#define REG_DSI_8x60_PHY_REGULATOR_CTRL_1 0x000002d0
378
379#define REG_DSI_8x60_PHY_REGULATOR_CTRL_2 0x000002d4
380
381#define REG_DSI_8x60_PHY_REGULATOR_CTRL_3 0x000002d8
382
383#define REG_DSI_8x60_PHY_REGULATOR_CTRL_4 0x000002dc
384
385#define REG_DSI_8x60_PHY_CAL_HW_TRIGGER 0x000000f0
386
387#define REG_DSI_8x60_PHY_CAL_CTRL 0x000000f4
388
389#define REG_DSI_8x60_PHY_CAL_STATUS 0x000000fc
390#define DSI_8x60_PHY_CAL_STATUS_CAL_BUSY 0x10000000
391
392static inline uint32_t REG_DSI_8960_LN(uint32_t i0) { return 0x00000300 + 0x40*i0; }
393
394static inline uint32_t REG_DSI_8960_LN_CFG_0(uint32_t i0) { return 0x00000300 + 0x40*i0; }
395
396static inline uint32_t REG_DSI_8960_LN_CFG_1(uint32_t i0) { return 0x00000304 + 0x40*i0; }
397
398static inline uint32_t REG_DSI_8960_LN_CFG_2(uint32_t i0) { return 0x00000308 + 0x40*i0; }
399
400static inline uint32_t REG_DSI_8960_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000030c + 0x40*i0; }
401
402static inline uint32_t REG_DSI_8960_LN_TEST_STR_0(uint32_t i0) { return 0x00000314 + 0x40*i0; }
403
404static inline uint32_t REG_DSI_8960_LN_TEST_STR_1(uint32_t i0) { return 0x00000318 + 0x40*i0; }
405
406#define REG_DSI_8960_PHY_LNCK_CFG_0 0x00000400
407
408#define REG_DSI_8960_PHY_LNCK_CFG_1 0x00000404
409
410#define REG_DSI_8960_PHY_LNCK_CFG_2 0x00000408
411
412#define REG_DSI_8960_PHY_LNCK_TEST_DATAPATH 0x0000040c
413
414#define REG_DSI_8960_PHY_LNCK_TEST_STR0 0x00000414
415
416#define REG_DSI_8960_PHY_LNCK_TEST_STR1 0x00000418
417
418#define REG_DSI_8960_PHY_TIMING_CTRL_0 0x00000440
419
420#define REG_DSI_8960_PHY_TIMING_CTRL_1 0x00000444
421
422#define REG_DSI_8960_PHY_TIMING_CTRL_2 0x00000448
423
424#define REG_DSI_8960_PHY_TIMING_CTRL_3 0x0000044c
425
426#define REG_DSI_8960_PHY_TIMING_CTRL_4 0x00000450
427
428#define REG_DSI_8960_PHY_TIMING_CTRL_5 0x00000454
429
430#define REG_DSI_8960_PHY_TIMING_CTRL_6 0x00000458
431
432#define REG_DSI_8960_PHY_TIMING_CTRL_7 0x0000045c
433
434#define REG_DSI_8960_PHY_TIMING_CTRL_8 0x00000460
435
436#define REG_DSI_8960_PHY_TIMING_CTRL_9 0x00000464
437
438#define REG_DSI_8960_PHY_TIMING_CTRL_10 0x00000468
439
440#define REG_DSI_8960_PHY_TIMING_CTRL_11 0x0000046c
441
442#define REG_DSI_8960_PHY_CTRL_0 0x00000470
443
444#define REG_DSI_8960_PHY_CTRL_1 0x00000474
445
446#define REG_DSI_8960_PHY_CTRL_2 0x00000478
447
448#define REG_DSI_8960_PHY_CTRL_3 0x0000047c
449
450#define REG_DSI_8960_PHY_STRENGTH_0 0x00000480
451
452#define REG_DSI_8960_PHY_STRENGTH_1 0x00000484
453
454#define REG_DSI_8960_PHY_STRENGTH_2 0x00000488
455
456#define REG_DSI_8960_PHY_BIST_CTRL_0 0x0000048c
457
458#define REG_DSI_8960_PHY_BIST_CTRL_1 0x00000490
459
460#define REG_DSI_8960_PHY_BIST_CTRL_2 0x00000494
461
462#define REG_DSI_8960_PHY_BIST_CTRL_3 0x00000498
463
464#define REG_DSI_8960_PHY_BIST_CTRL_4 0x0000049c
465
466#define REG_DSI_8960_PHY_LDO_CTRL 0x000004b0
467
468#define REG_DSI_8960_PHY_REGULATOR_CTRL_0 0x00000500
469
470#define REG_DSI_8960_PHY_REGULATOR_CTRL_1 0x00000504
471
472#define REG_DSI_8960_PHY_REGULATOR_CTRL_2 0x00000508
473
474#define REG_DSI_8960_PHY_REGULATOR_CTRL_3 0x0000050c
475
476#define REG_DSI_8960_PHY_REGULATOR_CTRL_4 0x00000510
477
478#define REG_DSI_8960_PHY_REGULATOR_CAL_PWR_CFG 0x00000518
479
480#define REG_DSI_8960_PHY_CAL_HW_TRIGGER 0x00000528
481
482#define REG_DSI_8960_PHY_CAL_SW_CFG_0 0x0000052c
483
484#define REG_DSI_8960_PHY_CAL_SW_CFG_1 0x00000530
485
486#define REG_DSI_8960_PHY_CAL_SW_CFG_2 0x00000534
487
488#define REG_DSI_8960_PHY_CAL_HW_CFG_0 0x00000538
489
490#define REG_DSI_8960_PHY_CAL_HW_CFG_1 0x0000053c
491
492#define REG_DSI_8960_PHY_CAL_HW_CFG_2 0x00000540
493
494#define REG_DSI_8960_PHY_CAL_HW_CFG_3 0x00000544
495
496#define REG_DSI_8960_PHY_CAL_HW_CFG_4 0x00000548
497
498#define REG_DSI_8960_PHY_CAL_STATUS 0x00000550
499#define DSI_8960_PHY_CAL_STATUS_CAL_BUSY 0x00000010
500
501
502#endif /* DSI_XML */
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
new file mode 100644
index 000000000000..aefc1b8feae9
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -0,0 +1,114 @@
1#ifndef MMSS_CC_XML
2#define MMSS_CC_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
45enum mmss_cc_clk {
46 CLK = 0,
47 PCLK = 1,
48};
49
50#define REG_MMSS_CC_AHB 0x00000008
51
52static inline uint32_t __offset_CLK(enum mmss_cc_clk idx)
53{
54 switch (idx) {
55 case CLK: return 0x0000004c;
56 case PCLK: return 0x00000130;
57 default: return INVALID_IDX(idx);
58 }
59}
60static inline uint32_t REG_MMSS_CC_CLK(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); }
61
62static inline uint32_t REG_MMSS_CC_CLK_CC(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); }
63#define MMSS_CC_CLK_CC_CLK_EN 0x00000001
64#define MMSS_CC_CLK_CC_ROOT_EN 0x00000004
65#define MMSS_CC_CLK_CC_MND_EN 0x00000020
66#define MMSS_CC_CLK_CC_MND_MODE__MASK 0x000000c0
67#define MMSS_CC_CLK_CC_MND_MODE__SHIFT 6
68static inline uint32_t MMSS_CC_CLK_CC_MND_MODE(uint32_t val)
69{
70 return ((val) << MMSS_CC_CLK_CC_MND_MODE__SHIFT) & MMSS_CC_CLK_CC_MND_MODE__MASK;
71}
72#define MMSS_CC_CLK_CC_PMXO_SEL__MASK 0x00000300
73#define MMSS_CC_CLK_CC_PMXO_SEL__SHIFT 8
74static inline uint32_t MMSS_CC_CLK_CC_PMXO_SEL(uint32_t val)
75{
76 return ((val) << MMSS_CC_CLK_CC_PMXO_SEL__SHIFT) & MMSS_CC_CLK_CC_PMXO_SEL__MASK;
77}
78
79static inline uint32_t REG_MMSS_CC_CLK_MD(enum mmss_cc_clk i0) { return 0x00000004 + __offset_CLK(i0); }
80#define MMSS_CC_CLK_MD_D__MASK 0x000000ff
81#define MMSS_CC_CLK_MD_D__SHIFT 0
82static inline uint32_t MMSS_CC_CLK_MD_D(uint32_t val)
83{
84 return ((val) << MMSS_CC_CLK_MD_D__SHIFT) & MMSS_CC_CLK_MD_D__MASK;
85}
86#define MMSS_CC_CLK_MD_M__MASK 0x0000ff00
87#define MMSS_CC_CLK_MD_M__SHIFT 8
88static inline uint32_t MMSS_CC_CLK_MD_M(uint32_t val)
89{
90 return ((val) << MMSS_CC_CLK_MD_M__SHIFT) & MMSS_CC_CLK_MD_M__MASK;
91}
92
93static inline uint32_t REG_MMSS_CC_CLK_NS(enum mmss_cc_clk i0) { return 0x00000008 + __offset_CLK(i0); }
94#define MMSS_CC_CLK_NS_SRC__MASK 0x0000000f
95#define MMSS_CC_CLK_NS_SRC__SHIFT 0
96static inline uint32_t MMSS_CC_CLK_NS_SRC(uint32_t val)
97{
98 return ((val) << MMSS_CC_CLK_NS_SRC__SHIFT) & MMSS_CC_CLK_NS_SRC__MASK;
99}
100#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK 0x00fff000
101#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT 12
102static inline uint32_t MMSS_CC_CLK_NS_PRE_DIV_FUNC(uint32_t val)
103{
104 return ((val) << MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT) & MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK;
105}
106#define MMSS_CC_CLK_NS_VAL__MASK 0xff000000
107#define MMSS_CC_CLK_NS_VAL__SHIFT 24
108static inline uint32_t MMSS_CC_CLK_NS_VAL(uint32_t val)
109{
110 return ((val) << MMSS_CC_CLK_NS_VAL__SHIFT) & MMSS_CC_CLK_NS_VAL__MASK;
111}
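
/*
 * The indexed helpers above just add the per-clock base from __offset_CLK();
 * for example, for the pixel clock the offsets work out to:
 *
 *	REG_MMSS_CC_CLK_CC(PCLK) = 0x00000000 + 0x130 = 0x00000130
 *	REG_MMSS_CC_CLK_MD(PCLK) = 0x00000004 + 0x130 = 0x00000134
 *	REG_MMSS_CC_CLK_NS(PCLK) = 0x00000008 + 0x130 = 0x00000138
 */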
112
113
114#endif /* MMSS_CC_XML */
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
new file mode 100644
index 000000000000..a225e8170b2a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -0,0 +1,48 @@
1#ifndef SFPB_XML
2#define SFPB_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
45#define REG_SFPB_CFG 0x00000058
46
47
48#endif /* SFPB_XML */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
new file mode 100644
index 000000000000..50d11df35b21
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -0,0 +1,272 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "hdmi.h"
19
20static struct platform_device *hdmi_pdev;
21
22void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
23{
24 uint32_t ctrl = 0;
25
26 if (power_on) {
27 ctrl |= HDMI_CTRL_ENABLE;
28 if (!hdmi->hdmi_mode) {
29 ctrl |= HDMI_CTRL_HDMI;
30 hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
31 ctrl &= ~HDMI_CTRL_HDMI;
32 } else {
33 ctrl |= HDMI_CTRL_HDMI;
34 }
35 } else {
36 ctrl = HDMI_CTRL_HDMI;
37 }
38
39 hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
40 DBG("HDMI Core: %s, HDMI_CTRL=0x%08x",
41 power_on ? "Enable" : "Disable", ctrl);
42}
43
44static irqreturn_t hdmi_irq(int irq, void *dev_id)
45{
46 struct hdmi *hdmi = dev_id;
47
48 /* Process HPD: */
49 hdmi_connector_irq(hdmi->connector);
50
51 /* Process DDC: */
52 hdmi_i2c_irq(hdmi->i2c);
53
54 /* TODO audio.. */
55
56 return IRQ_HANDLED;
57}
58
59void hdmi_destroy(struct kref *kref)
60{
61 struct hdmi *hdmi = container_of(kref, struct hdmi, refcount);
62 struct hdmi_phy *phy = hdmi->phy;
63
64 if (phy)
65 phy->funcs->destroy(phy);
66
67 if (hdmi->i2c)
68 hdmi_i2c_destroy(hdmi->i2c);
69
70 put_device(&hdmi->pdev->dev);
71}
72
73/* initialize connector */
74int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
75{
76 struct hdmi *hdmi = NULL;
77 struct msm_drm_private *priv = dev->dev_private;
78 struct platform_device *pdev = hdmi_pdev;
79 struct hdmi_platform_config *config;
80 int ret;
81
82 if (!pdev) {
83 dev_err(dev->dev, "no hdmi device\n");
84 ret = -ENXIO;
85 goto fail;
86 }
87
88 config = pdev->dev.platform_data;
89
90 hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
91 if (!hdmi) {
92 ret = -ENOMEM;
93 goto fail;
94 }
95
96 kref_init(&hdmi->refcount);
97
98 get_device(&pdev->dev);
99
100 hdmi->dev = dev;
101 hdmi->pdev = pdev;
102 hdmi->encoder = encoder;
103
104	/* not sure about which phy maps to which msm.. probably I'm missing some */
105 if (config->phy_init)
106 hdmi->phy = config->phy_init(hdmi);
107 else
108 hdmi->phy = ERR_PTR(-ENXIO);
109
110 if (IS_ERR(hdmi->phy)) {
111 ret = PTR_ERR(hdmi->phy);
112 dev_err(dev->dev, "failed to load phy: %d\n", ret);
113 hdmi->phy = NULL;
114 goto fail;
115 }
116
117 hdmi->mmio = msm_ioremap(pdev, "hdmi_msm_hdmi_addr", "HDMI");
118 if (IS_ERR(hdmi->mmio)) {
119 ret = PTR_ERR(hdmi->mmio);
120 goto fail;
121 }
122
123 hdmi->mvs = devm_regulator_get(&pdev->dev, "8901_hdmi_mvs");
124 if (IS_ERR(hdmi->mvs))
125 hdmi->mvs = devm_regulator_get(&pdev->dev, "hdmi_mvs");
126 if (IS_ERR(hdmi->mvs)) {
127 ret = PTR_ERR(hdmi->mvs);
128 dev_err(dev->dev, "failed to get mvs regulator: %d\n", ret);
129 goto fail;
130 }
131
132 hdmi->mpp0 = devm_regulator_get(&pdev->dev, "8901_mpp0");
133 if (IS_ERR(hdmi->mpp0))
134 hdmi->mpp0 = NULL;
135
136 hdmi->clk = devm_clk_get(&pdev->dev, "core_clk");
137 if (IS_ERR(hdmi->clk)) {
138 ret = PTR_ERR(hdmi->clk);
139 dev_err(dev->dev, "failed to get 'clk': %d\n", ret);
140 goto fail;
141 }
142
143 hdmi->m_pclk = devm_clk_get(&pdev->dev, "master_iface_clk");
144 if (IS_ERR(hdmi->m_pclk)) {
145 ret = PTR_ERR(hdmi->m_pclk);
146 dev_err(dev->dev, "failed to get 'm_pclk': %d\n", ret);
147 goto fail;
148 }
149
150 hdmi->s_pclk = devm_clk_get(&pdev->dev, "slave_iface_clk");
151 if (IS_ERR(hdmi->s_pclk)) {
152 ret = PTR_ERR(hdmi->s_pclk);
153 dev_err(dev->dev, "failed to get 's_pclk': %d\n", ret);
154 goto fail;
155 }
156
157 hdmi->i2c = hdmi_i2c_init(hdmi);
158 if (IS_ERR(hdmi->i2c)) {
159 ret = PTR_ERR(hdmi->i2c);
160 dev_err(dev->dev, "failed to get i2c: %d\n", ret);
161 hdmi->i2c = NULL;
162 goto fail;
163 }
164
165 hdmi->bridge = hdmi_bridge_init(hdmi);
166 if (IS_ERR(hdmi->bridge)) {
167 ret = PTR_ERR(hdmi->bridge);
168 dev_err(dev->dev, "failed to create HDMI bridge: %d\n", ret);
169 hdmi->bridge = NULL;
170 goto fail;
171 }
172
173 hdmi->connector = hdmi_connector_init(hdmi);
174 if (IS_ERR(hdmi->connector)) {
175 ret = PTR_ERR(hdmi->connector);
176 dev_err(dev->dev, "failed to create HDMI connector: %d\n", ret);
177 hdmi->connector = NULL;
178 goto fail;
179 }
180
181 hdmi->irq = platform_get_irq(pdev, 0);
182 if (hdmi->irq < 0) {
183 ret = hdmi->irq;
184 dev_err(dev->dev, "failed to get irq: %d\n", ret);
185 goto fail;
186 }
187
188 ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq,
189 NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
190 "hdmi_isr", hdmi);
191 if (ret < 0) {
192 dev_err(dev->dev, "failed to request IRQ%u: %d\n",
193 hdmi->irq, ret);
194 goto fail;
195 }
196
197 encoder->bridge = hdmi->bridge;
198
199 priv->bridges[priv->num_bridges++] = hdmi->bridge;
200 priv->connectors[priv->num_connectors++] = hdmi->connector;
201
202 return 0;
203
204fail:
205 if (hdmi) {
206 /* bridge/connector are normally destroyed by drm: */
207 if (hdmi->bridge)
208 hdmi->bridge->funcs->destroy(hdmi->bridge);
209 if (hdmi->connector)
210 hdmi->connector->funcs->destroy(hdmi->connector);
211 hdmi_destroy(&hdmi->refcount);
212 }
213
214 return ret;
215}
216
217/*
218 * The hdmi device:
219 */
220
221static int hdmi_dev_probe(struct platform_device *pdev)
222{
223 static struct hdmi_platform_config config = {};
224#ifdef CONFIG_OF
225 /* TODO */
226#else
227 if (cpu_is_apq8064()) {
228 config.phy_init = hdmi_phy_8960_init;
229 config.ddc_clk_gpio = 70;
230 config.ddc_data_gpio = 71;
231 config.hpd_gpio = 72;
232 config.pmic_gpio = 13 + NR_GPIO_IRQS;
233 } else if (cpu_is_msm8960()) {
234 config.phy_init = hdmi_phy_8960_init;
235 config.ddc_clk_gpio = 100;
236 config.ddc_data_gpio = 101;
237 config.hpd_gpio = 102;
238 config.pmic_gpio = -1;
239 } else if (cpu_is_msm8x60()) {
240 config.phy_init = hdmi_phy_8x60_init;
241 config.ddc_clk_gpio = 170;
242 config.ddc_data_gpio = 171;
243 config.hpd_gpio = 172;
244 config.pmic_gpio = -1;
245 }
246#endif
247 pdev->dev.platform_data = &config;
248 hdmi_pdev = pdev;
249 return 0;
250}
251
252static int hdmi_dev_remove(struct platform_device *pdev)
253{
254 hdmi_pdev = NULL;
255 return 0;
256}
257
258static struct platform_driver hdmi_driver = {
259 .probe = hdmi_dev_probe,
260 .remove = hdmi_dev_remove,
261 .driver.name = "hdmi_msm",
262};
263
264void __init hdmi_register(void)
265{
266 platform_driver_register(&hdmi_driver);
267}
268
269void __exit hdmi_unregister(void)
270{
271 platform_driver_unregister(&hdmi_driver);
272}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
new file mode 100644
index 000000000000..2c2ec566394c
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -0,0 +1,131 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __HDMI_CONNECTOR_H__
19#define __HDMI_CONNECTOR_H__
20
21#include <linux/i2c.h>
22#include <linux/clk.h>
23#include <linux/platform_device.h>
24#include <linux/regulator/consumer.h>
25
26#include "msm_drv.h"
27#include "hdmi.xml.h"
28
29
30struct hdmi_phy;
31
32struct hdmi {
33 struct kref refcount;
34
35 struct drm_device *dev;
36 struct platform_device *pdev;
37
38 void __iomem *mmio;
39
40 struct regulator *mvs; /* HDMI_5V */
41 struct regulator *mpp0; /* External 5V */
42
43 struct clk *clk;
44 struct clk *m_pclk;
45 struct clk *s_pclk;
46
47 struct hdmi_phy *phy;
48 struct i2c_adapter *i2c;
49 struct drm_connector *connector;
50 struct drm_bridge *bridge;
51
52 /* the encoder we are hooked to (outside of hdmi block) */
53 struct drm_encoder *encoder;
54
55 bool hdmi_mode; /* are we in hdmi mode? */
56
57 int irq;
58};
59
60/* platform config data (i.e. from DT, or pdata) */
61struct hdmi_platform_config {
62 struct hdmi_phy *(*phy_init)(struct hdmi *hdmi);
63 int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, pmic_gpio;
64};
65
66void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
67void hdmi_destroy(struct kref *kref);
68
69static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
70{
71 msm_writel(data, hdmi->mmio + reg);
72}
73
74static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
75{
76 return msm_readl(hdmi->mmio + reg);
77}
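
/*
 * Minimal usage sketch of the accessors above (illustrative only; the
 * function name is hypothetical and not part of the driver):
 */
static inline void hdmi_ctrl_enable_sketch(struct hdmi *hdmi)
{
	/* read-modify-write REG_HDMI_CTRL, setting only the ENABLE bit */
	u32 ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
	ctrl |= HDMI_CTRL_ENABLE;
	hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
}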
78
79static inline struct hdmi *hdmi_reference(struct hdmi *hdmi)
80{
81 kref_get(&hdmi->refcount);
82 return hdmi;
83}
84
85static inline void hdmi_unreference(struct hdmi *hdmi)
86{
87 kref_put(&hdmi->refcount, hdmi_destroy);
88}
89
90/*
91 * The phy appears to be different, for example between 8960 and 8x60,
92 * so split the phy related functions out and load the correct one at
93 * runtime:
94 */
95
96struct hdmi_phy_funcs {
97 void (*destroy)(struct hdmi_phy *phy);
98 void (*reset)(struct hdmi_phy *phy);
99 void (*powerup)(struct hdmi_phy *phy, unsigned long int pixclock);
100 void (*powerdown)(struct hdmi_phy *phy);
101};
102
103struct hdmi_phy {
104 const struct hdmi_phy_funcs *funcs;
105};
106
107struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi);
108struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi);
109
110/*
111 * hdmi bridge:
112 */
113
114struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi);
115
116/*
117 * hdmi connector:
118 */
119
120void hdmi_connector_irq(struct drm_connector *connector);
121struct drm_connector *hdmi_connector_init(struct hdmi *hdmi);
122
123/*
124 * i2c adapter for ddc:
125 */
126
127void hdmi_i2c_irq(struct i2c_adapter *i2c);
128void hdmi_i2c_destroy(struct i2c_adapter *i2c);
129struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi);
130
131#endif /* __HDMI_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
new file mode 100644
index 000000000000..f5fa4865e059
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -0,0 +1,508 @@
1#ifndef HDMI_XML
2#define HDMI_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
45enum hdmi_hdcp_key_state {
46 NO_KEYS = 0,
47 NOT_CHECKED = 1,
48 CHECKING = 2,
49 KEYS_VALID = 3,
50 AKSV_INVALID = 4,
51 CHECKSUM_MISMATCH = 5,
52};
53
54enum hdmi_ddc_read_write {
55 DDC_WRITE = 0,
56 DDC_READ = 1,
57};
58
59enum hdmi_acr_cts {
60 ACR_NONE = 0,
61 ACR_32 = 1,
62 ACR_44 = 2,
63 ACR_48 = 3,
64};
65
66#define REG_HDMI_CTRL 0x00000000
67#define HDMI_CTRL_ENABLE 0x00000001
68#define HDMI_CTRL_HDMI 0x00000002
69#define HDMI_CTRL_ENCRYPTED 0x00000004
70
71#define REG_HDMI_AUDIO_PKT_CTRL1 0x00000020
72#define HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND 0x00000001
73
74#define REG_HDMI_ACR_PKT_CTRL 0x00000024
75#define HDMI_ACR_PKT_CTRL_CONT 0x00000001
76#define HDMI_ACR_PKT_CTRL_SEND 0x00000002
77#define HDMI_ACR_PKT_CTRL_SELECT__MASK 0x00000030
78#define HDMI_ACR_PKT_CTRL_SELECT__SHIFT 4
79static inline uint32_t HDMI_ACR_PKT_CTRL_SELECT(enum hdmi_acr_cts val)
80{
81 return ((val) << HDMI_ACR_PKT_CTRL_SELECT__SHIFT) & HDMI_ACR_PKT_CTRL_SELECT__MASK;
82}
83#define HDMI_ACR_PKT_CTRL_SOURCE 0x00000100
84#define HDMI_ACR_PKT_CTRL_N_MULTIPLIER__MASK 0x00070000
85#define HDMI_ACR_PKT_CTRL_N_MULTIPLIER__SHIFT 16
86static inline uint32_t HDMI_ACR_PKT_CTRL_N_MULTIPLIER(uint32_t val)
87{
88 return ((val) << HDMI_ACR_PKT_CTRL_N_MULTIPLIER__SHIFT) & HDMI_ACR_PKT_CTRL_N_MULTIPLIER__MASK;
89}
90#define HDMI_ACR_PKT_CTRL_AUDIO_PRIORITY 0x80000000
91
92#define REG_HDMI_VBI_PKT_CTRL 0x00000028
93#define HDMI_VBI_PKT_CTRL_GC_ENABLE 0x00000010
94#define HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME 0x00000020
95#define HDMI_VBI_PKT_CTRL_ISRC_SEND 0x00000100
96#define HDMI_VBI_PKT_CTRL_ISRC_CONTINUOUS 0x00000200
97#define HDMI_VBI_PKT_CTRL_ACP_SEND 0x00001000
98#define HDMI_VBI_PKT_CTRL_ACP_SRC_SW 0x00002000
99
100#define REG_HDMI_INFOFRAME_CTRL0 0x0000002c
101#define HDMI_INFOFRAME_CTRL0_AVI_SEND 0x00000001
102#define HDMI_INFOFRAME_CTRL0_AVI_CONT 0x00000002
103#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND 0x00000010
104#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT 0x00000020
105#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE 0x00000040
106#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE 0x00000080
107
108#define REG_HDMI_GEN_PKT_CTRL 0x00000034
109#define HDMI_GEN_PKT_CTRL_GENERIC0_SEND 0x00000001
110#define HDMI_GEN_PKT_CTRL_GENERIC0_CONT 0x00000002
111#define HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__MASK 0x0000000c
112#define HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__SHIFT 2
113static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE(uint32_t val)
114{
115 return ((val) << HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__MASK;
116}
117#define HDMI_GEN_PKT_CTRL_GENERIC1_SEND 0x00000010
118#define HDMI_GEN_PKT_CTRL_GENERIC1_CONT 0x00000020
119#define HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK 0x003f0000
120#define HDMI_GEN_PKT_CTRL_GENERIC0_LINE__SHIFT 16
121static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC0_LINE(uint32_t val)
122{
123 return ((val) << HDMI_GEN_PKT_CTRL_GENERIC0_LINE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK;
124}
125#define HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK 0x3f000000
126#define HDMI_GEN_PKT_CTRL_GENERIC1_LINE__SHIFT 24
127static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC1_LINE(uint32_t val)
128{
129 return ((val) << HDMI_GEN_PKT_CTRL_GENERIC1_LINE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK;
130}
131
132#define REG_HDMI_GC 0x00000040
133#define HDMI_GC_MUTE 0x00000001
134
135#define REG_HDMI_AUDIO_PKT_CTRL2 0x00000044
136#define HDMI_AUDIO_PKT_CTRL2_OVERRIDE 0x00000001
137#define HDMI_AUDIO_PKT_CTRL2_LAYOUT 0x00000002
138
139static inline uint32_t REG_HDMI_AVI_INFO(uint32_t i0) { return 0x0000006c + 0x4*i0; }
140
141#define REG_HDMI_GENERIC0_HDR 0x00000084
142
143static inline uint32_t REG_HDMI_GENERIC0(uint32_t i0) { return 0x00000088 + 0x4*i0; }
144
145#define REG_HDMI_GENERIC1_HDR 0x000000a4
146
147static inline uint32_t REG_HDMI_GENERIC1(uint32_t i0) { return 0x000000a8 + 0x4*i0; }
148
149static inline uint32_t REG_HDMI_ACR(uint32_t i0) { return 0x000000c4 + 0x8*i0; }
150
151static inline uint32_t REG_HDMI_ACR_0(uint32_t i0) { return 0x000000c4 + 0x8*i0; }
152#define HDMI_ACR_0_CTS__MASK 0xfffff000
153#define HDMI_ACR_0_CTS__SHIFT 12
154static inline uint32_t HDMI_ACR_0_CTS(uint32_t val)
155{
156 return ((val) << HDMI_ACR_0_CTS__SHIFT) & HDMI_ACR_0_CTS__MASK;
157}
158
159static inline uint32_t REG_HDMI_ACR_1(uint32_t i0) { return 0x000000c8 + 0x8*i0; }
160#define HDMI_ACR_1_N__MASK 0xffffffff
161#define HDMI_ACR_1_N__SHIFT 0
162static inline uint32_t HDMI_ACR_1_N(uint32_t val)
163{
164 return ((val) << HDMI_ACR_1_N__SHIFT) & HDMI_ACR_1_N__MASK;
165}
166
167#define REG_HDMI_AUDIO_INFO0 0x000000e4
168#define HDMI_AUDIO_INFO0_CHECKSUM__MASK 0x000000ff
169#define HDMI_AUDIO_INFO0_CHECKSUM__SHIFT 0
170static inline uint32_t HDMI_AUDIO_INFO0_CHECKSUM(uint32_t val)
171{
172 return ((val) << HDMI_AUDIO_INFO0_CHECKSUM__SHIFT) & HDMI_AUDIO_INFO0_CHECKSUM__MASK;
173}
174#define HDMI_AUDIO_INFO0_CC__MASK 0x00000700
175#define HDMI_AUDIO_INFO0_CC__SHIFT 8
176static inline uint32_t HDMI_AUDIO_INFO0_CC(uint32_t val)
177{
178 return ((val) << HDMI_AUDIO_INFO0_CC__SHIFT) & HDMI_AUDIO_INFO0_CC__MASK;
179}
180
181#define REG_HDMI_AUDIO_INFO1 0x000000e8
182#define HDMI_AUDIO_INFO1_CA__MASK 0x000000ff
183#define HDMI_AUDIO_INFO1_CA__SHIFT 0
184static inline uint32_t HDMI_AUDIO_INFO1_CA(uint32_t val)
185{
186 return ((val) << HDMI_AUDIO_INFO1_CA__SHIFT) & HDMI_AUDIO_INFO1_CA__MASK;
187}
188#define HDMI_AUDIO_INFO1_LSV__MASK 0x00007800
189#define HDMI_AUDIO_INFO1_LSV__SHIFT 11
190static inline uint32_t HDMI_AUDIO_INFO1_LSV(uint32_t val)
191{
192 return ((val) << HDMI_AUDIO_INFO1_LSV__SHIFT) & HDMI_AUDIO_INFO1_LSV__MASK;
193}
194#define HDMI_AUDIO_INFO1_DM_INH 0x00008000
195
196#define REG_HDMI_HDCP_CTRL 0x00000110
197#define HDMI_HDCP_CTRL_ENABLE 0x00000001
198#define HDMI_HDCP_CTRL_ENCRYPTION_ENABLE 0x00000100
199
200#define REG_HDMI_HDCP_INT_CTRL 0x00000118
201
202#define REG_HDMI_HDCP_LINK0_STATUS 0x0000011c
203#define HDMI_HDCP_LINK0_STATUS_AN_0_READY 0x00000100
204#define HDMI_HDCP_LINK0_STATUS_AN_1_READY 0x00000200
205#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK 0x70000000
206#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT 28
207static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state val)
208{
209 return ((val) << HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT) & HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK;
210}
211
212#define REG_HDMI_HDCP_RESET 0x00000130
213#define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE 0x00000001
214
215#define REG_HDMI_AUDIO_CFG 0x000001d0
216#define HDMI_AUDIO_CFG_ENGINE_ENABLE 0x00000001
217#define HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK 0x000000f0
218#define HDMI_AUDIO_CFG_FIFO_WATERMARK__SHIFT 4
219static inline uint32_t HDMI_AUDIO_CFG_FIFO_WATERMARK(uint32_t val)
220{
221 return ((val) << HDMI_AUDIO_CFG_FIFO_WATERMARK__SHIFT) & HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK;
222}
223
224#define REG_HDMI_USEC_REFTIMER 0x00000208
225
226#define REG_HDMI_DDC_CTRL 0x0000020c
227#define HDMI_DDC_CTRL_GO 0x00000001
228#define HDMI_DDC_CTRL_SOFT_RESET 0x00000002
229#define HDMI_DDC_CTRL_SEND_RESET 0x00000004
230#define HDMI_DDC_CTRL_SW_STATUS_RESET 0x00000008
231#define HDMI_DDC_CTRL_TRANSACTION_CNT__MASK 0x00300000
232#define HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT 20
233static inline uint32_t HDMI_DDC_CTRL_TRANSACTION_CNT(uint32_t val)
234{
235 return ((val) << HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT) & HDMI_DDC_CTRL_TRANSACTION_CNT__MASK;
236}
237
238#define REG_HDMI_DDC_INT_CTRL 0x00000214
239#define HDMI_DDC_INT_CTRL_SW_DONE_INT 0x00000001
240#define HDMI_DDC_INT_CTRL_SW_DONE_ACK 0x00000002
241#define HDMI_DDC_INT_CTRL_SW_DONE_MASK 0x00000004
242
243#define REG_HDMI_DDC_SW_STATUS 0x00000218
244#define HDMI_DDC_SW_STATUS_NACK0 0x00001000
245#define HDMI_DDC_SW_STATUS_NACK1 0x00002000
246#define HDMI_DDC_SW_STATUS_NACK2 0x00004000
247#define HDMI_DDC_SW_STATUS_NACK3 0x00008000
248
249#define REG_HDMI_DDC_HW_STATUS 0x0000021c
250
251#define REG_HDMI_DDC_SPEED 0x00000220
252#define HDMI_DDC_SPEED_THRESHOLD__MASK 0x00000003
253#define HDMI_DDC_SPEED_THRESHOLD__SHIFT 0
254static inline uint32_t HDMI_DDC_SPEED_THRESHOLD(uint32_t val)
255{
256 return ((val) << HDMI_DDC_SPEED_THRESHOLD__SHIFT) & HDMI_DDC_SPEED_THRESHOLD__MASK;
257}
258#define HDMI_DDC_SPEED_PRESCALE__MASK 0xffff0000
259#define HDMI_DDC_SPEED_PRESCALE__SHIFT 16
260static inline uint32_t HDMI_DDC_SPEED_PRESCALE(uint32_t val)
261{
262 return ((val) << HDMI_DDC_SPEED_PRESCALE__SHIFT) & HDMI_DDC_SPEED_PRESCALE__MASK;
263}
264
265#define REG_HDMI_DDC_SETUP 0x00000224
266#define HDMI_DDC_SETUP_TIMEOUT__MASK 0xff000000
267#define HDMI_DDC_SETUP_TIMEOUT__SHIFT 24
268static inline uint32_t HDMI_DDC_SETUP_TIMEOUT(uint32_t val)
269{
270 return ((val) << HDMI_DDC_SETUP_TIMEOUT__SHIFT) & HDMI_DDC_SETUP_TIMEOUT__MASK;
271}
272
273static inline uint32_t REG_HDMI_I2C_TRANSACTION(uint32_t i0) { return 0x00000228 + 0x4*i0; }
274
275static inline uint32_t REG_HDMI_I2C_TRANSACTION_REG(uint32_t i0) { return 0x00000228 + 0x4*i0; }
276#define HDMI_I2C_TRANSACTION_REG_RW__MASK 0x00000001
277#define HDMI_I2C_TRANSACTION_REG_RW__SHIFT 0
278static inline uint32_t HDMI_I2C_TRANSACTION_REG_RW(enum hdmi_ddc_read_write val)
279{
280 return ((val) << HDMI_I2C_TRANSACTION_REG_RW__SHIFT) & HDMI_I2C_TRANSACTION_REG_RW__MASK;
281}
282#define HDMI_I2C_TRANSACTION_REG_STOP_ON_NACK 0x00000100
283#define HDMI_I2C_TRANSACTION_REG_START 0x00001000
284#define HDMI_I2C_TRANSACTION_REG_STOP 0x00002000
285#define HDMI_I2C_TRANSACTION_REG_CNT__MASK 0x00ff0000
286#define HDMI_I2C_TRANSACTION_REG_CNT__SHIFT 16
287static inline uint32_t HDMI_I2C_TRANSACTION_REG_CNT(uint32_t val)
288{
289 return ((val) << HDMI_I2C_TRANSACTION_REG_CNT__SHIFT) & HDMI_I2C_TRANSACTION_REG_CNT__MASK;
290}
291
292#define REG_HDMI_DDC_DATA 0x00000238
293#define HDMI_DDC_DATA_DATA_RW__MASK 0x00000001
294#define HDMI_DDC_DATA_DATA_RW__SHIFT 0
295static inline uint32_t HDMI_DDC_DATA_DATA_RW(enum hdmi_ddc_read_write val)
296{
297 return ((val) << HDMI_DDC_DATA_DATA_RW__SHIFT) & HDMI_DDC_DATA_DATA_RW__MASK;
298}
299#define HDMI_DDC_DATA_DATA__MASK 0x0000ff00
300#define HDMI_DDC_DATA_DATA__SHIFT 8
301static inline uint32_t HDMI_DDC_DATA_DATA(uint32_t val)
302{
303 return ((val) << HDMI_DDC_DATA_DATA__SHIFT) & HDMI_DDC_DATA_DATA__MASK;
304}
305#define HDMI_DDC_DATA_INDEX__MASK 0x00ff0000
306#define HDMI_DDC_DATA_INDEX__SHIFT 16
307static inline uint32_t HDMI_DDC_DATA_INDEX(uint32_t val)
308{
309 return ((val) << HDMI_DDC_DATA_INDEX__SHIFT) & HDMI_DDC_DATA_INDEX__MASK;
310}
311#define HDMI_DDC_DATA_INDEX_WRITE 0x80000000
312
313#define REG_HDMI_HPD_INT_STATUS 0x00000250
314#define HDMI_HPD_INT_STATUS_INT 0x00000001
315#define HDMI_HPD_INT_STATUS_CABLE_DETECTED 0x00000002
316
317#define REG_HDMI_HPD_INT_CTRL 0x00000254
318#define HDMI_HPD_INT_CTRL_INT_ACK 0x00000001
319#define HDMI_HPD_INT_CTRL_INT_CONNECT 0x00000002
320#define HDMI_HPD_INT_CTRL_INT_EN 0x00000004
321#define HDMI_HPD_INT_CTRL_RX_INT_ACK 0x00000010
322#define HDMI_HPD_INT_CTRL_RX_INT_EN 0x00000020
323#define HDMI_HPD_INT_CTRL_RCV_PLUGIN_DET_MASK 0x00000200
324
325#define REG_HDMI_HPD_CTRL 0x00000258
326#define HDMI_HPD_CTRL_TIMEOUT__MASK 0x00001fff
327#define HDMI_HPD_CTRL_TIMEOUT__SHIFT 0
328static inline uint32_t HDMI_HPD_CTRL_TIMEOUT(uint32_t val)
329{
330 return ((val) << HDMI_HPD_CTRL_TIMEOUT__SHIFT) & HDMI_HPD_CTRL_TIMEOUT__MASK;
331}
332#define HDMI_HPD_CTRL_ENABLE 0x10000000
333
334#define REG_HDMI_DDC_REF 0x0000027c
335#define HDMI_DDC_REF_REFTIMER_ENABLE 0x00010000
336#define HDMI_DDC_REF_REFTIMER__MASK 0x0000ffff
337#define HDMI_DDC_REF_REFTIMER__SHIFT 0
338static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val)
339{
340 return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK;
341}
342
343#define REG_HDMI_ACTIVE_HSYNC 0x000002b4
344#define HDMI_ACTIVE_HSYNC_START__MASK 0x00000fff
345#define HDMI_ACTIVE_HSYNC_START__SHIFT 0
346static inline uint32_t HDMI_ACTIVE_HSYNC_START(uint32_t val)
347{
348 return ((val) << HDMI_ACTIVE_HSYNC_START__SHIFT) & HDMI_ACTIVE_HSYNC_START__MASK;
349}
350#define HDMI_ACTIVE_HSYNC_END__MASK 0x0fff0000
351#define HDMI_ACTIVE_HSYNC_END__SHIFT 16
352static inline uint32_t HDMI_ACTIVE_HSYNC_END(uint32_t val)
353{
354 return ((val) << HDMI_ACTIVE_HSYNC_END__SHIFT) & HDMI_ACTIVE_HSYNC_END__MASK;
355}
356
357#define REG_HDMI_ACTIVE_VSYNC 0x000002b8
358#define HDMI_ACTIVE_VSYNC_START__MASK 0x00000fff
359#define HDMI_ACTIVE_VSYNC_START__SHIFT 0
360static inline uint32_t HDMI_ACTIVE_VSYNC_START(uint32_t val)
361{
362 return ((val) << HDMI_ACTIVE_VSYNC_START__SHIFT) & HDMI_ACTIVE_VSYNC_START__MASK;
363}
364#define HDMI_ACTIVE_VSYNC_END__MASK 0x0fff0000
365#define HDMI_ACTIVE_VSYNC_END__SHIFT 16
366static inline uint32_t HDMI_ACTIVE_VSYNC_END(uint32_t val)
367{
368 return ((val) << HDMI_ACTIVE_VSYNC_END__SHIFT) & HDMI_ACTIVE_VSYNC_END__MASK;
369}
370
371#define REG_HDMI_VSYNC_ACTIVE_F2 0x000002bc
372#define HDMI_VSYNC_ACTIVE_F2_START__MASK 0x00000fff
373#define HDMI_VSYNC_ACTIVE_F2_START__SHIFT 0
374static inline uint32_t HDMI_VSYNC_ACTIVE_F2_START(uint32_t val)
375{
376 return ((val) << HDMI_VSYNC_ACTIVE_F2_START__SHIFT) & HDMI_VSYNC_ACTIVE_F2_START__MASK;
377}
378#define HDMI_VSYNC_ACTIVE_F2_END__MASK 0x0fff0000
379#define HDMI_VSYNC_ACTIVE_F2_END__SHIFT 16
380static inline uint32_t HDMI_VSYNC_ACTIVE_F2_END(uint32_t val)
381{
382 return ((val) << HDMI_VSYNC_ACTIVE_F2_END__SHIFT) & HDMI_VSYNC_ACTIVE_F2_END__MASK;
383}
384
385#define REG_HDMI_TOTAL 0x000002c0
386#define HDMI_TOTAL_H_TOTAL__MASK 0x00000fff
387#define HDMI_TOTAL_H_TOTAL__SHIFT 0
388static inline uint32_t HDMI_TOTAL_H_TOTAL(uint32_t val)
389{
390 return ((val) << HDMI_TOTAL_H_TOTAL__SHIFT) & HDMI_TOTAL_H_TOTAL__MASK;
391}
392#define HDMI_TOTAL_V_TOTAL__MASK 0x0fff0000
393#define HDMI_TOTAL_V_TOTAL__SHIFT 16
394static inline uint32_t HDMI_TOTAL_V_TOTAL(uint32_t val)
395{
396 return ((val) << HDMI_TOTAL_V_TOTAL__SHIFT) & HDMI_TOTAL_V_TOTAL__MASK;
397}
398
399#define REG_HDMI_VSYNC_TOTAL_F2 0x000002c4
400#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK 0x00000fff
401#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT 0
402static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val)
403{
404 return ((val) << HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT) & HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK;
405}
406
407#define REG_HDMI_FRAME_CTRL 0x000002c8
408#define HDMI_FRAME_CTRL_RGB_MUX_SEL_BGR 0x00001000
409#define HDMI_FRAME_CTRL_VSYNC_LOW 0x10000000
410#define HDMI_FRAME_CTRL_HSYNC_LOW 0x20000000
411#define HDMI_FRAME_CTRL_INTERLACED_EN 0x80000000
412
413#define REG_HDMI_PHY_CTRL 0x000002d4
414#define HDMI_PHY_CTRL_SW_RESET_PLL 0x00000001
415#define HDMI_PHY_CTRL_SW_RESET_PLL_LOW 0x00000002
416#define HDMI_PHY_CTRL_SW_RESET 0x00000004
417#define HDMI_PHY_CTRL_SW_RESET_LOW 0x00000008
418
419#define REG_HDMI_AUD_INT 0x000002cc
420#define HDMI_AUD_INT_AUD_FIFO_URUN_INT 0x00000001
421#define HDMI_AUD_INT_AUD_FIFO_URAN_MASK 0x00000002
422#define HDMI_AUD_INT_AUD_SAM_DROP_INT 0x00000004
423#define HDMI_AUD_INT_AUD_SAM_DROP_MASK 0x00000008
424
425#define REG_HDMI_8x60_PHY_REG0 0x00000300
426#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c
427#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT 2
428static inline uint32_t HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(uint32_t val)
429{
430 return ((val) << HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT) & HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK;
431}
432
433#define REG_HDMI_8x60_PHY_REG1 0x00000304
434#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK 0x000000f0
435#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT 4
436static inline uint32_t HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(uint32_t val)
437{
438 return ((val) << HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT) & HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK;
439}
440#define HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK 0x0000000f
441#define HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT 0
442static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val)
443{
444 return ((val) << HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT) & HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK;
445}
446
447#define REG_HDMI_8x60_PHY_REG2 0x00000308
448#define HDMI_8x60_PHY_REG2_PD_DESER 0x00000001
449#define HDMI_8x60_PHY_REG2_PD_DRIVE_1 0x00000002
450#define HDMI_8x60_PHY_REG2_PD_DRIVE_2 0x00000004
451#define HDMI_8x60_PHY_REG2_PD_DRIVE_3 0x00000008
452#define HDMI_8x60_PHY_REG2_PD_DRIVE_4 0x00000010
453#define HDMI_8x60_PHY_REG2_PD_PLL 0x00000020
454#define HDMI_8x60_PHY_REG2_PD_PWRGEN 0x00000040
455#define HDMI_8x60_PHY_REG2_RCV_SENSE_EN 0x00000080
456
457#define REG_HDMI_8x60_PHY_REG3 0x0000030c
458#define HDMI_8x60_PHY_REG3_PLL_ENABLE 0x00000001
459
460#define REG_HDMI_8x60_PHY_REG4 0x00000310
461
462#define REG_HDMI_8x60_PHY_REG5 0x00000314
463
464#define REG_HDMI_8x60_PHY_REG6 0x00000318
465
466#define REG_HDMI_8x60_PHY_REG7 0x0000031c
467
468#define REG_HDMI_8x60_PHY_REG8 0x00000320
469
470#define REG_HDMI_8x60_PHY_REG9 0x00000324
471
472#define REG_HDMI_8x60_PHY_REG10 0x00000328
473
474#define REG_HDMI_8x60_PHY_REG11 0x0000032c
475
476#define REG_HDMI_8x60_PHY_REG12 0x00000330
477#define HDMI_8x60_PHY_REG12_RETIMING_EN 0x00000001
478#define HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN 0x00000002
479#define HDMI_8x60_PHY_REG12_FORCE_LOCK 0x00000010
480
481#define REG_HDMI_8960_PHY_REG0 0x00000400
482
483#define REG_HDMI_8960_PHY_REG1 0x00000404
484
485#define REG_HDMI_8960_PHY_REG2 0x00000408
486
487#define REG_HDMI_8960_PHY_REG3 0x0000040c
488
489#define REG_HDMI_8960_PHY_REG4 0x00000410
490
491#define REG_HDMI_8960_PHY_REG5 0x00000414
492
493#define REG_HDMI_8960_PHY_REG6 0x00000418
494
495#define REG_HDMI_8960_PHY_REG7 0x0000041c
496
497#define REG_HDMI_8960_PHY_REG8 0x00000420
498
499#define REG_HDMI_8960_PHY_REG9 0x00000424
500
501#define REG_HDMI_8960_PHY_REG10 0x00000428
502
503#define REG_HDMI_8960_PHY_REG11 0x0000042c
504
505#define REG_HDMI_8960_PHY_REG12 0x00000430
506
507
508#endif /* HDMI_XML */
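
The generated header uses one pattern throughout: a REG_* offset per register, bare #defines for single-bit fields, and a __MASK/__SHIFT pair with an inline packer for multi-bit fields. A small example of composing register values with these helpers, mirroring init_ddc() in hdmi_i2c.c further down (hdmi_write() is the MMIO accessor declared in hdmi.h):

/* Illustrative only: program the DDC clock divider and setup timeout
 * using the generated field packers. */
static void ddc_speed_example(struct hdmi *hdmi)
{
	hdmi_write(hdmi, REG_HDMI_DDC_SPEED,
			HDMI_DDC_SPEED_THRESHOLD(2) |	/* bits 1:0 */
			HDMI_DDC_SPEED_PRESCALE(10));	/* bits 31:16 */

	hdmi_write(hdmi, REG_HDMI_DDC_SETUP,
			HDMI_DDC_SETUP_TIMEOUT(0xff));	/* bits 31:24 */
}
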
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
new file mode 100644
index 000000000000..5a8ee3473cf5
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -0,0 +1,167 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "hdmi.h"
19
20struct hdmi_bridge {
21 struct drm_bridge base;
22
23 struct hdmi *hdmi;
24
25 unsigned long int pixclock;
26};
27#define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base)
28
29static void hdmi_bridge_destroy(struct drm_bridge *bridge)
30{
31 struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
32 hdmi_unreference(hdmi_bridge->hdmi);
33 drm_bridge_cleanup(bridge);
34 kfree(hdmi_bridge);
35}
36
37static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
38{
39 struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
40 struct hdmi *hdmi = hdmi_bridge->hdmi;
41 struct hdmi_phy *phy = hdmi->phy;
42
43 DBG("power up");
44 phy->funcs->powerup(phy, hdmi_bridge->pixclock);
45 hdmi_set_mode(hdmi, true);
46}
47
48static void hdmi_bridge_enable(struct drm_bridge *bridge)
49{
50}
51
52static void hdmi_bridge_disable(struct drm_bridge *bridge)
53{
54}
55
56static void hdmi_bridge_post_disable(struct drm_bridge *bridge)
57{
58 struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
59 struct hdmi *hdmi = hdmi_bridge->hdmi;
60 struct hdmi_phy *phy = hdmi->phy;
61
62 DBG("power down");
63 hdmi_set_mode(hdmi, false);
64 phy->funcs->powerdown(phy);
65}
66
67static void hdmi_bridge_mode_set(struct drm_bridge *bridge,
68 struct drm_display_mode *mode,
69 struct drm_display_mode *adjusted_mode)
70{
71 struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
72 struct hdmi *hdmi = hdmi_bridge->hdmi;
73 int hstart, hend, vstart, vend;
74 uint32_t frame_ctrl;
75
76 mode = adjusted_mode;
77
78 hdmi_bridge->pixclock = mode->clock * 1000;
79
80 hdmi->hdmi_mode = drm_match_cea_mode(mode) > 1;
81
82 hstart = mode->htotal - mode->hsync_start;
83 hend = mode->htotal - mode->hsync_start + mode->hdisplay;
84
85 vstart = mode->vtotal - mode->vsync_start - 1;
86 vend = mode->vtotal - mode->vsync_start + mode->vdisplay - 1;
87
88 DBG("htotal=%d, vtotal=%d, hstart=%d, hend=%d, vstart=%d, vend=%d",
89 mode->htotal, mode->vtotal, hstart, hend, vstart, vend);
90
91 hdmi_write(hdmi, REG_HDMI_TOTAL,
92 HDMI_TOTAL_H_TOTAL(mode->htotal - 1) |
93 HDMI_TOTAL_V_TOTAL(mode->vtotal - 1));
94
95 hdmi_write(hdmi, REG_HDMI_ACTIVE_HSYNC,
96 HDMI_ACTIVE_HSYNC_START(hstart) |
97 HDMI_ACTIVE_HSYNC_END(hend));
98 hdmi_write(hdmi, REG_HDMI_ACTIVE_VSYNC,
99 HDMI_ACTIVE_VSYNC_START(vstart) |
100 HDMI_ACTIVE_VSYNC_END(vend));
101
102 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
103 hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
104 HDMI_VSYNC_TOTAL_F2_V_TOTAL(mode->vtotal));
105 hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
106 HDMI_VSYNC_ACTIVE_F2_START(vstart + 1) |
107 HDMI_VSYNC_ACTIVE_F2_END(vend + 1));
108 } else {
109 hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
110 HDMI_VSYNC_TOTAL_F2_V_TOTAL(0));
111 hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
112 HDMI_VSYNC_ACTIVE_F2_START(0) |
113 HDMI_VSYNC_ACTIVE_F2_END(0));
114 }
115
116 frame_ctrl = 0;
117 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
118 frame_ctrl |= HDMI_FRAME_CTRL_HSYNC_LOW;
119 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
120 frame_ctrl |= HDMI_FRAME_CTRL_VSYNC_LOW;
121 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
122 frame_ctrl |= HDMI_FRAME_CTRL_INTERLACED_EN;
123 DBG("frame_ctrl=%08x", frame_ctrl);
124 hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
125
126	/* TODO until we have audio, this might be safest: */
127 if (hdmi->hdmi_mode)
128 hdmi_write(hdmi, REG_HDMI_GC, HDMI_GC_MUTE);
129}
130
131static const struct drm_bridge_funcs hdmi_bridge_funcs = {
132 .pre_enable = hdmi_bridge_pre_enable,
133 .enable = hdmi_bridge_enable,
134 .disable = hdmi_bridge_disable,
135 .post_disable = hdmi_bridge_post_disable,
136 .mode_set = hdmi_bridge_mode_set,
137 .destroy = hdmi_bridge_destroy,
138};
139
140
141/* initialize bridge */
142struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi)
143{
144 struct drm_bridge *bridge = NULL;
145 struct hdmi_bridge *hdmi_bridge;
146 int ret;
147
148 hdmi_bridge = kzalloc(sizeof(*hdmi_bridge), GFP_KERNEL);
149 if (!hdmi_bridge) {
150 ret = -ENOMEM;
151 goto fail;
152 }
153
154 hdmi_bridge->hdmi = hdmi_reference(hdmi);
155
156 bridge = &hdmi_bridge->base;
157
158 drm_bridge_init(hdmi->dev, bridge, &hdmi_bridge_funcs);
159
160 return bridge;
161
162fail:
163 if (bridge)
164 hdmi_bridge_destroy(bridge);
165
166 return ERR_PTR(ret);
167}
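
The arithmetic in hdmi_bridge_mode_set() converts DRM's timings (sync position counted from the start of active video) into what the hardware expects (active start/end counted from the leading edge of sync). A worked example, as comments only, using the standard CEA-861 1280x720@60 timings:

/*
 * 1280x720@60 (CEA-861): hdisplay=1280, hsync_start=1390, htotal=1650
 *                        vdisplay=720,  vsync_start=725,  vtotal=750
 *
 * hstart = htotal - hsync_start                 = 1650 - 1390      = 260
 * hend   = htotal - hsync_start + hdisplay      = 260 + 1280       = 1540
 * vstart = vtotal - vsync_start - 1             = 750 - 725 - 1    = 24
 * vend   = vtotal - vsync_start + vdisplay - 1  = 25 + 720 - 1     = 744
 *
 * REG_HDMI_TOTAL is then written with htotal - 1 = 1649 and
 * vtotal - 1 = 749, and pixclock becomes 74250000 (74.25 MHz).
 */
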
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
new file mode 100644
index 000000000000..823eee521a31
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -0,0 +1,367 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/gpio.h>
19
20#include "hdmi.h"
21
22struct hdmi_connector {
23 struct drm_connector base;
24 struct hdmi *hdmi;
25};
26#define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
27
28static int gpio_config(struct hdmi *hdmi, bool on)
29{
30 struct drm_device *dev = hdmi->dev;
31 struct hdmi_platform_config *config =
32 hdmi->pdev->dev.platform_data;
33 int ret;
34
35 if (on) {
36 ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK");
37 if (ret) {
38 dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
39 "HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
40 goto error1;
41 }
42 ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
43 if (ret) {
44 dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
45 "HDMI_DDC_DATA", config->ddc_data_gpio, ret);
46 goto error2;
47 }
48 ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
49 if (ret) {
50 dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
51 "HDMI_HPD", config->hpd_gpio, ret);
52 goto error3;
53 }
54 if (config->pmic_gpio != -1) {
55 ret = gpio_request(config->pmic_gpio, "PMIC_HDMI_MUX_SEL");
56 if (ret) {
57 dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
58 "PMIC_HDMI_MUX_SEL", config->pmic_gpio, ret);
59 goto error4;
60 }
61 gpio_set_value_cansleep(config->pmic_gpio, 0);
62 }
63 DBG("gpio on");
64 } else {
65 gpio_free(config->ddc_clk_gpio);
66 gpio_free(config->ddc_data_gpio);
67 gpio_free(config->hpd_gpio);
68
69 if (config->pmic_gpio != -1) {
70 gpio_set_value_cansleep(config->pmic_gpio, 1);
71 gpio_free(config->pmic_gpio);
72 }
73 DBG("gpio off");
74 }
75
76 return 0;
77
78error4:
79 gpio_free(config->hpd_gpio);
80error3:
81 gpio_free(config->ddc_data_gpio);
82error2:
83 gpio_free(config->ddc_clk_gpio);
84error1:
85 return ret;
86}
87
88static int hpd_enable(struct hdmi_connector *hdmi_connector)
89{
90 struct hdmi *hdmi = hdmi_connector->hdmi;
91 struct drm_device *dev = hdmi_connector->base.dev;
92 struct hdmi_phy *phy = hdmi->phy;
93 uint32_t hpd_ctrl;
94 int ret;
95
96 ret = gpio_config(hdmi, true);
97 if (ret) {
98 dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret);
99 goto fail;
100 }
101
102 ret = clk_prepare_enable(hdmi->clk);
103 if (ret) {
104 dev_err(dev->dev, "failed to enable 'clk': %d\n", ret);
105 goto fail;
106 }
107
108 ret = clk_prepare_enable(hdmi->m_pclk);
109 if (ret) {
110 dev_err(dev->dev, "failed to enable 'm_pclk': %d\n", ret);
111 goto fail;
112 }
113
114 ret = clk_prepare_enable(hdmi->s_pclk);
115 if (ret) {
116 dev_err(dev->dev, "failed to enable 's_pclk': %d\n", ret);
117 goto fail;
118 }
119
120 if (hdmi->mpp0)
121 ret = regulator_enable(hdmi->mpp0);
122 if (!ret)
123 ret = regulator_enable(hdmi->mvs);
124 if (ret) {
125 dev_err(dev->dev, "failed to enable regulators: %d\n", ret);
126 goto fail;
127 }
128
129 hdmi_set_mode(hdmi, false);
130 phy->funcs->reset(phy);
131 hdmi_set_mode(hdmi, true);
132
133 hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
134
135 /* enable HPD events: */
136 hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
137 HDMI_HPD_INT_CTRL_INT_CONNECT |
138 HDMI_HPD_INT_CTRL_INT_EN);
139
140 /* set timeout to 4.1ms (max) for hardware debounce */
141 hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
142 hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff);
143
144 /* Toggle HPD circuit to trigger HPD sense */
145 hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
146 ~HDMI_HPD_CTRL_ENABLE & hpd_ctrl);
147 hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
148 HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
149
150 return 0;
151
152fail:
153 return ret;
154}
155
156static int hdp_disable(struct hdmi_connector *hdmi_connector)
157{
158 struct hdmi *hdmi = hdmi_connector->hdmi;
159 struct drm_device *dev = hdmi_connector->base.dev;
160 int ret = 0;
161
162 /* Disable HPD interrupt */
163 hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
164
165 hdmi_set_mode(hdmi, false);
166
167 if (hdmi->mpp0)
168 ret = regulator_disable(hdmi->mpp0);
169 if (!ret)
170 ret = regulator_disable(hdmi->mvs);
171 if (ret) {
172		dev_err(dev->dev, "failed to disable regulators: %d\n", ret);
173 goto fail;
174 }
175
176 clk_disable_unprepare(hdmi->clk);
177 clk_disable_unprepare(hdmi->m_pclk);
178 clk_disable_unprepare(hdmi->s_pclk);
179
180 ret = gpio_config(hdmi, false);
181 if (ret) {
182 dev_err(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
183 goto fail;
184 }
185
186 return 0;
187
188fail:
189 return ret;
190}
191
192void hdmi_connector_irq(struct drm_connector *connector)
193{
194 struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
195 struct hdmi *hdmi = hdmi_connector->hdmi;
196 uint32_t hpd_int_status, hpd_int_ctrl;
197
198 /* Process HPD: */
199 hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
200 hpd_int_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_INT_CTRL);
201
202 if ((hpd_int_ctrl & HDMI_HPD_INT_CTRL_INT_EN) &&
203 (hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
204 bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED);
205
206 DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
207
208 /* ack the irq: */
209 hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
210 hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK);
211
212 drm_helper_hpd_irq_event(connector->dev);
213
214		/* detect disconnect if we are connected or vice versa: */
215 hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
216 if (!detected)
217 hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
218 hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
219 }
220}
221
222static enum drm_connector_status hdmi_connector_detect(
223 struct drm_connector *connector, bool force)
224{
225 struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
226 struct hdmi *hdmi = hdmi_connector->hdmi;
227 uint32_t hpd_int_status;
228 int retry = 20;
229
230 hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
231
232	/* the sense line is sometimes momentarily de-asserted; don't let
233	 * that trick us into thinking the monitor is gone:
234	 */
235 while (retry-- && !(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED)) {
236 mdelay(10);
237 hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
238 DBG("status=%08x", hpd_int_status);
239 }
240
241 return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
242 connector_status_connected : connector_status_disconnected;
243}
244
245static void hdmi_connector_destroy(struct drm_connector *connector)
246{
247 struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
248
249 hdp_disable(hdmi_connector);
250
251 drm_sysfs_connector_remove(connector);
252 drm_connector_cleanup(connector);
253
254 hdmi_unreference(hdmi_connector->hdmi);
255
256 kfree(hdmi_connector);
257}
258
259static int hdmi_connector_get_modes(struct drm_connector *connector)
260{
261 struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
262 struct hdmi *hdmi = hdmi_connector->hdmi;
263 struct edid *edid;
264 uint32_t hdmi_ctrl;
265 int ret = 0;
266
267 hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
268 hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
269
270 edid = drm_get_edid(connector, hdmi->i2c);
271
272 hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
273
274 drm_mode_connector_update_edid_property(connector, edid);
275
276 if (edid) {
277 ret = drm_add_edid_modes(connector, edid);
278 kfree(edid);
279 }
280
281 return ret;
282}
283
284static int hdmi_connector_mode_valid(struct drm_connector *connector,
285 struct drm_display_mode *mode)
286{
287 struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
288 struct msm_drm_private *priv = connector->dev->dev_private;
289 struct msm_kms *kms = priv->kms;
290 long actual, requested;
291
292 requested = 1000 * mode->clock;
293 actual = kms->funcs->round_pixclk(kms,
294 requested, hdmi_connector->hdmi->encoder);
295
296 DBG("requested=%ld, actual=%ld", requested, actual);
297
298 if (actual != requested)
299 return MODE_CLOCK_RANGE;
300
301 return 0;
302}
303
304static struct drm_encoder *
305hdmi_connector_best_encoder(struct drm_connector *connector)
306{
307 struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
308 return hdmi_connector->hdmi->encoder;
309}
310
311static const struct drm_connector_funcs hdmi_connector_funcs = {
312 .dpms = drm_helper_connector_dpms,
313 .detect = hdmi_connector_detect,
314 .fill_modes = drm_helper_probe_single_connector_modes,
315 .destroy = hdmi_connector_destroy,
316};
317
318static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
319 .get_modes = hdmi_connector_get_modes,
320 .mode_valid = hdmi_connector_mode_valid,
321 .best_encoder = hdmi_connector_best_encoder,
322};
323
324/* initialize connector */
325struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
326{
327 struct drm_connector *connector = NULL;
328 struct hdmi_connector *hdmi_connector;
329 int ret;
330
331 hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
332 if (!hdmi_connector) {
333 ret = -ENOMEM;
334 goto fail;
335 }
336
337 hdmi_connector->hdmi = hdmi_reference(hdmi);
338
339 connector = &hdmi_connector->base;
340
341 drm_connector_init(hdmi->dev, connector, &hdmi_connector_funcs,
342 DRM_MODE_CONNECTOR_HDMIA);
343 drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
344
345 connector->polled = DRM_CONNECTOR_POLL_HPD;
346
347 connector->interlace_allowed = 1;
348 connector->doublescan_allowed = 0;
349
350 drm_sysfs_connector_add(connector);
351
352 ret = hpd_enable(hdmi_connector);
353 if (ret) {
354 dev_err(hdmi->dev->dev, "failed to enable HPD: %d\n", ret);
355 goto fail;
356 }
357
358 drm_mode_connector_attach_encoder(connector, hdmi->encoder);
359
360 return connector;
361
362fail:
363 if (connector)
364 hdmi_connector_destroy(connector);
365
366 return ERR_PTR(ret);
367}
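
Like the other *_init() helpers in this patch, hdmi_connector_init() returns either a valid connector or ERR_PTR(); it also enables HPD and attaches the encoder before returning, so a caller only has to check the result. A hedged usage sketch (the real hookup lives in the msm modeset code, outside this excerpt):

/* Illustrative caller only. */
static int hdmi_modeset_init_sketch(struct hdmi *hdmi)
{
	struct drm_connector *connector = hdmi_connector_init(hdmi);

	if (IS_ERR(connector)) {
		dev_err(hdmi->dev->dev, "failed to init hdmi connector: %ld\n",
				PTR_ERR(connector));
		return PTR_ERR(connector);
	}

	/* HPD is already enabled and the encoder already attached, so
	 * hotplug events will now arrive via hdmi_connector_irq(). */
	return 0;
}
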
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
new file mode 100644
index 000000000000..f4ab7f70fed1
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -0,0 +1,281 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "hdmi.h"
19
20struct hdmi_i2c_adapter {
21 struct i2c_adapter base;
22 struct hdmi *hdmi;
23 bool sw_done;
24 wait_queue_head_t ddc_event;
25};
26#define to_hdmi_i2c_adapter(x) container_of(x, struct hdmi_i2c_adapter, base)
27
28static void init_ddc(struct hdmi_i2c_adapter *hdmi_i2c)
29{
30 struct hdmi *hdmi = hdmi_i2c->hdmi;
31
32 hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
33 HDMI_DDC_CTRL_SW_STATUS_RESET);
34 hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
35 HDMI_DDC_CTRL_SOFT_RESET);
36
37 hdmi_write(hdmi, REG_HDMI_DDC_SPEED,
38 HDMI_DDC_SPEED_THRESHOLD(2) |
39 HDMI_DDC_SPEED_PRESCALE(10));
40
41 hdmi_write(hdmi, REG_HDMI_DDC_SETUP,
42 HDMI_DDC_SETUP_TIMEOUT(0xff));
43
44 /* enable reference timer for 27us */
45 hdmi_write(hdmi, REG_HDMI_DDC_REF,
46 HDMI_DDC_REF_REFTIMER_ENABLE |
47 HDMI_DDC_REF_REFTIMER(27));
48}
49
50static int ddc_clear_irq(struct hdmi_i2c_adapter *hdmi_i2c)
51{
52 struct hdmi *hdmi = hdmi_i2c->hdmi;
53 struct drm_device *dev = hdmi->dev;
54 uint32_t retry = 0xffff;
55 uint32_t ddc_int_ctrl;
56
57 do {
58 --retry;
59
60 hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL,
61 HDMI_DDC_INT_CTRL_SW_DONE_ACK |
62 HDMI_DDC_INT_CTRL_SW_DONE_MASK);
63
64 ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL);
65
66 } while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry);
67
68 if (!retry) {
69 dev_err(dev->dev, "timeout waiting for DDC\n");
70 return -ETIMEDOUT;
71 }
72
73 hdmi_i2c->sw_done = false;
74
75 return 0;
76}
77
78#define MAX_TRANSACTIONS 4
79
80static bool sw_done(struct hdmi_i2c_adapter *hdmi_i2c)
81{
82 struct hdmi *hdmi = hdmi_i2c->hdmi;
83
84 if (!hdmi_i2c->sw_done) {
85 uint32_t ddc_int_ctrl;
86
87 ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL);
88
89 if ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_MASK) &&
90 (ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT)) {
91 hdmi_i2c->sw_done = true;
92 hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL,
93 HDMI_DDC_INT_CTRL_SW_DONE_ACK);
94 }
95 }
96
97 return hdmi_i2c->sw_done;
98}
99
100static int hdmi_i2c_xfer(struct i2c_adapter *i2c,
101 struct i2c_msg *msgs, int num)
102{
103 struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
104 struct hdmi *hdmi = hdmi_i2c->hdmi;
105 struct drm_device *dev = hdmi->dev;
106 static const uint32_t nack[] = {
107 HDMI_DDC_SW_STATUS_NACK0, HDMI_DDC_SW_STATUS_NACK1,
108 HDMI_DDC_SW_STATUS_NACK2, HDMI_DDC_SW_STATUS_NACK3,
109 };
110 int indices[MAX_TRANSACTIONS];
111 int ret, i, j, index = 0;
112 uint32_t ddc_status, ddc_data, i2c_trans;
113
114 num = min(num, MAX_TRANSACTIONS);
115
116 WARN_ON(!(hdmi_read(hdmi, REG_HDMI_CTRL) & HDMI_CTRL_ENABLE));
117
118 if (num == 0)
119 return num;
120
121 init_ddc(hdmi_i2c);
122
123 ret = ddc_clear_irq(hdmi_i2c);
124 if (ret)
125 return ret;
126
127 for (i = 0; i < num; i++) {
128 struct i2c_msg *p = &msgs[i];
129 uint32_t raw_addr = p->addr << 1;
130
131 if (p->flags & I2C_M_RD)
132 raw_addr |= 1;
133
134 ddc_data = HDMI_DDC_DATA_DATA(raw_addr) |
135 HDMI_DDC_DATA_DATA_RW(DDC_WRITE);
136
137 if (i == 0) {
138 ddc_data |= HDMI_DDC_DATA_INDEX(0) |
139 HDMI_DDC_DATA_INDEX_WRITE;
140 }
141
142 hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
143 index++;
144
145 indices[i] = index;
146
147 if (p->flags & I2C_M_RD) {
148 index += p->len;
149 } else {
150 for (j = 0; j < p->len; j++) {
151 ddc_data = HDMI_DDC_DATA_DATA(p->buf[j]) |
152 HDMI_DDC_DATA_DATA_RW(DDC_WRITE);
153 hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
154 index++;
155 }
156 }
157
158 i2c_trans = HDMI_I2C_TRANSACTION_REG_CNT(p->len) |
159 HDMI_I2C_TRANSACTION_REG_RW(
160 (p->flags & I2C_M_RD) ? DDC_READ : DDC_WRITE) |
161 HDMI_I2C_TRANSACTION_REG_START;
162
163 if (i == (num - 1))
164 i2c_trans |= HDMI_I2C_TRANSACTION_REG_STOP;
165
166 hdmi_write(hdmi, REG_HDMI_I2C_TRANSACTION(i), i2c_trans);
167 }
168
169 /* trigger the transfer: */
170 hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
171 HDMI_DDC_CTRL_TRANSACTION_CNT(num - 1) |
172 HDMI_DDC_CTRL_GO);
173
174 ret = wait_event_timeout(hdmi_i2c->ddc_event, sw_done(hdmi_i2c), HZ/4);
175 if (ret <= 0) {
176 if (ret == 0)
177 ret = -ETIMEDOUT;
178 dev_warn(dev->dev, "DDC timeout: %d\n", ret);
179 DBG("sw_status=%08x, hw_status=%08x, int_ctrl=%08x",
180 hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS),
181 hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS),
182 hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL));
183 return ret;
184 }
185
186 ddc_status = hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS);
187
188 /* read back results of any read transactions: */
189 for (i = 0; i < num; i++) {
190 struct i2c_msg *p = &msgs[i];
191
192 if (!(p->flags & I2C_M_RD))
193 continue;
194
195 /* check for NACK: */
196 if (ddc_status & nack[i]) {
197 DBG("ddc_status=%08x", ddc_status);
198 break;
199 }
200
201 ddc_data = HDMI_DDC_DATA_DATA_RW(DDC_READ) |
202 HDMI_DDC_DATA_INDEX(indices[i]) |
203 HDMI_DDC_DATA_INDEX_WRITE;
204
205 hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
206
207 /* discard first byte: */
208 hdmi_read(hdmi, REG_HDMI_DDC_DATA);
209
210 for (j = 0; j < p->len; j++) {
211 ddc_data = hdmi_read(hdmi, REG_HDMI_DDC_DATA);
212 p->buf[j] = FIELD(ddc_data, HDMI_DDC_DATA_DATA);
213 }
214 }
215
216 return i;
217}
218
219static u32 hdmi_i2c_func(struct i2c_adapter *adapter)
220{
221 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
222}
223
224static const struct i2c_algorithm hdmi_i2c_algorithm = {
225 .master_xfer = hdmi_i2c_xfer,
226 .functionality = hdmi_i2c_func,
227};
228
229void hdmi_i2c_irq(struct i2c_adapter *i2c)
230{
231 struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
232
233 if (sw_done(hdmi_i2c))
234 wake_up_all(&hdmi_i2c->ddc_event);
235}
236
237void hdmi_i2c_destroy(struct i2c_adapter *i2c)
238{
239 struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
240 i2c_del_adapter(i2c);
241 kfree(hdmi_i2c);
242}
243
244struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi)
245{
246 struct drm_device *dev = hdmi->dev;
247 struct hdmi_i2c_adapter *hdmi_i2c;
248 struct i2c_adapter *i2c = NULL;
249 int ret;
250
251 hdmi_i2c = kzalloc(sizeof(*hdmi_i2c), GFP_KERNEL);
252 if (!hdmi_i2c) {
253 ret = -ENOMEM;
254 goto fail;
255 }
256
257 i2c = &hdmi_i2c->base;
258
259 hdmi_i2c->hdmi = hdmi;
260 init_waitqueue_head(&hdmi_i2c->ddc_event);
261
262
263 i2c->owner = THIS_MODULE;
264 i2c->class = I2C_CLASS_DDC;
265 snprintf(i2c->name, sizeof(i2c->name), "msm hdmi i2c");
266 i2c->dev.parent = &hdmi->pdev->dev;
267 i2c->algo = &hdmi_i2c_algorithm;
268
269 ret = i2c_add_adapter(i2c);
270 if (ret) {
271 dev_err(dev->dev, "failed to register hdmi i2c: %d\n", ret);
272 goto fail;
273 }
274
275 return i2c;
276
277fail:
278 if (i2c)
279 hdmi_i2c_destroy(i2c);
280 return ERR_PTR(ret);
281}
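
hdmi_i2c_xfer() maps each i2c_msg onto one slot of the hardware's four-entry DDC transaction engine, which matches the shape drm_get_edid() produces: a one-byte write of the EDID offset followed by a 128-byte read, both at the standard DDC address 0x50. A sketch of that message pair (drm_get_edid() normally builds it for us):

/* Roughly what an EDID block read looks like by the time it reaches
 * hdmi_i2c_xfer(); illustrative only. */
static int edid_read_sketch(struct i2c_adapter *i2c)
{
	u8 offset = 0;			/* start of EDID block 0 */
	u8 edid[128];
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid },
	};

	/* hdmi_i2c_xfer() turns these two messages into two slots of
	 * REG_HDMI_I2C_TRANSACTION(), sets TRANSACTION_CNT = 1 plus GO in
	 * REG_HDMI_DDC_CTRL, waits for the SW_DONE interrupt, and then
	 * reads the 128 bytes back through REG_HDMI_DDC_DATA. */
	return i2c_transfer(i2c, msgs, ARRAY_SIZE(msgs));
}
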
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
new file mode 100644
index 000000000000..e5b7ed5b8f01
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -0,0 +1,141 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "hdmi.h"
19
20struct hdmi_phy_8960 {
21 struct hdmi_phy base;
22 struct hdmi *hdmi;
23};
24#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
25
26static void hdmi_phy_8960_destroy(struct hdmi_phy *phy)
27{
28 struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
29 kfree(phy_8960);
30}
31
32static void hdmi_phy_8960_reset(struct hdmi_phy *phy)
33{
34 struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
35 struct hdmi *hdmi = phy_8960->hdmi;
36 unsigned int val;
37
38 val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
39
40 if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
41 /* pull low */
42 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
43 val & ~HDMI_PHY_CTRL_SW_RESET);
44 } else {
45 /* pull high */
46 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
47 val | HDMI_PHY_CTRL_SW_RESET);
48 }
49
50 if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
51 /* pull low */
52 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
53 val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
54 } else {
55 /* pull high */
56 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
57 val | HDMI_PHY_CTRL_SW_RESET_PLL);
58 }
59
60 msleep(100);
61
62 if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
63 /* pull high */
64 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
65 val | HDMI_PHY_CTRL_SW_RESET);
66 } else {
67 /* pull low */
68 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
69 val & ~HDMI_PHY_CTRL_SW_RESET);
70 }
71
72 if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
73 /* pull high */
74 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
75 val | HDMI_PHY_CTRL_SW_RESET_PLL);
76 } else {
77 /* pull low */
78 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
79 val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
80 }
81}
82
83static void hdmi_phy_8960_powerup(struct hdmi_phy *phy,
84 unsigned long int pixclock)
85{
86 struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
87 struct hdmi *hdmi = phy_8960->hdmi;
88
89 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG0, 0x1b);
90 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG1, 0xf2);
91 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG4, 0x00);
92 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG5, 0x00);
93 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG6, 0x00);
94 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG7, 0x00);
95 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG8, 0x00);
96 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG9, 0x00);
97 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG10, 0x00);
98 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG11, 0x00);
99 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG3, 0x20);
100}
101
102static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy)
103{
104 struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
105 struct hdmi *hdmi = phy_8960->hdmi;
106
107 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x7f);
108}
109
110static const struct hdmi_phy_funcs hdmi_phy_8960_funcs = {
111 .destroy = hdmi_phy_8960_destroy,
112 .reset = hdmi_phy_8960_reset,
113 .powerup = hdmi_phy_8960_powerup,
114 .powerdown = hdmi_phy_8960_powerdown,
115};
116
117struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
118{
119 struct hdmi_phy_8960 *phy_8960;
120 struct hdmi_phy *phy = NULL;
121 int ret;
122
123 phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL);
124 if (!phy_8960) {
125 ret = -ENOMEM;
126 goto fail;
127 }
128
129 phy = &phy_8960->base;
130
131 phy->funcs = &hdmi_phy_8960_funcs;
132
133 phy_8960->hdmi = hdmi;
134
135 return phy;
136
137fail:
138 if (phy)
139 hdmi_phy_8960_destroy(phy);
140 return ERR_PTR(ret);
141}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
new file mode 100644
index 000000000000..391433c1af7c
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
@@ -0,0 +1,214 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "hdmi.h"
19
20struct hdmi_phy_8x60 {
21 struct hdmi_phy base;
22 struct hdmi *hdmi;
23};
24#define to_hdmi_phy_8x60(x) container_of(x, struct hdmi_phy_8x60, base)
25
26static void hdmi_phy_8x60_destroy(struct hdmi_phy *phy)
27{
28 struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
29 kfree(phy_8x60);
30}
31
32static void hdmi_phy_8x60_reset(struct hdmi_phy *phy)
33{
34 struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
35 struct hdmi *hdmi = phy_8x60->hdmi;
36 unsigned int val;
37
38 val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
39
40 if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
41 /* pull low */
42 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
43 val & ~HDMI_PHY_CTRL_SW_RESET);
44 } else {
45 /* pull high */
46 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
47 val | HDMI_PHY_CTRL_SW_RESET);
48 }
49
50 msleep(100);
51
52 if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
53 /* pull high */
54 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
55 val | HDMI_PHY_CTRL_SW_RESET);
56 } else {
57 /* pull low */
58 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
59 val & ~HDMI_PHY_CTRL_SW_RESET);
60 }
61}
62
63static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy,
64 unsigned long int pixclock)
65{
66 struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
67 struct hdmi *hdmi = phy_8x60->hdmi;
68
69 /* De-serializer delay D/C for non-lbk mode: */
70 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG0,
71 HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(3));
72
73 if (pixclock == 27000000) {
74 /* video_format == HDMI_VFRMT_720x480p60_16_9 */
75 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1,
76 HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
77 HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(3));
78 } else {
79 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1,
80 HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
81 HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(4));
82 }
83
84 /* No matter what, start from the power down mode: */
85 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
86 HDMI_8x60_PHY_REG2_PD_PWRGEN |
87 HDMI_8x60_PHY_REG2_PD_PLL |
88 HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
89 HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
90 HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
91 HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
92 HDMI_8x60_PHY_REG2_PD_DESER);
93
94 /* Turn PowerGen on: */
95 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
96 HDMI_8x60_PHY_REG2_PD_PLL |
97 HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
98 HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
99 HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
100 HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
101 HDMI_8x60_PHY_REG2_PD_DESER);
102
103 /* Turn PLL power on: */
104 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
105 HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
106 HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
107 HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
108 HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
109 HDMI_8x60_PHY_REG2_PD_DESER);
110
111 /* Write to HIGH after PLL power down de-assert: */
112 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3,
113 HDMI_8x60_PHY_REG3_PLL_ENABLE);
114
115 /* ASIC power on; PHY REG9 = 0 */
116 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0);
117
118 /* Enable PLL lock detect, PLL lock det will go high after lock
119 * Enable the re-time logic
120 */
121 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12,
122 HDMI_8x60_PHY_REG12_RETIMING_EN |
123 HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN);
124
125 /* Drivers are on: */
126 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
127 HDMI_8x60_PHY_REG2_PD_DESER);
128
129 /* If the RX detector is needed: */
130 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
131 HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
132 HDMI_8x60_PHY_REG2_PD_DESER);
133
134 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG4, 0);
135 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG5, 0);
136 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG6, 0);
137 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG7, 0);
138 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG8, 0);
139 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0);
140 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG10, 0);
141 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG11, 0);
142
143 /* If we want to use lock enable based on counting: */
144 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12,
145 HDMI_8x60_PHY_REG12_RETIMING_EN |
146 HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN |
147 HDMI_8x60_PHY_REG12_FORCE_LOCK);
148}
149
150static void hdmi_phy_8x60_powerdown(struct hdmi_phy *phy)
151{
152 struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
153 struct hdmi *hdmi = phy_8x60->hdmi;
154
155 /* Assert RESET PHY from controller */
156 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
157 HDMI_PHY_CTRL_SW_RESET);
158 udelay(10);
159 /* De-assert RESET PHY from controller */
160 hdmi_write(hdmi, REG_HDMI_PHY_CTRL, 0);
161 /* Turn off Driver */
162 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
163 HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
164 HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
165 HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
166 HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
167 HDMI_8x60_PHY_REG2_PD_DESER);
168 udelay(10);
169 /* Disable PLL */
170 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3, 0);
171 /* Power down PHY, but keep RX-sense: */
172 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
173 HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
174 HDMI_8x60_PHY_REG2_PD_PWRGEN |
175 HDMI_8x60_PHY_REG2_PD_PLL |
176 HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
177 HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
178 HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
179 HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
180 HDMI_8x60_PHY_REG2_PD_DESER);
181}
182
183static const struct hdmi_phy_funcs hdmi_phy_8x60_funcs = {
184 .destroy = hdmi_phy_8x60_destroy,
185 .reset = hdmi_phy_8x60_reset,
186 .powerup = hdmi_phy_8x60_powerup,
187 .powerdown = hdmi_phy_8x60_powerdown,
188};
189
190struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi)
191{
192 struct hdmi_phy_8x60 *phy_8x60;
193 struct hdmi_phy *phy = NULL;
194 int ret;
195
196 phy_8x60 = kzalloc(sizeof(*phy_8x60), GFP_KERNEL);
197 if (!phy_8x60) {
198 ret = -ENOMEM;
199 goto fail;
200 }
201
202 phy = &phy_8x60->base;
203
204 phy->funcs = &hdmi_phy_8x60_funcs;
205
206 phy_8x60->hdmi = hdmi;
207
208 return phy;
209
210fail:
211 if (phy)
212 hdmi_phy_8x60_destroy(phy);
213 return ERR_PTR(ret);
214}
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
new file mode 100644
index 000000000000..bee36363bcd0
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -0,0 +1,50 @@
1#ifndef QFPROM_XML
2#define QFPROM_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
45#define REG_QFPROM_CONFIG_ROW0_LSB 0x00000238
46#define QFPROM_CONFIG_ROW0_LSB_HDMI_DISABLE 0x00200000
47#define QFPROM_CONFIG_ROW0_LSB_HDCP_DISABLE 0x00400000
48
49
50#endif /* QFPROM_XML */
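
The two fuse bits above let fused-off parts disable HDMI or HDCP entirely. A minimal hedged sketch of such a check; qfprom_read() and the hdcp_disabled flag are stand-ins, not symbols defined in this patch:

/* Illustrative fuse check only. */
static int hdmi_check_fuses_sketch(struct hdmi *hdmi, bool *hdcp_disabled)
{
	uint32_t row0 = qfprom_read(hdmi, REG_QFPROM_CONFIG_ROW0_LSB);

	if (row0 & QFPROM_CONFIG_ROW0_LSB_HDMI_DISABLE)
		return -ENXIO;		/* HDMI fused off on this part */

	*hdcp_disabled = !!(row0 & QFPROM_CONFIG_ROW0_LSB_HDCP_DISABLE);
	return 0;
}
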
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
new file mode 100644
index 000000000000..bbeeebe2db55
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
@@ -0,0 +1,1061 @@
1#ifndef MDP4_XML
2#define MDP4_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
45enum mpd4_bpc {
46 BPC1 = 0,
47 BPC5 = 1,
48 BPC6 = 2,
49 BPC8 = 3,
50};
51
52enum mpd4_bpc_alpha {
53 BPC1A = 0,
54 BPC4A = 1,
55 BPC6A = 2,
56 BPC8A = 3,
57};
58
59enum mpd4_alpha_type {
60 FG_CONST = 0,
61 BG_CONST = 1,
62 FG_PIXEL = 2,
63 BG_PIXEL = 3,
64};
65
66enum mpd4_pipe {
67 VG1 = 0,
68 VG2 = 1,
69 RGB1 = 2,
70 RGB2 = 3,
71 RGB3 = 4,
72 VG3 = 5,
73 VG4 = 6,
74};
75
76enum mpd4_mixer {
77 MIXER0 = 0,
78 MIXER1 = 1,
79 MIXER2 = 2,
80};
81
82enum mpd4_mixer_stage_id {
83 STAGE_UNUSED = 0,
84 STAGE_BASE = 1,
85 STAGE0 = 2,
86 STAGE1 = 3,
87 STAGE2 = 4,
88 STAGE3 = 5,
89};
90
91enum mdp4_intf {
92 INTF_LCDC_DTV = 0,
93 INTF_DSI_VIDEO = 1,
94 INTF_DSI_CMD = 2,
95 INTF_EBI2_TV = 3,
96};
97
98enum mdp4_cursor_format {
99 CURSOR_ARGB = 1,
100 CURSOR_XRGB = 2,
101};
102
103enum mdp4_dma {
104 DMA_P = 0,
105 DMA_S = 1,
106 DMA_E = 2,
107};
108
109#define MDP4_IRQ_OVERLAY0_DONE 0x00000001
110#define MDP4_IRQ_OVERLAY1_DONE 0x00000002
111#define MDP4_IRQ_DMA_S_DONE 0x00000004
112#define MDP4_IRQ_DMA_E_DONE 0x00000008
113#define MDP4_IRQ_DMA_P_DONE 0x00000010
114#define MDP4_IRQ_VG1_HISTOGRAM 0x00000020
115#define MDP4_IRQ_VG2_HISTOGRAM 0x00000040
116#define MDP4_IRQ_PRIMARY_VSYNC 0x00000080
117#define MDP4_IRQ_PRIMARY_INTF_UDERRUN 0x00000100
118#define MDP4_IRQ_EXTERNAL_VSYNC 0x00000200
119#define MDP4_IRQ_EXTERNAL_INTF_UDERRUN 0x00000400
120#define MDP4_IRQ_PRIMARY_RDPTR 0x00000800
121#define MDP4_IRQ_DMA_P_HISTOGRAM 0x00020000
122#define MDP4_IRQ_DMA_S_HISTOGRAM 0x04000000
123#define MDP4_IRQ_OVERLAY2_DONE 0x40000000
124#define REG_MDP4_VERSION 0x00000000
125#define MDP4_VERSION_MINOR__MASK 0x00ff0000
126#define MDP4_VERSION_MINOR__SHIFT 16
127static inline uint32_t MDP4_VERSION_MINOR(uint32_t val)
128{
129 return ((val) << MDP4_VERSION_MINOR__SHIFT) & MDP4_VERSION_MINOR__MASK;
130}
131#define MDP4_VERSION_MAJOR__MASK 0xff000000
132#define MDP4_VERSION_MAJOR__SHIFT 24
133static inline uint32_t MDP4_VERSION_MAJOR(uint32_t val)
134{
135 return ((val) << MDP4_VERSION_MAJOR__SHIFT) & MDP4_VERSION_MAJOR__MASK;
136}
137
138#define REG_MDP4_OVLP0_KICK 0x00000004
139
140#define REG_MDP4_OVLP1_KICK 0x00000008
141
142#define REG_MDP4_OVLP2_KICK 0x000000d0
143
144#define REG_MDP4_DMA_P_KICK 0x0000000c
145
146#define REG_MDP4_DMA_S_KICK 0x00000010
147
148#define REG_MDP4_DMA_E_KICK 0x00000014
149
150#define REG_MDP4_DISP_STATUS 0x00000018
151
152#define REG_MDP4_DISP_INTF_SEL 0x00000038
153#define MDP4_DISP_INTF_SEL_PRIM__MASK 0x00000003
154#define MDP4_DISP_INTF_SEL_PRIM__SHIFT 0
155static inline uint32_t MDP4_DISP_INTF_SEL_PRIM(enum mdp4_intf val)
156{
157 return ((val) << MDP4_DISP_INTF_SEL_PRIM__SHIFT) & MDP4_DISP_INTF_SEL_PRIM__MASK;
158}
159#define MDP4_DISP_INTF_SEL_SEC__MASK 0x0000000c
160#define MDP4_DISP_INTF_SEL_SEC__SHIFT 2
161static inline uint32_t MDP4_DISP_INTF_SEL_SEC(enum mdp4_intf val)
162{
163 return ((val) << MDP4_DISP_INTF_SEL_SEC__SHIFT) & MDP4_DISP_INTF_SEL_SEC__MASK;
164}
165#define MDP4_DISP_INTF_SEL_EXT__MASK 0x00000030
166#define MDP4_DISP_INTF_SEL_EXT__SHIFT 4
167static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val)
168{
169 return ((val) << MDP4_DISP_INTF_SEL_EXT__SHIFT) & MDP4_DISP_INTF_SEL_EXT__MASK;
170}
171#define MDP4_DISP_INTF_SEL_DSI_VIDEO 0x00000040
172#define MDP4_DISP_INTF_SEL_DSI_CMD 0x00000080
173
174#define REG_MDP4_RESET_STATUS 0x0000003c
175
176#define REG_MDP4_READ_CNFG 0x0000004c
177
178#define REG_MDP4_INTR_ENABLE 0x00000050
179
180#define REG_MDP4_INTR_STATUS 0x00000054
181
182#define REG_MDP4_INTR_CLEAR 0x00000058
183
184#define REG_MDP4_EBI2_LCD0 0x00000060
185
186#define REG_MDP4_EBI2_LCD1 0x00000064
187
188#define REG_MDP4_PORTMAP_MODE 0x00000070
189
190#define REG_MDP4_CS_CONTROLLER0 0x000000c0
191
192#define REG_MDP4_CS_CONTROLLER1 0x000000c4
193
194#define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0
195#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007
196#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0
197static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val)
198{
199 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK;
200}
201#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008
202#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070
203#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4
204static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val)
205{
206 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK;
207}
208#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080
209#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700
210#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8
211static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val)
212{
213 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK;
214}
215#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800
216#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000
217#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12
218static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val)
219{
220 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK;
221}
222#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000
223#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000
224#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16
225static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val)
226{
227 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK;
228}
229#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000
230#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000
231#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20
232static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val)
233{
234 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK;
235}
236#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000
237#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000
238#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24
239static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val)
240{
241 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK;
242}
243#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000
244#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000
245#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28
246static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val)
247{
248 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK;
249}
250#define MDP4_LAYERMIXER2_IN_CFG_PIPE7_MIXER1 0x80000000
251
252#define REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD 0x000100fc
253
254#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100
255#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007
256#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0
257static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val)
258{
259 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK;
260}
261#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008
262#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070
263#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4
264static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val)
265{
266 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK;
267}
268#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080
269#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700
270#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8
271static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val)
272{
273 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK;
274}
275#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800
276#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000
277#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12
278static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val)
279{
280 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK;
281}
282#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000
283#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000
284#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16
285static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val)
286{
287 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK;
288}
289#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000
290#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000
291#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20
292static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val)
293{
294 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK;
295}
296#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000
297#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000
298#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24
299static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val)
300{
301 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK;
302}
303#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000
304#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000
305#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28
306static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val)
307{
308 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK;
309}
310#define MDP4_LAYERMIXER_IN_CFG_PIPE7_MIXER1 0x80000000
311
312#define REG_MDP4_VG2_SRC_FORMAT 0x00030050
313
314#define REG_MDP4_VG2_CONST_COLOR 0x00031008
315
316#define REG_MDP4_OVERLAY_FLUSH 0x00018000
317#define MDP4_OVERLAY_FLUSH_OVLP0 0x00000001
318#define MDP4_OVERLAY_FLUSH_OVLP1 0x00000002
319#define MDP4_OVERLAY_FLUSH_VG1 0x00000004
320#define MDP4_OVERLAY_FLUSH_VG2 0x00000008
321#define MDP4_OVERLAY_FLUSH_RGB1 0x00000010
322#define MDP4_OVERLAY_FLUSH_RGB2 0x00000020
323
324static inline uint32_t __offset_OVLP(uint32_t idx)
325{
326 switch (idx) {
327 case 0: return 0x00010000;
328 case 1: return 0x00018000;
329 case 2: return 0x00088000;
330 default: return INVALID_IDX(idx);
331 }
332}
333static inline uint32_t REG_MDP4_OVLP(uint32_t i0) { return 0x00000000 + __offset_OVLP(i0); }
334
335static inline uint32_t REG_MDP4_OVLP_CFG(uint32_t i0) { return 0x00000004 + __offset_OVLP(i0); }
336
337static inline uint32_t REG_MDP4_OVLP_SIZE(uint32_t i0) { return 0x00000008 + __offset_OVLP(i0); }
338#define MDP4_OVLP_SIZE_HEIGHT__MASK 0xffff0000
339#define MDP4_OVLP_SIZE_HEIGHT__SHIFT 16
340static inline uint32_t MDP4_OVLP_SIZE_HEIGHT(uint32_t val)
341{
342 return ((val) << MDP4_OVLP_SIZE_HEIGHT__SHIFT) & MDP4_OVLP_SIZE_HEIGHT__MASK;
343}
344#define MDP4_OVLP_SIZE_WIDTH__MASK 0x0000ffff
345#define MDP4_OVLP_SIZE_WIDTH__SHIFT 0
346static inline uint32_t MDP4_OVLP_SIZE_WIDTH(uint32_t val)
347{
348 return ((val) << MDP4_OVLP_SIZE_WIDTH__SHIFT) & MDP4_OVLP_SIZE_WIDTH__MASK;
349}
350
351static inline uint32_t REG_MDP4_OVLP_BASE(uint32_t i0) { return 0x0000000c + __offset_OVLP(i0); }
352
353static inline uint32_t REG_MDP4_OVLP_STRIDE(uint32_t i0) { return 0x00000010 + __offset_OVLP(i0); }
354
355static inline uint32_t REG_MDP4_OVLP_OPMODE(uint32_t i0) { return 0x00000014 + __offset_OVLP(i0); }
356
357static inline uint32_t __offset_STAGE(uint32_t idx)
358{
359 switch (idx) {
360 case 0: return 0x00000104;
361 case 1: return 0x00000124;
362 case 2: return 0x00000144;
363 case 3: return 0x00000160;
364 default: return INVALID_IDX(idx);
365 }
366}
367static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
368
369static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
370#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003
371#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0
372static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mpd4_alpha_type val)
373{
374 return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK;
375}
376#define MDP4_OVLP_STAGE_OP_FG_INV_ALPHA 0x00000004
377#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008
378#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030
379#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4
380static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mpd4_alpha_type val)
381{
382 return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK;
383}
384#define MDP4_OVLP_STAGE_OP_BG_INV_ALPHA 0x00000040
385#define MDP4_OVLP_STAGE_OP_BG_MOD_ALPHA 0x00000080
386#define MDP4_OVLP_STAGE_OP_FG_TRANSP 0x00000100
387#define MDP4_OVLP_STAGE_OP_BG_TRANSP 0x00000200
388
389static inline uint32_t REG_MDP4_OVLP_STAGE_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_OVLP(i0) + __offset_STAGE(i1); }
390
391static inline uint32_t REG_MDP4_OVLP_STAGE_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_OVLP(i0) + __offset_STAGE(i1); }
392
393static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_OVLP(i0) + __offset_STAGE(i1); }
394
395static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_OVLP(i0) + __offset_STAGE(i1); }
396
397static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_OVLP(i0) + __offset_STAGE(i1); }
398
399static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_OVLP(i0) + __offset_STAGE(i1); }
400
401static inline uint32_t __offset_STAGE_CO3(uint32_t idx)
402{
403 switch (idx) {
404 case 0: return 0x00001004;
405 case 1: return 0x00001404;
406 case 2: return 0x00001804;
407 case 3: return 0x00001b84;
408 default: return INVALID_IDX(idx);
409 }
410}
411static inline uint32_t REG_MDP4_OVLP_STAGE_CO3(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }
412
413static inline uint32_t REG_MDP4_OVLP_STAGE_CO3_SEL(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }
414#define MDP4_OVLP_STAGE_CO3_SEL_FG_ALPHA 0x00000001
415
416static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW0(uint32_t i0) { return 0x00000180 + __offset_OVLP(i0); }
417
418static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW1(uint32_t i0) { return 0x00000184 + __offset_OVLP(i0); }
419
420static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH0(uint32_t i0) { return 0x00000188 + __offset_OVLP(i0); }
421
422static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH1(uint32_t i0) { return 0x0000018c + __offset_OVLP(i0); }
423
424static inline uint32_t REG_MDP4_OVLP_CSC_CONFIG(uint32_t i0) { return 0x00000200 + __offset_OVLP(i0); }
425
426static inline uint32_t REG_MDP4_OVLP_CSC(uint32_t i0) { return 0x00002000 + __offset_OVLP(i0); }
427
428
429static inline uint32_t REG_MDP4_OVLP_CSC_MV(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }
430
431static inline uint32_t REG_MDP4_OVLP_CSC_MV_VAL(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }
432
433static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }
434
435static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }
436
437static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }
438
439static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }
440
441static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }
442
443static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }
444
445static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }
446
447static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }
448
449#define REG_MDP4_DMA_P_OP_MODE 0x00090070
450
451static inline uint32_t REG_MDP4_LUTN(uint32_t i0) { return 0x00094800 + 0x400*i0; }
452
453static inline uint32_t REG_MDP4_LUTN_LUT(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }
454
455static inline uint32_t REG_MDP4_LUTN_LUT_VAL(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }
456
457#define REG_MDP4_DMA_S_OP_MODE 0x000a0028
458
459static inline uint32_t REG_MDP4_DMA_E_QUANT(uint32_t i0) { return 0x000b0070 + 0x4*i0; }
460
461static inline uint32_t __offset_DMA(enum mdp4_dma idx)
462{
463 switch (idx) {
464 case DMA_P: return 0x00090000;
465 case DMA_S: return 0x000a0000;
466 case DMA_E: return 0x000b0000;
467 default: return INVALID_IDX(idx);
468 }
469}
470static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
471
472static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
473#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003
474#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0
475static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mpd4_bpc val)
476{
477 return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK;
478}
479#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c
480#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2
481static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mpd4_bpc val)
482{
483 return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK;
484}
485#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030
486#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4
487static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mpd4_bpc val)
488{
489 return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK;
490}
491#define MDP4_DMA_CONFIG_PACK_ALIGN_MSB 0x00000080
492#define MDP4_DMA_CONFIG_PACK__MASK 0x0000ff00
493#define MDP4_DMA_CONFIG_PACK__SHIFT 8
494static inline uint32_t MDP4_DMA_CONFIG_PACK(uint32_t val)
495{
496 return ((val) << MDP4_DMA_CONFIG_PACK__SHIFT) & MDP4_DMA_CONFIG_PACK__MASK;
497}
498#define MDP4_DMA_CONFIG_DEFLKR_EN 0x01000000
499#define MDP4_DMA_CONFIG_DITHER_EN 0x01000000
500
501static inline uint32_t REG_MDP4_DMA_SRC_SIZE(enum mdp4_dma i0) { return 0x00000004 + __offset_DMA(i0); }
502#define MDP4_DMA_SRC_SIZE_HEIGHT__MASK 0xffff0000
503#define MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT 16
504static inline uint32_t MDP4_DMA_SRC_SIZE_HEIGHT(uint32_t val)
505{
506 return ((val) << MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT) & MDP4_DMA_SRC_SIZE_HEIGHT__MASK;
507}
508#define MDP4_DMA_SRC_SIZE_WIDTH__MASK 0x0000ffff
509#define MDP4_DMA_SRC_SIZE_WIDTH__SHIFT 0
510static inline uint32_t MDP4_DMA_SRC_SIZE_WIDTH(uint32_t val)
511{
512 return ((val) << MDP4_DMA_SRC_SIZE_WIDTH__SHIFT) & MDP4_DMA_SRC_SIZE_WIDTH__MASK;
513}
514
515static inline uint32_t REG_MDP4_DMA_SRC_BASE(enum mdp4_dma i0) { return 0x00000008 + __offset_DMA(i0); }
516
517static inline uint32_t REG_MDP4_DMA_SRC_STRIDE(enum mdp4_dma i0) { return 0x0000000c + __offset_DMA(i0); }
518
519static inline uint32_t REG_MDP4_DMA_DST_SIZE(enum mdp4_dma i0) { return 0x00000010 + __offset_DMA(i0); }
520#define MDP4_DMA_DST_SIZE_HEIGHT__MASK 0xffff0000
521#define MDP4_DMA_DST_SIZE_HEIGHT__SHIFT 16
522static inline uint32_t MDP4_DMA_DST_SIZE_HEIGHT(uint32_t val)
523{
524 return ((val) << MDP4_DMA_DST_SIZE_HEIGHT__SHIFT) & MDP4_DMA_DST_SIZE_HEIGHT__MASK;
525}
526#define MDP4_DMA_DST_SIZE_WIDTH__MASK 0x0000ffff
527#define MDP4_DMA_DST_SIZE_WIDTH__SHIFT 0
528static inline uint32_t MDP4_DMA_DST_SIZE_WIDTH(uint32_t val)
529{
530 return ((val) << MDP4_DMA_DST_SIZE_WIDTH__SHIFT) & MDP4_DMA_DST_SIZE_WIDTH__MASK;
531}
532
533static inline uint32_t REG_MDP4_DMA_CURSOR_SIZE(enum mdp4_dma i0) { return 0x00000044 + __offset_DMA(i0); }
534#define MDP4_DMA_CURSOR_SIZE_WIDTH__MASK 0x0000007f
535#define MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT 0
536static inline uint32_t MDP4_DMA_CURSOR_SIZE_WIDTH(uint32_t val)
537{
538 return ((val) << MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT) & MDP4_DMA_CURSOR_SIZE_WIDTH__MASK;
539}
540#define MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK 0x007f0000
541#define MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT 16
542static inline uint32_t MDP4_DMA_CURSOR_SIZE_HEIGHT(uint32_t val)
543{
544 return ((val) << MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT) & MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK;
545}
546
547static inline uint32_t REG_MDP4_DMA_CURSOR_BASE(enum mdp4_dma i0) { return 0x00000048 + __offset_DMA(i0); }
548
549static inline uint32_t REG_MDP4_DMA_CURSOR_POS(enum mdp4_dma i0) { return 0x0000004c + __offset_DMA(i0); }
550#define MDP4_DMA_CURSOR_POS_X__MASK 0x0000ffff
551#define MDP4_DMA_CURSOR_POS_X__SHIFT 0
552static inline uint32_t MDP4_DMA_CURSOR_POS_X(uint32_t val)
553{
554 return ((val) << MDP4_DMA_CURSOR_POS_X__SHIFT) & MDP4_DMA_CURSOR_POS_X__MASK;
555}
556#define MDP4_DMA_CURSOR_POS_Y__MASK 0xffff0000
557#define MDP4_DMA_CURSOR_POS_Y__SHIFT 16
558static inline uint32_t MDP4_DMA_CURSOR_POS_Y(uint32_t val)
559{
560 return ((val) << MDP4_DMA_CURSOR_POS_Y__SHIFT) & MDP4_DMA_CURSOR_POS_Y__MASK;
561}
562
563static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_CONFIG(enum mdp4_dma i0) { return 0x00000060 + __offset_DMA(i0); }
564#define MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN 0x00000001
565#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK 0x00000006
566#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT 1
567static inline uint32_t MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(enum mdp4_cursor_format val)
568{
569 return ((val) << MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT) & MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK;
570}
571#define MDP4_DMA_CURSOR_BLEND_CONFIG_TRANSP_EN 0x00000008
572
573static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_PARAM(enum mdp4_dma i0) { return 0x00000064 + __offset_DMA(i0); }
574
575static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_LOW(enum mdp4_dma i0) { return 0x00000068 + __offset_DMA(i0); }
576
577static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_HIGH(enum mdp4_dma i0) { return 0x0000006c + __offset_DMA(i0); }
578
579static inline uint32_t REG_MDP4_DMA_FETCH_CONFIG(enum mdp4_dma i0) { return 0x00001004 + __offset_DMA(i0); }
580
581static inline uint32_t REG_MDP4_DMA_CSC(enum mdp4_dma i0) { return 0x00003000 + __offset_DMA(i0); }
582
583
584static inline uint32_t REG_MDP4_DMA_CSC_MV(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }
585
586static inline uint32_t REG_MDP4_DMA_CSC_MV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }
587
588static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }
589
590static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }
591
592static inline uint32_t REG_MDP4_DMA_CSC_POST_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }
593
594static inline uint32_t REG_MDP4_DMA_CSC_POST_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }
595
596static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }
597
598static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }
599
600static inline uint32_t REG_MDP4_DMA_CSC_POST_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
601
602static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
603
604static inline uint32_t REG_MDP4_PIPE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; }
605
606static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; }
607#define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
608#define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
609static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
610{
611 return ((val) << MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SRC_SIZE_HEIGHT__MASK;
612}
613#define MDP4_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff
614#define MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT 0
615static inline uint32_t MDP4_PIPE_SRC_SIZE_WIDTH(uint32_t val)
616{
617 return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK;
618}
619
620static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mpd4_pipe i0) { return 0x00020004 + 0x10000*i0; }
621#define MDP4_PIPE_SRC_XY_Y__MASK 0xffff0000
622#define MDP4_PIPE_SRC_XY_Y__SHIFT 16
623static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val)
624{
625 return ((val) << MDP4_PIPE_SRC_XY_Y__SHIFT) & MDP4_PIPE_SRC_XY_Y__MASK;
626}
627#define MDP4_PIPE_SRC_XY_X__MASK 0x0000ffff
628#define MDP4_PIPE_SRC_XY_X__SHIFT 0
629static inline uint32_t MDP4_PIPE_SRC_XY_X(uint32_t val)
630{
631 return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK;
632}
633
634static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mpd4_pipe i0) { return 0x00020008 + 0x10000*i0; }
635#define MDP4_PIPE_DST_SIZE_HEIGHT__MASK 0xffff0000
636#define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT 16
637static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val)
638{
639 return ((val) << MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_DST_SIZE_HEIGHT__MASK;
640}
641#define MDP4_PIPE_DST_SIZE_WIDTH__MASK 0x0000ffff
642#define MDP4_PIPE_DST_SIZE_WIDTH__SHIFT 0
643static inline uint32_t MDP4_PIPE_DST_SIZE_WIDTH(uint32_t val)
644{
645 return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK;
646}
647
648static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mpd4_pipe i0) { return 0x0002000c + 0x10000*i0; }
649#define MDP4_PIPE_DST_XY_Y__MASK 0xffff0000
650#define MDP4_PIPE_DST_XY_Y__SHIFT 16
651static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val)
652{
653 return ((val) << MDP4_PIPE_DST_XY_Y__SHIFT) & MDP4_PIPE_DST_XY_Y__MASK;
654}
655#define MDP4_PIPE_DST_XY_X__MASK 0x0000ffff
656#define MDP4_PIPE_DST_XY_X__SHIFT 0
657static inline uint32_t MDP4_PIPE_DST_XY_X(uint32_t val)
658{
659 return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK;
660}
661
662static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mpd4_pipe i0) { return 0x00020010 + 0x10000*i0; }
663
664static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mpd4_pipe i0) { return 0x00020014 + 0x10000*i0; }
665
666static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mpd4_pipe i0) { return 0x00020018 + 0x10000*i0; }
667
668static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mpd4_pipe i0) { return 0x00020040 + 0x10000*i0; }
669#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
670#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0
671static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val)
672{
673 return ((val) << MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P0__MASK;
674}
675#define MDP4_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000
676#define MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT 16
677static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P1(uint32_t val)
678{
679 return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK;
680}
681
682static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mpd4_pipe i0) { return 0x00020044 + 0x10000*i0; }
683#define MDP4_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
684#define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT 0
685static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val)
686{
687 return ((val) << MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P2__MASK;
688}
689#define MDP4_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000
690#define MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT 16
691static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P3(uint32_t val)
692{
693 return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK;
694}
695
696static inline uint32_t REG_MDP4_PIPE_FRAME_SIZE(enum mpd4_pipe i0) { return 0x00020048 + 0x10000*i0; }
697#define MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK 0xffff0000
698#define MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT 16
699static inline uint32_t MDP4_PIPE_FRAME_SIZE_HEIGHT(uint32_t val)
700{
701 return ((val) << MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK;
702}
703#define MDP4_PIPE_FRAME_SIZE_WIDTH__MASK 0x0000ffff
704#define MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT 0
705static inline uint32_t MDP4_PIPE_FRAME_SIZE_WIDTH(uint32_t val)
706{
707 return ((val) << MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_FRAME_SIZE_WIDTH__MASK;
708}
709
710static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mpd4_pipe i0) { return 0x00020050 + 0x10000*i0; }
711#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
712#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
713static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mpd4_bpc val)
714{
715 return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK;
716}
717#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
718#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
719static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mpd4_bpc val)
720{
721 return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK;
722}
723#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
724#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
725static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mpd4_bpc val)
726{
727 return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK;
728}
729#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
730#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
731static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mpd4_bpc_alpha val)
732{
733 return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK;
734}
735#define MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100
736#define MDP4_PIPE_SRC_FORMAT_CPP__MASK 0x00000600
737#define MDP4_PIPE_SRC_FORMAT_CPP__SHIFT 9
738static inline uint32_t MDP4_PIPE_SRC_FORMAT_CPP(uint32_t val)
739{
740 return ((val) << MDP4_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CPP__MASK;
741}
742#define MDP4_PIPE_SRC_FORMAT_ROTATED_90 0x00001000
743#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00006000
744#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 13
745static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
746{
747 return ((val) << MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK;
748}
749#define MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000
750#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
751#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000
752
753static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mpd4_pipe i0) { return 0x00020054 + 0x10000*i0; }
754#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
755#define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
756static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
757{
758 return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM0__MASK;
759}
760#define MDP4_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00
761#define MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT 8
762static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM1(uint32_t val)
763{
764 return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM1__MASK;
765}
766#define MDP4_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000
767#define MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT 16
768static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM2(uint32_t val)
769{
770 return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM2__MASK;
771}
772#define MDP4_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000
773#define MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT 24
774static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
775{
776 return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK;
777}
778
779static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mpd4_pipe i0) { return 0x00020058 + 0x10000*i0; }
780#define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001
781#define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002
782#define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200
783#define MDP4_PIPE_OP_MODE_DST_YCBCR 0x00000400
784#define MDP4_PIPE_OP_MODE_CSC_EN 0x00000800
785#define MDP4_PIPE_OP_MODE_FLIP_LR 0x00002000
786#define MDP4_PIPE_OP_MODE_FLIP_UD 0x00004000
787#define MDP4_PIPE_OP_MODE_DITHER_EN 0x00008000
788#define MDP4_PIPE_OP_MODE_IGC_LUT_EN 0x00010000
789#define MDP4_PIPE_OP_MODE_DEINT_EN 0x00040000
790#define MDP4_PIPE_OP_MODE_DEINT_ODD_REF 0x00080000
791
792static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mpd4_pipe i0) { return 0x0002005c + 0x10000*i0; }
793
794static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mpd4_pipe i0) { return 0x00020060 + 0x10000*i0; }
795
796static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mpd4_pipe i0) { return 0x00021004 + 0x10000*i0; }
797
798static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mpd4_pipe i0) { return 0x00021008 + 0x10000*i0; }
799
800static inline uint32_t REG_MDP4_PIPE_CSC(enum mpd4_pipe i0) { return 0x00024000 + 0x10000*i0; }
801
802
803static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
804
805static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
806
807static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
808
809static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
810
811static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
812
813static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
814
815static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
816
817static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
818
819static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
820
821static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
822
823#define REG_MDP4_LCDC 0x000c0000
824
825#define REG_MDP4_LCDC_ENABLE 0x000c0000
826
827#define REG_MDP4_LCDC_HSYNC_CTRL 0x000c0004
828#define MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
829#define MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT 0
830static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PULSEW(uint32_t val)
831{
832 return ((val) << MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK;
833}
834#define MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK 0xffff0000
835#define MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT 16
836static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PERIOD(uint32_t val)
837{
838 return ((val) << MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK;
839}
840
841#define REG_MDP4_LCDC_VSYNC_PERIOD 0x000c0008
842
843#define REG_MDP4_LCDC_VSYNC_LEN 0x000c000c
844
845#define REG_MDP4_LCDC_DISPLAY_HCTRL 0x000c0010
846#define MDP4_LCDC_DISPLAY_HCTRL_START__MASK 0x0000ffff
847#define MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT 0
848static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_START(uint32_t val)
849{
850 return ((val) << MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_START__MASK;
851}
852#define MDP4_LCDC_DISPLAY_HCTRL_END__MASK 0xffff0000
853#define MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT 16
854static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_END(uint32_t val)
855{
856 return ((val) << MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_END__MASK;
857}
858
859#define REG_MDP4_LCDC_DISPLAY_VSTART 0x000c0014
860
861#define REG_MDP4_LCDC_DISPLAY_VEND 0x000c0018
862
863#define REG_MDP4_LCDC_ACTIVE_HCTL 0x000c001c
864#define MDP4_LCDC_ACTIVE_HCTL_START__MASK 0x00007fff
865#define MDP4_LCDC_ACTIVE_HCTL_START__SHIFT 0
866static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_START(uint32_t val)
867{
868 return ((val) << MDP4_LCDC_ACTIVE_HCTL_START__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_START__MASK;
869}
870#define MDP4_LCDC_ACTIVE_HCTL_END__MASK 0x7fff0000
871#define MDP4_LCDC_ACTIVE_HCTL_END__SHIFT 16
872static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_END(uint32_t val)
873{
874 return ((val) << MDP4_LCDC_ACTIVE_HCTL_END__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_END__MASK;
875}
876#define MDP4_LCDC_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
877
878#define REG_MDP4_LCDC_ACTIVE_VSTART 0x000c0020
879
880#define REG_MDP4_LCDC_ACTIVE_VEND 0x000c0024
881
882#define REG_MDP4_LCDC_BORDER_CLR 0x000c0028
883
884#define REG_MDP4_LCDC_UNDERFLOW_CLR 0x000c002c
885#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
886#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT 0
887static inline uint32_t MDP4_LCDC_UNDERFLOW_CLR_COLOR(uint32_t val)
888{
889 return ((val) << MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK;
890}
891#define MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
892
893#define REG_MDP4_LCDC_HSYNC_SKEW 0x000c0030
894
895#define REG_MDP4_LCDC_TEST_CNTL 0x000c0034
896
897#define REG_MDP4_LCDC_CTRL_POLARITY 0x000c0038
898#define MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW 0x00000001
899#define MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW 0x00000002
900#define MDP4_LCDC_CTRL_POLARITY_DATA_EN_LOW 0x00000004
901
902#define REG_MDP4_DTV 0x000d0000
903
904#define REG_MDP4_DTV_ENABLE 0x000d0000
905
906#define REG_MDP4_DTV_HSYNC_CTRL 0x000d0004
907#define MDP4_DTV_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
908#define MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT 0
909static inline uint32_t MDP4_DTV_HSYNC_CTRL_PULSEW(uint32_t val)
910{
911 return ((val) << MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DTV_HSYNC_CTRL_PULSEW__MASK;
912}
913#define MDP4_DTV_HSYNC_CTRL_PERIOD__MASK 0xffff0000
914#define MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT 16
915static inline uint32_t MDP4_DTV_HSYNC_CTRL_PERIOD(uint32_t val)
916{
917 return ((val) << MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DTV_HSYNC_CTRL_PERIOD__MASK;
918}
919
920#define REG_MDP4_DTV_VSYNC_PERIOD 0x000d0008
921
922#define REG_MDP4_DTV_VSYNC_LEN 0x000d000c
923
924#define REG_MDP4_DTV_DISPLAY_HCTRL 0x000d0018
925#define MDP4_DTV_DISPLAY_HCTRL_START__MASK 0x0000ffff
926#define MDP4_DTV_DISPLAY_HCTRL_START__SHIFT 0
927static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_START(uint32_t val)
928{
929 return ((val) << MDP4_DTV_DISPLAY_HCTRL_START__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_START__MASK;
930}
931#define MDP4_DTV_DISPLAY_HCTRL_END__MASK 0xffff0000
932#define MDP4_DTV_DISPLAY_HCTRL_END__SHIFT 16
933static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_END(uint32_t val)
934{
935 return ((val) << MDP4_DTV_DISPLAY_HCTRL_END__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_END__MASK;
936}
937
938#define REG_MDP4_DTV_DISPLAY_VSTART 0x000d001c
939
940#define REG_MDP4_DTV_DISPLAY_VEND 0x000d0020
941
942#define REG_MDP4_DTV_ACTIVE_HCTL 0x000d002c
943#define MDP4_DTV_ACTIVE_HCTL_START__MASK 0x00007fff
944#define MDP4_DTV_ACTIVE_HCTL_START__SHIFT 0
945static inline uint32_t MDP4_DTV_ACTIVE_HCTL_START(uint32_t val)
946{
947 return ((val) << MDP4_DTV_ACTIVE_HCTL_START__SHIFT) & MDP4_DTV_ACTIVE_HCTL_START__MASK;
948}
949#define MDP4_DTV_ACTIVE_HCTL_END__MASK 0x7fff0000
950#define MDP4_DTV_ACTIVE_HCTL_END__SHIFT 16
951static inline uint32_t MDP4_DTV_ACTIVE_HCTL_END(uint32_t val)
952{
953 return ((val) << MDP4_DTV_ACTIVE_HCTL_END__SHIFT) & MDP4_DTV_ACTIVE_HCTL_END__MASK;
954}
955#define MDP4_DTV_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
956
957#define REG_MDP4_DTV_ACTIVE_VSTART 0x000d0030
958
959#define REG_MDP4_DTV_ACTIVE_VEND 0x000d0038
960
961#define REG_MDP4_DTV_BORDER_CLR 0x000d0040
962
963#define REG_MDP4_DTV_UNDERFLOW_CLR 0x000d0044
964#define MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
965#define MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT 0
966static inline uint32_t MDP4_DTV_UNDERFLOW_CLR_COLOR(uint32_t val)
967{
968 return ((val) << MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK;
969}
970#define MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
971
972#define REG_MDP4_DTV_HSYNC_SKEW 0x000d0048
973
974#define REG_MDP4_DTV_TEST_CNTL 0x000d004c
975
976#define REG_MDP4_DTV_CTRL_POLARITY 0x000d0050
977#define MDP4_DTV_CTRL_POLARITY_HSYNC_LOW 0x00000001
978#define MDP4_DTV_CTRL_POLARITY_VSYNC_LOW 0x00000002
979#define MDP4_DTV_CTRL_POLARITY_DATA_EN_LOW 0x00000004
980
981#define REG_MDP4_DSI 0x000e0000
982
983#define REG_MDP4_DSI_ENABLE 0x000e0000
984
985#define REG_MDP4_DSI_HSYNC_CTRL 0x000e0004
986#define MDP4_DSI_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
987#define MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT 0
988static inline uint32_t MDP4_DSI_HSYNC_CTRL_PULSEW(uint32_t val)
989{
990 return ((val) << MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DSI_HSYNC_CTRL_PULSEW__MASK;
991}
992#define MDP4_DSI_HSYNC_CTRL_PERIOD__MASK 0xffff0000
993#define MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT 16
994static inline uint32_t MDP4_DSI_HSYNC_CTRL_PERIOD(uint32_t val)
995{
996 return ((val) << MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DSI_HSYNC_CTRL_PERIOD__MASK;
997}
998
999#define REG_MDP4_DSI_VSYNC_PERIOD 0x000e0008
1000
1001#define REG_MDP4_DSI_VSYNC_LEN 0x000e000c
1002
1003#define REG_MDP4_DSI_DISPLAY_HCTRL 0x000e0010
1004#define MDP4_DSI_DISPLAY_HCTRL_START__MASK 0x0000ffff
1005#define MDP4_DSI_DISPLAY_HCTRL_START__SHIFT 0
1006static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_START(uint32_t val)
1007{
1008 return ((val) << MDP4_DSI_DISPLAY_HCTRL_START__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_START__MASK;
1009}
1010#define MDP4_DSI_DISPLAY_HCTRL_END__MASK 0xffff0000
1011#define MDP4_DSI_DISPLAY_HCTRL_END__SHIFT 16
1012static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_END(uint32_t val)
1013{
1014 return ((val) << MDP4_DSI_DISPLAY_HCTRL_END__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_END__MASK;
1015}
1016
1017#define REG_MDP4_DSI_DISPLAY_VSTART 0x000e0014
1018
1019#define REG_MDP4_DSI_DISPLAY_VEND 0x000e0018
1020
1021#define REG_MDP4_DSI_ACTIVE_HCTL 0x000e001c
1022#define MDP4_DSI_ACTIVE_HCTL_START__MASK 0x00007fff
1023#define MDP4_DSI_ACTIVE_HCTL_START__SHIFT 0
1024static inline uint32_t MDP4_DSI_ACTIVE_HCTL_START(uint32_t val)
1025{
1026 return ((val) << MDP4_DSI_ACTIVE_HCTL_START__SHIFT) & MDP4_DSI_ACTIVE_HCTL_START__MASK;
1027}
1028#define MDP4_DSI_ACTIVE_HCTL_END__MASK 0x7fff0000
1029#define MDP4_DSI_ACTIVE_HCTL_END__SHIFT 16
1030static inline uint32_t MDP4_DSI_ACTIVE_HCTL_END(uint32_t val)
1031{
1032 return ((val) << MDP4_DSI_ACTIVE_HCTL_END__SHIFT) & MDP4_DSI_ACTIVE_HCTL_END__MASK;
1033}
1034#define MDP4_DSI_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
1035
1036#define REG_MDP4_DSI_ACTIVE_VSTART 0x000e0020
1037
1038#define REG_MDP4_DSI_ACTIVE_VEND 0x000e0024
1039
1040#define REG_MDP4_DSI_BORDER_CLR 0x000e0028
1041
1042#define REG_MDP4_DSI_UNDERFLOW_CLR 0x000e002c
1043#define MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
1044#define MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT 0
1045static inline uint32_t MDP4_DSI_UNDERFLOW_CLR_COLOR(uint32_t val)
1046{
1047 return ((val) << MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK;
1048}
1049#define MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
1050
1051#define REG_MDP4_DSI_HSYNC_SKEW 0x000e0030
1052
1053#define REG_MDP4_DSI_TEST_CNTL 0x000e0034
1054
1055#define REG_MDP4_DSI_CTRL_POLARITY 0x000e0038
1056#define MDP4_DSI_CTRL_POLARITY_HSYNC_LOW 0x00000001
1057#define MDP4_DSI_CTRL_POLARITY_VSYNC_LOW 0x00000002
1058#define MDP4_DSI_CTRL_POLARITY_DATA_EN_LOW 0x00000004
1059
1060
1061#endif /* MDP4_XML */
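The pattern throughout this generated header is a REG_* offset (or offset helper) plus FIELD__MASK/FIELD__SHIFT pairs with a static inline packer per field; a register value is built by OR'ing the packers together. As a small usage sketch (mirroring the cursor programming in mdp4_crtc.c below, and assuming an mdp4_kms handle and the mdp4_write() accessor from that driver), setting a 64x64 cursor on the primary DMA channel could look like:

static void example_cursor_size(struct mdp4_kms *mdp4_kms)
{
	/* pack the width/height fields and write the DMA_P cursor size register */
	mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(DMA_P),
			MDP4_DMA_CURSOR_SIZE_WIDTH(64) |
			MDP4_DMA_CURSOR_SIZE_HEIGHT(64));
}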
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
new file mode 100644
index 000000000000..de6bea297cda
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
@@ -0,0 +1,685 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "mdp4_kms.h"
19
20#include <drm/drm_mode.h>
21#include "drm_crtc.h"
22#include "drm_crtc_helper.h"
23#include "drm_flip_work.h"
24
25struct mdp4_crtc {
26 struct drm_crtc base;
27 char name[8];
28 struct drm_plane *plane;
29 int id;
30 int ovlp;
31 enum mdp4_dma dma;
32 bool enabled;
33
34 /* which mixer/encoder we route output to: */
35 int mixer;
36
37 struct {
38 spinlock_t lock;
39 bool stale;
40 uint32_t width, height;
41
42 /* next cursor to scan-out: */
43 uint32_t next_iova;
44 struct drm_gem_object *next_bo;
45
46 /* current cursor being scanned out: */
47 struct drm_gem_object *scanout_bo;
48 } cursor;
49
50
51 /* if there is a pending flip, these will be non-null: */
52 struct drm_pending_vblank_event *event;
53 struct work_struct pageflip_work;
54
55 /* the fb that we currently hold a scanout ref to: */
56 struct drm_framebuffer *fb;
57
58 /* for unref'ing framebuffers after scanout completes: */
59 struct drm_flip_work unref_fb_work;
60
61 /* for unref'ing cursor bo's after scanout completes: */
62 struct drm_flip_work unref_cursor_work;
63
64 struct mdp4_irq vblank;
65 struct mdp4_irq err;
66};
67#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)
68
69static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
70{
71 struct msm_drm_private *priv = crtc->dev->dev_private;
72 return to_mdp4_kms(priv->kms);
73}
74
75static void update_fb(struct drm_crtc *crtc, bool async,
76 struct drm_framebuffer *new_fb)
77{
78 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
79 struct drm_framebuffer *old_fb = mdp4_crtc->fb;
80
81 if (old_fb)
82 drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
83
84 /* grab reference to incoming scanout fb: */
85 drm_framebuffer_reference(new_fb);
86 mdp4_crtc->base.fb = new_fb;
87 mdp4_crtc->fb = new_fb;
88
89 if (!async) {
90 /* enable vblank to pick up the old_fb */
91 mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
92 }
93}
94
95static void complete_flip(struct drm_crtc *crtc, bool canceled)
96{
97 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
98 struct drm_device *dev = crtc->dev;
99 struct drm_pending_vblank_event *event;
100 unsigned long flags;
101
102 spin_lock_irqsave(&dev->event_lock, flags);
103 event = mdp4_crtc->event;
104 if (event) {
105 mdp4_crtc->event = NULL;
106 if (canceled)
107 event->base.destroy(&event->base);
108 else
109 drm_send_vblank_event(dev, mdp4_crtc->id, event);
110 }
111 spin_unlock_irqrestore(&dev->event_lock, flags);
112}
113
114static void crtc_flush(struct drm_crtc *crtc)
115{
116 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
117 struct mdp4_kms *mdp4_kms = get_kms(crtc);
118 uint32_t flush = 0;
119
120 flush |= pipe2flush(mdp4_plane_pipe(mdp4_crtc->plane));
121 flush |= ovlp2flush(mdp4_crtc->ovlp);
122
123 DBG("%s: flush=%08x", mdp4_crtc->name, flush);
124
125 mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
126}
127
128static void pageflip_worker(struct work_struct *work)
129{
130 struct mdp4_crtc *mdp4_crtc =
131 container_of(work, struct mdp4_crtc, pageflip_work);
132 struct drm_crtc *crtc = &mdp4_crtc->base;
133
134 mdp4_plane_set_scanout(mdp4_crtc->plane, crtc->fb);
135 crtc_flush(crtc);
136
137 /* enable vblank to complete flip: */
138 mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
139}
140
141static void unref_fb_worker(struct drm_flip_work *work, void *val)
142{
143 struct mdp4_crtc *mdp4_crtc =
144 container_of(work, struct mdp4_crtc, unref_fb_work);
145 struct drm_device *dev = mdp4_crtc->base.dev;
146
147 mutex_lock(&dev->mode_config.mutex);
148 drm_framebuffer_unreference(val);
149 mutex_unlock(&dev->mode_config.mutex);
150}
151
152static void unref_cursor_worker(struct drm_flip_work *work, void *val)
153{
154 struct mdp4_crtc *mdp4_crtc =
155 container_of(work, struct mdp4_crtc, unref_cursor_work);
156 struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
157
158 msm_gem_put_iova(val, mdp4_kms->id);
159 drm_gem_object_unreference_unlocked(val);
160}
161
162static void mdp4_crtc_destroy(struct drm_crtc *crtc)
163{
164 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
165
166 mdp4_crtc->plane->funcs->destroy(mdp4_crtc->plane);
167
168 drm_crtc_cleanup(crtc);
169 drm_flip_work_cleanup(&mdp4_crtc->unref_fb_work);
170 drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
171
172 kfree(mdp4_crtc);
173}
174
175static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode)
176{
177 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
178 struct mdp4_kms *mdp4_kms = get_kms(crtc);
179 bool enabled = (mode == DRM_MODE_DPMS_ON);
180
181 DBG("%s: mode=%d", mdp4_crtc->name, mode);
182
183 if (enabled != mdp4_crtc->enabled) {
184 if (enabled) {
185 mdp4_enable(mdp4_kms);
186 mdp4_irq_register(mdp4_kms, &mdp4_crtc->err);
187 } else {
188 mdp4_irq_unregister(mdp4_kms, &mdp4_crtc->err);
189 mdp4_disable(mdp4_kms);
190 }
191 mdp4_crtc->enabled = enabled;
192 }
193}
194
195static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
196 const struct drm_display_mode *mode,
197 struct drm_display_mode *adjusted_mode)
198{
199 return true;
200}
201
202static void blend_setup(struct drm_crtc *crtc)
203{
204 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
205 struct mdp4_kms *mdp4_kms = get_kms(crtc);
206 int i, ovlp = mdp4_crtc->ovlp;
207 uint32_t mixer_cfg = 0;
208
209 /*
210 * This probably would also need to be triggered by any attached
211 * plane when it changes.. for now since we are only using a single
212 * private plane, the configuration is hard-coded:
213 */
214
215 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
216 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
217 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
218 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
219
220 for (i = 0; i < 4; i++) {
221 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0);
222 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0);
223 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i),
224 MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
225 MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST));
226 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 0);
227 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
228 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
229 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
230 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
231 }
232
233 /* TODO single register for all CRTCs, so this won't work properly
234 * when multiple CRTCs are active..
235 */
236 switch (mdp4_plane_pipe(mdp4_crtc->plane)) {
237 case VG1:
238 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE0(STAGE_BASE) |
239 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
240 break;
241 case VG2:
242 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE1(STAGE_BASE) |
243 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
244 break;
245 case RGB1:
246 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE2(STAGE_BASE) |
247 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
248 break;
249 case RGB2:
250 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE3(STAGE_BASE) |
251 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
252 break;
253 case RGB3:
254 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE4(STAGE_BASE) |
255 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
256 break;
257 case VG3:
258 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE5(STAGE_BASE) |
259 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
260 break;
261 case VG4:
262 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE6(STAGE_BASE) |
263 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
264 break;
265 default:
266 WARN_ON("invalid pipe");
267 break;
268 }
269 mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
270}
271
272static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
273 struct drm_display_mode *mode,
274 struct drm_display_mode *adjusted_mode,
275 int x, int y,
276 struct drm_framebuffer *old_fb)
277{
278 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
279 struct mdp4_kms *mdp4_kms = get_kms(crtc);
280 enum mdp4_dma dma = mdp4_crtc->dma;
281 int ret, ovlp = mdp4_crtc->ovlp;
282
283 mode = adjusted_mode;
284
285 DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
286 mdp4_crtc->name, mode->base.id, mode->name,
287 mode->vrefresh, mode->clock,
288 mode->hdisplay, mode->hsync_start,
289 mode->hsync_end, mode->htotal,
290 mode->vdisplay, mode->vsync_start,
291 mode->vsync_end, mode->vtotal,
292 mode->type, mode->flags);
293
294 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
295 MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
296 MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
297
298 /* take data from pipe: */
299 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
300 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma),
301 crtc->fb->pitches[0]);
302 mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
303 MDP4_DMA_DST_SIZE_WIDTH(0) |
304 MDP4_DMA_DST_SIZE_HEIGHT(0));
305
306 mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
307 mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
308 MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
309 MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
310 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp),
311 crtc->fb->pitches[0]);
312
313 mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
314
315 update_fb(crtc, false, crtc->fb);
316
317 ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
318 0, 0, mode->hdisplay, mode->vdisplay,
319 x << 16, y << 16,
320 mode->hdisplay << 16, mode->vdisplay << 16);
321 if (ret) {
322 dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
323 mdp4_crtc->name, ret);
324 return ret;
325 }
326
327 if (dma == DMA_E) {
328 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
329 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
330 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
331 }
332
333 return 0;
334}
335
336static void mdp4_crtc_prepare(struct drm_crtc *crtc)
337{
338 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
339 DBG("%s", mdp4_crtc->name);
340 /* make sure we hold a ref to mdp clks while setting up mode: */
341 mdp4_enable(get_kms(crtc));
342 mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
343}
344
345static void mdp4_crtc_commit(struct drm_crtc *crtc)
346{
347 mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
348 crtc_flush(crtc);
349 /* drop the ref to mdp clk's that we got in prepare: */
350 mdp4_disable(get_kms(crtc));
351}
352
353static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
354 struct drm_framebuffer *old_fb)
355{
356 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
357 struct drm_plane *plane = mdp4_crtc->plane;
358 struct drm_display_mode *mode = &crtc->mode;
359
360 update_fb(crtc, false, crtc->fb);
361
362 return mdp4_plane_mode_set(plane, crtc, crtc->fb,
363 0, 0, mode->hdisplay, mode->vdisplay,
364 x << 16, y << 16,
365 mode->hdisplay << 16, mode->vdisplay << 16);
366}
367
368static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
369{
370}
371
372static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
373 struct drm_framebuffer *new_fb,
374 struct drm_pending_vblank_event *event,
375 uint32_t page_flip_flags)
376{
377 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
378 struct drm_device *dev = crtc->dev;
379 struct drm_gem_object *obj;
380
381 if (mdp4_crtc->event) {
382 dev_err(dev->dev, "already pending flip!\n");
383 return -EBUSY;
384 }
385
386 obj = msm_framebuffer_bo(new_fb, 0);
387
388 mdp4_crtc->event = event;
389 update_fb(crtc, true, new_fb);
390
391 return msm_gem_queue_inactive_work(obj,
392 &mdp4_crtc->pageflip_work);
393}
394
395static int mdp4_crtc_set_property(struct drm_crtc *crtc,
396 struct drm_property *property, uint64_t val)
397{
398 // XXX
399 return -EINVAL;
400}
401
402#define CURSOR_WIDTH 64
403#define CURSOR_HEIGHT 64
404
405/* called from IRQ to update cursor related registers (if needed). The
406 * cursor registers, other than x/y position, appear not to be double
407 * buffered, and changing them other than from vblank seems to trigger
408 * underflow.
409 */
410static void update_cursor(struct drm_crtc *crtc)
411{
412 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
413 enum mdp4_dma dma = mdp4_crtc->dma;
414 unsigned long flags;
415
416 spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
417 if (mdp4_crtc->cursor.stale) {
418 struct mdp4_kms *mdp4_kms = get_kms(crtc);
419 struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
420 struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
421 uint32_t iova = mdp4_crtc->cursor.next_iova;
422
423 if (next_bo) {
424 /* take an obj ref + iova ref when we start scanning out: */
425 drm_gem_object_reference(next_bo);
426 msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);
427
428 /* enable cursor: */
429 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
430 MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
431 MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
432 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
433 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
434 MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
435 MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
436 } else {
437 /* disable cursor: */
438 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0);
439 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
440 MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB));
441 }
442
443 /* and drop the iova ref + obj ref when done scanning out: */
444 if (prev_bo)
445 drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);
446
447 mdp4_crtc->cursor.scanout_bo = next_bo;
448 mdp4_crtc->cursor.stale = false;
449 }
450 spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
451}
452
453static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
454 struct drm_file *file_priv, uint32_t handle,
455 uint32_t width, uint32_t height)
456{
457 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
458 struct mdp4_kms *mdp4_kms = get_kms(crtc);
459 struct drm_device *dev = crtc->dev;
460 struct drm_gem_object *cursor_bo, *old_bo;
461 unsigned long flags;
462 uint32_t iova;
463 int ret;
464
465 if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
466 dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
467 return -EINVAL;
468 }
469
470 if (handle) {
471 cursor_bo = drm_gem_object_lookup(dev, file_priv, handle);
472 if (!cursor_bo)
473 return -ENOENT;
474 } else {
475 cursor_bo = NULL;
476 }
477
478 if (cursor_bo) {
479 ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
480 if (ret)
481 goto fail;
482 } else {
483 iova = 0;
484 }
485
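	/* note: this just stages the new cursor state under the lock; the
	 * actual register writes happen from vblank irq context, in
	 * update_cursor() above, since the cursor registers are not
	 * double-buffered.
	 */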
486 spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
487 old_bo = mdp4_crtc->cursor.next_bo;
488 mdp4_crtc->cursor.next_bo = cursor_bo;
489 mdp4_crtc->cursor.next_iova = iova;
490 mdp4_crtc->cursor.width = width;
491 mdp4_crtc->cursor.height = height;
492 mdp4_crtc->cursor.stale = true;
493 spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
494
495 if (old_bo) {
496 /* drop our previous reference: */
497 msm_gem_put_iova(old_bo, mdp4_kms->id);
498 drm_gem_object_unreference_unlocked(old_bo);
499 }
500
501 return 0;
502
503fail:
504 drm_gem_object_unreference_unlocked(cursor_bo);
505 return ret;
506}
507
508static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
509{
510 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
511 struct mdp4_kms *mdp4_kms = get_kms(crtc);
512 enum mdp4_dma dma = mdp4_crtc->dma;
513
514 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
515 MDP4_DMA_CURSOR_POS_X(x) |
516 MDP4_DMA_CURSOR_POS_Y(y));
517
518 return 0;
519}
520
521static const struct drm_crtc_funcs mdp4_crtc_funcs = {
522 .set_config = drm_crtc_helper_set_config,
523 .destroy = mdp4_crtc_destroy,
524 .page_flip = mdp4_crtc_page_flip,
525 .set_property = mdp4_crtc_set_property,
526 .cursor_set = mdp4_crtc_cursor_set,
527 .cursor_move = mdp4_crtc_cursor_move,
528};
529
530static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
531 .dpms = mdp4_crtc_dpms,
532 .mode_fixup = mdp4_crtc_mode_fixup,
533 .mode_set = mdp4_crtc_mode_set,
534 .prepare = mdp4_crtc_prepare,
535 .commit = mdp4_crtc_commit,
536 .mode_set_base = mdp4_crtc_mode_set_base,
537 .load_lut = mdp4_crtc_load_lut,
538};
539
540static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus)
541{
542 struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
543 struct drm_crtc *crtc = &mdp4_crtc->base;
544 struct msm_drm_private *priv = crtc->dev->dev_private;
545
546 update_cursor(crtc);
547 complete_flip(crtc, false);
548 mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank);
549
550 drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
551 drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
552}
553
554static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus)
555{
556 struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
557 struct drm_crtc *crtc = &mdp4_crtc->base;
558 DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
559 crtc_flush(crtc);
560}
561
562uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
563{
564 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
565 return mdp4_crtc->vblank.irqmask;
566}
567
568void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc)
569{
570 complete_flip(crtc, true);
571}
572
573/* set dma config, ie. the format the encoder wants. */
574void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
575{
576 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
577 struct mdp4_kms *mdp4_kms = get_kms(crtc);
578
579 mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
580}
581
582/* set interface for routing crtc->encoder: */
583void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf)
584{
585 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
586 struct mdp4_kms *mdp4_kms = get_kms(crtc);
587 uint32_t intf_sel;
588
589 intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);
590
591 switch (mdp4_crtc->dma) {
592 case DMA_P:
593 intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
594 intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
595 break;
596 case DMA_S:
597 intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
598 intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
599 break;
600 case DMA_E:
601 intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
602 intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
603 break;
604 }
605
606 if (intf == INTF_DSI_VIDEO) {
607 intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
608 intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
609 mdp4_crtc->mixer = 0;
610 } else if (intf == INTF_DSI_CMD) {
611 intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
612 intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
613 mdp4_crtc->mixer = 0;
614 } else if (intf == INTF_LCDC_DTV) {
615 mdp4_crtc->mixer = 1;
616 }
617
618 blend_setup(crtc);
619
620 DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);
621
622 mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
623}
624
625static const char *dma_names[] = {
626 "DMA_P", "DMA_S", "DMA_E",
627};
628
629/* initialize crtc */
630struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
631 struct drm_plane *plane, int id, int ovlp_id,
632 enum mdp4_dma dma_id)
633{
634 struct drm_crtc *crtc = NULL;
635 struct mdp4_crtc *mdp4_crtc;
636 int ret;
637
638 mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
639 if (!mdp4_crtc) {
640 ret = -ENOMEM;
641 goto fail;
642 }
643
644 crtc = &mdp4_crtc->base;
645
646 mdp4_crtc->plane = plane;
647 mdp4_crtc->plane->crtc = crtc;
648
649 mdp4_crtc->ovlp = ovlp_id;
650 mdp4_crtc->dma = dma_id;
651
652 mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
653 mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;
654
655 mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
656 mdp4_crtc->err.irq = mdp4_crtc_err_irq;
657
658 snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
659 dma_names[dma_id], ovlp_id);
660
661 spin_lock_init(&mdp4_crtc->cursor.lock);
662
663 ret = drm_flip_work_init(&mdp4_crtc->unref_fb_work, 16,
664 "unref fb", unref_fb_worker);
665 if (ret)
666 goto fail;
667
668 ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
669 "unref cursor", unref_cursor_worker);
670
671 INIT_WORK(&mdp4_crtc->pageflip_work, pageflip_worker);
672
673 drm_crtc_init(dev, crtc, &mdp4_crtc_funcs);
674 drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
675
676 mdp4_plane_install_properties(mdp4_crtc->plane, &crtc->base);
677
678 return crtc;
679
680fail:
681 if (crtc)
682 mdp4_crtc_destroy(crtc);
683
684 return ERR_PTR(ret);
685}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c
new file mode 100644
index 000000000000..5e0dcae70ab5
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c
@@ -0,0 +1,305 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <mach/clk.h>
19
20#include "mdp4_kms.h"
21
22#include "drm_crtc.h"
23#include "drm_crtc_helper.h"
24
25
26struct mdp4_dtv_encoder {
27 struct drm_encoder base;
28 struct clk *src_clk;
29 struct clk *hdmi_clk;
30 struct clk *mdp_clk;
31 unsigned long int pixclock;
32 bool enabled;
33 uint32_t bsc;
34};
35#define to_mdp4_dtv_encoder(x) container_of(x, struct mdp4_dtv_encoder, base)
36
37static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
38{
39 struct msm_drm_private *priv = encoder->dev->dev_private;
40 return to_mdp4_kms(priv->kms);
41}
42
43#ifdef CONFIG_MSM_BUS_SCALING
44#include <mach/board.h>
45/* not ironically named at all.. no, really.. */
46static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
47{
48 struct drm_device *dev = mdp4_dtv_encoder->base.dev;
49 struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");
50
51 if (!dtv_pdata) {
52 dev_err(dev->dev, "could not find dtv pdata\n");
53 return;
54 }
55
56 if (dtv_pdata->bus_scale_table) {
57 mdp4_dtv_encoder->bsc = msm_bus_scale_register_client(
58 dtv_pdata->bus_scale_table);
59 DBG("bus scale client: %08x", mdp4_dtv_encoder->bsc);
60 DBG("lcdc_power_save: %p", dtv_pdata->lcdc_power_save);
61 if (dtv_pdata->lcdc_power_save)
62 dtv_pdata->lcdc_power_save(1);
63 }
64}
65
66static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
67{
68 if (mdp4_dtv_encoder->bsc) {
69 msm_bus_scale_unregister_client(mdp4_dtv_encoder->bsc);
70 mdp4_dtv_encoder->bsc = 0;
71 }
72}
73
74static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx)
75{
76 if (mdp4_dtv_encoder->bsc) {
77 DBG("set bus scaling: %d", idx);
78 msm_bus_scale_client_update_request(mdp4_dtv_encoder->bsc, idx);
79 }
80}
81#else
82static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
83static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
84static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) {}
85#endif
86
87static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder)
88{
89 struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
90 bs_fini(mdp4_dtv_encoder);
91 drm_encoder_cleanup(encoder);
92 kfree(mdp4_dtv_encoder);
93}
94
95static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = {
96 .destroy = mdp4_dtv_encoder_destroy,
97};
98
99static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode)
100{
101 struct drm_device *dev = encoder->dev;
102 struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
103 struct mdp4_kms *mdp4_kms = get_kms(encoder);
104 bool enabled = (mode == DRM_MODE_DPMS_ON);
105
106 DBG("mode=%d", mode);
107
108 if (enabled == mdp4_dtv_encoder->enabled)
109 return;
110
111 if (enabled) {
112 unsigned long pc = mdp4_dtv_encoder->pixclock;
113 int ret;
114
115 bs_set(mdp4_dtv_encoder, 1);
116
117 DBG("setting src_clk=%lu", pc);
118
119 ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc);
120 if (ret)
121 dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret);
122 clk_prepare_enable(mdp4_dtv_encoder->src_clk);
123 ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
124 if (ret)
125 dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
126 ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
127 if (ret)
128 dev_err(dev->dev, "failed to enable mdp_clk: %d\n", ret);
129
130 mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
131 } else {
132 mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
133
134 /*
135 * Wait for a vsync so we know the ENABLE=0 latched before
136 * the (connector) source of the vsyncs gets disabled,
137 * otherwise we end up in a funny state if we re-enable
138 * before the disable latches, with the result that some of
139 * the settings changes for the new modeset (like the new
140 * scanout buffer) don't latch properly..
141 */
142 mdp4_irq_wait(mdp4_kms, MDP4_IRQ_EXTERNAL_VSYNC);
143
144 clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
145 clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
146 clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
147
148 bs_set(mdp4_dtv_encoder, 0);
149 }
150
151 mdp4_dtv_encoder->enabled = enabled;
152}
153
154static bool mdp4_dtv_encoder_mode_fixup(struct drm_encoder *encoder,
155 const struct drm_display_mode *mode,
156 struct drm_display_mode *adjusted_mode)
157{
158 return true;
159}
160
161static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder,
162 struct drm_display_mode *mode,
163 struct drm_display_mode *adjusted_mode)
164{
165 struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
166 struct mdp4_kms *mdp4_kms = get_kms(encoder);
167 uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
168 uint32_t display_v_start, display_v_end;
169 uint32_t hsync_start_x, hsync_end_x;
170
171 mode = adjusted_mode;
172
173 DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
174 mode->base.id, mode->name,
175 mode->vrefresh, mode->clock,
176 mode->hdisplay, mode->hsync_start,
177 mode->hsync_end, mode->htotal,
178 mode->vdisplay, mode->vsync_start,
179 mode->vsync_end, mode->vtotal,
180 mode->type, mode->flags);
181
182 mdp4_dtv_encoder->pixclock = mode->clock * 1000;
183
184 DBG("pixclock=%lu", mdp4_dtv_encoder->pixclock);
185
186 ctrl_pol = 0;
187 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
188 ctrl_pol |= MDP4_DTV_CTRL_POLARITY_HSYNC_LOW;
189 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
190 ctrl_pol |= MDP4_DTV_CTRL_POLARITY_VSYNC_LOW;
191 /* probably need to get DATA_EN polarity from panel.. */
192
193 dtv_hsync_skew = 0; /* get this from panel? */
194
195 hsync_start_x = (mode->htotal - mode->hsync_start);
196 hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
197
198 vsync_period = mode->vtotal * mode->htotal;
199 vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
200 display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
201 display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
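	/*
	 * For example, assuming standard CEA 1080p60 timings (hdisplay=1920,
	 * hsync_start=2008, hsync_end=2052, htotal=2200, vdisplay=1080,
	 * vsync_start=1084, vsync_end=1089, vtotal=1125, clock=148500 kHz)
	 * and dtv_hsync_skew=0, the values computed above work out to:
	 *
	 *   pixclock        = 148500 * 1000                      = 148500000 Hz
	 *   hsync_start_x   = 2200 - 2008                         = 192
	 *   hsync_end_x     = 2200 - (2008 - 1920) - 1            = 2111
	 *   vsync_period    = 1125 * 2200                          = 2475000
	 *   vsync_len       = (1089 - 1084) * 2200                 = 11000
	 *   display_v_start = (1125 - 1084) * 2200                 = 90200
	 *   display_v_end   = 2475000 - (1084 - 1080) * 2200 - 1   = 2466199
	 *
	 * ie. the horizontal positions appear to be measured in pixels from
	 * the start of hsync, and the vertical values in pixel-clocks
	 * (lines * htotal) from the start of vsync.
	 */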
202
203 mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_CTRL,
204 MDP4_DTV_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
205 MDP4_DTV_HSYNC_CTRL_PERIOD(mode->htotal));
206 mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_PERIOD, vsync_period);
207 mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_LEN, vsync_len);
208 mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_HCTRL,
209 MDP4_DTV_DISPLAY_HCTRL_START(hsync_start_x) |
210 MDP4_DTV_DISPLAY_HCTRL_END(hsync_end_x));
211 mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VSTART, display_v_start);
212 mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VEND, display_v_end);
213 mdp4_write(mdp4_kms, REG_MDP4_DTV_BORDER_CLR, 0);
214 mdp4_write(mdp4_kms, REG_MDP4_DTV_UNDERFLOW_CLR,
215 MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY |
216 MDP4_DTV_UNDERFLOW_CLR_COLOR(0xff));
217 mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_SKEW, dtv_hsync_skew);
218 mdp4_write(mdp4_kms, REG_MDP4_DTV_CTRL_POLARITY, ctrl_pol);
219 mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_HCTL,
220 MDP4_DTV_ACTIVE_HCTL_START(0) |
221 MDP4_DTV_ACTIVE_HCTL_END(0));
222 mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VSTART, 0);
223 mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VEND, 0);
224}
225
226static void mdp4_dtv_encoder_prepare(struct drm_encoder *encoder)
227{
228 mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
229}
230
231static void mdp4_dtv_encoder_commit(struct drm_encoder *encoder)
232{
233 mdp4_crtc_set_config(encoder->crtc,
234 MDP4_DMA_CONFIG_R_BPC(BPC8) |
235 MDP4_DMA_CONFIG_G_BPC(BPC8) |
236 MDP4_DMA_CONFIG_B_BPC(BPC8) |
237 MDP4_DMA_CONFIG_PACK(0x21));
238 mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV);
239 mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
240}
241
242static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = {
243 .dpms = mdp4_dtv_encoder_dpms,
244 .mode_fixup = mdp4_dtv_encoder_mode_fixup,
245 .mode_set = mdp4_dtv_encoder_mode_set,
246 .prepare = mdp4_dtv_encoder_prepare,
247 .commit = mdp4_dtv_encoder_commit,
248};
249
250long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
251{
252 struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
253 return clk_round_rate(mdp4_dtv_encoder->src_clk, rate);
254}
255
256/* initialize encoder */
257struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
258{
259 struct drm_encoder *encoder = NULL;
260 struct mdp4_dtv_encoder *mdp4_dtv_encoder;
261 int ret;
262
263 mdp4_dtv_encoder = kzalloc(sizeof(*mdp4_dtv_encoder), GFP_KERNEL);
264 if (!mdp4_dtv_encoder) {
265 ret = -ENOMEM;
266 goto fail;
267 }
268
269 encoder = &mdp4_dtv_encoder->base;
270
271 drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs,
272 DRM_MODE_ENCODER_TMDS);
273 drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
274
275 mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk");
276 if (IS_ERR(mdp4_dtv_encoder->src_clk)) {
277 dev_err(dev->dev, "failed to get src_clk\n");
278 ret = PTR_ERR(mdp4_dtv_encoder->src_clk);
279 goto fail;
280 }
281
282 mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
283 if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
284 dev_err(dev->dev, "failed to get hdmi_clk\n");
285 ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
286 goto fail;
287 }
288
289 mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "mdp_clk");
290 if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
291 dev_err(dev->dev, "failed to get mdp_clk\n");
292 ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
293 goto fail;
294 }
295
296 bs_init(mdp4_dtv_encoder);
297
298 return encoder;
299
300fail:
301 if (encoder)
302 mdp4_dtv_encoder_destroy(encoder);
303
304 return ERR_PTR(ret);
305}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_format.c b/drivers/gpu/drm/msm/mdp4/mdp4_format.c
new file mode 100644
index 000000000000..7b645f2e837a
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_format.c
@@ -0,0 +1,56 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18
19#include "msm_drv.h"
20#include "mdp4_kms.h"
21
22#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt) { \
23 .base = { .pixel_format = DRM_FORMAT_ ## name }, \
24 .bpc_a = BPC ## a ## A, \
25 .bpc_r = BPC ## r, \
26 .bpc_g = BPC ## g, \
27 .bpc_b = BPC ## b, \
28 .unpack = { e0, e1, e2, e3 }, \
29 .alpha_enable = alpha, \
30 .unpack_tight = tight, \
31 .cpp = c, \
32 .unpack_count = cnt, \
33 }
34
35#define BPC0A 0
36
37static const struct mdp4_format formats[] = {
38 /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt */
39 FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4),
40 FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4),
41 FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3),
42 FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3),
43 FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3),
44 FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3),
45};
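/* Each FMT() row above expands, via the macro at the top of this file, to a
 * struct mdp4_format initializer; the ARGB8888 entry, for example, is
 * equivalent to:
 *
 *	{
 *		.base = { .pixel_format = DRM_FORMAT_ARGB8888 },
 *		.bpc_a = BPC8A,
 *		.bpc_r = BPC8, .bpc_g = BPC8, .bpc_b = BPC8,
 *		.unpack = { 1, 0, 2, 3 },
 *		.alpha_enable = true, .unpack_tight = true,
 *		.cpp = 4, .unpack_count = 4,
 *	},
 */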
46
47const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format)
48{
49 int i;
50 for (i = 0; i < ARRAY_SIZE(formats); i++) {
51 const struct mdp4_format *f = &formats[i];
52 if (f->base.pixel_format == format)
53 return &f->base;
54 }
55 return NULL;
56}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp4/mdp4_irq.c
new file mode 100644
index 000000000000..5c6b7fca4edd
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_irq.c
@@ -0,0 +1,203 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18
19#include "msm_drv.h"
20#include "mdp4_kms.h"
21
22
23struct mdp4_irq_wait {
24 struct mdp4_irq irq;
25 int count;
26};
27
28static DECLARE_WAIT_QUEUE_HEAD(wait_event);
29
30static DEFINE_SPINLOCK(list_lock);
31
32static void update_irq(struct mdp4_kms *mdp4_kms)
33{
34 struct mdp4_irq *irq;
35 uint32_t irqmask = mdp4_kms->vblank_mask;
36
37 BUG_ON(!spin_is_locked(&list_lock));
38
39 list_for_each_entry(irq, &mdp4_kms->irq_list, node)
40 irqmask |= irq->irqmask;
41
42 mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, irqmask);
43}
44
45static void update_irq_unlocked(struct mdp4_kms *mdp4_kms)
46{
47 unsigned long flags;
48 spin_lock_irqsave(&list_lock, flags);
49 update_irq(mdp4_kms);
50 spin_unlock_irqrestore(&list_lock, flags);
51}
52
53static void mdp4_irq_error_handler(struct mdp4_irq *irq, uint32_t irqstatus)
54{
55 DRM_ERROR("errors: %08x\n", irqstatus);
56}
57
58void mdp4_irq_preinstall(struct msm_kms *kms)
59{
60 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
61 mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
62}
63
64int mdp4_irq_postinstall(struct msm_kms *kms)
65{
66 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
67 struct mdp4_irq *error_handler = &mdp4_kms->error_handler;
68
69 INIT_LIST_HEAD(&mdp4_kms->irq_list);
70
71 error_handler->irq = mdp4_irq_error_handler;
72 error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
73 MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
74
75 mdp4_irq_register(mdp4_kms, error_handler);
76
77 return 0;
78}
79
80void mdp4_irq_uninstall(struct msm_kms *kms)
81{
82 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
83 mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
84}
85
86irqreturn_t mdp4_irq(struct msm_kms *kms)
87{
88 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
89 struct drm_device *dev = mdp4_kms->dev;
90 struct msm_drm_private *priv = dev->dev_private;
91 struct mdp4_irq *handler, *n;
92 unsigned long flags;
93 unsigned int id;
94 uint32_t status;
95
96 status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
97 mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
98
99 VERB("status=%08x", status);
100
101 for (id = 0; id < priv->num_crtcs; id++)
102 if (status & mdp4_crtc_vblank(priv->crtcs[id]))
103 drm_handle_vblank(dev, id);
104
105 spin_lock_irqsave(&list_lock, flags);
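	/* while in_irq is set, mdp4_irq_register()/unregister() skip their
	 * own INTR_ENABLE update and rely on the single update_irq() call
	 * after this loop instead:
	 */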
106 mdp4_kms->in_irq = true;
107 list_for_each_entry_safe(handler, n, &mdp4_kms->irq_list, node) {
108 if (handler->irqmask & status) {
109 spin_unlock_irqrestore(&list_lock, flags);
110 handler->irq(handler, handler->irqmask & status);
111 spin_lock_irqsave(&list_lock, flags);
112 }
113 }
114 mdp4_kms->in_irq = false;
115 update_irq(mdp4_kms);
116 spin_unlock_irqrestore(&list_lock, flags);
117
118 return IRQ_HANDLED;
119}
120
121int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
122{
123 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
124 unsigned long flags;
125
126 spin_lock_irqsave(&list_lock, flags);
127 mdp4_kms->vblank_mask |= mdp4_crtc_vblank(crtc);
128 update_irq(mdp4_kms);
129 spin_unlock_irqrestore(&list_lock, flags);
130
131 return 0;
132}
133
134void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
135{
136 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
137 unsigned long flags;
138
139 spin_lock_irqsave(&list_lock, flags);
140 mdp4_kms->vblank_mask &= ~mdp4_crtc_vblank(crtc);
141 update_irq(mdp4_kms);
142 spin_unlock_irqrestore(&list_lock, flags);
143}
144
145static void wait_irq(struct mdp4_irq *irq, uint32_t irqstatus)
146{
147 struct mdp4_irq_wait *wait =
148 container_of(irq, struct mdp4_irq_wait, irq);
149 wait->count--;
150 wake_up_all(&wait_event);
151}
152
153void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask)
154{
155 struct mdp4_irq_wait wait = {
156 .irq = {
157 .irq = wait_irq,
158 .irqmask = irqmask,
159 },
160 .count = 1,
161 };
162 mdp4_irq_register(mdp4_kms, &wait.irq);
163 wait_event(wait_event, (wait.count <= 0));
164 mdp4_irq_unregister(mdp4_kms, &wait.irq);
165}
166
167void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
168{
169 unsigned long flags;
170 bool needs_update = false;
171
172 spin_lock_irqsave(&list_lock, flags);
173
174 if (!irq->registered) {
175 irq->registered = true;
176 list_add(&irq->node, &mdp4_kms->irq_list);
177 needs_update = !mdp4_kms->in_irq;
178 }
179
180 spin_unlock_irqrestore(&list_lock, flags);
181
182 if (needs_update)
183 update_irq_unlocked(mdp4_kms);
184}
185
186void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
187{
188 unsigned long flags;
189 bool needs_update = false;
190
191 spin_lock_irqsave(&list_lock, flags);
192
193 if (irq->registered) {
194 irq->registered = false;
195 list_del(&irq->node);
196 needs_update = !mdp4_kms->in_irq;
197 }
198
199 spin_unlock_irqrestore(&list_lock, flags);
200
201 if (needs_update)
202 update_irq_unlocked(mdp4_kms);
203}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
new file mode 100644
index 000000000000..5db5bbaedae2
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
@@ -0,0 +1,365 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18
19#include "msm_drv.h"
20#include "mdp4_kms.h"
21
22#include <mach/iommu.h>
23
24static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
25
26static int mdp4_hw_init(struct msm_kms *kms)
27{
28 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
29 struct drm_device *dev = mdp4_kms->dev;
30 uint32_t version, major, minor, dmap_cfg, vg_cfg;
31 unsigned long clk;
32 int ret = 0;
33
34 pm_runtime_get_sync(dev->dev);
35
36 version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
37
38 major = FIELD(version, MDP4_VERSION_MAJOR);
39 minor = FIELD(version, MDP4_VERSION_MINOR);
40
41 DBG("found MDP version v%d.%d", major, minor);
42
43 if (major != 4) {
44 dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
45 major, minor);
46 ret = -ENXIO;
47 goto out;
48 }
49
50 mdp4_kms->rev = minor;
51
52 if (mdp4_kms->dsi_pll_vdda) {
53 if ((mdp4_kms->rev == 2) || (mdp4_kms->rev == 4)) {
54 ret = regulator_set_voltage(mdp4_kms->dsi_pll_vdda,
55 1200000, 1200000);
56 if (ret) {
57 dev_err(dev->dev,
58 "failed to set dsi_pll_vdda voltage: %d\n", ret);
59 goto out;
60 }
61 }
62 }
63
64 if (mdp4_kms->dsi_pll_vddio) {
65 if (mdp4_kms->rev == 2) {
66 ret = regulator_set_voltage(mdp4_kms->dsi_pll_vddio,
67 1800000, 1800000);
68 if (ret) {
69 dev_err(dev->dev,
70 "failed to set dsi_pll_vddio voltage: %d\n", ret);
71 goto out;
72 }
73 }
74 }
75
76 if (mdp4_kms->rev > 1) {
77 mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
78 mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
79 }
80
81 mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);
82
83 /* max read pending cmd config, 3 pending requests: */
84 mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);
85
86 clk = clk_get_rate(mdp4_kms->clk);
87
88 if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
89 dmap_cfg = 0x47; /* 16 bytes-burst x 8 req */
90 vg_cfg = 0x47; /* 16 bytes-burst x 8 req */
91 } else {
92 dmap_cfg = 0x27; /* 8 bytes-burst x 8 req */
93 vg_cfg = 0x43; /* 16 bytes-burst x 4 req */
94 }
95
96 DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);
97
98 mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
99 mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);
100
101 mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
102 mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
103 mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
104 mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);
105
106 if (mdp4_kms->rev >= 2)
107 mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);
108
109 /* disable CSC matrix / YUV by default: */
110 mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
111 mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
112 mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
113 mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
114 mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
115 mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);
116
117 if (mdp4_kms->rev > 1)
118 mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
119
120out:
121 pm_runtime_put_sync(dev->dev);
122
123 return ret;
124}
125
126static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
127 struct drm_encoder *encoder)
128{
129 /* if we had >1 encoder, we'd need something more clever: */
130 return mdp4_dtv_round_pixclk(encoder, rate);
131}
132
133static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
134{
135 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
136 struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
137 unsigned i;
138
139 for (i = 0; i < priv->num_crtcs; i++)
140 mdp4_crtc_cancel_pending_flip(priv->crtcs[i]);
141}
142
143static void mdp4_destroy(struct msm_kms *kms)
144{
145 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
146 kfree(mdp4_kms);
147}
148
149static const struct msm_kms_funcs kms_funcs = {
150 .hw_init = mdp4_hw_init,
151 .irq_preinstall = mdp4_irq_preinstall,
152 .irq_postinstall = mdp4_irq_postinstall,
153 .irq_uninstall = mdp4_irq_uninstall,
154 .irq = mdp4_irq,
155 .enable_vblank = mdp4_enable_vblank,
156 .disable_vblank = mdp4_disable_vblank,
157 .get_format = mdp4_get_format,
158 .round_pixclk = mdp4_round_pixclk,
159 .preclose = mdp4_preclose,
160 .destroy = mdp4_destroy,
161};
162
163int mdp4_disable(struct mdp4_kms *mdp4_kms)
164{
165 DBG("");
166
167 clk_disable_unprepare(mdp4_kms->clk);
168 if (mdp4_kms->pclk)
169 clk_disable_unprepare(mdp4_kms->pclk);
170 clk_disable_unprepare(mdp4_kms->lut_clk);
171
172 return 0;
173}
174
175int mdp4_enable(struct mdp4_kms *mdp4_kms)
176{
177 DBG("");
178
179 clk_prepare_enable(mdp4_kms->clk);
180 if (mdp4_kms->pclk)
181 clk_prepare_enable(mdp4_kms->pclk);
182 clk_prepare_enable(mdp4_kms->lut_clk);
183
184 return 0;
185}
186
187static int modeset_init(struct mdp4_kms *mdp4_kms)
188{
189 struct drm_device *dev = mdp4_kms->dev;
190 struct msm_drm_private *priv = dev->dev_private;
191 struct drm_plane *plane;
192 struct drm_crtc *crtc;
193 struct drm_encoder *encoder;
194 int ret;
195
196 /*
197 * NOTE: this is a bit simplistic until we add support
198 * for more than just RGB1->DMA_E->DTV->HDMI
199 */
200
201 /* the CRTCs get constructed with a private plane: */
202 plane = mdp4_plane_init(dev, RGB1, true);
203 if (IS_ERR(plane)) {
204 dev_err(dev->dev, "failed to construct plane for RGB1\n");
205 ret = PTR_ERR(plane);
206 goto fail;
207 }
208
209 crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 1, DMA_E);
210 if (IS_ERR(crtc)) {
211 dev_err(dev->dev, "failed to construct crtc for DMA_E\n");
212 ret = PTR_ERR(crtc);
213 goto fail;
214 }
215 priv->crtcs[priv->num_crtcs++] = crtc;
216
217 encoder = mdp4_dtv_encoder_init(dev);
218 if (IS_ERR(encoder)) {
219 dev_err(dev->dev, "failed to construct DTV encoder\n");
220 ret = PTR_ERR(encoder);
221 goto fail;
222 }
223 encoder->possible_crtcs = 0x1; /* DTV can be hooked to DMA_E */
224 priv->encoders[priv->num_encoders++] = encoder;
225
226 ret = hdmi_init(dev, encoder);
227 if (ret) {
228 dev_err(dev->dev, "failed to initialize HDMI\n");
229 goto fail;
230 }
231
232 return 0;
233
234fail:
235 return ret;
236}
237
238static const char *iommu_ports[] = {
239 "mdp_port0_cb0", "mdp_port1_cb0",
240};
241
242struct msm_kms *mdp4_kms_init(struct drm_device *dev)
243{
244 struct platform_device *pdev = dev->platformdev;
245 struct mdp4_platform_config *config = mdp4_get_config(pdev);
246 struct mdp4_kms *mdp4_kms;
247 struct msm_kms *kms = NULL;
248 int ret;
249
250 mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
251 if (!mdp4_kms) {
252 dev_err(dev->dev, "failed to allocate kms\n");
253 ret = -ENOMEM;
254 goto fail;
255 }
256
257 kms = &mdp4_kms->base;
258 kms->funcs = &kms_funcs;
259
260 mdp4_kms->dev = dev;
261
262 mdp4_kms->mmio = msm_ioremap(pdev, NULL, "MDP4");
263 if (IS_ERR(mdp4_kms->mmio)) {
264 ret = PTR_ERR(mdp4_kms->mmio);
265 goto fail;
266 }
267
268 mdp4_kms->dsi_pll_vdda = devm_regulator_get(&pdev->dev, "dsi_pll_vdda");
269 if (IS_ERR(mdp4_kms->dsi_pll_vdda))
270 mdp4_kms->dsi_pll_vdda = NULL;
271
272 mdp4_kms->dsi_pll_vddio = devm_regulator_get(&pdev->dev, "dsi_pll_vddio");
273 if (IS_ERR(mdp4_kms->dsi_pll_vddio))
274 mdp4_kms->dsi_pll_vddio = NULL;
275
276 mdp4_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
277 if (IS_ERR(mdp4_kms->vdd))
278 mdp4_kms->vdd = NULL;
279
280 if (mdp4_kms->vdd) {
281 ret = regulator_enable(mdp4_kms->vdd);
282 if (ret) {
283 dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
284 goto fail;
285 }
286 }
287
288 mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
289 if (IS_ERR(mdp4_kms->clk)) {
290 dev_err(dev->dev, "failed to get core_clk\n");
291 ret = PTR_ERR(mdp4_kms->clk);
292 goto fail;
293 }
294
295 mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
296 if (IS_ERR(mdp4_kms->pclk))
297 mdp4_kms->pclk = NULL;
298
299 // XXX if (rev >= MDP_REV_42) { ???
300 mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
301 if (IS_ERR(mdp4_kms->lut_clk)) {
302 dev_err(dev->dev, "failed to get lut_clk\n");
303 ret = PTR_ERR(mdp4_kms->lut_clk);
304 goto fail;
305 }
306
307 clk_set_rate(mdp4_kms->clk, config->max_clk);
308 clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
309
310 if (!config->iommu) {
311 dev_err(dev->dev, "no iommu\n");
312 ret = -ENXIO;
313 goto fail;
314 }
315
316 /* make sure things are off before attaching iommu (bootloader could
317 * have left things on, in which case we'll start getting faults if
318 * we don't disable):
319 */
320 mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
321 mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
322 mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
323 mdelay(16);
324
325 ret = msm_iommu_attach(dev, config->iommu,
326 iommu_ports, ARRAY_SIZE(iommu_ports));
327 if (ret)
328 goto fail;
329
330 mdp4_kms->id = msm_register_iommu(dev, config->iommu);
331 if (mdp4_kms->id < 0) {
332 ret = mdp4_kms->id;
333 dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
334 goto fail;
335 }
336
337 ret = modeset_init(mdp4_kms);
338 if (ret) {
339 dev_err(dev->dev, "modeset_init failed: %d\n", ret);
340 goto fail;
341 }
342
343 return kms;
344
345fail:
346 if (kms)
347 mdp4_destroy(kms);
348 return ERR_PTR(ret);
349}
350
351static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
352{
353 static struct mdp4_platform_config config = {};
354#ifdef CONFIG_OF
355 /* TODO */
356#else
357 if (cpu_is_apq8064())
358 config.max_clk = 266667000;
359 else
360 config.max_clk = 200000000;
361
362 config.iommu = msm_get_iommu_domain(DISPLAY_READ_DOMAIN);
363#endif
364 return &config;
365}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
new file mode 100644
index 000000000000..1e83554955f3
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
@@ -0,0 +1,194 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MDP4_KMS_H__
19#define __MDP4_KMS_H__
20
21#include <linux/clk.h>
22#include <linux/platform_device.h>
23#include <linux/regulator/consumer.h>
24
25#include "msm_drv.h"
26#include "mdp4.xml.h"
27
28
29/* For transiently registering for different MDP4 irqs that various parts
30 * of the KMS code need during setup/configuration. These are not
31 * necessarily the same as what drm_vblank_get/put() are requesting, and
32 * the hysteresis in drm_vblank_put() is not necessarily desirable for
33 * internal housekeeping related irq usage.
34 */
35struct mdp4_irq {
36 struct list_head node;
37 uint32_t irqmask;
38 bool registered;
39 void (*irq)(struct mdp4_irq *irq, uint32_t irqstatus);
40};
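/* A typical user embeds one of these and points ->irq at a callback, along
 * the lines of this (hypothetical) sketch; mdp4_crtc does the same for its
 * vblank and error irqs:
 *
 *	static void my_dma_done_irq(struct mdp4_irq *irq, uint32_t irqstatus)
 *	{
 *		... runs from the MDP ISR when the DMA_P done bit is set ...
 *	}
 *
 *	my_irq.irqmask = dma2irq(DMA_P);
 *	my_irq.irq = my_dma_done_irq;
 *	mdp4_irq_register(mdp4_kms, &my_irq);
 *	...
 *	mdp4_irq_unregister(mdp4_kms, &my_irq);
 */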
41
42struct mdp4_kms {
43 struct msm_kms base;
44
45 struct drm_device *dev;
46
47 int rev;
48
49 /* mapper-id used to request GEM buffers mapped for scanout: */
50 int id;
51
52 void __iomem *mmio;
53
54 struct regulator *dsi_pll_vdda;
55 struct regulator *dsi_pll_vddio;
56 struct regulator *vdd;
57
58 struct clk *clk;
59 struct clk *pclk;
60 struct clk *lut_clk;
61
62 /* irq handling: */
63 bool in_irq;
64 struct list_head irq_list; /* list of mdp4_irq */
65 uint32_t vblank_mask; /* irq bits set for userspace vblank */
66 struct mdp4_irq error_handler;
67};
68#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
69
70/* platform config data (ie. from DT, or pdata) */
71struct mdp4_platform_config {
72 struct iommu_domain *iommu;
73 uint32_t max_clk;
74};
75
76struct mdp4_format {
77 struct msm_format base;
78 enum mpd4_bpc bpc_r, bpc_g, bpc_b;
79 enum mpd4_bpc_alpha bpc_a;
80 uint8_t unpack[4];
81 bool alpha_enable, unpack_tight;
82 uint8_t cpp, unpack_count;
83};
84#define to_mdp4_format(x) container_of(x, struct mdp4_format, base)
85
86static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data)
87{
88 msm_writel(data, mdp4_kms->mmio + reg);
89}
90
91static inline u32 mdp4_read(struct mdp4_kms *mdp4_kms, u32 reg)
92{
93 return msm_readl(mdp4_kms->mmio + reg);
94}
95
96static inline uint32_t pipe2flush(enum mpd4_pipe pipe)
97{
98 switch (pipe) {
99 case VG1: return MDP4_OVERLAY_FLUSH_VG1;
100 case VG2: return MDP4_OVERLAY_FLUSH_VG2;
101 case RGB1: return MDP4_OVERLAY_FLUSH_RGB1;
102 case RGB2: return MDP4_OVERLAY_FLUSH_RGB2;
103 default: return 0;
104 }
105}
106
107static inline uint32_t ovlp2flush(int ovlp)
108{
109 switch (ovlp) {
110 case 0: return MDP4_OVERLAY_FLUSH_OVLP0;
111 case 1: return MDP4_OVERLAY_FLUSH_OVLP1;
112 default: return 0;
113 }
114}
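/* a crtc flush would then presumably OR these together and write the
 * overlay flush register, roughly (sketch only; the actual crtc_flush()
 * lives in mdp4_crtc.c):
 *
 *	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH,
 *			ovlp2flush(ovlp) | pipe2flush(pipe));
 */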
115
116static inline uint32_t dma2irq(enum mdp4_dma dma)
117{
118 switch (dma) {
119 case DMA_P: return MDP4_IRQ_DMA_P_DONE;
120 case DMA_S: return MDP4_IRQ_DMA_S_DONE;
121 case DMA_E: return MDP4_IRQ_DMA_E_DONE;
122 default: return 0;
123 }
124}
125
126static inline uint32_t dma2err(enum mdp4_dma dma)
127{
128 switch (dma) {
129 case DMA_P: return MDP4_IRQ_PRIMARY_INTF_UDERRUN;
130 case DMA_S: return 0; // ???
131 case DMA_E: return MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
132 default: return 0;
133 }
134}
135
136int mdp4_disable(struct mdp4_kms *mdp4_kms);
137int mdp4_enable(struct mdp4_kms *mdp4_kms);
138
139void mdp4_irq_preinstall(struct msm_kms *kms);
140int mdp4_irq_postinstall(struct msm_kms *kms);
141void mdp4_irq_uninstall(struct msm_kms *kms);
142irqreturn_t mdp4_irq(struct msm_kms *kms);
143void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask);
144void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
145void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
146int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
147void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
148
149const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format);
150
151void mdp4_plane_install_properties(struct drm_plane *plane,
152 struct drm_mode_object *obj);
153void mdp4_plane_set_scanout(struct drm_plane *plane,
154 struct drm_framebuffer *fb);
155int mdp4_plane_mode_set(struct drm_plane *plane,
156 struct drm_crtc *crtc, struct drm_framebuffer *fb,
157 int crtc_x, int crtc_y,
158 unsigned int crtc_w, unsigned int crtc_h,
159 uint32_t src_x, uint32_t src_y,
160 uint32_t src_w, uint32_t src_h);
161enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane);
162struct drm_plane *mdp4_plane_init(struct drm_device *dev,
163 enum mpd4_pipe pipe_id, bool private_plane);
164
165uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
166void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc);
167void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
168void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf);
169struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
170 struct drm_plane *plane, int id, int ovlp_id,
171 enum mdp4_dma dma_id);
172
173long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
174struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev);
175
176#ifdef CONFIG_MSM_BUS_SCALING
177static inline int match_dev_name(struct device *dev, void *data)
178{
179 return !strcmp(dev_name(dev), data);
180}
181/* bus scaling data is associated with extra pointless platform devices,
182 * "dtv", etc.. this is a bit of a hack, but we need a way for encoders
183 * to find their pdata to make the bus-scaling stuff work.
184 */
185static inline void *mdp4_find_pdata(const char *devname)
186{
187 struct device *dev;
188 dev = bus_find_device(&platform_bus_type, NULL,
189 (void *)devname, match_dev_name);
190 return dev ? dev->platform_data : NULL;
191}
192#endif
193
194#endif /* __MDP4_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
new file mode 100644
index 000000000000..3468229d58b3
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
@@ -0,0 +1,243 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "mdp4_kms.h"
19
20
21struct mdp4_plane {
22 struct drm_plane base;
23 const char *name;
24
25 enum mpd4_pipe pipe;
26
27 uint32_t nformats;
28 uint32_t formats[32];
29
30 bool enabled;
31};
32#define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base)
33
34static struct mdp4_kms *get_kms(struct drm_plane *plane)
35{
36 struct msm_drm_private *priv = plane->dev->dev_private;
37 return to_mdp4_kms(priv->kms);
38}
39
40static int mdp4_plane_update(struct drm_plane *plane,
41 struct drm_crtc *crtc, struct drm_framebuffer *fb,
42 int crtc_x, int crtc_y,
43 unsigned int crtc_w, unsigned int crtc_h,
44 uint32_t src_x, uint32_t src_y,
45 uint32_t src_w, uint32_t src_h)
46{
47 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
48
49 mdp4_plane->enabled = true;
50
51 if (plane->fb)
52 drm_framebuffer_unreference(plane->fb);
53
54 drm_framebuffer_reference(fb);
55
56 return mdp4_plane_mode_set(plane, crtc, fb,
57 crtc_x, crtc_y, crtc_w, crtc_h,
58 src_x, src_y, src_w, src_h);
59}
60
61static int mdp4_plane_disable(struct drm_plane *plane)
62{
63 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
64 DBG("%s: TODO", mdp4_plane->name); // XXX
65 return 0;
66}
67
68static void mdp4_plane_destroy(struct drm_plane *plane)
69{
70 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
71
72 mdp4_plane_disable(plane);
73 drm_plane_cleanup(plane);
74
75 kfree(mdp4_plane);
76}
77
78/* helper to install properties which are common to planes and crtcs */
79void mdp4_plane_install_properties(struct drm_plane *plane,
80 struct drm_mode_object *obj)
81{
82 // XXX
83}
84
85int mdp4_plane_set_property(struct drm_plane *plane,
86 struct drm_property *property, uint64_t val)
87{
88 // XXX
89 return -EINVAL;
90}
91
92static const struct drm_plane_funcs mdp4_plane_funcs = {
93 .update_plane = mdp4_plane_update,
94 .disable_plane = mdp4_plane_disable,
95 .destroy = mdp4_plane_destroy,
96 .set_property = mdp4_plane_set_property,
97};
98
99void mdp4_plane_set_scanout(struct drm_plane *plane,
100 struct drm_framebuffer *fb)
101{
102 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
103 struct mdp4_kms *mdp4_kms = get_kms(plane);
104 enum mpd4_pipe pipe = mdp4_plane->pipe;
105 uint32_t iova;
106
107 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
108 MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
109 MDP4_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
110
111 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_B(pipe),
112 MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
113 MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
114
115 msm_gem_get_iova(msm_framebuffer_bo(fb, 0), mdp4_kms->id, &iova);
116 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), iova);
117
118 plane->fb = fb;
119}
120
121#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000
122
123int mdp4_plane_mode_set(struct drm_plane *plane,
124 struct drm_crtc *crtc, struct drm_framebuffer *fb,
125 int crtc_x, int crtc_y,
126 unsigned int crtc_w, unsigned int crtc_h,
127 uint32_t src_x, uint32_t src_y,
128 uint32_t src_w, uint32_t src_h)
129{
130 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
131 struct mdp4_kms *mdp4_kms = get_kms(plane);
132 enum mpd4_pipe pipe = mdp4_plane->pipe;
133 const struct mdp4_format *format;
134 uint32_t op_mode = 0;
135 uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
136 uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
137
138 /* src values are in Q16 fixed point, convert to integer: */
139 src_x = src_x >> 16;
140 src_y = src_y >> 16;
141 src_w = src_w >> 16;
142 src_h = src_h >> 16;
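	/* eg. a full-screen update from the crtc code passes src_w as
	 * mode->hdisplay << 16, so a 1920 pixel wide mode arrives here as
	 * 0x07800000 and becomes 1920 again after the shift.
	 */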
143
144 if (src_w != crtc_w) {
145 op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN;
146 /* TODO calc phasex_step */
147 }
148
149 if (src_h != crtc_h) {
150 op_mode |= MDP4_PIPE_OP_MODE_SCALEY_EN;
151 /* TODO calc phasey_step */
152 }
153
154 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_SIZE(pipe),
155 MDP4_PIPE_SRC_SIZE_WIDTH(src_w) |
156 MDP4_PIPE_SRC_SIZE_HEIGHT(src_h));
157
158 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_XY(pipe),
159 MDP4_PIPE_SRC_XY_X(src_x) |
160 MDP4_PIPE_SRC_XY_Y(src_y));
161
162 mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_SIZE(pipe),
163 MDP4_PIPE_DST_SIZE_WIDTH(crtc_w) |
164 MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h));
165
166 mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe),
167 MDP4_PIPE_DST_XY_X(crtc_x) |
168 MDP4_PIPE_DST_XY_Y(crtc_y));
169
170 mdp4_plane_set_scanout(plane, fb);
171
172 format = to_mdp4_format(msm_framebuffer_format(fb));
173
174 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe),
175 MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
176 MDP4_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
177 MDP4_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
178 MDP4_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
179 COND(format->alpha_enable, MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
180 MDP4_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
181 MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
182 COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT));
183
184 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe),
185 MDP4_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
186 MDP4_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
187 MDP4_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
188 MDP4_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
189
190 mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(pipe), op_mode);
191 mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
192 mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
193
194 plane->crtc = crtc;
195
196 return 0;
197}
198
199static const char *pipe_names[] = {
200 "VG1", "VG2",
201 "RGB1", "RGB2", "RGB3",
202 "VG3", "VG4",
203};
204
205enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane)
206{
207 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
208 return mdp4_plane->pipe;
209}
210
211/* initialize plane */
212struct drm_plane *mdp4_plane_init(struct drm_device *dev,
213 enum mpd4_pipe pipe_id, bool private_plane)
214{
215 struct msm_drm_private *priv = dev->dev_private;
216 struct drm_plane *plane = NULL;
217 struct mdp4_plane *mdp4_plane;
218 int ret;
219
220 mdp4_plane = kzalloc(sizeof(*mdp4_plane), GFP_KERNEL);
221 if (!mdp4_plane) {
222 ret = -ENOMEM;
223 goto fail;
224 }
225
226 plane = &mdp4_plane->base;
227
228 mdp4_plane->pipe = pipe_id;
229 mdp4_plane->name = pipe_names[pipe_id];
230
231 drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &mdp4_plane_funcs,
232 mdp4_plane->formats, mdp4_plane->nformats, private_plane);
233
234 mdp4_plane_install_properties(plane, &plane->base);
235
236 return plane;
237
238fail:
239 if (plane)
240 mdp4_plane_destroy(plane);
241
242 return ERR_PTR(ret);
243}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
new file mode 100644
index 000000000000..864c9773636b
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -0,0 +1,776 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_drv.h"
19#include "msm_gpu.h"
20
21#include <mach/iommu.h>
22
23static void msm_fb_output_poll_changed(struct drm_device *dev)
24{
25 struct msm_drm_private *priv = dev->dev_private;
26 if (priv->fbdev)
27 drm_fb_helper_hotplug_event(priv->fbdev);
28}
29
30static const struct drm_mode_config_funcs mode_config_funcs = {
31 .fb_create = msm_framebuffer_create,
32 .output_poll_changed = msm_fb_output_poll_changed,
33};
34
35static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
36 unsigned long iova, int flags, void *arg)
37{
38 DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
39 return 0;
40}
41
42int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu)
43{
44 struct msm_drm_private *priv = dev->dev_private;
45 int idx = priv->num_iommus++;
46
47 if (WARN_ON(idx >= ARRAY_SIZE(priv->iommus)))
48 return -EINVAL;
49
50 priv->iommus[idx] = iommu;
51
52 iommu_set_fault_handler(iommu, msm_fault_handler, dev);
53
54 /* need to iommu_attach_device() somewhere?? on resume?? */
55
56 return idx;
57}
58
59int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
60 const char **names, int cnt)
61{
62 int i, ret;
63
64 for (i = 0; i < cnt; i++) {
65 struct device *ctx = msm_iommu_get_ctx(names[i]);
66 if (!ctx)
67 continue;
68 ret = iommu_attach_device(iommu, ctx);
69 if (ret) {
70 dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
71 return ret;
72 }
73 }
74 return 0;
75}
76
77#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
78static bool reglog = false;
79MODULE_PARM_DESC(reglog, "Enable register read/write logging");
80module_param(reglog, bool, 0600);
81#else
82#define reglog 0
83#endif
84
85void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
86 const char *dbgname)
87{
88 struct resource *res;
89 unsigned long size;
90 void __iomem *ptr;
91
92 if (name)
93 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
94 else
95 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
96
97 if (!res) {
98 dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
99 return ERR_PTR(-EINVAL);
100 }
101
102 size = resource_size(res);
103
104 ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
105 if (!ptr) {
106 dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
107 return ERR_PTR(-ENOMEM);
108 }
109
110 if (reglog)
111 printk(KERN_DEBUG "IO:region %s %08x %08lx\n", dbgname, (u32)ptr, size);
112
113 return ptr;
114}
115
116void msm_writel(u32 data, void __iomem *addr)
117{
118 if (reglog)
119 printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data);
120 writel(data, addr);
121}
122
123u32 msm_readl(const void __iomem *addr)
124{
125 u32 val = readl(addr);
126 if (reglog)
127 printk(KERN_DEBUG "IO:R %08x %08x\n", (u32)addr, val);
128 return val;
129}
130
131/*
132 * DRM operations:
133 */
134
135static int msm_unload(struct drm_device *dev)
136{
137 struct msm_drm_private *priv = dev->dev_private;
138 struct msm_kms *kms = priv->kms;
139 struct msm_gpu *gpu = priv->gpu;
140
141 drm_kms_helper_poll_fini(dev);
142 drm_mode_config_cleanup(dev);
143 drm_vblank_cleanup(dev);
144
145 pm_runtime_get_sync(dev->dev);
146 drm_irq_uninstall(dev);
147 pm_runtime_put_sync(dev->dev);
148
149 flush_workqueue(priv->wq);
150 destroy_workqueue(priv->wq);
151
152 if (kms) {
153 pm_runtime_disable(dev->dev);
154 kms->funcs->destroy(kms);
155 }
156
157 if (gpu) {
158 mutex_lock(&dev->struct_mutex);
159 gpu->funcs->pm_suspend(gpu);
160 gpu->funcs->destroy(gpu);
161 mutex_unlock(&dev->struct_mutex);
162 }
163
164 dev->dev_private = NULL;
165
166 kfree(priv);
167
168 return 0;
169}
170
171static int msm_load(struct drm_device *dev, unsigned long flags)
172{
173 struct platform_device *pdev = dev->platformdev;
174 struct msm_drm_private *priv;
175 struct msm_kms *kms;
176 int ret;
177
178 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
179 if (!priv) {
180 dev_err(dev->dev, "failed to allocate private data\n");
181 return -ENOMEM;
182 }
183
184 dev->dev_private = priv;
185
186 priv->wq = alloc_ordered_workqueue("msm", 0);
187 init_waitqueue_head(&priv->fence_event);
188
189 INIT_LIST_HEAD(&priv->inactive_list);
190
191 drm_mode_config_init(dev);
192
193 kms = mdp4_kms_init(dev);
194 if (IS_ERR(kms)) {
195 /*
196 * NOTE: once we have GPU support, having no kms should not
197 * be considered fatal.. ideally we would still support gpu
198 * and (for example) use dmabuf/prime to share buffers with
199 * imx drm driver on iMX5
200 */
201 dev_err(dev->dev, "failed to load kms\n");
202 ret = PTR_ERR(kms);
203 goto fail;
204 }
205
206 priv->kms = kms;
207
208 if (kms) {
209 pm_runtime_enable(dev->dev);
210 ret = kms->funcs->hw_init(kms);
211 if (ret) {
212 dev_err(dev->dev, "kms hw init failed: %d\n", ret);
213 goto fail;
214 }
215 }
216
217 dev->mode_config.min_width = 0;
218 dev->mode_config.min_height = 0;
219 dev->mode_config.max_width = 2048;
220 dev->mode_config.max_height = 2048;
221 dev->mode_config.funcs = &mode_config_funcs;
222
223 ret = drm_vblank_init(dev, 1);
224 if (ret < 0) {
225 dev_err(dev->dev, "failed to initialize vblank\n");
226 goto fail;
227 }
228
229 pm_runtime_get_sync(dev->dev);
230 ret = drm_irq_install(dev);
231 pm_runtime_put_sync(dev->dev);
232 if (ret < 0) {
233 dev_err(dev->dev, "failed to install IRQ handler\n");
234 goto fail;
235 }
236
237 platform_set_drvdata(pdev, dev);
238
239#ifdef CONFIG_DRM_MSM_FBDEV
240 priv->fbdev = msm_fbdev_init(dev);
241#endif
242
243 drm_kms_helper_poll_init(dev);
244
245 return 0;
246
247fail:
248 msm_unload(dev);
249 return ret;
250}
251
252static void load_gpu(struct drm_device *dev)
253{
254 struct msm_drm_private *priv = dev->dev_private;
255 struct msm_gpu *gpu;
256
257 if (priv->gpu)
258 return;
259
260 mutex_lock(&dev->struct_mutex);
261 gpu = a3xx_gpu_init(dev);
262 if (IS_ERR(gpu)) {
263 dev_warn(dev->dev, "failed to load a3xx gpu\n");
264 gpu = NULL;
265 /* not fatal */
266 }
267 mutex_unlock(&dev->struct_mutex);
268
269 if (gpu) {
270 int ret;
271 gpu->funcs->pm_resume(gpu);
272 ret = gpu->funcs->hw_init(gpu);
273 if (ret) {
274 dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
275 gpu->funcs->destroy(gpu);
276 gpu = NULL;
277 }
278 }
279
280 priv->gpu = gpu;
281}
282
283static int msm_open(struct drm_device *dev, struct drm_file *file)
284{
285 struct msm_file_private *ctx;
286
287 /* For now, load gpu on open.. to avoid the requirement of having
288 * firmware in the initrd.
289 */
290 load_gpu(dev);
291
292 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
293 if (!ctx)
294 return -ENOMEM;
295
296 file->driver_priv = ctx;
297
298 return 0;
299}
300
301static void msm_preclose(struct drm_device *dev, struct drm_file *file)
302{
303 struct msm_drm_private *priv = dev->dev_private;
304 struct msm_file_private *ctx = file->driver_priv;
305 struct msm_kms *kms = priv->kms;
306
307 if (kms)
308 kms->funcs->preclose(kms, file);
309
310 mutex_lock(&dev->struct_mutex);
311 if (ctx == priv->lastctx)
312 priv->lastctx = NULL;
313 mutex_unlock(&dev->struct_mutex);
314
315 kfree(ctx);
316}
317
318static void msm_lastclose(struct drm_device *dev)
319{
320 struct msm_drm_private *priv = dev->dev_private;
321 if (priv->fbdev) {
322 drm_modeset_lock_all(dev);
323 drm_fb_helper_restore_fbdev_mode(priv->fbdev);
324 drm_modeset_unlock_all(dev);
325 }
326}
327
328static irqreturn_t msm_irq(DRM_IRQ_ARGS)
329{
330 struct drm_device *dev = arg;
331 struct msm_drm_private *priv = dev->dev_private;
332 struct msm_kms *kms = priv->kms;
333 BUG_ON(!kms);
334 return kms->funcs->irq(kms);
335}
336
337static void msm_irq_preinstall(struct drm_device *dev)
338{
339 struct msm_drm_private *priv = dev->dev_private;
340 struct msm_kms *kms = priv->kms;
341 BUG_ON(!kms);
342 kms->funcs->irq_preinstall(kms);
343}
344
345static int msm_irq_postinstall(struct drm_device *dev)
346{
347 struct msm_drm_private *priv = dev->dev_private;
348 struct msm_kms *kms = priv->kms;
349 BUG_ON(!kms);
350 return kms->funcs->irq_postinstall(kms);
351}
352
353static void msm_irq_uninstall(struct drm_device *dev)
354{
355 struct msm_drm_private *priv = dev->dev_private;
356 struct msm_kms *kms = priv->kms;
357 BUG_ON(!kms);
358 kms->funcs->irq_uninstall(kms);
359}
360
361static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
362{
363 struct msm_drm_private *priv = dev->dev_private;
364 struct msm_kms *kms = priv->kms;
365 if (!kms)
366 return -ENXIO;
367 DBG("dev=%p, crtc=%d", dev, crtc_id);
368 return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
369}
370
371static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
372{
373 struct msm_drm_private *priv = dev->dev_private;
374 struct msm_kms *kms = priv->kms;
375 if (!kms)
376 return;
377 DBG("dev=%p, crtc=%d", dev, crtc_id);
378 kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
379}
380
381/*
382 * DRM debugfs:
383 */
384
385#ifdef CONFIG_DEBUG_FS
386static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
387{
388 struct msm_drm_private *priv = dev->dev_private;
389 struct msm_gpu *gpu = priv->gpu;
390
391 if (gpu) {
392 seq_printf(m, "%s Status:\n", gpu->name);
393 gpu->funcs->show(gpu, m);
394 }
395
396 return 0;
397}
398
399static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
400{
401 struct msm_drm_private *priv = dev->dev_private;
402 struct msm_gpu *gpu = priv->gpu;
403
404 if (gpu) {
405 seq_printf(m, "Active Objects (%s):\n", gpu->name);
406 msm_gem_describe_objects(&gpu->active_list, m);
407 }
408
409 seq_printf(m, "Inactive Objects:\n");
410 msm_gem_describe_objects(&priv->inactive_list, m);
411
412 return 0;
413}
414
415static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
416{
417 return drm_mm_dump_table(m, dev->mm_private);
418}
419
420static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
421{
422 struct msm_drm_private *priv = dev->dev_private;
423 struct drm_framebuffer *fb, *fbdev_fb = NULL;
424
425 if (priv->fbdev) {
426 seq_printf(m, "fbcon ");
427 fbdev_fb = priv->fbdev->fb;
428 msm_framebuffer_describe(fbdev_fb, m);
429 }
430
431 mutex_lock(&dev->mode_config.fb_lock);
432 list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
433 if (fb == fbdev_fb)
434 continue;
435
436 seq_printf(m, "user ");
437 msm_framebuffer_describe(fb, m);
438 }
439 mutex_unlock(&dev->mode_config.fb_lock);
440
441 return 0;
442}
443
444static int show_locked(struct seq_file *m, void *arg)
445{
446 struct drm_info_node *node = (struct drm_info_node *) m->private;
447 struct drm_device *dev = node->minor->dev;
448 int (*show)(struct drm_device *dev, struct seq_file *m) =
449 node->info_ent->data;
450 int ret;
451
452 ret = mutex_lock_interruptible(&dev->struct_mutex);
453 if (ret)
454 return ret;
455
456 ret = show(dev, m);
457
458 mutex_unlock(&dev->struct_mutex);
459
460 return ret;
461}
462
463static struct drm_info_list msm_debugfs_list[] = {
464 {"gpu", show_locked, 0, msm_gpu_show},
465 {"gem", show_locked, 0, msm_gem_show},
466 { "mm", show_locked, 0, msm_mm_show },
467 { "fb", show_locked, 0, msm_fb_show },
468};
469
470static int msm_debugfs_init(struct drm_minor *minor)
471{
472 struct drm_device *dev = minor->dev;
473 int ret;
474
475 ret = drm_debugfs_create_files(msm_debugfs_list,
476 ARRAY_SIZE(msm_debugfs_list),
477 minor->debugfs_root, minor);
478
479 if (ret) {
480 dev_err(dev->dev, "could not install msm_debugfs_list\n");
481 return ret;
482 }
483
484 return ret;
485}
486
487static void msm_debugfs_cleanup(struct drm_minor *minor)
488{
489 drm_debugfs_remove_files(msm_debugfs_list,
490 ARRAY_SIZE(msm_debugfs_list), minor);
491}
492#endif
493
494/*
495 * Fences:
496 */
497
498int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
499 struct timespec *timeout)
500{
501 struct msm_drm_private *priv = dev->dev_private;
502 unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
503 unsigned long start_jiffies = jiffies;
504 unsigned long remaining_jiffies;
505 int ret;
506
507 if (time_after(start_jiffies, timeout_jiffies))
508 remaining_jiffies = 0;
509 else
510 remaining_jiffies = timeout_jiffies - start_jiffies;
511
512 ret = wait_event_interruptible_timeout(priv->fence_event,
513 priv->completed_fence >= fence,
514 remaining_jiffies);
515 if (ret == 0) {
516 DBG("timeout waiting for fence: %u (completed: %u)",
517 fence, priv->completed_fence);
518 ret = -ETIMEDOUT;
519 } else if (ret != -ERESTARTSYS) {
520 ret = 0;
521 }
522
523 return ret;
524}
525
526/* call under struct_mutex */
527void msm_update_fence(struct drm_device *dev, uint32_t fence)
528{
529 struct msm_drm_private *priv = dev->dev_private;
530
531 if (fence > priv->completed_fence) {
532 priv->completed_fence = fence;
533 wake_up_all(&priv->fence_event);
534 }
535}
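/*
 * Illustrative sketch of how the two helpers above fit together: the retire
 * path (elsewhere in the driver) is expected to call msm_update_fence() with
 * the last completed fence number, which wakes anyone blocked in
 * msm_wait_fence_interruptable().  A hypothetical waiter, assuming a fence
 * number obtained from a submit:
 *
 *	struct timespec timeout = { .tv_sec = 1, .tv_nsec = 0 };
 *	int ret = msm_wait_fence_interruptable(dev, fence, &timeout);
 *	if (ret == -ETIMEDOUT)
 *		DBG("fence %u not signalled within the timeout", fence);
 */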
536
537/*
538 * DRM ioctls:
539 */
540
541static int msm_ioctl_get_param(struct drm_device *dev, void *data,
542 struct drm_file *file)
543{
544 struct msm_drm_private *priv = dev->dev_private;
545 struct drm_msm_param *args = data;
546 struct msm_gpu *gpu;
547
548	/* for now, we just have the 3d pipe.. eventually this would need to
549	 * be more clever to dispatch to the appropriate gpu module:
550 */
551 if (args->pipe != MSM_PIPE_3D0)
552 return -EINVAL;
553
554 gpu = priv->gpu;
555
556 if (!gpu)
557 return -ENXIO;
558
559 return gpu->funcs->get_param(gpu, args->param, &args->value);
560}
561
562static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
563 struct drm_file *file)
564{
565 struct drm_msm_gem_new *args = data;
566 return msm_gem_new_handle(dev, file, args->size,
567 args->flags, &args->handle);
568}
569
570#define TS(t) ((struct timespec){ .tv_sec = (t).tv_sec, .tv_nsec = (t).tv_nsec })
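/*
 * The ioctl args carry the timeout in a driver-specific timespec-like struct
 * (presumably with fixed-width fields for a stable ABI), so TS() repacks it
 * into a kernel struct timespec for the wait helpers, e.g.
 * msm_gem_cpu_prep(obj, args->op, &TS(args->timeout)) below.
 */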
571
572static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
573 struct drm_file *file)
574{
575 struct drm_msm_gem_cpu_prep *args = data;
576 struct drm_gem_object *obj;
577 int ret;
578
579 obj = drm_gem_object_lookup(dev, file, args->handle);
580 if (!obj)
581 return -ENOENT;
582
583 ret = msm_gem_cpu_prep(obj, args->op, &TS(args->timeout));
584
585 drm_gem_object_unreference_unlocked(obj);
586
587 return ret;
588}
589
590static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
591 struct drm_file *file)
592{
593 struct drm_msm_gem_cpu_fini *args = data;
594 struct drm_gem_object *obj;
595 int ret;
596
597 obj = drm_gem_object_lookup(dev, file, args->handle);
598 if (!obj)
599 return -ENOENT;
600
601 ret = msm_gem_cpu_fini(obj);
602
603 drm_gem_object_unreference_unlocked(obj);
604
605 return ret;
606}
607
608static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
609 struct drm_file *file)
610{
611 struct drm_msm_gem_info *args = data;
612 struct drm_gem_object *obj;
613 int ret = 0;
614
615 if (args->pad)
616 return -EINVAL;
617
618 obj = drm_gem_object_lookup(dev, file, args->handle);
619 if (!obj)
620 return -ENOENT;
621
622 args->offset = msm_gem_mmap_offset(obj);
623
624 drm_gem_object_unreference_unlocked(obj);
625
626 return ret;
627}
628
629static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
630 struct drm_file *file)
631{
632 struct drm_msm_wait_fence *args = data;
633 return msm_wait_fence_interruptable(dev, args->fence, &TS(args->timeout));
634}
635
636static const struct drm_ioctl_desc msm_ioctls[] = {
637 DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
638 DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
639 DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH),
640 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
641 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
642 DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH),
643 DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH),
644};
645
646static const struct vm_operations_struct vm_ops = {
647 .fault = msm_gem_fault,
648 .open = drm_gem_vm_open,
649 .close = drm_gem_vm_close,
650};
651
652static const struct file_operations fops = {
653 .owner = THIS_MODULE,
654 .open = drm_open,
655 .release = drm_release,
656 .unlocked_ioctl = drm_ioctl,
657#ifdef CONFIG_COMPAT
658 .compat_ioctl = drm_compat_ioctl,
659#endif
660 .poll = drm_poll,
661 .read = drm_read,
662 .llseek = no_llseek,
663 .mmap = msm_gem_mmap,
664};
665
666static struct drm_driver msm_driver = {
667 .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
668 .load = msm_load,
669 .unload = msm_unload,
670 .open = msm_open,
671 .preclose = msm_preclose,
672 .lastclose = msm_lastclose,
673 .irq_handler = msm_irq,
674 .irq_preinstall = msm_irq_preinstall,
675 .irq_postinstall = msm_irq_postinstall,
676 .irq_uninstall = msm_irq_uninstall,
677 .get_vblank_counter = drm_vblank_count,
678 .enable_vblank = msm_enable_vblank,
679 .disable_vblank = msm_disable_vblank,
680 .gem_free_object = msm_gem_free_object,
681 .gem_vm_ops = &vm_ops,
682 .dumb_create = msm_gem_dumb_create,
683 .dumb_map_offset = msm_gem_dumb_map_offset,
684 .dumb_destroy = msm_gem_dumb_destroy,
685#ifdef CONFIG_DEBUG_FS
686 .debugfs_init = msm_debugfs_init,
687 .debugfs_cleanup = msm_debugfs_cleanup,
688#endif
689 .ioctls = msm_ioctls,
690 .num_ioctls = DRM_MSM_NUM_IOCTLS,
691 .fops = &fops,
692 .name = "msm",
693 .desc = "MSM Snapdragon DRM",
694 .date = "20130625",
695 .major = 1,
696 .minor = 0,
697};
698
699#ifdef CONFIG_PM_SLEEP
700static int msm_pm_suspend(struct device *dev)
701{
702 struct drm_device *ddev = dev_get_drvdata(dev);
703
704 drm_kms_helper_poll_disable(ddev);
705
706 return 0;
707}
708
709static int msm_pm_resume(struct device *dev)
710{
711 struct drm_device *ddev = dev_get_drvdata(dev);
712
713 drm_kms_helper_poll_enable(ddev);
714
715 return 0;
716}
717#endif
718
719static const struct dev_pm_ops msm_pm_ops = {
720 SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
721};
722
723/*
724 * Platform driver:
725 */
726
727static int msm_pdev_probe(struct platform_device *pdev)
728{
729 return drm_platform_init(&msm_driver, pdev);
730}
731
732static int msm_pdev_remove(struct platform_device *pdev)
733{
734 drm_platform_exit(&msm_driver, pdev);
735
736 return 0;
737}
738
739static const struct platform_device_id msm_id[] = {
740 { "mdp", 0 },
741 { }
742};
743
744static struct platform_driver msm_platform_driver = {
745 .probe = msm_pdev_probe,
746 .remove = msm_pdev_remove,
747 .driver = {
748 .owner = THIS_MODULE,
749 .name = "msm",
750 .pm = &msm_pm_ops,
751 },
752 .id_table = msm_id,
753};
754
755static int __init msm_drm_register(void)
756{
757 DBG("init");
758 hdmi_register();
759 a3xx_register();
760 return platform_driver_register(&msm_platform_driver);
761}
762
763static void __exit msm_drm_unregister(void)
764{
765 DBG("fini");
766 platform_driver_unregister(&msm_platform_driver);
767 hdmi_unregister();
768 a3xx_unregister();
769}
770
771module_init(msm_drm_register);
772module_exit(msm_drm_unregister);
773
774MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
775MODULE_DESCRIPTION("MSM DRM Driver");
776MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
new file mode 100644
index 000000000000..80d75094bf0a
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -0,0 +1,213 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_DRV_H__
19#define __MSM_DRV_H__
20
21#include <linux/kernel.h>
22#include <linux/clk.h>
23#include <linux/cpufreq.h>
24#include <linux/module.h>
25#include <linux/platform_device.h>
26#include <linux/pm.h>
27#include <linux/pm_runtime.h>
28#include <linux/slab.h>
29#include <linux/list.h>
30#include <linux/iommu.h>
31#include <linux/types.h>
32#include <asm/sizes.h>
33
34#ifndef CONFIG_OF
35#include <mach/board.h>
36#include <mach/socinfo.h>
37#include <mach/iommu_domains.h>
38#endif
39
40#include <drm/drmP.h>
41#include <drm/drm_crtc_helper.h>
42#include <drm/drm_fb_helper.h>
43#include <drm/msm_drm.h>
44
45struct msm_kms;
46struct msm_gpu;
47
48#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
49
50struct msm_file_private {
51 /* currently we don't do anything useful with this.. but when
52 * per-context address spaces are supported we'd keep track of
53 * the context's page-tables here.
54 */
55 int dummy;
56};
57
58struct msm_drm_private {
59
60 struct msm_kms *kms;
61
62 /* when we have more than one 'msm_gpu' these need to be an array: */
63 struct msm_gpu *gpu;
64 struct msm_file_private *lastctx;
65
66 struct drm_fb_helper *fbdev;
67
68 uint32_t next_fence, completed_fence;
69 wait_queue_head_t fence_event;
70
71 /* list of GEM objects: */
72 struct list_head inactive_list;
73
74 struct workqueue_struct *wq;
75
76 /* registered IOMMU domains: */
77 unsigned int num_iommus;
78 struct iommu_domain *iommus[NUM_DOMAINS];
79
80 unsigned int num_crtcs;
81 struct drm_crtc *crtcs[8];
82
83 unsigned int num_encoders;
84 struct drm_encoder *encoders[8];
85
86 unsigned int num_bridges;
87 struct drm_bridge *bridges[8];
88
89 unsigned int num_connectors;
90 struct drm_connector *connectors[8];
91};
92
93struct msm_format {
94 uint32_t pixel_format;
95};
96
97/* As there are different display controller blocks depending on the
98 * snapdragon version, the kms support is split out and the appropriate
99 * implementation is loaded at runtime. The kms module is responsible
100 * for constructing the appropriate planes/crtcs/encoders/connectors.
101 */
102struct msm_kms_funcs {
103 /* hw initialization: */
104 int (*hw_init)(struct msm_kms *kms);
105 /* irq handling: */
106 void (*irq_preinstall)(struct msm_kms *kms);
107 int (*irq_postinstall)(struct msm_kms *kms);
108 void (*irq_uninstall)(struct msm_kms *kms);
109 irqreturn_t (*irq)(struct msm_kms *kms);
110 int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
111 void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
112 /* misc: */
113 const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
114 long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
115 struct drm_encoder *encoder);
116 /* cleanup: */
117 void (*preclose)(struct msm_kms *kms, struct drm_file *file);
118 void (*destroy)(struct msm_kms *kms);
119};
120
121struct msm_kms {
122 const struct msm_kms_funcs *funcs;
123};
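/*
 * Illustrative sketch (hypothetical names) of how a display backend is
 * expected to plug into this interface: the backend embeds struct msm_kms,
 * fills in a static msm_kms_funcs table, and its init function hands back
 * the embedded base, roughly:
 *
 *	struct foo_kms {
 *		struct msm_kms base;
 *		// hw-specific state ...
 *	};
 *
 *	static const struct msm_kms_funcs foo_kms_funcs = {
 *		.hw_init = foo_hw_init,
 *		.irq = foo_irq,
 *		// remaining ops ...
 *	};
 *
 *	struct msm_kms *foo_kms_init(struct drm_device *dev)
 *	{
 *		struct foo_kms *foo_kms = kzalloc(sizeof(*foo_kms), GFP_KERNEL);
 *		if (!foo_kms)
 *			return ERR_PTR(-ENOMEM);
 *		foo_kms->base.funcs = &foo_kms_funcs;
 *		return &foo_kms->base;
 *	}
 *
 * mdp4_kms_init() below is the real implementation used by msm_load().
 */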
124
125struct msm_kms *mdp4_kms_init(struct drm_device *dev);
126
127int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu);
128int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
129 const char **names, int cnt);
130
131int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
132 struct timespec *timeout);
133void msm_update_fence(struct drm_device *dev, uint32_t fence);
134
135int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
136 struct drm_file *file);
137
138int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
139int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
140uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
141int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
142 uint32_t *iova);
143int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
144void msm_gem_put_iova(struct drm_gem_object *obj, int id);
145int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
146 struct drm_mode_create_dumb *args);
147int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
148 uint32_t handle);
149int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
150 uint32_t handle, uint64_t *offset);
151void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
152void *msm_gem_vaddr(struct drm_gem_object *obj);
153int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
154 struct work_struct *work);
155void msm_gem_move_to_active(struct drm_gem_object *obj,
156 struct msm_gpu *gpu, uint32_t fence);
157void msm_gem_move_to_inactive(struct drm_gem_object *obj);
158int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
159 struct timespec *timeout);
160int msm_gem_cpu_fini(struct drm_gem_object *obj);
161void msm_gem_free_object(struct drm_gem_object *obj);
162int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
163 uint32_t size, uint32_t flags, uint32_t *handle);
164struct drm_gem_object *msm_gem_new(struct drm_device *dev,
165 uint32_t size, uint32_t flags);
166
167struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
168const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
169struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
170 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
171struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
172 struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd);
173
174struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
175
176int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder);
177void __init hdmi_register(void);
178void __exit hdmi_unregister(void);
179
180#ifdef CONFIG_DEBUG_FS
181void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
182void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
183void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
184#endif
185
186void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
187 const char *dbgname);
188void msm_writel(u32 data, void __iomem *addr);
189u32 msm_readl(const void __iomem *addr);
190
191#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
192#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
193
194static inline int align_pitch(int width, int bpp)
195{
196 int bytespp = (bpp + 7) / 8;
197 /* adreno needs pitch aligned to 32 pixels: */
198 return bytespp * ALIGN(width, 32);
199}
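/*
 * Worked example: for a 1366 pixel wide, 32bpp surface, bytespp is 4 and the
 * width is first padded up to 1376 pixels, giving a pitch of 5504 bytes; a
 * width that is already a multiple of 32 (e.g. 1920) is left as-is, giving
 * 7680 bytes.
 */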
200
201/* for the generated headers: */
202#define INVALID_IDX(idx) ({BUG(); 0;})
203#define fui(x) ({BUG(); 0;})
204#define util_float_to_half(x) ({BUG(); 0;})
205
206
207#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)
208
209/* for conditionally setting boolean flag(s): */
210#define COND(bool, val) ((bool) ? (val) : 0)
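/*
 * Usage sketch for the two helpers above (hypothetical field names, in the
 * style the generated headers are expected to use):
 *
 *	uint32_t bar = FIELD(status, REG_FOO_BAR);	// ((status) & REG_FOO_BAR__MASK) >> REG_FOO_BAR__SHIFT
 *	uint32_t ctl = COND(enable, REG_FOO_ENABLE);	// REG_FOO_ENABLE if enable, else 0
 */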
211
212
213#endif /* __MSM_DRV_H__ */
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
new file mode 100644
index 000000000000..0286c0eeb10c
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -0,0 +1,202 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_drv.h"
19
20#include "drm_crtc.h"
21#include "drm_crtc_helper.h"
22
23struct msm_framebuffer {
24 struct drm_framebuffer base;
25 const struct msm_format *format;
26 struct drm_gem_object *planes[2];
27};
28#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
29
30
31static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
32 struct drm_file *file_priv,
33 unsigned int *handle)
34{
35 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
36 return drm_gem_handle_create(file_priv,
37 msm_fb->planes[0], handle);
38}
39
40static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
41{
42 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
43 int i, n = drm_format_num_planes(fb->pixel_format);
44
45 DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
46
47 drm_framebuffer_cleanup(fb);
48
49 for (i = 0; i < n; i++) {
50 struct drm_gem_object *bo = msm_fb->planes[i];
51 if (bo)
52 drm_gem_object_unreference_unlocked(bo);
53 }
54
55 kfree(msm_fb);
56}
57
58static int msm_framebuffer_dirty(struct drm_framebuffer *fb,
59 struct drm_file *file_priv, unsigned flags, unsigned color,
60 struct drm_clip_rect *clips, unsigned num_clips)
61{
62 return 0;
63}
64
65static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
66 .create_handle = msm_framebuffer_create_handle,
67 .destroy = msm_framebuffer_destroy,
68 .dirty = msm_framebuffer_dirty,
69};
70
71#ifdef CONFIG_DEBUG_FS
72void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
73{
74 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
75 int i, n = drm_format_num_planes(fb->pixel_format);
76
77 seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
78 fb->width, fb->height, (char *)&fb->pixel_format,
79 fb->refcount.refcount.counter, fb->base.id);
80
81 for (i = 0; i < n; i++) {
82 seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
83 i, fb->offsets[i], fb->pitches[i]);
84 msm_gem_describe(msm_fb->planes[i], m);
85 }
86}
87#endif
88
89struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
90{
91 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
92 return msm_fb->planes[plane];
93}
94
95const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
96{
97 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
98 return msm_fb->format;
99}
100
101struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
102 struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd)
103{
104 struct drm_gem_object *bos[4] = {0};
105 struct drm_framebuffer *fb;
106 int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
107
108 for (i = 0; i < n; i++) {
109 bos[i] = drm_gem_object_lookup(dev, file,
110 mode_cmd->handles[i]);
111 if (!bos[i]) {
112 ret = -ENXIO;
113 goto out_unref;
114 }
115 }
116
117 fb = msm_framebuffer_init(dev, mode_cmd, bos);
118 if (IS_ERR(fb)) {
119 ret = PTR_ERR(fb);
120 goto out_unref;
121 }
122
123 return fb;
124
125out_unref:
126 for (i = 0; i < n; i++)
127 drm_gem_object_unreference_unlocked(bos[i]);
128 return ERR_PTR(ret);
129}
130
131struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
132 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
133{
134 struct msm_drm_private *priv = dev->dev_private;
135 struct msm_kms *kms = priv->kms;
136 struct msm_framebuffer *msm_fb;
137 struct drm_framebuffer *fb = NULL;
138 const struct msm_format *format;
139 int ret, i, n;
140 unsigned int hsub, vsub;
141
142 DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
143 dev, mode_cmd, mode_cmd->width, mode_cmd->height,
144 (char *)&mode_cmd->pixel_format);
145
146 n = drm_format_num_planes(mode_cmd->pixel_format);
147 hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
148 vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
149
150 format = kms->funcs->get_format(kms, mode_cmd->pixel_format);
151 if (!format) {
152 dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
153 (char *)&mode_cmd->pixel_format);
154 ret = -EINVAL;
155 goto fail;
156 }
157
158 msm_fb = kzalloc(sizeof(*msm_fb), GFP_KERNEL);
159 if (!msm_fb) {
160 ret = -ENOMEM;
161 goto fail;
162 }
163
164 fb = &msm_fb->base;
165
166 msm_fb->format = format;
167
168 for (i = 0; i < n; i++) {
169 unsigned int width = mode_cmd->width / (i ? hsub : 1);
170 unsigned int height = mode_cmd->height / (i ? vsub : 1);
171 unsigned int min_size;
172
173 min_size = (height - 1) * mode_cmd->pitches[i]
174 + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
175 + mode_cmd->offsets[i];
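		/* e.g. for the single plane of a hypothetical 1920x1080
		 * XRGB8888 fb with pitches[0] == 7680 and offsets[0] == 0,
		 * this is 1079 * 7680 + 1920 * 4 = 8294400 bytes, i.e.
		 * exactly width * height * cpp:
		 */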
176
177 if (bos[i]->size < min_size) {
178 ret = -EINVAL;
179 goto fail;
180 }
181
182 msm_fb->planes[i] = bos[i];
183 }
184
185 drm_helper_mode_fill_fb_struct(fb, mode_cmd);
186
187 ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
188 if (ret) {
189 dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
190 goto fail;
191 }
192
193 DBG("create: FB ID: %d (%p)", fb->base.id, fb);
194
195 return fb;
196
197fail:
198 if (fb)
199 msm_framebuffer_destroy(fb);
200
201 return ERR_PTR(ret);
202}
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
new file mode 100644
index 000000000000..6c6d7d4c9b4e
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -0,0 +1,258 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_drv.h"
19
20#include "drm_crtc.h"
21#include "drm_fb_helper.h"
22
23/*
24 * fbdev funcs, to implement legacy fbdev interface on top of drm driver
25 */
26
27#define to_msm_fbdev(x) container_of(x, struct msm_fbdev, base)
28
29struct msm_fbdev {
30 struct drm_fb_helper base;
31 struct drm_framebuffer *fb;
32 struct drm_gem_object *bo;
33};
34
35static struct fb_ops msm_fb_ops = {
36 .owner = THIS_MODULE,
37
38 /* Note: to properly handle manual update displays, we wrap the
39 * basic fbdev ops which write to the framebuffer
40 */
41 .fb_read = fb_sys_read,
42 .fb_write = fb_sys_write,
43 .fb_fillrect = sys_fillrect,
44 .fb_copyarea = sys_copyarea,
45 .fb_imageblit = sys_imageblit,
46
47 .fb_check_var = drm_fb_helper_check_var,
48 .fb_set_par = drm_fb_helper_set_par,
49 .fb_pan_display = drm_fb_helper_pan_display,
50 .fb_blank = drm_fb_helper_blank,
51 .fb_setcmap = drm_fb_helper_setcmap,
52};
53
54static int msm_fbdev_create(struct drm_fb_helper *helper,
55 struct drm_fb_helper_surface_size *sizes)
56{
57 struct msm_fbdev *fbdev = to_msm_fbdev(helper);
58 struct drm_device *dev = helper->dev;
59 struct drm_framebuffer *fb = NULL;
60 struct fb_info *fbi = NULL;
61 struct drm_mode_fb_cmd2 mode_cmd = {0};
62 dma_addr_t paddr;
63 int ret, size;
64
65 /* only doing ARGB32 since this is what is needed to alpha-blend
66 * with video overlays:
67 */
68 sizes->surface_bpp = 32;
69 sizes->surface_depth = 32;
70
71 DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
72 sizes->surface_height, sizes->surface_bpp,
73 sizes->fb_width, sizes->fb_height);
74
75 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
76 sizes->surface_depth);
77
78 mode_cmd.width = sizes->surface_width;
79 mode_cmd.height = sizes->surface_height;
80
81 mode_cmd.pitches[0] = align_pitch(
82 mode_cmd.width, sizes->surface_bpp);
83
84 /* allocate backing bo */
85 size = mode_cmd.pitches[0] * mode_cmd.height;
86 DBG("allocating %d bytes for fb %d", size, dev->primary->index);
87 mutex_lock(&dev->struct_mutex);
88 fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
89 mutex_unlock(&dev->struct_mutex);
90 if (IS_ERR(fbdev->bo)) {
91 ret = PTR_ERR(fbdev->bo);
92 fbdev->bo = NULL;
93 dev_err(dev->dev, "failed to allocate buffer object: %d\n", ret);
94 goto fail;
95 }
96
97 fb = msm_framebuffer_init(dev, &mode_cmd, &fbdev->bo);
98 if (IS_ERR(fb)) {
99 dev_err(dev->dev, "failed to allocate fb\n");
100 /* note: if fb creation failed, we can't rely on fb destroy
101 * to unref the bo:
102 */
103 drm_gem_object_unreference(fbdev->bo);
104 ret = PTR_ERR(fb);
105 goto fail;
106 }
107
108 mutex_lock(&dev->struct_mutex);
109
110 /* TODO implement our own fb_mmap so we don't need this: */
111 msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);
112
113 fbi = framebuffer_alloc(0, dev->dev);
114 if (!fbi) {
115 dev_err(dev->dev, "failed to allocate fb info\n");
116 ret = -ENOMEM;
117 goto fail_unlock;
118 }
119
120 DBG("fbi=%p, dev=%p", fbi, dev);
121
122 fbdev->fb = fb;
123 helper->fb = fb;
124 helper->fbdev = fbi;
125
126 fbi->par = helper;
127 fbi->flags = FBINFO_DEFAULT;
128 fbi->fbops = &msm_fb_ops;
129
130 strcpy(fbi->fix.id, "msm");
131
132 ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
133 if (ret) {
134 ret = -ENOMEM;
135 goto fail_unlock;
136 }
137
138 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
139 drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
140
141 dev->mode_config.fb_base = paddr;
142
143 fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
144 fbi->screen_size = fbdev->bo->size;
145 fbi->fix.smem_start = paddr;
146 fbi->fix.smem_len = fbdev->bo->size;
147
148 DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
149 DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
150
151 mutex_unlock(&dev->struct_mutex);
152
153 return 0;
154
155fail_unlock:
156 mutex_unlock(&dev->struct_mutex);
157fail:
158
159 if (ret) {
160 if (fbi)
161 framebuffer_release(fbi);
162 if (fb) {
163 drm_framebuffer_unregister_private(fb);
164 drm_framebuffer_remove(fb);
165 }
166 }
167
168 return ret;
169}
170
171static void msm_crtc_fb_gamma_set(struct drm_crtc *crtc,
172 u16 red, u16 green, u16 blue, int regno)
173{
174 DBG("fbdev: set gamma");
175}
176
177static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc,
178 u16 *red, u16 *green, u16 *blue, int regno)
179{
180 DBG("fbdev: get gamma");
181}
182
183static struct drm_fb_helper_funcs msm_fb_helper_funcs = {
184 .gamma_set = msm_crtc_fb_gamma_set,
185 .gamma_get = msm_crtc_fb_gamma_get,
186 .fb_probe = msm_fbdev_create,
187};
188
189/* initialize fbdev helper */
190struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
191{
192 struct msm_drm_private *priv = dev->dev_private;
193 struct msm_fbdev *fbdev = NULL;
194 struct drm_fb_helper *helper;
195 int ret = 0;
196
197 fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
198 if (!fbdev)
199 goto fail;
200
201 helper = &fbdev->base;
202
203 helper->funcs = &msm_fb_helper_funcs;
204
205 ret = drm_fb_helper_init(dev, helper,
206 priv->num_crtcs, priv->num_connectors);
207 if (ret) {
208 dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
209 goto fail;
210 }
211
212 drm_fb_helper_single_add_all_connectors(helper);
213
214 /* disable all the possible outputs/crtcs before entering KMS mode */
215 drm_helper_disable_unused_functions(dev);
216
217 drm_fb_helper_initial_config(helper, 32);
218
219 priv->fbdev = helper;
220
221 return helper;
222
223fail:
224 kfree(fbdev);
225 return NULL;
226}
227
228void msm_fbdev_free(struct drm_device *dev)
229{
230 struct msm_drm_private *priv = dev->dev_private;
231 struct drm_fb_helper *helper = priv->fbdev;
232 struct msm_fbdev *fbdev;
233 struct fb_info *fbi;
234
235 DBG();
236
237 fbi = helper->fbdev;
238
239 /* only cleanup framebuffer if it is present */
240 if (fbi) {
241 unregister_framebuffer(fbi);
242 framebuffer_release(fbi);
243 }
244
245 drm_fb_helper_fini(helper);
246
247 fbdev = to_msm_fbdev(priv->fbdev);
248
249 /* this will free the backing object */
250 if (fbdev->fb) {
251 drm_framebuffer_unregister_private(fbdev->fb);
252 drm_framebuffer_remove(fbdev->fb);
253 }
254
255 kfree(fbdev);
256
257 priv->fbdev = NULL;
258}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
new file mode 100644
index 000000000000..6b5a6c8c7658
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -0,0 +1,597 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/spinlock.h>
19#include <linux/shmem_fs.h>
20
21#include "msm_drv.h"
22#include "msm_gem.h"
23#include "msm_gpu.h"
24
25
26/* called with dev->struct_mutex held */
27static struct page **get_pages(struct drm_gem_object *obj)
28{
29 struct msm_gem_object *msm_obj = to_msm_bo(obj);
30
31 if (!msm_obj->pages) {
32 struct drm_device *dev = obj->dev;
33 struct page **p = drm_gem_get_pages(obj, 0);
34 int npages = obj->size >> PAGE_SHIFT;
35
36 if (IS_ERR(p)) {
37 dev_err(dev->dev, "could not get pages: %ld\n",
38 PTR_ERR(p));
39 return p;
40 }
41
42 msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
43 if (!msm_obj->sgt) {
44 dev_err(dev->dev, "failed to allocate sgt\n");
45 return ERR_PTR(-ENOMEM);
46 }
47
48 msm_obj->pages = p;
49
50 /* For non-cached buffers, ensure the new pages are clean
51 * because display controller, GPU, etc. are not coherent:
52 */
53 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
54 dma_map_sg(dev->dev, msm_obj->sgt->sgl,
55 msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
56 }
57
58 return msm_obj->pages;
59}
60
61static void put_pages(struct drm_gem_object *obj)
62{
63 struct msm_gem_object *msm_obj = to_msm_bo(obj);
64
65 if (msm_obj->pages) {
66		/* For non-cached buffers, ensure the pages are clean
67 * because display controller, GPU, etc. are not coherent:
68 */
69 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
70 dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
71 msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
72 sg_free_table(msm_obj->sgt);
73 kfree(msm_obj->sgt);
74
75 drm_gem_put_pages(obj, msm_obj->pages, true, false);
76 msm_obj->pages = NULL;
77 }
78}
79
80int msm_gem_mmap_obj(struct drm_gem_object *obj,
81 struct vm_area_struct *vma)
82{
83 struct msm_gem_object *msm_obj = to_msm_bo(obj);
84
85 vma->vm_flags &= ~VM_PFNMAP;
86 vma->vm_flags |= VM_MIXEDMAP;
87
88 if (msm_obj->flags & MSM_BO_WC) {
89 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
90 } else if (msm_obj->flags & MSM_BO_UNCACHED) {
91 vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
92 } else {
93 /*
94 * Shunt off cached objs to shmem file so they have their own
95 * address_space (so unmap_mapping_range does what we want,
96 * in particular in the case of mmap'd dmabufs)
97 */
98 fput(vma->vm_file);
99 get_file(obj->filp);
100 vma->vm_pgoff = 0;
101 vma->vm_file = obj->filp;
102
103 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
104 }
105
106 return 0;
107}
108
109int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
110{
111 int ret;
112
113 ret = drm_gem_mmap(filp, vma);
114 if (ret) {
115 DBG("mmap failed: %d", ret);
116 return ret;
117 }
118
119 return msm_gem_mmap_obj(vma->vm_private_data, vma);
120}
121
122int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
123{
124 struct drm_gem_object *obj = vma->vm_private_data;
125 struct msm_gem_object *msm_obj = to_msm_bo(obj);
126 struct drm_device *dev = obj->dev;
127 struct page **pages;
128 unsigned long pfn;
129 pgoff_t pgoff;
130 int ret;
131
132	/* Make sure we don't race with a parallel update on a fault, nor have
133	 * something moved or removed from beneath our feet
134 */
135 ret = mutex_lock_interruptible(&dev->struct_mutex);
136 if (ret)
137 goto out;
138
139 /* make sure we have pages attached now */
140 pages = get_pages(obj);
141 if (IS_ERR(pages)) {
142 ret = PTR_ERR(pages);
143 goto out_unlock;
144 }
145
146 /* We don't use vmf->pgoff since that has the fake offset: */
147 pgoff = ((unsigned long)vmf->virtual_address -
148 vma->vm_start) >> PAGE_SHIFT;
149
150 pfn = page_to_pfn(msm_obj->pages[pgoff]);
151
152 VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
153 pfn, pfn << PAGE_SHIFT);
154
155 ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
156
157out_unlock:
158 mutex_unlock(&dev->struct_mutex);
159out:
160 switch (ret) {
161 case -EAGAIN:
162 set_need_resched();
163 case 0:
164 case -ERESTARTSYS:
165 case -EINTR:
166 return VM_FAULT_NOPAGE;
167 case -ENOMEM:
168 return VM_FAULT_OOM;
169 default:
170 return VM_FAULT_SIGBUS;
171 }
172}
173
174/** get mmap offset */
175static uint64_t mmap_offset(struct drm_gem_object *obj)
176{
177 struct drm_device *dev = obj->dev;
178 int ret;
179
180 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
181
182 /* Make it mmapable */
183 ret = drm_gem_create_mmap_offset(obj);
184
185 if (ret) {
186 dev_err(dev->dev, "could not allocate mmap offset\n");
187 return 0;
188 }
189
190 return drm_vma_node_offset_addr(&obj->vma_node);
191}
192
193uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
194{
195 uint64_t offset;
196 mutex_lock(&obj->dev->struct_mutex);
197 offset = mmap_offset(obj);
198 mutex_unlock(&obj->dev->struct_mutex);
199 return offset;
200}
201
202/* helpers for dealing w/ iommu: */
203static int map_range(struct iommu_domain *domain, unsigned int iova,
204 struct sg_table *sgt, unsigned int len, int prot)
205{
206 struct scatterlist *sg;
207 unsigned int da = iova;
208 unsigned int i, j;
209 int ret;
210
211 if (!domain || !sgt)
212 return -EINVAL;
213
214 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
215 u32 pa = sg_phys(sg) - sg->offset;
216 size_t bytes = sg->length + sg->offset;
217
218 VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);
219
220 ret = iommu_map(domain, da, pa, bytes, prot);
221 if (ret)
222 goto fail;
223
224 da += bytes;
225 }
226
227 return 0;
228
229fail:
230 da = iova;
231
232 for_each_sg(sgt->sgl, sg, i, j) {
233 size_t bytes = sg->length + sg->offset;
234 iommu_unmap(domain, da, bytes);
235 da += bytes;
236 }
237 return ret;
238}
239
240static void unmap_range(struct iommu_domain *domain, unsigned int iova,
241 struct sg_table *sgt, unsigned int len)
242{
243 struct scatterlist *sg;
244 unsigned int da = iova;
245 int i;
246
247 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
248 size_t bytes = sg->length + sg->offset;
249 size_t unmapped;
250
251 unmapped = iommu_unmap(domain, da, bytes);
252 if (unmapped < bytes)
253 break;
254
255 VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
256
257 BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
258
259 da += bytes;
260 }
261}
262
263/* should be called under struct_mutex.. although it can be called
264 * from atomic context without struct_mutex to acquire an extra
265 * iova ref if you know one is already held.
266 *
267 * That means when I do eventually need to add support for unpinning,
268 * the refcnt counter needs to be atomic_t.
269 */
270int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
271 uint32_t *iova)
272{
273 struct msm_gem_object *msm_obj = to_msm_bo(obj);
274 int ret = 0;
275
276 if (!msm_obj->domain[id].iova) {
277 struct msm_drm_private *priv = obj->dev->dev_private;
278 uint32_t offset = (uint32_t)mmap_offset(obj);
279 struct page **pages;
280 pages = get_pages(obj);
281 if (IS_ERR(pages))
282 return PTR_ERR(pages);
283 // XXX ideally we would not map buffers writable when not needed...
284 ret = map_range(priv->iommus[id], offset, msm_obj->sgt,
285 obj->size, IOMMU_READ | IOMMU_WRITE);
286 msm_obj->domain[id].iova = offset;
287 }
288
289 if (!ret)
290 *iova = msm_obj->domain[id].iova;
291
292 return ret;
293}
294
295int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
296{
297 int ret;
298 mutex_lock(&obj->dev->struct_mutex);
299 ret = msm_gem_get_iova_locked(obj, id, iova);
300 mutex_unlock(&obj->dev->struct_mutex);
301 return ret;
302}
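/*
 * For example, the submit path in msm_gem_submit.c pins each buffer with
 * msm_gem_get_iova(&msm_obj->base, submit->gpu->id, &iova) while validating
 * objects, and drops the pin again via msm_gem_put_iova() when the bo is
 * unlocked/unpinned.
 */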
303
304void msm_gem_put_iova(struct drm_gem_object *obj, int id)
305{
306 // XXX TODO ..
307 // NOTE: probably don't need a _locked() version.. we wouldn't
308 // normally unmap here, but instead just mark that it could be
309 // unmapped (if the iova refcnt drops to zero), but then later
310 // if another _get_iova_locked() fails we can start unmapping
311 // things that are no longer needed..
312}
313
314int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
315 struct drm_mode_create_dumb *args)
316{
317 args->pitch = align_pitch(args->width, args->bpp);
318 args->size = PAGE_ALIGN(args->pitch * args->height);
319 return msm_gem_new_handle(dev, file, args->size,
320 MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
321}
322
323int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
324 uint32_t handle)
325{
326 /* No special work needed, drop the reference and see what falls out */
327 return drm_gem_handle_delete(file, handle);
328}
329
330int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
331 uint32_t handle, uint64_t *offset)
332{
333 struct drm_gem_object *obj;
334 int ret = 0;
335
336 /* GEM does all our handle to object mapping */
337 obj = drm_gem_object_lookup(dev, file, handle);
338 if (obj == NULL) {
339 ret = -ENOENT;
340 goto fail;
341 }
342
343 *offset = msm_gem_mmap_offset(obj);
344
345 drm_gem_object_unreference_unlocked(obj);
346
347fail:
348 return ret;
349}
350
351void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
352{
353 struct msm_gem_object *msm_obj = to_msm_bo(obj);
354 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
355 if (!msm_obj->vaddr) {
356 struct page **pages = get_pages(obj);
357 if (IS_ERR(pages))
358 return ERR_CAST(pages);
359 msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
360 VM_MAP, pgprot_writecombine(PAGE_KERNEL));
361 }
362 return msm_obj->vaddr;
363}
364
365void *msm_gem_vaddr(struct drm_gem_object *obj)
366{
367 void *ret;
368 mutex_lock(&obj->dev->struct_mutex);
369 ret = msm_gem_vaddr_locked(obj);
370 mutex_unlock(&obj->dev->struct_mutex);
371 return ret;
372}
373
374int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
375 struct work_struct *work)
376{
377 struct drm_device *dev = obj->dev;
378 struct msm_drm_private *priv = dev->dev_private;
379 struct msm_gem_object *msm_obj = to_msm_bo(obj);
380 int ret = 0;
381
382 mutex_lock(&dev->struct_mutex);
383 if (!list_empty(&work->entry)) {
384 ret = -EINVAL;
385 } else if (is_active(msm_obj)) {
386 list_add_tail(&work->entry, &msm_obj->inactive_work);
387 } else {
388 queue_work(priv->wq, work);
389 }
390 mutex_unlock(&dev->struct_mutex);
391
392 return ret;
393}
394
395void msm_gem_move_to_active(struct drm_gem_object *obj,
396 struct msm_gpu *gpu, uint32_t fence)
397{
398 struct msm_gem_object *msm_obj = to_msm_bo(obj);
399 msm_obj->gpu = gpu;
400 msm_obj->fence = fence;
401 list_del_init(&msm_obj->mm_list);
402 list_add_tail(&msm_obj->mm_list, &gpu->active_list);
403}
404
405void msm_gem_move_to_inactive(struct drm_gem_object *obj)
406{
407 struct drm_device *dev = obj->dev;
408 struct msm_drm_private *priv = dev->dev_private;
409 struct msm_gem_object *msm_obj = to_msm_bo(obj);
410
411 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
412
413 msm_obj->gpu = NULL;
414 msm_obj->fence = 0;
415 list_del_init(&msm_obj->mm_list);
416 list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
417
418 while (!list_empty(&msm_obj->inactive_work)) {
419 struct work_struct *work;
420
421 work = list_first_entry(&msm_obj->inactive_work,
422 struct work_struct, entry);
423
424 list_del_init(&work->entry);
425 queue_work(priv->wq, work);
426 }
427}
428
429int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
430 struct timespec *timeout)
431{
432 struct drm_device *dev = obj->dev;
433 struct msm_gem_object *msm_obj = to_msm_bo(obj);
434 int ret = 0;
435
436 if (is_active(msm_obj) && !(op & MSM_PREP_NOSYNC))
437 ret = msm_wait_fence_interruptable(dev, msm_obj->fence, timeout);
438
439 /* TODO cache maintenance */
440
441 return ret;
442}
443
444int msm_gem_cpu_fini(struct drm_gem_object *obj)
445{
446 /* TODO cache maintenance */
447 return 0;
448}
449
450#ifdef CONFIG_DEBUG_FS
451void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
452{
453 struct drm_device *dev = obj->dev;
454 struct msm_gem_object *msm_obj = to_msm_bo(obj);
455 uint64_t off = drm_vma_node_start(&obj->vma_node);
456
457 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
458 seq_printf(m, "%08x: %c(%d) %2d (%2d) %08llx %p %d\n",
459 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
460 msm_obj->fence, obj->name, obj->refcount.refcount.counter,
461 off, msm_obj->vaddr, obj->size);
462}
463
464void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
465{
466 struct msm_gem_object *msm_obj;
467 int count = 0;
468 size_t size = 0;
469
470 list_for_each_entry(msm_obj, list, mm_list) {
471 struct drm_gem_object *obj = &msm_obj->base;
472 seq_printf(m, " ");
473 msm_gem_describe(obj, m);
474 count++;
475 size += obj->size;
476 }
477
478 seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
479}
480#endif
481
482void msm_gem_free_object(struct drm_gem_object *obj)
483{
484 struct drm_device *dev = obj->dev;
485 struct msm_gem_object *msm_obj = to_msm_bo(obj);
486 int id;
487
488 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
489
490 /* object should not be on active list: */
491 WARN_ON(is_active(msm_obj));
492
493 list_del(&msm_obj->mm_list);
494
495 for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
496 if (msm_obj->domain[id].iova) {
497 struct msm_drm_private *priv = obj->dev->dev_private;
498 uint32_t offset = (uint32_t)mmap_offset(obj);
499 unmap_range(priv->iommus[id], offset,
500 msm_obj->sgt, obj->size);
501 }
502 }
503
504 drm_gem_free_mmap_offset(obj);
505
506 if (msm_obj->vaddr)
507 vunmap(msm_obj->vaddr);
508
509 put_pages(obj);
510
511 if (msm_obj->resv == &msm_obj->_resv)
512 reservation_object_fini(msm_obj->resv);
513
514 drm_gem_object_release(obj);
515
516 kfree(msm_obj);
517}
518
519/* convenience method to construct a GEM buffer object, and userspace handle */
520int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
521 uint32_t size, uint32_t flags, uint32_t *handle)
522{
523 struct drm_gem_object *obj;
524 int ret;
525
526 ret = mutex_lock_interruptible(&dev->struct_mutex);
527 if (ret)
528 return ret;
529
530 obj = msm_gem_new(dev, size, flags);
531
532 mutex_unlock(&dev->struct_mutex);
533
534 if (IS_ERR(obj))
535 return PTR_ERR(obj);
536
537 ret = drm_gem_handle_create(file, obj, handle);
538
539 /* drop reference from allocate - handle holds it now */
540 drm_gem_object_unreference_unlocked(obj);
541
542 return ret;
543}
544
545struct drm_gem_object *msm_gem_new(struct drm_device *dev,
546 uint32_t size, uint32_t flags)
547{
548 struct msm_drm_private *priv = dev->dev_private;
549 struct msm_gem_object *msm_obj;
550 struct drm_gem_object *obj = NULL;
551 int ret;
552
553 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
554
555 size = PAGE_ALIGN(size);
556
557 switch (flags & MSM_BO_CACHE_MASK) {
558 case MSM_BO_UNCACHED:
559 case MSM_BO_CACHED:
560 case MSM_BO_WC:
561 break;
562 default:
563 dev_err(dev->dev, "invalid cache flag: %x\n",
564 (flags & MSM_BO_CACHE_MASK));
565 ret = -EINVAL;
566 goto fail;
567 }
568
569 msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
570 if (!msm_obj) {
571 ret = -ENOMEM;
572 goto fail;
573 }
574
575 obj = &msm_obj->base;
576
577 ret = drm_gem_object_init(dev, obj, size);
578 if (ret)
579 goto fail;
580
581 msm_obj->flags = flags;
582
583 msm_obj->resv = &msm_obj->_resv;
584 reservation_object_init(msm_obj->resv);
585
586 INIT_LIST_HEAD(&msm_obj->submit_entry);
587 INIT_LIST_HEAD(&msm_obj->inactive_work);
588 list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
589
590 return obj;
591
592fail:
593 if (obj)
594 drm_gem_object_unreference_unlocked(obj);
595
596 return ERR_PTR(ret);
597}
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
new file mode 100644
index 000000000000..d746f13d283c
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -0,0 +1,99 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_GEM_H__
19#define __MSM_GEM_H__
20
21#include <linux/reservation.h>
22#include "msm_drv.h"
23
24struct msm_gem_object {
25 struct drm_gem_object base;
26
27 uint32_t flags;
28
 29	/* An object is either:
 30	 * inactive - on priv->inactive_list
 31	 * active - on one of the gpu's active_lists.. well, at
 32	 * least for now we don't have (I don't think) hw sync between
 33	 * 2d and 3d on devices which have both, meaning we need to
 34	 * block on submit if a bo is already on the other ring
35 *
36 */
37 struct list_head mm_list;
38 struct msm_gpu *gpu; /* non-null if active */
39 uint32_t fence;
40
41 /* Transiently in the process of submit ioctl, objects associated
42 * with the submit are on submit->bo_list.. this only lasts for
43 * the duration of the ioctl, so one bo can never be on multiple
44 * submit lists.
45 */
46 struct list_head submit_entry;
47
 48	/* work deferred until bo is inactive: */
49 struct list_head inactive_work;
50
51 struct page **pages;
52 struct sg_table *sgt;
53 void *vaddr;
54
55 struct {
56 // XXX
57 uint32_t iova;
58 } domain[NUM_DOMAINS];
59
60 /* normally (resv == &_resv) except for imported bo's */
61 struct reservation_object *resv;
62 struct reservation_object _resv;
63};
64#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
65
66static inline bool is_active(struct msm_gem_object *msm_obj)
67{
68 return msm_obj->gpu != NULL;
69}
70
71#define MAX_CMDS 4
72
73/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
74 * associated with the cmdstream submission for synchronization (and
75 * make it easier to unwind when things go wrong, etc). This only
76 * lasts for the duration of the submit-ioctl.
77 */
78struct msm_gem_submit {
79 struct drm_device *dev;
80 struct msm_gpu *gpu;
81 struct list_head bo_list;
82 struct ww_acquire_ctx ticket;
83 uint32_t fence;
84 bool valid;
85 unsigned int nr_cmds;
86 unsigned int nr_bos;
87 struct {
88 uint32_t type;
89 uint32_t size; /* in dwords */
90 uint32_t iova;
91 } cmd[MAX_CMDS];
92 struct {
93 uint32_t flags;
94 struct msm_gem_object *obj;
95 uint32_t iova;
96 } bos[0];
97};
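/*
 * Note on sizing: bos[] is a flexible array, so allocations of this struct
 * are expected to be sized for the bo count of a given submit, as
 * submit_create() in msm_gem_submit.c does with
 * sizeof(*submit) + (nr * sizeof(submit->bos[0])).
 */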
98
99#endif /* __MSM_GEM_H__ */
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
new file mode 100644
index 000000000000..3e1ef3a00f60
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -0,0 +1,412 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_drv.h"
19#include "msm_gpu.h"
20#include "msm_gem.h"
21
22/*
23 * Cmdstream submission:
24 */
25
26#define BO_INVALID_FLAGS ~(MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
27/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
28#define BO_VALID 0x8000
29#define BO_LOCKED 0x4000
30#define BO_PINNED 0x2000
31
32static inline void __user *to_user_ptr(u64 address)
33{
34 return (void __user *)(uintptr_t)address;
35}
36
37static struct msm_gem_submit *submit_create(struct drm_device *dev,
38 struct msm_gpu *gpu, int nr)
39{
40 struct msm_gem_submit *submit;
41 int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
42
43 submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
44 if (submit) {
45 submit->dev = dev;
46 submit->gpu = gpu;
47
48 /* initially, until copy_from_user() and bo lookup succeeds: */
49 submit->nr_bos = 0;
50 submit->nr_cmds = 0;
51
52 INIT_LIST_HEAD(&submit->bo_list);
53 ww_acquire_init(&submit->ticket, &reservation_ww_class);
54 }
55
56 return submit;
57}
58
59static int submit_lookup_objects(struct msm_gem_submit *submit,
60 struct drm_msm_gem_submit *args, struct drm_file *file)
61{
62 unsigned i;
63 int ret = 0;
64
65 spin_lock(&file->table_lock);
66
67 for (i = 0; i < args->nr_bos; i++) {
68 struct drm_msm_gem_submit_bo submit_bo;
69 struct drm_gem_object *obj;
70 struct msm_gem_object *msm_obj;
71 void __user *userptr =
72 to_user_ptr(args->bos + (i * sizeof(submit_bo)));
73
74 ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
75 if (ret) {
76 ret = -EFAULT;
77 goto out_unlock;
78 }
79
80 if (submit_bo.flags & BO_INVALID_FLAGS) {
81 DBG("invalid flags: %x", submit_bo.flags);
82 ret = -EINVAL;
83 goto out_unlock;
84 }
85
86 submit->bos[i].flags = submit_bo.flags;
87 /* in validate_objects() we figure out if this is true: */
88 submit->bos[i].iova = submit_bo.presumed;
89
90 /* normally use drm_gem_object_lookup(), but for bulk lookup
91 * all under single table_lock just hit object_idr directly:
92 */
93 obj = idr_find(&file->object_idr, submit_bo.handle);
94 if (!obj) {
95 DBG("invalid handle %u at index %u", submit_bo.handle, i);
96 ret = -EINVAL;
97 goto out_unlock;
98 }
99
100 msm_obj = to_msm_bo(obj);
101
102 if (!list_empty(&msm_obj->submit_entry)) {
103 DBG("handle %u at index %u already on submit list",
104 submit_bo.handle, i);
105 ret = -EINVAL;
106 goto out_unlock;
107 }
108
109 drm_gem_object_reference(obj);
110
111 submit->bos[i].obj = msm_obj;
112
113 list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
114 }
115
116out_unlock:
117 submit->nr_bos = i;
118 spin_unlock(&file->table_lock);
119
120 return ret;
121}
122
123static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
124{
125 struct msm_gem_object *msm_obj = submit->bos[i].obj;
126
127 if (submit->bos[i].flags & BO_PINNED)
128 msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
129
130 if (submit->bos[i].flags & BO_LOCKED)
131 ww_mutex_unlock(&msm_obj->resv->lock);
132
133 if (!(submit->bos[i].flags & BO_VALID))
134 submit->bos[i].iova = 0;
135
136 submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
137}
138
139/* This is where we make sure all the bo's are reserved and pin'd: */
140static int submit_validate_objects(struct msm_gem_submit *submit)
141{
142 int contended, slow_locked = -1, i, ret = 0;
143
144retry:
145 submit->valid = true;
146
147 for (i = 0; i < submit->nr_bos; i++) {
148 struct msm_gem_object *msm_obj = submit->bos[i].obj;
149 uint32_t iova;
150
151 if (slow_locked == i)
152 slow_locked = -1;
153
154 contended = i;
155
156 if (!(submit->bos[i].flags & BO_LOCKED)) {
157 ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
158 &submit->ticket);
159 if (ret)
160 goto fail;
161 submit->bos[i].flags |= BO_LOCKED;
162 }
163
164
165 /* if locking succeeded, pin bo: */
166 ret = msm_gem_get_iova(&msm_obj->base,
167 submit->gpu->id, &iova);
168
169 /* this would break the logic in the fail path.. there is no
170 * reason for this to happen, but just to be on the safe side
171 * let's notice if this starts happening in the future:
172 */
173 WARN_ON(ret == -EDEADLK);
174
175 if (ret)
176 goto fail;
177
178 submit->bos[i].flags |= BO_PINNED;
179
180 if (iova == submit->bos[i].iova) {
181 submit->bos[i].flags |= BO_VALID;
182 } else {
183 submit->bos[i].iova = iova;
184 submit->bos[i].flags &= ~BO_VALID;
185 submit->valid = false;
186 }
187 }
188
189 ww_acquire_done(&submit->ticket);
190
191 return 0;
192
193fail:
194 for (; i >= 0; i--)
195 submit_unlock_unpin_bo(submit, i);
196
197 if (slow_locked > 0)
198 submit_unlock_unpin_bo(submit, slow_locked);
199
200 if (ret == -EDEADLK) {
201 struct msm_gem_object *msm_obj = submit->bos[contended].obj;
202 /* we lost out in a seqno race, lock and retry.. */
203 ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
204 &submit->ticket);
205 if (!ret) {
206 submit->bos[contended].flags |= BO_LOCKED;
207 slow_locked = contended;
208 goto retry;
209 }
210 }
211
212 return ret;
213}
214
215static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
216 struct msm_gem_object **obj, uint32_t *iova, bool *valid)
217{
218 if (idx >= submit->nr_bos) {
219 DBG("invalid buffer index: %u (out of %u)", idx, submit->nr_bos);
 220		return -EINVAL;
221 }
222
223 if (obj)
224 *obj = submit->bos[idx].obj;
225 if (iova)
226 *iova = submit->bos[idx].iova;
227 if (valid)
228 *valid = !!(submit->bos[idx].flags & BO_VALID);
229
230 return 0;
231}
232
233/* process the reloc's and patch up the cmdstream as needed: */
234static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
235 uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
236{
237 uint32_t i, last_offset = 0;
238 uint32_t *ptr;
239 int ret;
240
241 if (offset % 4) {
242 DBG("non-aligned cmdstream buffer: %u", offset);
243 return -EINVAL;
244 }
245
246 /* For now, just map the entire thing. Eventually we probably
 247	 * want to do it page-by-page, w/ kmap() if not vmap()d..
248 */
249 ptr = msm_gem_vaddr(&obj->base);
250
251 if (IS_ERR(ptr)) {
252 ret = PTR_ERR(ptr);
253 DBG("failed to map: %d", ret);
254 return ret;
255 }
256
257 for (i = 0; i < nr_relocs; i++) {
258 struct drm_msm_gem_submit_reloc submit_reloc;
259 void __user *userptr =
260 to_user_ptr(relocs + (i * sizeof(submit_reloc)));
261 uint32_t iova, off;
262 bool valid;
263
264 ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
265 if (ret)
266 return -EFAULT;
267
268 if (submit_reloc.submit_offset % 4) {
269 DBG("non-aligned reloc offset: %u",
270 submit_reloc.submit_offset);
271 return -EINVAL;
272 }
273
274 /* offset in dwords: */
275 off = submit_reloc.submit_offset / 4;
276
277 if ((off >= (obj->base.size / 4)) ||
278 (off < last_offset)) {
279 DBG("invalid offset %u at reloc %u", off, i);
280 return -EINVAL;
281 }
282
283 ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
284 if (ret)
285 return ret;
286
287 if (valid)
288 continue;
289
290 iova += submit_reloc.reloc_offset;
291
292 if (submit_reloc.shift < 0)
293 iova >>= -submit_reloc.shift;
294 else
295 iova <<= submit_reloc.shift;
296
297 ptr[off] = iova | submit_reloc.or;
298
299 last_offset = off;
300 }
301
302 return 0;
303}
304
305static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
306{
307 unsigned i;
308
309 mutex_lock(&submit->dev->struct_mutex);
310 for (i = 0; i < submit->nr_bos; i++) {
311 struct msm_gem_object *msm_obj = submit->bos[i].obj;
312 submit_unlock_unpin_bo(submit, i);
313 list_del_init(&msm_obj->submit_entry);
314 drm_gem_object_unreference(&msm_obj->base);
315 }
316 mutex_unlock(&submit->dev->struct_mutex);
317
318 ww_acquire_fini(&submit->ticket);
319 kfree(submit);
320}
321
322int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
323 struct drm_file *file)
324{
325 struct msm_drm_private *priv = dev->dev_private;
326 struct drm_msm_gem_submit *args = data;
327 struct msm_file_private *ctx = file->driver_priv;
328 struct msm_gem_submit *submit;
329 struct msm_gpu *gpu;
330 unsigned i;
331 int ret;
332
333 /* for now, we just have 3d pipe.. eventually this would need to
334 * be more clever to dispatch to appropriate gpu module:
335 */
336 if (args->pipe != MSM_PIPE_3D0)
337 return -EINVAL;
338
339 gpu = priv->gpu;
340
341 if (args->nr_cmds > MAX_CMDS)
342 return -EINVAL;
343
344 submit = submit_create(dev, gpu, args->nr_bos);
345 if (!submit) {
346 ret = -ENOMEM;
347 goto out;
348 }
349
350 ret = submit_lookup_objects(submit, args, file);
351 if (ret)
352 goto out;
353
354 ret = submit_validate_objects(submit);
355 if (ret)
356 goto out;
357
358 for (i = 0; i < args->nr_cmds; i++) {
359 struct drm_msm_gem_submit_cmd submit_cmd;
360 void __user *userptr =
361 to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
362 struct msm_gem_object *msm_obj;
363 uint32_t iova;
364
365 ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
366 if (ret) {
367 ret = -EFAULT;
368 goto out;
369 }
370
371 ret = submit_bo(submit, submit_cmd.submit_idx,
372 &msm_obj, &iova, NULL);
373 if (ret)
374 goto out;
375
376 if (submit_cmd.size % 4) {
377 DBG("non-aligned cmdstream buffer size: %u",
378 submit_cmd.size);
379 ret = -EINVAL;
380 goto out;
381 }
382
383 if (submit_cmd.size >= msm_obj->base.size) {
384 DBG("invalid cmdstream size: %u", submit_cmd.size);
385 ret = -EINVAL;
386 goto out;
387 }
388
389 submit->cmd[i].type = submit_cmd.type;
390 submit->cmd[i].size = submit_cmd.size / 4;
391 submit->cmd[i].iova = iova + submit_cmd.submit_offset;
392
393 if (submit->valid)
394 continue;
395
396 ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
397 submit_cmd.nr_relocs, submit_cmd.relocs);
398 if (ret)
399 goto out;
400 }
401
402 submit->nr_cmds = i;
403
404 ret = msm_gpu_submit(gpu, submit, ctx);
405
406 args->fence = submit->fence;
407
408out:
409 if (submit)
410 submit_cleanup(submit, !!ret);
411 return ret;
412}
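
submit_validate_objects() above is an instance of the wound/wait mutex acquire-context pattern: lock each bo's reservation, and on -EDEADLK drop everything held, take the contended lock via the slow path and retry. A stripped-down sketch of that loop, detached from the driver's structures (all names here are illustrative, not the driver's own), assuming <linux/ww_mutex.h>:

#include <linux/ww_mutex.h>
#include <linux/errno.h>

static DEFINE_WW_CLASS(demo_ww_class);

/* Take every mutex in locks[0..n-1] under one acquire context.  On -EDEADLK
 * drop everything held, take the contended lock via the slow path, and retry
 * from the start.  The caller unlocks and calls ww_acquire_fini() when done.
 */
static int demo_lock_all(struct ww_mutex *locks[], int n,
		struct ww_acquire_ctx *ctx)
{
	int i, ret, contended = -1;

	ww_acquire_init(ctx, &demo_ww_class);
retry:
	for (i = 0; i < n; i++) {
		if (i == contended)	/* already held from the slow path */
			continue;
		ret = ww_mutex_lock_interruptible(locks[i], ctx);
		if (ret)
			goto fail;
	}
	ww_acquire_done(ctx);
	return 0;

fail:
	{
		int failed = i;

		while (--i >= 0)
			ww_mutex_unlock(locks[i]);
		if (contended >= failed)	/* slow-path lock not reached yet */
			ww_mutex_unlock(locks[contended]);

		if (ret == -EDEADLK) {
			ret = ww_mutex_lock_slow_interruptible(locks[failed], ctx);
			if (!ret) {
				contended = failed;
				goto retry;
			}
		}
	}
	ww_acquire_fini(ctx);
	return ret;
}

The driver's version additionally pins each bo's iova after locking and records whether userspace's presumed address is still valid, which is what lets it skip the reloc pass later.
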
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
new file mode 100644
index 000000000000..e1e1ec9321ff
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -0,0 +1,463 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_gpu.h"
19#include "msm_gem.h"
20
21
22/*
23 * Power Management:
24 */
25
26#ifdef CONFIG_MSM_BUS_SCALING
27#include <mach/board.h>
28#include <mach/kgsl.h>
29static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev)
30{
31 struct drm_device *dev = gpu->dev;
32 struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
33
 34	if (!pdata) {
 35		dev_err(dev->dev, "could not find GPU pdata\n");
36 return;
37 }
38
39 if (pdata->bus_scale_table) {
40 gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table);
41 DBG("bus scale client: %08x", gpu->bsc);
42 }
43}
44
45static void bs_fini(struct msm_gpu *gpu)
46{
47 if (gpu->bsc) {
48 msm_bus_scale_unregister_client(gpu->bsc);
49 gpu->bsc = 0;
50 }
51}
52
53static void bs_set(struct msm_gpu *gpu, int idx)
54{
55 if (gpu->bsc) {
56 DBG("set bus scaling: %d", idx);
57 msm_bus_scale_client_update_request(gpu->bsc, idx);
58 }
59}
60#else
61static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) {}
62static void bs_fini(struct msm_gpu *gpu) {}
63static void bs_set(struct msm_gpu *gpu, int idx) {}
64#endif
65
66static int enable_pwrrail(struct msm_gpu *gpu)
67{
68 struct drm_device *dev = gpu->dev;
69 int ret = 0;
70
71 if (gpu->gpu_reg) {
72 ret = regulator_enable(gpu->gpu_reg);
73 if (ret) {
74 dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
75 return ret;
76 }
77 }
78
79 if (gpu->gpu_cx) {
80 ret = regulator_enable(gpu->gpu_cx);
81 if (ret) {
82 dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
83 return ret;
84 }
85 }
86
87 return 0;
88}
89
90static int disable_pwrrail(struct msm_gpu *gpu)
91{
92 if (gpu->gpu_cx)
93 regulator_disable(gpu->gpu_cx);
94 if (gpu->gpu_reg)
95 regulator_disable(gpu->gpu_reg);
96 return 0;
97}
98
99static int enable_clk(struct msm_gpu *gpu)
100{
101 struct clk *rate_clk = NULL;
102 int i;
103
104 /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
105 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
106 if (gpu->grp_clks[i]) {
107 clk_prepare(gpu->grp_clks[i]);
108 rate_clk = gpu->grp_clks[i];
109 }
110 }
111
112 if (rate_clk && gpu->fast_rate)
113 clk_set_rate(rate_clk, gpu->fast_rate);
114
115 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
116 if (gpu->grp_clks[i])
117 clk_enable(gpu->grp_clks[i]);
118
119 return 0;
120}
121
122static int disable_clk(struct msm_gpu *gpu)
123{
124 struct clk *rate_clk = NULL;
125 int i;
126
127 /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
128 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
129 if (gpu->grp_clks[i]) {
130 clk_disable(gpu->grp_clks[i]);
131 rate_clk = gpu->grp_clks[i];
132 }
133 }
134
135 if (rate_clk && gpu->slow_rate)
136 clk_set_rate(rate_clk, gpu->slow_rate);
137
138 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
139 if (gpu->grp_clks[i])
140 clk_unprepare(gpu->grp_clks[i]);
141
142 return 0;
143}
144
145static int enable_axi(struct msm_gpu *gpu)
146{
147 if (gpu->ebi1_clk)
148 clk_prepare_enable(gpu->ebi1_clk);
149 if (gpu->bus_freq)
150 bs_set(gpu, gpu->bus_freq);
151 return 0;
152}
153
154static int disable_axi(struct msm_gpu *gpu)
155{
156 if (gpu->ebi1_clk)
157 clk_disable_unprepare(gpu->ebi1_clk);
158 if (gpu->bus_freq)
159 bs_set(gpu, 0);
160 return 0;
161}
162
163int msm_gpu_pm_resume(struct msm_gpu *gpu)
164{
165 int ret;
166
167 DBG("%s", gpu->name);
168
169 ret = enable_pwrrail(gpu);
170 if (ret)
171 return ret;
172
173 ret = enable_clk(gpu);
174 if (ret)
175 return ret;
176
177 ret = enable_axi(gpu);
178 if (ret)
179 return ret;
180
181 return 0;
182}
183
184int msm_gpu_pm_suspend(struct msm_gpu *gpu)
185{
186 int ret;
187
188 DBG("%s", gpu->name);
189
190 ret = disable_axi(gpu);
191 if (ret)
192 return ret;
193
194 ret = disable_clk(gpu);
195 if (ret)
196 return ret;
197
198 ret = disable_pwrrail(gpu);
199 if (ret)
200 return ret;
201
202 return 0;
203}
204
205/*
206 * Hangcheck detection for locked gpu:
207 */
208
209static void recover_worker(struct work_struct *work)
210{
211 struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
212 struct drm_device *dev = gpu->dev;
213
214 dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
215
216 mutex_lock(&dev->struct_mutex);
217 gpu->funcs->recover(gpu);
218 mutex_unlock(&dev->struct_mutex);
219
220 msm_gpu_retire(gpu);
221}
222
223static void hangcheck_timer_reset(struct msm_gpu *gpu)
224{
225 DBG("%s", gpu->name);
226 mod_timer(&gpu->hangcheck_timer,
227 round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
228}
229
230static void hangcheck_handler(unsigned long data)
231{
232 struct msm_gpu *gpu = (struct msm_gpu *)data;
233 uint32_t fence = gpu->funcs->last_fence(gpu);
234
235 if (fence != gpu->hangcheck_fence) {
236 /* some progress has been made.. ya! */
237 gpu->hangcheck_fence = fence;
238 } else if (fence < gpu->submitted_fence) {
239 /* no progress and not done.. hung! */
240 struct msm_drm_private *priv = gpu->dev->dev_private;
241 gpu->hangcheck_fence = fence;
242 queue_work(priv->wq, &gpu->recover_work);
243 }
244
245 /* if still more pending work, reset the hangcheck timer: */
246 if (gpu->submitted_fence > gpu->hangcheck_fence)
247 hangcheck_timer_reset(gpu);
248}
249
250/*
251 * Cmdstream submission/retirement:
252 */
253
254static void retire_worker(struct work_struct *work)
255{
256 struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
257 struct drm_device *dev = gpu->dev;
258 uint32_t fence = gpu->funcs->last_fence(gpu);
259
260 mutex_lock(&dev->struct_mutex);
261
262 while (!list_empty(&gpu->active_list)) {
263 struct msm_gem_object *obj;
264
265 obj = list_first_entry(&gpu->active_list,
266 struct msm_gem_object, mm_list);
267
268 if (obj->fence <= fence) {
269 /* move to inactive: */
270 msm_gem_move_to_inactive(&obj->base);
271 msm_gem_put_iova(&obj->base, gpu->id);
272 drm_gem_object_unreference(&obj->base);
273 } else {
274 break;
275 }
276 }
277
278 msm_update_fence(gpu->dev, fence);
279
280 mutex_unlock(&dev->struct_mutex);
281}
282
283/* call from irq handler to schedule work to retire bo's */
284void msm_gpu_retire(struct msm_gpu *gpu)
285{
286 struct msm_drm_private *priv = gpu->dev->dev_private;
287 queue_work(priv->wq, &gpu->retire_work);
288}
289
290/* add bo's to gpu's ring, and kick gpu: */
291int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
292 struct msm_file_private *ctx)
293{
294 struct drm_device *dev = gpu->dev;
295 struct msm_drm_private *priv = dev->dev_private;
296 int i, ret;
297
298 mutex_lock(&dev->struct_mutex);
299
300 submit->fence = ++priv->next_fence;
301
302 gpu->submitted_fence = submit->fence;
303
304 ret = gpu->funcs->submit(gpu, submit, ctx);
305 priv->lastctx = ctx;
306
307 for (i = 0; i < submit->nr_bos; i++) {
308 struct msm_gem_object *msm_obj = submit->bos[i].obj;
309
310 /* can't happen yet.. but when we add 2d support we'll have
311 * to deal w/ cross-ring synchronization:
312 */
313 WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
314
315 if (!is_active(msm_obj)) {
316 uint32_t iova;
317
318 /* ring takes a reference to the bo and iova: */
319 drm_gem_object_reference(&msm_obj->base);
320 msm_gem_get_iova_locked(&msm_obj->base,
321 submit->gpu->id, &iova);
322 }
323
324 msm_gem_move_to_active(&msm_obj->base, gpu, submit->fence);
325 }
326 hangcheck_timer_reset(gpu);
327 mutex_unlock(&dev->struct_mutex);
328
329 return ret;
330}
331
332/*
333 * Init/Cleanup:
334 */
335
336static irqreturn_t irq_handler(int irq, void *data)
337{
338 struct msm_gpu *gpu = data;
339 return gpu->funcs->irq(gpu);
340}
341
342static const char *clk_names[] = {
343 "src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk",
344};
345
346int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
347 struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
348 const char *name, const char *ioname, const char *irqname, int ringsz)
349{
350 int i, ret;
351
352 gpu->dev = drm;
353 gpu->funcs = funcs;
354 gpu->name = name;
355
356 INIT_LIST_HEAD(&gpu->active_list);
357 INIT_WORK(&gpu->retire_work, retire_worker);
358 INIT_WORK(&gpu->recover_work, recover_worker);
359
360 setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
361 (unsigned long)gpu);
362
363 BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));
364
365 /* Map registers: */
366 gpu->mmio = msm_ioremap(pdev, ioname, name);
367 if (IS_ERR(gpu->mmio)) {
368 ret = PTR_ERR(gpu->mmio);
369 goto fail;
370 }
371
372 /* Get Interrupt: */
373 gpu->irq = platform_get_irq_byname(pdev, irqname);
374 if (gpu->irq < 0) {
375 ret = gpu->irq;
376 dev_err(drm->dev, "failed to get irq: %d\n", ret);
377 goto fail;
378 }
379
380 ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
381 IRQF_TRIGGER_HIGH, gpu->name, gpu);
382 if (ret) {
383 dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
384 goto fail;
385 }
386
387 /* Acquire clocks: */
388 for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
389 gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
390 DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
391 if (IS_ERR(gpu->grp_clks[i]))
392 gpu->grp_clks[i] = NULL;
393 }
394
395 gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
396 DBG("ebi1_clk: %p", gpu->ebi1_clk);
397 if (IS_ERR(gpu->ebi1_clk))
398 gpu->ebi1_clk = NULL;
399
400 /* Acquire regulators: */
401 gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
402 DBG("gpu_reg: %p", gpu->gpu_reg);
403 if (IS_ERR(gpu->gpu_reg))
404 gpu->gpu_reg = NULL;
405
406 gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
407 DBG("gpu_cx: %p", gpu->gpu_cx);
408 if (IS_ERR(gpu->gpu_cx))
409 gpu->gpu_cx = NULL;
410
411 /* Setup IOMMU.. eventually we will (I think) do this once per context
412 * and have separate page tables per context. For now, to keep things
413 * simple and to get something working, just use a single address space:
414 */
415 gpu->iommu = iommu_domain_alloc(&platform_bus_type);
416 if (!gpu->iommu) {
417 dev_err(drm->dev, "failed to allocate IOMMU\n");
418 ret = -ENOMEM;
419 goto fail;
420 }
421 gpu->id = msm_register_iommu(drm, gpu->iommu);
422
423 /* Create ringbuffer: */
424 gpu->rb = msm_ringbuffer_new(gpu, ringsz);
425 if (IS_ERR(gpu->rb)) {
426 ret = PTR_ERR(gpu->rb);
427 gpu->rb = NULL;
428 dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
429 goto fail;
430 }
431
432 ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
433 if (ret) {
434 gpu->rb_iova = 0;
435 dev_err(drm->dev, "could not map ringbuffer: %d\n", ret);
436 goto fail;
437 }
438
439 bs_init(gpu, pdev);
440
441 return 0;
442
443fail:
444 return ret;
445}
446
447void msm_gpu_cleanup(struct msm_gpu *gpu)
448{
449 DBG("%s", gpu->name);
450
451 WARN_ON(!list_empty(&gpu->active_list));
452
453 bs_fini(gpu);
454
455 if (gpu->rb) {
456 if (gpu->rb_iova)
457 msm_gem_put_iova(gpu->rb->bo, gpu->id);
458 msm_ringbuffer_destroy(gpu->rb);
459 }
460
461 if (gpu->iommu)
462 iommu_domain_free(gpu->iommu);
463}
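
The hangcheck above reduces to comparing the fence the hardware had retired at the previous tick against what it has retired now: any change counts as progress; otherwise the GPU is considered hung if work is still outstanding. A stand-alone model of that decision (illustrative only):

#include <linux/types.h>

/* true when the GPU retired nothing over one period while work is pending */
static bool demo_gpu_is_hung(u32 *last_seen, u32 completed, u32 submitted)
{
	if (completed != *last_seen) {	/* some fence signalled: progress */
		*last_seen = completed;
		return false;
	}
	return completed < submitted;	/* stalled with submits outstanding */
}
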
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
new file mode 100644
index 000000000000..8cd829e520bb
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -0,0 +1,124 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_GPU_H__
19#define __MSM_GPU_H__
20
21#include <linux/clk.h>
22#include <linux/regulator/consumer.h>
23
24#include "msm_drv.h"
25#include "msm_ringbuffer.h"
26
27struct msm_gem_submit;
28
29/* So far, with hardware that I've seen to date, we can have:
30 * + zero, one, or two z180 2d cores
31 * + a3xx or a2xx 3d core, which share a common CP (the firmware
32 * for the CP seems to implement some different PM4 packet types
33 * but the basics of cmdstream submission are the same)
34 *
35 * Which means that the eventual complete "class" hierarchy, once
36 * support for all past and present hw is in place, becomes:
37 * + msm_gpu
38 * + adreno_gpu
39 * + a3xx_gpu
40 * + a2xx_gpu
41 * + z180_gpu
42 */
43struct msm_gpu_funcs {
44 int (*get_param)(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
45 int (*hw_init)(struct msm_gpu *gpu);
46 int (*pm_suspend)(struct msm_gpu *gpu);
47 int (*pm_resume)(struct msm_gpu *gpu);
48 int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
49 struct msm_file_private *ctx);
50 void (*flush)(struct msm_gpu *gpu);
51 void (*idle)(struct msm_gpu *gpu);
 52	irqreturn_t (*irq)(struct msm_gpu *gpu);
53 uint32_t (*last_fence)(struct msm_gpu *gpu);
54 void (*recover)(struct msm_gpu *gpu);
55 void (*destroy)(struct msm_gpu *gpu);
56#ifdef CONFIG_DEBUG_FS
57 /* show GPU status in debugfs: */
58 void (*show)(struct msm_gpu *gpu, struct seq_file *m);
59#endif
60};
61
62struct msm_gpu {
63 const char *name;
64 struct drm_device *dev;
65 const struct msm_gpu_funcs *funcs;
66
67 struct msm_ringbuffer *rb;
68 uint32_t rb_iova;
69
70 /* list of GEM active objects: */
71 struct list_head active_list;
72
73 uint32_t submitted_fence;
74
75 /* worker for handling active-list retiring: */
76 struct work_struct retire_work;
77
78 void __iomem *mmio;
79 int irq;
80
81 struct iommu_domain *iommu;
82 int id;
83
84 /* Power Control: */
85 struct regulator *gpu_reg, *gpu_cx;
86 struct clk *ebi1_clk, *grp_clks[5];
87 uint32_t fast_rate, slow_rate, bus_freq;
88 uint32_t bsc;
89
 90	/* Hang Detection: */
91#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
92#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
93 struct timer_list hangcheck_timer;
94 uint32_t hangcheck_fence;
95 struct work_struct recover_work;
96};
97
98static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
99{
100 msm_writel(data, gpu->mmio + (reg << 2));
101}
102
103static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
104{
105 return msm_readl(gpu->mmio + (reg << 2));
106}
107
108int msm_gpu_pm_suspend(struct msm_gpu *gpu);
109int msm_gpu_pm_resume(struct msm_gpu *gpu);
110
111void msm_gpu_retire(struct msm_gpu *gpu);
112int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
113 struct msm_file_private *ctx);
114
115int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
116 struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
117 const char *name, const char *ioname, const char *irqname, int ringsz);
118void msm_gpu_cleanup(struct msm_gpu *gpu);
119
120struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
121void __init a3xx_register(void);
122void __exit a3xx_unregister(void);
123
124#endif /* __MSM_GPU_H__ */
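
gpu_read()/gpu_write() above take dword-granular register indices, hence the reg << 2 conversion to a byte offset. A hypothetical read-modify-write helper layered on them (not part of this patch) could look like:

/* hypothetical helper: read-modify-write a dword-indexed register */
static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
{
	u32 val = gpu_read(gpu, reg);		/* byte offset is reg << 2 */

	gpu_write(gpu, reg, (val & ~mask) | or);
}
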
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
new file mode 100644
index 000000000000..8171537dd7d1
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -0,0 +1,61 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_ringbuffer.h"
19#include "msm_gpu.h"
20
21struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
22{
23 struct msm_ringbuffer *ring;
24 int ret;
25
26 size = ALIGN(size, 4); /* size should be dword aligned */
27
28 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
29 if (!ring) {
30 ret = -ENOMEM;
31 goto fail;
32 }
33
34 ring->gpu = gpu;
35 ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
36 if (IS_ERR(ring->bo)) {
37 ret = PTR_ERR(ring->bo);
38 ring->bo = NULL;
39 goto fail;
40 }
41
42 ring->start = msm_gem_vaddr_locked(ring->bo);
43 ring->end = ring->start + (size / 4);
44 ring->cur = ring->start;
45
46 ring->size = size;
47
48 return ring;
49
50fail:
51 if (ring)
52 msm_ringbuffer_destroy(ring);
53 return ERR_PTR(ret);
54}
55
56void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
57{
58 if (ring->bo)
59 drm_gem_object_unreference(ring->bo);
60 kfree(ring);
61}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
new file mode 100644
index 000000000000..6e0e1049fa4f
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -0,0 +1,43 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_RINGBUFFER_H__
19#define __MSM_RINGBUFFER_H__
20
21#include "msm_drv.h"
22
23struct msm_ringbuffer {
24 struct msm_gpu *gpu;
25 int size;
26 struct drm_gem_object *bo;
27 uint32_t *start, *end, *cur;
28};
29
30struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size);
31void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
32
33/* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
34
35static inline void
36OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
37{
38 if (ring->cur == ring->end)
39 ring->cur = ring->start;
40 *(ring->cur++) = data;
41}
42
43#endif /* __MSM_RINGBUFFER_H__ */
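
OUT_RING() above simply wraps the write pointer back to ring->start when it reaches ring->end; making sure enough space is free is left to the caller. A hypothetical emit helper built on it, for illustration only:

/* hypothetical: emit a packet header followed by its payload dwords */
static inline void demo_emit(struct msm_ringbuffer *ring, uint32_t hdr,
		const uint32_t *payload, unsigned int count)
{
	unsigned int i;

	OUT_RING(ring, hdr);
	for (i = 0; i < count; i++)
		OUT_RING(ring, payload[i]);	/* wraps automatically at ring->end */
}
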
diff --git a/drivers/gpu/drm/nouveau/core/core/printk.c b/drivers/gpu/drm/nouveau/core/core/printk.c
index 6161eaf5447c..52fb2aa129e8 100644
--- a/drivers/gpu/drm/nouveau/core/core/printk.c
+++ b/drivers/gpu/drm/nouveau/core/core/printk.c
@@ -27,6 +27,8 @@
27#include <core/subdev.h> 27#include <core/subdev.h>
28#include <core/printk.h> 28#include <core/printk.h>
29 29
30int nv_printk_suspend_level = NV_DBG_DEBUG;
31
30void 32void
31nv_printk_(struct nouveau_object *object, const char *pfx, int level, 33nv_printk_(struct nouveau_object *object, const char *pfx, int level,
32 const char *fmt, ...) 34 const char *fmt, ...)
@@ -72,3 +74,20 @@ nv_printk_(struct nouveau_object *object, const char *pfx, int level,
72 vprintk(mfmt, args); 74 vprintk(mfmt, args);
73 va_end(args); 75 va_end(args);
74} 76}
77
78#define CONV_LEVEL(x) case NV_DBG_##x: return NV_PRINTK_##x
79
80const char *nv_printk_level_to_pfx(int level)
81{
82 switch (level) {
83 CONV_LEVEL(FATAL);
84 CONV_LEVEL(ERROR);
85 CONV_LEVEL(WARN);
86 CONV_LEVEL(INFO);
87 CONV_LEVEL(DEBUG);
88 CONV_LEVEL(PARANOIA);
89 CONV_LEVEL(TRACE);
90 CONV_LEVEL(SPAM);
91 }
92 return NV_PRINTK_DEBUG;
93}
diff --git a/drivers/gpu/drm/nouveau/core/core/ramht.c b/drivers/gpu/drm/nouveau/core/core/ramht.c
index 86a64045dd60..f3b9bddc3875 100644
--- a/drivers/gpu/drm/nouveau/core/core/ramht.c
+++ b/drivers/gpu/drm/nouveau/core/core/ramht.c
@@ -22,7 +22,6 @@
22 22
23#include <core/object.h> 23#include <core/object.h>
24#include <core/ramht.h> 24#include <core/ramht.h>
25#include <core/math.h>
26 25
27#include <subdev/bar.h> 26#include <subdev/bar.h>
28 27
@@ -104,6 +103,6 @@ nouveau_ramht_new(struct nouveau_object *parent, struct nouveau_object *pargpu,
104 if (ret) 103 if (ret)
105 return ret; 104 return ret;
106 105
107 ramht->bits = log2i(nv_gpuobj(ramht)->size >> 3); 106 ramht->bits = order_base_2(nv_gpuobj(ramht)->size >> 3);
108 return 0; 107 return 0;
109} 108}
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c
index 8bf92b0e6d82..6b089e022fd2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c
@@ -19,16 +19,14 @@
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
23 */ 23 */
24 24
25#include <core/engctx.h> 25#include <engine/falcon.h>
26#include <core/class.h>
27
28#include <engine/bsp.h> 26#include <engine/bsp.h>
29 27
30struct nv98_bsp_priv { 28struct nv98_bsp_priv {
31 struct nouveau_engine base; 29 struct nouveau_falcon base;
32}; 30};
33 31
34/******************************************************************************* 32/*******************************************************************************
@@ -37,31 +35,49 @@ struct nv98_bsp_priv {
37 35
38static struct nouveau_oclass 36static struct nouveau_oclass
39nv98_bsp_sclass[] = { 37nv98_bsp_sclass[] = {
38 { 0x88b1, &nouveau_object_ofuncs },
39 { 0x85b1, &nouveau_object_ofuncs },
40 { 0x86b1, &nouveau_object_ofuncs },
40 {}, 41 {},
41}; 42};
42 43
43/******************************************************************************* 44/*******************************************************************************
44 * BSP context 45 * PBSP context
45 ******************************************************************************/ 46 ******************************************************************************/
46 47
47static struct nouveau_oclass 48static struct nouveau_oclass
48nv98_bsp_cclass = { 49nv98_bsp_cclass = {
49 .handle = NV_ENGCTX(BSP, 0x98), 50 .handle = NV_ENGCTX(BSP, 0x98),
50 .ofuncs = &(struct nouveau_ofuncs) { 51 .ofuncs = &(struct nouveau_ofuncs) {
51 .ctor = _nouveau_engctx_ctor, 52 .ctor = _nouveau_falcon_context_ctor,
52 .dtor = _nouveau_engctx_dtor, 53 .dtor = _nouveau_falcon_context_dtor,
53 .init = _nouveau_engctx_init, 54 .init = _nouveau_falcon_context_init,
54 .fini = _nouveau_engctx_fini, 55 .fini = _nouveau_falcon_context_fini,
55 .rd32 = _nouveau_engctx_rd32, 56 .rd32 = _nouveau_falcon_context_rd32,
56 .wr32 = _nouveau_engctx_wr32, 57 .wr32 = _nouveau_falcon_context_wr32,
57 }, 58 },
58}; 59};
59 60
60/******************************************************************************* 61/*******************************************************************************
61 * BSP engine/subdev functions 62 * PBSP engine/subdev functions
62 ******************************************************************************/ 63 ******************************************************************************/
63 64
64static int 65static int
66nv98_bsp_init(struct nouveau_object *object)
67{
68 struct nv98_bsp_priv *priv = (void *)object;
69 int ret;
70
71 ret = nouveau_falcon_init(&priv->base);
72 if (ret)
73 return ret;
74
75 nv_wr32(priv, 0x084010, 0x0000ffd2);
76 nv_wr32(priv, 0x08401c, 0x0000fff2);
77 return 0;
78}
79
80static int
65nv98_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 81nv98_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
66 struct nouveau_oclass *oclass, void *data, u32 size, 82 struct nouveau_oclass *oclass, void *data, u32 size,
67 struct nouveau_object **pobject) 83 struct nouveau_object **pobject)
@@ -69,7 +85,7 @@ nv98_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
69 struct nv98_bsp_priv *priv; 85 struct nv98_bsp_priv *priv;
70 int ret; 86 int ret;
71 87
72 ret = nouveau_engine_create(parent, engine, oclass, true, 88 ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
73 "PBSP", "bsp", &priv); 89 "PBSP", "bsp", &priv);
74 *pobject = nv_object(priv); 90 *pobject = nv_object(priv);
75 if (ret) 91 if (ret)
@@ -86,8 +102,10 @@ nv98_bsp_oclass = {
86 .handle = NV_ENGINE(BSP, 0x98), 102 .handle = NV_ENGINE(BSP, 0x98),
87 .ofuncs = &(struct nouveau_ofuncs) { 103 .ofuncs = &(struct nouveau_ofuncs) {
88 .ctor = nv98_bsp_ctor, 104 .ctor = nv98_bsp_ctor,
89 .dtor = _nouveau_engine_dtor, 105 .dtor = _nouveau_falcon_dtor,
90 .init = _nouveau_engine_init, 106 .init = nv98_bsp_init,
91 .fini = _nouveau_engine_fini, 107 .fini = _nouveau_falcon_fini,
108 .rd32 = _nouveau_falcon_rd32,
109 .wr32 = _nouveau_falcon_wr32,
92 }, 110 },
93}; 111};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
index f02fd9f443ff..a66b27c0fcab 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
@@ -49,18 +49,23 @@ int
49nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval) 49nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
50{ 50{
51 const u32 doff = (or * 0x800); 51 const u32 doff = (or * 0x800);
52 int load = -EINVAL; 52
53 nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000); 53 nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000);
54 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); 54 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
55
55 nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval); 56 nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
56 mdelay(9); 57 mdelay(9);
57 udelay(500); 58 udelay(500);
58 nv_wr32(priv, 0x61a00c + doff, 0x80000000); 59 loadval = nv_mask(priv, 0x61a00c + doff, 0xffffffff, 0x00000000);
59 load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27; 60
60 nv_wr32(priv, 0x61a00c + doff, 0x00000000);
61 nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000); 61 nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000);
62 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); 62 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
63 return load; 63
64 nv_debug(priv, "DAC%d sense: 0x%08x\n", or, loadval);
65 if (!(loadval & 0x80000000))
66 return -ETIMEDOUT;
67
68 return (loadval & 0x38000000) >> 27;
64} 69}
65 70
66int 71int
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index 31cc8fe8e7f0..054d9cff4f53 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -150,7 +150,7 @@ dp_link_train_update(struct dp_state *dp, u32 delay)
150 if (ret) 150 if (ret)
151 return ret; 151 return ret;
152 152
153 DBG("status %*ph\n", 6, dp->stat); 153 DBG("status %6ph\n", dp->stat);
154 return 0; 154 return 0;
155} 155}
156 156
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 7ffe2f309f12..c168ae3eaa97 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -628,7 +628,7 @@ nv50_disp_base_init(struct nouveau_object *object)
628 } 628 }
629 629
630 /* ... PIOR caps */ 630 /* ... PIOR caps */
631 for (i = 0; i < 3; i++) { 631 for (i = 0; i < priv->pior.nr; i++) {
632 tmp = nv_rd32(priv, 0x61e000 + (i * 0x800)); 632 tmp = nv_rd32(priv, 0x61e000 + (i * 0x800));
633 nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp); 633 nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp);
634 } 634 }
@@ -834,10 +834,11 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
834 u8 ver, hdr, cnt, len; 834 u8 ver, hdr, cnt, len;
835 u16 data; 835 u16 data;
836 u32 ctrl = 0x00000000; 836 u32 ctrl = 0x00000000;
837 u32 reg;
837 int i; 838 int i;
838 839
839 /* DAC */ 840 /* DAC */
840 for (i = 0; !(ctrl & (1 << head)) && i < 3; i++) 841 for (i = 0; !(ctrl & (1 << head)) && i < priv->dac.nr; i++)
841 ctrl = nv_rd32(priv, 0x610b5c + (i * 8)); 842 ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
842 843
843 /* SOR */ 844 /* SOR */
@@ -845,19 +846,18 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
845 if (nv_device(priv)->chipset < 0x90 || 846 if (nv_device(priv)->chipset < 0x90 ||
846 nv_device(priv)->chipset == 0x92 || 847 nv_device(priv)->chipset == 0x92 ||
847 nv_device(priv)->chipset == 0xa0) { 848 nv_device(priv)->chipset == 0xa0) {
848 for (i = 0; !(ctrl & (1 << head)) && i < 2; i++) 849 reg = 0x610b74;
849 ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
850 i += 4;
851 } else { 850 } else {
852 for (i = 0; !(ctrl & (1 << head)) && i < 4; i++) 851 reg = 0x610798;
853 ctrl = nv_rd32(priv, 0x610798 + (i * 8));
854 i += 4;
855 } 852 }
853 for (i = 0; !(ctrl & (1 << head)) && i < priv->sor.nr; i++)
854 ctrl = nv_rd32(priv, reg + (i * 8));
855 i += 4;
856 } 856 }
857 857
858 /* PIOR */ 858 /* PIOR */
859 if (!(ctrl & (1 << head))) { 859 if (!(ctrl & (1 << head))) {
860 for (i = 0; !(ctrl & (1 << head)) && i < 3; i++) 860 for (i = 0; !(ctrl & (1 << head)) && i < priv->pior.nr; i++)
861 ctrl = nv_rd32(priv, 0x610b84 + (i * 8)); 861 ctrl = nv_rd32(priv, 0x610b84 + (i * 8));
862 i += 8; 862 i += 8;
863 } 863 }
@@ -893,10 +893,11 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
893 u8 ver, hdr, cnt, len; 893 u8 ver, hdr, cnt, len;
894 u32 ctrl = 0x00000000; 894 u32 ctrl = 0x00000000;
895 u32 data, conf = ~0; 895 u32 data, conf = ~0;
896 u32 reg;
896 int i; 897 int i;
897 898
898 /* DAC */ 899 /* DAC */
899 for (i = 0; !(ctrl & (1 << head)) && i < 3; i++) 900 for (i = 0; !(ctrl & (1 << head)) && i < priv->dac.nr; i++)
900 ctrl = nv_rd32(priv, 0x610b58 + (i * 8)); 901 ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
901 902
902 /* SOR */ 903 /* SOR */
@@ -904,19 +905,18 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
904 if (nv_device(priv)->chipset < 0x90 || 905 if (nv_device(priv)->chipset < 0x90 ||
905 nv_device(priv)->chipset == 0x92 || 906 nv_device(priv)->chipset == 0x92 ||
906 nv_device(priv)->chipset == 0xa0) { 907 nv_device(priv)->chipset == 0xa0) {
907 for (i = 0; !(ctrl & (1 << head)) && i < 2; i++) 908 reg = 0x610b70;
908 ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
909 i += 4;
910 } else { 909 } else {
911 for (i = 0; !(ctrl & (1 << head)) && i < 4; i++) 910 reg = 0x610794;
912 ctrl = nv_rd32(priv, 0x610794 + (i * 8));
913 i += 4;
914 } 911 }
912 for (i = 0; !(ctrl & (1 << head)) && i < priv->sor.nr; i++)
913 ctrl = nv_rd32(priv, reg + (i * 8));
914 i += 4;
915 } 915 }
916 916
917 /* PIOR */ 917 /* PIOR */
918 if (!(ctrl & (1 << head))) { 918 if (!(ctrl & (1 << head))) {
919 for (i = 0; !(ctrl & (1 << head)) && i < 3; i++) 919 for (i = 0; !(ctrl & (1 << head)) && i < priv->pior.nr; i++)
920 ctrl = nv_rd32(priv, 0x610b80 + (i * 8)); 920 ctrl = nv_rd32(priv, 0x610b80 + (i * 8));
921 i += 8; 921 i += 8;
922 } 922 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index e9b8217d0075..7e5dff51d3c5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -26,7 +26,6 @@
26#include <core/engctx.h> 26#include <core/engctx.h>
27#include <core/ramht.h> 27#include <core/ramht.h>
28#include <core/class.h> 28#include <core/class.h>
29#include <core/math.h>
30 29
31#include <subdev/timer.h> 30#include <subdev/timer.h>
32#include <subdev/bar.h> 31#include <subdev/bar.h>
@@ -278,7 +277,7 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
278 return ret; 277 return ret;
279 278
280 ioffset = args->ioffset; 279 ioffset = args->ioffset;
281 ilength = log2i(args->ilength / 8); 280 ilength = order_base_2(args->ilength / 8);
282 281
283 nv_wo32(base->ramfc, 0x3c, 0x403f6078); 282 nv_wo32(base->ramfc, 0x3c, 0x403f6078);
284 nv_wo32(base->ramfc, 0x44, 0x01003fff); 283 nv_wo32(base->ramfc, 0x44, 0x01003fff);
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 7f53196cff52..91a87cd7195a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -28,7 +28,6 @@
28#include <core/ramht.h> 28#include <core/ramht.h>
29#include <core/event.h> 29#include <core/event.h>
30#include <core/class.h> 30#include <core/class.h>
31#include <core/math.h>
32 31
33#include <subdev/timer.h> 32#include <subdev/timer.h>
34#include <subdev/bar.h> 33#include <subdev/bar.h>
@@ -57,6 +56,7 @@ nv84_fifo_context_attach(struct nouveau_object *parent,
57 case NVDEV_ENGINE_SW : return 0; 56 case NVDEV_ENGINE_SW : return 0;
58 case NVDEV_ENGINE_GR : addr = 0x0020; break; 57 case NVDEV_ENGINE_GR : addr = 0x0020; break;
59 case NVDEV_ENGINE_VP : addr = 0x0040; break; 58 case NVDEV_ENGINE_VP : addr = 0x0040; break;
59 case NVDEV_ENGINE_PPP :
60 case NVDEV_ENGINE_MPEG : addr = 0x0060; break; 60 case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
61 case NVDEV_ENGINE_BSP : addr = 0x0080; break; 61 case NVDEV_ENGINE_BSP : addr = 0x0080; break;
62 case NVDEV_ENGINE_CRYPT: addr = 0x00a0; break; 62 case NVDEV_ENGINE_CRYPT: addr = 0x00a0; break;
@@ -92,6 +92,7 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
92 case NVDEV_ENGINE_SW : return 0; 92 case NVDEV_ENGINE_SW : return 0;
93 case NVDEV_ENGINE_GR : engn = 0; addr = 0x0020; break; 93 case NVDEV_ENGINE_GR : engn = 0; addr = 0x0020; break;
94 case NVDEV_ENGINE_VP : engn = 3; addr = 0x0040; break; 94 case NVDEV_ENGINE_VP : engn = 3; addr = 0x0040; break;
95 case NVDEV_ENGINE_PPP :
95 case NVDEV_ENGINE_MPEG : engn = 1; addr = 0x0060; break; 96 case NVDEV_ENGINE_MPEG : engn = 1; addr = 0x0060; break;
96 case NVDEV_ENGINE_BSP : engn = 5; addr = 0x0080; break; 97 case NVDEV_ENGINE_BSP : engn = 5; addr = 0x0080; break;
97 case NVDEV_ENGINE_CRYPT: engn = 4; addr = 0x00a0; break; 98 case NVDEV_ENGINE_CRYPT: engn = 4; addr = 0x00a0; break;
@@ -258,7 +259,7 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
258 nv_parent(chan)->object_detach = nv50_fifo_object_detach; 259 nv_parent(chan)->object_detach = nv50_fifo_object_detach;
259 260
260 ioffset = args->ioffset; 261 ioffset = args->ioffset;
261 ilength = log2i(args->ilength / 8); 262 ilength = order_base_2(args->ilength / 8);
262 263
263 nv_wo32(base->ramfc, 0x3c, 0x403f6078); 264 nv_wo32(base->ramfc, 0x3c, 0x403f6078);
264 nv_wo32(base->ramfc, 0x44, 0x01003fff); 265 nv_wo32(base->ramfc, 0x44, 0x01003fff);
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 46dfa68c47bb..ce92f289e751 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -29,7 +29,6 @@
29#include <core/engctx.h> 29#include <core/engctx.h>
30#include <core/event.h> 30#include <core/event.h>
31#include <core/class.h> 31#include <core/class.h>
32#include <core/math.h>
33#include <core/enum.h> 32#include <core/enum.h>
34 33
35#include <subdev/timer.h> 34#include <subdev/timer.h>
@@ -200,7 +199,7 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent,
200 199
201 usermem = chan->base.chid * 0x1000; 200 usermem = chan->base.chid * 0x1000;
202 ioffset = args->ioffset; 201 ioffset = args->ioffset;
203 ilength = log2i(args->ilength / 8); 202 ilength = order_base_2(args->ilength / 8);
204 203
205 for (i = 0; i < 0x1000; i += 4) 204 for (i = 0; i < 0x1000; i += 4)
206 nv_wo32(priv->user.mem, usermem + i, 0x00000000); 205 nv_wo32(priv->user.mem, usermem + i, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 09644fa9602c..8e8121abe31b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -29,7 +29,6 @@
29#include <core/engctx.h> 29#include <core/engctx.h>
30#include <core/event.h> 30#include <core/event.h>
31#include <core/class.h> 31#include <core/class.h>
32#include <core/math.h>
33#include <core/enum.h> 32#include <core/enum.h>
34 33
35#include <subdev/timer.h> 34#include <subdev/timer.h>
@@ -240,7 +239,7 @@ nve0_fifo_chan_ctor(struct nouveau_object *parent,
240 239
241 usermem = chan->base.chid * 0x200; 240 usermem = chan->base.chid * 0x200;
242 ioffset = args->ioffset; 241 ioffset = args->ioffset;
243 ilength = log2i(args->ilength / 8); 242 ilength = order_base_2(args->ilength / 8);
244 243
245 for (i = 0; i < 0x200; i += 4) 244 for (i = 0; i < 0x200; i += 4)
246 nv_wo32(priv->user.mem, usermem + i, 0x00000000); 245 nv_wo32(priv->user.mem, usermem + i, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
index 7da35a4e7970..ad8209377529 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
@@ -1,6 +1,9 @@
1#ifndef __NV40_GRAPH_H__ 1#ifndef __NV40_GRAPH_H__
2#define __NV40_GRAPH_H__ 2#define __NV40_GRAPH_H__
3 3
4#include <core/device.h>
5#include <core/gpuobj.h>
6
4/* returns 1 if device is one of the nv4x using the 0x4497 object class, 7/* returns 1 if device is one of the nv4x using the 0x4497 object class,
5 * helpful to determine a number of other hardware features 8 * helpful to determine a number of other hardware features
6 */ 9 */
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
index 5a5b2a773ed7..13bf31c40aa1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
@@ -19,21 +19,14 @@
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
23 */ 23 */
24 24
25#include <core/engine.h> 25#include <engine/falcon.h>
26#include <core/engctx.h>
27#include <core/class.h>
28
29#include <engine/ppp.h> 26#include <engine/ppp.h>
30 27
31struct nv98_ppp_priv { 28struct nv98_ppp_priv {
32 struct nouveau_engine base; 29 struct nouveau_falcon base;
33};
34
35struct nv98_ppp_chan {
36 struct nouveau_engctx base;
37}; 30};
38 31
39/******************************************************************************* 32/*******************************************************************************
@@ -42,6 +35,8 @@ struct nv98_ppp_chan {
42 35
43static struct nouveau_oclass 36static struct nouveau_oclass
44nv98_ppp_sclass[] = { 37nv98_ppp_sclass[] = {
38 { 0x88b3, &nouveau_object_ofuncs },
39 { 0x85b3, &nouveau_object_ofuncs },
45 {}, 40 {},
46}; 41};
47 42
@@ -53,12 +48,12 @@ static struct nouveau_oclass
53nv98_ppp_cclass = { 48nv98_ppp_cclass = {
54 .handle = NV_ENGCTX(PPP, 0x98), 49 .handle = NV_ENGCTX(PPP, 0x98),
55 .ofuncs = &(struct nouveau_ofuncs) { 50 .ofuncs = &(struct nouveau_ofuncs) {
56 .ctor = _nouveau_engctx_ctor, 51 .ctor = _nouveau_falcon_context_ctor,
57 .dtor = _nouveau_engctx_dtor, 52 .dtor = _nouveau_falcon_context_dtor,
58 .init = _nouveau_engctx_init, 53 .init = _nouveau_falcon_context_init,
59 .fini = _nouveau_engctx_fini, 54 .fini = _nouveau_falcon_context_fini,
60 .rd32 = _nouveau_engctx_rd32, 55 .rd32 = _nouveau_falcon_context_rd32,
61 .wr32 = _nouveau_engctx_wr32, 56 .wr32 = _nouveau_falcon_context_wr32,
62 }, 57 },
63}; 58};
64 59
@@ -67,6 +62,21 @@ nv98_ppp_cclass = {
67 ******************************************************************************/ 62 ******************************************************************************/
68 63
69static int 64static int
65nv98_ppp_init(struct nouveau_object *object)
66{
67 struct nv98_ppp_priv *priv = (void *)object;
68 int ret;
69
70 ret = nouveau_falcon_init(&priv->base);
71 if (ret)
72 return ret;
73
74 nv_wr32(priv, 0x086010, 0x0000ffd2);
75 nv_wr32(priv, 0x08601c, 0x0000fff2);
76 return 0;
77}
78
79static int
70nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 80nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
71 struct nouveau_oclass *oclass, void *data, u32 size, 81 struct nouveau_oclass *oclass, void *data, u32 size,
72 struct nouveau_object **pobject) 82 struct nouveau_object **pobject)
@@ -74,7 +84,7 @@ nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
74 struct nv98_ppp_priv *priv; 84 struct nv98_ppp_priv *priv;
75 int ret; 85 int ret;
76 86
77 ret = nouveau_engine_create(parent, engine, oclass, true, 87 ret = nouveau_falcon_create(parent, engine, oclass, 0x086000, true,
78 "PPPP", "ppp", &priv); 88 "PPPP", "ppp", &priv);
79 *pobject = nv_object(priv); 89 *pobject = nv_object(priv);
80 if (ret) 90 if (ret)
@@ -91,8 +101,10 @@ nv98_ppp_oclass = {
91 .handle = NV_ENGINE(PPP, 0x98), 101 .handle = NV_ENGINE(PPP, 0x98),
92 .ofuncs = &(struct nouveau_ofuncs) { 102 .ofuncs = &(struct nouveau_ofuncs) {
93 .ctor = nv98_ppp_ctor, 103 .ctor = nv98_ppp_ctor,
94 .dtor = _nouveau_engine_dtor, 104 .dtor = _nouveau_falcon_dtor,
95 .init = _nouveau_engine_init, 105 .init = nv98_ppp_init,
96 .fini = _nouveau_engine_fini, 106 .fini = _nouveau_falcon_fini,
107 .rd32 = _nouveau_falcon_rd32,
108 .wr32 = _nouveau_falcon_wr32,
97 }, 109 },
98}; 110};
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/vp/nv98.c
index 8a8236bc84de..fc9ae0ff1ef5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nv98.c
@@ -19,16 +19,14 @@
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
23 */ 23 */
24 24
25#include <core/engctx.h> 25#include <engine/falcon.h>
26#include <core/class.h>
27
28#include <engine/vp.h> 26#include <engine/vp.h>
29 27
30struct nv98_vp_priv { 28struct nv98_vp_priv {
31 struct nouveau_engine base; 29 struct nouveau_falcon base;
32}; 30};
33 31
34/******************************************************************************* 32/*******************************************************************************
@@ -37,6 +35,8 @@ struct nv98_vp_priv {
37 35
38static struct nouveau_oclass 36static struct nouveau_oclass
39nv98_vp_sclass[] = { 37nv98_vp_sclass[] = {
38 { 0x88b2, &nouveau_object_ofuncs },
39 { 0x85b2, &nouveau_object_ofuncs },
40 {}, 40 {},
41}; 41};
42 42
@@ -48,12 +48,12 @@ static struct nouveau_oclass
48nv98_vp_cclass = { 48nv98_vp_cclass = {
49 .handle = NV_ENGCTX(VP, 0x98), 49 .handle = NV_ENGCTX(VP, 0x98),
50 .ofuncs = &(struct nouveau_ofuncs) { 50 .ofuncs = &(struct nouveau_ofuncs) {
51 .ctor = _nouveau_engctx_ctor, 51 .ctor = _nouveau_falcon_context_ctor,
52 .dtor = _nouveau_engctx_dtor, 52 .dtor = _nouveau_falcon_context_dtor,
53 .init = _nouveau_engctx_init, 53 .init = _nouveau_falcon_context_init,
54 .fini = _nouveau_engctx_fini, 54 .fini = _nouveau_falcon_context_fini,
55 .rd32 = _nouveau_engctx_rd32, 55 .rd32 = _nouveau_falcon_context_rd32,
56 .wr32 = _nouveau_engctx_wr32, 56 .wr32 = _nouveau_falcon_context_wr32,
57 }, 57 },
58}; 58};
59 59
@@ -62,6 +62,21 @@ nv98_vp_cclass = {
62 ******************************************************************************/ 62 ******************************************************************************/
63 63
64static int 64static int
65nv98_vp_init(struct nouveau_object *object)
66{
67 struct nv98_vp_priv *priv = (void *)object;
68 int ret;
69
70 ret = nouveau_falcon_init(&priv->base);
71 if (ret)
72 return ret;
73
74 nv_wr32(priv, 0x085010, 0x0000ffd2);
75 nv_wr32(priv, 0x08501c, 0x0000fff2);
76 return 0;
77}
78
79static int
65nv98_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 80nv98_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
66 struct nouveau_oclass *oclass, void *data, u32 size, 81 struct nouveau_oclass *oclass, void *data, u32 size,
67 struct nouveau_object **pobject) 82 struct nouveau_object **pobject)
@@ -69,7 +84,7 @@ nv98_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
69 struct nv98_vp_priv *priv; 84 struct nv98_vp_priv *priv;
70 int ret; 85 int ret;
71 86
72 ret = nouveau_engine_create(parent, engine, oclass, true, 87 ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
73 "PVP", "vp", &priv); 88 "PVP", "vp", &priv);
74 *pobject = nv_object(priv); 89 *pobject = nv_object(priv);
75 if (ret) 90 if (ret)
@@ -86,8 +101,10 @@ nv98_vp_oclass = {
86 .handle = NV_ENGINE(VP, 0x98), 101 .handle = NV_ENGINE(VP, 0x98),
87 .ofuncs = &(struct nouveau_ofuncs) { 102 .ofuncs = &(struct nouveau_ofuncs) {
88 .ctor = nv98_vp_ctor, 103 .ctor = nv98_vp_ctor,
89 .dtor = _nouveau_engine_dtor, 104 .dtor = _nouveau_falcon_dtor,
90 .init = _nouveau_engine_init, 105 .init = nv98_vp_init,
91 .fini = _nouveau_engine_fini, 106 .fini = _nouveau_falcon_fini,
107 .rd32 = _nouveau_falcon_rd32,
108 .wr32 = _nouveau_falcon_wr32,
92 }, 109 },
93}; 110};
diff --git a/drivers/gpu/drm/nouveau/core/include/core/math.h b/drivers/gpu/drm/nouveau/core/include/core/math.h
deleted file mode 100644
index f808131c5cd8..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/core/math.h
+++ /dev/null
@@ -1,16 +0,0 @@
1#ifndef __NOUVEAU_MATH_H__
2#define __NOUVEAU_MATH_H__
3
4static inline int
5log2i(u64 base)
6{
7 u64 temp = base >> 1;
8 int log2;
9
10 for (log2 = 0; temp; log2++, temp >>= 1) {
11 }
12
13 return (base & (base - 1)) ? log2 + 1: log2;
14}
15
16#endif
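
The removed log2i() computed ceil(log2(x)), which is exactly what the kernel's order_base_2() from <linux/log2.h> already provides; that is why the ramht and fifo callers above switch over, e.g. ilength = order_base_2(args->ilength / 8). A small illustration of the replacement, with a made-up 4 KiB pushbuffer length:

#include <linux/log2.h>
#include <linux/printk.h>
#include <linux/types.h>

static void demo_ilength_order(void)
{
	u32 ilength = 4096;	/* hypothetical pushbuffer length in bytes */

	/* 4096 / 8 = 512 entries, so log2i() and order_base_2() both give 9 */
	pr_info("ilength order: %d\n", order_base_2(ilength / 8));
}
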
diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h
index febed2ea5c80..d87836e3a704 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/printk.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h
@@ -15,6 +15,12 @@ struct nouveau_object;
15#define NV_PRINTK_TRACE KERN_DEBUG 15#define NV_PRINTK_TRACE KERN_DEBUG
16#define NV_PRINTK_SPAM KERN_DEBUG 16#define NV_PRINTK_SPAM KERN_DEBUG
17 17
18extern int nv_printk_suspend_level;
19
20#define NV_DBG_SUSPEND (nv_printk_suspend_level)
21#define NV_PRINTK_SUSPEND (nv_printk_level_to_pfx(nv_printk_suspend_level))
22
23const char *nv_printk_level_to_pfx(int level);
18void __printf(4, 5) 24void __printf(4, 5)
19nv_printk_(struct nouveau_object *, const char *, int, const char *, ...); 25nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
20 26
@@ -31,6 +37,13 @@ nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
31#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a) 37#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a)
32#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a) 38#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a)
33 39
40#define nv_suspend(o,f,a...) nv_printk((o), SUSPEND, f, ##a)
41
42static inline void nv_suspend_set_printk_level(int level)
43{
44 nv_printk_suspend_level = level;
45}
46
34#define nv_assert(f,a...) do { \ 47#define nv_assert(f,a...) do { \
35 if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \ 48 if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \
36 nv_printk_(NULL, NV_PRINTK_FATAL, NV_DBG_FATAL, f "\n", ##a); \ 49 nv_printk_(NULL, NV_PRINTK_FATAL, NV_DBG_FATAL, f "\n", ##a); \
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 888384c0bed8..7e4e2775f249 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -39,8 +39,8 @@ struct nouveau_i2c_func {
39 int (*drv_ctl)(struct nouveau_i2c_port *, int lane, int sw, int pe); 39 int (*drv_ctl)(struct nouveau_i2c_port *, int lane, int sw, int pe);
40}; 40};
41 41
42#define nouveau_i2c_port_create(p,e,o,i,a,d) \ 42#define nouveau_i2c_port_create(p,e,o,i,a,f,d) \
43 nouveau_i2c_port_create_((p), (e), (o), (i), (a), \ 43 nouveau_i2c_port_create_((p), (e), (o), (i), (a), (f), \
44 sizeof(**d), (void **)d) 44 sizeof(**d), (void **)d)
45#define nouveau_i2c_port_destroy(p) ({ \ 45#define nouveau_i2c_port_destroy(p) ({ \
46 struct nouveau_i2c_port *port = (p); \ 46 struct nouveau_i2c_port *port = (p); \
@@ -53,7 +53,9 @@ struct nouveau_i2c_func {
53 53
54int nouveau_i2c_port_create_(struct nouveau_object *, struct nouveau_object *, 54int nouveau_i2c_port_create_(struct nouveau_object *, struct nouveau_object *,
55 struct nouveau_oclass *, u8, 55 struct nouveau_oclass *, u8,
56 const struct i2c_algorithm *, int, void **); 56 const struct i2c_algorithm *,
57 const struct nouveau_i2c_func *,
58 int, void **);
57void _nouveau_i2c_port_dtor(struct nouveau_object *); 59void _nouveau_i2c_port_dtor(struct nouveau_object *);
58#define _nouveau_i2c_port_init nouveau_object_init 60#define _nouveau_i2c_port_init nouveau_object_init
59#define _nouveau_i2c_port_fini nouveau_object_fini 61#define _nouveau_i2c_port_fini nouveau_object_fini
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index 9d2cd2006250..ce6569f365a7 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -12,6 +12,7 @@ struct nouveau_mc_intr {
12struct nouveau_mc { 12struct nouveau_mc {
13 struct nouveau_subdev base; 13 struct nouveau_subdev base;
14 const struct nouveau_mc_intr *intr_map; 14 const struct nouveau_mc_intr *intr_map;
15 bool use_msi;
15}; 16};
16 17
17static inline struct nouveau_mc * 18static inline struct nouveau_mc *
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
index e465d158d352..9ab70dfe5b02 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
@@ -22,6 +22,7 @@ bool nouveau_timer_wait_eq(void *, u64 nsec, u32 addr, u32 mask, u32 data);
22bool nouveau_timer_wait_ne(void *, u64 nsec, u32 addr, u32 mask, u32 data); 22bool nouveau_timer_wait_ne(void *, u64 nsec, u32 addr, u32 mask, u32 data);
23bool nouveau_timer_wait_cb(void *, u64 nsec, bool (*func)(void *), void *data); 23bool nouveau_timer_wait_cb(void *, u64 nsec, bool (*func)(void *), void *data);
24void nouveau_timer_alarm(void *, u32 nsec, struct nouveau_alarm *); 24void nouveau_timer_alarm(void *, u32 nsec, struct nouveau_alarm *);
25void nouveau_timer_alarm_cancel(void *, struct nouveau_alarm *);
25 26
26#define NV_WAIT_DEFAULT 2000000000ULL 27#define NV_WAIT_DEFAULT 2000000000ULL
27#define nv_wait(o,a,m,v) \ 28#define nv_wait(o,a,m,v) \
@@ -35,6 +36,7 @@ struct nouveau_timer {
35 struct nouveau_subdev base; 36 struct nouveau_subdev base;
36 u64 (*read)(struct nouveau_timer *); 37 u64 (*read)(struct nouveau_timer *);
37 void (*alarm)(struct nouveau_timer *, u64 time, struct nouveau_alarm *); 38 void (*alarm)(struct nouveau_timer *, u64 time, struct nouveau_alarm *);
39 void (*alarm_cancel)(struct nouveau_timer *, struct nouveau_alarm *);
38}; 40};
39 41
40static inline struct nouveau_timer * 42static inline struct nouveau_timer *
diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h
index 3bd9be2ab37f..191e739f30d1 100644
--- a/drivers/gpu/drm/nouveau/core/os.h
+++ b/drivers/gpu/drm/nouveau/core/os.h
@@ -13,11 +13,13 @@
13#include <linux/i2c-algo-bit.h> 13#include <linux/i2c-algo-bit.h>
14#include <linux/delay.h> 14#include <linux/delay.h>
15#include <linux/io-mapping.h> 15#include <linux/io-mapping.h>
16#include <linux/vmalloc.h>
17#include <linux/acpi.h> 16#include <linux/acpi.h>
17#include <linux/vmalloc.h>
18#include <linux/dmi.h> 18#include <linux/dmi.h>
19#include <linux/reboot.h> 19#include <linux/reboot.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/log2.h>
22#include <linux/pm_runtime.h>
21 23
22#include <asm/unaligned.h> 24#include <asm/unaligned.h>
23 25
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 0687e6481438..2e11ea02cf87 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -2165,7 +2165,7 @@ nvbios_init(struct nouveau_subdev *subdev, bool execute)
2165 u16 data; 2165 u16 data;
2166 2166
2167 if (execute) 2167 if (execute)
2168 nv_info(bios, "running init tables\n"); 2168 nv_suspend(bios, "running init tables\n");
2169 while (!ret && (data = (init_script(bios, ++i)))) { 2169 while (!ret && (data = (init_script(bios, ++i)))) {
2170 struct nvbios_init init = { 2170 struct nvbios_init init = {
2171 .subdev = subdev, 2171 .subdev = subdev,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
index 22a20573ed1b..22ac6dbd6c8f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
@@ -184,7 +184,8 @@ nvbios_therm_fan_parse(struct nouveau_bios *bios,
184 cur_trip->fan_duty = value; 184 cur_trip->fan_duty = value;
185 break; 185 break;
186 case 0x26: 186 case 0x26:
187 fan->pwm_freq = value; 187 if (!fan->pwm_freq)
188 fan->pwm_freq = value;
188 break; 189 break;
189 case 0x3b: 190 case 0x3b:
190 fan->bump_period = value; 191 fan->bump_period = value;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
index dec94e9d776a..4b195ac4da66 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
@@ -118,7 +118,8 @@ anx9805_aux_chan_ctor(struct nouveau_object *parent,
118 int ret; 118 int ret;
119 119
120 ret = nouveau_i2c_port_create(parent, engine, oclass, index, 120 ret = nouveau_i2c_port_create(parent, engine, oclass, index,
121 &nouveau_i2c_aux_algo, &chan); 121 &nouveau_i2c_aux_algo, &anx9805_aux_func,
122 &chan);
122 *pobject = nv_object(chan); 123 *pobject = nv_object(chan);
123 if (ret) 124 if (ret)
124 return ret; 125 return ret;
@@ -140,8 +141,6 @@ anx9805_aux_chan_ctor(struct nouveau_object *parent,
140 struct i2c_algo_bit_data *algo = mast->adapter.algo_data; 141 struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
141 algo->udelay = max(algo->udelay, 40); 142 algo->udelay = max(algo->udelay, 40);
142 } 143 }
143
144 chan->base.func = &anx9805_aux_func;
145 return 0; 144 return 0;
146} 145}
147 146
@@ -234,7 +233,8 @@ anx9805_ddc_port_ctor(struct nouveau_object *parent,
234 int ret; 233 int ret;
235 234
236 ret = nouveau_i2c_port_create(parent, engine, oclass, index, 235 ret = nouveau_i2c_port_create(parent, engine, oclass, index,
237 &anx9805_i2c_algo, &port); 236 &anx9805_i2c_algo, &anx9805_i2c_func,
237 &port);
238 *pobject = nv_object(port); 238 *pobject = nv_object(port);
239 if (ret) 239 if (ret)
240 return ret; 240 return ret;
@@ -256,8 +256,6 @@ anx9805_ddc_port_ctor(struct nouveau_object *parent,
256 struct i2c_algo_bit_data *algo = mast->adapter.algo_data; 256 struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
257 algo->udelay = max(algo->udelay, 40); 257 algo->udelay = max(algo->udelay, 40);
258 } 258 }
259
260 port->base.func = &anx9805_i2c_func;
261 return 0; 259 return 0;
262} 260}
263 261
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 8ae2625415e1..2895c19bb152 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -95,6 +95,7 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
95 struct nouveau_object *engine, 95 struct nouveau_object *engine,
96 struct nouveau_oclass *oclass, u8 index, 96 struct nouveau_oclass *oclass, u8 index,
97 const struct i2c_algorithm *algo, 97 const struct i2c_algorithm *algo,
98 const struct nouveau_i2c_func *func,
98 int size, void **pobject) 99 int size, void **pobject)
99{ 100{
100 struct nouveau_device *device = nv_device(parent); 101 struct nouveau_device *device = nv_device(parent);
@@ -112,6 +113,7 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
112 port->adapter.owner = THIS_MODULE; 113 port->adapter.owner = THIS_MODULE;
113 port->adapter.dev.parent = &device->pdev->dev; 114 port->adapter.dev.parent = &device->pdev->dev;
114 port->index = index; 115 port->index = index;
116 port->func = func;
115 i2c_set_adapdata(&port->adapter, i2c); 117 i2c_set_adapdata(&port->adapter, i2c);
116 118
117 if ( algo == &nouveau_i2c_bit_algo && 119 if ( algo == &nouveau_i2c_bit_algo &&
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
index 2ad18840fe63..860d5d2365da 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
@@ -91,12 +91,12 @@ nv04_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
91 int ret; 91 int ret;
92 92
93 ret = nouveau_i2c_port_create(parent, engine, oclass, index, 93 ret = nouveau_i2c_port_create(parent, engine, oclass, index,
94 &nouveau_i2c_bit_algo, &port); 94 &nouveau_i2c_bit_algo, &nv04_i2c_func,
95 &port);
95 *pobject = nv_object(port); 96 *pobject = nv_object(port);
96 if (ret) 97 if (ret)
97 return ret; 98 return ret;
98 99
99 port->base.func = &nv04_i2c_func;
100 port->drive = info->drive; 100 port->drive = info->drive;
101 port->sense = info->sense; 101 port->sense = info->sense;
102 return 0; 102 return 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
index f501ae25dbb3..0c2655a03bb4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
@@ -84,12 +84,12 @@ nv4e_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
84 int ret; 84 int ret;
85 85
86 ret = nouveau_i2c_port_create(parent, engine, oclass, index, 86 ret = nouveau_i2c_port_create(parent, engine, oclass, index,
87 &nouveau_i2c_bit_algo, &port); 87 &nouveau_i2c_bit_algo, &nv4e_i2c_func,
88 &port);
88 *pobject = nv_object(port); 89 *pobject = nv_object(port);
89 if (ret) 90 if (ret)
90 return ret; 91 return ret;
91 92
92 port->base.func = &nv4e_i2c_func;
93 port->addr = 0x600800 + info->drive; 93 port->addr = 0x600800 + info->drive;
94 return 0; 94 return 0;
95} 95}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
index 378dfa324e5f..a8d67a287704 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
@@ -85,7 +85,8 @@ nv50_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
85 int ret; 85 int ret;
86 86
87 ret = nouveau_i2c_port_create(parent, engine, oclass, index, 87 ret = nouveau_i2c_port_create(parent, engine, oclass, index,
88 &nouveau_i2c_bit_algo, &port); 88 &nouveau_i2c_bit_algo, &nv50_i2c_func,
89 &port);
89 *pobject = nv_object(port); 90 *pobject = nv_object(port);
90 if (ret) 91 if (ret)
91 return ret; 92 return ret;
@@ -93,7 +94,6 @@ nv50_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
93 if (info->drive >= nv50_i2c_addr_nr) 94 if (info->drive >= nv50_i2c_addr_nr)
94 return -EINVAL; 95 return -EINVAL;
95 96
96 port->base.func = &nv50_i2c_func;
97 port->state = 0x00000007; 97 port->state = 0x00000007;
98 port->addr = nv50_i2c_addr[info->drive]; 98 port->addr = nv50_i2c_addr[info->drive];
99 return 0; 99 return 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
index 61b771670bfe..df6d3e4b68be 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
@@ -186,7 +186,8 @@ nv94_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
186 int ret; 186 int ret;
187 187
188 ret = nouveau_i2c_port_create(parent, engine, oclass, index, 188 ret = nouveau_i2c_port_create(parent, engine, oclass, index,
189 &nouveau_i2c_bit_algo, &port); 189 &nouveau_i2c_bit_algo, &nv94_i2c_func,
190 &port);
190 *pobject = nv_object(port); 191 *pobject = nv_object(port);
191 if (ret) 192 if (ret)
192 return ret; 193 return ret;
@@ -194,7 +195,6 @@ nv94_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
194 if (info->drive >= nv50_i2c_addr_nr) 195 if (info->drive >= nv50_i2c_addr_nr)
195 return -EINVAL; 196 return -EINVAL;
196 197
197 port->base.func = &nv94_i2c_func;
198 port->state = 7; 198 port->state = 7;
199 port->addr = nv50_i2c_addr[info->drive]; 199 port->addr = nv50_i2c_addr[info->drive];
200 if (info->share != DCB_I2C_UNUSED) { 200 if (info->share != DCB_I2C_UNUSED) {
@@ -221,12 +221,12 @@ nv94_aux_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
221 int ret; 221 int ret;
222 222
223 ret = nouveau_i2c_port_create(parent, engine, oclass, index, 223 ret = nouveau_i2c_port_create(parent, engine, oclass, index,
224 &nouveau_i2c_aux_algo, &port); 224 &nouveau_i2c_aux_algo, &nv94_aux_func,
225 &port);
225 *pobject = nv_object(port); 226 *pobject = nv_object(port);
226 if (ret) 227 if (ret)
227 return ret; 228 return ret;
228 229
229 port->base.func = &nv94_aux_func;
230 port->addr = info->drive; 230 port->addr = info->drive;
231 if (info->share != DCB_I2C_UNUSED) { 231 if (info->share != DCB_I2C_UNUSED) {
232 port->ctrl = 0x00e500 + (info->drive * 0x50); 232 port->ctrl = 0x00e500 + (info->drive * 0x50);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
index f761b8a610f1..29967d30f97c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
@@ -60,12 +60,12 @@ nvd0_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
60 int ret; 60 int ret;
61 61
62 ret = nouveau_i2c_port_create(parent, engine, oclass, index, 62 ret = nouveau_i2c_port_create(parent, engine, oclass, index,
63 &nouveau_i2c_bit_algo, &port); 63 &nouveau_i2c_bit_algo, &nvd0_i2c_func,
64 &port);
64 *pobject = nv_object(port); 65 *pobject = nv_object(port);
65 if (ret) 66 if (ret)
66 return ret; 67 return ret;
67 68
68 port->base.func = &nvd0_i2c_func;
69 port->state = 0x00000007; 69 port->state = 0x00000007;
70 port->addr = 0x00d014 + (info->drive * 0x20); 70 port->addr = 0x00d014 + (info->drive * 0x20);
71 if (info->share != DCB_I2C_UNUSED) { 71 if (info->share != DCB_I2C_UNUSED) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
index 716bf41bc3c1..b10a143787a7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -22,15 +22,9 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "nv04.h" 25#include <engine/graph/nv40.h>
26 26
27static inline int 27#include "nv04.h"
28nv44_graph_class(struct nv04_instmem_priv *priv)
29{
30 if ((nv_device(priv)->chipset & 0xf0) == 0x60)
31 return 1;
32 return !(0x0baf & (1 << (nv_device(priv)->chipset & 0x0f)));
33}
34 28
35static int 29static int
36nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 30nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index ec9cd6f10f91..37712a6df923 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -23,16 +23,20 @@
23 */ 23 */
24 24
25#include <subdev/mc.h> 25#include <subdev/mc.h>
26#include <core/option.h>
26 27
27static irqreturn_t 28static irqreturn_t
28nouveau_mc_intr(int irq, void *arg) 29nouveau_mc_intr(int irq, void *arg)
29{ 30{
30 struct nouveau_mc *pmc = arg; 31 struct nouveau_mc *pmc = arg;
31 const struct nouveau_mc_intr *map = pmc->intr_map; 32 const struct nouveau_mc_intr *map = pmc->intr_map;
33 struct nouveau_device *device = nv_device(pmc);
32 struct nouveau_subdev *unit; 34 struct nouveau_subdev *unit;
33 u32 stat, intr; 35 u32 stat, intr;
34 36
35 intr = stat = nv_rd32(pmc, 0x000100); 37 intr = stat = nv_rd32(pmc, 0x000100);
38 if (intr == 0xffffffff)
39 return IRQ_NONE;
36 while (stat && map->stat) { 40 while (stat && map->stat) {
37 if (stat & map->stat) { 41 if (stat & map->stat) {
38 unit = nouveau_subdev(pmc, map->unit); 42 unit = nouveau_subdev(pmc, map->unit);
@@ -43,10 +47,15 @@ nouveau_mc_intr(int irq, void *arg)
43 map++; 47 map++;
44 } 48 }
45 49
50 if (pmc->use_msi)
51 nv_wr08(pmc->base.base.parent, 0x00088068, 0xff);
52
46 if (intr) { 53 if (intr) {
47 nv_error(pmc, "unknown intr 0x%08x\n", stat); 54 nv_error(pmc, "unknown intr 0x%08x\n", stat);
48 } 55 }
49 56
57 if (stat == IRQ_HANDLED)
58 pm_runtime_mark_last_busy(&device->pdev->dev);
50 return stat ? IRQ_HANDLED : IRQ_NONE; 59 return stat ? IRQ_HANDLED : IRQ_NONE;
51} 60}
52 61
@@ -75,6 +84,8 @@ _nouveau_mc_dtor(struct nouveau_object *object)
75 struct nouveau_device *device = nv_device(object); 84 struct nouveau_device *device = nv_device(object);
76 struct nouveau_mc *pmc = (void *)object; 85 struct nouveau_mc *pmc = (void *)object;
77 free_irq(device->pdev->irq, pmc); 86 free_irq(device->pdev->irq, pmc);
87 if (pmc->use_msi)
88 pci_disable_msi(device->pdev);
78 nouveau_subdev_destroy(&pmc->base); 89 nouveau_subdev_destroy(&pmc->base);
79} 90}
80 91
@@ -96,6 +107,23 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
96 107
97 pmc->intr_map = intr_map; 108 pmc->intr_map = intr_map;
98 109
110 switch (device->pdev->device & 0x0ff0) {
111 case 0x00f0: /* BR02? */
112 case 0x02e0: /* BR02? */
113 pmc->use_msi = false;
114 break;
115 default:
116 pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", true);
117 if (pmc->use_msi) {
118 pmc->use_msi = pci_enable_msi(device->pdev) == 0;
119 if (pmc->use_msi) {
120 nv_info(pmc, "MSI interrupts enabled\n");
121 nv_wr08(device, 0x00088068, 0xff);
122 }
123 }
124 break;
125 }
126
99 ret = request_irq(device->pdev->irq, nouveau_mc_intr, 127 ret = request_irq(device->pdev->irq, nouveau_mc_intr,
100 IRQF_SHARED, "nouveau", pmc); 128 IRQF_SHARED, "nouveau", pmc);
101 if (ret < 0) 129 if (ret < 0)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index 0d57b4d3e001..06710419a59b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -35,6 +35,7 @@ nv98_mc_intr[] = {
35 { 0x00001000, NVDEV_ENGINE_GR }, 35 { 0x00001000, NVDEV_ENGINE_GR },
36 { 0x00004000, NVDEV_ENGINE_CRYPT }, /* NV84:NVA3 */ 36 { 0x00004000, NVDEV_ENGINE_CRYPT }, /* NV84:NVA3 */
37 { 0x00008000, NVDEV_ENGINE_BSP }, 37 { 0x00008000, NVDEV_ENGINE_BSP },
38 { 0x00020000, NVDEV_ENGINE_VP },
38 { 0x00080000, NVDEV_SUBDEV_THERM }, /* NVA3:NVC0 */ 39 { 0x00080000, NVDEV_SUBDEV_THERM }, /* NVA3:NVC0 */
39 { 0x00100000, NVDEV_SUBDEV_TIMER }, 40 { 0x00100000, NVDEV_SUBDEV_TIMER },
40 { 0x00200000, NVDEV_SUBDEV_GPIO }, 41 { 0x00200000, NVDEV_SUBDEV_GPIO },
@@ -42,7 +43,7 @@ nv98_mc_intr[] = {
42 { 0x04000000, NVDEV_ENGINE_DISP }, 43 { 0x04000000, NVDEV_ENGINE_DISP },
43 { 0x10000000, NVDEV_SUBDEV_BUS }, 44 { 0x10000000, NVDEV_SUBDEV_BUS },
44 { 0x80000000, NVDEV_ENGINE_SW }, 45 { 0x80000000, NVDEV_ENGINE_SW },
45 { 0x0040d101, NVDEV_SUBDEV_FB }, 46 { 0x0042d101, NVDEV_SUBDEV_FB },
46 {}, 47 {},
47}; 48};
48 49
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
index a00a5a76e2d6..f1de7a9c572b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
@@ -95,12 +95,14 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode)
95 int duty; 95 int duty;
96 96
97 spin_lock_irqsave(&priv->lock, flags); 97 spin_lock_irqsave(&priv->lock, flags);
98 nv_debug(therm, "FAN speed check\n");
98 if (mode < 0) 99 if (mode < 0)
99 mode = priv->mode; 100 mode = priv->mode;
100 priv->mode = mode; 101 priv->mode = mode;
101 102
102 switch (mode) { 103 switch (mode) {
103 case NOUVEAU_THERM_CTRL_MANUAL: 104 case NOUVEAU_THERM_CTRL_MANUAL:
105 ptimer->alarm_cancel(ptimer, &priv->alarm);
104 duty = nouveau_therm_fan_get(therm); 106 duty = nouveau_therm_fan_get(therm);
105 if (duty < 0) 107 if (duty < 0)
106 duty = 100; 108 duty = 100;
@@ -113,6 +115,7 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode)
113 break; 115 break;
114 case NOUVEAU_THERM_CTRL_NONE: 116 case NOUVEAU_THERM_CTRL_NONE:
115 default: 117 default:
118 ptimer->alarm_cancel(ptimer, &priv->alarm);
116 goto done; 119 goto done;
117 } 120 }
118 121
@@ -122,6 +125,8 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode)
122done: 125done:
123 if (list_empty(&priv->alarm.head) && (mode == NOUVEAU_THERM_CTRL_AUTO)) 126 if (list_empty(&priv->alarm.head) && (mode == NOUVEAU_THERM_CTRL_AUTO))
124 ptimer->alarm(ptimer, 1000000000ULL, &priv->alarm); 127 ptimer->alarm(ptimer, 1000000000ULL, &priv->alarm);
128 else if (!list_empty(&priv->alarm.head))
129 nv_debug(therm, "therm fan alarm list is not empty\n");
125 spin_unlock_irqrestore(&priv->lock, flags); 130 spin_unlock_irqrestore(&priv->lock, flags);
126} 131}
127 132
@@ -267,9 +272,15 @@ _nouveau_therm_init(struct nouveau_object *object)
267 if (ret) 272 if (ret)
268 return ret; 273 return ret;
269 274
270 if (priv->suspend >= 0) 275 if (priv->suspend >= 0) {
271 nouveau_therm_fan_mode(therm, priv->mode); 276 /* restore the pwm value only when on manual or auto mode */
272 priv->sensor.program_alarms(therm); 277 if (priv->suspend > 0)
278 nouveau_therm_fan_set(therm, true, priv->fan->percent);
279
280 nouveau_therm_fan_mode(therm, priv->suspend);
281 }
282 nouveau_therm_sensor_init(therm);
283 nouveau_therm_fan_init(therm);
273 return 0; 284 return 0;
274} 285}
275 286
@@ -279,6 +290,8 @@ _nouveau_therm_fini(struct nouveau_object *object, bool suspend)
279 struct nouveau_therm *therm = (void *)object; 290 struct nouveau_therm *therm = (void *)object;
280 struct nouveau_therm_priv *priv = (void *)therm; 291 struct nouveau_therm_priv *priv = (void *)therm;
281 292
293 nouveau_therm_fan_fini(therm, suspend);
294 nouveau_therm_sensor_fini(therm, suspend);
282 if (suspend) { 295 if (suspend) {
283 priv->suspend = priv->mode; 296 priv->suspend = priv->mode;
284 priv->mode = NOUVEAU_THERM_CTRL_NONE; 297 priv->mode = NOUVEAU_THERM_CTRL_NONE;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
index c728380d3d62..39f47b950ad1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
@@ -204,6 +204,23 @@ nouveau_therm_fan_safety_checks(struct nouveau_therm *therm)
204} 204}
205 205
206int 206int
207nouveau_therm_fan_init(struct nouveau_therm *therm)
208{
209 return 0;
210}
211
212int
213nouveau_therm_fan_fini(struct nouveau_therm *therm, bool suspend)
214{
215 struct nouveau_therm_priv *priv = (void *)therm;
216 struct nouveau_timer *ptimer = nouveau_timer(therm);
217
218 if (suspend)
219 ptimer->alarm_cancel(ptimer, &priv->fan->alarm);
220 return 0;
221}
222
223int
207nouveau_therm_fan_ctor(struct nouveau_therm *therm) 224nouveau_therm_fan_ctor(struct nouveau_therm *therm)
208{ 225{
209 struct nouveau_therm_priv *priv = (void *)therm; 226 struct nouveau_therm_priv *priv = (void *)therm;
@@ -234,6 +251,9 @@ nouveau_therm_fan_ctor(struct nouveau_therm *therm)
234 251
235 nv_info(therm, "FAN control: %s\n", priv->fan->type); 252 nv_info(therm, "FAN control: %s\n", priv->fan->type);
236 253
254 /* read the current speed, it is useful when resuming */
255 priv->fan->percent = nouveau_therm_fan_get(therm);
256
237 /* attempt to detect a tachometer connection */ 257 /* attempt to detect a tachometer connection */
238 ret = gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &priv->fan->tach); 258 ret = gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &priv->fan->tach);
239 if (ret) 259 if (ret)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
index 15ca64e481f1..dd38529262fb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -113,6 +113,8 @@ void nouveau_therm_ic_ctor(struct nouveau_therm *therm);
113int nouveau_therm_sensor_ctor(struct nouveau_therm *therm); 113int nouveau_therm_sensor_ctor(struct nouveau_therm *therm);
114 114
115int nouveau_therm_fan_ctor(struct nouveau_therm *therm); 115int nouveau_therm_fan_ctor(struct nouveau_therm *therm);
116int nouveau_therm_fan_init(struct nouveau_therm *therm);
117int nouveau_therm_fan_fini(struct nouveau_therm *therm, bool suspend);
116int nouveau_therm_fan_get(struct nouveau_therm *therm); 118int nouveau_therm_fan_get(struct nouveau_therm *therm);
117int nouveau_therm_fan_set(struct nouveau_therm *therm, bool now, int percent); 119int nouveau_therm_fan_set(struct nouveau_therm *therm, bool now, int percent);
118int nouveau_therm_fan_user_get(struct nouveau_therm *therm); 120int nouveau_therm_fan_user_get(struct nouveau_therm *therm);
@@ -122,6 +124,8 @@ int nouveau_therm_fan_sense(struct nouveau_therm *therm);
122 124
123int nouveau_therm_preinit(struct nouveau_therm *); 125int nouveau_therm_preinit(struct nouveau_therm *);
124 126
127int nouveau_therm_sensor_init(struct nouveau_therm *therm);
128int nouveau_therm_sensor_fini(struct nouveau_therm *therm, bool suspend);
125void nouveau_therm_sensor_preinit(struct nouveau_therm *); 129void nouveau_therm_sensor_preinit(struct nouveau_therm *);
126void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm, 130void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm,
127 enum nouveau_therm_thrs thrs, 131 enum nouveau_therm_thrs thrs,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index dde746c78c8a..b80a33011b93 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -180,6 +180,8 @@ alarm_timer_callback(struct nouveau_alarm *alarm)
180 180
181 spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags); 181 spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
182 182
183 nv_debug(therm, "polling the internal temperature\n");
184
183 nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost, 185 nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost,
184 NOUVEAU_THERM_THRS_FANBOOST); 186 NOUVEAU_THERM_THRS_FANBOOST);
185 187
@@ -216,6 +218,25 @@ nouveau_therm_program_alarms_polling(struct nouveau_therm *therm)
216 alarm_timer_callback(&priv->sensor.therm_poll_alarm); 218 alarm_timer_callback(&priv->sensor.therm_poll_alarm);
217} 219}
218 220
221int
222nouveau_therm_sensor_init(struct nouveau_therm *therm)
223{
224 struct nouveau_therm_priv *priv = (void *)therm;
225 priv->sensor.program_alarms(therm);
226 return 0;
227}
228
229int
230nouveau_therm_sensor_fini(struct nouveau_therm *therm, bool suspend)
231{
232 struct nouveau_therm_priv *priv = (void *)therm;
233 struct nouveau_timer *ptimer = nouveau_timer(therm);
234
235 if (suspend)
236 ptimer->alarm_cancel(ptimer, &priv->sensor.therm_poll_alarm);
237 return 0;
238}
239
219void 240void
220nouveau_therm_sensor_preinit(struct nouveau_therm *therm) 241nouveau_therm_sensor_preinit(struct nouveau_therm *therm)
221{ 242{
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/base.c b/drivers/gpu/drm/nouveau/core/subdev/timer/base.c
index 5d417cc9949b..cf8a0e0f8ee3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/base.c
@@ -85,3 +85,10 @@ nouveau_timer_alarm(void *obj, u32 nsec, struct nouveau_alarm *alarm)
85 struct nouveau_timer *ptimer = nouveau_timer(obj); 85 struct nouveau_timer *ptimer = nouveau_timer(obj);
86 ptimer->alarm(ptimer, nsec, alarm); 86 ptimer->alarm(ptimer, nsec, alarm);
87} 87}
88
89void
90nouveau_timer_alarm_cancel(void *obj, struct nouveau_alarm *alarm)
91{
92 struct nouveau_timer *ptimer = nouveau_timer(obj);
93 ptimer->alarm_cancel(ptimer, alarm);
94}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
index 9469b8275675..57711ecb566c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
@@ -36,6 +36,7 @@ struct nv04_timer_priv {
36 struct nouveau_timer base; 36 struct nouveau_timer base;
37 struct list_head alarms; 37 struct list_head alarms;
38 spinlock_t lock; 38 spinlock_t lock;
39 u64 suspend_time;
39}; 40};
40 41
41static u64 42static u64
@@ -113,6 +114,25 @@ nv04_timer_alarm(struct nouveau_timer *ptimer, u64 time,
113} 114}
114 115
115static void 116static void
117nv04_timer_alarm_cancel(struct nouveau_timer *ptimer,
118 struct nouveau_alarm *alarm)
119{
120 struct nv04_timer_priv *priv = (void *)ptimer;
121 unsigned long flags;
122
123 /* avoid deleting an entry while the alarm intr is running */
124 spin_lock_irqsave(&priv->lock, flags);
125
126 /* delete the alarm from the list */
127 list_del(&alarm->head);
128
129 /* reset the head so as list_empty returns 1 */
130 INIT_LIST_HEAD(&alarm->head);
131
132 spin_unlock_irqrestore(&priv->lock, flags);
133}
134
135static void
116nv04_timer_intr(struct nouveau_subdev *subdev) 136nv04_timer_intr(struct nouveau_subdev *subdev)
117{ 137{
118 struct nv04_timer_priv *priv = (void *)subdev; 138 struct nv04_timer_priv *priv = (void *)subdev;
@@ -146,6 +166,8 @@ nv04_timer_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
146 priv->base.base.intr = nv04_timer_intr; 166 priv->base.base.intr = nv04_timer_intr;
147 priv->base.read = nv04_timer_read; 167 priv->base.read = nv04_timer_read;
148 priv->base.alarm = nv04_timer_alarm; 168 priv->base.alarm = nv04_timer_alarm;
169 priv->base.alarm_cancel = nv04_timer_alarm_cancel;
170 priv->suspend_time = 0;
149 171
150 INIT_LIST_HEAD(&priv->alarms); 172 INIT_LIST_HEAD(&priv->alarms);
151 spin_lock_init(&priv->lock); 173 spin_lock_init(&priv->lock);
@@ -164,7 +186,7 @@ nv04_timer_init(struct nouveau_object *object)
164{ 186{
165 struct nouveau_device *device = nv_device(object); 187 struct nouveau_device *device = nv_device(object);
166 struct nv04_timer_priv *priv = (void *)object; 188 struct nv04_timer_priv *priv = (void *)object;
167 u32 m = 1, f, n, d; 189 u32 m = 1, f, n, d, lo, hi;
168 int ret; 190 int ret;
169 191
170 ret = nouveau_timer_init(&priv->base); 192 ret = nouveau_timer_init(&priv->base);
@@ -221,16 +243,25 @@ nv04_timer_init(struct nouveau_object *object)
221 d >>= 1; 243 d >>= 1;
222 } 244 }
223 245
246 /* restore the time before suspend */
247 lo = priv->suspend_time;
248 hi = (priv->suspend_time >> 32);
249
224 nv_debug(priv, "input frequency : %dHz\n", f); 250 nv_debug(priv, "input frequency : %dHz\n", f);
225 nv_debug(priv, "input multiplier: %d\n", m); 251 nv_debug(priv, "input multiplier: %d\n", m);
226 nv_debug(priv, "numerator : 0x%08x\n", n); 252 nv_debug(priv, "numerator : 0x%08x\n", n);
227 nv_debug(priv, "denominator : 0x%08x\n", d); 253 nv_debug(priv, "denominator : 0x%08x\n", d);
228 nv_debug(priv, "timer frequency : %dHz\n", (f * m) * d / n); 254 nv_debug(priv, "timer frequency : %dHz\n", (f * m) * d / n);
255 nv_debug(priv, "time low : 0x%08x\n", lo);
256 nv_debug(priv, "time high : 0x%08x\n", hi);
229 257
230 nv_wr32(priv, NV04_PTIMER_NUMERATOR, n); 258 nv_wr32(priv, NV04_PTIMER_NUMERATOR, n);
231 nv_wr32(priv, NV04_PTIMER_DENOMINATOR, d); 259 nv_wr32(priv, NV04_PTIMER_DENOMINATOR, d);
232 nv_wr32(priv, NV04_PTIMER_INTR_0, 0xffffffff); 260 nv_wr32(priv, NV04_PTIMER_INTR_0, 0xffffffff);
233 nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000); 261 nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
262 nv_wr32(priv, NV04_PTIMER_TIME_1, hi);
263 nv_wr32(priv, NV04_PTIMER_TIME_0, lo);
264
234 return 0; 265 return 0;
235} 266}
236 267
@@ -238,6 +269,8 @@ static int
238nv04_timer_fini(struct nouveau_object *object, bool suspend) 269nv04_timer_fini(struct nouveau_object *object, bool suspend)
239{ 270{
240 struct nv04_timer_priv *priv = (void *)object; 271 struct nv04_timer_priv *priv = (void *)object;
272 if (suspend)
273 priv->suspend_time = nv04_timer_read(&priv->base);
241 nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000); 274 nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
242 return nouveau_timer_fini(&priv->base, suspend); 275 return nouveau_timer_fini(&priv->base, suspend);
243} 276}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
index 07dd1fe2d6fb..a4aa81a2173b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
@@ -174,6 +174,7 @@ nv50_vm_flush(struct nouveau_vm *vm)
174 case NVDEV_ENGINE_GR : vme = 0x00; break; 174 case NVDEV_ENGINE_GR : vme = 0x00; break;
175 case NVDEV_ENGINE_VP : vme = 0x01; break; 175 case NVDEV_ENGINE_VP : vme = 0x01; break;
176 case NVDEV_SUBDEV_BAR : vme = 0x06; break; 176 case NVDEV_SUBDEV_BAR : vme = 0x06; break;
177 case NVDEV_ENGINE_PPP :
177 case NVDEV_ENGINE_MPEG : vme = 0x08; break; 178 case NVDEV_ENGINE_MPEG : vme = 0x08; break;
178 case NVDEV_ENGINE_BSP : vme = 0x09; break; 179 case NVDEV_ENGINE_BSP : vme = 0x09; break;
179 case NVDEV_ENGINE_CRYPT: vme = 0x0a; break; 180 case NVDEV_ENGINE_CRYPT: vme = 0x0a; break;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 6a13ffb53bdb..d4fbf11360fe 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -22,6 +22,7 @@
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE. 23 * DEALINGS IN THE SOFTWARE.
24 */ 24 */
25#include <linux/pm_runtime.h>
25 26
26#include <drm/drmP.h> 27#include <drm/drmP.h>
27#include <drm/drm_crtc_helper.h> 28#include <drm/drm_crtc_helper.h>
@@ -1034,13 +1035,59 @@ nv04_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
1034 return 0; 1035 return 0;
1035} 1036}
1036 1037
1038int
1039nouveau_crtc_set_config(struct drm_mode_set *set)
1040{
1041 struct drm_device *dev;
1042 struct nouveau_drm *drm;
1043 int ret;
1044 struct drm_crtc *crtc;
1045 bool active = false;
1046 if (!set || !set->crtc)
1047 return -EINVAL;
1048
1049 dev = set->crtc->dev;
1050
1051 /* get a pm reference here */
1052 ret = pm_runtime_get_sync(dev->dev);
1053 if (ret < 0)
1054 return ret;
1055
1056 ret = drm_crtc_helper_set_config(set);
1057
1058 drm = nouveau_drm(dev);
1059
1060 /* if we get here with no crtcs active then we can drop a reference */
1061 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1062 if (crtc->enabled)
1063 active = true;
1064 }
1065
1066 pm_runtime_mark_last_busy(dev->dev);
1067 /* if we have active crtcs and we don't have a power ref,
1068 take the current one */
1069 if (active && !drm->have_disp_power_ref) {
1070 drm->have_disp_power_ref = true;
1071 return ret;
1072 }
1073 /* if we have no active crtcs, then drop the power ref
1074 we got before */
1075 if (!active && drm->have_disp_power_ref) {
1076 pm_runtime_put_autosuspend(dev->dev);
1077 drm->have_disp_power_ref = false;
1078 }
1079 /* drop the power reference we got coming in here */
1080 pm_runtime_put_autosuspend(dev->dev);
1081 return ret;
1082}
1083
1037static const struct drm_crtc_funcs nv04_crtc_funcs = { 1084static const struct drm_crtc_funcs nv04_crtc_funcs = {
1038 .save = nv_crtc_save, 1085 .save = nv_crtc_save,
1039 .restore = nv_crtc_restore, 1086 .restore = nv_crtc_restore,
1040 .cursor_set = nv04_crtc_cursor_set, 1087 .cursor_set = nv04_crtc_cursor_set,
1041 .cursor_move = nv04_crtc_cursor_move, 1088 .cursor_move = nv04_crtc_cursor_move,
1042 .gamma_set = nv_crtc_gamma_set, 1089 .gamma_set = nv_crtc_gamma_set,
1043 .set_config = drm_crtc_helper_set_config, 1090 .set_config = nouveau_crtc_set_config,
1044 .page_flip = nouveau_crtc_page_flip, 1091 .page_flip = nouveau_crtc_page_flip,
1045 .destroy = nv_crtc_destroy, 1092 .destroy = nv_crtc_destroy,
1046}; 1093};
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index d97f20069d3e..dd7d2e182719 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -25,8 +25,27 @@
25#define NOUVEAU_DSM_POWER_SPEED 0x01 25#define NOUVEAU_DSM_POWER_SPEED 0x01
26#define NOUVEAU_DSM_POWER_STAMINA 0x02 26#define NOUVEAU_DSM_POWER_STAMINA 0x02
27 27
28#define NOUVEAU_DSM_OPTIMUS_FN 0x1A 28#define NOUVEAU_DSM_OPTIMUS_CAPS 0x1A
29#define NOUVEAU_DSM_OPTIMUS_ARGS 0x03000001 29#define NOUVEAU_DSM_OPTIMUS_FLAGS 0x1B
30
31#define NOUVEAU_DSM_OPTIMUS_POWERDOWN_PS3 (3 << 24)
32#define NOUVEAU_DSM_OPTIMUS_NO_POWERDOWN_PS3 (2 << 24)
33#define NOUVEAU_DSM_OPTIMUS_FLAGS_CHANGED (1)
34
35#define NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN (NOUVEAU_DSM_OPTIMUS_POWERDOWN_PS3 | NOUVEAU_DSM_OPTIMUS_FLAGS_CHANGED)
36
37/* result of the optimus caps function */
38#define OPTIMUS_ENABLED (1 << 0)
39#define OPTIMUS_STATUS_MASK (3 << 3)
40#define OPTIMUS_STATUS_OFF (0 << 3)
41#define OPTIMUS_STATUS_ON_ENABLED (1 << 3)
42#define OPTIMUS_STATUS_PWR_STABLE (3 << 3)
43#define OPTIMUS_DISPLAY_HOTPLUG (1 << 6)
44#define OPTIMUS_CAPS_MASK (7 << 24)
45#define OPTIMUS_DYNAMIC_PWR_CAP (1 << 24)
46
47#define OPTIMUS_AUDIO_CAPS_MASK (3 << 27)
48#define OPTIMUS_HDA_CODEC_MASK (2 << 27) /* hda bios control */
30 49
31static struct nouveau_dsm_priv { 50static struct nouveau_dsm_priv {
32 bool dsm_detected; 51 bool dsm_detected;
@@ -251,9 +270,18 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
251 retval |= NOUVEAU_DSM_HAS_MUX; 270 retval |= NOUVEAU_DSM_HAS_MUX;
252 271
253 if (nouveau_test_dsm(dhandle, nouveau_optimus_dsm, 272 if (nouveau_test_dsm(dhandle, nouveau_optimus_dsm,
254 NOUVEAU_DSM_OPTIMUS_FN)) 273 NOUVEAU_DSM_OPTIMUS_CAPS))
255 retval |= NOUVEAU_DSM_HAS_OPT; 274 retval |= NOUVEAU_DSM_HAS_OPT;
256 275
276 if (retval & NOUVEAU_DSM_HAS_OPT) {
277 uint32_t result;
278 nouveau_optimus_dsm(dhandle, NOUVEAU_DSM_OPTIMUS_CAPS, 0,
279 &result);
280 dev_info(&pdev->dev, "optimus capabilities: %s, status %s%s\n",
281 (result & OPTIMUS_ENABLED) ? "enabled" : "disabled",
282 (result & OPTIMUS_DYNAMIC_PWR_CAP) ? "dynamic power, " : "",
283 (result & OPTIMUS_HDA_CODEC_MASK) ? "hda bios codec supported" : "");
284 }
257 if (retval) 285 if (retval)
258 nouveau_dsm_priv.dhandle = dhandle; 286 nouveau_dsm_priv.dhandle = dhandle;
259 287
@@ -328,8 +356,12 @@ void nouveau_switcheroo_optimus_dsm(void)
328 if (!nouveau_dsm_priv.optimus_detected) 356 if (!nouveau_dsm_priv.optimus_detected)
329 return; 357 return;
330 358
331 nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FN, 359 nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FLAGS,
332 NOUVEAU_DSM_OPTIMUS_ARGS, &result); 360 0x3, &result);
361
362 nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_CAPS,
363 NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN, &result);
364
333} 365}
334 366
335void nouveau_unregister_dsm_handler(void) 367void nouveau_unregister_dsm_handler(void)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index af20fba3a1a4..755c38d06271 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1265,7 +1265,9 @@ out:
1265static int 1265static int
1266nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) 1266nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
1267{ 1267{
1268 return 0; 1268 struct nouveau_bo *nvbo = nouveau_bo(bo);
1269
1270 return drm_vma_node_verify_access(&nvbo->gem->vma_node, filp);
1269} 1271}
1270 1272
1271static int 1273static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 4da776f344d7..c5b36f9e9a10 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -26,6 +26,8 @@
26 26
27#include <acpi/button.h> 27#include <acpi/button.h>
28 28
29#include <linux/pm_runtime.h>
30
29#include <drm/drmP.h> 31#include <drm/drmP.h>
30#include <drm/drm_edid.h> 32#include <drm/drm_edid.h>
31#include <drm/drm_crtc_helper.h> 33#include <drm/drm_crtc_helper.h>
@@ -240,6 +242,8 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
240 struct nouveau_encoder *nv_partner; 242 struct nouveau_encoder *nv_partner;
241 struct nouveau_i2c_port *i2c; 243 struct nouveau_i2c_port *i2c;
242 int type; 244 int type;
245 int ret;
246 enum drm_connector_status conn_status = connector_status_disconnected;
243 247
244 /* Cleanup the previous EDID block. */ 248 /* Cleanup the previous EDID block. */
245 if (nv_connector->edid) { 249 if (nv_connector->edid) {
@@ -248,6 +252,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
248 nv_connector->edid = NULL; 252 nv_connector->edid = NULL;
249 } 253 }
250 254
255 ret = pm_runtime_get_sync(connector->dev->dev);
256 if (ret < 0)
257 return conn_status;
258
251 i2c = nouveau_connector_ddc_detect(connector, &nv_encoder); 259 i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
252 if (i2c) { 260 if (i2c) {
253 nv_connector->edid = drm_get_edid(connector, &i2c->adapter); 261 nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
@@ -263,7 +271,8 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
263 !nouveau_dp_detect(to_drm_encoder(nv_encoder))) { 271 !nouveau_dp_detect(to_drm_encoder(nv_encoder))) {
264 NV_ERROR(drm, "Detected %s, but failed init\n", 272 NV_ERROR(drm, "Detected %s, but failed init\n",
265 drm_get_connector_name(connector)); 273 drm_get_connector_name(connector));
266 return connector_status_disconnected; 274 conn_status = connector_status_disconnected;
275 goto out;
267 } 276 }
268 277
269 /* Override encoder type for DVI-I based on whether EDID 278 /* Override encoder type for DVI-I based on whether EDID
@@ -290,13 +299,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
290 } 299 }
291 300
292 nouveau_connector_set_encoder(connector, nv_encoder); 301 nouveau_connector_set_encoder(connector, nv_encoder);
293 return connector_status_connected; 302 conn_status = connector_status_connected;
303 goto out;
294 } 304 }
295 305
296 nv_encoder = nouveau_connector_of_detect(connector); 306 nv_encoder = nouveau_connector_of_detect(connector);
297 if (nv_encoder) { 307 if (nv_encoder) {
298 nouveau_connector_set_encoder(connector, nv_encoder); 308 nouveau_connector_set_encoder(connector, nv_encoder);
299 return connector_status_connected; 309 conn_status = connector_status_connected;
310 goto out;
300 } 311 }
301 312
302detect_analog: 313detect_analog:
@@ -311,12 +322,18 @@ detect_analog:
311 if (helper->detect(encoder, connector) == 322 if (helper->detect(encoder, connector) ==
312 connector_status_connected) { 323 connector_status_connected) {
313 nouveau_connector_set_encoder(connector, nv_encoder); 324 nouveau_connector_set_encoder(connector, nv_encoder);
314 return connector_status_connected; 325 conn_status = connector_status_connected;
326 goto out;
315 } 327 }
316 328
317 } 329 }
318 330
319 return connector_status_disconnected; 331 out:
332
333 pm_runtime_mark_last_busy(connector->dev->dev);
334 pm_runtime_put_autosuspend(connector->dev->dev);
335
336 return conn_status;
320} 337}
321 338
322static enum drm_connector_status 339static enum drm_connector_status
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index a03e75deacaf..d2712e6e5d31 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -107,6 +107,11 @@ nouveau_framebuffer_init(struct drm_device *dev,
107 return -EINVAL; 107 return -EINVAL;
108 } 108 }
109 109
110 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
111 NV_ERROR(drm, "framebuffer requires contiguous bo\n");
112 return -EINVAL;
113 }
114
110 if (nv_device(drm->device)->chipset == 0x50) 115 if (nv_device(drm->device)->chipset == 0x50)
111 nv_fb->r_format |= (tile_flags << 8); 116 nv_fb->r_format |= (tile_flags << 8);
112 117
@@ -394,7 +399,7 @@ nouveau_display_suspend(struct drm_device *dev)
394 399
395 nouveau_display_fini(dev); 400 nouveau_display_fini(dev);
396 401
397 NV_INFO(drm, "unpinning framebuffer(s)...\n"); 402 NV_SUSPEND(drm, "unpinning framebuffer(s)...\n");
398 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 403 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
399 struct nouveau_framebuffer *nouveau_fb; 404 struct nouveau_framebuffer *nouveau_fb;
400 405
@@ -416,7 +421,7 @@ nouveau_display_suspend(struct drm_device *dev)
416} 421}
417 422
418void 423void
419nouveau_display_resume(struct drm_device *dev) 424nouveau_display_repin(struct drm_device *dev)
420{ 425{
421 struct nouveau_drm *drm = nouveau_drm(dev); 426 struct nouveau_drm *drm = nouveau_drm(dev);
422 struct drm_crtc *crtc; 427 struct drm_crtc *crtc;
@@ -441,10 +446,12 @@ nouveau_display_resume(struct drm_device *dev)
441 if (ret) 446 if (ret)
442 NV_ERROR(drm, "Could not pin/map cursor.\n"); 447 NV_ERROR(drm, "Could not pin/map cursor.\n");
443 } 448 }
449}
444 450
445 nouveau_fbcon_set_suspend(dev, 0); 451void
446 nouveau_fbcon_zfill_all(dev); 452nouveau_display_resume(struct drm_device *dev)
447 453{
454 struct drm_crtc *crtc;
448 nouveau_display_init(dev); 455 nouveau_display_init(dev);
449 456
450 /* Force CLUT to get re-loaded during modeset */ 457 /* Force CLUT to get re-loaded during modeset */
@@ -519,7 +526,8 @@ fail:
519 526
520int 527int
521nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, 528nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
522 struct drm_pending_vblank_event *event) 529 struct drm_pending_vblank_event *event,
530 uint32_t page_flip_flags)
523{ 531{
524 struct drm_device *dev = crtc->dev; 532 struct drm_device *dev = crtc->dev;
525 struct nouveau_drm *drm = nouveau_drm(dev); 533 struct nouveau_drm *drm = nouveau_drm(dev);
@@ -677,13 +685,6 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
677} 685}
678 686
679int 687int
680nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
681 uint32_t handle)
682{
683 return drm_gem_handle_delete(file_priv, handle);
684}
685
686int
687nouveau_display_dumb_map_offset(struct drm_file *file_priv, 688nouveau_display_dumb_map_offset(struct drm_file *file_priv,
688 struct drm_device *dev, 689 struct drm_device *dev,
689 uint32_t handle, uint64_t *poffset) 690 uint32_t handle, uint64_t *poffset)
@@ -693,7 +694,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
693 gem = drm_gem_object_lookup(dev, file_priv, handle); 694 gem = drm_gem_object_lookup(dev, file_priv, handle);
694 if (gem) { 695 if (gem) {
695 struct nouveau_bo *bo = gem->driver_private; 696 struct nouveau_bo *bo = gem->driver_private;
696 *poffset = bo->bo.addr_space_offset; 697 *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
697 drm_gem_object_unreference_unlocked(gem); 698 drm_gem_object_unreference_unlocked(gem);
698 return 0; 699 return 0;
699 } 700 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 1ea3e4734b62..025c66f8e0ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -57,10 +57,12 @@ void nouveau_display_destroy(struct drm_device *dev);
57int nouveau_display_init(struct drm_device *dev); 57int nouveau_display_init(struct drm_device *dev);
58void nouveau_display_fini(struct drm_device *dev); 58void nouveau_display_fini(struct drm_device *dev);
59int nouveau_display_suspend(struct drm_device *dev); 59int nouveau_display_suspend(struct drm_device *dev);
60void nouveau_display_repin(struct drm_device *dev);
60void nouveau_display_resume(struct drm_device *dev); 61void nouveau_display_resume(struct drm_device *dev);
61 62
62int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, 63int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
63 struct drm_pending_vblank_event *event); 64 struct drm_pending_vblank_event *event,
65 uint32_t page_flip_flags);
64int nouveau_finish_page_flip(struct nouveau_channel *, 66int nouveau_finish_page_flip(struct nouveau_channel *,
65 struct nouveau_page_flip_state *); 67 struct nouveau_page_flip_state *);
66 68
@@ -68,11 +70,10 @@ int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
68 struct drm_mode_create_dumb *args); 70 struct drm_mode_create_dumb *args);
69int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *, 71int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
70 u32 handle, u64 *offset); 72 u32 handle, u64 *offset);
71int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
72 u32 handle);
73 73
74void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *); 74void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
75 75
76int nouveau_crtc_set_config(struct drm_mode_set *set);
76#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT 77#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
77extern int nouveau_backlight_init(struct drm_device *); 78extern int nouveau_backlight_init(struct drm_device *);
78extern void nouveau_backlight_exit(struct drm_device *); 79extern void nouveau_backlight_exit(struct drm_device *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 61972668fd05..8863644024b7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -25,7 +25,10 @@
25#include <linux/console.h> 25#include <linux/console.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/pci.h> 27#include <linux/pci.h>
28 28#include <linux/pm_runtime.h>
29#include <linux/vga_switcheroo.h>
30#include "drmP.h"
31#include "drm_crtc_helper.h"
29#include <core/device.h> 32#include <core/device.h>
30#include <core/client.h> 33#include <core/client.h>
31#include <core/gpuobj.h> 34#include <core/gpuobj.h>
@@ -69,6 +72,10 @@ MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
69int nouveau_modeset = -1; 72int nouveau_modeset = -1;
70module_param_named(modeset, nouveau_modeset, int, 0400); 73module_param_named(modeset, nouveau_modeset, int, 0400);
71 74
75MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
76int nouveau_runtime_pm = -1;
77module_param_named(runpm, nouveau_runtime_pm, int, 0400);
78
72static struct drm_driver driver; 79static struct drm_driver driver;
73 80
74static int 81static int
@@ -296,6 +303,31 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
296 return 0; 303 return 0;
297} 304}
298 305
306#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
307
308static void
309nouveau_get_hdmi_dev(struct drm_device *dev)
310{
311 struct nouveau_drm *drm = dev->dev_private;
312 struct pci_dev *pdev = dev->pdev;
313
314 /* subfunction one is a hdmi audio device? */
315 drm->hdmi_device = pci_get_bus_and_slot((unsigned int)pdev->bus->number,
316 PCI_DEVFN(PCI_SLOT(pdev->devfn), 1));
317
318 if (!drm->hdmi_device) {
319 DRM_INFO("hdmi device not found %d %d %d\n", pdev->bus->number, PCI_SLOT(pdev->devfn), 1);
320 return;
321 }
322
323 if ((drm->hdmi_device->class >> 8) != PCI_CLASS_MULTIMEDIA_HD_AUDIO) {
324 DRM_INFO("possible hdmi device not audio %d\n", drm->hdmi_device->class);
325 pci_dev_put(drm->hdmi_device);
326 drm->hdmi_device = NULL;
327 return;
328 }
329}
330
299static int 331static int
300nouveau_drm_load(struct drm_device *dev, unsigned long flags) 332nouveau_drm_load(struct drm_device *dev, unsigned long flags)
301{ 333{
@@ -314,6 +346,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
314 INIT_LIST_HEAD(&drm->clients); 346 INIT_LIST_HEAD(&drm->clients);
315 spin_lock_init(&drm->tile.lock); 347 spin_lock_init(&drm->tile.lock);
316 348
349 nouveau_get_hdmi_dev(dev);
350
317 /* make sure AGP controller is in a consistent state before we 351 /* make sure AGP controller is in a consistent state before we
318 * (possibly) execute vbios init tables (see nouveau_agp.h) 352 * (possibly) execute vbios init tables (see nouveau_agp.h)
319 */ 353 */
@@ -388,6 +422,15 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
388 422
389 nouveau_accel_init(drm); 423 nouveau_accel_init(drm);
390 nouveau_fbcon_init(dev); 424 nouveau_fbcon_init(dev);
425
426 if (nouveau_runtime_pm != 0) {
427 pm_runtime_use_autosuspend(dev->dev);
428 pm_runtime_set_autosuspend_delay(dev->dev, 5000);
429 pm_runtime_set_active(dev->dev);
430 pm_runtime_allow(dev->dev);
431 pm_runtime_mark_last_busy(dev->dev);
432 pm_runtime_put(dev->dev);
433 }
391 return 0; 434 return 0;
392 435
393fail_dispinit: 436fail_dispinit:
@@ -409,6 +452,7 @@ nouveau_drm_unload(struct drm_device *dev)
409{ 452{
410 struct nouveau_drm *drm = nouveau_drm(dev); 453 struct nouveau_drm *drm = nouveau_drm(dev);
411 454
455 pm_runtime_get_sync(dev->dev);
412 nouveau_fbcon_fini(dev); 456 nouveau_fbcon_fini(dev);
413 nouveau_accel_fini(drm); 457 nouveau_accel_fini(drm);
414 458
@@ -424,6 +468,8 @@ nouveau_drm_unload(struct drm_device *dev)
424 nouveau_agp_fini(drm); 468 nouveau_agp_fini(drm);
425 nouveau_vga_fini(drm); 469 nouveau_vga_fini(drm);
426 470
471 if (drm->hdmi_device)
472 pci_dev_put(drm->hdmi_device);
427 nouveau_cli_destroy(&drm->client); 473 nouveau_cli_destroy(&drm->client);
428 return 0; 474 return 0;
429} 475}
@@ -450,19 +496,16 @@ nouveau_do_suspend(struct drm_device *dev)
450 int ret; 496 int ret;
451 497
452 if (dev->mode_config.num_crtc) { 498 if (dev->mode_config.num_crtc) {
453 NV_INFO(drm, "suspending fbcon...\n"); 499 NV_SUSPEND(drm, "suspending display...\n");
454 nouveau_fbcon_set_suspend(dev, 1);
455
456 NV_INFO(drm, "suspending display...\n");
457 ret = nouveau_display_suspend(dev); 500 ret = nouveau_display_suspend(dev);
458 if (ret) 501 if (ret)
459 return ret; 502 return ret;
460 } 503 }
461 504
462 NV_INFO(drm, "evicting buffers...\n"); 505 NV_SUSPEND(drm, "evicting buffers...\n");
463 ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM); 506 ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
464 507
465 NV_INFO(drm, "waiting for kernel channels to go idle...\n"); 508 NV_SUSPEND(drm, "waiting for kernel channels to go idle...\n");
466 if (drm->cechan) { 509 if (drm->cechan) {
467 ret = nouveau_channel_idle(drm->cechan); 510 ret = nouveau_channel_idle(drm->cechan);
468 if (ret) 511 if (ret)
@@ -475,7 +518,7 @@ nouveau_do_suspend(struct drm_device *dev)
475 return ret; 518 return ret;
476 } 519 }
477 520
478 NV_INFO(drm, "suspending client object trees...\n"); 521 NV_SUSPEND(drm, "suspending client object trees...\n");
479 if (drm->fence && nouveau_fence(drm)->suspend) { 522 if (drm->fence && nouveau_fence(drm)->suspend) {
480 if (!nouveau_fence(drm)->suspend(drm)) 523 if (!nouveau_fence(drm)->suspend(drm))
481 return -ENOMEM; 524 return -ENOMEM;
@@ -487,7 +530,7 @@ nouveau_do_suspend(struct drm_device *dev)
487 goto fail_client; 530 goto fail_client;
488 } 531 }
489 532
490 NV_INFO(drm, "suspending kernel object tree...\n"); 533 NV_SUSPEND(drm, "suspending kernel object tree...\n");
491 ret = nouveau_client_fini(&drm->client.base, true); 534 ret = nouveau_client_fini(&drm->client.base, true);
492 if (ret) 535 if (ret)
493 goto fail_client; 536 goto fail_client;
@@ -501,7 +544,7 @@ fail_client:
501 } 544 }
502 545
503 if (dev->mode_config.num_crtc) { 546 if (dev->mode_config.num_crtc) {
504 NV_INFO(drm, "resuming display...\n"); 547 NV_SUSPEND(drm, "resuming display...\n");
505 nouveau_display_resume(dev); 548 nouveau_display_resume(dev);
506 } 549 }
507 return ret; 550 return ret;
@@ -513,9 +556,14 @@ int nouveau_pmops_suspend(struct device *dev)
513 struct drm_device *drm_dev = pci_get_drvdata(pdev); 556 struct drm_device *drm_dev = pci_get_drvdata(pdev);
514 int ret; 557 int ret;
515 558
516 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 559 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
560 drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
517 return 0; 561 return 0;
518 562
563 if (drm_dev->mode_config.num_crtc)
564 nouveau_fbcon_set_suspend(drm_dev, 1);
565
566 nv_suspend_set_printk_level(NV_DBG_INFO);
519 ret = nouveau_do_suspend(drm_dev); 567 ret = nouveau_do_suspend(drm_dev);
520 if (ret) 568 if (ret)
521 return ret; 569 return ret;
@@ -523,6 +571,7 @@ int nouveau_pmops_suspend(struct device *dev)
523 pci_save_state(pdev); 571 pci_save_state(pdev);
524 pci_disable_device(pdev); 572 pci_disable_device(pdev);
525 pci_set_power_state(pdev, PCI_D3hot); 573 pci_set_power_state(pdev, PCI_D3hot);
574 nv_suspend_set_printk_level(NV_DBG_DEBUG);
526 575
527 return 0; 576 return 0;
528} 577}
@@ -533,15 +582,15 @@ nouveau_do_resume(struct drm_device *dev)
533 struct nouveau_drm *drm = nouveau_drm(dev); 582 struct nouveau_drm *drm = nouveau_drm(dev);
534 struct nouveau_cli *cli; 583 struct nouveau_cli *cli;
535 584
536 NV_INFO(drm, "re-enabling device...\n"); 585 NV_SUSPEND(drm, "re-enabling device...\n");
537 586
538 nouveau_agp_reset(drm); 587 nouveau_agp_reset(drm);
539 588
540 NV_INFO(drm, "resuming kernel object tree...\n"); 589 NV_SUSPEND(drm, "resuming kernel object tree...\n");
541 nouveau_client_init(&drm->client.base); 590 nouveau_client_init(&drm->client.base);
542 nouveau_agp_init(drm); 591 nouveau_agp_init(drm);
543 592
544 NV_INFO(drm, "resuming client object trees...\n"); 593 NV_SUSPEND(drm, "resuming client object trees...\n");
545 if (drm->fence && nouveau_fence(drm)->resume) 594 if (drm->fence && nouveau_fence(drm)->resume)
546 nouveau_fence(drm)->resume(drm); 595 nouveau_fence(drm)->resume(drm);
547 596
@@ -553,9 +602,10 @@ nouveau_do_resume(struct drm_device *dev)
553 nouveau_pm_resume(dev); 602 nouveau_pm_resume(dev);
554 603
555 if (dev->mode_config.num_crtc) { 604 if (dev->mode_config.num_crtc) {
556 NV_INFO(drm, "resuming display...\n"); 605 NV_SUSPEND(drm, "resuming display...\n");
557 nouveau_display_resume(dev); 606 nouveau_display_repin(dev);
558 } 607 }
608
559 return 0; 609 return 0;
560} 610}
561 611
@@ -565,7 +615,8 @@ int nouveau_pmops_resume(struct device *dev)
565 struct drm_device *drm_dev = pci_get_drvdata(pdev); 615 struct drm_device *drm_dev = pci_get_drvdata(pdev);
566 int ret; 616 int ret;
567 617
568 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 618 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
619 drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
569 return 0; 620 return 0;
570 621
571 pci_set_power_state(pdev, PCI_D0); 622 pci_set_power_state(pdev, PCI_D0);
@@ -575,23 +626,54 @@ int nouveau_pmops_resume(struct device *dev)
575 return ret; 626 return ret;
576 pci_set_master(pdev); 627 pci_set_master(pdev);
577 628
578 return nouveau_do_resume(drm_dev); 629 nv_suspend_set_printk_level(NV_DBG_INFO);
630 ret = nouveau_do_resume(drm_dev);
631 if (ret) {
632 nv_suspend_set_printk_level(NV_DBG_DEBUG);
633 return ret;
634 }
635 if (drm_dev->mode_config.num_crtc)
636 nouveau_fbcon_set_suspend(drm_dev, 0);
637
638 nouveau_fbcon_zfill_all(drm_dev);
639 nouveau_display_resume(drm_dev);
640 nv_suspend_set_printk_level(NV_DBG_DEBUG);
641 return 0;
579} 642}
580 643
581static int nouveau_pmops_freeze(struct device *dev) 644static int nouveau_pmops_freeze(struct device *dev)
582{ 645{
583 struct pci_dev *pdev = to_pci_dev(dev); 646 struct pci_dev *pdev = to_pci_dev(dev);
584 struct drm_device *drm_dev = pci_get_drvdata(pdev); 647 struct drm_device *drm_dev = pci_get_drvdata(pdev);
648 int ret;
649
650 nv_suspend_set_printk_level(NV_DBG_INFO);
651 if (drm_dev->mode_config.num_crtc)
652 nouveau_fbcon_set_suspend(drm_dev, 1);
585 653
586 return nouveau_do_suspend(drm_dev); 654 ret = nouveau_do_suspend(drm_dev);
655 nv_suspend_set_printk_level(NV_DBG_DEBUG);
656 return ret;
587} 657}
588 658
589static int nouveau_pmops_thaw(struct device *dev) 659static int nouveau_pmops_thaw(struct device *dev)
590{ 660{
591 struct pci_dev *pdev = to_pci_dev(dev); 661 struct pci_dev *pdev = to_pci_dev(dev);
592 struct drm_device *drm_dev = pci_get_drvdata(pdev); 662 struct drm_device *drm_dev = pci_get_drvdata(pdev);
663 int ret;
593 664
594 return nouveau_do_resume(drm_dev); 665 nv_suspend_set_printk_level(NV_DBG_INFO);
666 ret = nouveau_do_resume(drm_dev);
667 if (ret) {
668 nv_suspend_set_printk_level(NV_DBG_DEBUG);
669 return ret;
670 }
671 if (drm_dev->mode_config.num_crtc)
672 nouveau_fbcon_set_suspend(drm_dev, 0);
673 nouveau_fbcon_zfill_all(drm_dev);
674 nouveau_display_resume(drm_dev);
675 nv_suspend_set_printk_level(NV_DBG_DEBUG);
676 return 0;
595} 677}
596 678
597 679
@@ -604,19 +686,24 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
604 char name[32], tmpname[TASK_COMM_LEN]; 686 char name[32], tmpname[TASK_COMM_LEN];
605 int ret; 687 int ret;
606 688
689 /* need to bring up power immediately if opening device */
690 ret = pm_runtime_get_sync(dev->dev);
691 if (ret < 0)
692 return ret;
693
607 get_task_comm(tmpname, current); 694 get_task_comm(tmpname, current);
608 snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid)); 695 snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
609 696
610 ret = nouveau_cli_create(pdev, name, sizeof(*cli), (void **)&cli); 697 ret = nouveau_cli_create(pdev, name, sizeof(*cli), (void **)&cli);
611 if (ret) 698 if (ret)
612 return ret; 699 goto out_suspend;
613 700
614 if (nv_device(drm->device)->card_type >= NV_50) { 701 if (nv_device(drm->device)->card_type >= NV_50) {
615 ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40), 702 ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
616 0x1000, &cli->base.vm); 703 0x1000, &cli->base.vm);
617 if (ret) { 704 if (ret) {
618 nouveau_cli_destroy(cli); 705 nouveau_cli_destroy(cli);
619 return ret; 706 goto out_suspend;
620 } 707 }
621 } 708 }
622 709
@@ -625,7 +712,12 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
625 mutex_lock(&drm->client.mutex); 712 mutex_lock(&drm->client.mutex);
626 list_add(&cli->head, &drm->clients); 713 list_add(&cli->head, &drm->clients);
627 mutex_unlock(&drm->client.mutex); 714 mutex_unlock(&drm->client.mutex);
628 return 0; 715
716out_suspend:
717 pm_runtime_mark_last_busy(dev->dev);
718 pm_runtime_put_autosuspend(dev->dev);
719
720 return ret;
629} 721}
630 722
631static void 723static void
@@ -634,12 +726,15 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
634 struct nouveau_cli *cli = nouveau_cli(fpriv); 726 struct nouveau_cli *cli = nouveau_cli(fpriv);
635 struct nouveau_drm *drm = nouveau_drm(dev); 727 struct nouveau_drm *drm = nouveau_drm(dev);
636 728
729 pm_runtime_get_sync(dev->dev);
730
637 if (cli->abi16) 731 if (cli->abi16)
638 nouveau_abi16_fini(cli->abi16); 732 nouveau_abi16_fini(cli->abi16);
639 733
640 mutex_lock(&drm->client.mutex); 734 mutex_lock(&drm->client.mutex);
641 list_del(&cli->head); 735 list_del(&cli->head);
642 mutex_unlock(&drm->client.mutex); 736 mutex_unlock(&drm->client.mutex);
737
643} 738}
644 739
645static void 740static void
@@ -647,33 +742,52 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
647{ 742{
648 struct nouveau_cli *cli = nouveau_cli(fpriv); 743 struct nouveau_cli *cli = nouveau_cli(fpriv);
649 nouveau_cli_destroy(cli); 744 nouveau_cli_destroy(cli);
745 pm_runtime_mark_last_busy(dev->dev);
746 pm_runtime_put_autosuspend(dev->dev);
650} 747}
651 748
652static struct drm_ioctl_desc 749static const struct drm_ioctl_desc
653nouveau_ioctls[] = { 750nouveau_ioctls[] = {
654 DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH), 751 DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
655 DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 752 DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
656 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH), 753 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
657 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH), 754 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
658 DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH), 755 DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
659 DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH), 756 DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
660 DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH), 757 DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
661 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH), 758 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
662 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH), 759 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
663 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH), 760 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
664 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH), 761 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
665 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH), 762 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
666}; 763};
667 764
765long nouveau_drm_ioctl(struct file *filp,
766 unsigned int cmd, unsigned long arg)
767{
768 struct drm_file *file_priv = filp->private_data;
769 struct drm_device *dev;
770 long ret;
771 dev = file_priv->minor->dev;
772
773 ret = pm_runtime_get_sync(dev->dev);
774 if (ret < 0)
775 return ret;
776
777 ret = drm_ioctl(filp, cmd, arg);
778
779 pm_runtime_mark_last_busy(dev->dev);
780 pm_runtime_put_autosuspend(dev->dev);
781 return ret;
782}
668static const struct file_operations 783static const struct file_operations
669nouveau_driver_fops = { 784nouveau_driver_fops = {
670 .owner = THIS_MODULE, 785 .owner = THIS_MODULE,
671 .open = drm_open, 786 .open = drm_open,
672 .release = drm_release, 787 .release = drm_release,
673 .unlocked_ioctl = drm_ioctl, 788 .unlocked_ioctl = nouveau_drm_ioctl,
674 .mmap = nouveau_ttm_mmap, 789 .mmap = nouveau_ttm_mmap,
675 .poll = drm_poll, 790 .poll = drm_poll,
676 .fasync = drm_fasync,
677 .read = drm_read, 791 .read = drm_read,
678#if defined(CONFIG_COMPAT) 792#if defined(CONFIG_COMPAT)
679 .compat_ioctl = nouveau_compat_ioctl, 793 .compat_ioctl = nouveau_compat_ioctl,
@@ -684,8 +798,8 @@ nouveau_driver_fops = {
684static struct drm_driver 798static struct drm_driver
685driver = { 799driver = {
686 .driver_features = 800 .driver_features =
687 DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | 801 DRIVER_USE_AGP |
688 DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME, 802 DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
689 803
690 .load = nouveau_drm_load, 804 .load = nouveau_drm_load,
691 .unload = nouveau_drm_unload, 805 .unload = nouveau_drm_unload,
@@ -704,6 +818,7 @@ driver = {
704 .disable_vblank = nouveau_drm_vblank_disable, 818 .disable_vblank = nouveau_drm_vblank_disable,
705 819
706 .ioctls = nouveau_ioctls, 820 .ioctls = nouveau_ioctls,
821 .num_ioctls = ARRAY_SIZE(nouveau_ioctls),
707 .fops = &nouveau_driver_fops, 822 .fops = &nouveau_driver_fops,
708 823
709 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 824 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -724,7 +839,7 @@ driver = {
724 839
725 .dumb_create = nouveau_display_dumb_create, 840 .dumb_create = nouveau_display_dumb_create,
726 .dumb_map_offset = nouveau_display_dumb_map_offset, 841 .dumb_map_offset = nouveau_display_dumb_map_offset,
727 .dumb_destroy = nouveau_display_dumb_destroy, 842 .dumb_destroy = drm_gem_dumb_destroy,
728 843
729 .name = DRIVER_NAME, 844 .name = DRIVER_NAME,
730 .desc = DRIVER_DESC, 845 .desc = DRIVER_DESC,
@@ -753,6 +868,90 @@ nouveau_drm_pci_table[] = {
753 {} 868 {}
754}; 869};
755 870
871static int nouveau_pmops_runtime_suspend(struct device *dev)
872{
873 struct pci_dev *pdev = to_pci_dev(dev);
874 struct drm_device *drm_dev = pci_get_drvdata(pdev);
875 int ret;
876
877 if (nouveau_runtime_pm == 0)
878 return -EINVAL;
879
880 drm_kms_helper_poll_disable(drm_dev);
881 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
882 nouveau_switcheroo_optimus_dsm();
883 ret = nouveau_do_suspend(drm_dev);
884 pci_save_state(pdev);
885 pci_disable_device(pdev);
886 pci_set_power_state(pdev, PCI_D3cold);
887 drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
888 return ret;
889}
890
891static int nouveau_pmops_runtime_resume(struct device *dev)
892{
893 struct pci_dev *pdev = to_pci_dev(dev);
894 struct drm_device *drm_dev = pci_get_drvdata(pdev);
895 struct nouveau_device *device = nouveau_dev(drm_dev);
896 int ret;
897
898 if (nouveau_runtime_pm == 0)
899 return -EINVAL;
900
901 pci_set_power_state(pdev, PCI_D0);
902 pci_restore_state(pdev);
903 ret = pci_enable_device(pdev);
904 if (ret)
905 return ret;
906 pci_set_master(pdev);
907
908 ret = nouveau_do_resume(drm_dev);
909 nouveau_display_resume(drm_dev);
910 drm_kms_helper_poll_enable(drm_dev);
911 /* do magic */
912 nv_mask(device, 0x88488, (1 << 25), (1 << 25));
913 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
914 drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
915 return ret;
916}
917
918static int nouveau_pmops_runtime_idle(struct device *dev)
919{
920 struct pci_dev *pdev = to_pci_dev(dev);
921 struct drm_device *drm_dev = pci_get_drvdata(pdev);
922 struct nouveau_drm *drm = nouveau_drm(drm_dev);
923 struct drm_crtc *crtc;
924
925 if (nouveau_runtime_pm == 0)
926 return -EBUSY;
927
928 /* are we optimus enabled? */
929 if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
930 DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
931 return -EBUSY;
932 }
933
934 /* if we have a hdmi audio device - make sure it has a driver loaded */
935 if (drm->hdmi_device) {
936 if (!drm->hdmi_device->driver) {
937 DRM_DEBUG_DRIVER("failing to power off - no HDMI audio driver loaded\n");
938 pm_runtime_mark_last_busy(dev);
939 return -EBUSY;
940 }
941 }
942
943 list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
944 if (crtc->enabled) {
945 DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
946 return -EBUSY;
947 }
948 }
949 pm_runtime_mark_last_busy(dev);
950 pm_runtime_autosuspend(dev);
951 /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
952 return 1;
953}
954
756static const struct dev_pm_ops nouveau_pm_ops = { 955static const struct dev_pm_ops nouveau_pm_ops = {
757 .suspend = nouveau_pmops_suspend, 956 .suspend = nouveau_pmops_suspend,
758 .resume = nouveau_pmops_resume, 957 .resume = nouveau_pmops_resume,
@@ -760,6 +959,9 @@ static const struct dev_pm_ops nouveau_pm_ops = {
760 .thaw = nouveau_pmops_thaw, 959 .thaw = nouveau_pmops_thaw,
761 .poweroff = nouveau_pmops_freeze, 960 .poweroff = nouveau_pmops_freeze,
762 .restore = nouveau_pmops_resume, 961 .restore = nouveau_pmops_resume,
962 .runtime_suspend = nouveau_pmops_runtime_suspend,
963 .runtime_resume = nouveau_pmops_runtime_resume,
964 .runtime_idle = nouveau_pmops_runtime_idle,
763}; 965};
764 966
765static struct pci_driver 967static struct pci_driver
@@ -774,8 +976,6 @@ nouveau_drm_pci_driver = {
774static int __init 976static int __init
775nouveau_drm_init(void) 977nouveau_drm_init(void)
776{ 978{
777 driver.num_ioctls = ARRAY_SIZE(nouveau_ioctls);
778
779 if (nouveau_modeset == -1) { 979 if (nouveau_modeset == -1) {
780#ifdef CONFIG_VGA_CONSOLE 980#ifdef CONFIG_VGA_CONSOLE
781 if (vgacon_text_force()) 981 if (vgacon_text_force())
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 41ff7e0d403a..994fd6ec373b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -70,6 +70,8 @@ nouveau_cli(struct drm_file *fpriv)
70 return fpriv ? fpriv->driver_priv : NULL; 70 return fpriv ? fpriv->driver_priv : NULL;
71} 71}
72 72
73extern int nouveau_runtime_pm;
74
73struct nouveau_drm { 75struct nouveau_drm {
74 struct nouveau_cli client; 76 struct nouveau_cli client;
75 struct drm_device *dev; 77 struct drm_device *dev;
@@ -129,6 +131,12 @@ struct nouveau_drm {
129 131
130 /* power management */ 132 /* power management */
131 struct nouveau_pm *pm; 133 struct nouveau_pm *pm;
134
135 /* display power reference */
136 bool have_disp_power_ref;
137
138 struct dev_pm_domain vga_pm_domain;
139 struct pci_dev *hdmi_device;
132}; 140};
133 141
134static inline struct nouveau_drm * 142static inline struct nouveau_drm *
@@ -146,6 +154,7 @@ nouveau_dev(struct drm_device *dev)
146int nouveau_pmops_suspend(struct device *); 154int nouveau_pmops_suspend(struct device *);
147int nouveau_pmops_resume(struct device *); 155int nouveau_pmops_resume(struct device *);
148 156
157#define NV_SUSPEND(cli, fmt, args...) nv_suspend((cli), fmt, ##args)
149#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args) 158#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
150#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args) 159#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
151#define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args) 160#define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 830cb7bad922..f32b71238c03 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -220,7 +220,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
220 } 220 }
221 221
222 rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT; 222 rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
223 rep->map_handle = nvbo->bo.addr_space_offset; 223 rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
224 rep->tile_mode = nvbo->tile_mode; 224 rep->tile_mode = nvbo->tile_mode;
225 rep->tile_flags = nvbo->tile_flags; 225 rep->tile_flags = nvbo->tile_flags;
226 return 0; 226 return 0;
@@ -579,18 +579,31 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
579 return 0; 579 return 0;
580} 580}
581 581
582static inline void
583u_free(void *addr)
584{
585 if (!is_vmalloc_addr(addr))
586 kfree(addr);
587 else
588 vfree(addr);
589}
590
582static inline void * 591static inline void *
583u_memcpya(uint64_t user, unsigned nmemb, unsigned size) 592u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
584{ 593{
585 void *mem; 594 void *mem;
586 void __user *userptr = (void __force __user *)(uintptr_t)user; 595 void __user *userptr = (void __force __user *)(uintptr_t)user;
587 596
588 mem = kmalloc(nmemb * size, GFP_KERNEL); 597 size *= nmemb;
598
599 mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
600 if (!mem)
601 mem = vmalloc(size);
589 if (!mem) 602 if (!mem)
590 return ERR_PTR(-ENOMEM); 603 return ERR_PTR(-ENOMEM);
591 604
592 if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) { 605 if (DRM_COPY_FROM_USER(mem, userptr, size)) {
593 kfree(mem); 606 u_free(mem);
594 return ERR_PTR(-EFAULT); 607 return ERR_PTR(-EFAULT);
595 } 608 }
596 609
@@ -676,7 +689,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
676 nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data); 689 nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
677 } 690 }
678 691
679 kfree(reloc); 692 u_free(reloc);
680 return ret; 693 return ret;
681} 694}
682 695
@@ -738,7 +751,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
738 751
739 bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); 752 bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
740 if (IS_ERR(bo)) { 753 if (IS_ERR(bo)) {
741 kfree(push); 754 u_free(push);
742 return nouveau_abi16_put(abi16, PTR_ERR(bo)); 755 return nouveau_abi16_put(abi16, PTR_ERR(bo));
743 } 756 }
744 757
@@ -849,8 +862,8 @@ out:
849 nouveau_fence_unref(&fence); 862 nouveau_fence_unref(&fence);
850 863
851out_prevalid: 864out_prevalid:
852 kfree(bo); 865 u_free(bo);
853 kfree(push); 866 u_free(push);
854 867
855out_next: 868out_next:
856 if (chan->dma.ib_max) { 869 if (chan->dma.ib_max) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
index 08214bcdcb12..c1a7e5a73a26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -63,7 +63,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
63 if (fn != NULL) 63 if (fn != NULL)
64 ret = (*fn)(filp, cmd, arg); 64 ret = (*fn)(filp, cmd, arg);
65 else 65 else
66 ret = drm_ioctl(filp, cmd, arg); 66 ret = nouveau_drm_ioctl(filp, cmd, arg);
67 67
68 return ret; 68 return ret;
69} 69}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioctl.h b/drivers/gpu/drm/nouveau/nouveau_ioctl.h
index ef2b2906d9e6..3b9f2e5463a7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioctl.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ioctl.h
@@ -2,5 +2,6 @@
2#define __NOUVEAU_IOCTL_H__ 2#define __NOUVEAU_IOCTL_H__
3 3
4long nouveau_compat_ioctl(struct file *, unsigned int cmd, unsigned long arg); 4long nouveau_compat_ioctl(struct file *, unsigned int cmd, unsigned long arg);
5long nouveau_drm_ioctl(struct file *, unsigned int cmd, unsigned long arg);
5 6
6#endif 7#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 25d3495725eb..81638d7f2eff 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -32,6 +32,9 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
32{ 32{
33 struct drm_device *dev = pci_get_drvdata(pdev); 33 struct drm_device *dev = pci_get_drvdata(pdev);
34 34
35 if ((nouveau_is_optimus() || nouveau_is_v1_dsm()) && state == VGA_SWITCHEROO_OFF)
36 return;
37
35 if (state == VGA_SWITCHEROO_ON) { 38 if (state == VGA_SWITCHEROO_ON) {
36 printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); 39 printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
37 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 40 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
@@ -78,8 +81,17 @@ void
78nouveau_vga_init(struct nouveau_drm *drm) 81nouveau_vga_init(struct nouveau_drm *drm)
79{ 82{
80 struct drm_device *dev = drm->dev; 83 struct drm_device *dev = drm->dev;
84 bool runtime = false;
81 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); 85 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
82 vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops); 86
87 if (nouveau_runtime_pm == 1)
88 runtime = true;
89 if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
90 runtime = true;
91 vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime);
92
93 if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
94 vga_switcheroo_init_domain_pm_ops(drm->dev->dev, &drm->vga_pm_domain);
83} 95}
84 96
85void 97void
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 8b40a36c1b57..f8e66c08b11a 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1326,7 +1326,7 @@ static const struct drm_crtc_funcs nv50_crtc_func = {
1326 .cursor_set = nv50_crtc_cursor_set, 1326 .cursor_set = nv50_crtc_cursor_set,
1327 .cursor_move = nv50_crtc_cursor_move, 1327 .cursor_move = nv50_crtc_cursor_move,
1328 .gamma_set = nv50_crtc_gamma_set, 1328 .gamma_set = nv50_crtc_gamma_set,
1329 .set_config = drm_crtc_helper_set_config, 1329 .set_config = nouveau_crtc_set_config,
1330 .destroy = nv50_crtc_destroy, 1330 .destroy = nv50_crtc_destroy,
1331 .page_flip = nouveau_crtc_page_flip, 1331 .page_flip = nouveau_crtc_page_flip,
1332}; 1332};
@@ -1583,7 +1583,7 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
1583 load = 340; 1583 load = 340;
1584 1584
1585 ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load)); 1585 ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load));
1586 if (ret || load != 7) 1586 if (ret || !load)
1587 return connector_status_disconnected; 1587 return connector_status_disconnected;
1588 1588
1589 return connector_status_connected; 1589 return connector_status_connected;
diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
index d85e058f2845..778372b062ad 100644
--- a/drivers/gpu/drm/omapdrm/Makefile
+++ b/drivers/gpu/drm/omapdrm/Makefile
@@ -18,7 +18,4 @@ omapdrm-y := omap_drv.o \
18 omap_dmm_tiler.o \ 18 omap_dmm_tiler.o \
19 tcm-sita.o 19 tcm-sita.o
20 20
21# temporary:
22omapdrm-y += omap_gem_helpers.o
23
24obj-$(CONFIG_DRM_OMAP) += omapdrm.o 21obj-$(CONFIG_DRM_OMAP) += omapdrm.o
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 11a5263a5e9f..0fd2eb139f6e 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -331,7 +331,8 @@ static void page_flip_cb(void *arg)
331 331
332static int omap_crtc_page_flip_locked(struct drm_crtc *crtc, 332static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
333 struct drm_framebuffer *fb, 333 struct drm_framebuffer *fb,
334 struct drm_pending_vblank_event *event) 334 struct drm_pending_vblank_event *event,
335 uint32_t page_flip_flags)
335{ 336{
336 struct drm_device *dev = crtc->dev; 337 struct drm_device *dev = crtc->dev;
337 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 338 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 9b794c933c81..acf667859cb6 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -871,7 +871,7 @@ int tiler_map_show(struct seq_file *s, void *arg)
871 goto error; 871 goto error;
872 872
873 for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) { 873 for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
874 memset(map, 0, sizeof(h_adj * sizeof(*map))); 874 memset(map, 0, h_adj * sizeof(*map));
875 memset(global_map, ' ', (w_adj + 1) * h_adj); 875 memset(global_map, ' ', (w_adj + 1) * h_adj);
876 876
877 for (i = 0; i < omap_dmm->container_height; i++) { 877 for (i = 0; i < omap_dmm->container_height; i++) {
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index a3004f12b9a3..2603d909f49c 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -419,7 +419,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
419 return ret; 419 return ret;
420} 420}
421 421
422static struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = { 422static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
423 DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH), 423 DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
424 DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 424 DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
425 DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH), 425 DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
@@ -524,12 +524,6 @@ static int dev_open(struct drm_device *dev, struct drm_file *file)
524 return 0; 524 return 0;
525} 525}
526 526
527static int dev_firstopen(struct drm_device *dev)
528{
529 DBG("firstopen: dev=%p", dev);
530 return 0;
531}
532
533/** 527/**
534 * lastclose - clean up after all DRM clients have exited 528 * lastclose - clean up after all DRM clients have exited
535 * @dev: DRM device 529 * @dev: DRM device
@@ -598,7 +592,6 @@ static const struct file_operations omapdriver_fops = {
598 .release = drm_release, 592 .release = drm_release,
599 .mmap = omap_gem_mmap, 593 .mmap = omap_gem_mmap,
600 .poll = drm_poll, 594 .poll = drm_poll,
601 .fasync = drm_fasync,
602 .read = drm_read, 595 .read = drm_read,
603 .llseek = noop_llseek, 596 .llseek = noop_llseek,
604}; 597};
@@ -609,7 +602,6 @@ static struct drm_driver omap_drm_driver = {
609 .load = dev_load, 602 .load = dev_load,
610 .unload = dev_unload, 603 .unload = dev_unload,
611 .open = dev_open, 604 .open = dev_open,
612 .firstopen = dev_firstopen,
613 .lastclose = dev_lastclose, 605 .lastclose = dev_lastclose,
614 .preclose = dev_preclose, 606 .preclose = dev_preclose,
615 .postclose = dev_postclose, 607 .postclose = dev_postclose,
@@ -633,7 +625,7 @@ static struct drm_driver omap_drm_driver = {
633 .gem_vm_ops = &omap_gem_vm_ops, 625 .gem_vm_ops = &omap_gem_vm_ops,
634 .dumb_create = omap_gem_dumb_create, 626 .dumb_create = omap_gem_dumb_create,
635 .dumb_map_offset = omap_gem_dumb_map_offset, 627 .dumb_map_offset = omap_gem_dumb_map_offset,
636 .dumb_destroy = omap_gem_dumb_destroy, 628 .dumb_destroy = drm_gem_dumb_destroy,
637 .ioctls = ioctls, 629 .ioctls = ioctls,
638 .num_ioctls = DRM_OMAP_NUM_IOCTLS, 630 .num_ioctls = DRM_OMAP_NUM_IOCTLS,
639 .fops = &omapdriver_fops, 631 .fops = &omapdriver_fops,
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 14f17da2ce25..30b95b736658 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -203,9 +203,8 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
203struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, 203struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
204 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); 204 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
205struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p); 205struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
206int omap_framebuffer_replace(struct drm_framebuffer *a, 206int omap_framebuffer_pin(struct drm_framebuffer *fb);
207 struct drm_framebuffer *b, void *arg, 207int omap_framebuffer_unpin(struct drm_framebuffer *fb);
208 void (*unpin)(void *arg, struct drm_gem_object *bo));
209void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, 208void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
210 struct omap_drm_window *win, struct omap_overlay_info *info); 209 struct omap_drm_window *win, struct omap_overlay_info *info);
211struct drm_connector *omap_framebuffer_get_next_connector( 210struct drm_connector *omap_framebuffer_get_next_connector(
@@ -225,8 +224,6 @@ int omap_gem_init_object(struct drm_gem_object *obj);
225void *omap_gem_vaddr(struct drm_gem_object *obj); 224void *omap_gem_vaddr(struct drm_gem_object *obj);
226int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, 225int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
227 uint32_t handle, uint64_t *offset); 226 uint32_t handle, uint64_t *offset);
228int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
229 uint32_t handle);
230int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev, 227int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
231 struct drm_mode_create_dumb *args); 228 struct drm_mode_create_dumb *args);
232int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma); 229int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 8031402e7951..f2b8f0668c0c 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -237,55 +237,49 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
237 } 237 }
238} 238}
239 239
240/* Call for unpin 'a' (if not NULL), and pin 'b' (if not NULL). Although 240/* pin, prepare for scanout: */
241 * buffers to unpin are just pushed to the unpin fifo so that the 241int omap_framebuffer_pin(struct drm_framebuffer *fb)
242 * caller can defer unpin until vblank.
243 *
244 * Note if this fails (ie. something went very wrong!), all buffers are
245 * unpinned, and the caller disables the overlay. We could have tried
246 * to revert back to the previous set of pinned buffers but if things are
247 * hosed there is no guarantee that would succeed.
248 */
249int omap_framebuffer_replace(struct drm_framebuffer *a,
250 struct drm_framebuffer *b, void *arg,
251 void (*unpin)(void *arg, struct drm_gem_object *bo))
252{ 242{
253 int ret = 0, i, na, nb; 243 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
254 struct omap_framebuffer *ofba = to_omap_framebuffer(a); 244 int ret, i, n = drm_format_num_planes(fb->pixel_format);
255 struct omap_framebuffer *ofbb = to_omap_framebuffer(b);
256 uint32_t pinned_mask = 0;
257 245
258 na = a ? drm_format_num_planes(a->pixel_format) : 0; 246 for (i = 0; i < n; i++) {
259 nb = b ? drm_format_num_planes(b->pixel_format) : 0; 247 struct plane *plane = &omap_fb->planes[i];
248 ret = omap_gem_get_paddr(plane->bo, &plane->paddr, true);
249 if (ret)
250 goto fail;
251 omap_gem_dma_sync(plane->bo, DMA_TO_DEVICE);
252 }
260 253
261 for (i = 0; i < max(na, nb); i++) { 254 return 0;
262 struct plane *pa, *pb;
263 255
264 pa = (i < na) ? &ofba->planes[i] : NULL; 256fail:
265 pb = (i < nb) ? &ofbb->planes[i] : NULL; 257 for (i--; i >= 0; i--) {
258 struct plane *plane = &omap_fb->planes[i];
259 omap_gem_put_paddr(plane->bo);
260 plane->paddr = 0;
261 }
266 262
267 if (pa) 263 return ret;
268 unpin(arg, pa->bo); 264}
269 265
270 if (pb && !ret) { 266/* unpin, no longer being scanned out: */
271 ret = omap_gem_get_paddr(pb->bo, &pb->paddr, true); 267int omap_framebuffer_unpin(struct drm_framebuffer *fb)
272 if (!ret) { 268{
273 omap_gem_dma_sync(pb->bo, DMA_TO_DEVICE); 269 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
274 pinned_mask |= (1 << i); 270 int ret, i, n = drm_format_num_planes(fb->pixel_format);
275 }
276 }
277 }
278 271
279 if (ret) { 272 for (i = 0; i < n; i++) {
280 /* something went wrong.. unpin what has been pinned */ 273 struct plane *plane = &omap_fb->planes[i];
281 for (i = 0; i < nb; i++) { 274 ret = omap_gem_put_paddr(plane->bo);
282 if (pinned_mask & (1 << i)) { 275 if (ret)
283 struct plane *pb = &ofba->planes[i]; 276 goto fail;
284 unpin(arg, pb->bo); 277 plane->paddr = 0;
285 }
286 }
287 } 278 }
288 279
280 return 0;
281
282fail:
289 return ret; 283 return ret;
290} 284}
291 285
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index ebbdf4132e9c..533f6ebec531 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -20,6 +20,7 @@
20 20
21#include <linux/spinlock.h> 21#include <linux/spinlock.h>
22#include <linux/shmem_fs.h> 22#include <linux/shmem_fs.h>
23#include <drm/drm_vma_manager.h>
23 24
24#include "omap_drv.h" 25#include "omap_drv.h"
25#include "omap_dmm_tiler.h" 26#include "omap_dmm_tiler.h"
@@ -236,7 +237,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
236 * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably 237 * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
237 * we actually want CMA memory for it all anyways.. 238 * we actually want CMA memory for it all anyways..
238 */ 239 */
239 pages = _drm_gem_get_pages(obj, GFP_KERNEL); 240 pages = drm_gem_get_pages(obj, GFP_KERNEL);
240 if (IS_ERR(pages)) { 241 if (IS_ERR(pages)) {
241 dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages)); 242 dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
242 return PTR_ERR(pages); 243 return PTR_ERR(pages);
@@ -270,7 +271,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
270 return 0; 271 return 0;
271 272
272free_pages: 273free_pages:
273 _drm_gem_put_pages(obj, pages, true, false); 274 drm_gem_put_pages(obj, pages, true, false);
274 275
275 return ret; 276 return ret;
276} 277}
@@ -294,7 +295,7 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
294 kfree(omap_obj->addrs); 295 kfree(omap_obj->addrs);
295 omap_obj->addrs = NULL; 296 omap_obj->addrs = NULL;
296 297
297 _drm_gem_put_pages(obj, omap_obj->pages, true, false); 298 drm_gem_put_pages(obj, omap_obj->pages, true, false);
298 omap_obj->pages = NULL; 299 omap_obj->pages = NULL;
299} 300}
300 301
@@ -308,21 +309,20 @@ uint32_t omap_gem_flags(struct drm_gem_object *obj)
308static uint64_t mmap_offset(struct drm_gem_object *obj) 309static uint64_t mmap_offset(struct drm_gem_object *obj)
309{ 310{
310 struct drm_device *dev = obj->dev; 311 struct drm_device *dev = obj->dev;
312 int ret;
313 size_t size;
311 314
312 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 315 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
313 316
314 if (!obj->map_list.map) { 317 /* Make it mmapable */
315 /* Make it mmapable */ 318 size = omap_gem_mmap_size(obj);
316 size_t size = omap_gem_mmap_size(obj); 319 ret = drm_gem_create_mmap_offset_size(obj, size);
317 int ret = _drm_gem_create_mmap_offset_size(obj, size); 320 if (ret) {
318 321 dev_err(dev->dev, "could not allocate mmap offset\n");
319 if (ret) { 322 return 0;
320 dev_err(dev->dev, "could not allocate mmap offset\n");
321 return 0;
322 }
323 } 323 }
324 324
325 return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT; 325 return drm_vma_node_offset_addr(&obj->vma_node);
326} 326}
327 327
328uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj) 328uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
@@ -629,21 +629,6 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
629} 629}
630 630
631/** 631/**
632 * omap_gem_dumb_destroy - destroy a dumb buffer
633 * @file: client file
634 * @dev: our DRM device
635 * @handle: the object handle
636 *
637 * Destroy a handle that was created via omap_gem_dumb_create.
638 */
639int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
640 uint32_t handle)
641{
642 /* No special work needed, drop the reference and see what falls out */
643 return drm_gem_handle_delete(file, handle);
644}
645
646/**
647 * omap_gem_dumb_map - buffer mapping for dumb interface 632 * omap_gem_dumb_map - buffer mapping for dumb interface
648 * @file: our drm client file 633 * @file: our drm client file
649 * @dev: drm device 634 * @dev: drm device
@@ -997,12 +982,11 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
997{ 982{
998 struct drm_device *dev = obj->dev; 983 struct drm_device *dev = obj->dev;
999 struct omap_gem_object *omap_obj = to_omap_bo(obj); 984 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1000 uint64_t off = 0; 985 uint64_t off;
1001 986
1002 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 987 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
1003 988
1004 if (obj->map_list.map) 989 off = drm_vma_node_start(&obj->vma_node);
1005 off = (uint64_t)obj->map_list.hash.key;
1006 990
1007 seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d", 991 seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
1008 omap_obj->flags, obj->name, obj->refcount.refcount.counter, 992 omap_obj->flags, obj->name, obj->refcount.refcount.counter,
@@ -1309,8 +1293,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
1309 1293
1310 list_del(&omap_obj->mm_list); 1294 list_del(&omap_obj->mm_list);
1311 1295
1312 if (obj->map_list.map) 1296 drm_gem_free_mmap_offset(obj);
1313 drm_gem_free_mmap_offset(obj);
1314 1297
1315 /* this means the object is still pinned.. which really should 1298 /* this means the object is still pinned.. which really should
1316 * not happen. I think.. 1299 * not happen. I think..
@@ -1427,8 +1410,9 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1427 omap_obj->height = gsize.tiled.height; 1410 omap_obj->height = gsize.tiled.height;
1428 } 1411 }
1429 1412
1413 ret = 0;
1430 if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) 1414 if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM))
1431 ret = drm_gem_private_object_init(dev, obj, size); 1415 drm_gem_private_object_init(dev, obj, size);
1432 else 1416 else
1433 ret = drm_gem_object_init(dev, obj, size); 1417 ret = drm_gem_object_init(dev, obj, size);
1434 1418
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_helpers.c b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
deleted file mode 100644
index f9eb679eb79b..000000000000
--- a/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
+++ /dev/null
@@ -1,169 +0,0 @@
1/*
2 * drivers/gpu/drm/omapdrm/omap_gem_helpers.c
3 *
4 * Copyright (C) 2011 Texas Instruments
5 * Author: Rob Clark <rob.clark@linaro.org>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20/* temporary copy of drm_gem_{get,put}_pages() until the
21 * "drm/gem: add functions to get/put pages" patch is merged..
22 */
23
24#include <linux/module.h>
25#include <linux/types.h>
26#include <linux/shmem_fs.h>
27
28#include <drm/drmP.h>
29
30/**
31 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
32 * @obj: obj in question
33 * @gfpmask: gfp mask of requested pages
34 */
35struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
36{
37 struct inode *inode;
38 struct address_space *mapping;
39 struct page *p, **pages;
40 int i, npages;
41
42 /* This is the shared memory object that backs the GEM resource */
43 inode = file_inode(obj->filp);
44 mapping = inode->i_mapping;
45
46 npages = obj->size >> PAGE_SHIFT;
47
48 pages = drm_malloc_ab(npages, sizeof(struct page *));
49 if (pages == NULL)
50 return ERR_PTR(-ENOMEM);
51
52 gfpmask |= mapping_gfp_mask(mapping);
53
54 for (i = 0; i < npages; i++) {
55 p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
56 if (IS_ERR(p))
57 goto fail;
58 pages[i] = p;
59
60 /* There is a hypothetical issue w/ drivers that require
61 * buffer memory in the low 4GB.. if the pages are un-
62 * pinned, and swapped out, they can end up swapped back
63 * in above 4GB. If pages are already in memory, then
64 * shmem_read_mapping_page_gfp will ignore the gfpmask,
65 * even if the already in-memory page disobeys the mask.
66 *
67 * It is only a theoretical issue today, because none of
68 * the devices with this limitation can be populated with
69 * enough memory to trigger the issue. But this BUG_ON()
70 * is here as a reminder in case the problem with
71 * shmem_read_mapping_page_gfp() isn't solved by the time
72 * it does become a real issue.
73 *
74 * See this thread: http://lkml.org/lkml/2011/7/11/238
75 */
76 BUG_ON((gfpmask & __GFP_DMA32) &&
77 (page_to_pfn(p) >= 0x00100000UL));
78 }
79
80 return pages;
81
82fail:
83 while (i--)
84 page_cache_release(pages[i]);
85
86 drm_free_large(pages);
87 return ERR_CAST(p);
88}
89
90/**
91 * drm_gem_put_pages - helper to free backing pages for a GEM object
92 * @obj: obj in question
93 * @pages: pages to free
94 */
95void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
96 bool dirty, bool accessed)
97{
98 int i, npages;
99
100 npages = obj->size >> PAGE_SHIFT;
101
102 for (i = 0; i < npages; i++) {
103 if (dirty)
104 set_page_dirty(pages[i]);
105
106 if (accessed)
107 mark_page_accessed(pages[i]);
108
109 /* Undo the reference we took when populating the table */
110 page_cache_release(pages[i]);
111 }
112
113 drm_free_large(pages);
114}
115
116int
117_drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
118{
119 struct drm_device *dev = obj->dev;
120 struct drm_gem_mm *mm = dev->mm_private;
121 struct drm_map_list *list;
122 struct drm_local_map *map;
123 int ret = 0;
124
125 /* Set the object up for mmap'ing */
126 list = &obj->map_list;
127 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
128 if (!list->map)
129 return -ENOMEM;
130
131 map = list->map;
132 map->type = _DRM_GEM;
133 map->size = size;
134 map->handle = obj;
135
136 /* Get a DRM GEM mmap offset allocated... */
137 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
138 size / PAGE_SIZE, 0, 0);
139
140 if (!list->file_offset_node) {
141 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
142 ret = -ENOSPC;
143 goto out_free_list;
144 }
145
146 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
147 size / PAGE_SIZE, 0);
148 if (!list->file_offset_node) {
149 ret = -ENOMEM;
150 goto out_free_list;
151 }
152
153 list->hash.key = list->file_offset_node->start;
154 ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
155 if (ret) {
156 DRM_ERROR("failed to add to map hash\n");
157 goto out_free_mm;
158 }
159
160 return 0;
161
162out_free_mm:
163 drm_mm_put_block(list->file_offset_node);
164out_free_list:
165 kfree(list->map);
166 list->map = NULL;
167
168 return ret;
169}
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 8d225d7ff4e3..046d5e660c04 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -17,7 +17,7 @@
17 * this program. If not, see <http://www.gnu.org/licenses/>. 17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 18 */
19 19
20#include <linux/kfifo.h> 20#include "drm_flip_work.h"
21 21
22#include "omap_drv.h" 22#include "omap_drv.h"
23#include "omap_dmm_tiler.h" 23#include "omap_dmm_tiler.h"
@@ -58,26 +58,23 @@ struct omap_plane {
58 58
59 struct omap_drm_irq error_irq; 59 struct omap_drm_irq error_irq;
60 60
61 /* set of bo's pending unpin until next post_apply() */ 61 /* for deferring bo unpin's until next post_apply(): */
62 DECLARE_KFIFO_PTR(unpin_fifo, struct drm_gem_object *); 62 struct drm_flip_work unpin_work;
63 63
64 // XXX maybe get rid of this and handle vblank in crtc too? 64 // XXX maybe get rid of this and handle vblank in crtc too?
65 struct callback apply_done_cb; 65 struct callback apply_done_cb;
66}; 66};
67 67
68static void unpin(void *arg, struct drm_gem_object *bo) 68static void unpin_worker(struct drm_flip_work *work, void *val)
69{ 69{
70 struct drm_plane *plane = arg; 70 struct omap_plane *omap_plane =
71 struct omap_plane *omap_plane = to_omap_plane(plane); 71 container_of(work, struct omap_plane, unpin_work);
72 struct drm_device *dev = omap_plane->base.dev;
72 73
73 if (kfifo_put(&omap_plane->unpin_fifo, 74 omap_framebuffer_unpin(val);
74 (const struct drm_gem_object **)&bo)) { 75 mutex_lock(&dev->mode_config.mutex);
75 /* also hold a ref so it isn't free'd while pinned */ 76 drm_framebuffer_unreference(val);
76 drm_gem_object_reference(bo); 77 mutex_unlock(&dev->mode_config.mutex);
77 } else {
78 dev_err(plane->dev->dev, "unpin fifo full!\n");
79 omap_gem_put_paddr(bo);
80 }
81} 78}
82 79
83/* update which fb (if any) is pinned for scanout */ 80/* update which fb (if any) is pinned for scanout */
@@ -87,23 +84,22 @@ static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
87 struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb; 84 struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb;
88 85
89 if (pinned_fb != fb) { 86 if (pinned_fb != fb) {
90 int ret; 87 int ret = 0;
91 88
92 DBG("%p -> %p", pinned_fb, fb); 89 DBG("%p -> %p", pinned_fb, fb);
93 90
94 if (fb) 91 if (fb) {
95 drm_framebuffer_reference(fb); 92 drm_framebuffer_reference(fb);
96 93 ret = omap_framebuffer_pin(fb);
97 ret = omap_framebuffer_replace(pinned_fb, fb, plane, unpin); 94 }
98 95
99 if (pinned_fb) 96 if (pinned_fb)
100 drm_framebuffer_unreference(pinned_fb); 97 drm_flip_work_queue(&omap_plane->unpin_work, pinned_fb);
101 98
102 if (ret) { 99 if (ret) {
103 dev_err(plane->dev->dev, "could not swap %p -> %p\n", 100 dev_err(plane->dev->dev, "could not swap %p -> %p\n",
104 omap_plane->pinned_fb, fb); 101 omap_plane->pinned_fb, fb);
105 if (fb) 102 drm_framebuffer_unreference(fb);
106 drm_framebuffer_unreference(fb);
107 omap_plane->pinned_fb = NULL; 103 omap_plane->pinned_fb = NULL;
108 return ret; 104 return ret;
109 } 105 }
@@ -170,17 +166,14 @@ static void omap_plane_post_apply(struct omap_drm_apply *apply)
170 struct omap_plane *omap_plane = 166 struct omap_plane *omap_plane =
171 container_of(apply, struct omap_plane, apply); 167 container_of(apply, struct omap_plane, apply);
172 struct drm_plane *plane = &omap_plane->base; 168 struct drm_plane *plane = &omap_plane->base;
169 struct omap_drm_private *priv = plane->dev->dev_private;
173 struct omap_overlay_info *info = &omap_plane->info; 170 struct omap_overlay_info *info = &omap_plane->info;
174 struct drm_gem_object *bo = NULL;
175 struct callback cb; 171 struct callback cb;
176 172
177 cb = omap_plane->apply_done_cb; 173 cb = omap_plane->apply_done_cb;
178 omap_plane->apply_done_cb.fxn = NULL; 174 omap_plane->apply_done_cb.fxn = NULL;
179 175
180 while (kfifo_get(&omap_plane->unpin_fifo, &bo)) { 176 drm_flip_work_commit(&omap_plane->unpin_work, priv->wq);
181 omap_gem_put_paddr(bo);
182 drm_gem_object_unreference_unlocked(bo);
183 }
184 177
185 if (cb.fxn) 178 if (cb.fxn)
186 cb.fxn(cb.arg); 179 cb.fxn(cb.arg);
@@ -277,8 +270,7 @@ static void omap_plane_destroy(struct drm_plane *plane)
277 omap_plane_disable(plane); 270 omap_plane_disable(plane);
278 drm_plane_cleanup(plane); 271 drm_plane_cleanup(plane);
279 272
280 WARN_ON(!kfifo_is_empty(&omap_plane->unpin_fifo)); 273 drm_flip_work_cleanup(&omap_plane->unpin_work);
281 kfifo_free(&omap_plane->unpin_fifo);
282 274
283 kfree(omap_plane); 275 kfree(omap_plane);
284} 276}
@@ -399,7 +391,8 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
399 if (!omap_plane) 391 if (!omap_plane)
400 goto fail; 392 goto fail;
401 393
402 ret = kfifo_alloc(&omap_plane->unpin_fifo, 16, GFP_KERNEL); 394 ret = drm_flip_work_init(&omap_plane->unpin_work, 16,
395 "unpin", unpin_worker);
403 if (ret) { 396 if (ret) {
404 dev_err(dev->dev, "could not allocate unpin FIFO\n"); 397 dev_err(dev->dev, "could not allocate unpin FIFO\n");
405 goto fail; 398 goto fail;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index df0b577a6608..514118ae72d4 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -84,7 +84,6 @@ static const struct file_operations qxl_fops = {
84 .release = drm_release, 84 .release = drm_release,
85 .unlocked_ioctl = drm_ioctl, 85 .unlocked_ioctl = drm_ioctl,
86 .poll = drm_poll, 86 .poll = drm_poll,
87 .fasync = drm_fasync,
88 .mmap = qxl_mmap, 87 .mmap = qxl_mmap,
89}; 88};
90 89
@@ -221,7 +220,7 @@ static struct drm_driver qxl_driver = {
221 220
222 .dumb_create = qxl_mode_dumb_create, 221 .dumb_create = qxl_mode_dumb_create,
223 .dumb_map_offset = qxl_mode_dumb_mmap, 222 .dumb_map_offset = qxl_mode_dumb_mmap,
224 .dumb_destroy = qxl_mode_dumb_destroy, 223 .dumb_destroy = drm_gem_dumb_destroy,
225#if defined(CONFIG_DEBUG_FS) 224#if defined(CONFIG_DEBUG_FS)
226 .debugfs_init = qxl_debugfs_init, 225 .debugfs_init = qxl_debugfs_init,
227 .debugfs_cleanup = qxl_debugfs_takedown, 226 .debugfs_cleanup = qxl_debugfs_takedown,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 7e96f4f11738..f7c9adde46a0 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -328,7 +328,7 @@ struct qxl_device {
328/* forward declaration for QXL_INFO_IO */ 328/* forward declaration for QXL_INFO_IO */
329void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...); 329void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);
330 330
331extern struct drm_ioctl_desc qxl_ioctls[]; 331extern const struct drm_ioctl_desc qxl_ioctls[];
332extern int qxl_max_ioctl; 332extern int qxl_max_ioctl;
333 333
334int qxl_driver_load(struct drm_device *dev, unsigned long flags); 334int qxl_driver_load(struct drm_device *dev, unsigned long flags);
@@ -405,9 +405,6 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
405 bool discardable, bool kernel, 405 bool discardable, bool kernel,
406 struct qxl_surface *surf, 406 struct qxl_surface *surf,
407 struct drm_gem_object **obj); 407 struct drm_gem_object **obj);
408int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
409 uint64_t *gpu_addr);
410void qxl_gem_object_unpin(struct drm_gem_object *obj);
411int qxl_gem_object_create_with_handle(struct qxl_device *qdev, 408int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
412 struct drm_file *file_priv, 409 struct drm_file *file_priv,
413 u32 domain, 410 u32 domain,
@@ -427,9 +424,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
427int qxl_mode_dumb_create(struct drm_file *file_priv, 424int qxl_mode_dumb_create(struct drm_file *file_priv,
428 struct drm_device *dev, 425 struct drm_device *dev,
429 struct drm_mode_create_dumb *args); 426 struct drm_mode_create_dumb *args);
430int qxl_mode_dumb_destroy(struct drm_file *file_priv,
431 struct drm_device *dev,
432 uint32_t handle);
433int qxl_mode_dumb_mmap(struct drm_file *filp, 427int qxl_mode_dumb_mmap(struct drm_file *filp,
434 struct drm_device *dev, 428 struct drm_device *dev,
435 uint32_t handle, uint64_t *offset_p); 429 uint32_t handle, uint64_t *offset_p);
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index 847c4ee798f7..d34bb4130ff0 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -68,13 +68,6 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
68 return 0; 68 return 0;
69} 69}
70 70
71int qxl_mode_dumb_destroy(struct drm_file *file_priv,
72 struct drm_device *dev,
73 uint32_t handle)
74{
75 return drm_gem_handle_delete(file_priv, handle);
76}
77
78int qxl_mode_dumb_mmap(struct drm_file *file_priv, 71int qxl_mode_dumb_mmap(struct drm_file *file_priv,
79 struct drm_device *dev, 72 struct drm_device *dev,
80 uint32_t handle, uint64_t *offset_p) 73 uint32_t handle, uint64_t *offset_p)
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index 25e1777fb0a2..1648e4125af7 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -101,32 +101,6 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
101 return 0; 101 return 0;
102} 102}
103 103
104int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
105 uint64_t *gpu_addr)
106{
107 struct qxl_bo *qobj = obj->driver_private;
108 int r;
109
110 r = qxl_bo_reserve(qobj, false);
111 if (unlikely(r != 0))
112 return r;
113 r = qxl_bo_pin(qobj, pin_domain, gpu_addr);
114 qxl_bo_unreserve(qobj);
115 return r;
116}
117
118void qxl_gem_object_unpin(struct drm_gem_object *obj)
119{
120 struct qxl_bo *qobj = obj->driver_private;
121 int r;
122
123 r = qxl_bo_reserve(qobj, false);
124 if (likely(r == 0)) {
125 qxl_bo_unpin(qobj);
126 qxl_bo_unreserve(qobj);
127 }
128}
129
130int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv) 104int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
131{ 105{
132 return 0; 106 return 0;
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 6de33563d6f1..7b95c75e9626 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -433,7 +433,7 @@ static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
433 return ret; 433 return ret;
434} 434}
435 435
436struct drm_ioctl_desc qxl_ioctls[] = { 436const struct drm_ioctl_desc qxl_ioctls[] = {
437 DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED), 437 DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),
438 438
439 DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED), 439 DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index aa161cddd87e..8691c76c5ef0 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -98,7 +98,6 @@ int qxl_bo_create(struct qxl_device *qdev,
98 kfree(bo); 98 kfree(bo);
99 return r; 99 return r;
100 } 100 }
101 bo->gem_base.driver_private = NULL;
102 bo->type = domain; 101 bo->type = domain;
103 bo->pin_count = pinned ? 1 : 0; 102 bo->pin_count = pinned ? 1 : 0;
104 bo->surface_id = 0; 103 bo->surface_id = 0;
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index 8cb6167038e5..d458a140c024 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -59,7 +59,7 @@ static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
59 59
60static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo) 60static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
61{ 61{
62 return bo->tbo.addr_space_offset; 62 return drm_vma_node_offset_addr(&bo->tbo.vma_node);
63} 63}
64 64
65static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type, 65static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
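The mmap offset now comes from the common VMA offset manager node embedded in the TTM buffer object instead of TTM's old addr_space_offset field; the value is still the "fake offset" handed back through qxl_mode_dumb_mmap()'s offset_p argument. A userspace-side sketch of how that offset is consumed (drm_fd, size and mmap_offset are placeholders for values obtained from the driver):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    /* map a dumb buffer: mmap_offset is the fake offset returned by the
     * driver's dumb-mmap ioctl, drm_fd the open DRM device node */
    static void *map_dumb_buffer(int drm_fd, size_t size, uint64_t mmap_offset)
    {
            return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        drm_fd, (off_t)mmap_offset);
    }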
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index b61449e52cd5..0109a9644cb2 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -88,7 +88,7 @@ qxl_release_free(struct qxl_device *qdev,
88 list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) { 88 list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) {
89 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); 89 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
90 QXL_INFO(qdev, "release %llx\n", 90 QXL_INFO(qdev, "release %llx\n",
91 entry->tv.bo->addr_space_offset 91 drm_vma_node_offset_addr(&entry->tv.bo->vma_node)
92 - DRM_FILE_OFFSET); 92 - DRM_FILE_OFFSET);
93 qxl_fence_remove_release(&bo->fence, release->id); 93 qxl_fence_remove_release(&bo->fence, release->id);
94 qxl_bo_unref(&bo); 94 qxl_bo_unref(&bo);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 1dfd84cda2a1..037786d7c1dc 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -212,7 +212,9 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
212 212
213static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp) 213static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
214{ 214{
215 return 0; 215 struct qxl_bo *qbo = to_qxl_bo(bo);
216
217 return drm_vma_node_verify_access(&qbo->gem_base.vma_node, filp);
216} 218}
217 219
218static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, 220static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
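verify_access() used to let any opener mmap any qxl BO; it now asks the VMA node whether this particular struct file was granted access. For that check to succeed, something must register the opener against the node when a handle is created and remove it again on handle destruction; a sketch of that pairing, assuming the drm_vma_manager API added alongside this change uses the allow/revoke naming (typically called by the GEM core rather than the driver):

    /* on handle creation */
    ret = drm_vma_node_allow(&qobj->gem_base.vma_node, filp);

    /* on handle destruction / file close */
    drm_vma_node_revoke(&qobj->gem_base.vma_node, filp);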
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index d4660cf942a5..c451257f08fb 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -540,7 +540,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
540 dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle 540 dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle
541 + init->ring_size / sizeof(u32)); 541 + init->ring_size / sizeof(u32));
542 dev_priv->ring.size = init->ring_size; 542 dev_priv->ring.size = init->ring_size;
543 dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8); 543 dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
544 544
545 dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; 545 dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
546 546
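drm_order() was DRM's private copy of a log2 helper; order_base_2() from <linux/log2.h> computes the same rounded-up log2, so the conversion is mechanical. A small sketch using the ring setup above as the example:

    #include <linux/log2.h>

    static u32 ring_size_l2qw(u32 ring_size)
    {
            /* log2 of the number of 8-byte (quad-word) ring entries, rounded up;
             * equivalent to the old drm_order(ring_size / 8) */
            return order_base_2(ring_size / 8);
    }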
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 472c38fe123f..5bd307cd8da1 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -48,7 +48,6 @@ static const struct file_operations r128_driver_fops = {
48 .unlocked_ioctl = drm_ioctl, 48 .unlocked_ioctl = drm_ioctl,
49 .mmap = drm_mmap, 49 .mmap = drm_mmap,
50 .poll = drm_poll, 50 .poll = drm_poll,
51 .fasync = drm_fasync,
52#ifdef CONFIG_COMPAT 51#ifdef CONFIG_COMPAT
53 .compat_ioctl = r128_compat_ioctl, 52 .compat_ioctl = r128_compat_ioctl,
54#endif 53#endif
@@ -57,7 +56,7 @@ static const struct file_operations r128_driver_fops = {
57 56
58static struct drm_driver driver = { 57static struct drm_driver driver = {
59 .driver_features = 58 .driver_features =
60 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 59 DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
61 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, 60 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
62 .dev_priv_size = sizeof(drm_r128_buf_priv_t), 61 .dev_priv_size = sizeof(drm_r128_buf_priv_t),
63 .load = r128_driver_load, 62 .load = r128_driver_load,
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 930c71b2fb5e..56eb5e3f5439 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -131,7 +131,7 @@ typedef struct drm_r128_buf_priv {
131 drm_r128_freelist_t *list_entry; 131 drm_r128_freelist_t *list_entry;
132} drm_r128_buf_priv_t; 132} drm_r128_buf_priv_t;
133 133
134extern struct drm_ioctl_desc r128_ioctls[]; 134extern const struct drm_ioctl_desc r128_ioctls[];
135extern int r128_max_ioctl; 135extern int r128_max_ioctl;
136 136
137 /* r128_cce.c */ 137 /* r128_cce.c */
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 19bb7e6f3d9a..01dd9aef9f0e 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -1643,7 +1643,7 @@ void r128_driver_lastclose(struct drm_device *dev)
1643 r128_do_cleanup_cce(dev); 1643 r128_do_cleanup_cce(dev);
1644} 1644}
1645 1645
1646struct drm_ioctl_desc r128_ioctls[] = { 1646const struct drm_ioctl_desc r128_ioctls[] = {
1647 DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1647 DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1648 DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1648 DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1649 DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1649 DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index c3df52c1a60c..306364a1ecda 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -72,14 +72,32 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
72 radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ 72 radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
73 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ 73 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
74 r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \ 74 r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \
75 r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ 75 radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
76 evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \ 76 evergreen.o evergreen_cs.o evergreen_blit_shaders.o \
77 evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \ 77 evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
78 atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \ 78 atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
79 si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \ 79 si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \
80 r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ 80 r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
81 rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ 81 rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
82 trinity_smc.o ni_dpm.o si_smc.o si_dpm.o 82 trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
83 ci_dpm.o dce6_afmt.o
84
85# add async DMA block
86radeon-y += \
87 r600_dma.o \
88 rv770_dma.o \
89 evergreen_dma.o \
90 ni_dma.o \
91 si_dma.o \
92 cik_sdma.o \
93
94# add UVD block
95radeon-y += \
96 radeon_uvd.o \
97 uvd_v1_0.o \
98 uvd_v2_2.o \
99 uvd_v3_1.o \
100 uvd_v4_2.o
83 101
84radeon-$(CONFIG_COMPAT) += radeon_ioc32.o 102radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
85radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o 103radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 16b120c3f144..af10f8571d87 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -7661,618 +7661,6 @@ typedef struct _ATOM_POWERPLAY_INFO_V3
7661 ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; 7661 ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
7662}ATOM_POWERPLAY_INFO_V3; 7662}ATOM_POWERPLAY_INFO_V3;
7663 7663
7664/* New PPlib */
7665/**************************************************************************/
7666typedef struct _ATOM_PPLIB_THERMALCONTROLLER
7667
7668{
7669 UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_*
7670 UCHAR ucI2cLine; // as interpreted by DAL I2C
7671 UCHAR ucI2cAddress;
7672 UCHAR ucFanParameters; // Fan Control Parameters.
7673 UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only.
7674 UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only.
7675 UCHAR ucReserved; // ----
7676 UCHAR ucFlags; // to be defined
7677} ATOM_PPLIB_THERMALCONTROLLER;
7678
7679#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
7680#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller.
7681
7682#define ATOM_PP_THERMALCONTROLLER_NONE 0
7683#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib
7684#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib
7685#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib
7686#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib
7687#define ATOM_PP_THERMALCONTROLLER_LM64 5
7688#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib
7689#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
7690#define ATOM_PP_THERMALCONTROLLER_RV770 8
7691#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
7692#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
7693#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
7694#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
7695#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally
7696#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15
7697#define ATOM_PP_THERMALCONTROLLER_SISLANDS 16
7698#define ATOM_PP_THERMALCONTROLLER_LM96163 17
7699#define ATOM_PP_THERMALCONTROLLER_CISLANDS 18
7700
7701// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
7702// We probably should reserve the bit 0x80 for this use.
7703// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
7704// The driver can pick the correct internal controller based on the ASIC.
7705
7706#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
7707#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller
7708
7709typedef struct _ATOM_PPLIB_STATE
7710{
7711 UCHAR ucNonClockStateIndex;
7712 UCHAR ucClockStateIndices[1]; // variable-sized
7713} ATOM_PPLIB_STATE;
7714
7715
7716typedef struct _ATOM_PPLIB_FANTABLE
7717{
7718 UCHAR ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same.
7719 UCHAR ucTHyst; // Temperature hysteresis. Integer.
7720 USHORT usTMin; // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM.
7721 USHORT usTMed; // The middle temperature where we change slopes.
7722 USHORT usTHigh; // The high point above TMed for adjusting the second slope.
7723 USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments).
7724 USHORT usPWMMed; // The PWM value (in percent) at TMed.
7725 USHORT usPWMHigh; // The PWM value at THigh.
7726} ATOM_PPLIB_FANTABLE;
7727
7728typedef struct _ATOM_PPLIB_FANTABLE2
7729{
7730 ATOM_PPLIB_FANTABLE basicTable;
7731 USHORT usTMax; // The max temperature
7732} ATOM_PPLIB_FANTABLE2;
7733
7734typedef struct _ATOM_PPLIB_EXTENDEDHEADER
7735{
7736 USHORT usSize;
7737 ULONG ulMaxEngineClock; // For Overdrive.
7738 ULONG ulMaxMemoryClock; // For Overdrive.
7739 // Add extra system parameters here, always adjust size to include all fields.
7740 USHORT usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
7741 USHORT usUVDTableOffset; //points to ATOM_PPLIB_UVD_Table
7742 USHORT usSAMUTableOffset; //points to ATOM_PPLIB_SAMU_Table
7743 USHORT usPPMTableOffset; //points to ATOM_PPLIB_PPM_Table
7744} ATOM_PPLIB_EXTENDEDHEADER;
7745
7746//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
7747#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
7748#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
7749#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
7750#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
7751#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
7752#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
7753#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
7754#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
7755#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
7756#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
7757#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
7758#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
7759#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
7760#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition.
7761#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
7762#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC.
7763#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
7764#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver supports BACO state.
7765#define ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE 0x00040000 // Does the driver supports new CAC voltage table.
7766#define ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY 0x00080000 // Does the driver supports revert GPIO5 polarity.
7767#define ATOM_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17 0x00100000 // Does the driver supports thermal2GPIO17.
7768#define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE 0x00200000 // Does the driver supports VR HOT GPIO Configurable.
7769
7770typedef struct _ATOM_PPLIB_POWERPLAYTABLE
7771{
7772 ATOM_COMMON_TABLE_HEADER sHeader;
7773
7774 UCHAR ucDataRevision;
7775
7776 UCHAR ucNumStates;
7777 UCHAR ucStateEntrySize;
7778 UCHAR ucClockInfoSize;
7779 UCHAR ucNonClockSize;
7780
7781 // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
7782 USHORT usStateArrayOffset;
7783
7784 // offset from start of this table to array of ASIC-specific structures,
7785 // currently ATOM_PPLIB_CLOCK_INFO.
7786 USHORT usClockInfoArrayOffset;
7787
7788 // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
7789 USHORT usNonClockInfoArrayOffset;
7790
7791 USHORT usBackbiasTime; // in microseconds
7792 USHORT usVoltageTime; // in microseconds
7793 USHORT usTableSize; //the size of this structure, or the extended structure
7794
7795 ULONG ulPlatformCaps; // See ATOM_PPLIB_CAPS_*
7796
7797 ATOM_PPLIB_THERMALCONTROLLER sThermalController;
7798
7799 USHORT usBootClockInfoOffset;
7800 USHORT usBootNonClockInfoOffset;
7801
7802} ATOM_PPLIB_POWERPLAYTABLE;
7803
7804typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
7805{
7806 ATOM_PPLIB_POWERPLAYTABLE basicTable;
7807 UCHAR ucNumCustomThermalPolicy;
7808 USHORT usCustomThermalPolicyArrayOffset;
7809}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
7810
7811typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
7812{
7813 ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
7814 USHORT usFormatID; // To be used ONLY by PPGen.
7815 USHORT usFanTableOffset;
7816 USHORT usExtendendedHeaderOffset;
7817} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
7818
7819typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
7820{
7821 ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
7822 ULONG ulGoldenPPID; // PPGen use only
7823 ULONG ulGoldenRevision; // PPGen use only
7824 USHORT usVddcDependencyOnSCLKOffset;
7825 USHORT usVddciDependencyOnMCLKOffset;
7826 USHORT usVddcDependencyOnMCLKOffset;
7827 USHORT usMaxClockVoltageOnDCOffset;
7828 USHORT usVddcPhaseShedLimitsTableOffset; // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
7829 USHORT usMvddDependencyOnMCLKOffset;
7830} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
7831
7832typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
7833{
7834 ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
7835 ULONG ulTDPLimit;
7836 ULONG ulNearTDPLimit;
7837 ULONG ulSQRampingThreshold;
7838 USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table
7839 ULONG ulCACLeakage; // The iLeakage for driver calculated CAC leakage table
7840 USHORT usTDPODLimit;
7841 USHORT usLoadLineSlope; // in milliOhms * 100
7842} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
7843
7844//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
7845#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
7846#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
7847#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0
7848#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1
7849#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3
7850#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5
7851// 2, 4, 6, 7 are reserved
7852
7853#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008
7854#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010
7855#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020
7856#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040
7857#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080
7858#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100
7859#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200
7860#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
7861#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
7862#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
7863#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000
7864#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
7865#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
7866
7867//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
7868#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
7869#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002
7870#define ATOM_PPLIB_CLASSIFICATION2_MVC 0x0004 //Multi-View Codec (BD-3D)
7871
7872//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
7873#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
7874#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
7875
7876// 0 is 2.5Gb/s, 1 is 5Gb/s
7877#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004
7878#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2
7879
7880// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec
7881#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8
7882#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3
7883
7884// lookup into reduced refresh-rate table
7885#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00
7886#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
7887
7888#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0
7889#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1
7890// 2-15 TBD as needed.
7891
7892#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
7893#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
7894
7895#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
7896
7897#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
7898
7899//memory related flags
7900#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x000010000
7901
7902//M3 Arb //2bits, current 3 sets of parameters in total
7903#define ATOM_PPLIB_M3ARB_MASK 0x00060000
7904#define ATOM_PPLIB_M3ARB_SHIFT 17
7905
7906#define ATOM_PPLIB_ENABLE_DRR 0x00080000
7907
7908// remaining 16 bits are reserved
7909typedef struct _ATOM_PPLIB_THERMAL_STATE
7910{
7911 UCHAR ucMinTemperature;
7912 UCHAR ucMaxTemperature;
7913 UCHAR ucThermalAction;
7914}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
7915
7916// Contained in an array starting at the offset
7917// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
7918// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
7919#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
7920#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
7921typedef struct _ATOM_PPLIB_NONCLOCK_INFO
7922{
7923 USHORT usClassification;
7924 UCHAR ucMinTemperature;
7925 UCHAR ucMaxTemperature;
7926 ULONG ulCapsAndSettings;
7927 UCHAR ucRequiredPower;
7928 USHORT usClassification2;
7929 ULONG ulVCLK;
7930 ULONG ulDCLK;
7931 UCHAR ucUnused[5];
7932} ATOM_PPLIB_NONCLOCK_INFO;
7933
7934// Contained in an array starting at the offset
7935// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
7936// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
7937typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
7938{
7939 USHORT usEngineClockLow;
7940 UCHAR ucEngineClockHigh;
7941
7942 USHORT usMemoryClockLow;
7943 UCHAR ucMemoryClockHigh;
7944
7945 USHORT usVDDC;
7946 USHORT usUnused1;
7947 USHORT usUnused2;
7948
7949 ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
7950
7951} ATOM_PPLIB_R600_CLOCK_INFO;
7952
7953// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
7954#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1
7955#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2
7956#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
7957#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
7958#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
7959#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0).
7960
7961typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
7962{
7963 USHORT usEngineClockLow;
7964 UCHAR ucEngineClockHigh;
7965
7966 USHORT usMemoryClockLow;
7967 UCHAR ucMemoryClockHigh;
7968
7969 USHORT usVDDC;
7970 USHORT usVDDCI;
7971 USHORT usUnused;
7972
7973 ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
7974
7975} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
7976
7977typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
7978{
7979 USHORT usEngineClockLow;
7980 UCHAR ucEngineClockHigh;
7981
7982 USHORT usMemoryClockLow;
7983 UCHAR ucMemoryClockHigh;
7984
7985 USHORT usVDDC;
7986 USHORT usVDDCI;
7987 UCHAR ucPCIEGen;
7988 UCHAR ucUnused1;
7989
7990 ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
7991
7992} ATOM_PPLIB_SI_CLOCK_INFO;
7993
7994typedef struct _ATOM_PPLIB_CI_CLOCK_INFO
7995{
7996 USHORT usEngineClockLow;
7997 UCHAR ucEngineClockHigh;
7998
7999 USHORT usMemoryClockLow;
8000 UCHAR ucMemoryClockHigh;
8001
8002 UCHAR ucPCIEGen;
8003 USHORT usPCIELane;
8004} ATOM_PPLIB_CI_CLOCK_INFO;
8005
8006typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
8007
8008{
8009 USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600).
8010 UCHAR ucLowEngineClockHigh;
8011 USHORT usHighEngineClockLow; // High Engine clock in MHz.
8012 UCHAR ucHighEngineClockHigh;
8013 USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
8014 UCHAR ucMemoryClockHigh; // Currently unused.
8015 UCHAR ucPadding; // For proper alignment and size.
8016 USHORT usVDDC; // For the 780, use: None, Low, High, Variable
8017 UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
8018 UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum downstream width may be bigger depending on display BW requirement.
8019 USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
8020 ULONG ulFlags;
8021} ATOM_PPLIB_RS780_CLOCK_INFO;
8022
8023#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
8024#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
8025#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
8026#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
8027
8028#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
8029#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
8030#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
8031
8032#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
8033#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
8034#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
8035
8036typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
8037 USHORT usEngineClockLow; //clockfrequency & 0xFFFF. The unit is in 10khz
8038 UCHAR ucEngineClockHigh; //clockfrequency >> 16.
8039 UCHAR vddcIndex; //2-bit vddc index;
8040 USHORT tdpLimit;
8041 //please initialize to 0
8042 USHORT rsv1;
8043 //please initialize to 0s
8044 ULONG rsv2[2];
8045}ATOM_PPLIB_SUMO_CLOCK_INFO;
8046
8047
8048
8049typedef struct _ATOM_PPLIB_STATE_V2
8050{
8051 //number of valid dpm levels in this state; Driver uses it to calculate the whole
8052 //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
8053 UCHAR ucNumDPMLevels;
8054
8055 //an index into the array of nonClockInfos
8056 UCHAR nonClockInfoIndex;
8057 /**
8058 * Driver will read the first ucNumDPMLevels in this array
8059 */
8060 UCHAR clockInfoIndex[1];
8061} ATOM_PPLIB_STATE_V2;
8062
8063typedef struct _StateArray{
8064 //how many states we have
8065 UCHAR ucNumEntries;
8066
8067 ATOM_PPLIB_STATE_V2 states[1];
8068}StateArray;
8069
8070
8071typedef struct _ClockInfoArray{
8072 //how many clock levels we have
8073 UCHAR ucNumEntries;
8074
8075 //sizeof(ATOM_PPLIB_CLOCK_INFO)
8076 UCHAR ucEntrySize;
8077
8078 UCHAR clockInfo[1];
8079}ClockInfoArray;
8080
8081typedef struct _NonClockInfoArray{
8082
8083 //how many non-clock levels we have. normally should be same as number of states
8084 UCHAR ucNumEntries;
8085 //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
8086 UCHAR ucEntrySize;
8087
8088 ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
8089}NonClockInfoArray;
8090
8091typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
8092{
8093 USHORT usClockLow;
8094 UCHAR ucClockHigh;
8095 USHORT usVoltage;
8096}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
8097
8098typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
8099{
8100 UCHAR ucNumEntries; // Number of entries.
8101 ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries.
8102}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
8103
8104typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
8105{
8106 USHORT usSclkLow;
8107 UCHAR ucSclkHigh;
8108 USHORT usMclkLow;
8109 UCHAR ucMclkHigh;
8110 USHORT usVddc;
8111 USHORT usVddci;
8112}ATOM_PPLIB_Clock_Voltage_Limit_Record;
8113
8114typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
8115{
8116 UCHAR ucNumEntries; // Number of entries.
8117 ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
8118}ATOM_PPLIB_Clock_Voltage_Limit_Table;
8119
8120typedef struct _ATOM_PPLIB_CAC_Leakage_Record
8121{
8122 USHORT usVddc; // We use this field for the "fake" standardized VDDC for power calculations; For CI and newer, we use this as the real VDDC value.
8123 ULONG ulLeakageValue; // For CI and newer we use this as the "fake" standard VDDC value.
8124}ATOM_PPLIB_CAC_Leakage_Record;
8125
8126typedef struct _ATOM_PPLIB_CAC_Leakage_Table
8127{
8128 UCHAR ucNumEntries; // Number of entries.
8129 ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries.
8130}ATOM_PPLIB_CAC_Leakage_Table;
8131
8132typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
8133{
8134 USHORT usVoltage;
8135 USHORT usSclkLow;
8136 UCHAR ucSclkHigh;
8137 USHORT usMclkLow;
8138 UCHAR ucMclkHigh;
8139}ATOM_PPLIB_PhaseSheddingLimits_Record;
8140
8141typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
8142{
8143 UCHAR ucNumEntries; // Number of entries.
8144 ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries.
8145}ATOM_PPLIB_PhaseSheddingLimits_Table;
8146
8147typedef struct _VCEClockInfo{
8148 USHORT usEVClkLow;
8149 UCHAR ucEVClkHigh;
8150 USHORT usECClkLow;
8151 UCHAR ucECClkHigh;
8152}VCEClockInfo;
8153
8154typedef struct _VCEClockInfoArray{
8155 UCHAR ucNumEntries;
8156 VCEClockInfo entries[1];
8157}VCEClockInfoArray;
8158
8159typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
8160{
8161 USHORT usVoltage;
8162 UCHAR ucVCEClockInfoIndex;
8163}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
8164
8165typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
8166{
8167 UCHAR numEntries;
8168 ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
8169}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
8170
8171typedef struct _ATOM_PPLIB_VCE_State_Record
8172{
8173 UCHAR ucVCEClockInfoIndex;
8174 UCHAR ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
8175}ATOM_PPLIB_VCE_State_Record;
8176
8177typedef struct _ATOM_PPLIB_VCE_State_Table
8178{
8179 UCHAR numEntries;
8180 ATOM_PPLIB_VCE_State_Record entries[1];
8181}ATOM_PPLIB_VCE_State_Table;
8182
8183
8184typedef struct _ATOM_PPLIB_VCE_Table
8185{
8186 UCHAR revid;
8187// VCEClockInfoArray array;
8188// ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
8189// ATOM_PPLIB_VCE_State_Table states;
8190}ATOM_PPLIB_VCE_Table;
8191
8192
8193typedef struct _UVDClockInfo{
8194 USHORT usVClkLow;
8195 UCHAR ucVClkHigh;
8196 USHORT usDClkLow;
8197 UCHAR ucDClkHigh;
8198}UVDClockInfo;
8199
8200typedef struct _UVDClockInfoArray{
8201 UCHAR ucNumEntries;
8202 UVDClockInfo entries[1];
8203}UVDClockInfoArray;
8204
8205typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
8206{
8207 USHORT usVoltage;
8208 UCHAR ucUVDClockInfoIndex;
8209}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
8210
8211typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
8212{
8213 UCHAR numEntries;
8214 ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
8215}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
8216
8217typedef struct _ATOM_PPLIB_UVD_State_Record
8218{
8219 UCHAR ucUVDClockInfoIndex;
8220 UCHAR ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
8221}ATOM_PPLIB_UVD_State_Record;
8222
8223typedef struct _ATOM_PPLIB_UVD_State_Table
8224{
8225 UCHAR numEntries;
8226 ATOM_PPLIB_UVD_State_Record entries[1];
8227}ATOM_PPLIB_UVD_State_Table;
8228
8229
8230typedef struct _ATOM_PPLIB_UVD_Table
8231{
8232 UCHAR revid;
8233// UVDClockInfoArray array;
8234// ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
8235// ATOM_PPLIB_UVD_State_Table states;
8236}ATOM_PPLIB_UVD_Table;
8237
8238
8239typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
8240{
8241 USHORT usVoltage;
8242 USHORT usSAMClockLow;
8243 UCHAR ucSAMClockHigh;
8244}ATOM_PPLIB_SAMClk_Voltage_Limit_Record;
8245
8246typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
8247 UCHAR numEntries;
8248 ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1];
8249}ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
8250
8251typedef struct _ATOM_PPLIB_SAMU_Table
8252{
8253 UCHAR revid;
8254 ATOM_PPLIB_SAMClk_Voltage_Limit_Table limits;
8255}ATOM_PPLIB_SAMU_Table;
8256
8257#define ATOM_PPM_A_A 1
8258#define ATOM_PPM_A_I 2
8259typedef struct _ATOM_PPLIB_PPM_Table
8260{
8261 UCHAR ucRevId;
8262 UCHAR ucPpmDesign; //A+I or A+A
8263 USHORT usCpuCoreNumber;
8264 ULONG ulPlatformTDP;
8265 ULONG ulSmallACPlatformTDP;
8266 ULONG ulPlatformTDC;
8267 ULONG ulSmallACPlatformTDC;
8268 ULONG ulApuTDP;
8269 ULONG ulDGpuTDP;
8270 ULONG ulDGpuUlvPower;
8271 ULONG ulTjmax;
8272} ATOM_PPLIB_PPM_Table;
8273
8274/**************************************************************************/
8275
8276 7664
8277// Following definitions are for compatibility issue in different SW components. 7665// Following definitions are for compatibility issue in different SW components.
8278#define ATOM_MASTER_DATA_TABLE_REVISION 0x01 7666#define ATOM_MASTER_DATA_TABLE_REVISION 0x01
@@ -8485,3 +7873,6 @@ typedef struct {
8485 7873
8486 7874
8487#endif /* _ATOMBIOS_H */ 7875#endif /* _ATOMBIOS_H */
7876
7877#include "pptable.h"
7878
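The PowerPlay (PPLib) table definitions deleted above are not lost: the #include "pptable.h" added at the end of the header indicates they now live in a shared pptable.h, so the DPM code added elsewhere in this series can use them without pulling in all of atombios.h. One detail worth keeping in mind when reading those tables is that the *_Array structures end in a one-element tail plus an explicit entry size, so entries are located by byte stride rather than by normal array indexing. A minimal sketch of how a ClockInfoArray is typically walked (helper name is illustrative):

    /* returns a pointer to entry i; ucEntrySize is the real per-entry size */
    static void *clock_info_entry(ClockInfoArray *cia, unsigned int i)
    {
            return (void *)&cia->clockInfo[i * cia->ucEntrySize];
    }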
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index b9d3b43f19c0..bf87f6d435f8 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1910,6 +1910,12 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
1910 int i; 1910 int i;
1911 1911
1912 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 1912 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
1913 /* disable the GRPH */
1914 if (ASIC_IS_DCE4(rdev))
1915 WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 0);
1916 else if (ASIC_IS_AVIVO(rdev))
1917 WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 0);
1918
1913 if (ASIC_IS_DCE6(rdev)) 1919 if (ASIC_IS_DCE6(rdev))
1914 atombios_powergate_crtc(crtc, ATOM_ENABLE); 1920 atombios_powergate_crtc(crtc, ATOM_ENABLE);
1915 1921
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 32501f6ec991..00885417ffff 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -50,7 +50,7 @@ static char *pre_emph_names[] = {
50 * or from atom. Note that atom operates on 50 * or from atom. Note that atom operates on
51 * dw units. 51 * dw units.
52 */ 52 */
53static void radeon_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le) 53void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
54{ 54{
55#ifdef __BIG_ENDIAN 55#ifdef __BIG_ENDIAN
56 u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */ 56 u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
@@ -100,7 +100,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
100 100
101 base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); 101 base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
102 102
103 radeon_copy_swap(base, send, send_bytes, true); 103 radeon_atom_copy_swap(base, send, send_bytes, true);
104 104
105 args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4)); 105 args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
106 args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4)); 106 args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
@@ -137,7 +137,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
137 recv_bytes = recv_size; 137 recv_bytes = recv_size;
138 138
139 if (recv && recv_size) 139 if (recv && recv_size)
140 radeon_copy_swap(recv, base + 16, recv_bytes, false); 140 radeon_atom_copy_swap(recv, base + 16, recv_bytes, false);
141 141
142 return recv_bytes; 142 return recv_bytes;
143} 143}
@@ -585,7 +585,7 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
585 return false; 585 return false;
586 } 586 }
587 587
588 DRM_DEBUG_KMS("link status %*ph\n", 6, link_status); 588 DRM_DEBUG_KMS("link status %6ph\n", link_status);
589 return true; 589 return true;
590} 590}
591 591
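Two independent cleanups are visible in this hunk: the byte-swap helper loses its static qualifier and gains the radeon_atom_ prefix so atombios_i2c.c can reuse it (see the extern declaration added there below), and the link-status debug print switches to the fixed-width form of the kernel's hex-dump format specifier. A sketch of the latter; both forms dump the same six DPCD link-status bytes:

    u8 link_status[6] = { 0 };

    DRM_DEBUG_KMS("link status %*ph\n", 6, link_status);  /* old: length passed as an argument */
    DRM_DEBUG_KMS("link status %6ph\n", link_status);      /* new: length encoded in the format */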
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 092275d53d4a..dfac7965ea28 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -682,8 +682,6 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
682int 682int
683atombios_get_encoder_mode(struct drm_encoder *encoder) 683atombios_get_encoder_mode(struct drm_encoder *encoder)
684{ 684{
685 struct drm_device *dev = encoder->dev;
686 struct radeon_device *rdev = dev->dev_private;
687 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 685 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
688 struct drm_connector *connector; 686 struct drm_connector *connector;
689 struct radeon_connector *radeon_connector; 687 struct radeon_connector *radeon_connector;
@@ -710,8 +708,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
710 case DRM_MODE_CONNECTOR_DVII: 708 case DRM_MODE_CONNECTOR_DVII:
711 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ 709 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
712 if (drm_detect_hdmi_monitor(radeon_connector->edid) && 710 if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
713 radeon_audio && 711 radeon_audio)
714 !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
715 return ATOM_ENCODER_MODE_HDMI; 712 return ATOM_ENCODER_MODE_HDMI;
716 else if (radeon_connector->use_digital) 713 else if (radeon_connector->use_digital)
717 return ATOM_ENCODER_MODE_DVI; 714 return ATOM_ENCODER_MODE_DVI;
@@ -722,8 +719,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
722 case DRM_MODE_CONNECTOR_HDMIA: 719 case DRM_MODE_CONNECTOR_HDMIA:
723 default: 720 default:
724 if (drm_detect_hdmi_monitor(radeon_connector->edid) && 721 if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
725 radeon_audio && 722 radeon_audio)
726 !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
727 return ATOM_ENCODER_MODE_HDMI; 723 return ATOM_ENCODER_MODE_HDMI;
728 else 724 else
729 return ATOM_ENCODER_MODE_DVI; 725 return ATOM_ENCODER_MODE_DVI;
@@ -737,8 +733,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
737 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 733 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
738 return ATOM_ENCODER_MODE_DP; 734 return ATOM_ENCODER_MODE_DP;
739 else if (drm_detect_hdmi_monitor(radeon_connector->edid) && 735 else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
740 radeon_audio && 736 radeon_audio)
741 !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
742 return ATOM_ENCODER_MODE_HDMI; 737 return ATOM_ENCODER_MODE_HDMI;
743 else 738 else
744 return ATOM_ENCODER_MODE_DVI; 739 return ATOM_ENCODER_MODE_DVI;
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index 082338df708a..deaf98cdca3a 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -27,10 +27,12 @@
27#include "radeon.h" 27#include "radeon.h"
28#include "atom.h" 28#include "atom.h"
29 29
30extern void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
31
30#define TARGET_HW_I2C_CLOCK 50 32#define TARGET_HW_I2C_CLOCK 50
31 33
32/* these are a limitation of ProcessI2cChannelTransaction not the hw */ 34/* these are a limitation of ProcessI2cChannelTransaction not the hw */
33#define ATOM_MAX_HW_I2C_WRITE 2 35#define ATOM_MAX_HW_I2C_WRITE 3
34#define ATOM_MAX_HW_I2C_READ 255 36#define ATOM_MAX_HW_I2C_READ 255
35 37
36static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, 38static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
@@ -50,20 +52,24 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
50 52
51 if (flags & HW_I2C_WRITE) { 53 if (flags & HW_I2C_WRITE) {
52 if (num > ATOM_MAX_HW_I2C_WRITE) { 54 if (num > ATOM_MAX_HW_I2C_WRITE) {
53 DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 2)\n", num); 55 DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
54 return -EINVAL; 56 return -EINVAL;
55 } 57 }
56 memcpy(&out, buf, num); 58 args.ucRegIndex = buf[0];
59 if (num > 1)
60 memcpy(&out, &buf[1], num - 1);
57 args.lpI2CDataOut = cpu_to_le16(out); 61 args.lpI2CDataOut = cpu_to_le16(out);
58 } else { 62 } else {
59 if (num > ATOM_MAX_HW_I2C_READ) { 63 if (num > ATOM_MAX_HW_I2C_READ) {
60 DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num); 64 DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
61 return -EINVAL; 65 return -EINVAL;
62 } 66 }
67 args.ucRegIndex = 0;
68 args.lpI2CDataOut = 0;
63 } 69 }
64 70
71 args.ucFlag = flags;
65 args.ucI2CSpeed = TARGET_HW_I2C_CLOCK; 72 args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
66 args.ucRegIndex = 0;
67 args.ucTransBytes = num; 73 args.ucTransBytes = num;
68 args.ucSlaveAddr = slave_addr << 1; 74 args.ucSlaveAddr = slave_addr << 1;
69 args.ucLineNumber = chan->rec.i2c_id; 75 args.ucLineNumber = chan->rec.i2c_id;
@@ -77,7 +83,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
77 } 83 }
78 84
79 if (!(flags & HW_I2C_WRITE)) 85 if (!(flags & HW_I2C_WRITE))
80 memcpy(buf, base, num); 86 radeon_atom_copy_swap(buf, base, num, false);
81 87
82 return 0; 88 return 0;
83} 89}
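The write-path fix above treats the first byte of the message as the register index expected by ProcessI2cChannelTransaction instead of packing it into the data word, which is also why the write limit grows from 2 to 3 bytes (one register byte plus the two-byte lpI2CDataOut payload). Spelled out, the mapping in the new code amounts to:

    /* for a write of num bytes from buf (1 <= num <= 3):
     *   buf[0]           -> args.ucRegIndex
     *   buf[1] .. buf[2] -> args.lpI2CDataOut (little-endian u16)
     * reads leave ucRegIndex/lpI2CDataOut at 0 and copy the result back
     * through radeon_atom_copy_swap() so big-endian hosts see bus order */
    args.ucRegIndex = buf[0];
    if (num > 1)
            memcpy(&out, &buf[1], num - 1);
    args.lpI2CDataOut = cpu_to_le16(out);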
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 9953e1fbc46d..084e69414fd1 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -2699,6 +2699,12 @@ int btc_dpm_init(struct radeon_device *rdev)
2699 else 2699 else
2700 rdev->pm.dpm.dyn_state.sclk_mclk_delta = 10000; 2700 rdev->pm.dpm.dyn_state.sclk_mclk_delta = 10000;
2701 2701
2702 /* make sure dc limits are valid */
2703 if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
2704 (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
2705 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
2706 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
2707
2702 return 0; 2708 return 0;
2703} 2709}
2704 2710
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.c b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
index 19a0114d2e3b..98d009e154bf 100644
--- a/drivers/gpu/drm/radeon/cayman_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
@@ -317,58 +317,4 @@ const u32 cayman_default_state[] =
317 0x00000010, /* */ 317 0x00000010, /* */
318}; 318};
319 319
320const u32 cayman_vs[] =
321{
322 0x00000004,
323 0x80400400,
324 0x0000a03c,
325 0x95000688,
326 0x00004000,
327 0x15000688,
328 0x00000000,
329 0x88000000,
330 0x04000000,
331 0x67961001,
332#ifdef __BIG_ENDIAN
333 0x00020000,
334#else
335 0x00000000,
336#endif
337 0x00000000,
338 0x04000000,
339 0x67961000,
340#ifdef __BIG_ENDIAN
341 0x00020008,
342#else
343 0x00000008,
344#endif
345 0x00000000,
346};
347
348const u32 cayman_ps[] =
349{
350 0x00000004,
351 0xa00c0000,
352 0x00000008,
353 0x80400000,
354 0x00000000,
355 0x95000688,
356 0x00000000,
357 0x88000000,
358 0x00380400,
359 0x00146b10,
360 0x00380000,
361 0x20146b10,
362 0x00380400,
363 0x40146b00,
364 0x80380000,
365 0x60146b00,
366 0x00000010,
367 0x000d1000,
368 0xb0800000,
369 0x00000000,
370};
371
372const u32 cayman_ps_size = ARRAY_SIZE(cayman_ps);
373const u32 cayman_vs_size = ARRAY_SIZE(cayman_vs);
374const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state); 320const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
new file mode 100644
index 000000000000..3cce533397c6
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -0,0 +1,5243 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "drmP.h"
25#include "radeon.h"
26#include "cikd.h"
27#include "r600_dpm.h"
28#include "ci_dpm.h"
29#include "atom.h"
30#include <linux/seq_file.h>
31
32#define MC_CG_ARB_FREQ_F0 0x0a
33#define MC_CG_ARB_FREQ_F1 0x0b
34#define MC_CG_ARB_FREQ_F2 0x0c
35#define MC_CG_ARB_FREQ_F3 0x0d
36
37#define SMC_RAM_END 0x40000
38
39#define VOLTAGE_SCALE 4
40#define VOLTAGE_VID_OFFSET_SCALE1 625
41#define VOLTAGE_VID_OFFSET_SCALE2 100
42
43static const struct ci_pt_defaults defaults_bonaire_xt =
44{
45 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
46 { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
47 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
48};
49
50static const struct ci_pt_defaults defaults_bonaire_pro =
51{
52 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
53 { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
54 { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
55};
56
57static const struct ci_pt_defaults defaults_saturn_xt =
58{
59 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
60 { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
61 { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
62};
63
64static const struct ci_pt_defaults defaults_saturn_pro =
65{
66 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
67 { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
68 { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
69};
70
71static const struct ci_pt_config_reg didt_config_ci[] =
72{
73 { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
74 { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
75 { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
76 { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
77 { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
78 { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
79 { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
80 { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
81 { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
82 { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
83 { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
84 { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
85 { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
86 { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
87 { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
88 { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
89 { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
90 { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
91 { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
92 { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
93 { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
94 { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
95 { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
96 { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
97 { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
98 { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
99 { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
100 { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
101 { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
102 { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
103 { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
104 { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
105 { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
106 { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
107 { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
108 { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
109 { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
110 { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
111 { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
112 { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
113 { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
114 { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
115 { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
116 { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
117 { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
118 { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
119 { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
120 { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
121 { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
122 { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
123 { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
124 { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
125 { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
126 { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
127 { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
128 { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
129 { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
130 { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
131 { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
132 { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
133 { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
134 { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
135 { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
136 { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
137 { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
138 { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
139 { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
140 { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
141 { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
142 { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
143 { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
144 { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
145 { 0xFFFFFFFF }
146};
147
148extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
149extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
150 u32 arb_freq_src, u32 arb_freq_dest);
151extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
152extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
153extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
154 u32 max_voltage_steps,
155 struct atom_voltage_table *voltage_table);
156extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
157extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
158extern void cik_update_cg(struct radeon_device *rdev,
159 u32 block, bool enable);
160
161static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
162 struct atom_voltage_table_entry *voltage_table,
163 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
164static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
165static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
166 u32 target_tdp);
167static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
168
169static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
170{
171 struct ci_power_info *pi = rdev->pm.dpm.priv;
172
173 return pi;
174}
175
176static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
177{
178 struct ci_ps *ps = rps->ps_priv;
179
180 return ps;
181}
182
183static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
184{
185 struct ci_power_info *pi = ci_get_pi(rdev);
186
187 switch (rdev->pdev->device) {
188 case 0x6650:
189 case 0x6658:
190 case 0x665C:
191 default:
192 pi->powertune_defaults = &defaults_bonaire_xt;
193 break;
194 case 0x6651:
195 case 0x665D:
196 pi->powertune_defaults = &defaults_bonaire_pro;
197 break;
198 case 0x6640:
199 pi->powertune_defaults = &defaults_saturn_xt;
200 break;
201 case 0x6641:
202 pi->powertune_defaults = &defaults_saturn_pro;
203 break;
204 }
205
206 pi->dte_tj_offset = 0;
207
208 pi->caps_power_containment = true;
209 pi->caps_cac = false;
210 pi->caps_sq_ramping = false;
211 pi->caps_db_ramping = false;
212 pi->caps_td_ramping = false;
213 pi->caps_tcp_ramping = false;
214
215 if (pi->caps_power_containment) {
216 pi->caps_cac = true;
217 pi->enable_bapm_feature = true;
218 pi->enable_tdc_limit_feature = true;
219 pi->enable_pkg_pwr_tracking_feature = true;
220 }
221}
222
223static u8 ci_convert_to_vid(u16 vddc)
224{
225 return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
226}
227
228static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
229{
230 struct ci_power_info *pi = ci_get_pi(rdev);
231 u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
232 u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
233 u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
234 u32 i;
235
236 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
237 return -EINVAL;
238 if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
239 return -EINVAL;
240 if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
241 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
242 return -EINVAL;
243
244 for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
245 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
246 lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
247 hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
248 hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
249 } else {
250 lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
251 hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
252 }
253 }
254 return 0;
255}
256
257static int ci_populate_vddc_vid(struct radeon_device *rdev)
258{
259 struct ci_power_info *pi = ci_get_pi(rdev);
260 u8 *vid = pi->smc_powertune_table.VddCVid;
261 u32 i;
262
263 if (pi->vddc_voltage_table.count > 8)
264 return -EINVAL;
265
266 for (i = 0; i < pi->vddc_voltage_table.count; i++)
267 vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
268
269 return 0;
270}
271
272static int ci_populate_svi_load_line(struct radeon_device *rdev)
273{
274 struct ci_power_info *pi = ci_get_pi(rdev);
275 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
276
277 pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
278 pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
279 pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
280 pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
281
282 return 0;
283}
284
285static int ci_populate_tdc_limit(struct radeon_device *rdev)
286{
287 struct ci_power_info *pi = ci_get_pi(rdev);
288 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
289 u16 tdc_limit;
290
291 tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
292 pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
293 pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
294 pt_defaults->tdc_vddc_throttle_release_limit_perc;
295 pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
296
297 return 0;
298}
299
300static int ci_populate_dw8(struct radeon_device *rdev)
301{
302 struct ci_power_info *pi = ci_get_pi(rdev);
303 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
304 int ret;
305
306 ret = ci_read_smc_sram_dword(rdev,
307 SMU7_FIRMWARE_HEADER_LOCATION +
308 offsetof(SMU7_Firmware_Header, PmFuseTable) +
309 offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
310 (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
311 pi->sram_end);
312 if (ret)
313 return -EINVAL;
314 else
315 pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;
316
317 return 0;
318}
319
320static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
321{
322 struct ci_power_info *pi = ci_get_pi(rdev);
323 u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
324 u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
325 int i, min, max;
326
327 min = max = hi_vid[0];
328 for (i = 0; i < 8; i++) {
329 if (0 != hi_vid[i]) {
330 if (min > hi_vid[i])
331 min = hi_vid[i];
332 if (max < hi_vid[i])
333 max = hi_vid[i];
334 }
335
336 if (0 != lo_vid[i]) {
337 if (min > lo_vid[i])
338 min = lo_vid[i];
339 if (max < lo_vid[i])
340 max = lo_vid[i];
341 }
342 }
343
344 if ((min == 0) || (max == 0))
345 return -EINVAL;
346 pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
347 pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;
348
349 return 0;
350}
351
352static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
353{
354 struct ci_power_info *pi = ci_get_pi(rdev);
355 u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
356 u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
357 struct radeon_cac_tdp_table *cac_tdp_table =
358 rdev->pm.dpm.dyn_state.cac_tdp_table;
359
360 hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
361 lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
362
363 pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
364 pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
365
366 return 0;
367}
368
369static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
370{
371 struct ci_power_info *pi = ci_get_pi(rdev);
372 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
373 SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
374 struct radeon_cac_tdp_table *cac_tdp_table =
375 rdev->pm.dpm.dyn_state.cac_tdp_table;
376 struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
377 int i, j, k;
378 const u16 *def1;
379 const u16 *def2;
380
381 dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
382 dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;
383
384 dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
385 dpm_table->GpuTjMax =
386 (u8)(pi->thermal_temp_setting.temperature_high / 1000);
387 dpm_table->GpuTjHyst = 8;
388
389 dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;
390
391 if (ppm) {
392 dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
393 dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
394 } else {
395 dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
396 dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
397 }
398
399 dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
400 def1 = pt_defaults->bapmti_r;
401 def2 = pt_defaults->bapmti_rc;
402
403 for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
404 for (j = 0; j < SMU7_DTE_SOURCES; j++) {
405 for (k = 0; k < SMU7_DTE_SINKS; k++) {
406 dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
407 dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
408 def1++;
409 def2++;
410 }
411 }
412 }
413
414 return 0;
415}
416
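/*
 * Build and upload the powertune (PM fuse) table: look up the PmFuseTable
 * offset in the SMC firmware header, fill smc_powertune_table via the
 * helpers above, then copy the whole SMU7_Discrete_PmFuses structure into
 * SMC SRAM.  Skipped entirely when power containment is not supported.
 */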
417static int ci_populate_pm_base(struct radeon_device *rdev)
418{
419 struct ci_power_info *pi = ci_get_pi(rdev);
420 u32 pm_fuse_table_offset;
421 int ret;
422
423 if (pi->caps_power_containment) {
424 ret = ci_read_smc_sram_dword(rdev,
425 SMU7_FIRMWARE_HEADER_LOCATION +
426 offsetof(SMU7_Firmware_Header, PmFuseTable),
427 &pm_fuse_table_offset, pi->sram_end);
428 if (ret)
429 return ret;
430 ret = ci_populate_bapm_vddc_vid_sidd(rdev);
431 if (ret)
432 return ret;
433 ret = ci_populate_vddc_vid(rdev);
434 if (ret)
435 return ret;
436 ret = ci_populate_svi_load_line(rdev);
437 if (ret)
438 return ret;
439 ret = ci_populate_tdc_limit(rdev);
440 if (ret)
441 return ret;
442 ret = ci_populate_dw8(rdev);
443 if (ret)
444 return ret;
445 ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
446 if (ret)
447 return ret;
448 ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
449 if (ret)
450 return ret;
451 ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
452 (u8 *)&pi->smc_powertune_table,
453 sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
454 if (ret)
455 return ret;
456 }
457
458 return 0;
459}
460
461static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
462{
463 struct ci_power_info *pi = ci_get_pi(rdev);
464 u32 data;
465
466 if (pi->caps_sq_ramping) {
467 data = RREG32_DIDT(DIDT_SQ_CTRL0);
468 if (enable)
469 data |= DIDT_CTRL_EN;
470 else
471 data &= ~DIDT_CTRL_EN;
472 WREG32_DIDT(DIDT_SQ_CTRL0, data);
473 }
474
475 if (pi->caps_db_ramping) {
476 data = RREG32_DIDT(DIDT_DB_CTRL0);
477 if (enable)
478 data |= DIDT_CTRL_EN;
479 else
480 data &= ~DIDT_CTRL_EN;
481 WREG32_DIDT(DIDT_DB_CTRL0, data);
482 }
483
484 if (pi->caps_td_ramping) {
485 data = RREG32_DIDT(DIDT_TD_CTRL0);
486 if (enable)
487 data |= DIDT_CTRL_EN;
488 else
489 data &= ~DIDT_CTRL_EN;
490 WREG32_DIDT(DIDT_TD_CTRL0, data);
491 }
492
493 if (pi->caps_tcp_ramping) {
494 data = RREG32_DIDT(DIDT_TCP_CTRL0);
495 if (enable)
496 data |= DIDT_CTRL_EN;
497 else
498 data &= ~DIDT_CTRL_EN;
499 WREG32_DIDT(DIDT_TCP_CTRL0, data);
500 }
501}
502
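/*
 * Walk a ci_pt_config_reg table terminated by offset 0xFFFFFFFF.
 * CACHE-type entries only accumulate their masked value into 'cache';
 * the next non-cache entry does a read-modify-write of its register
 * (SMC or DIDT indirect space, or plain MMIO with a dword index) and
 * ORs the cached bits in before clearing the cache.
 */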
503static int ci_program_pt_config_registers(struct radeon_device *rdev,
504 const struct ci_pt_config_reg *cac_config_regs)
505{
506 const struct ci_pt_config_reg *config_regs = cac_config_regs;
507 u32 data;
508 u32 cache = 0;
509
510 if (config_regs == NULL)
511 return -EINVAL;
512
513 while (config_regs->offset != 0xFFFFFFFF) {
514 if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
515 cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
516 } else {
517 switch (config_regs->type) {
518 case CISLANDS_CONFIGREG_SMC_IND:
519 data = RREG32_SMC(config_regs->offset);
520 break;
521 case CISLANDS_CONFIGREG_DIDT_IND:
522 data = RREG32_DIDT(config_regs->offset);
523 break;
524 default:
525 data = RREG32(config_regs->offset << 2);
526 break;
527 }
528
529 data &= ~config_regs->mask;
530 data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
531 data |= cache;
532
533 switch (config_regs->type) {
534 case CISLANDS_CONFIGREG_SMC_IND:
535 WREG32_SMC(config_regs->offset, data);
536 break;
537 case CISLANDS_CONFIGREG_DIDT_IND:
538 WREG32_DIDT(config_regs->offset, data);
539 break;
540 default:
541 WREG32(config_regs->offset << 2, data);
542 break;
543 }
544 cache = 0;
545 }
546 config_regs++;
547 }
548 return 0;
549}
550
551static int ci_enable_didt(struct radeon_device *rdev, bool enable)
552{
553 struct ci_power_info *pi = ci_get_pi(rdev);
554 int ret;
555
556 if (pi->caps_sq_ramping || pi->caps_db_ramping ||
557 pi->caps_td_ramping || pi->caps_tcp_ramping) {
558 cik_enter_rlc_safe_mode(rdev);
559
560 if (enable) {
561 ret = ci_program_pt_config_registers(rdev, didt_config_ci);
562 if (ret) {
563 cik_exit_rlc_safe_mode(rdev);
564 return ret;
565 }
566 }
567
568 ci_do_enable_didt(rdev, enable);
569
570 cik_exit_rlc_safe_mode(rdev);
571 }
572
573 return 0;
574}
575
576static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
577{
578 struct ci_power_info *pi = ci_get_pi(rdev);
579 PPSMC_Result smc_result;
580 int ret = 0;
581
582 if (enable) {
583 pi->power_containment_features = 0;
584 if (pi->caps_power_containment) {
585 if (pi->enable_bapm_feature) {
586 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
587 if (smc_result != PPSMC_Result_OK)
588 ret = -EINVAL;
589 else
590 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
591 }
592
593 if (pi->enable_tdc_limit_feature) {
594 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
595 if (smc_result != PPSMC_Result_OK)
596 ret = -EINVAL;
597 else
598 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
599 }
600
601 if (pi->enable_pkg_pwr_tracking_feature) {
602 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
603 if (smc_result != PPSMC_Result_OK) {
604 ret = -EINVAL;
605 } else {
606 struct radeon_cac_tdp_table *cac_tdp_table =
607 rdev->pm.dpm.dyn_state.cac_tdp_table;
608 u32 default_pwr_limit =
609 (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
610
611 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;
612
613 ci_set_power_limit(rdev, default_pwr_limit);
614 }
615 }
616 }
617 } else {
618 if (pi->caps_power_containment && pi->power_containment_features) {
619 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
620 ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);
621
622 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
623 ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);
624
625 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
626 ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
627 pi->power_containment_features = 0;
628 }
629 }
630
631 return ret;
632}
633
634static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
635{
636 struct ci_power_info *pi = ci_get_pi(rdev);
637 PPSMC_Result smc_result;
638 int ret = 0;
639
640 if (pi->caps_cac) {
641 if (enable) {
642 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
643 if (smc_result != PPSMC_Result_OK) {
644 ret = -EINVAL;
645 pi->cac_enabled = false;
646 } else {
647 pi->cac_enabled = true;
648 }
649 } else if (pi->cac_enabled) {
650 ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
651 pi->cac_enabled = false;
652 }
653 }
654
655 return ret;
656}
657
658static int ci_power_control_set_level(struct radeon_device *rdev)
659{
660 struct ci_power_info *pi = ci_get_pi(rdev);
661 struct radeon_cac_tdp_table *cac_tdp_table =
662 rdev->pm.dpm.dyn_state.cac_tdp_table;
663 s32 adjust_percent;
664 s32 target_tdp;
665 int ret = 0;
666 bool adjust_polarity = false; /* ??? */
667
668 if (pi->caps_power_containment &&
669 (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) {
670 adjust_percent = adjust_polarity ?
671 rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
672 target_tdp = ((100 + adjust_percent) *
673 (s32)cac_tdp_table->configurable_tdp) / 100;
674 target_tdp *= 256;
675
676 ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
677 }
678
679 return ret;
680}
681
682void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
683{
684 struct ci_power_info *pi = ci_get_pi(rdev);
685
686 if (pi->uvd_power_gated == gate)
687 return;
688
689 pi->uvd_power_gated = gate;
690
691 ci_update_uvd_dpm(rdev, gate);
692}
693
694bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
695{
696 struct ci_power_info *pi = ci_get_pi(rdev);
697 u32 vblank_time = r600_dpm_get_vblank_time(rdev);
698 u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
699
700 return vblank_time < switch_limit;
705}
706
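/*
 * Adjust the requested power state before programming it: force the
 * highest memory clock (no mclk switching) when more than one CRTC is
 * active or the vblank is too short, clamp the per-level clocks to the
 * DC limits when not on AC power, and keep the level clocks monotonic.
 */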
707static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
708 struct radeon_ps *rps)
709{
710 struct ci_ps *ps = ci_get_ps(rps);
711 struct ci_power_info *pi = ci_get_pi(rdev);
712 struct radeon_clock_and_voltage_limits *max_limits;
713 bool disable_mclk_switching;
714 u32 sclk, mclk;
715 int i;
716
717 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
718 ci_dpm_vblank_too_short(rdev))
719 disable_mclk_switching = true;
720 else
721 disable_mclk_switching = false;
722
723 if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
724 pi->battery_state = true;
725 else
726 pi->battery_state = false;
727
728 if (rdev->pm.dpm.ac_power)
729 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
730 else
731 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
732
733 if (rdev->pm.dpm.ac_power == false) {
734 for (i = 0; i < ps->performance_level_count; i++) {
735 if (ps->performance_levels[i].mclk > max_limits->mclk)
736 ps->performance_levels[i].mclk = max_limits->mclk;
737 if (ps->performance_levels[i].sclk > max_limits->sclk)
738 ps->performance_levels[i].sclk = max_limits->sclk;
739 }
740 }
741
742 /* XXX validate the min clocks required for display */
743
744 if (disable_mclk_switching) {
745 mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
746 sclk = ps->performance_levels[0].sclk;
747 } else {
748 mclk = ps->performance_levels[0].mclk;
749 sclk = ps->performance_levels[0].sclk;
750 }
751
752 ps->performance_levels[0].sclk = sclk;
753 ps->performance_levels[0].mclk = mclk;
754
755 if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
756 ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;
757
758 if (disable_mclk_switching) {
759 if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
760 ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
761 } else {
762 if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
763 ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
764 }
765}
766
767static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
768 int min_temp, int max_temp)
769{
770 int low_temp = 0 * 1000;
771 int high_temp = 255 * 1000;
772 u32 tmp;
773
774 if (low_temp < min_temp)
775 low_temp = min_temp;
776 if (high_temp > max_temp)
777 high_temp = max_temp;
778 if (high_temp < low_temp) {
779 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
780 return -EINVAL;
781 }
782
783 tmp = RREG32_SMC(CG_THERMAL_INT);
784 tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
785 tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
786 CI_DIG_THERM_INTL(low_temp / 1000);
787 WREG32_SMC(CG_THERMAL_INT, tmp);
788
789#if 0
790 /* XXX: need to figure out how to handle this properly */
791 tmp = RREG32_SMC(CG_THERMAL_CTRL);
792 tmp &= DIG_THERM_DPM_MASK;
793 tmp |= DIG_THERM_DPM(high_temp / 1000);
794 WREG32_SMC(CG_THERMAL_CTRL, tmp);
795#endif
796
797 return 0;
798}
799
800#if 0
801static int ci_read_smc_soft_register(struct radeon_device *rdev,
802 u16 reg_offset, u32 *value)
803{
804 struct ci_power_info *pi = ci_get_pi(rdev);
805
806 return ci_read_smc_sram_dword(rdev,
807 pi->soft_regs_start + reg_offset,
808 value, pi->sram_end);
809}
810#endif
811
812static int ci_write_smc_soft_register(struct radeon_device *rdev,
813 u16 reg_offset, u32 value)
814{
815 struct ci_power_info *pi = ci_get_pi(rdev);
816
817 return ci_write_smc_sram_dword(rdev,
818 pi->soft_regs_start + reg_offset,
819 value, pi->sram_end);
820}
821
822static void ci_init_fps_limits(struct radeon_device *rdev)
823{
824 struct ci_power_info *pi = ci_get_pi(rdev);
825 SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
826
827 if (pi->caps_fps) {
828 u16 tmp;
829
830 tmp = 45;
831 table->FpsHighT = cpu_to_be16(tmp);
832
833 tmp = 30;
834 table->FpsLowT = cpu_to_be16(tmp);
835 }
836}
837
838static int ci_update_sclk_t(struct radeon_device *rdev)
839{
840 struct ci_power_info *pi = ci_get_pi(rdev);
841 int ret = 0;
842 u32 low_sclk_interrupt_t = 0;
843
844 if (pi->caps_sclk_throttle_low_notification) {
845 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
846
847 ret = ci_copy_bytes_to_smc(rdev,
848 pi->dpm_table_start +
849 offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
850 (u8 *)&low_sclk_interrupt_t,
851 sizeof(u32), pi->sram_end);
852
853 }
854
855 return ret;
856}
857
858static void ci_get_leakage_voltages(struct radeon_device *rdev)
859{
860 struct ci_power_info *pi = ci_get_pi(rdev);
861 u16 leakage_id, virtual_voltage_id;
862 u16 vddc, vddci;
863 int i;
864
865 pi->vddc_leakage.count = 0;
866 pi->vddci_leakage.count = 0;
867
868 if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
869 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
870 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
871 if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
872 virtual_voltage_id,
873 leakage_id) == 0) {
874 if (vddc != 0 && vddc != virtual_voltage_id) {
875 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
876 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
877 pi->vddc_leakage.count++;
878 }
879 if (vddci != 0 && vddci != virtual_voltage_id) {
880 pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
881 pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
882 pi->vddci_leakage.count++;
883 }
884 }
885 }
886 }
887}
888
889static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
890{
891 struct ci_power_info *pi = ci_get_pi(rdev);
892 bool want_thermal_protection;
893 enum radeon_dpm_event_src dpm_event_src;
894 u32 tmp;
895
896 switch (sources) {
897 case 0:
898 default:
899 want_thermal_protection = false;
900 break;
901 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
902 want_thermal_protection = true;
903 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
904 break;
905 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
906 want_thermal_protection = true;
907 dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
908 break;
909 case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
910 (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
911 want_thermal_protection = true;
912 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
913 break;
914 }
915
916 if (want_thermal_protection) {
917#if 0
918 /* XXX: need to figure out how to handle this properly */
919 tmp = RREG32_SMC(CG_THERMAL_CTRL);
920 tmp &= DPM_EVENT_SRC_MASK;
921 tmp |= DPM_EVENT_SRC(dpm_event_src);
922 WREG32_SMC(CG_THERMAL_CTRL, tmp);
923#endif
924
925 tmp = RREG32_SMC(GENERAL_PWRMGT);
926 if (pi->thermal_protection)
927 tmp &= ~THERMAL_PROTECTION_DIS;
928 else
929 tmp |= THERMAL_PROTECTION_DIS;
930 WREG32_SMC(GENERAL_PWRMGT, tmp);
931 } else {
932 tmp = RREG32_SMC(GENERAL_PWRMGT);
933 tmp |= THERMAL_PROTECTION_DIS;
934 WREG32_SMC(GENERAL_PWRMGT, tmp);
935 }
936}
937
938static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
939 enum radeon_dpm_auto_throttle_src source,
940 bool enable)
941{
942 struct ci_power_info *pi = ci_get_pi(rdev);
943
944 if (enable) {
945 if (!(pi->active_auto_throttle_sources & (1 << source))) {
946 pi->active_auto_throttle_sources |= 1 << source;
947 ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
948 }
949 } else {
950 if (pi->active_auto_throttle_sources & (1 << source)) {
951 pi->active_auto_throttle_sources &= ~(1 << source);
952 ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
953 }
954 }
955}
956
957static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
958{
959 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
960 ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
961}
962
963static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
964{
965 struct ci_power_info *pi = ci_get_pi(rdev);
966 PPSMC_Result smc_result;
967
968 if (!pi->need_update_smu7_dpm_table)
969 return 0;
970
971 if ((!pi->sclk_dpm_key_disabled) &&
972 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
973 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
974 if (smc_result != PPSMC_Result_OK)
975 return -EINVAL;
976 }
977
978 if ((!pi->mclk_dpm_key_disabled) &&
979 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
980 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
981 if (smc_result != PPSMC_Result_OK)
982 return -EINVAL;
983 }
984
985 pi->need_update_smu7_dpm_table = 0;
986 return 0;
987}
988
989static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
990{
991 struct ci_power_info *pi = ci_get_pi(rdev);
992 PPSMC_Result smc_result;
993
994 if (enable) {
995 if (!pi->sclk_dpm_key_disabled) {
996 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
997 if (smc_result != PPSMC_Result_OK)
998 return -EINVAL;
999 }
1000
1001 if (!pi->mclk_dpm_key_disabled) {
1002 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
1003 if (smc_result != PPSMC_Result_OK)
1004 return -EINVAL;
1005
1006 WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);
1007
1008 WREG32_SMC(LCAC_MC0_CNTL, 0x05);
1009 WREG32_SMC(LCAC_MC1_CNTL, 0x05);
1010 WREG32_SMC(LCAC_CPL_CNTL, 0x100005);
1011
1012 udelay(10);
1013
1014 WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
1015 WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
1016 WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
1017 }
1018 } else {
1019 if (!pi->sclk_dpm_key_disabled) {
1020 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
1021 if (smc_result != PPSMC_Result_OK)
1022 return -EINVAL;
1023 }
1024
1025 if (!pi->mclk_dpm_key_disabled) {
1026 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
1027 if (smc_result != PPSMC_Result_OK)
1028 return -EINVAL;
1029 }
1030 }
1031
1032 return 0;
1033}
1034
1035static int ci_start_dpm(struct radeon_device *rdev)
1036{
1037 struct ci_power_info *pi = ci_get_pi(rdev);
1038 PPSMC_Result smc_result;
1039 int ret;
1040 u32 tmp;
1041
1042 tmp = RREG32_SMC(GENERAL_PWRMGT);
1043 tmp |= GLOBAL_PWRMGT_EN;
1044 WREG32_SMC(GENERAL_PWRMGT, tmp);
1045
1046 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1047 tmp |= DYNAMIC_PM_EN;
1048 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1049
1050 ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
1051
1052 WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);
1053
1054 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
1055 if (smc_result != PPSMC_Result_OK)
1056 return -EINVAL;
1057
1058 ret = ci_enable_sclk_mclk_dpm(rdev, true);
1059 if (ret)
1060 return ret;
1061
1062 if (!pi->pcie_dpm_key_disabled) {
1063 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
1064 if (smc_result != PPSMC_Result_OK)
1065 return -EINVAL;
1066 }
1067
1068 return 0;
1069}
1070
1071static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
1072{
1073 struct ci_power_info *pi = ci_get_pi(rdev);
1074 PPSMC_Result smc_result;
1075
1076 if (!pi->need_update_smu7_dpm_table)
1077 return 0;
1078
1079 if ((!pi->sclk_dpm_key_disabled) &&
1080 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1081 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1082 if (smc_result != PPSMC_Result_OK)
1083 return -EINVAL;
1084 }
1085
1086 if ((!pi->mclk_dpm_key_disabled) &&
1087 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1088 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1089 if (smc_result != PPSMC_Result_OK)
1090 return -EINVAL;
1091 }
1092
1093 return 0;
1094}
1095
1096static int ci_stop_dpm(struct radeon_device *rdev)
1097{
1098 struct ci_power_info *pi = ci_get_pi(rdev);
1099 PPSMC_Result smc_result;
1100 int ret;
1101 u32 tmp;
1102
1103 tmp = RREG32_SMC(GENERAL_PWRMGT);
1104 tmp &= ~GLOBAL_PWRMGT_EN;
1105 WREG32_SMC(GENERAL_PWRMGT, tmp);
1106
1107 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1108 tmp &= ~DYNAMIC_PM_EN;
1109 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1110
1111 if (!pi->pcie_dpm_key_disabled) {
1112 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
1113 if (smc_result != PPSMC_Result_OK)
1114 return -EINVAL;
1115 }
1116
1117 ret = ci_enable_sclk_mclk_dpm(rdev, false);
1118 if (ret)
1119 return ret;
1120
1121 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
1122 if (smc_result != PPSMC_Result_OK)
1123 return -EINVAL;
1124
1125 return 0;
1126}
1127
1128static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
1129{
1130 u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1131
1132 if (enable)
1133 tmp &= ~SCLK_PWRMGT_OFF;
1134 else
1135 tmp |= SCLK_PWRMGT_OFF;
1136 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1137}
1138
1139#if 0
1140static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
1141 bool ac_power)
1142{
1143 struct ci_power_info *pi = ci_get_pi(rdev);
1144 struct radeon_cac_tdp_table *cac_tdp_table =
1145 rdev->pm.dpm.dyn_state.cac_tdp_table;
1146 u32 power_limit;
1147
1148 if (ac_power)
1149 power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
1150 else
1151 power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
1152
1153 ci_set_power_limit(rdev, power_limit);
1154
1155 if (pi->caps_automatic_dc_transition) {
1156 if (ac_power)
1157 ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
1158 else
1159 ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
1160 }
1161
1162 return 0;
1163}
1164#endif
1165
1166static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
1167 PPSMC_Msg msg, u32 parameter)
1168{
1169 WREG32(SMC_MSG_ARG_0, parameter);
1170 return ci_send_msg_to_smc(rdev, msg);
1171}
1172
1173static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
1174 PPSMC_Msg msg, u32 *parameter)
1175{
1176 PPSMC_Result smc_result;
1177
1178 smc_result = ci_send_msg_to_smc(rdev, msg);
1179
1180 if ((smc_result == PPSMC_Result_OK) && parameter)
1181 *parameter = RREG32(SMC_MSG_ARG_0);
1182
1183 return smc_result;
1184}
1185
1186static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
1187{
1188 struct ci_power_info *pi = ci_get_pi(rdev);
1189
1190 if (!pi->sclk_dpm_key_disabled) {
1191 PPSMC_Result smc_result =
1192 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n);
1193 if (smc_result != PPSMC_Result_OK)
1194 return -EINVAL;
1195 }
1196
1197 return 0;
1198}
1199
1200static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
1201{
1202 struct ci_power_info *pi = ci_get_pi(rdev);
1203
1204 if (!pi->mclk_dpm_key_disabled) {
1205 PPSMC_Result smc_result =
1206 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n);
1207 if (smc_result != PPSMC_Result_OK)
1208 return -EINVAL;
1209 }
1210
1211 return 0;
1212}
1213
1214static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
1215{
1216 struct ci_power_info *pi = ci_get_pi(rdev);
1217
1218 if (!pi->pcie_dpm_key_disabled) {
1219 PPSMC_Result smc_result =
1220 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1221 if (smc_result != PPSMC_Result_OK)
1222 return -EINVAL;
1223 }
1224
1225 return 0;
1226}
1227
1228static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
1229{
1230 struct ci_power_info *pi = ci_get_pi(rdev);
1231
1232 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1233 PPSMC_Result smc_result =
1234 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
1235 if (smc_result != PPSMC_Result_OK)
1236 return -EINVAL;
1237 }
1238
1239 return 0;
1240}
1241
1242static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
1243 u32 target_tdp)
1244{
1245 PPSMC_Result smc_result =
1246 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1247 if (smc_result != PPSMC_Result_OK)
1248 return -EINVAL;
1249 return 0;
1250}
1251
1252static int ci_set_boot_state(struct radeon_device *rdev)
1253{
1254 return ci_enable_sclk_mclk_dpm(rdev, false);
1255}
1256
1257static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
1258{
1259 u32 sclk_freq;
1260 PPSMC_Result smc_result =
1261 ci_send_msg_to_smc_return_parameter(rdev,
1262 PPSMC_MSG_API_GetSclkFrequency,
1263 &sclk_freq);
1264 if (smc_result != PPSMC_Result_OK)
1265 sclk_freq = 0;
1266
1267 return sclk_freq;
1268}
1269
1270static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
1271{
1272 u32 mclk_freq;
1273 PPSMC_Result smc_result =
1274 ci_send_msg_to_smc_return_parameter(rdev,
1275 PPSMC_MSG_API_GetMclkFrequency,
1276 &mclk_freq);
1277 if (smc_result != PPSMC_Result_OK)
1278 mclk_freq = 0;
1279
1280 return mclk_freq;
1281}
1282
1283static void ci_dpm_start_smc(struct radeon_device *rdev)
1284{
1285 int i;
1286
1287 ci_program_jump_on_start(rdev);
1288 ci_start_smc_clock(rdev);
1289 ci_start_smc(rdev);
1290 for (i = 0; i < rdev->usec_timeout; i++) {
1291 if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
1292 break;
1293 }
1294}
1295
1296static void ci_dpm_stop_smc(struct radeon_device *rdev)
1297{
1298 ci_reset_smc(rdev);
1299 ci_stop_smc_clock(rdev);
1300}
1301
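/*
 * Read the SMC SRAM offsets of the DPM table, soft registers, MC
 * register table, fan table and MC arb DRAM timing table from the SMU7
 * firmware header and cache them in ci_power_info for later uploads.
 */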
1302static int ci_process_firmware_header(struct radeon_device *rdev)
1303{
1304 struct ci_power_info *pi = ci_get_pi(rdev);
1305 u32 tmp;
1306 int ret;
1307
1308 ret = ci_read_smc_sram_dword(rdev,
1309 SMU7_FIRMWARE_HEADER_LOCATION +
1310 offsetof(SMU7_Firmware_Header, DpmTable),
1311 &tmp, pi->sram_end);
1312 if (ret)
1313 return ret;
1314
1315 pi->dpm_table_start = tmp;
1316
1317 ret = ci_read_smc_sram_dword(rdev,
1318 SMU7_FIRMWARE_HEADER_LOCATION +
1319 offsetof(SMU7_Firmware_Header, SoftRegisters),
1320 &tmp, pi->sram_end);
1321 if (ret)
1322 return ret;
1323
1324 pi->soft_regs_start = tmp;
1325
1326 ret = ci_read_smc_sram_dword(rdev,
1327 SMU7_FIRMWARE_HEADER_LOCATION +
1328 offsetof(SMU7_Firmware_Header, mcRegisterTable),
1329 &tmp, pi->sram_end);
1330 if (ret)
1331 return ret;
1332
1333 pi->mc_reg_table_start = tmp;
1334
1335 ret = ci_read_smc_sram_dword(rdev,
1336 SMU7_FIRMWARE_HEADER_LOCATION +
1337 offsetof(SMU7_Firmware_Header, FanTable),
1338 &tmp, pi->sram_end);
1339 if (ret)
1340 return ret;
1341
1342 pi->fan_table_start = tmp;
1343
1344 ret = ci_read_smc_sram_dword(rdev,
1345 SMU7_FIRMWARE_HEADER_LOCATION +
1346 offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1347 &tmp, pi->sram_end);
1348 if (ret)
1349 return ret;
1350
1351 pi->arb_table_start = tmp;
1352
1353 return 0;
1354}
1355
1356static void ci_read_clock_registers(struct radeon_device *rdev)
1357{
1358 struct ci_power_info *pi = ci_get_pi(rdev);
1359
1360 pi->clock_registers.cg_spll_func_cntl =
1361 RREG32_SMC(CG_SPLL_FUNC_CNTL);
1362 pi->clock_registers.cg_spll_func_cntl_2 =
1363 RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
1364 pi->clock_registers.cg_spll_func_cntl_3 =
1365 RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
1366 pi->clock_registers.cg_spll_func_cntl_4 =
1367 RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
1368 pi->clock_registers.cg_spll_spread_spectrum =
1369 RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
1370 pi->clock_registers.cg_spll_spread_spectrum_2 =
1371 RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
1372 pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
1373 pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
1374 pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
1375 pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
1376 pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
1377 pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
1378 pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
1379 pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
1380 pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
1381}
1382
1383static void ci_init_sclk_t(struct radeon_device *rdev)
1384{
1385 struct ci_power_info *pi = ci_get_pi(rdev);
1386
1387 pi->low_sclk_interrupt_t = 0;
1388}
1389
1390static void ci_enable_thermal_protection(struct radeon_device *rdev,
1391 bool enable)
1392{
1393 u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1394
1395 if (enable)
1396 tmp &= ~THERMAL_PROTECTION_DIS;
1397 else
1398 tmp |= THERMAL_PROTECTION_DIS;
1399 WREG32_SMC(GENERAL_PWRMGT, tmp);
1400}
1401
1402static void ci_enable_acpi_power_management(struct radeon_device *rdev)
1403{
1404 u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1405
1406 tmp |= STATIC_PM_EN;
1407
1408 WREG32_SMC(GENERAL_PWRMGT, tmp);
1409}
1410
1411#if 0
1412static int ci_enter_ulp_state(struct radeon_device *rdev)
1413{
1414
1415 WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
1416
1417 udelay(25000);
1418
1419 return 0;
1420}
1421
1422static int ci_exit_ulp_state(struct radeon_device *rdev)
1423{
1424 int i;
1425
1426 WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
1427
1428 udelay(7000);
1429
1430 for (i = 0; i < rdev->usec_timeout; i++) {
1431 if (RREG32(SMC_RESP_0) == 1)
1432 break;
1433 udelay(1000);
1434 }
1435
1436 return 0;
1437}
1438#endif
1439
1440static int ci_notify_smc_display_change(struct radeon_device *rdev,
1441 bool has_display)
1442{
1443 PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
1444
1445 return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
1446}
1447
1448static int ci_enable_ds_master_switch(struct radeon_device *rdev,
1449 bool enable)
1450{
1451 struct ci_power_info *pi = ci_get_pi(rdev);
1452
1453 if (enable) {
1454 if (pi->caps_sclk_ds) {
1455 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
1456 return -EINVAL;
1457 } else {
1458 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1459 return -EINVAL;
1460 }
1461 } else {
1462 if (pi->caps_sclk_ds) {
1463 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1464 return -EINVAL;
1465 }
1466 }
1467
1468 return 0;
1469}
1470
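/*
 * Program the display gap logic and tell the SMC how long the
 * pre-vblank interval is: frame time is derived from the refresh rate,
 * 200 us plus the vblank time is subtracted, and the result is
 * converted to reference-clock ticks (reference_freq appears to be in
 * 10 kHz units, so ref_clock / 100 gives ticks per microsecond).  The
 * SMC is also told whether exactly one display is active.
 */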
1471static void ci_program_display_gap(struct radeon_device *rdev)
1472{
1473 u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
1474 u32 pre_vbi_time_in_us;
1475 u32 frame_time_in_us;
1476 u32 ref_clock = rdev->clock.spll.reference_freq;
1477 u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
1478 u32 vblank_time = r600_dpm_get_vblank_time(rdev);
1479
1480 tmp &= ~DISP_GAP_MASK;
1481 if (rdev->pm.dpm.new_active_crtc_count > 0)
1482 tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
1483 else
1484 tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
1485 WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
1486
1487 if (refresh_rate == 0)
1488 refresh_rate = 60;
1489 if (vblank_time == 0xffffffff)
1490 vblank_time = 500;
1491 frame_time_in_us = 1000000 / refresh_rate;
1492 pre_vbi_time_in_us =
1493 frame_time_in_us - 200 - vblank_time;
1494 tmp = pre_vbi_time_in_us * (ref_clock / 100);
1495
1496 WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
1497 ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
1498 ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
1499
1500 ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));
1501}
1504
1505static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
1506{
1507 struct ci_power_info *pi = ci_get_pi(rdev);
1508 u32 tmp;
1509
1510 if (enable) {
1511 if (pi->caps_sclk_ss_support) {
1512 tmp = RREG32_SMC(GENERAL_PWRMGT);
1513 tmp |= DYN_SPREAD_SPECTRUM_EN;
1514 WREG32_SMC(GENERAL_PWRMGT, tmp);
1515 }
1516 } else {
1517 tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
1518 tmp &= ~SSEN;
1519 WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);
1520
1521 tmp = RREG32_SMC(GENERAL_PWRMGT);
1522 tmp &= ~DYN_SPREAD_SPECTRUM_EN;
1523 WREG32_SMC(GENERAL_PWRMGT, tmp);
1524 }
1525}
1526
1527static void ci_program_sstp(struct radeon_device *rdev)
1528{
1529 WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
1530}
1531
1532static void ci_enable_display_gap(struct radeon_device *rdev)
1533{
1534 u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
1535
1536 tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
1537 tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
1538 DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
1539
1540 WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
1541}
1542
1543static void ci_program_vc(struct radeon_device *rdev)
1544{
1545 u32 tmp;
1546
1547 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1548 tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
1549 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1550
1551 WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
1552 WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
1553 WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
1554 WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
1555 WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
1556 WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
1557 WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
1558 WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
1559}
1560
1561static void ci_clear_vc(struct radeon_device *rdev)
1562{
1563 u32 tmp;
1564
1565 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1566 tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
1567 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1568
1569 WREG32_SMC(CG_FTV_0, 0);
1570 WREG32_SMC(CG_FTV_1, 0);
1571 WREG32_SMC(CG_FTV_2, 0);
1572 WREG32_SMC(CG_FTV_3, 0);
1573 WREG32_SMC(CG_FTV_4, 0);
1574 WREG32_SMC(CG_FTV_5, 0);
1575 WREG32_SMC(CG_FTV_6, 0);
1576 WREG32_SMC(CG_FTV_7, 0);
1577}
1578
1579static int ci_upload_firmware(struct radeon_device *rdev)
1580{
1581 struct ci_power_info *pi = ci_get_pi(rdev);
1582 int i, ret;
1583
1584 for (i = 0; i < rdev->usec_timeout; i++) {
1585 if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
1586 break;
1587 }
1588 WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);
1589
1590 ci_stop_smc_clock(rdev);
1591 ci_reset_smc(rdev);
1592
1593 ret = ci_load_smc_ucode(rdev, pi->sram_end);
1594
1595 return ret;
1596}
1598
1599static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
1600 struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
1601 struct atom_voltage_table *voltage_table)
1602{
1603 u32 i;
1604
1605 if (voltage_dependency_table == NULL)
1606 return -EINVAL;
1607
1608 voltage_table->mask_low = 0;
1609 voltage_table->phase_delay = 0;
1610
1611 voltage_table->count = voltage_dependency_table->count;
1612 for (i = 0; i < voltage_table->count; i++) {
1613 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
1614 voltage_table->entries[i].smio_low = 0;
1615 }
1616
1617 return 0;
1618}
1619
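/*
 * Build the VDDC/VDDCI/MVDD voltage tables either from the AtomBIOS
 * GPIO lookup tables or, for SVI2, from the clock/voltage dependency
 * tables, then trim each to the SMU7 maximum level count.
 */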
1620static int ci_construct_voltage_tables(struct radeon_device *rdev)
1621{
1622 struct ci_power_info *pi = ci_get_pi(rdev);
1623 int ret;
1624
1625 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1626 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
1627 VOLTAGE_OBJ_GPIO_LUT,
1628 &pi->vddc_voltage_table);
1629 if (ret)
1630 return ret;
1631 } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1632 ret = ci_get_svi2_voltage_table(rdev,
1633 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
1634 &pi->vddc_voltage_table);
1635 if (ret)
1636 return ret;
1637 }
1638
1639 if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
1640 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
1641 &pi->vddc_voltage_table);
1642
1643 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1644 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
1645 VOLTAGE_OBJ_GPIO_LUT,
1646 &pi->vddci_voltage_table);
1647 if (ret)
1648 return ret;
1649 } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1650 ret = ci_get_svi2_voltage_table(rdev,
1651 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
1652 &pi->vddci_voltage_table);
1653 if (ret)
1654 return ret;
1655 }
1656
1657 if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
1658 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
1659 &pi->vddci_voltage_table);
1660
1661 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1662 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
1663 VOLTAGE_OBJ_GPIO_LUT,
1664 &pi->mvdd_voltage_table);
1665 if (ret)
1666 return ret;
1667 } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1668 ret = ci_get_svi2_voltage_table(rdev,
1669 &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
1670 &pi->mvdd_voltage_table);
1671 if (ret)
1672 return ret;
1673 }
1674
1675 if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
1676 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
1677 &pi->mvdd_voltage_table);
1678
1679 return 0;
1680}
1681
1682static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
1683 struct atom_voltage_table_entry *voltage_table,
1684 SMU7_Discrete_VoltageLevel *smc_voltage_table)
1685{
1686 int ret;
1687
1688 ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
1689 &smc_voltage_table->StdVoltageHiSidd,
1690 &smc_voltage_table->StdVoltageLoSidd);
1691
1692 if (ret) {
1693 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
1694 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
1695 }
1696
1697 smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
1698 smc_voltage_table->StdVoltageHiSidd =
1699 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
1700 smc_voltage_table->StdVoltageLoSidd =
1701 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
1702}
1703
1704static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
1705 SMU7_Discrete_DpmTable *table)
1706{
1707 struct ci_power_info *pi = ci_get_pi(rdev);
1708 unsigned int count;
1709
1710 table->VddcLevelCount = pi->vddc_voltage_table.count;
1711 for (count = 0; count < table->VddcLevelCount; count++) {
1712 ci_populate_smc_voltage_table(rdev,
1713 &pi->vddc_voltage_table.entries[count],
1714 &table->VddcLevel[count]);
1715
1716 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1717 table->VddcLevel[count].Smio |=
1718 pi->vddc_voltage_table.entries[count].smio_low;
1719 else
1720 table->VddcLevel[count].Smio = 0;
1721 }
1722 table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
1723
1724 return 0;
1725}
1726
1727static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
1728 SMU7_Discrete_DpmTable *table)
1729{
1730 unsigned int count;
1731 struct ci_power_info *pi = ci_get_pi(rdev);
1732
1733 table->VddciLevelCount = pi->vddci_voltage_table.count;
1734 for (count = 0; count < table->VddciLevelCount; count++) {
1735 ci_populate_smc_voltage_table(rdev,
1736 &pi->vddci_voltage_table.entries[count],
1737 &table->VddciLevel[count]);
1738
1739 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1740 table->VddciLevel[count].Smio |=
1741 pi->vddci_voltage_table.entries[count].smio_low;
1742 else
1743 table->VddciLevel[count].Smio = 0;
1744 }
1745 table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
1746
1747 return 0;
1748}
1749
1750static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
1751 SMU7_Discrete_DpmTable *table)
1752{
1753 struct ci_power_info *pi = ci_get_pi(rdev);
1754 unsigned int count;
1755
1756 table->MvddLevelCount = pi->mvdd_voltage_table.count;
1757 for (count = 0; count < table->MvddLevelCount; count++) {
1758 ci_populate_smc_voltage_table(rdev,
1759 &pi->mvdd_voltage_table.entries[count],
1760 &table->MvddLevel[count]);
1761
1762 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1763 table->MvddLevel[count].Smio |=
1764 pi->mvdd_voltage_table.entries[count].smio_low;
1765 else
1766 table->MvddLevel[count].Smio = 0;
1767 }
1768 table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
1769
1770 return 0;
1771}
1772
1773static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
1774 SMU7_Discrete_DpmTable *table)
1775{
1776 int ret;
1777
1778 ret = ci_populate_smc_vddc_table(rdev, table);
1779 if (ret)
1780 return ret;
1781
1782 ret = ci_populate_smc_vddci_table(rdev, table);
1783 if (ret)
1784 return ret;
1785
1786 ret = ci_populate_smc_mvdd_table(rdev, table);
1787 if (ret)
1788 return ret;
1789
1790 return 0;
1791}
1792
1793static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
1794 SMU7_Discrete_VoltageLevel *voltage)
1795{
1796 struct ci_power_info *pi = ci_get_pi(rdev);
1797 u32 i = 0;
1798
1799 if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
1800 for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
1801 if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
1802 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
1803 break;
1804 }
1805 }
1806
1807 if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
1808 return -EINVAL;
1809 }
1810
1811 return -EINVAL;
1812}
1813
1814static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
1815 struct atom_voltage_table_entry *voltage_table,
1816 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
1817{
1818 u16 v_index, idx;
1819 bool voltage_found = false;
1820 *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
1821 *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
1822
1823 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
1824 return -EINVAL;
1825
1826 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
1827 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1828 if (voltage_table->value ==
1829 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1830 voltage_found = true;
1831 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1832 idx = v_index;
1833 else
1834 idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1835 *std_voltage_lo_sidd =
1836 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1837 *std_voltage_hi_sidd =
1838 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1839 break;
1840 }
1841 }
1842
1843 if (!voltage_found) {
1844 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1845 if (voltage_table->value <=
1846 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1847 voltage_found = true;
1848 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1849 idx = v_index;
1850 else
1851 idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1852 *std_voltage_lo_sidd =
1853 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1854 *std_voltage_hi_sidd =
1855 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1856 break;
1857 }
1858 }
1859 }
1860 }
1861
1862 return 0;
1863}
1864
1865static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
1866 const struct radeon_phase_shedding_limits_table *limits,
1867 u32 sclk,
1868 u32 *phase_shedding)
1869{
1870 unsigned int i;
1871
1872 *phase_shedding = 1;
1873
1874 for (i = 0; i < limits->count; i++) {
1875 if (sclk < limits->entries[i].sclk) {
1876 *phase_shedding = i;
1877 break;
1878 }
1879 }
1880}
1881
1882static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
1883 const struct radeon_phase_shedding_limits_table *limits,
1884 u32 mclk,
1885 u32 *phase_shedding)
1886{
1887 unsigned int i;
1888
1889 *phase_shedding = 1;
1890
1891 for (i = 0; i < limits->count; i++) {
1892 if (mclk < limits->entries[i].mclk) {
1893 *phase_shedding = i;
1894 break;
1895 }
1896 }
1897}
1898
1899static int ci_init_arb_table_index(struct radeon_device *rdev)
1900{
1901 struct ci_power_info *pi = ci_get_pi(rdev);
1902 u32 tmp;
1903 int ret;
1904
1905 ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
1906 &tmp, pi->sram_end);
1907 if (ret)
1908 return ret;
1909
1910 tmp &= 0x00FFFFFF;
1911 tmp |= MC_CG_ARB_FREQ_F1 << 24;
1912
1913 return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
1914 tmp, pi->sram_end);
1915}
1916
1917static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
1918 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
1919 u32 clock, u32 *voltage)
1920{
1921 u32 i = 0;
1922
1923 if (allowed_clock_voltage_table->count == 0)
1924 return -EINVAL;
1925
1926 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
1927 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
1928 *voltage = allowed_clock_voltage_table->entries[i].v;
1929 return 0;
1930 }
1931 }
1932
1933 *voltage = allowed_clock_voltage_table->entries[i-1].v;
1934
1935 return 0;
1936}
1937
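/*
 * Pick the largest deep-sleep divider ID i such that sclk >> i stays
 * at or above the minimum engine clock (e.g. with sclk == 8 * min this
 * returns divider ID 3); returns 0 if sclk is already below the
 * minimum.
 */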
1938static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
1939 u32 sclk, u32 min_sclk_in_sr)
1940{
1941 u32 i;
1942 u32 tmp;
1943 u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
1944 min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
1945
1946 if (sclk < min)
1947 return 0;
1948
1949 for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
1950 tmp = sclk / (1 << i);
1951 if (tmp >= min || i == 0)
1952 break;
1953 }
1954
1955 return (u8)i;
1956}
1957
1958static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
1959{
1960 return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
1961}
1962
1963static int ci_reset_to_default(struct radeon_device *rdev)
1964{
1965 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
1966 0 : -EINVAL;
1967}
1968
1969static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
1970{
1971 u32 tmp;
1972
1973 tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
1974
1975 if (tmp == MC_CG_ARB_FREQ_F0)
1976 return 0;
1977
1978 return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
1979}
1980
1981static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
1982 u32 sclk,
1983 u32 mclk,
1984 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
1985{
1986 u32 dram_timing;
1987 u32 dram_timing2;
1988 u32 burst_time;
1989
1990 radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
1991
1992 dram_timing = RREG32(MC_ARB_DRAM_TIMING);
1993 dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
1994 burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
1995
1996 arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
1997 arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
1998 arb_regs->McArbBurstTime = (u8)burst_time;
1999
2000 return 0;
2001}
2002
2003static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
2004{
2005 struct ci_power_info *pi = ci_get_pi(rdev);
2006 SMU7_Discrete_MCArbDramTimingTable arb_regs;
2007 u32 i, j;
2008 int ret = 0;
2009
2010 memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2011
2012 for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2013 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2014 ret = ci_populate_memory_timing_parameters(rdev,
2015 pi->dpm_table.sclk_table.dpm_levels[i].value,
2016 pi->dpm_table.mclk_table.dpm_levels[j].value,
2017 &arb_regs.entries[i][j]);
2018 if (ret)
2019 break;
2020 }
2021 }
2022
2023 if (ret == 0)
2024 ret = ci_copy_bytes_to_smc(rdev,
2025 pi->arb_table_start,
2026 (u8 *)&arb_regs,
2027 sizeof(SMU7_Discrete_MCArbDramTimingTable),
2028 pi->sram_end);
2029
2030 return ret;
2031}
2032
2033static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
2034{
2035 struct ci_power_info *pi = ci_get_pi(rdev);
2036
2037 if (pi->need_update_smu7_dpm_table == 0)
2038 return 0;
2039
2040 return ci_do_program_memory_timing_parameters(rdev);
2041}
2042
2043static void ci_populate_smc_initial_state(struct radeon_device *rdev,
2044 struct radeon_ps *radeon_boot_state)
2045{
2046 struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
2047 struct ci_power_info *pi = ci_get_pi(rdev);
2048 u32 level = 0;
2049
2050 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2051 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2052 boot_state->performance_levels[0].sclk) {
2053 pi->smc_state_table.GraphicsBootLevel = level;
2054 break;
2055 }
2056 }
2057
2058 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2059 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2060 boot_state->performance_levels[0].mclk) {
2061 pi->smc_state_table.MemoryBootLevel = level;
2062 break;
2063 }
2064 }
2065}
2066
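/*
 * Build an enable bitmask for the SMC from a dpm table: bit n is set
 * when dpm level n is enabled (level 0 in the LSB), assembled by
 * shifting in one bit per level from the highest level down.
 */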
2067static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2068{
2069 u32 i;
2070 u32 mask_value = 0;
2071
2072 for (i = dpm_table->count; i > 0; i--) {
2073 mask_value <<= 1;
2074 if (dpm_table->dpm_levels[i-1].enabled)
2075 mask_value |= 0x1;
2076 }
2079
2080 return mask_value;
2081}
2082
2083static void ci_populate_smc_link_level(struct radeon_device *rdev,
2084 SMU7_Discrete_DpmTable *table)
2085{
2086 struct ci_power_info *pi = ci_get_pi(rdev);
2087 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2088 u32 i;
2089
2090 for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2091 table->LinkLevel[i].PcieGenSpeed =
2092 (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2093 table->LinkLevel[i].PcieLaneCount =
2094 r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2095 table->LinkLevel[i].EnabledForActivity = 1;
2096 table->LinkLevel[i].DownT = cpu_to_be32(5);
2097 table->LinkLevel[i].UpT = cpu_to_be32(30);
2098 }
2099
2100 pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2101 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2102 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2103}
2104
2105static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
2106 SMU7_Discrete_DpmTable *table)
2107{
2108 u32 count;
2109 struct atom_clock_dividers dividers;
2110 int ret = -EINVAL;
2111
2112 table->UvdLevelCount =
2113 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2114
2115 for (count = 0; count < table->UvdLevelCount; count++) {
2116 table->UvdLevel[count].VclkFrequency =
2117 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2118 table->UvdLevel[count].DclkFrequency =
2119 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2120 table->UvdLevel[count].MinVddc =
2121 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2122 table->UvdLevel[count].MinVddcPhases = 1;
2123
2124 ret = radeon_atom_get_clock_dividers(rdev,
2125 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2126 table->UvdLevel[count].VclkFrequency, false, &dividers);
2127 if (ret)
2128 return ret;
2129
2130 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2131
2132 ret = radeon_atom_get_clock_dividers(rdev,
2133 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2134 table->UvdLevel[count].DclkFrequency, false, &dividers);
2135 if (ret)
2136 return ret;
2137
2138 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2139
2140 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2141 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2142 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2143 }
2144
2145 return ret;
2146}
2147
2148static int ci_populate_smc_vce_level(struct radeon_device *rdev,
2149 SMU7_Discrete_DpmTable *table)
2150{
2151 u32 count;
2152 struct atom_clock_dividers dividers;
2153 int ret = -EINVAL;
2154
2155 table->VceLevelCount =
2156 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2157
2158 for (count = 0; count < table->VceLevelCount; count++) {
2159 table->VceLevel[count].Frequency =
2160 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2161 table->VceLevel[count].MinVoltage =
2162 (u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2163 table->VceLevel[count].MinPhases = 1;
2164
2165 ret = radeon_atom_get_clock_dividers(rdev,
2166 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2167 table->VceLevel[count].Frequency, false, &dividers);
2168 if (ret)
2169 return ret;
2170
2171 table->VceLevel[count].Divider = (u8)dividers.post_divider;
2172
2173 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2174 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2175 }
2176
2177 return ret;
2178}
2180
2181static int ci_populate_smc_acp_level(struct radeon_device *rdev,
2182 SMU7_Discrete_DpmTable *table)
2183{
2184 u32 count;
2185 struct atom_clock_dividers dividers;
2186 int ret = -EINVAL;
2187
2188 table->AcpLevelCount = (u8)
2189 (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2190
2191 for (count = 0; count < table->AcpLevelCount; count++) {
2192 table->AcpLevel[count].Frequency =
2193 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2194 table->AcpLevel[count].MinVoltage =
2195 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2196 table->AcpLevel[count].MinPhases = 1;
2197
2198 ret = radeon_atom_get_clock_dividers(rdev,
2199 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2200 table->AcpLevel[count].Frequency, false, &dividers);
2201 if (ret)
2202 return ret;
2203
2204 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2205
2206 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2207 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2208 }
2209
2210 return ret;
2211}
2212
2213static int ci_populate_smc_samu_level(struct radeon_device *rdev,
2214 SMU7_Discrete_DpmTable *table)
2215{
2216 u32 count;
2217 struct atom_clock_dividers dividers;
2218 int ret = -EINVAL;
2219
2220 table->SamuLevelCount =
2221 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2222
2223 for (count = 0; count < table->SamuLevelCount; count++) {
2224 table->SamuLevel[count].Frequency =
2225 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2226 table->SamuLevel[count].MinVoltage =
2227 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2228 table->SamuLevel[count].MinPhases = 1;
2229
2230 ret = radeon_atom_get_clock_dividers(rdev,
2231 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2232 table->SamuLevel[count].Frequency, false, &dividers);
2233 if (ret)
2234 return ret;
2235
2236 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2237
2238 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2239 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2240 }
2241
2242 return ret;
2243}
2244
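/*
 * Compute the MPLL register settings for the requested memory clock
 * from the AtomBIOS memory PLL dividers, add the GDDR5 DQ post-divider
 * and optional memory spread spectrum, select the DLL speed/power-down
 * bits, and fill the SMC memory level structure with the results.
 */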
2245static int ci_calculate_mclk_params(struct radeon_device *rdev,
2246 u32 memory_clock,
2247 SMU7_Discrete_MemoryLevel *mclk,
2248 bool strobe_mode,
2249 bool dll_state_on)
2250{
2251 struct ci_power_info *pi = ci_get_pi(rdev);
2252 u32 dll_cntl = pi->clock_registers.dll_cntl;
2253 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2254 u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2255 u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2256 u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2257 u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2258 u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2259 u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
2260 u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
2261 struct atom_mpll_param mpll_param;
2262 int ret;
2263
2264 ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
2265 if (ret)
2266 return ret;
2267
2268 mpll_func_cntl &= ~BWCTRL_MASK;
2269 mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
2270
2271 mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
2272 mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
2273 CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
2274
2275 mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
2276 mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
2277
2278 if (pi->mem_gddr5) {
2279 mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
2280 mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
2281 YCLK_POST_DIV(mpll_param.post_div);
2282 }
2283
2284 if (pi->caps_mclk_ss_support) {
2285 struct radeon_atom_ss ss;
2286 u32 freq_nom;
2287 u32 tmp;
2288 u32 reference_clock = rdev->clock.mpll.reference_freq;
2289
2290 if (pi->mem_gddr5)
2291 freq_nom = memory_clock * 4;
2292 else
2293 freq_nom = memory_clock * 2;
2294
2295 tmp = (freq_nom / reference_clock);
2296 tmp = tmp * tmp;
2297 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2298 ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2299 u32 clks = reference_clock * 5 / ss.rate;
2300 u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2301
2302 mpll_ss1 &= ~CLKV_MASK;
2303 mpll_ss1 |= CLKV(clkv);
2304
2305 mpll_ss2 &= ~CLKS_MASK;
2306 mpll_ss2 |= CLKS(clks);
2307 }
2308 }
2309
2310 mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
2311 mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
2312
2313 if (dll_state_on)
2314 mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
2315 else
2316 mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2317
2318 mclk->MclkFrequency = memory_clock;
2319 mclk->MpllFuncCntl = mpll_func_cntl;
2320 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2321 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2322 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2323 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2324 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2325 mclk->DllCntl = dll_cntl;
2326 mclk->MpllSs1 = mpll_ss1;
2327 mclk->MpllSs2 = mpll_ss2;
2328
2329 return 0;
2330}
2331
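/* Build one SMC memory level: minimum voltages from the dependency tables, stutter/strobe/EDC decisions from the configured thresholds, then the MPLL parameters. */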
2332static int ci_populate_single_memory_level(struct radeon_device *rdev,
2333 u32 memory_clock,
2334 SMU7_Discrete_MemoryLevel *memory_level)
2335{
2336 struct ci_power_info *pi = ci_get_pi(rdev);
2337 int ret;
2338 bool dll_state_on;
2339
2340 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
2341 ret = ci_get_dependency_volt_by_clk(rdev,
2342 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2343 memory_clock, &memory_level->MinVddc);
2344 if (ret)
2345 return ret;
2346 }
2347
2348 if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
2349 ret = ci_get_dependency_volt_by_clk(rdev,
2350 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2351 memory_clock, &memory_level->MinVddci);
2352 if (ret)
2353 return ret;
2354 }
2355
2356 if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
2357 ret = ci_get_dependency_volt_by_clk(rdev,
2358 &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2359 memory_clock, &memory_level->MinMvdd);
2360 if (ret)
2361 return ret;
2362 }
2363
2364 memory_level->MinVddcPhases = 1;
2365
2366 if (pi->vddc_phase_shed_control)
2367 ci_populate_phase_value_based_on_mclk(rdev,
2368 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2369 memory_clock,
2370 &memory_level->MinVddcPhases);
2371
2372 memory_level->EnabledForThrottle = 1;
2373 memory_level->EnabledForActivity = 1;
2374 memory_level->UpH = 0;
2375 memory_level->DownH = 100;
2376 memory_level->VoltageDownH = 0;
2377 memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
2378
2379 memory_level->StutterEnable = false;
2380 memory_level->StrobeEnable = false;
2381 memory_level->EdcReadEnable = false;
2382 memory_level->EdcWriteEnable = false;
2383 memory_level->RttEnable = false;
2384
2385 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2386
2387 if (pi->mclk_stutter_mode_threshold &&
2388 (memory_clock <= pi->mclk_stutter_mode_threshold) &&
2389 (pi->uvd_enabled == false) &&
2390 (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
2391 (rdev->pm.dpm.new_active_crtc_count <= 2))
2392 memory_level->StutterEnable = true;
2393
2394 if (pi->mclk_strobe_mode_threshold &&
2395 (memory_clock <= pi->mclk_strobe_mode_threshold))
2396 memory_level->StrobeEnable = 1;
2397
2398 if (pi->mem_gddr5) {
2399 memory_level->StrobeRatio =
2400 si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
2401 if (pi->mclk_edc_enable_threshold &&
2402 (memory_clock > pi->mclk_edc_enable_threshold))
2403 memory_level->EdcReadEnable = true;
2404
2405 if (pi->mclk_edc_wr_enable_threshold &&
2406 (memory_clock > pi->mclk_edc_wr_enable_threshold))
2407 memory_level->EdcWriteEnable = true;
2408
2409 if (memory_level->StrobeEnable) {
2410 if (si_get_mclk_frequency_ratio(memory_clock, true) >=
2411 ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
2412 dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
2413 else
2414 dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
2415 } else {
2416 dll_state_on = pi->dll_default_on;
2417 }
2418 } else {
2419 memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
2420 dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
2421 }
2422
2423 ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
2424 if (ret)
2425 return ret;
2426
2427 memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
2428 memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
2429 memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
2430 memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
2431
2432 memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
2433 memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
2434 memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
2435 memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
2436 memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
2437 memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
2438 memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
2439 memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
2440 memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
2441 memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
2442 memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
2443
2444 return 0;
2445}
2446
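/* Program the ACPI (lowest-power) level: SCLK at the reference clock with the SPLL powered down and reset, and the memory DLLs held in reset. */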
2447static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
2448 SMU7_Discrete_DpmTable *table)
2449{
2450 struct ci_power_info *pi = ci_get_pi(rdev);
2451 struct atom_clock_dividers dividers;
2452 SMU7_Discrete_VoltageLevel voltage_level;
2453 u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
2454 u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
2455 u32 dll_cntl = pi->clock_registers.dll_cntl;
2456 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2457 int ret;
2458
2459 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
2460
2461 if (pi->acpi_vddc)
2462 table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
2463 else
2464 table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
2465
2466 table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
2467
2468 table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;
2469
2470 ret = radeon_atom_get_clock_dividers(rdev,
2471 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2472 table->ACPILevel.SclkFrequency, false, &dividers);
2473 if (ret)
2474 return ret;
2475
2476 table->ACPILevel.SclkDid = (u8)dividers.post_divider;
2477 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2478 table->ACPILevel.DeepSleepDivId = 0;
2479
2480 spll_func_cntl &= ~SPLL_PWRON;
2481 spll_func_cntl |= SPLL_RESET;
2482
2483 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
2484 spll_func_cntl_2 |= SCLK_MUX_SEL(4);
2485
2486 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
2487 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
2488 table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
2489 table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
2490 table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
2491 table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
2492 table->ACPILevel.CcPwrDynRm = 0;
2493 table->ACPILevel.CcPwrDynRm1 = 0;
2494
2495 table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
2496 table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
2497 table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
2498 table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
2499 table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
2500 table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
2501 table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
2502 table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
2503 table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
2504 table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
2505 table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
2506
2507 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
2508 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
2509
2510 if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2511 if (pi->acpi_vddci)
2512 table->MemoryACPILevel.MinVddci =
2513 cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
2514 else
2515 table->MemoryACPILevel.MinVddci =
2516 cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
2517 }
2518
2519 if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
2520 table->MemoryACPILevel.MinMvdd = 0;
2521 else
2522 table->MemoryACPILevel.MinMvdd =
2523 cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
2524
2525 mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
2526 mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2527
2528 dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
2529
2530 table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
2531 table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
2532 table->MemoryACPILevel.MpllAdFuncCntl =
2533 cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
2534 table->MemoryACPILevel.MpllDqFuncCntl =
2535 cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
2536 table->MemoryACPILevel.MpllFuncCntl =
2537 cpu_to_be32(pi->clock_registers.mpll_func_cntl);
2538 table->MemoryACPILevel.MpllFuncCntl_1 =
2539 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
2540 table->MemoryACPILevel.MpllFuncCntl_2 =
2541 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
2542 table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
2543 table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
2544
2545 table->MemoryACPILevel.EnabledForThrottle = 0;
2546 table->MemoryACPILevel.EnabledForActivity = 0;
2547 table->MemoryACPILevel.UpH = 0;
2548 table->MemoryACPILevel.DownH = 100;
2549 table->MemoryACPILevel.VoltageDownH = 0;
2550 table->MemoryACPILevel.ActivityLevel =
2551 cpu_to_be16((u16)pi->mclk_activity_target);
2552
2553 table->MemoryACPILevel.StutterEnable = false;
2554 table->MemoryACPILevel.StrobeEnable = false;
2555 table->MemoryACPILevel.EdcReadEnable = false;
2556 table->MemoryACPILevel.EdcWriteEnable = false;
2557 table->MemoryACPILevel.RttEnable = false;
2558
2559 return 0;
2560}
2561
2562
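/* Ask the SMC to enter or leave the ultra-low-voltage state when ULV is supported. */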
2563static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
2564{
2565 struct ci_power_info *pi = ci_get_pi(rdev);
2566 struct ci_ulv_parm *ulv = &pi->ulv;
2567
2568 if (ulv->supported) {
2569 if (enable)
2570 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
2571 0 : -EINVAL;
2572 else
2573 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
2574 0 : -EINVAL;
2575 }
2576
2577 return 0;
2578}
2579
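/* Compute the ULV VDDC offset relative to the lowest SCLK dependency entry; with SVID2 voltage control the offset is expressed as a VID code instead. */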
2580static int ci_populate_ulv_level(struct radeon_device *rdev,
2581 SMU7_Discrete_Ulv *state)
2582{
2583 struct ci_power_info *pi = ci_get_pi(rdev);
2584 u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
2585
2586 state->CcPwrDynRm = 0;
2587 state->CcPwrDynRm1 = 0;
2588
2589 if (ulv_voltage == 0) {
2590 pi->ulv.supported = false;
2591 return 0;
2592 }
2593
2594 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2595 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2596 state->VddcOffset = 0;
2597 else
2598 state->VddcOffset =
2599 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
2600 } else {
2601 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2602 state->VddcOffsetVid = 0;
2603 else
2604 state->VddcOffsetVid = (u8)
2605 ((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
2606 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
2607 }
2608 state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
2609
2610 state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
2611 state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
2612 state->VddcOffset = cpu_to_be16(state->VddcOffset);
2613
2614 return 0;
2615}
2616
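/* Derive the SPLL feedback divider and optional spread-spectrum settings for an engine clock and fill the clock fields of the SMC graphics level. */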
2617static int ci_calculate_sclk_params(struct radeon_device *rdev,
2618 u32 engine_clock,
2619 SMU7_Discrete_GraphicsLevel *sclk)
2620{
2621 struct ci_power_info *pi = ci_get_pi(rdev);
2622 struct atom_clock_dividers dividers;
2623 u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
2624 u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
2625 u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
2626 u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
2627 u32 reference_clock = rdev->clock.spll.reference_freq;
2628 u32 reference_divider;
2629 u32 fbdiv;
2630 int ret;
2631
2632 ret = radeon_atom_get_clock_dividers(rdev,
2633 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2634 engine_clock, false, &dividers);
2635 if (ret)
2636 return ret;
2637
2638 reference_divider = 1 + dividers.ref_div;
2639 fbdiv = dividers.fb_div & 0x3FFFFFF;
2640
2641 spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
2642 spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
2643 spll_func_cntl_3 |= SPLL_DITHEN;
2644
2645 if (pi->caps_sclk_ss_support) {
2646 struct radeon_atom_ss ss;
2647 u32 vco_freq = engine_clock * dividers.post_div;
2648
2649 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2650 ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
2651 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
2652 u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
2653
2654 cg_spll_spread_spectrum &= ~CLK_S_MASK;
2655 cg_spll_spread_spectrum |= CLK_S(clk_s);
2656 cg_spll_spread_spectrum |= SSEN;
2657
2658 cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
2659 cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
2660 }
2661 }
2662
2663 sclk->SclkFrequency = engine_clock;
2664 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
2665 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
2666 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
2667 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
2668 sclk->SclkDid = (u8)dividers.post_divider;
2669
2670 return 0;
2671}
2672
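/* Build one SMC graphics level: SPLL parameters, minimum VDDC from the SCLK dependency table, activity target and deep-sleep divider. */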
2673static int ci_populate_single_graphic_level(struct radeon_device *rdev,
2674 u32 engine_clock,
2675 u16 sclk_activity_level_t,
2676 SMU7_Discrete_GraphicsLevel *graphic_level)
2677{
2678 struct ci_power_info *pi = ci_get_pi(rdev);
2679 int ret;
2680
2681 ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
2682 if (ret)
2683 return ret;
2684
2685 ret = ci_get_dependency_volt_by_clk(rdev,
2686 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2687 engine_clock, &graphic_level->MinVddc);
2688 if (ret)
2689 return ret;
2690
2691 graphic_level->SclkFrequency = engine_clock;
2692
2693 graphic_level->Flags = 0;
2694 graphic_level->MinVddcPhases = 1;
2695
2696 if (pi->vddc_phase_shed_control)
2697 ci_populate_phase_value_based_on_sclk(rdev,
2698 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2699 engine_clock,
2700 &graphic_level->MinVddcPhases);
2701
2702 graphic_level->ActivityLevel = sclk_activity_level_t;
2703
2704 graphic_level->CcPwrDynRm = 0;
2705 graphic_level->CcPwrDynRm1 = 0;
2706 graphic_level->EnabledForActivity = 1;
2707 graphic_level->EnabledForThrottle = 1;
2708 graphic_level->UpH = 0;
2709 graphic_level->DownH = 0;
2710 graphic_level->VoltageDownH = 0;
2711 graphic_level->PowerThrottle = 0;
2712
2713 if (pi->caps_sclk_ds)
2714 graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
2715 engine_clock,
2716 CISLAND_MINIMUM_ENGINE_CLOCK);
2717
2718 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2719
2720 graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
2721 graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
2722 graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
2723 graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
2724 graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
2725 graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
2726 graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
2727 graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
2728 graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
2729 graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
2730 graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
2731
2732 return 0;
2733}
2734
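/* Populate every graphics DPM level from the SCLK table and copy the array into SMC RAM; the top level gets the high display watermark. */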
2735static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
2736{
2737 struct ci_power_info *pi = ci_get_pi(rdev);
2738 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2739 u32 level_array_address = pi->dpm_table_start +
2740 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
2741 u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
2742 SMU7_MAX_LEVELS_GRAPHICS;
2743 SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
2744 u32 i, ret;
2745
2746 memset(levels, 0, level_array_size);
2747
2748 for (i = 0; i < dpm_table->sclk_table.count; i++) {
2749 ret = ci_populate_single_graphic_level(rdev,
2750 dpm_table->sclk_table.dpm_levels[i].value,
2751 (u16)pi->activity_target[i],
2752 &pi->smc_state_table.GraphicsLevel[i]);
2753 if (ret)
2754 return ret;
2755 if (i == (dpm_table->sclk_table.count - 1))
2756 pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
2757 PPSMC_DISPLAY_WATERMARK_HIGH;
2758 }
2759
2760 pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
2761 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
2762 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
2763
2764 ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2765 (u8 *)levels, level_array_size,
2766 pi->sram_end);
2767 if (ret)
2768 return ret;
2769
2770 return 0;
2771}
2772
2773static int ci_populate_ulv_state(struct radeon_device *rdev,
2774 SMU7_Discrete_Ulv *ulv_level)
2775{
2776 return ci_populate_ulv_level(rdev, ulv_level);
2777}
2778
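/* Populate every memory DPM level from the MCLK table and upload the array to the SMC; a zero memory clock is treated as an error. */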
2779static int ci_populate_all_memory_levels(struct radeon_device *rdev)
2780{
2781 struct ci_power_info *pi = ci_get_pi(rdev);
2782 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2783 u32 level_array_address = pi->dpm_table_start +
2784 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
2785 u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
2786 SMU7_MAX_LEVELS_MEMORY;
2787 SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
2788 u32 i, ret;
2789
2790 memset(levels, 0, level_array_size);
2791
2792 for (i = 0; i < dpm_table->mclk_table.count; i++) {
2793 if (dpm_table->mclk_table.dpm_levels[i].value == 0)
2794 return -EINVAL;
2795 ret = ci_populate_single_memory_level(rdev,
2796 dpm_table->mclk_table.dpm_levels[i].value,
2797 &pi->smc_state_table.MemoryLevel[i]);
2798 if (ret)
2799 return ret;
2800 }
2801
2802 pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
2803
2804 pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
2805 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
2806 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
2807
2808 pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
2809 PPSMC_DISPLAY_WATERMARK_HIGH;
2810
2811 ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2812 (u8 *)levels, level_array_size,
2813 pi->sram_end);
2814 if (ret)
2815 return ret;
2816
2817 return 0;
2818}
2819
2820static void ci_reset_single_dpm_table(struct radeon_device *rdev,
2821 struct ci_single_dpm_table *dpm_table,
2822 u32 count)
2823{
2824 u32 i;
2825
2826 dpm_table->count = count;
2827 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
2828 dpm_table->dpm_levels[i].enabled = false;
2829}
2830
2831static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
2832 u32 index, u32 pcie_gen, u32 pcie_lanes)
2833{
2834 dpm_table->dpm_levels[index].value = pcie_gen;
2835 dpm_table->dpm_levels[index].param1 = pcie_lanes;
2836 dpm_table->dpm_levels[index].enabled = true;
2837}
2838
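/* Build the six-entry default PCIe speed/lane table from the performance and power-saving limits, reusing whichever set is available when only one is enabled. */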
2839static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
2840{
2841 struct ci_power_info *pi = ci_get_pi(rdev);
2842
2843 if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
2844 return -EINVAL;
2845
2846 if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
2847 pi->pcie_gen_powersaving = pi->pcie_gen_performance;
2848 pi->pcie_lane_powersaving = pi->pcie_lane_performance;
2849 } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
2850 pi->pcie_gen_performance = pi->pcie_gen_powersaving;
2851 pi->pcie_lane_performance = pi->pcie_lane_powersaving;
2852 }
2853
2854 ci_reset_single_dpm_table(rdev,
2855 &pi->dpm_table.pcie_speed_table,
2856 SMU7_MAX_LEVELS_LINK);
2857
2858 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
2859 pi->pcie_gen_powersaving.min,
2860 pi->pcie_lane_powersaving.min);
2861 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
2862 pi->pcie_gen_performance.min,
2863 pi->pcie_lane_performance.min);
2864 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
2865 pi->pcie_gen_powersaving.min,
2866 pi->pcie_lane_powersaving.max);
2867 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
2868 pi->pcie_gen_performance.min,
2869 pi->pcie_lane_performance.max);
2870 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
2871 pi->pcie_gen_powersaving.max,
2872 pi->pcie_lane_powersaving.max);
2873 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
2874 pi->pcie_gen_performance.max,
2875 pi->pcie_lane_performance.max);
2876
2877 pi->dpm_table.pcie_speed_table.count = 6;
2878
2879 return 0;
2880}
2881
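/* Construct the driver-side DPM tables (SCLK, MCLK, VDDC, VDDCI, MVDD, PCIe) from the clock/voltage dependency tables, skipping duplicate clock entries. */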
2882static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
2883{
2884 struct ci_power_info *pi = ci_get_pi(rdev);
2885 struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
2886 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2887 struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
2888 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
2889 struct radeon_cac_leakage_table *std_voltage_table =
2890 &rdev->pm.dpm.dyn_state.cac_leakage_table;
2891 u32 i;
2892
2893 if (allowed_sclk_vddc_table == NULL)
2894 return -EINVAL;
2895 if (allowed_sclk_vddc_table->count < 1)
2896 return -EINVAL;
2897 if (allowed_mclk_table == NULL)
2898 return -EINVAL;
2899 if (allowed_mclk_table->count < 1)
2900 return -EINVAL;
2901
2902 memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
2903
2904 ci_reset_single_dpm_table(rdev,
2905 &pi->dpm_table.sclk_table,
2906 SMU7_MAX_LEVELS_GRAPHICS);
2907 ci_reset_single_dpm_table(rdev,
2908 &pi->dpm_table.mclk_table,
2909 SMU7_MAX_LEVELS_MEMORY);
2910 ci_reset_single_dpm_table(rdev,
2911 &pi->dpm_table.vddc_table,
2912 SMU7_MAX_LEVELS_VDDC);
2913 ci_reset_single_dpm_table(rdev,
2914 &pi->dpm_table.vddci_table,
2915 SMU7_MAX_LEVELS_VDDCI);
2916 ci_reset_single_dpm_table(rdev,
2917 &pi->dpm_table.mvdd_table,
2918 SMU7_MAX_LEVELS_MVDD);
2919
2920 pi->dpm_table.sclk_table.count = 0;
2921 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
2922 if ((i == 0) ||
2923 (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
2924 allowed_sclk_vddc_table->entries[i].clk)) {
2925 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
2926 allowed_sclk_vddc_table->entries[i].clk;
2927 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true;
2928 pi->dpm_table.sclk_table.count++;
2929 }
2930 }
2931
2932 pi->dpm_table.mclk_table.count = 0;
2933 for (i = 0; i < allowed_mclk_table->count; i++) {
2934 if ((i == 0) ||
2935 (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
2936 allowed_mclk_table->entries[i].clk)) {
2937 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
2938 allowed_mclk_table->entries[i].clk;
2939 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true;
2940 pi->dpm_table.mclk_table.count++;
2941 }
2942 }
2943
2944 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
2945 pi->dpm_table.vddc_table.dpm_levels[i].value =
2946 allowed_sclk_vddc_table->entries[i].v;
2947 pi->dpm_table.vddc_table.dpm_levels[i].param1 =
2948 std_voltage_table->entries[i].leakage;
2949 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
2950 }
2951 pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
2952
2953 allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
2954 if (allowed_mclk_table) {
2955 for (i = 0; i < allowed_mclk_table->count; i++) {
2956 pi->dpm_table.vddci_table.dpm_levels[i].value =
2957 allowed_mclk_table->entries[i].v;
2958 pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
2959 }
2960 pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
2961 }
2962
2963 allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
2964 if (allowed_mclk_table) {
2965 for (i = 0; i < allowed_mclk_table->count; i++) {
2966 pi->dpm_table.mvdd_table.dpm_levels[i].value =
2967 allowed_mclk_table->entries[i].v;
2968 pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
2969 }
2970 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
2971 }
2972
2973 ci_setup_default_pcie_tables(rdev);
2974
2975 return 0;
2976}
2977
2978static int ci_find_boot_level(struct ci_single_dpm_table *table,
2979 u32 value, u32 *boot_level)
2980{
2981 u32 i;
2982 int ret = -EINVAL;
2983
2984 for (i = 0; i < table->count; i++) {
2985 if (value == table->dpm_levels[i].value) {
2986 *boot_level = i;
2987 ret = 0;
2988 }
2989 }
2990
2991 return ret;
2992}
2993
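/* Top-level SMC table setup: build the default DPM tables, populate all levels (graphics, memory, link, ACPI, VCE, ACP, SAMU, UVD), set the boot levels and global parameters, then write the table into SMC RAM. */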
2994static int ci_init_smc_table(struct radeon_device *rdev)
2995{
2996 struct ci_power_info *pi = ci_get_pi(rdev);
2997 struct ci_ulv_parm *ulv = &pi->ulv;
2998 struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
2999 SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3000 int ret;
3001
3002 ret = ci_setup_default_dpm_tables(rdev);
3003 if (ret)
3004 return ret;
3005
3006 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3007 ci_populate_smc_voltage_tables(rdev, table);
3008
3009 ci_init_fps_limits(rdev);
3010
3011 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3012 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3013
3014 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3015 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3016
3017 if (pi->mem_gddr5)
3018 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3019
3020 if (ulv->supported) {
3021 ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
3022 if (ret)
3023 return ret;
3024 WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3025 }
3026
3027 ret = ci_populate_all_graphic_levels(rdev);
3028 if (ret)
3029 return ret;
3030
3031 ret = ci_populate_all_memory_levels(rdev);
3032 if (ret)
3033 return ret;
3034
3035 ci_populate_smc_link_level(rdev, table);
3036
3037 ret = ci_populate_smc_acpi_level(rdev, table);
3038 if (ret)
3039 return ret;
3040
3041 ret = ci_populate_smc_vce_level(rdev, table);
3042 if (ret)
3043 return ret;
3044
3045 ret = ci_populate_smc_acp_level(rdev, table);
3046 if (ret)
3047 return ret;
3048
3049 ret = ci_populate_smc_samu_level(rdev, table);
3050 if (ret)
3051 return ret;
3052
3053 ret = ci_do_program_memory_timing_parameters(rdev);
3054 if (ret)
3055 return ret;
3056
3057 ret = ci_populate_smc_uvd_level(rdev, table);
3058 if (ret)
3059 return ret;
3060
3061 table->UvdBootLevel = 0;
3062 table->VceBootLevel = 0;
3063 table->AcpBootLevel = 0;
3064 table->SamuBootLevel = 0;
3065 table->GraphicsBootLevel = 0;
3066 table->MemoryBootLevel = 0;
3067
3068 ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3069 pi->vbios_boot_state.sclk_bootup_value,
3070 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3071
3072 ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3073 pi->vbios_boot_state.mclk_bootup_value,
3074 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3075
3076 table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3077 table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3078 table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3079
3080 ci_populate_smc_initial_state(rdev, radeon_boot_state);
3081
3082 ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
3083 if (ret)
3084 return ret;
3085
3086 table->UVDInterval = 1;
3087 table->VCEInterval = 1;
3088 table->ACPInterval = 1;
3089 table->SAMUInterval = 1;
3090 table->GraphicsVoltageChangeEnable = 1;
3091 table->GraphicsThermThrottleEnable = 1;
3092 table->GraphicsInterval = 1;
3093 table->VoltageInterval = 1;
3094 table->ThermalInterval = 1;
3095 table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3096 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3097 table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3098 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3099 table->MemoryVoltageChangeEnable = 1;
3100 table->MemoryInterval = 1;
3101 table->VoltageResponseTime = 0;
3102 table->VddcVddciDelta = 4000;
3103 table->PhaseResponseTime = 0;
3104 table->MemoryThermThrottleEnable = 1;
3105 table->PCIeBootLinkLevel = 0;
3106 table->PCIeGenInterval = 1;
3107 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3108 table->SVI2Enable = 1;
3109 else
3110 table->SVI2Enable = 0;
3111
3112 table->ThermGpio = 17;
3113 table->SclkStepSize = 0x4000;
3114
3115 table->SystemFlags = cpu_to_be32(table->SystemFlags);
3116 table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3117 table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3118 table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3119 table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3120 table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3121 table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3122 table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3123 table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3124 table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3125 table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3126 table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3127 table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3128 table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3129
3130 ret = ci_copy_bytes_to_smc(rdev,
3131 pi->dpm_table_start +
3132 offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3133 (u8 *)&table->SystemFlags,
3134 sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3135 pi->sram_end);
3136 if (ret)
3137 return ret;
3138
3139 return 0;
3140}
3141
3142static void ci_trim_single_dpm_states(struct radeon_device *rdev,
3143 struct ci_single_dpm_table *dpm_table,
3144 u32 low_limit, u32 high_limit)
3145{
3146 u32 i;
3147
3148 for (i = 0; i < dpm_table->count; i++) {
3149 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3150 (dpm_table->dpm_levels[i].value > high_limit))
3151 dpm_table->dpm_levels[i].enabled = false;
3152 else
3153 dpm_table->dpm_levels[i].enabled = true;
3154 }
3155}
3156
3157static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
3158 u32 speed_low, u32 lanes_low,
3159 u32 speed_high, u32 lanes_high)
3160{
3161 struct ci_power_info *pi = ci_get_pi(rdev);
3162 struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3163 u32 i, j;
3164
3165 for (i = 0; i < pcie_table->count; i++) {
3166 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3167 (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3168 (pcie_table->dpm_levels[i].value > speed_high) ||
3169 (pcie_table->dpm_levels[i].param1 > lanes_high))
3170 pcie_table->dpm_levels[i].enabled = false;
3171 else
3172 pcie_table->dpm_levels[i].enabled = true;
3173 }
3174
3175 for (i = 0; i < pcie_table->count; i++) {
3176 if (pcie_table->dpm_levels[i].enabled) {
3177 for (j = i + 1; j < pcie_table->count; j++) {
3178 if (pcie_table->dpm_levels[j].enabled) {
3179 if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3180 (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3181 pcie_table->dpm_levels[j].enabled = false;
3182 }
3183 }
3184 }
3185 }
3186}
3187
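/* Restrict the SCLK, MCLK and PCIe DPM tables to the ranges spanned by the requested power state. */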
3188static int ci_trim_dpm_states(struct radeon_device *rdev,
3189 struct radeon_ps *radeon_state)
3190{
3191 struct ci_ps *state = ci_get_ps(radeon_state);
3192 struct ci_power_info *pi = ci_get_pi(rdev);
3193 u32 high_limit_count;
3194
3195 if (state->performance_level_count < 1)
3196 return -EINVAL;
3197
3198 if (state->performance_level_count == 1)
3199 high_limit_count = 0;
3200 else
3201 high_limit_count = 1;
3202
3203 ci_trim_single_dpm_states(rdev,
3204 &pi->dpm_table.sclk_table,
3205 state->performance_levels[0].sclk,
3206 state->performance_levels[high_limit_count].sclk);
3207
3208 ci_trim_single_dpm_states(rdev,
3209 &pi->dpm_table.mclk_table,
3210 state->performance_levels[0].mclk,
3211 state->performance_levels[high_limit_count].mclk);
3212
3213 ci_trim_pcie_dpm_states(rdev,
3214 state->performance_levels[0].pcie_gen,
3215 state->performance_levels[0].pcie_lane,
3216 state->performance_levels[high_limit_count].pcie_gen,
3217 state->performance_levels[high_limit_count].pcie_lane);
3218
3219 return 0;
3220}
3221
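/* Request from the SMC the lowest VDDC that satisfies the current display clock. */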
3222static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
3223{
3224 struct radeon_clock_voltage_dependency_table *disp_voltage_table =
3225 &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3226 struct radeon_clock_voltage_dependency_table *vddc_table =
3227 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3228 u32 requested_voltage = 0;
3229 u32 i;
3230
3231 if (disp_voltage_table == NULL)
3232 return -EINVAL;
3233 if (!disp_voltage_table->count)
3234 return -EINVAL;
3235
3236 for (i = 0; i < disp_voltage_table->count; i++) {
3237 if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3238 requested_voltage = disp_voltage_table->entries[i].v;
3239 }
3240
3241 for (i = 0; i < vddc_table->count; i++) {
3242 if (requested_voltage <= vddc_table->entries[i].v) {
3243 requested_voltage = vddc_table->entries[i].v;
3244 return (ci_send_msg_to_smc_with_parameter(rdev,
3245 PPSMC_MSG_VddC_Request,
3246 requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3247 0 : -EINVAL;
3248 }
3249 }
3250
3251 return -EINVAL;
3252}
3253
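/* Send the SCLK/MCLK/PCIe level enable masks to the SMC and apply the display's minimum voltage request. */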
3254static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3255{
3256 struct ci_power_info *pi = ci_get_pi(rdev);
3257 PPSMC_Result result;
3258
3259 if (!pi->sclk_dpm_key_disabled) {
3260 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3261 result = ci_send_msg_to_smc_with_parameter(rdev,
3262 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3263 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3264 if (result != PPSMC_Result_OK)
3265 return -EINVAL;
3266 }
3267 }
3268
3269 if (!pi->mclk_dpm_key_disabled) {
3270 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3271 result = ci_send_msg_to_smc_with_parameter(rdev,
3272 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3273 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3274 if (result != PPSMC_Result_OK)
3275 return -EINVAL;
3276 }
3277 }
3278
3279 if (!pi->pcie_dpm_key_disabled) {
3280 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3281 result = ci_send_msg_to_smc_with_parameter(rdev,
3282 PPSMC_MSG_PCIeDPM_SetEnabledMask,
3283 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3284 if (result != PPSMC_Result_OK)
3285 return -EINVAL;
3286 }
3287 }
3288
3289 ci_apply_disp_minimum_voltage_request(rdev);
3290
3291 return 0;
3292}
3293
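/* Flag which SMU7 DPM tables must be rebuilt: the new state's top SCLK/MCLK is missing from the table, or the active CRTC count changed. */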
3294static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
3295 struct radeon_ps *radeon_state)
3296{
3297 struct ci_power_info *pi = ci_get_pi(rdev);
3298 struct ci_ps *state = ci_get_ps(radeon_state);
3299 struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3300 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3301 struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3302 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3303 u32 i;
3304
3305 pi->need_update_smu7_dpm_table = 0;
3306
3307 for (i = 0; i < sclk_table->count; i++) {
3308 if (sclk == sclk_table->dpm_levels[i].value)
3309 break;
3310 }
3311
3312 if (i >= sclk_table->count) {
3313 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3314 } else {
3315 /* XXX check display min clock requirements */
3316 if (0 != CISLAND_MINIMUM_ENGINE_CLOCK)
3317 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3318 }
3319
3320 for (i = 0; i < mclk_table->count; i++) {
3321 if (mclk == mclk_table->dpm_levels[i].value)
3322 break;
3323 }
3324
3325 if (i >= mclk_table->count)
3326 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3327
3328 if (rdev->pm.dpm.current_active_crtc_count !=
3329 rdev->pm.dpm.new_active_crtc_count)
3330 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3331}
3332
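/* When the update flags are set, patch the top SCLK/MCLK level and re-populate/upload the graphics and memory level arrays. */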
3333static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
3334 struct radeon_ps *radeon_state)
3335{
3336 struct ci_power_info *pi = ci_get_pi(rdev);
3337 struct ci_ps *state = ci_get_ps(radeon_state);
3338 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3339 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3340 struct ci_dpm_table *dpm_table = &pi->dpm_table;
3341 int ret;
3342
3343 if (!pi->need_update_smu7_dpm_table)
3344 return 0;
3345
3346 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
3347 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
3348
3349 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
3350 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
3351
3352 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
3353 ret = ci_populate_all_graphic_levels(rdev);
3354 if (ret)
3355 return ret;
3356 }
3357
3358 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
3359 ret = ci_populate_all_memory_levels(rdev);
3360 if (ret)
3361 return ret;
3362 }
3363
3364 return 0;
3365}
3366
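/* Enable or disable UVD DPM: pick the UVD levels that fit under the current voltage limit and keep the lowest MCLK level masked off while UVD is active. */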
3367static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
3368{
3369 struct ci_power_info *pi = ci_get_pi(rdev);
3370 const struct radeon_clock_and_voltage_limits *max_limits;
3371 int i;
3372
3373 if (rdev->pm.dpm.ac_power)
3374 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3375 else
3376 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3377
3378 if (enable) {
3379 pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
3380
3381 for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3382 if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3383 pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
3384
3385 if (!pi->caps_uvd_dpm)
3386 break;
3387 }
3388 }
3389
3390 ci_send_msg_to_smc_with_parameter(rdev,
3391 PPSMC_MSG_UVDDPM_SetEnabledMask,
3392 pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
3393
3394 if (pi->last_mclk_dpm_enable_mask & 0x1) {
3395 pi->uvd_enabled = true;
3396 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3397 ci_send_msg_to_smc_with_parameter(rdev,
3398 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3399 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3400 }
3401 } else {
3402 if (pi->last_mclk_dpm_enable_mask & 0x1) {
3403 pi->uvd_enabled = false;
3404 pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
3405 ci_send_msg_to_smc_with_parameter(rdev,
3406 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3407 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3408 }
3409 }
3410
3411 return (ci_send_msg_to_smc(rdev, enable ?
3412 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
3413 0 : -EINVAL;
3414}
3415
3416#if 0
3417static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
3418{
3419 struct ci_power_info *pi = ci_get_pi(rdev);
3420 const struct radeon_clock_and_voltage_limits *max_limits;
3421 int i;
3422
3423 if (rdev->pm.dpm.ac_power)
3424 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3425 else
3426 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3427
3428 if (enable) {
3429 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
3430 for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3431 if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3432 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
3433
3434 if (!pi->caps_vce_dpm)
3435 break;
3436 }
3437 }
3438
3439 ci_send_msg_to_smc_with_parameter(rdev,
3440 PPSMC_MSG_VCEDPM_SetEnabledMask,
3441 pi->dpm_level_enable_mask.vce_dpm_enable_mask);
3442 }
3443
3444 return (ci_send_msg_to_smc(rdev, enable ?
3445 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
3446 0 : -EINVAL;
3447}
3448
3449static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
3450{
3451 struct ci_power_info *pi = ci_get_pi(rdev);
3452 const struct radeon_clock_and_voltage_limits *max_limits;
3453 int i;
3454
3455 if (rdev->pm.dpm.ac_power)
3456 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3457 else
3458 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3459
3460 if (enable) {
3461 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
3462 for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3463 if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3464 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
3465
3466 if (!pi->caps_samu_dpm)
3467 break;
3468 }
3469 }
3470
3471 ci_send_msg_to_smc_with_parameter(rdev,
3472 PPSMC_MSG_SAMUDPM_SetEnabledMask,
3473 pi->dpm_level_enable_mask.samu_dpm_enable_mask);
3474 }
3475 return (ci_send_msg_to_smc(rdev, enable ?
3476 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
3477 0 : -EINVAL;
3478}
3479
3480static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
3481{
3482 struct ci_power_info *pi = ci_get_pi(rdev);
3483 const struct radeon_clock_and_voltage_limits *max_limits;
3484 int i;
3485
3486 if (rdev->pm.dpm.ac_power)
3487 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3488 else
3489 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3490
3491 if (enable) {
3492 pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
3493 for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3494 if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3495 pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
3496
3497 if (!pi->caps_acp_dpm)
3498 break;
3499 }
3500 }
3501
3502 ci_send_msg_to_smc_with_parameter(rdev,
3503 PPSMC_MSG_ACPDPM_SetEnabledMask,
3504 pi->dpm_level_enable_mask.acp_dpm_enable_mask);
3505 }
3506
3507 return (ci_send_msg_to_smc(rdev, enable ?
3508 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
3509 0 : -EINVAL;
3510}
3511#endif
3512
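/* Program the UVD boot level and toggle UVD DPM when the block is gated or ungated. */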
3513static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
3514{
3515 struct ci_power_info *pi = ci_get_pi(rdev);
3516 u32 tmp;
3517
3518 if (!gate) {
3519 if (pi->caps_uvd_dpm ||
3520 (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
3521 pi->smc_state_table.UvdBootLevel = 0;
3522 else
3523 pi->smc_state_table.UvdBootLevel =
3524 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
3525
3526 tmp = RREG32_SMC(DPM_TABLE_475);
3527 tmp &= ~UvdBootLevel_MASK;
3528 tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
3529 WREG32_SMC(DPM_TABLE_475, tmp);
3530 }
3531
3532 return ci_enable_uvd_dpm(rdev, !gate);
3533}
3534
3535#if 0
3536static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
3537{
3538 u8 i;
3539 u32 min_evclk = 30000; /* ??? */
3540 struct radeon_vce_clock_voltage_dependency_table *table =
3541 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
3542
3543 for (i = 0; i < table->count; i++) {
3544 if (table->entries[i].evclk >= min_evclk)
3545 return i;
3546 }
3547
3548 return table->count - 1;
3549}
3550
3551static int ci_update_vce_dpm(struct radeon_device *rdev,
3552 struct radeon_ps *radeon_new_state,
3553 struct radeon_ps *radeon_current_state)
3554{
3555 struct ci_power_info *pi = ci_get_pi(rdev);
3556 bool new_vce_clock_non_zero = (radeon_new_state->evclk != 0);
3557 bool old_vce_clock_non_zero = (radeon_current_state->evclk != 0);
3558 int ret = 0;
3559 u32 tmp;
3560
3561 if (new_vce_clock_non_zero != old_vce_clock_non_zero) {
3562 if (new_vce_clock_non_zero) {
3563 pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
3564
3565 tmp = RREG32_SMC(DPM_TABLE_475);
3566 tmp &= ~VceBootLevel_MASK;
3567 tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
3568 WREG32_SMC(DPM_TABLE_475, tmp);
3569
3570 ret = ci_enable_vce_dpm(rdev, true);
3571 } else {
3572 ret = ci_enable_vce_dpm(rdev, false);
3573 }
3574 }
3575 return ret;
3576}
3577
3578static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
3579{
3580 return ci_enable_samu_dpm(rdev, gate);
3581}
3582
3583static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
3584{
3585 struct ci_power_info *pi = ci_get_pi(rdev);
3586 u32 tmp;
3587
3588 if (!gate) {
3589 pi->smc_state_table.AcpBootLevel = 0;
3590
3591 tmp = RREG32_SMC(DPM_TABLE_475);
3592 tmp &= ~AcpBootLevel_MASK;
3593 tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
3594 WREG32_SMC(DPM_TABLE_475, tmp);
3595 }
3596
3597 return ci_enable_acp_dpm(rdev, !gate);
3598}
3599#endif
3600
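/* Trim the DPM tables to the new state and regenerate the SCLK/MCLK/PCIe enable masks, dropping the lowest MCLK level while UVD is active. */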
3601static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
3602 struct radeon_ps *radeon_state)
3603{
3604 struct ci_power_info *pi = ci_get_pi(rdev);
3605 int ret;
3606
3607 ret = ci_trim_dpm_states(rdev, radeon_state);
3608 if (ret)
3609 return ret;
3610
3611 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3612 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
3613 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3614 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
3615 pi->last_mclk_dpm_enable_mask =
3616 pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3617 if (pi->uvd_enabled) {
3618 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
3619 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3620 }
3621 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
3622 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
3623
3624 return 0;
3625}
3626
3627static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
3628 u32 level_mask)
3629{
3630 u32 level = 0;
3631
3632 while ((level_mask & (1 << level)) == 0)
3633 level++;
3634
3635 return level;
3636}
3637
3638
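/* Force the SCLK/MCLK/PCIe DPM state to the highest or lowest enabled level, or hand control back to the SMC for automatic selection. */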
3639int ci_dpm_force_performance_level(struct radeon_device *rdev,
3640 enum radeon_dpm_forced_level level)
3641{
3642 struct ci_power_info *pi = ci_get_pi(rdev);
3643 PPSMC_Result smc_result;
3644 u32 tmp, levels, i;
3645 int ret;
3646
3647 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
3648 if ((!pi->sclk_dpm_key_disabled) &&
3649 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3650 levels = 0;
3651 tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
3652 while (tmp >>= 1)
3653 levels++;
3654 if (levels) {
3655 ret = ci_dpm_force_state_sclk(rdev, levels);
3656 if (ret)
3657 return ret;
3658 for (i = 0; i < rdev->usec_timeout; i++) {
3659 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3660 CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3661 if (tmp == levels)
3662 break;
3663 udelay(1);
3664 }
3665 }
3666 }
3667 if ((!pi->mclk_dpm_key_disabled) &&
3668 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3669 levels = 0;
3670 tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3671 while (tmp >>= 1)
3672 levels++;
3673 if (levels) {
3674 ret = ci_dpm_force_state_mclk(rdev, levels);
3675 if (ret)
3676 return ret;
3677 for (i = 0; i < rdev->usec_timeout; i++) {
3678 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3679 CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3680 if (tmp == levels)
3681 break;
3682 udelay(1);
3683 }
3684 }
3685 }
3686 if ((!pi->pcie_dpm_key_disabled) &&
3687 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3688 levels = 0;
3689 tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
3690 while (tmp >>= 1)
3691 levels++;
3692 if (levels) {
3693 ret = ci_dpm_force_state_pcie(rdev, levels);
3694 if (ret)
3695 return ret;
3696 for (i = 0; i < rdev->usec_timeout; i++) {
3697 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3698 CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3699 if (tmp == levels)
3700 break;
3701 udelay(1);
3702 }
3703 }
3704 }
3705 } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
3706 if ((!pi->sclk_dpm_key_disabled) &&
3707 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3708 levels = ci_get_lowest_enabled_level(rdev,
3709 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3710 ret = ci_dpm_force_state_sclk(rdev, levels);
3711 if (ret)
3712 return ret;
3713 for (i = 0; i < rdev->usec_timeout; i++) {
3714 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3715 CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3716 if (tmp == levels)
3717 break;
3718 udelay(1);
3719 }
3720 }
3721 if ((!pi->mclk_dpm_key_disabled) &&
3722 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3723 levels = ci_get_lowest_enabled_level(rdev,
3724 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3725 ret = ci_dpm_force_state_mclk(rdev, levels);
3726 if (ret)
3727 return ret;
3728 for (i = 0; i < rdev->usec_timeout; i++) {
3729 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3730 CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3731 if (tmp == levels)
3732 break;
3733 udelay(1);
3734 }
3735 }
3736 if ((!pi->pcie_dpm_key_disabled) &&
3737 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3738 levels = ci_get_lowest_enabled_level(rdev,
3739 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3740 ret = ci_dpm_force_state_pcie(rdev, levels);
3741 if (ret)
3742 return ret;
3743 for (i = 0; i < rdev->usec_timeout; i++) {
3744 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3745 CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3746 if (tmp == levels)
3747 break;
3748 udelay(1);
3749 }
3750 }
3751 } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
3752 if (!pi->sclk_dpm_key_disabled) {
3753 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel);
3754 if (smc_result != PPSMC_Result_OK)
3755 return -EINVAL;
3756 }
3757 if (!pi->mclk_dpm_key_disabled) {
3758 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel);
3759 if (smc_result != PPSMC_Result_OK)
3760 return -EINVAL;
3761 }
3762 if (!pi->pcie_dpm_key_disabled) {
3763 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel);
3764 if (smc_result != PPSMC_Result_OK)
3765 return -EINVAL;
3766 }
3767 }
3768
3769 rdev->pm.dpm.forced_level = level;
3770
3771 return 0;
3772}
3773
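/* Append derived MC register slots (EMRS, MRS, MRS1 and, for DDR3, the auto command) to the table copied from the VBIOS, merging live register contents with per-entry data. */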
3774static int ci_set_mc_special_registers(struct radeon_device *rdev,
3775 struct ci_mc_reg_table *table)
3776{
3777 struct ci_power_info *pi = ci_get_pi(rdev);
3778 u8 i, j, k;
3779 u32 temp_reg;
3780
3781 for (i = 0, j = table->last; i < table->last; i++) {
3782 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3783 return -EINVAL;
3784 switch (table->mc_reg_address[i].s1 << 2) {
3785 case MC_SEQ_MISC1:
3786 temp_reg = RREG32(MC_PMG_CMD_EMRS);
3787 table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
3788 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3789 for (k = 0; k < table->num_entries; k++) {
3790 table->mc_reg_table_entry[k].mc_data[j] =
3791 ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
3792 }
3793 j++;
3794 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3795 return -EINVAL;
3796
3797 temp_reg = RREG32(MC_PMG_CMD_MRS);
3798 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
3799 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3800 for (k = 0; k < table->num_entries; k++) {
3801 table->mc_reg_table_entry[k].mc_data[j] =
3802 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3803 if (!pi->mem_gddr5)
3804 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
3805 }
3806 j++;
3807 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3808 return -EINVAL;
3809
3810 if (!pi->mem_gddr5) {
3811 table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
3812 table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
3813 for (k = 0; k < table->num_entries; k++) {
3814 table->mc_reg_table_entry[k].mc_data[j] =
3815 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
3816 }
3817 j++;
3818 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3819 return -EINVAL;
3820 }
3821 break;
3822 case MC_SEQ_RESERVE_M:
3823 temp_reg = RREG32(MC_PMG_CMD_MRS1);
3824 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
3825 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3826 for (k = 0; k < table->num_entries; k++) {
3827 table->mc_reg_table_entry[k].mc_data[j] =
3828 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3829 }
3830 j++;
3831 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3832 return -EINVAL;
3833 break;
3834 default:
3835 break;
3836 }
3837
3838 }
3839
3840 table->last = j;
3841
3842 return 0;
3843}
3844
3845static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
3846{
3847 bool result = true;
3848
3849 switch (in_reg) {
3850 case MC_SEQ_RAS_TIMING >> 2:
3851 *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
3852 break;
3853 case MC_SEQ_DLL_STBY >> 2:
3854 *out_reg = MC_SEQ_DLL_STBY_LP >> 2;
3855 break;
3856 case MC_SEQ_G5PDX_CMD0 >> 2:
3857 *out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
3858 break;
3859 case MC_SEQ_G5PDX_CMD1 >> 2:
3860 *out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
3861 break;
3862 case MC_SEQ_G5PDX_CTRL >> 2:
3863 *out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
3864 break;
3865 case MC_SEQ_CAS_TIMING >> 2:
3866 *out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
3867 break;
3868 case MC_SEQ_MISC_TIMING >> 2:
3869 *out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
3870 break;
3871 case MC_SEQ_MISC_TIMING2 >> 2:
3872 *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
3873 break;
3874 case MC_SEQ_PMG_DVS_CMD >> 2:
3875 *out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
3876 break;
3877 case MC_SEQ_PMG_DVS_CTL >> 2:
3878 *out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
3879 break;
3880 case MC_SEQ_RD_CTL_D0 >> 2:
3881 *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
3882 break;
3883 case MC_SEQ_RD_CTL_D1 >> 2:
3884 *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
3885 break;
3886 case MC_SEQ_WR_CTL_D0 >> 2:
3887 *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
3888 break;
3889 case MC_SEQ_WR_CTL_D1 >> 2:
3890 *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
3891 break;
3892 case MC_PMG_CMD_EMRS >> 2:
3893 *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3894 break;
3895 case MC_PMG_CMD_MRS >> 2:
3896 *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3897 break;
3898 case MC_PMG_CMD_MRS1 >> 2:
3899 *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3900 break;
3901 case MC_SEQ_PMG_TIMING >> 2:
3902 *out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
3903 break;
3904 case MC_PMG_CMD_MRS2 >> 2:
3905 *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
3906 break;
3907 case MC_SEQ_WR_CTL_2 >> 2:
3908 *out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
3909 break;
3910 default:
3911 result = false;
3912 break;
3913 }
3914
3915 return result;
3916}
3917
3918static void ci_set_valid_flag(struct ci_mc_reg_table *table)
3919{
3920 u8 i, j;
3921
3922 for (i = 0; i < table->last; i++) {
3923 for (j = 1; j < table->num_entries; j++) {
3924 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
3925 table->mc_reg_table_entry[j].mc_data[i]) {
3926 table->valid_flag |= 1 << i;
3927 break;
3928 }
3929 }
3930 }
3931}
3932
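/*
 * Fill in the s0 (shadow) address for every register column, falling back
 * to the s1 address when ci_check_s0_mc_reg_index() knows no *_LP variant.
 */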
3933static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
3934{
3935 u32 i;
3936 u16 address;
3937
3938 for (i = 0; i < table->last; i++) {
3939 table->mc_reg_address[i].s0 =
3940 ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
3941 address : table->mc_reg_address[i].s1;
3942 }
3943}
3944
3945static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
3946 struct ci_mc_reg_table *ci_table)
3947{
3948 u8 i, j;
3949
3950 if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3951 return -EINVAL;
3952 if (table->num_entries > MAX_AC_TIMING_ENTRIES)
3953 return -EINVAL;
3954
3955 for (i = 0; i < table->last; i++)
3956 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
3957
3958 ci_table->last = table->last;
3959
3960 for (i = 0; i < table->num_entries; i++) {
3961 ci_table->mc_reg_table_entry[i].mclk_max =
3962 table->mc_reg_table_entry[i].mclk_max;
3963 for (j = 0; j < table->last; j++)
3964 ci_table->mc_reg_table_entry[i].mc_data[j] =
3965 table->mc_reg_table_entry[i].mc_data[j];
3966 }
3967 ci_table->num_entries = table->num_entries;
3968
3969 return 0;
3970}
3971
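/*
 * Build the driver's MC register table for dynamic AC timing: mirror the
 * live MC timing registers into their *_LP shadows, pull the AC timing
 * table from the vbios, remap the shadow addresses, append the special
 * MRS/EMRS registers and finally compute the valid-column mask.  On any
 * failure the caller simply disables dynamic AC timing.
 */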
3972static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
3973{
3974 struct ci_power_info *pi = ci_get_pi(rdev);
3975 struct atom_mc_reg_table *table;
3976 struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
3977 u8 module_index = rv770_get_memory_module_index(rdev);
3978 int ret;
3979
3980 table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
3981 if (!table)
3982 return -ENOMEM;
3983
3984 WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
3985 WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
3986 WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
3987 WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
3988 WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
3989 WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
3990 WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
3991 WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
3992 WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
3993 WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
3994 WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
3995 WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
3996 WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
3997 WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
3998 WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
3999 WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
4000 WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
4001 WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
4002 WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
4003 WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
4004
4005 ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
4006 if (ret)
4007 goto init_mc_done;
4008
4009 ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4010 if (ret)
4011 goto init_mc_done;
4012
4013 ci_set_s0_mc_reg_index(ci_table);
4014
4015 ret = ci_set_mc_special_registers(rdev, ci_table);
4016 if (ret)
4017 goto init_mc_done;
4018
4019 ci_set_valid_flag(ci_table);
4020
4021init_mc_done:
4022 kfree(table);
4023
4024 return ret;
4025}
4026
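/*
 * Copy the addresses of the valid (changing) register columns into the SMC
 * table in big-endian byte order; fails if more columns are valid than the
 * SMC-side array can hold.
 */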
4027static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
4028 SMU7_Discrete_MCRegisters *mc_reg_table)
4029{
4030 struct ci_power_info *pi = ci_get_pi(rdev);
4031 u32 i, j;
4032
4033 for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4034 if (pi->mc_reg_table.valid_flag & (1 << j)) {
4035 if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4036 return -EINVAL;
4037 mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4038 mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4039 i++;
4040 }
4041 }
4042
4043 mc_reg_table->last = (u8)i;
4044
4045 return 0;
4046}
4047
4048static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4049 SMU7_Discrete_MCRegisterSet *data,
4050 u32 num_entries, u32 valid_flag)
4051{
4052 u32 i, j;
4053
4054 for (i = 0, j = 0; j < num_entries; j++) {
4055 if (valid_flag & (1 << j)) {
4056 data->value[i] = cpu_to_be32(entry->mc_data[j]);
4057 i++;
4058 }
4059 }
4060}
4061
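/*
 * Pick the first AC timing entry whose mclk_max covers the requested memory
 * clock (falling back to the highest entry) and pack its valid columns into
 * the SMC register set.
 */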
4062static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
4063 const u32 memory_clock,
4064 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4065{
4066 struct ci_power_info *pi = ci_get_pi(rdev);
4067 u32 i = 0;
4068
4069	for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
4070 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4071 break;
4072 }
4073
4074 if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4075 --i;
4076
4077 ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4078 mc_reg_table_data, pi->mc_reg_table.last,
4079 pi->mc_reg_table.valid_flag);
4080}
4081
4082static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
4083 SMU7_Discrete_MCRegisters *mc_reg_table)
4084{
4085 struct ci_power_info *pi = ci_get_pi(rdev);
4086 u32 i;
4087
4088 for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4089 ci_convert_mc_reg_table_entry_to_smc(rdev,
4090 pi->dpm_table.mclk_table.dpm_levels[i].value,
4091 &mc_reg_table->data[i]);
4092}
4093
4094static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
4095{
4096 struct ci_power_info *pi = ci_get_pi(rdev);
4097 int ret;
4098
4099 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4100
4101 ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
4102 if (ret)
4103 return ret;
4104 ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4105
4106 return ci_copy_bytes_to_smc(rdev,
4107 pi->mc_reg_table_start,
4108 (u8 *)&pi->smc_mc_reg_table,
4109 sizeof(SMU7_Discrete_MCRegisters),
4110 pi->sram_end);
4111}
4112
4113static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
4114{
4115 struct ci_power_info *pi = ci_get_pi(rdev);
4116
4117 if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4118 return 0;
4119
4120 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4121
4122 ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4123
4124 return ci_copy_bytes_to_smc(rdev,
4125 pi->mc_reg_table_start +
4126 offsetof(SMU7_Discrete_MCRegisters, data[0]),
4127 (u8 *)&pi->smc_mc_reg_table.data[0],
4128 sizeof(SMU7_Discrete_MCRegisterSet) *
4129 pi->dpm_table.mclk_table.count,
4130 pi->sram_end);
4131}
4132
4133static void ci_enable_voltage_control(struct radeon_device *rdev)
4134{
4135 u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
4136
4137 tmp |= VOLT_PWRMGT_EN;
4138 WREG32_SMC(GENERAL_PWRMGT, tmp);
4139}
4140
4141static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
4142 struct radeon_ps *radeon_state)
4143{
4144 struct ci_ps *state = ci_get_ps(radeon_state);
4145 int i;
4146 u16 pcie_speed, max_speed = 0;
4147
4148 for (i = 0; i < state->performance_level_count; i++) {
4149 pcie_speed = state->performance_levels[i].pcie_gen;
4150 if (max_speed < pcie_speed)
4151 max_speed = pcie_speed;
4152 }
4153
4154 return max_speed;
4155}
4156
4157static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
4158{
4159 u32 speed_cntl = 0;
4160
4161 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
4162 speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
4163
4164 return (u16)speed_cntl;
4165}
4166
4167static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
4168{
4169 u32 link_width = 0;
4170
4171 link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
4172 link_width >>= LC_LINK_WIDTH_RD_SHIFT;
4173
4174 switch (link_width) {
4175 case RADEON_PCIE_LC_LINK_WIDTH_X1:
4176 return 1;
4177 case RADEON_PCIE_LC_LINK_WIDTH_X2:
4178 return 2;
4179 case RADEON_PCIE_LC_LINK_WIDTH_X4:
4180 return 4;
4181 case RADEON_PCIE_LC_LINK_WIDTH_X8:
4182 return 8;
4183 case RADEON_PCIE_LC_LINK_WIDTH_X12:
4184 /* not actually supported */
4185 return 12;
4186 case RADEON_PCIE_LC_LINK_WIDTH_X0:
4187 case RADEON_PCIE_LC_LINK_WIDTH_X16:
4188 default:
4189 return 16;
4190 }
4191}
4192
4193static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
4194 struct radeon_ps *radeon_new_state,
4195 struct radeon_ps *radeon_current_state)
4196{
4197 struct ci_power_info *pi = ci_get_pi(rdev);
4198 enum radeon_pcie_gen target_link_speed =
4199 ci_get_maximum_link_speed(rdev, radeon_new_state);
4200 enum radeon_pcie_gen current_link_speed;
4201
4202 if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
4203 current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
4204 else
4205 current_link_speed = pi->force_pcie_gen;
4206
4207 pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
4208 pi->pspp_notify_required = false;
4209 if (target_link_speed > current_link_speed) {
4210 switch (target_link_speed) {
4211#ifdef CONFIG_ACPI
4212 case RADEON_PCIE_GEN3:
4213 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
4214 break;
4215 pi->force_pcie_gen = RADEON_PCIE_GEN2;
4216 if (current_link_speed == RADEON_PCIE_GEN2)
4217 break;
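			/* fall through - retry with a GEN2 request */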
4218 case RADEON_PCIE_GEN2:
4219 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
4220 break;
4221#endif
4222 default:
4223 pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
4224 break;
4225 }
4226 } else {
4227 if (target_link_speed < current_link_speed)
4228 pi->pspp_notify_required = true;
4229 }
4230}
4231
4232static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
4233 struct radeon_ps *radeon_new_state,
4234 struct radeon_ps *radeon_current_state)
4235{
4236 struct ci_power_info *pi = ci_get_pi(rdev);
4237 enum radeon_pcie_gen target_link_speed =
4238 ci_get_maximum_link_speed(rdev, radeon_new_state);
4239 u8 request;
4240
4241 if (pi->pspp_notify_required) {
4242 if (target_link_speed == RADEON_PCIE_GEN3)
4243 request = PCIE_PERF_REQ_PECI_GEN3;
4244 else if (target_link_speed == RADEON_PCIE_GEN2)
4245 request = PCIE_PERF_REQ_PECI_GEN2;
4246 else
4247 request = PCIE_PERF_REQ_PECI_GEN1;
4248
4249 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
4250 (ci_get_current_pcie_speed(rdev) > 0))
4251 return;
4252
4253#ifdef CONFIG_ACPI
4254 radeon_acpi_pcie_performance_request(rdev, request, false);
4255#endif
4256 }
4257}
4258
4259static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
4260{
4261 struct ci_power_info *pi = ci_get_pi(rdev);
4262 struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
4263 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
4264 struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
4265 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
4266 struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
4267 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
4268
4269 if (allowed_sclk_vddc_table == NULL)
4270 return -EINVAL;
4271 if (allowed_sclk_vddc_table->count < 1)
4272 return -EINVAL;
4273 if (allowed_mclk_vddc_table == NULL)
4274 return -EINVAL;
4275 if (allowed_mclk_vddc_table->count < 1)
4276 return -EINVAL;
4277 if (allowed_mclk_vddci_table == NULL)
4278 return -EINVAL;
4279 if (allowed_mclk_vddci_table->count < 1)
4280 return -EINVAL;
4281
4282 pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
4283 pi->max_vddc_in_pp_table =
4284 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4285
4286 pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
4287 pi->max_vddci_in_pp_table =
4288 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4289
4290 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
4291 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4292 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
4293 allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4294 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
4295 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4296 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
4297 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4298
4299 return 0;
4300}
4301
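/*
 * If the given voltage matches a known leakage ID, substitute the actual
 * voltage recorded for that ID in the leakage table.
 */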
4302static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
4303{
4304 struct ci_power_info *pi = ci_get_pi(rdev);
4305 struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
4306 u32 leakage_index;
4307
4308 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4309 if (leakage_table->leakage_id[leakage_index] == *vddc) {
4310 *vddc = leakage_table->actual_voltage[leakage_index];
4311 break;
4312 }
4313 }
4314}
4315
4316static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
4317{
4318 struct ci_power_info *pi = ci_get_pi(rdev);
4319 struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
4320 u32 leakage_index;
4321
4322 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4323 if (leakage_table->leakage_id[leakage_index] == *vddci) {
4324 *vddci = leakage_table->actual_voltage[leakage_index];
4325 break;
4326 }
4327 }
4328}
4329
4330static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4331 struct radeon_clock_voltage_dependency_table *table)
4332{
4333 u32 i;
4334
4335 if (table) {
4336 for (i = 0; i < table->count; i++)
4337 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4338 }
4339}
4340
4341static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
4342 struct radeon_clock_voltage_dependency_table *table)
4343{
4344 u32 i;
4345
4346 if (table) {
4347 for (i = 0; i < table->count; i++)
4348 ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
4349 }
4350}
4351
4352static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4353 struct radeon_vce_clock_voltage_dependency_table *table)
4354{
4355 u32 i;
4356
4357 if (table) {
4358 for (i = 0; i < table->count; i++)
4359 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4360 }
4361}
4362
4363static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4364 struct radeon_uvd_clock_voltage_dependency_table *table)
4365{
4366 u32 i;
4367
4368 if (table) {
4369 for (i = 0; i < table->count; i++)
4370 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4371 }
4372}
4373
4374static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
4375 struct radeon_phase_shedding_limits_table *table)
4376{
4377 u32 i;
4378
4379 if (table) {
4380 for (i = 0; i < table->count; i++)
4381 ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
4382 }
4383}
4384
4385static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
4386 struct radeon_clock_and_voltage_limits *table)
4387{
4388 if (table) {
4389 ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
4390 ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
4391 }
4392}
4393
4394static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
4395 struct radeon_cac_leakage_table *table)
4396{
4397 u32 i;
4398
4399 if (table) {
4400 for (i = 0; i < table->count; i++)
4401 ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
4402 }
4403}
4404
4405static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
4406{
4407
4408 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4409 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
4410 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4411 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
4412 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4413 &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
4414 ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
4415 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
4416 ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4417 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
4418 ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4419 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
4420 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4421 &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
4422 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4423 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
4424 ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
4425 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
4426 ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
4427 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
4428 ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
4429 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
4430 ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
4431 &rdev->pm.dpm.dyn_state.cac_leakage_table);
4432
4433}
4434
4435static void ci_get_memory_type(struct radeon_device *rdev)
4436{
4437 struct ci_power_info *pi = ci_get_pi(rdev);
4438 u32 tmp;
4439
4440 tmp = RREG32(MC_SEQ_MISC0);
4441
4442 if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
4443 MC_SEQ_MISC0_GDDR5_VALUE)
4444 pi->mem_gddr5 = true;
4445 else
4446 pi->mem_gddr5 = false;
4447
4448}
4449
4450void ci_update_current_ps(struct radeon_device *rdev,
4451 struct radeon_ps *rps)
4452{
4453 struct ci_ps *new_ps = ci_get_ps(rps);
4454 struct ci_power_info *pi = ci_get_pi(rdev);
4455
4456 pi->current_rps = *rps;
4457 pi->current_ps = *new_ps;
4458 pi->current_rps.ps_priv = &pi->current_ps;
4459}
4460
4461void ci_update_requested_ps(struct radeon_device *rdev,
4462 struct radeon_ps *rps)
4463{
4464 struct ci_ps *new_ps = ci_get_ps(rps);
4465 struct ci_power_info *pi = ci_get_pi(rdev);
4466
4467 pi->requested_rps = *rps;
4468 pi->requested_ps = *new_ps;
4469 pi->requested_rps.ps_priv = &pi->requested_ps;
4470}
4471
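/*
 * Take a private copy of the requested power state and run the state
 * adjustment rules on it; the adjusted copy is what ci_dpm_set_power_state()
 * later programs.
 */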
4472int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
4473{
4474 struct ci_power_info *pi = ci_get_pi(rdev);
4475 struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
4476 struct radeon_ps *new_ps = &requested_ps;
4477
4478 ci_update_requested_ps(rdev, new_ps);
4479
4480 ci_apply_state_adjust_rules(rdev, &pi->requested_rps);
4481
4482 return 0;
4483}
4484
4485void ci_dpm_post_set_power_state(struct radeon_device *rdev)
4486{
4487 struct ci_power_info *pi = ci_get_pi(rdev);
4488 struct radeon_ps *new_ps = &pi->requested_rps;
4489
4490 ci_update_current_ps(rdev, new_ps);
4491}
4492
4493
4494void ci_dpm_setup_asic(struct radeon_device *rdev)
4495{
4496 ci_read_clock_registers(rdev);
4497 ci_get_memory_type(rdev);
4498 ci_enable_acpi_power_management(rdev);
4499 ci_init_sclk_t(rdev);
4500}
4501
4502int ci_dpm_enable(struct radeon_device *rdev)
4503{
4504 struct ci_power_info *pi = ci_get_pi(rdev);
4505 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
4506 int ret;
4507
4508 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
4509 RADEON_CG_BLOCK_MC |
4510 RADEON_CG_BLOCK_SDMA |
4511 RADEON_CG_BLOCK_BIF |
4512 RADEON_CG_BLOCK_UVD |
4513 RADEON_CG_BLOCK_HDP), false);
4514
4515 if (ci_is_smc_running(rdev))
4516 return -EINVAL;
4517 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
4518 ci_enable_voltage_control(rdev);
4519 ret = ci_construct_voltage_tables(rdev);
4520 if (ret) {
4521 DRM_ERROR("ci_construct_voltage_tables failed\n");
4522 return ret;
4523 }
4524 }
4525 if (pi->caps_dynamic_ac_timing) {
4526 ret = ci_initialize_mc_reg_table(rdev);
4527 if (ret)
4528 pi->caps_dynamic_ac_timing = false;
4529 }
4530 if (pi->dynamic_ss)
4531 ci_enable_spread_spectrum(rdev, true);
4532 if (pi->thermal_protection)
4533 ci_enable_thermal_protection(rdev, true);
4534 ci_program_sstp(rdev);
4535 ci_enable_display_gap(rdev);
4536 ci_program_vc(rdev);
4537 ret = ci_upload_firmware(rdev);
4538 if (ret) {
4539 DRM_ERROR("ci_upload_firmware failed\n");
4540 return ret;
4541 }
4542 ret = ci_process_firmware_header(rdev);
4543 if (ret) {
4544 DRM_ERROR("ci_process_firmware_header failed\n");
4545 return ret;
4546 }
4547 ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
4548 if (ret) {
4549 DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
4550 return ret;
4551 }
4552 ret = ci_init_smc_table(rdev);
4553 if (ret) {
4554 DRM_ERROR("ci_init_smc_table failed\n");
4555 return ret;
4556 }
4557 ret = ci_init_arb_table_index(rdev);
4558 if (ret) {
4559 DRM_ERROR("ci_init_arb_table_index failed\n");
4560 return ret;
4561 }
4562 if (pi->caps_dynamic_ac_timing) {
4563 ret = ci_populate_initial_mc_reg_table(rdev);
4564 if (ret) {
4565 DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
4566 return ret;
4567 }
4568 }
4569 ret = ci_populate_pm_base(rdev);
4570 if (ret) {
4571 DRM_ERROR("ci_populate_pm_base failed\n");
4572 return ret;
4573 }
4574 ci_dpm_start_smc(rdev);
4575 ci_enable_vr_hot_gpio_interrupt(rdev);
4576 ret = ci_notify_smc_display_change(rdev, false);
4577 if (ret) {
4578 DRM_ERROR("ci_notify_smc_display_change failed\n");
4579 return ret;
4580 }
4581 ci_enable_sclk_control(rdev, true);
4582 ret = ci_enable_ulv(rdev, true);
4583 if (ret) {
4584 DRM_ERROR("ci_enable_ulv failed\n");
4585 return ret;
4586 }
4587 ret = ci_enable_ds_master_switch(rdev, true);
4588 if (ret) {
4589 DRM_ERROR("ci_enable_ds_master_switch failed\n");
4590 return ret;
4591 }
4592 ret = ci_start_dpm(rdev);
4593 if (ret) {
4594 DRM_ERROR("ci_start_dpm failed\n");
4595 return ret;
4596 }
4597 ret = ci_enable_didt(rdev, true);
4598 if (ret) {
4599 DRM_ERROR("ci_enable_didt failed\n");
4600 return ret;
4601 }
4602 ret = ci_enable_smc_cac(rdev, true);
4603 if (ret) {
4604 DRM_ERROR("ci_enable_smc_cac failed\n");
4605 return ret;
4606 }
4607 ret = ci_enable_power_containment(rdev, true);
4608 if (ret) {
4609 DRM_ERROR("ci_enable_power_containment failed\n");
4610 return ret;
4611 }
4612 if (rdev->irq.installed &&
4613 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
4614#if 0
4615 PPSMC_Result result;
4616#endif
4617 ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
4618 if (ret) {
4619 DRM_ERROR("ci_set_thermal_temperature_range failed\n");
4620 return ret;
4621 }
4622 rdev->irq.dpm_thermal = true;
4623 radeon_irq_set(rdev);
4624#if 0
4625 result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
4626
4627 if (result != PPSMC_Result_OK)
4628 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
4629#endif
4630 }
4631
4632 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
4633
4634 ci_dpm_powergate_uvd(rdev, true);
4635
4636 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
4637 RADEON_CG_BLOCK_MC |
4638 RADEON_CG_BLOCK_SDMA |
4639 RADEON_CG_BLOCK_BIF |
4640 RADEON_CG_BLOCK_UVD |
4641 RADEON_CG_BLOCK_HDP), true);
4642
4643 ci_update_current_ps(rdev, boot_ps);
4644
4645 return 0;
4646}
4647
4648void ci_dpm_disable(struct radeon_device *rdev)
4649{
4650 struct ci_power_info *pi = ci_get_pi(rdev);
4651 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
4652
4653 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
4654 RADEON_CG_BLOCK_MC |
4655 RADEON_CG_BLOCK_SDMA |
4656 RADEON_CG_BLOCK_UVD |
4657 RADEON_CG_BLOCK_HDP), false);
4658
4659 ci_dpm_powergate_uvd(rdev, false);
4660
4661 if (!ci_is_smc_running(rdev))
4662 return;
4663
4664 if (pi->thermal_protection)
4665 ci_enable_thermal_protection(rdev, false);
4666 ci_enable_power_containment(rdev, false);
4667 ci_enable_smc_cac(rdev, false);
4668 ci_enable_didt(rdev, false);
4669 ci_enable_spread_spectrum(rdev, false);
4670 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
4671 ci_stop_dpm(rdev);
4672 ci_enable_ds_master_switch(rdev, true);
4673 ci_enable_ulv(rdev, false);
4674 ci_clear_vc(rdev);
4675 ci_reset_to_default(rdev);
4676 ci_dpm_stop_smc(rdev);
4677 ci_force_switch_to_arb_f0(rdev);
4678
4679 ci_update_current_ps(rdev, boot_ps);
4680}
4681
4682int ci_dpm_set_power_state(struct radeon_device *rdev)
4683{
4684 struct ci_power_info *pi = ci_get_pi(rdev);
4685 struct radeon_ps *new_ps = &pi->requested_rps;
4686 struct radeon_ps *old_ps = &pi->current_rps;
4687 int ret;
4688
4689 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
4690 RADEON_CG_BLOCK_MC |
4691 RADEON_CG_BLOCK_SDMA |
4692 RADEON_CG_BLOCK_BIF |
4693 RADEON_CG_BLOCK_UVD |
4694 RADEON_CG_BLOCK_HDP), false);
4695
4696 ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
4697 if (pi->pcie_performance_request)
4698 ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
4699 ret = ci_freeze_sclk_mclk_dpm(rdev);
4700 if (ret) {
4701 DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
4702 return ret;
4703 }
4704 ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
4705 if (ret) {
4706 DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
4707 return ret;
4708 }
4709 ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
4710 if (ret) {
4711 DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
4712 return ret;
4713 }
4714#if 0
4715 ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
4716 if (ret) {
4717 DRM_ERROR("ci_update_vce_dpm failed\n");
4718 return ret;
4719 }
4720#endif
4721 ret = ci_update_sclk_t(rdev);
4722 if (ret) {
4723 DRM_ERROR("ci_update_sclk_t failed\n");
4724 return ret;
4725 }
4726 if (pi->caps_dynamic_ac_timing) {
4727 ret = ci_update_and_upload_mc_reg_table(rdev);
4728 if (ret) {
4729 DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
4730 return ret;
4731 }
4732 }
4733 ret = ci_program_memory_timing_parameters(rdev);
4734 if (ret) {
4735 DRM_ERROR("ci_program_memory_timing_parameters failed\n");
4736 return ret;
4737 }
4738 ret = ci_unfreeze_sclk_mclk_dpm(rdev);
4739 if (ret) {
4740 DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
4741 return ret;
4742 }
4743 ret = ci_upload_dpm_level_enable_mask(rdev);
4744 if (ret) {
4745 DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
4746 return ret;
4747 }
4748 if (pi->pcie_performance_request)
4749 ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
4750
4751 ret = ci_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
4752 if (ret) {
4753 DRM_ERROR("ci_dpm_force_performance_level failed\n");
4754 return ret;
4755 }
4756
4757 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
4758 RADEON_CG_BLOCK_MC |
4759 RADEON_CG_BLOCK_SDMA |
4760 RADEON_CG_BLOCK_BIF |
4761 RADEON_CG_BLOCK_UVD |
4762 RADEON_CG_BLOCK_HDP), true);
4763
4764 return 0;
4765}
4766
4767int ci_dpm_power_control_set_level(struct radeon_device *rdev)
4768{
4769 return ci_power_control_set_level(rdev);
4770}
4771
4772void ci_dpm_reset_asic(struct radeon_device *rdev)
4773{
4774 ci_set_boot_state(rdev);
4775}
4776
4777void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
4778{
4779 ci_program_display_gap(rdev);
4780}
4781
4782union power_info {
4783 struct _ATOM_POWERPLAY_INFO info;
4784 struct _ATOM_POWERPLAY_INFO_V2 info_2;
4785 struct _ATOM_POWERPLAY_INFO_V3 info_3;
4786 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
4787 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
4788 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
4789};
4790
4791union pplib_clock_info {
4792 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
4793 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
4794 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
4795 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
4796 struct _ATOM_PPLIB_SI_CLOCK_INFO si;
4797 struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
4798};
4799
4800union pplib_power_state {
4801 struct _ATOM_PPLIB_STATE v1;
4802 struct _ATOM_PPLIB_STATE_V2 v2;
4803};
4804
4805static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
4806 struct radeon_ps *rps,
4807 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
4808 u8 table_rev)
4809{
4810 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
4811 rps->class = le16_to_cpu(non_clock_info->usClassification);
4812 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
4813
4814 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
4815 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
4816 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
4817 } else {
4818 rps->vclk = 0;
4819 rps->dclk = 0;
4820 }
4821
4822 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
4823 rdev->pm.dpm.boot_ps = rps;
4824 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
4825 rdev->pm.dpm.uvd_ps = rps;
4826}
4827
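/*
 * Fill one performance level from an ATOM pplib clock-info entry: engine and
 * memory clock, supported PCIE gen/lane width, plus bookkeeping for ACPI,
 * ULV and boot states and the per-class PCIE min/max tracking.
 */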
4828static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
4829 struct radeon_ps *rps, int index,
4830 union pplib_clock_info *clock_info)
4831{
4832 struct ci_power_info *pi = ci_get_pi(rdev);
4833 struct ci_ps *ps = ci_get_ps(rps);
4834 struct ci_pl *pl = &ps->performance_levels[index];
4835
4836 ps->performance_level_count = index + 1;
4837
4838 pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
4839 pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
4840 pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
4841 pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
4842
4843 pl->pcie_gen = r600_get_pcie_gen_support(rdev,
4844 pi->sys_pcie_mask,
4845 pi->vbios_boot_state.pcie_gen_bootup_value,
4846 clock_info->ci.ucPCIEGen);
4847 pl->pcie_lane = r600_get_pcie_lane_support(rdev,
4848 pi->vbios_boot_state.pcie_lane_bootup_value,
4849 le16_to_cpu(clock_info->ci.usPCIELane));
4850
4851 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
4852 pi->acpi_pcie_gen = pl->pcie_gen;
4853 }
4854
4855 if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
4856 pi->ulv.supported = true;
4857 pi->ulv.pl = *pl;
4858 pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
4859 }
4860
4861 /* patch up boot state */
4862 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
4863 pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
4864 pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
4865 pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
4866 pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
4867 }
4868
4869 switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
4870 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
4871 pi->use_pcie_powersaving_levels = true;
4872 if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
4873 pi->pcie_gen_powersaving.max = pl->pcie_gen;
4874 if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
4875 pi->pcie_gen_powersaving.min = pl->pcie_gen;
4876 if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
4877 pi->pcie_lane_powersaving.max = pl->pcie_lane;
4878 if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
4879 pi->pcie_lane_powersaving.min = pl->pcie_lane;
4880 break;
4881 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
4882 pi->use_pcie_performance_levels = true;
4883 if (pi->pcie_gen_performance.max < pl->pcie_gen)
4884 pi->pcie_gen_performance.max = pl->pcie_gen;
4885 if (pi->pcie_gen_performance.min > pl->pcie_gen)
4886 pi->pcie_gen_performance.min = pl->pcie_gen;
4887 if (pi->pcie_lane_performance.max < pl->pcie_lane)
4888 pi->pcie_lane_performance.max = pl->pcie_lane;
4889 if (pi->pcie_lane_performance.min > pl->pcie_lane)
4890 pi->pcie_lane_performance.min = pl->pcie_lane;
4891 break;
4892 default:
4893 break;
4894 }
4895}
4896
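/*
 * Walk the ATOM PowerPlay state array and build the radeon_ps/ci_ps arrays:
 * one ci_ps per state, with up to CISLANDS_MAX_HARDWARE_POWERLEVELS levels
 * parsed from the referenced clock-info entries.
 */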
4897static int ci_parse_power_table(struct radeon_device *rdev)
4898{
4899 struct radeon_mode_info *mode_info = &rdev->mode_info;
4900 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
4901 union pplib_power_state *power_state;
4902 int i, j, k, non_clock_array_index, clock_array_index;
4903 union pplib_clock_info *clock_info;
4904 struct _StateArray *state_array;
4905 struct _ClockInfoArray *clock_info_array;
4906 struct _NonClockInfoArray *non_clock_info_array;
4907 union power_info *power_info;
4908 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
4909 u16 data_offset;
4910 u8 frev, crev;
4911 u8 *power_state_offset;
4912 struct ci_ps *ps;
4913
4914 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
4915 &frev, &crev, &data_offset))
4916 return -EINVAL;
4917 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
4918
4919 state_array = (struct _StateArray *)
4920 (mode_info->atom_context->bios + data_offset +
4921 le16_to_cpu(power_info->pplib.usStateArrayOffset));
4922 clock_info_array = (struct _ClockInfoArray *)
4923 (mode_info->atom_context->bios + data_offset +
4924 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
4925 non_clock_info_array = (struct _NonClockInfoArray *)
4926 (mode_info->atom_context->bios + data_offset +
4927 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
4928
4929 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
4930 state_array->ucNumEntries, GFP_KERNEL);
4931 if (!rdev->pm.dpm.ps)
4932 return -ENOMEM;
4933 power_state_offset = (u8 *)state_array->states;
4934 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
4935 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
4936 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
4937 for (i = 0; i < state_array->ucNumEntries; i++) {
4938 u8 *idx;
4939 power_state = (union pplib_power_state *)power_state_offset;
4940 non_clock_array_index = power_state->v2.nonClockInfoIndex;
4941 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
4942 &non_clock_info_array->nonClockInfo[non_clock_array_index];
4943 if (!rdev->pm.power_state[i].clock_info)
4944 return -EINVAL;
4945 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
4946 if (ps == NULL) {
4947 kfree(rdev->pm.dpm.ps);
4948 return -ENOMEM;
4949 }
4950 rdev->pm.dpm.ps[i].ps_priv = ps;
4951 ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
4952 non_clock_info,
4953 non_clock_info_array->ucEntrySize);
4954 k = 0;
4955 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
4956 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
4957 clock_array_index = idx[j];
4958 if (clock_array_index >= clock_info_array->ucNumEntries)
4959 continue;
4960 if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
4961 break;
4962 clock_info = (union pplib_clock_info *)
4963 ((u8 *)&clock_info_array->clockInfo[0] +
4964 (clock_array_index * clock_info_array->ucEntrySize));
4965 ci_parse_pplib_clock_info(rdev,
4966 &rdev->pm.dpm.ps[i], k,
4967 clock_info);
4968 k++;
4969 }
4970 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
4971 }
4972 rdev->pm.dpm.num_ps = state_array->ucNumEntries;
4973 return 0;
4974}
4975
4976int ci_get_vbios_boot_values(struct radeon_device *rdev,
4977 struct ci_vbios_boot_state *boot_state)
4978{
4979 struct radeon_mode_info *mode_info = &rdev->mode_info;
4980 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
4981 ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
4982 u8 frev, crev;
4983 u16 data_offset;
4984
4985 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
4986 &frev, &crev, &data_offset)) {
4987 firmware_info =
4988 (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
4989 data_offset);
4990 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
4991 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
4992 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
4993 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
4994 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
4995 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
4996 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
4997
4998 return 0;
4999 }
5000 return -EINVAL;
5001}
5002
5003void ci_dpm_fini(struct radeon_device *rdev)
5004{
5005 int i;
5006
5007 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
5008 kfree(rdev->pm.dpm.ps[i].ps_priv);
5009 }
5010 kfree(rdev->pm.dpm.ps);
5011 kfree(rdev->pm.dpm.priv);
5012 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5013 r600_free_extended_power_table(rdev);
5014}
5015
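/*
 * One-time DPM setup: allocate ci_power_info, read the vbios boot values and
 * the power/extended-power tables, patch the dependency tables for leakage,
 * and initialise the default caps, thresholds and voltage-control modes used
 * by the rest of the driver.
 */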
5016int ci_dpm_init(struct radeon_device *rdev)
5017{
5018 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5019 u16 data_offset, size;
5020 u8 frev, crev;
5021 struct ci_power_info *pi;
5022 int ret;
5023 u32 mask;
5024
5025 pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5026 if (pi == NULL)
5027 return -ENOMEM;
5028 rdev->pm.dpm.priv = pi;
5029
5030 ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
5031 if (ret)
5032 pi->sys_pcie_mask = 0;
5033 else
5034 pi->sys_pcie_mask = mask;
5035 pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
5036
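	/*
	 * Start the performance/powersaving ranges deliberately inverted
	 * (max = GEN1/x0, min = GEN3/x16) so that the min/max tracking in
	 * ci_parse_pplib_clock_info() narrows them to what the power states
	 * actually use.
	 */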
5037 pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
5038 pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
5039 pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
5040 pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;
5041
5042 pi->pcie_lane_performance.max = 0;
5043 pi->pcie_lane_performance.min = 16;
5044 pi->pcie_lane_powersaving.max = 0;
5045 pi->pcie_lane_powersaving.min = 16;
5046
5047 ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
5048 if (ret) {
5049 ci_dpm_fini(rdev);
5050 return ret;
5051 }
5052 ret = ci_parse_power_table(rdev);
5053 if (ret) {
5054 ci_dpm_fini(rdev);
5055 return ret;
5056 }
5057 ret = r600_parse_extended_power_table(rdev);
5058 if (ret) {
5059 ci_dpm_fini(rdev);
5060 return ret;
5061 }
5062
5063 pi->dll_default_on = false;
5064 pi->sram_end = SMC_RAM_END;
5065
5066 pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5067 pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5068 pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5069 pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5070 pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5071 pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5072 pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5073 pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5074
5075 pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5076
5077 pi->sclk_dpm_key_disabled = 0;
5078 pi->mclk_dpm_key_disabled = 0;
5079 pi->pcie_dpm_key_disabled = 0;
5080
5081 pi->caps_sclk_ds = true;
5082
5083 pi->mclk_strobe_mode_threshold = 40000;
5084 pi->mclk_stutter_mode_threshold = 40000;
5085 pi->mclk_edc_enable_threshold = 40000;
5086 pi->mclk_edc_wr_enable_threshold = 40000;
5087
5088 ci_initialize_powertune_defaults(rdev);
5089
5090 pi->caps_fps = false;
5091
5092 pi->caps_sclk_throttle_low_notification = false;
5093
5094 pi->caps_uvd_dpm = true;
5095
5096 ci_get_leakage_voltages(rdev);
5097 ci_patch_dependency_tables_with_leakage(rdev);
5098 ci_set_private_data_variables_based_on_pptable(rdev);
5099
5100 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5101 kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
5102 if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5103 ci_dpm_fini(rdev);
5104 return -ENOMEM;
5105 }
5106 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5107 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5108 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5109 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5110 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5111 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5112 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5113 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5114 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5115
5116 rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5117 rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5118 rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5119
5120 rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5121 rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5122 rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5123 rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5124
5125 pi->thermal_temp_setting.temperature_low = 99500;
5126 pi->thermal_temp_setting.temperature_high = 100000;
5127 pi->thermal_temp_setting.temperature_shutdown = 104000;
5128
5129 pi->uvd_enabled = false;
5130
5131 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5132 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5133 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5134 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
5135 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5136 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
5137 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5138
5139 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
5140 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
5141 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5142 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
5143 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5144 else
5145 rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
5146 }
5147
5148 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
5149 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
5150 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5151 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
5152 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5153 else
5154 rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
5155 }
5156
5157 pi->vddc_phase_shed_control = true;
5158
5159#if defined(CONFIG_ACPI)
5160 pi->pcie_performance_request =
5161 radeon_acpi_is_pcie_performance_request_supported(rdev);
5162#else
5163 pi->pcie_performance_request = false;
5164#endif
5165
5166 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
5167 &frev, &crev, &data_offset)) {
5168 pi->caps_sclk_ss_support = true;
5169 pi->caps_mclk_ss_support = true;
5170 pi->dynamic_ss = true;
5171 } else {
5172 pi->caps_sclk_ss_support = false;
5173 pi->caps_mclk_ss_support = false;
5174 pi->dynamic_ss = true;
5175 }
5176
5177 if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
5178 pi->thermal_protection = true;
5179 else
5180 pi->thermal_protection = false;
5181
5182 pi->caps_dynamic_ac_timing = true;
5183
5184 pi->uvd_power_gated = false;
5185
5186 /* make sure dc limits are valid */
5187 if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
5188 (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
5189 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
5190 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
5191
5192 return 0;
5193}
5194
5195void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
5196 struct seq_file *m)
5197{
5198 u32 sclk = ci_get_average_sclk_freq(rdev);
5199 u32 mclk = ci_get_average_mclk_freq(rdev);
5200
5201 seq_printf(m, "power level avg sclk: %u mclk: %u\n",
5202 sclk, mclk);
5203}
5204
5205void ci_dpm_print_power_state(struct radeon_device *rdev,
5206 struct radeon_ps *rps)
5207{
5208 struct ci_ps *ps = ci_get_ps(rps);
5209 struct ci_pl *pl;
5210 int i;
5211
5212 r600_dpm_print_class_info(rps->class, rps->class2);
5213 r600_dpm_print_cap_info(rps->caps);
5214 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
5215 for (i = 0; i < ps->performance_level_count; i++) {
5216 pl = &ps->performance_levels[i];
5217 printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
5218 i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
5219 }
5220 r600_dpm_print_ps_status(rdev, rps);
5221}
5222
5223u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
5224{
5225 struct ci_power_info *pi = ci_get_pi(rdev);
5226 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5227
5228 if (low)
5229 return requested_state->performance_levels[0].sclk;
5230 else
5231 return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
5232}
5233
5234u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
5235{
5236 struct ci_power_info *pi = ci_get_pi(rdev);
5237 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5238
5239 if (low)
5240 return requested_state->performance_levels[0].mclk;
5241 else
5242 return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
5243}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.h b/drivers/gpu/drm/radeon/ci_dpm.h
new file mode 100644
index 000000000000..93bbed977ffb
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ci_dpm.h
@@ -0,0 +1,332 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __CI_DPM_H__
24#define __CI_DPM_H__
25
26#include "ppsmc.h"
27
28#define SMU__NUM_SCLK_DPM_STATE 8
29#define SMU__NUM_MCLK_DPM_LEVELS 6
30#define SMU__NUM_LCLK_DPM_LEVELS 8
31#define SMU__NUM_PCIE_DPM_LEVELS 8
32#include "smu7_discrete.h"
33
34#define CISLANDS_MAX_HARDWARE_POWERLEVELS 2
35
36struct ci_pl {
37 u32 mclk;
38 u32 sclk;
39 enum radeon_pcie_gen pcie_gen;
40 u16 pcie_lane;
41};
42
43struct ci_ps {
44 u16 performance_level_count;
45 bool dc_compatible;
46 u32 sclk_t;
47 struct ci_pl performance_levels[CISLANDS_MAX_HARDWARE_POWERLEVELS];
48};
49
50struct ci_dpm_level {
51 bool enabled;
52 u32 value;
53 u32 param1;
54};
55
56#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
57#define MAX_REGULAR_DPM_NUMBER 8
58#define CISLAND_MINIMUM_ENGINE_CLOCK 800
59
60struct ci_single_dpm_table {
61 u32 count;
62 struct ci_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
63};
64
65struct ci_dpm_table {
66 struct ci_single_dpm_table sclk_table;
67 struct ci_single_dpm_table mclk_table;
68 struct ci_single_dpm_table pcie_speed_table;
69 struct ci_single_dpm_table vddc_table;
70 struct ci_single_dpm_table vddci_table;
71 struct ci_single_dpm_table mvdd_table;
72};
73
74struct ci_mc_reg_entry {
75 u32 mclk_max;
76 u32 mc_data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
77};
78
79struct ci_mc_reg_table {
80 u8 last;
81 u8 num_entries;
82 u16 valid_flag;
83 struct ci_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
84 SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
85};
86
87struct ci_ulv_parm
88{
89 bool supported;
90 u32 cg_ulv_parameter;
91 u32 volt_change_delay;
92 struct ci_pl pl;
93};
94
95#define CISLANDS_MAX_LEAKAGE_COUNT 8
96
97struct ci_leakage_voltage {
98 u16 count;
99 u16 leakage_id[CISLANDS_MAX_LEAKAGE_COUNT];
100 u16 actual_voltage[CISLANDS_MAX_LEAKAGE_COUNT];
101};
102
103struct ci_dpm_level_enable_mask {
104 u32 uvd_dpm_enable_mask;
105 u32 vce_dpm_enable_mask;
106 u32 acp_dpm_enable_mask;
107 u32 samu_dpm_enable_mask;
108 u32 sclk_dpm_enable_mask;
109 u32 mclk_dpm_enable_mask;
110 u32 pcie_dpm_enable_mask;
111};
112
113struct ci_vbios_boot_state
114{
115 u16 mvdd_bootup_value;
116 u16 vddc_bootup_value;
117 u16 vddci_bootup_value;
118 u32 sclk_bootup_value;
119 u32 mclk_bootup_value;
120 u16 pcie_gen_bootup_value;
121 u16 pcie_lane_bootup_value;
122};
123
124struct ci_clock_registers {
125 u32 cg_spll_func_cntl;
126 u32 cg_spll_func_cntl_2;
127 u32 cg_spll_func_cntl_3;
128 u32 cg_spll_func_cntl_4;
129 u32 cg_spll_spread_spectrum;
130 u32 cg_spll_spread_spectrum_2;
131 u32 dll_cntl;
132 u32 mclk_pwrmgt_cntl;
133 u32 mpll_ad_func_cntl;
134 u32 mpll_dq_func_cntl;
135 u32 mpll_func_cntl;
136 u32 mpll_func_cntl_1;
137 u32 mpll_func_cntl_2;
138 u32 mpll_ss1;
139 u32 mpll_ss2;
140};
141
142struct ci_thermal_temperature_setting {
143 s32 temperature_low;
144 s32 temperature_high;
145 s32 temperature_shutdown;
146};
147
148struct ci_pcie_perf_range {
149 u16 max;
150 u16 min;
151};
152
153enum ci_pt_config_reg_type {
154 CISLANDS_CONFIGREG_MMR = 0,
155 CISLANDS_CONFIGREG_SMC_IND,
156 CISLANDS_CONFIGREG_DIDT_IND,
157 CISLANDS_CONFIGREG_CACHE,
158 CISLANDS_CONFIGREG_MAX
159};
160
161#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001
162#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
163#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
164
165struct ci_pt_config_reg {
166 u32 offset;
167 u32 mask;
168 u32 shift;
169 u32 value;
170 enum ci_pt_config_reg_type type;
171};
172
173struct ci_pt_defaults {
174 u8 svi_load_line_en;
175 u8 svi_load_line_vddc;
176 u8 tdc_vddc_throttle_release_limit_perc;
177 u8 tdc_mawt;
178 u8 tdc_waterfall_ctl;
179 u8 dte_ambient_temp_base;
180 u32 display_cac;
181 u32 bapm_temp_gradient;
182 u16 bapmti_r[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
183 u16 bapmti_rc[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
184};
185
186#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
187#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
188#define DPMTABLE_UPDATE_SCLK 0x00000004
189#define DPMTABLE_UPDATE_MCLK 0x00000008
190
191struct ci_power_info {
192 struct ci_dpm_table dpm_table;
193 u32 voltage_control;
194 u32 mvdd_control;
195 u32 vddci_control;
196 u32 active_auto_throttle_sources;
197 struct ci_clock_registers clock_registers;
198 u16 acpi_vddc;
199 u16 acpi_vddci;
200 enum radeon_pcie_gen force_pcie_gen;
201 enum radeon_pcie_gen acpi_pcie_gen;
202 struct ci_leakage_voltage vddc_leakage;
203 struct ci_leakage_voltage vddci_leakage;
204 u16 max_vddc_in_pp_table;
205 u16 min_vddc_in_pp_table;
206 u16 max_vddci_in_pp_table;
207 u16 min_vddci_in_pp_table;
208 u32 mclk_strobe_mode_threshold;
209 u32 mclk_stutter_mode_threshold;
210 u32 mclk_edc_enable_threshold;
211 u32 mclk_edc_wr_enable_threshold;
212 struct ci_vbios_boot_state vbios_boot_state;
213 /* smc offsets */
214 u32 sram_end;
215 u32 dpm_table_start;
216 u32 soft_regs_start;
217 u32 mc_reg_table_start;
218 u32 fan_table_start;
219 u32 arb_table_start;
220 /* smc tables */
221 SMU7_Discrete_DpmTable smc_state_table;
222 SMU7_Discrete_MCRegisters smc_mc_reg_table;
223 SMU7_Discrete_PmFuses smc_powertune_table;
224 /* other stuff */
225 struct ci_mc_reg_table mc_reg_table;
226 struct atom_voltage_table vddc_voltage_table;
227 struct atom_voltage_table vddci_voltage_table;
228 struct atom_voltage_table mvdd_voltage_table;
229 struct ci_ulv_parm ulv;
230 u32 power_containment_features;
231 const struct ci_pt_defaults *powertune_defaults;
232 u32 dte_tj_offset;
233 bool vddc_phase_shed_control;
234 struct ci_thermal_temperature_setting thermal_temp_setting;
235 struct ci_dpm_level_enable_mask dpm_level_enable_mask;
236 u32 need_update_smu7_dpm_table;
237 u32 sclk_dpm_key_disabled;
238 u32 mclk_dpm_key_disabled;
239 u32 pcie_dpm_key_disabled;
240 struct ci_pcie_perf_range pcie_gen_performance;
241 struct ci_pcie_perf_range pcie_lane_performance;
242 struct ci_pcie_perf_range pcie_gen_powersaving;
243 struct ci_pcie_perf_range pcie_lane_powersaving;
244 u32 activity_target[SMU7_MAX_LEVELS_GRAPHICS];
245 u32 mclk_activity_target;
246 u32 low_sclk_interrupt_t;
247 u32 last_mclk_dpm_enable_mask;
248 u32 sys_pcie_mask;
249 /* caps */
250 bool caps_power_containment;
251 bool caps_cac;
252 bool caps_sq_ramping;
253 bool caps_db_ramping;
254 bool caps_td_ramping;
255 bool caps_tcp_ramping;
256 bool caps_fps;
257 bool caps_sclk_ds;
258 bool caps_sclk_ss_support;
259 bool caps_mclk_ss_support;
260 bool caps_uvd_dpm;
261 bool caps_vce_dpm;
262 bool caps_samu_dpm;
263 bool caps_acp_dpm;
264 bool caps_automatic_dc_transition;
265 bool caps_sclk_throttle_low_notification;
266 bool caps_dynamic_ac_timing;
267 /* flags */
268 bool thermal_protection;
269 bool pcie_performance_request;
270 bool dynamic_ss;
271 bool dll_default_on;
272 bool cac_enabled;
273 bool uvd_enabled;
274 bool battery_state;
275 bool pspp_notify_required;
276 bool mem_gddr5;
277 bool enable_bapm_feature;
278 bool enable_tdc_limit_feature;
279 bool enable_pkg_pwr_tracking_feature;
280 bool use_pcie_performance_levels;
281 bool use_pcie_powersaving_levels;
282 bool uvd_power_gated;
283 /* driver states */
284 struct radeon_ps current_rps;
285 struct ci_ps current_ps;
286 struct radeon_ps requested_rps;
287 struct ci_ps requested_ps;
288};
289
290#define CISLANDS_VOLTAGE_CONTROL_NONE 0x0
291#define CISLANDS_VOLTAGE_CONTROL_BY_GPIO 0x1
292#define CISLANDS_VOLTAGE_CONTROL_BY_SVID2 0x2
293
294#define CISLANDS_Q88_FORMAT_CONVERSION_UNIT 256
295
296#define CISLANDS_VRC_DFLT0 0x3FFFC000
297#define CISLANDS_VRC_DFLT1 0x000400
298#define CISLANDS_VRC_DFLT2 0xC00080
299#define CISLANDS_VRC_DFLT3 0xC00200
300#define CISLANDS_VRC_DFLT4 0xC01680
301#define CISLANDS_VRC_DFLT5 0xC00033
302#define CISLANDS_VRC_DFLT6 0xC00033
303#define CISLANDS_VRC_DFLT7 0x3FFFC000
304
305#define CISLANDS_CGULVPARAMETER_DFLT 0x00040035
306#define CISLAND_TARGETACTIVITY_DFLT 30
307#define CISLAND_MCLK_TARGETACTIVITY_DFLT 10
308
309#define PCIE_PERF_REQ_REMOVE_REGISTRY 0
310#define PCIE_PERF_REQ_FORCE_LOWPOWER 1
311#define PCIE_PERF_REQ_PECI_GEN1 2
312#define PCIE_PERF_REQ_PECI_GEN2 3
313#define PCIE_PERF_REQ_PECI_GEN3 4
314
315int ci_copy_bytes_to_smc(struct radeon_device *rdev,
316 u32 smc_start_address,
317 const u8 *src, u32 byte_count, u32 limit);
318void ci_start_smc(struct radeon_device *rdev);
319void ci_reset_smc(struct radeon_device *rdev);
320int ci_program_jump_on_start(struct radeon_device *rdev);
321void ci_stop_smc_clock(struct radeon_device *rdev);
322void ci_start_smc_clock(struct radeon_device *rdev);
323bool ci_is_smc_running(struct radeon_device *rdev);
324PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
325PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev);
326int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit);
327int ci_read_smc_sram_dword(struct radeon_device *rdev,
328 u32 smc_address, u32 *value, u32 limit);
329int ci_write_smc_sram_dword(struct radeon_device *rdev,
330 u32 smc_address, u32 value, u32 limit);
331
332#endif
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
new file mode 100644
index 000000000000..53b43dd3cf1e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -0,0 +1,262 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include <linux/firmware.h>
26#include "drmP.h"
27#include "radeon.h"
28#include "cikd.h"
29#include "ppsmc.h"
30#include "radeon_ucode.h"
31
32static int ci_set_smc_sram_address(struct radeon_device *rdev,
33 u32 smc_address, u32 limit)
34{
35 if (smc_address & 3)
36 return -EINVAL;
37 if ((smc_address + 3) > limit)
38 return -EINVAL;
39
40 WREG32(SMC_IND_INDEX_0, smc_address);
41 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
42
43 return 0;
44}
45
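/*
 * Copy a buffer into SMC SRAM through the indirect index/data registers.
 * Whole dwords are written big-endian (the SMC address space is BE); a
 * trailing partial dword is handled with a read-modify-write so the bytes
 * beyond byte_count keep their old value.
 */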
46int ci_copy_bytes_to_smc(struct radeon_device *rdev,
47 u32 smc_start_address,
48 const u8 *src, u32 byte_count, u32 limit)
49{
50 u32 data, original_data;
51 u32 addr;
52 u32 extra_shift;
53 int ret;
54
55 if (smc_start_address & 3)
56 return -EINVAL;
57 if ((smc_start_address + byte_count) > limit)
58 return -EINVAL;
59
60 addr = smc_start_address;
61
62 while (byte_count >= 4) {
63 /* SMC address space is BE */
64 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
65
66 ret = ci_set_smc_sram_address(rdev, addr, limit);
67 if (ret)
68 return ret;
69
70 WREG32(SMC_IND_DATA_0, data);
71
72 src += 4;
73 byte_count -= 4;
74 addr += 4;
75 }
76
77 /* RMW for the final bytes */
78 if (byte_count > 0) {
79 data = 0;
80
81 ret = ci_set_smc_sram_address(rdev, addr, limit);
82 if (ret)
83 return ret;
84
85 original_data = RREG32(SMC_IND_DATA_0);
86
87 extra_shift = 8 * (4 - byte_count);
88
89 while (byte_count > 0) {
90 data = (data << 8) + *src++;
91 byte_count--;
92 }
93
94 data <<= extra_shift;
95
96 data |= (original_data & ~((~0UL) << extra_shift));
97
98 ret = ci_set_smc_sram_address(rdev, addr, limit);
99 if (ret)
100 return ret;
101
102 WREG32(SMC_IND_DATA_0, data);
103 }
104 return 0;
105}
106
107void ci_start_smc(struct radeon_device *rdev)
108{
109 u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
110
111 tmp &= ~RST_REG;
112 WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
113}
114
115void ci_reset_smc(struct radeon_device *rdev)
116{
117 u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
118
119 tmp |= RST_REG;
120 WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
121}
122
123int ci_program_jump_on_start(struct radeon_device *rdev)
124{
125 static u8 data[] = { 0xE0, 0x00, 0x80, 0x40 };
126
127 return ci_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1);
128}
129
130void ci_stop_smc_clock(struct radeon_device *rdev)
131{
132 u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
133
134 tmp |= CK_DISABLE;
135
136 WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
137}
138
139void ci_start_smc_clock(struct radeon_device *rdev)
140{
141 u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
142
143 tmp &= ~CK_DISABLE;
144
145 WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
146}
147
148bool ci_is_smc_running(struct radeon_device *rdev)
149{
150 u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
151 u32 pc_c = RREG32_SMC(SMC_PC_C);
152
153 if (!(clk & CK_DISABLE) && (0x20100 <= pc_c))
154 return true;
155
156 return false;
157}
158
159PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
160{
161 u32 tmp;
162 int i;
163
164 if (!ci_is_smc_running(rdev))
165 return PPSMC_Result_Failed;
166
167 WREG32(SMC_MESSAGE_0, msg);
168
169 for (i = 0; i < rdev->usec_timeout; i++) {
170 tmp = RREG32(SMC_RESP_0);
171 if (tmp != 0)
172 break;
173 udelay(1);
174 }
175 tmp = RREG32(SMC_RESP_0);
176
177 return (PPSMC_Result)tmp;
178}
179
180PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev)
181{
182 u32 tmp;
183 int i;
184
185 if (!ci_is_smc_running(rdev))
186 return PPSMC_Result_OK;
187
188 for (i = 0; i < rdev->usec_timeout; i++) {
189 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
190 if ((tmp & CKEN) == 0)
191 break;
192 udelay(1);
193 }
194
195 return PPSMC_Result_OK;
196}
197
198int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
199{
200 u32 ucode_start_address;
201 u32 ucode_size;
202 const u8 *src;
203 u32 data;
204
205 if (!rdev->smc_fw)
206 return -EINVAL;
207
208 switch (rdev->family) {
209 case CHIP_BONAIRE:
210 ucode_start_address = BONAIRE_SMC_UCODE_START;
211 ucode_size = BONAIRE_SMC_UCODE_SIZE;
212 break;
213 default:
214 DRM_ERROR("unknown asic in smc ucode loader\n");
215 BUG();
216 }
217
218 if (ucode_size & 3)
219 return -EINVAL;
220
221 src = (const u8 *)rdev->smc_fw->data;
222 WREG32(SMC_IND_INDEX_0, ucode_start_address);
223 WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
224 while (ucode_size >= 4) {
225 /* SMC address space is BE */
226 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
227
228 WREG32(SMC_IND_DATA_0, data);
229
230 src += 4;
231 ucode_size -= 4;
232 }
233 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
234
235 return 0;
236}
237
238int ci_read_smc_sram_dword(struct radeon_device *rdev,
239 u32 smc_address, u32 *value, u32 limit)
240{
241 int ret;
242
243 ret = ci_set_smc_sram_address(rdev, smc_address, limit);
244 if (ret)
245 return ret;
246
247 *value = RREG32(SMC_IND_DATA_0);
248 return 0;
249}
250
251int ci_write_smc_sram_dword(struct radeon_device *rdev,
252 u32 smc_address, u32 value, u32 limit)
253{
254 int ret;
255
256 ret = ci_set_smc_sram_address(rdev, smc_address, limit);
257 if (ret)
258 return ret;
259
260 WREG32(SMC_IND_DATA_0, value);
261 return 0;
262}
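A note on the tail handling in ci_copy_bytes_to_smc above: the SMC aperture is big-endian and only dword-addressable, so whole dwords are packed MSB-first from the source bytes, and a trailing 1-3 byte remainder is merged into the existing register value with a read-modify-write. The sketch below is plain userspace C with a hypothetical merge_tail_be() helper, not driver code; it reproduces just that merge step.

    #include <stdint.h>
    #include <stdio.h>

    /* Merge 1-3 leftover source bytes into the high (lowest-addressed) part
     * of an existing big-endian dword, keeping the bytes that are not
     * overwritten -- mirrors the byte_count > 0 tail path above. */
    static uint32_t merge_tail_be(uint32_t original, const uint8_t *src,
                                  unsigned int byte_count /* 1..3 */)
    {
            unsigned int extra_shift = 8 * (4 - byte_count);
            uint32_t data = 0;

            while (byte_count-- > 0)
                    data = (data << 8) + *src++;

            data <<= extra_shift;
            data |= original & ~(~0UL << extra_shift); /* keep untouched low bytes */
            return data;
    }

    int main(void)
    {
            const uint8_t tail[2] = { 0xAA, 0xBB };

            /* two bytes written over 0x11223344 -> 0xAABB3344 */
            printf("0x%08X\n", merge_tail_be(0x11223344, tail, 2));
            return 0;
    }

Only the leading bytes of the dword (the lowest SMC addresses) are replaced; the rest of the register contents survive, which is why the function reads SMC_IND_DATA_0 before the final write.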
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 8928bd109c16..a3bba0587276 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -30,22 +30,8 @@
30#include "cikd.h" 30#include "cikd.h"
31#include "atom.h" 31#include "atom.h"
32#include "cik_blit_shaders.h" 32#include "cik_blit_shaders.h"
33 33#include "radeon_ucode.h"
34/* GFX */ 34#include "clearstate_ci.h"
35#define CIK_PFP_UCODE_SIZE 2144
36#define CIK_ME_UCODE_SIZE 2144
37#define CIK_CE_UCODE_SIZE 2144
38/* compute */
39#define CIK_MEC_UCODE_SIZE 4192
40/* interrupts */
41#define BONAIRE_RLC_UCODE_SIZE 2048
42#define KB_RLC_UCODE_SIZE 2560
43#define KV_RLC_UCODE_SIZE 2560
44/* gddr controller */
45#define CIK_MC_UCODE_SIZE 7866
46/* sdma */
47#define CIK_SDMA_UCODE_SIZE 1050
48#define CIK_SDMA_UCODE_VERSION 64
49 35
50MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin"); 36MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin");
51MODULE_FIRMWARE("radeon/BONAIRE_me.bin"); 37MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
@@ -54,6 +40,7 @@ MODULE_FIRMWARE("radeon/BONAIRE_mec.bin");
54MODULE_FIRMWARE("radeon/BONAIRE_mc.bin"); 40MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
55MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin"); 41MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
56MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin"); 42MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
43MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
57MODULE_FIRMWARE("radeon/KAVERI_pfp.bin"); 44MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
58MODULE_FIRMWARE("radeon/KAVERI_me.bin"); 45MODULE_FIRMWARE("radeon/KAVERI_me.bin");
59MODULE_FIRMWARE("radeon/KAVERI_ce.bin"); 46MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
@@ -72,10 +59,61 @@ extern void r600_ih_ring_fini(struct radeon_device *rdev);
72extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save); 59extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
73extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save); 60extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
74extern bool evergreen_is_display_hung(struct radeon_device *rdev); 61extern bool evergreen_is_display_hung(struct radeon_device *rdev);
62extern void sumo_rlc_fini(struct radeon_device *rdev);
63extern int sumo_rlc_init(struct radeon_device *rdev);
75extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); 64extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
76extern void si_rlc_fini(struct radeon_device *rdev); 65extern void si_rlc_reset(struct radeon_device *rdev);
77extern int si_rlc_init(struct radeon_device *rdev); 66extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
67extern int cik_sdma_resume(struct radeon_device *rdev);
68extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
69extern void cik_sdma_fini(struct radeon_device *rdev);
70extern void cik_sdma_vm_set_page(struct radeon_device *rdev,
71 struct radeon_ib *ib,
72 uint64_t pe,
73 uint64_t addr, unsigned count,
74 uint32_t incr, uint32_t flags);
78static void cik_rlc_stop(struct radeon_device *rdev); 75static void cik_rlc_stop(struct radeon_device *rdev);
76static void cik_pcie_gen3_enable(struct radeon_device *rdev);
77static void cik_program_aspm(struct radeon_device *rdev);
78static void cik_init_pg(struct radeon_device *rdev);
79static void cik_init_cg(struct radeon_device *rdev);
80
81/* get temperature in millidegrees */
82int ci_get_temp(struct radeon_device *rdev)
83{
84 u32 temp;
85 int actual_temp = 0;
86
87 temp = (RREG32_SMC(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
88 CTF_TEMP_SHIFT;
89
90 if (temp & 0x200)
91 actual_temp = 255;
92 else
93 actual_temp = temp & 0x1ff;
94
95 actual_temp = actual_temp * 1000;
96
97 return actual_temp;
98}
99
100/* get temperature in millidegrees */
101int kv_get_temp(struct radeon_device *rdev)
102{
103 u32 temp;
104 int actual_temp = 0;
105
106 temp = RREG32_SMC(0xC0300E0C);
107
108 if (temp)
109 actual_temp = (temp / 8) - 49;
110 else
111 actual_temp = 0;
112
113 actual_temp = actual_temp * 1000;
114
115 return actual_temp;
116}
79 117
80/* 118/*
81 * Indirect registers accessor 119 * Indirect registers accessor
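Both temperature helpers above are simple rescalings of a raw SMC reading into millidegrees Celsius: ci_get_temp clamps to 255 C whenever the 0x200 flag bit is set in the extracted CTF_TEMP field, kv_get_temp divides the raw value by 8 and subtracts a 49 C offset, and both multiply by 1000. A minimal standalone sketch of the same arithmetic (hypothetical helper names, outside the driver):

    #include <stdint.h>
    #include <stdio.h>

    /* Bonaire-style: 9-bit temperature field, clamped to 255 C when the
     * 0x200 flag bit is set, reported in millidegrees. */
    static int ci_temp_to_millicelsius(uint32_t field)
    {
            int t = (field & 0x200) ? 255 : (int)(field & 0x1ff);
            return t * 1000;
    }

    /* Kaveri-style: raw value / 8 minus a 49 C offset, 0 when unread. */
    static int kv_temp_to_millicelsius(uint32_t raw)
    {
            return raw ? ((int)(raw / 8) - 49) * 1000 : 0;
    }

    int main(void)
    {
            printf("%d\n", ci_temp_to_millicelsius(0x05a)); /* 90 -> 90000 */
            printf("%d\n", kv_temp_to_millicelsius(800));   /* 51 -> 51000 */
            return 0;
    }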
@@ -98,6 +136,778 @@ void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
98 (void)RREG32(PCIE_DATA); 136 (void)RREG32(PCIE_DATA);
99} 137}
100 138
139static const u32 spectre_rlc_save_restore_register_list[] =
140{
141 (0x0e00 << 16) | (0xc12c >> 2),
142 0x00000000,
143 (0x0e00 << 16) | (0xc140 >> 2),
144 0x00000000,
145 (0x0e00 << 16) | (0xc150 >> 2),
146 0x00000000,
147 (0x0e00 << 16) | (0xc15c >> 2),
148 0x00000000,
149 (0x0e00 << 16) | (0xc168 >> 2),
150 0x00000000,
151 (0x0e00 << 16) | (0xc170 >> 2),
152 0x00000000,
153 (0x0e00 << 16) | (0xc178 >> 2),
154 0x00000000,
155 (0x0e00 << 16) | (0xc204 >> 2),
156 0x00000000,
157 (0x0e00 << 16) | (0xc2b4 >> 2),
158 0x00000000,
159 (0x0e00 << 16) | (0xc2b8 >> 2),
160 0x00000000,
161 (0x0e00 << 16) | (0xc2bc >> 2),
162 0x00000000,
163 (0x0e00 << 16) | (0xc2c0 >> 2),
164 0x00000000,
165 (0x0e00 << 16) | (0x8228 >> 2),
166 0x00000000,
167 (0x0e00 << 16) | (0x829c >> 2),
168 0x00000000,
169 (0x0e00 << 16) | (0x869c >> 2),
170 0x00000000,
171 (0x0600 << 16) | (0x98f4 >> 2),
172 0x00000000,
173 (0x0e00 << 16) | (0x98f8 >> 2),
174 0x00000000,
175 (0x0e00 << 16) | (0x9900 >> 2),
176 0x00000000,
177 (0x0e00 << 16) | (0xc260 >> 2),
178 0x00000000,
179 (0x0e00 << 16) | (0x90e8 >> 2),
180 0x00000000,
181 (0x0e00 << 16) | (0x3c000 >> 2),
182 0x00000000,
183 (0x0e00 << 16) | (0x3c00c >> 2),
184 0x00000000,
185 (0x0e00 << 16) | (0x8c1c >> 2),
186 0x00000000,
187 (0x0e00 << 16) | (0x9700 >> 2),
188 0x00000000,
189 (0x0e00 << 16) | (0xcd20 >> 2),
190 0x00000000,
191 (0x4e00 << 16) | (0xcd20 >> 2),
192 0x00000000,
193 (0x5e00 << 16) | (0xcd20 >> 2),
194 0x00000000,
195 (0x6e00 << 16) | (0xcd20 >> 2),
196 0x00000000,
197 (0x7e00 << 16) | (0xcd20 >> 2),
198 0x00000000,
199 (0x8e00 << 16) | (0xcd20 >> 2),
200 0x00000000,
201 (0x9e00 << 16) | (0xcd20 >> 2),
202 0x00000000,
203 (0xae00 << 16) | (0xcd20 >> 2),
204 0x00000000,
205 (0xbe00 << 16) | (0xcd20 >> 2),
206 0x00000000,
207 (0x0e00 << 16) | (0x89bc >> 2),
208 0x00000000,
209 (0x0e00 << 16) | (0x8900 >> 2),
210 0x00000000,
211 0x3,
212 (0x0e00 << 16) | (0xc130 >> 2),
213 0x00000000,
214 (0x0e00 << 16) | (0xc134 >> 2),
215 0x00000000,
216 (0x0e00 << 16) | (0xc1fc >> 2),
217 0x00000000,
218 (0x0e00 << 16) | (0xc208 >> 2),
219 0x00000000,
220 (0x0e00 << 16) | (0xc264 >> 2),
221 0x00000000,
222 (0x0e00 << 16) | (0xc268 >> 2),
223 0x00000000,
224 (0x0e00 << 16) | (0xc26c >> 2),
225 0x00000000,
226 (0x0e00 << 16) | (0xc270 >> 2),
227 0x00000000,
228 (0x0e00 << 16) | (0xc274 >> 2),
229 0x00000000,
230 (0x0e00 << 16) | (0xc278 >> 2),
231 0x00000000,
232 (0x0e00 << 16) | (0xc27c >> 2),
233 0x00000000,
234 (0x0e00 << 16) | (0xc280 >> 2),
235 0x00000000,
236 (0x0e00 << 16) | (0xc284 >> 2),
237 0x00000000,
238 (0x0e00 << 16) | (0xc288 >> 2),
239 0x00000000,
240 (0x0e00 << 16) | (0xc28c >> 2),
241 0x00000000,
242 (0x0e00 << 16) | (0xc290 >> 2),
243 0x00000000,
244 (0x0e00 << 16) | (0xc294 >> 2),
245 0x00000000,
246 (0x0e00 << 16) | (0xc298 >> 2),
247 0x00000000,
248 (0x0e00 << 16) | (0xc29c >> 2),
249 0x00000000,
250 (0x0e00 << 16) | (0xc2a0 >> 2),
251 0x00000000,
252 (0x0e00 << 16) | (0xc2a4 >> 2),
253 0x00000000,
254 (0x0e00 << 16) | (0xc2a8 >> 2),
255 0x00000000,
256 (0x0e00 << 16) | (0xc2ac >> 2),
257 0x00000000,
258 (0x0e00 << 16) | (0xc2b0 >> 2),
259 0x00000000,
260 (0x0e00 << 16) | (0x301d0 >> 2),
261 0x00000000,
262 (0x0e00 << 16) | (0x30238 >> 2),
263 0x00000000,
264 (0x0e00 << 16) | (0x30250 >> 2),
265 0x00000000,
266 (0x0e00 << 16) | (0x30254 >> 2),
267 0x00000000,
268 (0x0e00 << 16) | (0x30258 >> 2),
269 0x00000000,
270 (0x0e00 << 16) | (0x3025c >> 2),
271 0x00000000,
272 (0x4e00 << 16) | (0xc900 >> 2),
273 0x00000000,
274 (0x5e00 << 16) | (0xc900 >> 2),
275 0x00000000,
276 (0x6e00 << 16) | (0xc900 >> 2),
277 0x00000000,
278 (0x7e00 << 16) | (0xc900 >> 2),
279 0x00000000,
280 (0x8e00 << 16) | (0xc900 >> 2),
281 0x00000000,
282 (0x9e00 << 16) | (0xc900 >> 2),
283 0x00000000,
284 (0xae00 << 16) | (0xc900 >> 2),
285 0x00000000,
286 (0xbe00 << 16) | (0xc900 >> 2),
287 0x00000000,
288 (0x4e00 << 16) | (0xc904 >> 2),
289 0x00000000,
290 (0x5e00 << 16) | (0xc904 >> 2),
291 0x00000000,
292 (0x6e00 << 16) | (0xc904 >> 2),
293 0x00000000,
294 (0x7e00 << 16) | (0xc904 >> 2),
295 0x00000000,
296 (0x8e00 << 16) | (0xc904 >> 2),
297 0x00000000,
298 (0x9e00 << 16) | (0xc904 >> 2),
299 0x00000000,
300 (0xae00 << 16) | (0xc904 >> 2),
301 0x00000000,
302 (0xbe00 << 16) | (0xc904 >> 2),
303 0x00000000,
304 (0x4e00 << 16) | (0xc908 >> 2),
305 0x00000000,
306 (0x5e00 << 16) | (0xc908 >> 2),
307 0x00000000,
308 (0x6e00 << 16) | (0xc908 >> 2),
309 0x00000000,
310 (0x7e00 << 16) | (0xc908 >> 2),
311 0x00000000,
312 (0x8e00 << 16) | (0xc908 >> 2),
313 0x00000000,
314 (0x9e00 << 16) | (0xc908 >> 2),
315 0x00000000,
316 (0xae00 << 16) | (0xc908 >> 2),
317 0x00000000,
318 (0xbe00 << 16) | (0xc908 >> 2),
319 0x00000000,
320 (0x4e00 << 16) | (0xc90c >> 2),
321 0x00000000,
322 (0x5e00 << 16) | (0xc90c >> 2),
323 0x00000000,
324 (0x6e00 << 16) | (0xc90c >> 2),
325 0x00000000,
326 (0x7e00 << 16) | (0xc90c >> 2),
327 0x00000000,
328 (0x8e00 << 16) | (0xc90c >> 2),
329 0x00000000,
330 (0x9e00 << 16) | (0xc90c >> 2),
331 0x00000000,
332 (0xae00 << 16) | (0xc90c >> 2),
333 0x00000000,
334 (0xbe00 << 16) | (0xc90c >> 2),
335 0x00000000,
336 (0x4e00 << 16) | (0xc910 >> 2),
337 0x00000000,
338 (0x5e00 << 16) | (0xc910 >> 2),
339 0x00000000,
340 (0x6e00 << 16) | (0xc910 >> 2),
341 0x00000000,
342 (0x7e00 << 16) | (0xc910 >> 2),
343 0x00000000,
344 (0x8e00 << 16) | (0xc910 >> 2),
345 0x00000000,
346 (0x9e00 << 16) | (0xc910 >> 2),
347 0x00000000,
348 (0xae00 << 16) | (0xc910 >> 2),
349 0x00000000,
350 (0xbe00 << 16) | (0xc910 >> 2),
351 0x00000000,
352 (0x0e00 << 16) | (0xc99c >> 2),
353 0x00000000,
354 (0x0e00 << 16) | (0x9834 >> 2),
355 0x00000000,
356 (0x0000 << 16) | (0x30f00 >> 2),
357 0x00000000,
358 (0x0001 << 16) | (0x30f00 >> 2),
359 0x00000000,
360 (0x0000 << 16) | (0x30f04 >> 2),
361 0x00000000,
362 (0x0001 << 16) | (0x30f04 >> 2),
363 0x00000000,
364 (0x0000 << 16) | (0x30f08 >> 2),
365 0x00000000,
366 (0x0001 << 16) | (0x30f08 >> 2),
367 0x00000000,
368 (0x0000 << 16) | (0x30f0c >> 2),
369 0x00000000,
370 (0x0001 << 16) | (0x30f0c >> 2),
371 0x00000000,
372 (0x0600 << 16) | (0x9b7c >> 2),
373 0x00000000,
374 (0x0e00 << 16) | (0x8a14 >> 2),
375 0x00000000,
376 (0x0e00 << 16) | (0x8a18 >> 2),
377 0x00000000,
378 (0x0600 << 16) | (0x30a00 >> 2),
379 0x00000000,
380 (0x0e00 << 16) | (0x8bf0 >> 2),
381 0x00000000,
382 (0x0e00 << 16) | (0x8bcc >> 2),
383 0x00000000,
384 (0x0e00 << 16) | (0x8b24 >> 2),
385 0x00000000,
386 (0x0e00 << 16) | (0x30a04 >> 2),
387 0x00000000,
388 (0x0600 << 16) | (0x30a10 >> 2),
389 0x00000000,
390 (0x0600 << 16) | (0x30a14 >> 2),
391 0x00000000,
392 (0x0600 << 16) | (0x30a18 >> 2),
393 0x00000000,
394 (0x0600 << 16) | (0x30a2c >> 2),
395 0x00000000,
396 (0x0e00 << 16) | (0xc700 >> 2),
397 0x00000000,
398 (0x0e00 << 16) | (0xc704 >> 2),
399 0x00000000,
400 (0x0e00 << 16) | (0xc708 >> 2),
401 0x00000000,
402 (0x0e00 << 16) | (0xc768 >> 2),
403 0x00000000,
404 (0x0400 << 16) | (0xc770 >> 2),
405 0x00000000,
406 (0x0400 << 16) | (0xc774 >> 2),
407 0x00000000,
408 (0x0400 << 16) | (0xc778 >> 2),
409 0x00000000,
410 (0x0400 << 16) | (0xc77c >> 2),
411 0x00000000,
412 (0x0400 << 16) | (0xc780 >> 2),
413 0x00000000,
414 (0x0400 << 16) | (0xc784 >> 2),
415 0x00000000,
416 (0x0400 << 16) | (0xc788 >> 2),
417 0x00000000,
418 (0x0400 << 16) | (0xc78c >> 2),
419 0x00000000,
420 (0x0400 << 16) | (0xc798 >> 2),
421 0x00000000,
422 (0x0400 << 16) | (0xc79c >> 2),
423 0x00000000,
424 (0x0400 << 16) | (0xc7a0 >> 2),
425 0x00000000,
426 (0x0400 << 16) | (0xc7a4 >> 2),
427 0x00000000,
428 (0x0400 << 16) | (0xc7a8 >> 2),
429 0x00000000,
430 (0x0400 << 16) | (0xc7ac >> 2),
431 0x00000000,
432 (0x0400 << 16) | (0xc7b0 >> 2),
433 0x00000000,
434 (0x0400 << 16) | (0xc7b4 >> 2),
435 0x00000000,
436 (0x0e00 << 16) | (0x9100 >> 2),
437 0x00000000,
438 (0x0e00 << 16) | (0x3c010 >> 2),
439 0x00000000,
440 (0x0e00 << 16) | (0x92a8 >> 2),
441 0x00000000,
442 (0x0e00 << 16) | (0x92ac >> 2),
443 0x00000000,
444 (0x0e00 << 16) | (0x92b4 >> 2),
445 0x00000000,
446 (0x0e00 << 16) | (0x92b8 >> 2),
447 0x00000000,
448 (0x0e00 << 16) | (0x92bc >> 2),
449 0x00000000,
450 (0x0e00 << 16) | (0x92c0 >> 2),
451 0x00000000,
452 (0x0e00 << 16) | (0x92c4 >> 2),
453 0x00000000,
454 (0x0e00 << 16) | (0x92c8 >> 2),
455 0x00000000,
456 (0x0e00 << 16) | (0x92cc >> 2),
457 0x00000000,
458 (0x0e00 << 16) | (0x92d0 >> 2),
459 0x00000000,
460 (0x0e00 << 16) | (0x8c00 >> 2),
461 0x00000000,
462 (0x0e00 << 16) | (0x8c04 >> 2),
463 0x00000000,
464 (0x0e00 << 16) | (0x8c20 >> 2),
465 0x00000000,
466 (0x0e00 << 16) | (0x8c38 >> 2),
467 0x00000000,
468 (0x0e00 << 16) | (0x8c3c >> 2),
469 0x00000000,
470 (0x0e00 << 16) | (0xae00 >> 2),
471 0x00000000,
472 (0x0e00 << 16) | (0x9604 >> 2),
473 0x00000000,
474 (0x0e00 << 16) | (0xac08 >> 2),
475 0x00000000,
476 (0x0e00 << 16) | (0xac0c >> 2),
477 0x00000000,
478 (0x0e00 << 16) | (0xac10 >> 2),
479 0x00000000,
480 (0x0e00 << 16) | (0xac14 >> 2),
481 0x00000000,
482 (0x0e00 << 16) | (0xac58 >> 2),
483 0x00000000,
484 (0x0e00 << 16) | (0xac68 >> 2),
485 0x00000000,
486 (0x0e00 << 16) | (0xac6c >> 2),
487 0x00000000,
488 (0x0e00 << 16) | (0xac70 >> 2),
489 0x00000000,
490 (0x0e00 << 16) | (0xac74 >> 2),
491 0x00000000,
492 (0x0e00 << 16) | (0xac78 >> 2),
493 0x00000000,
494 (0x0e00 << 16) | (0xac7c >> 2),
495 0x00000000,
496 (0x0e00 << 16) | (0xac80 >> 2),
497 0x00000000,
498 (0x0e00 << 16) | (0xac84 >> 2),
499 0x00000000,
500 (0x0e00 << 16) | (0xac88 >> 2),
501 0x00000000,
502 (0x0e00 << 16) | (0xac8c >> 2),
503 0x00000000,
504 (0x0e00 << 16) | (0x970c >> 2),
505 0x00000000,
506 (0x0e00 << 16) | (0x9714 >> 2),
507 0x00000000,
508 (0x0e00 << 16) | (0x9718 >> 2),
509 0x00000000,
510 (0x0e00 << 16) | (0x971c >> 2),
511 0x00000000,
512 (0x0e00 << 16) | (0x31068 >> 2),
513 0x00000000,
514 (0x4e00 << 16) | (0x31068 >> 2),
515 0x00000000,
516 (0x5e00 << 16) | (0x31068 >> 2),
517 0x00000000,
518 (0x6e00 << 16) | (0x31068 >> 2),
519 0x00000000,
520 (0x7e00 << 16) | (0x31068 >> 2),
521 0x00000000,
522 (0x8e00 << 16) | (0x31068 >> 2),
523 0x00000000,
524 (0x9e00 << 16) | (0x31068 >> 2),
525 0x00000000,
526 (0xae00 << 16) | (0x31068 >> 2),
527 0x00000000,
528 (0xbe00 << 16) | (0x31068 >> 2),
529 0x00000000,
530 (0x0e00 << 16) | (0xcd10 >> 2),
531 0x00000000,
532 (0x0e00 << 16) | (0xcd14 >> 2),
533 0x00000000,
534 (0x0e00 << 16) | (0x88b0 >> 2),
535 0x00000000,
536 (0x0e00 << 16) | (0x88b4 >> 2),
537 0x00000000,
538 (0x0e00 << 16) | (0x88b8 >> 2),
539 0x00000000,
540 (0x0e00 << 16) | (0x88bc >> 2),
541 0x00000000,
542 (0x0400 << 16) | (0x89c0 >> 2),
543 0x00000000,
544 (0x0e00 << 16) | (0x88c4 >> 2),
545 0x00000000,
546 (0x0e00 << 16) | (0x88c8 >> 2),
547 0x00000000,
548 (0x0e00 << 16) | (0x88d0 >> 2),
549 0x00000000,
550 (0x0e00 << 16) | (0x88d4 >> 2),
551 0x00000000,
552 (0x0e00 << 16) | (0x88d8 >> 2),
553 0x00000000,
554 (0x0e00 << 16) | (0x8980 >> 2),
555 0x00000000,
556 (0x0e00 << 16) | (0x30938 >> 2),
557 0x00000000,
558 (0x0e00 << 16) | (0x3093c >> 2),
559 0x00000000,
560 (0x0e00 << 16) | (0x30940 >> 2),
561 0x00000000,
562 (0x0e00 << 16) | (0x89a0 >> 2),
563 0x00000000,
564 (0x0e00 << 16) | (0x30900 >> 2),
565 0x00000000,
566 (0x0e00 << 16) | (0x30904 >> 2),
567 0x00000000,
568 (0x0e00 << 16) | (0x89b4 >> 2),
569 0x00000000,
570 (0x0e00 << 16) | (0x3c210 >> 2),
571 0x00000000,
572 (0x0e00 << 16) | (0x3c214 >> 2),
573 0x00000000,
574 (0x0e00 << 16) | (0x3c218 >> 2),
575 0x00000000,
576 (0x0e00 << 16) | (0x8904 >> 2),
577 0x00000000,
578 0x5,
579 (0x0e00 << 16) | (0x8c28 >> 2),
580 (0x0e00 << 16) | (0x8c2c >> 2),
581 (0x0e00 << 16) | (0x8c30 >> 2),
582 (0x0e00 << 16) | (0x8c34 >> 2),
583 (0x0e00 << 16) | (0x9600 >> 2),
584};
585
586static const u32 kalindi_rlc_save_restore_register_list[] =
587{
588 (0x0e00 << 16) | (0xc12c >> 2),
589 0x00000000,
590 (0x0e00 << 16) | (0xc140 >> 2),
591 0x00000000,
592 (0x0e00 << 16) | (0xc150 >> 2),
593 0x00000000,
594 (0x0e00 << 16) | (0xc15c >> 2),
595 0x00000000,
596 (0x0e00 << 16) | (0xc168 >> 2),
597 0x00000000,
598 (0x0e00 << 16) | (0xc170 >> 2),
599 0x00000000,
600 (0x0e00 << 16) | (0xc204 >> 2),
601 0x00000000,
602 (0x0e00 << 16) | (0xc2b4 >> 2),
603 0x00000000,
604 (0x0e00 << 16) | (0xc2b8 >> 2),
605 0x00000000,
606 (0x0e00 << 16) | (0xc2bc >> 2),
607 0x00000000,
608 (0x0e00 << 16) | (0xc2c0 >> 2),
609 0x00000000,
610 (0x0e00 << 16) | (0x8228 >> 2),
611 0x00000000,
612 (0x0e00 << 16) | (0x829c >> 2),
613 0x00000000,
614 (0x0e00 << 16) | (0x869c >> 2),
615 0x00000000,
616 (0x0600 << 16) | (0x98f4 >> 2),
617 0x00000000,
618 (0x0e00 << 16) | (0x98f8 >> 2),
619 0x00000000,
620 (0x0e00 << 16) | (0x9900 >> 2),
621 0x00000000,
622 (0x0e00 << 16) | (0xc260 >> 2),
623 0x00000000,
624 (0x0e00 << 16) | (0x90e8 >> 2),
625 0x00000000,
626 (0x0e00 << 16) | (0x3c000 >> 2),
627 0x00000000,
628 (0x0e00 << 16) | (0x3c00c >> 2),
629 0x00000000,
630 (0x0e00 << 16) | (0x8c1c >> 2),
631 0x00000000,
632 (0x0e00 << 16) | (0x9700 >> 2),
633 0x00000000,
634 (0x0e00 << 16) | (0xcd20 >> 2),
635 0x00000000,
636 (0x4e00 << 16) | (0xcd20 >> 2),
637 0x00000000,
638 (0x5e00 << 16) | (0xcd20 >> 2),
639 0x00000000,
640 (0x6e00 << 16) | (0xcd20 >> 2),
641 0x00000000,
642 (0x7e00 << 16) | (0xcd20 >> 2),
643 0x00000000,
644 (0x0e00 << 16) | (0x89bc >> 2),
645 0x00000000,
646 (0x0e00 << 16) | (0x8900 >> 2),
647 0x00000000,
648 0x3,
649 (0x0e00 << 16) | (0xc130 >> 2),
650 0x00000000,
651 (0x0e00 << 16) | (0xc134 >> 2),
652 0x00000000,
653 (0x0e00 << 16) | (0xc1fc >> 2),
654 0x00000000,
655 (0x0e00 << 16) | (0xc208 >> 2),
656 0x00000000,
657 (0x0e00 << 16) | (0xc264 >> 2),
658 0x00000000,
659 (0x0e00 << 16) | (0xc268 >> 2),
660 0x00000000,
661 (0x0e00 << 16) | (0xc26c >> 2),
662 0x00000000,
663 (0x0e00 << 16) | (0xc270 >> 2),
664 0x00000000,
665 (0x0e00 << 16) | (0xc274 >> 2),
666 0x00000000,
667 (0x0e00 << 16) | (0xc28c >> 2),
668 0x00000000,
669 (0x0e00 << 16) | (0xc290 >> 2),
670 0x00000000,
671 (0x0e00 << 16) | (0xc294 >> 2),
672 0x00000000,
673 (0x0e00 << 16) | (0xc298 >> 2),
674 0x00000000,
675 (0x0e00 << 16) | (0xc2a0 >> 2),
676 0x00000000,
677 (0x0e00 << 16) | (0xc2a4 >> 2),
678 0x00000000,
679 (0x0e00 << 16) | (0xc2a8 >> 2),
680 0x00000000,
681 (0x0e00 << 16) | (0xc2ac >> 2),
682 0x00000000,
683 (0x0e00 << 16) | (0x301d0 >> 2),
684 0x00000000,
685 (0x0e00 << 16) | (0x30238 >> 2),
686 0x00000000,
687 (0x0e00 << 16) | (0x30250 >> 2),
688 0x00000000,
689 (0x0e00 << 16) | (0x30254 >> 2),
690 0x00000000,
691 (0x0e00 << 16) | (0x30258 >> 2),
692 0x00000000,
693 (0x0e00 << 16) | (0x3025c >> 2),
694 0x00000000,
695 (0x4e00 << 16) | (0xc900 >> 2),
696 0x00000000,
697 (0x5e00 << 16) | (0xc900 >> 2),
698 0x00000000,
699 (0x6e00 << 16) | (0xc900 >> 2),
700 0x00000000,
701 (0x7e00 << 16) | (0xc900 >> 2),
702 0x00000000,
703 (0x4e00 << 16) | (0xc904 >> 2),
704 0x00000000,
705 (0x5e00 << 16) | (0xc904 >> 2),
706 0x00000000,
707 (0x6e00 << 16) | (0xc904 >> 2),
708 0x00000000,
709 (0x7e00 << 16) | (0xc904 >> 2),
710 0x00000000,
711 (0x4e00 << 16) | (0xc908 >> 2),
712 0x00000000,
713 (0x5e00 << 16) | (0xc908 >> 2),
714 0x00000000,
715 (0x6e00 << 16) | (0xc908 >> 2),
716 0x00000000,
717 (0x7e00 << 16) | (0xc908 >> 2),
718 0x00000000,
719 (0x4e00 << 16) | (0xc90c >> 2),
720 0x00000000,
721 (0x5e00 << 16) | (0xc90c >> 2),
722 0x00000000,
723 (0x6e00 << 16) | (0xc90c >> 2),
724 0x00000000,
725 (0x7e00 << 16) | (0xc90c >> 2),
726 0x00000000,
727 (0x4e00 << 16) | (0xc910 >> 2),
728 0x00000000,
729 (0x5e00 << 16) | (0xc910 >> 2),
730 0x00000000,
731 (0x6e00 << 16) | (0xc910 >> 2),
732 0x00000000,
733 (0x7e00 << 16) | (0xc910 >> 2),
734 0x00000000,
735 (0x0e00 << 16) | (0xc99c >> 2),
736 0x00000000,
737 (0x0e00 << 16) | (0x9834 >> 2),
738 0x00000000,
739 (0x0000 << 16) | (0x30f00 >> 2),
740 0x00000000,
741 (0x0000 << 16) | (0x30f04 >> 2),
742 0x00000000,
743 (0x0000 << 16) | (0x30f08 >> 2),
744 0x00000000,
745 (0x0000 << 16) | (0x30f0c >> 2),
746 0x00000000,
747 (0x0600 << 16) | (0x9b7c >> 2),
748 0x00000000,
749 (0x0e00 << 16) | (0x8a14 >> 2),
750 0x00000000,
751 (0x0e00 << 16) | (0x8a18 >> 2),
752 0x00000000,
753 (0x0600 << 16) | (0x30a00 >> 2),
754 0x00000000,
755 (0x0e00 << 16) | (0x8bf0 >> 2),
756 0x00000000,
757 (0x0e00 << 16) | (0x8bcc >> 2),
758 0x00000000,
759 (0x0e00 << 16) | (0x8b24 >> 2),
760 0x00000000,
761 (0x0e00 << 16) | (0x30a04 >> 2),
762 0x00000000,
763 (0x0600 << 16) | (0x30a10 >> 2),
764 0x00000000,
765 (0x0600 << 16) | (0x30a14 >> 2),
766 0x00000000,
767 (0x0600 << 16) | (0x30a18 >> 2),
768 0x00000000,
769 (0x0600 << 16) | (0x30a2c >> 2),
770 0x00000000,
771 (0x0e00 << 16) | (0xc700 >> 2),
772 0x00000000,
773 (0x0e00 << 16) | (0xc704 >> 2),
774 0x00000000,
775 (0x0e00 << 16) | (0xc708 >> 2),
776 0x00000000,
777 (0x0e00 << 16) | (0xc768 >> 2),
778 0x00000000,
779 (0x0400 << 16) | (0xc770 >> 2),
780 0x00000000,
781 (0x0400 << 16) | (0xc774 >> 2),
782 0x00000000,
783 (0x0400 << 16) | (0xc798 >> 2),
784 0x00000000,
785 (0x0400 << 16) | (0xc79c >> 2),
786 0x00000000,
787 (0x0e00 << 16) | (0x9100 >> 2),
788 0x00000000,
789 (0x0e00 << 16) | (0x3c010 >> 2),
790 0x00000000,
791 (0x0e00 << 16) | (0x8c00 >> 2),
792 0x00000000,
793 (0x0e00 << 16) | (0x8c04 >> 2),
794 0x00000000,
795 (0x0e00 << 16) | (0x8c20 >> 2),
796 0x00000000,
797 (0x0e00 << 16) | (0x8c38 >> 2),
798 0x00000000,
799 (0x0e00 << 16) | (0x8c3c >> 2),
800 0x00000000,
801 (0x0e00 << 16) | (0xae00 >> 2),
802 0x00000000,
803 (0x0e00 << 16) | (0x9604 >> 2),
804 0x00000000,
805 (0x0e00 << 16) | (0xac08 >> 2),
806 0x00000000,
807 (0x0e00 << 16) | (0xac0c >> 2),
808 0x00000000,
809 (0x0e00 << 16) | (0xac10 >> 2),
810 0x00000000,
811 (0x0e00 << 16) | (0xac14 >> 2),
812 0x00000000,
813 (0x0e00 << 16) | (0xac58 >> 2),
814 0x00000000,
815 (0x0e00 << 16) | (0xac68 >> 2),
816 0x00000000,
817 (0x0e00 << 16) | (0xac6c >> 2),
818 0x00000000,
819 (0x0e00 << 16) | (0xac70 >> 2),
820 0x00000000,
821 (0x0e00 << 16) | (0xac74 >> 2),
822 0x00000000,
823 (0x0e00 << 16) | (0xac78 >> 2),
824 0x00000000,
825 (0x0e00 << 16) | (0xac7c >> 2),
826 0x00000000,
827 (0x0e00 << 16) | (0xac80 >> 2),
828 0x00000000,
829 (0x0e00 << 16) | (0xac84 >> 2),
830 0x00000000,
831 (0x0e00 << 16) | (0xac88 >> 2),
832 0x00000000,
833 (0x0e00 << 16) | (0xac8c >> 2),
834 0x00000000,
835 (0x0e00 << 16) | (0x970c >> 2),
836 0x00000000,
837 (0x0e00 << 16) | (0x9714 >> 2),
838 0x00000000,
839 (0x0e00 << 16) | (0x9718 >> 2),
840 0x00000000,
841 (0x0e00 << 16) | (0x971c >> 2),
842 0x00000000,
843 (0x0e00 << 16) | (0x31068 >> 2),
844 0x00000000,
845 (0x4e00 << 16) | (0x31068 >> 2),
846 0x00000000,
847 (0x5e00 << 16) | (0x31068 >> 2),
848 0x00000000,
849 (0x6e00 << 16) | (0x31068 >> 2),
850 0x00000000,
851 (0x7e00 << 16) | (0x31068 >> 2),
852 0x00000000,
853 (0x0e00 << 16) | (0xcd10 >> 2),
854 0x00000000,
855 (0x0e00 << 16) | (0xcd14 >> 2),
856 0x00000000,
857 (0x0e00 << 16) | (0x88b0 >> 2),
858 0x00000000,
859 (0x0e00 << 16) | (0x88b4 >> 2),
860 0x00000000,
861 (0x0e00 << 16) | (0x88b8 >> 2),
862 0x00000000,
863 (0x0e00 << 16) | (0x88bc >> 2),
864 0x00000000,
865 (0x0400 << 16) | (0x89c0 >> 2),
866 0x00000000,
867 (0x0e00 << 16) | (0x88c4 >> 2),
868 0x00000000,
869 (0x0e00 << 16) | (0x88c8 >> 2),
870 0x00000000,
871 (0x0e00 << 16) | (0x88d0 >> 2),
872 0x00000000,
873 (0x0e00 << 16) | (0x88d4 >> 2),
874 0x00000000,
875 (0x0e00 << 16) | (0x88d8 >> 2),
876 0x00000000,
877 (0x0e00 << 16) | (0x8980 >> 2),
878 0x00000000,
879 (0x0e00 << 16) | (0x30938 >> 2),
880 0x00000000,
881 (0x0e00 << 16) | (0x3093c >> 2),
882 0x00000000,
883 (0x0e00 << 16) | (0x30940 >> 2),
884 0x00000000,
885 (0x0e00 << 16) | (0x89a0 >> 2),
886 0x00000000,
887 (0x0e00 << 16) | (0x30900 >> 2),
888 0x00000000,
889 (0x0e00 << 16) | (0x30904 >> 2),
890 0x00000000,
891 (0x0e00 << 16) | (0x89b4 >> 2),
892 0x00000000,
893 (0x0e00 << 16) | (0x3e1fc >> 2),
894 0x00000000,
895 (0x0e00 << 16) | (0x3c210 >> 2),
896 0x00000000,
897 (0x0e00 << 16) | (0x3c214 >> 2),
898 0x00000000,
899 (0x0e00 << 16) | (0x3c218 >> 2),
900 0x00000000,
901 (0x0e00 << 16) | (0x8904 >> 2),
902 0x00000000,
903 0x5,
904 (0x0e00 << 16) | (0x8c28 >> 2),
905 (0x0e00 << 16) | (0x8c2c >> 2),
906 (0x0e00 << 16) | (0x8c30 >> 2),
907 (0x0e00 << 16) | (0x8c34 >> 2),
908 (0x0e00 << 16) | (0x9600 >> 2),
909};
910
101static const u32 bonaire_golden_spm_registers[] = 911static const u32 bonaire_golden_spm_registers[] =
102{ 912{
103 0x30800, 0xe0ffffff, 0xe0000000 913 0x30800, 0xe0ffffff, 0xe0000000
@@ -744,7 +1554,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
744 const char *chip_name; 1554 const char *chip_name;
745 size_t pfp_req_size, me_req_size, ce_req_size, 1555 size_t pfp_req_size, me_req_size, ce_req_size,
746 mec_req_size, rlc_req_size, mc_req_size, 1556 mec_req_size, rlc_req_size, mc_req_size,
747 sdma_req_size; 1557 sdma_req_size, smc_req_size;
748 char fw_name[30]; 1558 char fw_name[30];
749 int err; 1559 int err;
750 1560
@@ -760,6 +1570,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
760 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4; 1570 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
761 mc_req_size = CIK_MC_UCODE_SIZE * 4; 1571 mc_req_size = CIK_MC_UCODE_SIZE * 4;
762 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; 1572 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1573 smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
763 break; 1574 break;
764 case CHIP_KAVERI: 1575 case CHIP_KAVERI:
765 chip_name = "KAVERI"; 1576 chip_name = "KAVERI";
@@ -851,7 +1662,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
851 err = -EINVAL; 1662 err = -EINVAL;
852 } 1663 }
853 1664
854 /* No MC ucode on APUs */ 1665 /* No SMC, MC ucode on APUs */
855 if (!(rdev->flags & RADEON_IS_IGP)) { 1666 if (!(rdev->flags & RADEON_IS_IGP)) {
856 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 1667 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
857 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); 1668 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
@@ -863,6 +1674,21 @@ static int cik_init_microcode(struct radeon_device *rdev)
863 rdev->mc_fw->size, fw_name); 1674 rdev->mc_fw->size, fw_name);
864 err = -EINVAL; 1675 err = -EINVAL;
865 } 1676 }
1677
1678 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
1679 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
1680 if (err) {
1681 printk(KERN_ERR
1682 "smc: error loading firmware \"%s\"\n",
1683 fw_name);
1684 release_firmware(rdev->smc_fw);
1685 rdev->smc_fw = NULL;
1686 } else if (rdev->smc_fw->size != smc_req_size) {
1687 printk(KERN_ERR
1688 "cik_smc: Bogus length %zu in firmware \"%s\"\n",
1689 rdev->smc_fw->size, fw_name);
1690 err = -EINVAL;
1691 }
866 } 1692 }
867 1693
868out: 1694out:
@@ -881,6 +1707,8 @@ out:
881 rdev->rlc_fw = NULL; 1707 rdev->rlc_fw = NULL;
882 release_firmware(rdev->mc_fw); 1708 release_firmware(rdev->mc_fw);
883 rdev->mc_fw = NULL; 1709 rdev->mc_fw = NULL;
1710 release_firmware(rdev->smc_fw);
1711 rdev->smc_fw = NULL;
884 } 1712 }
885 return err; 1713 return err;
886} 1714}
@@ -1880,7 +2708,46 @@ static void cik_gpu_init(struct radeon_device *rdev)
1880 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; 2708 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
1881 break; 2709 break;
1882 case CHIP_KAVERI: 2710 case CHIP_KAVERI:
1883 /* TODO */ 2711 rdev->config.cik.max_shader_engines = 1;
2712 rdev->config.cik.max_tile_pipes = 4;
2713 if ((rdev->pdev->device == 0x1304) ||
2714 (rdev->pdev->device == 0x1305) ||
2715 (rdev->pdev->device == 0x130C) ||
2716 (rdev->pdev->device == 0x130F) ||
2717 (rdev->pdev->device == 0x1310) ||
2718 (rdev->pdev->device == 0x1311) ||
2719 (rdev->pdev->device == 0x131C)) {
2720 rdev->config.cik.max_cu_per_sh = 8;
2721 rdev->config.cik.max_backends_per_se = 2;
2722 } else if ((rdev->pdev->device == 0x1309) ||
2723 (rdev->pdev->device == 0x130A) ||
2724 (rdev->pdev->device == 0x130D) ||
2725 (rdev->pdev->device == 0x1313)) {
2726 rdev->config.cik.max_cu_per_sh = 6;
2727 rdev->config.cik.max_backends_per_se = 2;
2728 } else if ((rdev->pdev->device == 0x1306) ||
2729 (rdev->pdev->device == 0x1307) ||
2730 (rdev->pdev->device == 0x130B) ||
2731 (rdev->pdev->device == 0x130E) ||
2732 (rdev->pdev->device == 0x1315) ||
2733 (rdev->pdev->device == 0x131B)) {
2734 rdev->config.cik.max_cu_per_sh = 4;
2735 rdev->config.cik.max_backends_per_se = 1;
2736 } else {
2737 rdev->config.cik.max_cu_per_sh = 3;
2738 rdev->config.cik.max_backends_per_se = 1;
2739 }
2740 rdev->config.cik.max_sh_per_se = 1;
2741 rdev->config.cik.max_texture_channel_caches = 4;
2742 rdev->config.cik.max_gprs = 256;
2743 rdev->config.cik.max_gs_threads = 16;
2744 rdev->config.cik.max_hw_contexts = 8;
2745
2746 rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
2747 rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
2748 rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
2749 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
2750 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
1884 break; 2751 break;
1885 case CHIP_KABINI: 2752 case CHIP_KABINI:
1886 default: 2753 default:
@@ -2535,8 +3402,8 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev)
2535 /* ring 0 - compute and gfx */ 3402 /* ring 0 - compute and gfx */
2536 /* Set ring buffer size */ 3403 /* Set ring buffer size */
2537 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 3404 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2538 rb_bufsz = drm_order(ring->ring_size / 8); 3405 rb_bufsz = order_base_2(ring->ring_size / 8);
2539 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 3406 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2540#ifdef __BIG_ENDIAN 3407#ifdef __BIG_ENDIAN
2541 tmp |= BUF_SWAP_32BIT; 3408 tmp |= BUF_SWAP_32BIT;
2542#endif 3409#endif
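The drm_order() to order_base_2() switch in this hunk (and in the CP_HPD_EOP_CONTROL and cp_hqd_pq_control hunks below) swaps the DRM-private helper for the generic kernel one with the same rounding-up-log2 semantics: these register fields expect the buffer size as a log2 value. A rough userspace stand-in, just to make the encoding concrete (the 1 MiB ring size is an assumption for illustration):

    #include <stdio.h>

    /* Rough stand-in for the kernel's order_base_2(): the smallest n
     * such that (1UL << n) >= x, i.e. log2 of x rounded up. */
    static unsigned int order_base_2_approx(unsigned long x)
    {
            unsigned int n = 0;

            while ((1UL << n) < x)
                    n++;
            return n;
    }

    int main(void)
    {
            /* 1 MiB gfx ring: 1048576 / 8 = 131072 -> rb_bufsz = 17 */
            printf("rb_bufsz = %u\n", order_base_2_approx(1048576 / 8));
            return 0;
    }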
@@ -2593,7 +3460,6 @@ u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
2593 cik_srbm_select(rdev, 0, 0, 0, 0); 3460 cik_srbm_select(rdev, 0, 0, 0, 0);
2594 mutex_unlock(&rdev->srbm_mutex); 3461 mutex_unlock(&rdev->srbm_mutex);
2595 } 3462 }
2596 rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
2597 3463
2598 return rptr; 3464 return rptr;
2599} 3465}
@@ -2612,7 +3478,6 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
2612 cik_srbm_select(rdev, 0, 0, 0, 0); 3478 cik_srbm_select(rdev, 0, 0, 0, 0);
2613 mutex_unlock(&rdev->srbm_mutex); 3479 mutex_unlock(&rdev->srbm_mutex);
2614 } 3480 }
2615 wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
2616 3481
2617 return wptr; 3482 return wptr;
2618} 3483}
@@ -2620,10 +3485,8 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
2620void cik_compute_ring_set_wptr(struct radeon_device *rdev, 3485void cik_compute_ring_set_wptr(struct radeon_device *rdev,
2621 struct radeon_ring *ring) 3486 struct radeon_ring *ring)
2622{ 3487{
2623 u32 wptr = (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask; 3488 rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr);
2624 3489 WDOORBELL32(ring->doorbell_offset, ring->wptr);
2625 rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(wptr);
2626 WDOORBELL32(ring->doorbell_offset, wptr);
2627} 3490}
2628 3491
2629/** 3492/**
@@ -2920,7 +3783,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
2920 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 3783 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2921 tmp = RREG32(CP_HPD_EOP_CONTROL); 3784 tmp = RREG32(CP_HPD_EOP_CONTROL);
2922 tmp &= ~EOP_SIZE_MASK; 3785 tmp &= ~EOP_SIZE_MASK;
2923 tmp |= drm_order(MEC_HPD_SIZE / 8); 3786 tmp |= order_base_2(MEC_HPD_SIZE / 8);
2924 WREG32(CP_HPD_EOP_CONTROL, tmp); 3787 WREG32(CP_HPD_EOP_CONTROL, tmp);
2925 } 3788 }
2926 cik_srbm_select(rdev, 0, 0, 0, 0); 3789 cik_srbm_select(rdev, 0, 0, 0, 0);
@@ -3037,9 +3900,9 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
3037 ~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK); 3900 ~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK);
3038 3901
3039 mqd->queue_state.cp_hqd_pq_control |= 3902 mqd->queue_state.cp_hqd_pq_control |=
3040 drm_order(rdev->ring[idx].ring_size / 8); 3903 order_base_2(rdev->ring[idx].ring_size / 8);
3041 mqd->queue_state.cp_hqd_pq_control |= 3904 mqd->queue_state.cp_hqd_pq_control |=
3042 (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8); 3905 (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8);
3043#ifdef __BIG_ENDIAN 3906#ifdef __BIG_ENDIAN
3044 mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT; 3907 mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
3045#endif 3908#endif
@@ -3150,13 +4013,6 @@ static int cik_cp_resume(struct radeon_device *rdev)
3150{ 4013{
3151 int r; 4014 int r;
3152 4015
3153 /* Reset all cp blocks */
3154 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
3155 RREG32(GRBM_SOFT_RESET);
3156 mdelay(15);
3157 WREG32(GRBM_SOFT_RESET, 0);
3158 RREG32(GRBM_SOFT_RESET);
3159
3160 r = cik_cp_load_microcode(rdev); 4016 r = cik_cp_load_microcode(rdev);
3161 if (r) 4017 if (r)
3162 return r; 4018 return r;
@@ -3171,579 +4027,6 @@ static int cik_cp_resume(struct radeon_device *rdev)
3171 return 0; 4027 return 0;
3172} 4028}
3173 4029
3174/*
3175 * sDMA - System DMA
3176 * Starting with CIK, the GPU has new asynchronous
3177 * DMA engines. These engines are used for compute
3178 * and gfx. There are two DMA engines (SDMA0, SDMA1)
3179 * and each one supports 1 ring buffer used for gfx
3180 * and 2 queues used for compute.
3181 *
3182 * The programming model is very similar to the CP
3183 * (ring buffer, IBs, etc.), but sDMA has its own
3184 * packet format that is different from the PM4 format
3185 * used by the CP. sDMA supports copying data, writing
3186 * embedded data, solid fills, and a number of other
3187 * things. It also has support for tiling/detiling of
3188 * buffers.
3189 */
3190/**
3191 * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
3192 *
3193 * @rdev: radeon_device pointer
3194 * @ib: IB object to schedule
3195 *
3196 * Schedule an IB in the DMA ring (CIK).
3197 */
3198void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
3199 struct radeon_ib *ib)
3200{
3201 struct radeon_ring *ring = &rdev->ring[ib->ring];
3202 u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;
3203
3204 if (rdev->wb.enabled) {
3205 u32 next_rptr = ring->wptr + 5;
3206 while ((next_rptr & 7) != 4)
3207 next_rptr++;
3208 next_rptr += 4;
3209 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
3210 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3211 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
3212 radeon_ring_write(ring, 1); /* number of DWs to follow */
3213 radeon_ring_write(ring, next_rptr);
3214 }
3215
3216 /* IB packet must end on an 8 DW boundary */
3217 while ((ring->wptr & 7) != 4)
3218 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
3219 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
3220 radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
3221 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
3222 radeon_ring_write(ring, ib->length_dw);
3223
3224}
3225
3226/**
3227 * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
3228 *
3229 * @rdev: radeon_device pointer
3230 * @fence: radeon fence object
3231 *
3232 * Add a DMA fence packet to the ring to write
3233 * the fence seq number and DMA trap packet to generate
3234 * an interrupt if needed (CIK).
3235 */
3236void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
3237 struct radeon_fence *fence)
3238{
3239 struct radeon_ring *ring = &rdev->ring[fence->ring];
3240 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
3241 u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
3242 SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
3243 u32 ref_and_mask;
3244
3245 if (fence->ring == R600_RING_TYPE_DMA_INDEX)
3246 ref_and_mask = SDMA0;
3247 else
3248 ref_and_mask = SDMA1;
3249
3250 /* write the fence */
3251 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
3252 radeon_ring_write(ring, addr & 0xffffffff);
3253 radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
3254 radeon_ring_write(ring, fence->seq);
3255 /* generate an interrupt */
3256 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
3257 /* flush HDP */
3258 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
3259 radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
3260 radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
3261 radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
3262 radeon_ring_write(ring, ref_and_mask); /* MASK */
3263 radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
3264}
3265
3266/**
3267 * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
3268 *
3269 * @rdev: radeon_device pointer
3270 * @ring: radeon_ring structure holding ring information
3271 * @semaphore: radeon semaphore object
3272 * @emit_wait: wait or signal semaphore
3273 *
3274 * Add a DMA semaphore packet to the ring to wait on or signal
3275 * other rings (CIK).
3276 */
3277void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
3278 struct radeon_ring *ring,
3279 struct radeon_semaphore *semaphore,
3280 bool emit_wait)
3281{
3282 u64 addr = semaphore->gpu_addr;
3283 u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;
3284
3285 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
3286 radeon_ring_write(ring, addr & 0xfffffff8);
3287 radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
3288}
3289
3290/**
3291 * cik_sdma_gfx_stop - stop the gfx async dma engines
3292 *
3293 * @rdev: radeon_device pointer
3294 *
3295 * Stop the gfx async dma ring buffers (CIK).
3296 */
3297static void cik_sdma_gfx_stop(struct radeon_device *rdev)
3298{
3299 u32 rb_cntl, reg_offset;
3300 int i;
3301
3302 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
3303
3304 for (i = 0; i < 2; i++) {
3305 if (i == 0)
3306 reg_offset = SDMA0_REGISTER_OFFSET;
3307 else
3308 reg_offset = SDMA1_REGISTER_OFFSET;
3309 rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
3310 rb_cntl &= ~SDMA_RB_ENABLE;
3311 WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
3312 WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
3313 }
3314}
3315
3316/**
3317 * cik_sdma_rlc_stop - stop the compute async dma engines
3318 *
3319 * @rdev: radeon_device pointer
3320 *
3321 * Stop the compute async dma queues (CIK).
3322 */
3323static void cik_sdma_rlc_stop(struct radeon_device *rdev)
3324{
3325 /* XXX todo */
3326}
3327
3328/**
3329 * cik_sdma_enable - halt or unhalt the async dma engines
3330 *
3331 * @rdev: radeon_device pointer
3332 * @enable: enable/disable the DMA MEs.
3333 *
3334 * Halt or unhalt the async dma engines (CIK).
3335 */
3336static void cik_sdma_enable(struct radeon_device *rdev, bool enable)
3337{
3338 u32 me_cntl, reg_offset;
3339 int i;
3340
3341 for (i = 0; i < 2; i++) {
3342 if (i == 0)
3343 reg_offset = SDMA0_REGISTER_OFFSET;
3344 else
3345 reg_offset = SDMA1_REGISTER_OFFSET;
3346 me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
3347 if (enable)
3348 me_cntl &= ~SDMA_HALT;
3349 else
3350 me_cntl |= SDMA_HALT;
3351 WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
3352 }
3353}
3354
3355/**
3356 * cik_sdma_gfx_resume - setup and start the async dma engines
3357 *
3358 * @rdev: radeon_device pointer
3359 *
3360 * Set up the gfx DMA ring buffers and enable them (CIK).
3361 * Returns 0 for success, error for failure.
3362 */
3363static int cik_sdma_gfx_resume(struct radeon_device *rdev)
3364{
3365 struct radeon_ring *ring;
3366 u32 rb_cntl, ib_cntl;
3367 u32 rb_bufsz;
3368 u32 reg_offset, wb_offset;
3369 int i, r;
3370
3371 for (i = 0; i < 2; i++) {
3372 if (i == 0) {
3373 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
3374 reg_offset = SDMA0_REGISTER_OFFSET;
3375 wb_offset = R600_WB_DMA_RPTR_OFFSET;
3376 } else {
3377 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
3378 reg_offset = SDMA1_REGISTER_OFFSET;
3379 wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
3380 }
3381
3382 WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
3383 WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
3384
3385 /* Set ring buffer size in dwords */
3386 rb_bufsz = drm_order(ring->ring_size / 4);
3387 rb_cntl = rb_bufsz << 1;
3388#ifdef __BIG_ENDIAN
3389 rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
3390#endif
3391 WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
3392
3393 /* Initialize the ring buffer's read and write pointers */
3394 WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
3395 WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);
3396
3397 /* set the wb address whether it's enabled or not */
3398 WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
3399 upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
3400 WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
3401 ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
3402
3403 if (rdev->wb.enabled)
3404 rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;
3405
3406 WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
3407 WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);
3408
3409 ring->wptr = 0;
3410 WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);
3411
3412 ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2;
3413
3414 /* enable DMA RB */
3415 WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);
3416
3417 ib_cntl = SDMA_IB_ENABLE;
3418#ifdef __BIG_ENDIAN
3419 ib_cntl |= SDMA_IB_SWAP_ENABLE;
3420#endif
3421 /* enable DMA IBs */
3422 WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);
3423
3424 ring->ready = true;
3425
3426 r = radeon_ring_test(rdev, ring->idx, ring);
3427 if (r) {
3428 ring->ready = false;
3429 return r;
3430 }
3431 }
3432
3433 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
3434
3435 return 0;
3436}
3437
3438/**
3439 * cik_sdma_rlc_resume - setup and start the async dma engines
3440 *
3441 * @rdev: radeon_device pointer
3442 *
3443 * Set up the compute DMA queues and enable them (CIK).
3444 * Returns 0 for success, error for failure.
3445 */
3446static int cik_sdma_rlc_resume(struct radeon_device *rdev)
3447{
3448 /* XXX todo */
3449 return 0;
3450}
3451
3452/**
3453 * cik_sdma_load_microcode - load the sDMA ME ucode
3454 *
3455 * @rdev: radeon_device pointer
3456 *
3457 * Loads the sDMA0/1 ucode.
3458 * Returns 0 for success, -EINVAL if the ucode is not available.
3459 */
3460static int cik_sdma_load_microcode(struct radeon_device *rdev)
3461{
3462 const __be32 *fw_data;
3463 int i;
3464
3465 if (!rdev->sdma_fw)
3466 return -EINVAL;
3467
3468 /* stop the gfx rings and rlc compute queues */
3469 cik_sdma_gfx_stop(rdev);
3470 cik_sdma_rlc_stop(rdev);
3471
3472 /* halt the MEs */
3473 cik_sdma_enable(rdev, false);
3474
3475 /* sdma0 */
3476 fw_data = (const __be32 *)rdev->sdma_fw->data;
3477 WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
3478 for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
3479 WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
3480 WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
3481
3482 /* sdma1 */
3483 fw_data = (const __be32 *)rdev->sdma_fw->data;
3484 WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
3485 for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
3486 WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
3487 WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
3488
3489 WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
3490 WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
3491 return 0;
3492}
3493
3494/**
3495 * cik_sdma_resume - setup and start the async dma engines
3496 *
3497 * @rdev: radeon_device pointer
3498 *
3499 * Set up the DMA engines and enable them (CIK).
3500 * Returns 0 for success, error for failure.
3501 */
3502static int cik_sdma_resume(struct radeon_device *rdev)
3503{
3504 int r;
3505
3506 /* Reset dma */
3507 WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
3508 RREG32(SRBM_SOFT_RESET);
3509 udelay(50);
3510 WREG32(SRBM_SOFT_RESET, 0);
3511 RREG32(SRBM_SOFT_RESET);
3512
3513 r = cik_sdma_load_microcode(rdev);
3514 if (r)
3515 return r;
3516
3517 /* unhalt the MEs */
3518 cik_sdma_enable(rdev, true);
3519
3520 /* start the gfx rings and rlc compute queues */
3521 r = cik_sdma_gfx_resume(rdev);
3522 if (r)
3523 return r;
3524 r = cik_sdma_rlc_resume(rdev);
3525 if (r)
3526 return r;
3527
3528 return 0;
3529}
3530
3531/**
3532 * cik_sdma_fini - tear down the async dma engines
3533 *
3534 * @rdev: radeon_device pointer
3535 *
3536 * Stop the async dma engines and free the rings (CIK).
3537 */
3538static void cik_sdma_fini(struct radeon_device *rdev)
3539{
3540 /* stop the gfx rings and rlc compute queues */
3541 cik_sdma_gfx_stop(rdev);
3542 cik_sdma_rlc_stop(rdev);
3543 /* halt the MEs */
3544 cik_sdma_enable(rdev, false);
3545 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
3546 radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
3547 /* XXX - compute dma queue tear down */
3548}
3549
3550/**
3551 * cik_copy_dma - copy pages using the DMA engine
3552 *
3553 * @rdev: radeon_device pointer
3554 * @src_offset: src GPU address
3555 * @dst_offset: dst GPU address
3556 * @num_gpu_pages: number of GPU pages to xfer
3557 * @fence: radeon fence object
3558 *
3559 * Copy GPU pages using the DMA engine (CIK).
3560 * Used by the radeon ttm implementation to move pages if
3561 * registered as the asic copy callback.
3562 */
3563int cik_copy_dma(struct radeon_device *rdev,
3564 uint64_t src_offset, uint64_t dst_offset,
3565 unsigned num_gpu_pages,
3566 struct radeon_fence **fence)
3567{
3568 struct radeon_semaphore *sem = NULL;
3569 int ring_index = rdev->asic->copy.dma_ring_index;
3570 struct radeon_ring *ring = &rdev->ring[ring_index];
3571 u32 size_in_bytes, cur_size_in_bytes;
3572 int i, num_loops;
3573 int r = 0;
3574
3575 r = radeon_semaphore_create(rdev, &sem);
3576 if (r) {
3577 DRM_ERROR("radeon: moving bo (%d).\n", r);
3578 return r;
3579 }
3580
3581 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
3582 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
3583 r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
3584 if (r) {
3585 DRM_ERROR("radeon: moving bo (%d).\n", r);
3586 radeon_semaphore_free(rdev, &sem, NULL);
3587 return r;
3588 }
3589
3590 if (radeon_fence_need_sync(*fence, ring->idx)) {
3591 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
3592 ring->idx);
3593 radeon_fence_note_sync(*fence, ring->idx);
3594 } else {
3595 radeon_semaphore_free(rdev, &sem, NULL);
3596 }
3597
3598 for (i = 0; i < num_loops; i++) {
3599 cur_size_in_bytes = size_in_bytes;
3600 if (cur_size_in_bytes > 0x1fffff)
3601 cur_size_in_bytes = 0x1fffff;
3602 size_in_bytes -= cur_size_in_bytes;
3603 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
3604 radeon_ring_write(ring, cur_size_in_bytes);
3605 radeon_ring_write(ring, 0); /* src/dst endian swap */
3606 radeon_ring_write(ring, src_offset & 0xffffffff);
3607 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
3608 radeon_ring_write(ring, dst_offset & 0xfffffffc);
3609 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
3610 src_offset += cur_size_in_bytes;
3611 dst_offset += cur_size_in_bytes;
3612 }
3613
3614 r = radeon_fence_emit(rdev, fence, ring->idx);
3615 if (r) {
3616 radeon_ring_unlock_undo(rdev, ring);
3617 return r;
3618 }
3619
3620 radeon_ring_unlock_commit(rdev, ring);
3621 radeon_semaphore_free(rdev, &sem, *fence);
3622
3623 return r;
3624}
3625
3626/**
3627 * cik_sdma_ring_test - simple async dma engine test
3628 *
3629 * @rdev: radeon_device pointer
3630 * @ring: radeon_ring structure holding ring information
3631 *
3632 * Test the DMA engine by using it to write a
3633 * value to memory (CIK).
3634 * Returns 0 for success, error for failure.
3635 */
3636int cik_sdma_ring_test(struct radeon_device *rdev,
3637 struct radeon_ring *ring)
3638{
3639 unsigned i;
3640 int r;
3641 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3642 u32 tmp;
3643
3644 if (!ptr) {
3645 DRM_ERROR("invalid vram scratch pointer\n");
3646 return -EINVAL;
3647 }
3648
3649 tmp = 0xCAFEDEAD;
3650 writel(tmp, ptr);
3651
3652 r = radeon_ring_lock(rdev, ring, 4);
3653 if (r) {
3654 DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
3655 return r;
3656 }
3657 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
3658 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
3659 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
3660 radeon_ring_write(ring, 1); /* number of DWs to follow */
3661 radeon_ring_write(ring, 0xDEADBEEF);
3662 radeon_ring_unlock_commit(rdev, ring);
3663
3664 for (i = 0; i < rdev->usec_timeout; i++) {
3665 tmp = readl(ptr);
3666 if (tmp == 0xDEADBEEF)
3667 break;
3668 DRM_UDELAY(1);
3669 }
3670
3671 if (i < rdev->usec_timeout) {
3672 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
3673 } else {
3674 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
3675 ring->idx, tmp);
3676 r = -EINVAL;
3677 }
3678 return r;
3679}
3680
3681/**
3682 * cik_sdma_ib_test - test an IB on the DMA engine
3683 *
3684 * @rdev: radeon_device pointer
3685 * @ring: radeon_ring structure holding ring information
3686 *
3687 * Test a simple IB in the DMA ring (CIK).
3688 * Returns 0 on success, error on failure.
3689 */
3690int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3691{
3692 struct radeon_ib ib;
3693 unsigned i;
3694 int r;
3695 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3696 u32 tmp = 0;
3697
3698 if (!ptr) {
3699 DRM_ERROR("invalid vram scratch pointer\n");
3700 return -EINVAL;
3701 }
3702
3703 tmp = 0xCAFEDEAD;
3704 writel(tmp, ptr);
3705
3706 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
3707 if (r) {
3708 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3709 return r;
3710 }
3711
3712 ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
3713 ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
3714 ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
3715 ib.ptr[3] = 1;
3716 ib.ptr[4] = 0xDEADBEEF;
3717 ib.length_dw = 5;
3718
3719 r = radeon_ib_schedule(rdev, &ib, NULL);
3720 if (r) {
3721 radeon_ib_free(rdev, &ib);
3722 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3723 return r;
3724 }
3725 r = radeon_fence_wait(ib.fence, false);
3726 if (r) {
3727 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3728 return r;
3729 }
3730 for (i = 0; i < rdev->usec_timeout; i++) {
3731 tmp = readl(ptr);
3732 if (tmp == 0xDEADBEEF)
3733 break;
3734 DRM_UDELAY(1);
3735 }
3736 if (i < rdev->usec_timeout) {
3737 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
3738 } else {
3739 DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
3740 r = -EINVAL;
3741 }
3742 radeon_ib_free(rdev, &ib);
3743 return r;
3744}
3745
3746
3747static void cik_print_gpu_status_regs(struct radeon_device *rdev) 4030static void cik_print_gpu_status_regs(struct radeon_device *rdev)
3748{ 4031{
3749 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", 4032 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
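Everything removed in this hunk is the sDMA support migrating out of cik.c; the extern cik_sdma_* declarations added near the top of this diff point at its new home. One detail from the removed cik_sdma_ring_ib_execute is still worth spelling out: the INDIRECT_BUFFER packet is four dwords, and the ring is first padded with NOPs until the write pointer is congruent to 4 modulo 8, which is exactly what makes the packet end on an 8-dword boundary. A tiny standalone sketch of that padding rule (hypothetical helper, not driver code):

    #include <stdio.h>

    /* NOP dwords needed so that a 4-dword INDIRECT_BUFFER packet written
     * right after the padding ends on an 8-dword boundary, i.e. the packet
     * must start at wptr % 8 == 4. */
    static unsigned int sdma_ib_pad_dwords(unsigned int wptr)
    {
            unsigned int pad = 0;

            while (((wptr + pad) & 7) != 4)
                    pad++;
            return pad;
    }

    int main(void)
    {
            unsigned int wptr;

            for (wptr = 0; wptr < 8; wptr++)
                    printf("wptr %u: %u NOPs, packet ends at offset %u\n",
                           wptr, sdma_ib_pad_dwords(wptr),
                           (wptr + sdma_ib_pad_dwords(wptr) + 4) & 7);
            return 0;
    }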
@@ -3793,7 +4076,7 @@ static void cik_print_gpu_status_regs(struct radeon_device *rdev)
3793 * mask to be used by cik_gpu_soft_reset(). 4076 * mask to be used by cik_gpu_soft_reset().
3794 * Returns a mask of the blocks to be reset. 4077 * Returns a mask of the blocks to be reset.
3795 */ 4078 */
3796static u32 cik_gpu_check_soft_reset(struct radeon_device *rdev) 4079u32 cik_gpu_check_soft_reset(struct radeon_device *rdev)
3797{ 4080{
3798 u32 reset_mask = 0; 4081 u32 reset_mask = 0;
3799 u32 tmp; 4082 u32 tmp;
@@ -4044,34 +4327,6 @@ bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
4044 return radeon_ring_test_lockup(rdev, ring); 4327 return radeon_ring_test_lockup(rdev, ring);
4045} 4328}
4046 4329
4047/**
4048 * cik_sdma_is_lockup - Check if the DMA engine is locked up
4049 *
4050 * @rdev: radeon_device pointer
4051 * @ring: radeon_ring structure holding ring information
4052 *
4053 * Check if the async DMA engine is locked up (CIK).
4054 * Returns true if the engine appears to be locked up, false if not.
4055 */
4056bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
4057{
4058 u32 reset_mask = cik_gpu_check_soft_reset(rdev);
4059 u32 mask;
4060
4061 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
4062 mask = RADEON_RESET_DMA;
4063 else
4064 mask = RADEON_RESET_DMA1;
4065
4066 if (!(reset_mask & mask)) {
4067 radeon_ring_lockup_update(ring);
4068 return false;
4069 }
4070 /* force ring activities */
4071 radeon_ring_force_activity(rdev, ring);
4072 return radeon_ring_test_lockup(rdev, ring);
4073}
4074
4075 4330 /* MC */
4076 4331 /**
4077 4332  * cik_mc_program - program the GPU memory controller
@@ -4608,131 +4863,8 @@ void cik_vm_set_page(struct radeon_device *rdev,
4608 4863 	}
4609 4864 	} else {
4610 4865 		/* DMA */
4611		if (flags & RADEON_VM_PAGE_SYSTEM) {
4866		cik_sdma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
4612 while (count) {
4613 ndw = count * 2;
4614 if (ndw > 0xFFFFE)
4615 ndw = 0xFFFFE;
4616
4617 /* for non-physically contiguous pages (system) */
4618 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
4619 ib->ptr[ib->length_dw++] = pe;
4620 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
4621 ib->ptr[ib->length_dw++] = ndw;
4622 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
4623 if (flags & RADEON_VM_PAGE_SYSTEM) {
4624 value = radeon_vm_map_gart(rdev, addr);
4625 value &= 0xFFFFFFFFFFFFF000ULL;
4626 } else if (flags & RADEON_VM_PAGE_VALID) {
4627 value = addr;
4628 } else {
4629 value = 0;
4630 }
4631 addr += incr;
4632 value |= r600_flags;
4633 ib->ptr[ib->length_dw++] = value;
4634 ib->ptr[ib->length_dw++] = upper_32_bits(value);
4635 }
4636 }
4637 } else {
4638 while (count) {
4639 ndw = count;
4640 if (ndw > 0x7FFFF)
4641 ndw = 0x7FFFF;
4642
4643 if (flags & RADEON_VM_PAGE_VALID)
4644 value = addr;
4645 else
4646 value = 0;
4647 /* for physically contiguous pages (vram) */
4648 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
4649 ib->ptr[ib->length_dw++] = pe; /* dst addr */
4650 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
4651 ib->ptr[ib->length_dw++] = r600_flags; /* mask */
4652 ib->ptr[ib->length_dw++] = 0;
4653 ib->ptr[ib->length_dw++] = value; /* value */
4654 ib->ptr[ib->length_dw++] = upper_32_bits(value);
4655 ib->ptr[ib->length_dw++] = incr; /* increment size */
4656 ib->ptr[ib->length_dw++] = 0;
4657 ib->ptr[ib->length_dw++] = ndw; /* number of entries */
4658 pe += ndw * 8;
4659 addr += ndw * incr;
4660 count -= ndw;
4661 }
4662 }
4663 while (ib->length_dw & 0x7)
4664 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
4665 }
4666}
4667
4668/**
4669 * cik_dma_vm_flush - cik vm flush using sDMA
4670 *
4671 * @rdev: radeon_device pointer
4672 *
4673 * Update the page table base and flush the VM TLB
4674 * using sDMA (CIK).
4675 */
4676void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
4677{
4678 struct radeon_ring *ring = &rdev->ring[ridx];
4679 u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
4680 SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
4681 u32 ref_and_mask;
4682
4683 if (vm == NULL)
4684 return;
4685
4686 if (ridx == R600_RING_TYPE_DMA_INDEX)
4687 ref_and_mask = SDMA0;
4688 else
4689 ref_and_mask = SDMA1;
4690
4691 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4692 if (vm->id < 8) {
4693 radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
4694 } else {
4695 radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
4696	}
4867	}
4697 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
4698
4699 /* update SH_MEM_* regs */
4700 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4701 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
4702 radeon_ring_write(ring, VMID(vm->id));
4703
4704 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4705 radeon_ring_write(ring, SH_MEM_BASES >> 2);
4706 radeon_ring_write(ring, 0);
4707
4708 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4709 radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
4710 radeon_ring_write(ring, 0);
4711
4712 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4713 radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
4714 radeon_ring_write(ring, 1);
4715
4716 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4717 radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
4718 radeon_ring_write(ring, 0);
4719
4720 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4721 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
4722 radeon_ring_write(ring, VMID(0));
4723
4724 /* flush HDP */
4725 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
4726 radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
4727 radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
4728 radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
4729 radeon_ring_write(ring, ref_and_mask); /* MASK */
4730 radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
4731
4732 /* flush TLB */
4733 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4734 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
4735 radeon_ring_write(ring, 1 << vm->id);
4736}
4868}
4737 4869
4738 4870 /*
@@ -4741,31 +4873,34 @@ void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm
4741 4873  * variety of functions, the most important of which is
4742 4874  * the interrupt controller.
4743 4875  */
4744/**
4876static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
4745 * cik_rlc_stop - stop the RLC ME
4877					  bool enable)
4746 *
4747 * @rdev: radeon_device pointer
4748 *
4749 * Halt the RLC ME (MicroEngine) (CIK).
4750 */
4751static void cik_rlc_stop(struct radeon_device *rdev)
4752 4878 {
4753	int i, j, k;
4879	u32 tmp = RREG32(CP_INT_CNTL_RING0);
4754	u32 mask, tmp;
4755 4880
4756	tmp = RREG32(CP_INT_CNTL_RING0);
4881	if (enable)
4757	tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
4882		tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
4883 else
4884 tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
4758 4885 	WREG32(CP_INT_CNTL_RING0, tmp);
4886}
4759 4887
4760	RREG32(CB_CGTT_SCLK_CTRL);
4888static void cik_enable_lbpw(struct radeon_device *rdev, bool enable)
4761	RREG32(CB_CGTT_SCLK_CTRL);
4889{
4762	RREG32(CB_CGTT_SCLK_CTRL);
4890	u32 tmp;
4763 RREG32(CB_CGTT_SCLK_CTRL);
4764 4891
4765	tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc;
4892	tmp = RREG32(RLC_LB_CNTL);
4766	WREG32(RLC_CGCG_CGLS_CTRL, tmp);
4893	if (enable)
4894 tmp |= LOAD_BALANCE_ENABLE;
4895 else
4896 tmp &= ~LOAD_BALANCE_ENABLE;
4897 WREG32(RLC_LB_CNTL, tmp);
4898}
4767 4899
4768	WREG32(RLC_CNTL, 0);
4900static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
4901{
4902 u32 i, j, k;
4903 u32 mask;
4769 4904
4770 4905 	for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
4771 4906 		for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
@@ -4787,6 +4922,84 @@ static void cik_rlc_stop(struct radeon_device *rdev)
4787 4922 	}
4788 4923 }
4789 4924
4925static void cik_update_rlc(struct radeon_device *rdev, u32 rlc)
4926{
4927 u32 tmp;
4928
4929 tmp = RREG32(RLC_CNTL);
4930 if (tmp != rlc)
4931 WREG32(RLC_CNTL, rlc);
4932}
4933
4934static u32 cik_halt_rlc(struct radeon_device *rdev)
4935{
4936 u32 data, orig;
4937
4938 orig = data = RREG32(RLC_CNTL);
4939
4940 if (data & RLC_ENABLE) {
4941 u32 i;
4942
4943 data &= ~RLC_ENABLE;
4944 WREG32(RLC_CNTL, data);
4945
4946 for (i = 0; i < rdev->usec_timeout; i++) {
4947 if ((RREG32(RLC_GPM_STAT) & RLC_GPM_BUSY) == 0)
4948 break;
4949 udelay(1);
4950 }
4951
4952 cik_wait_for_rlc_serdes(rdev);
4953 }
4954
4955 return orig;
4956}
4957
4958void cik_enter_rlc_safe_mode(struct radeon_device *rdev)
4959{
4960 u32 tmp, i, mask;
4961
4962 tmp = REQ | MESSAGE(MSG_ENTER_RLC_SAFE_MODE);
4963 WREG32(RLC_GPR_REG2, tmp);
4964
4965 mask = GFX_POWER_STATUS | GFX_CLOCK_STATUS;
4966 for (i = 0; i < rdev->usec_timeout; i++) {
4967 if ((RREG32(RLC_GPM_STAT) & mask) == mask)
4968 break;
4969 udelay(1);
4970 }
4971
4972 for (i = 0; i < rdev->usec_timeout; i++) {
4973 if ((RREG32(RLC_GPR_REG2) & REQ) == 0)
4974 break;
4975 udelay(1);
4976 }
4977}
4978
4979void cik_exit_rlc_safe_mode(struct radeon_device *rdev)
4980{
4981 u32 tmp;
4982
4983 tmp = REQ | MESSAGE(MSG_EXIT_RLC_SAFE_MODE);
4984 WREG32(RLC_GPR_REG2, tmp);
4985}
4986
4987/**
4988 * cik_rlc_stop - stop the RLC ME
4989 *
4990 * @rdev: radeon_device pointer
4991 *
4992 * Halt the RLC ME (MicroEngine) (CIK).
4993 */
4994static void cik_rlc_stop(struct radeon_device *rdev)
4995{
4996 WREG32(RLC_CNTL, 0);
4997
4998 cik_enable_gui_idle_interrupt(rdev, false);
4999
5000 cik_wait_for_rlc_serdes(rdev);
5001}
5002
4790 5003 /**
4791 5004  * cik_rlc_start - start the RLC ME
4792 5005  *
@@ -4796,13 +5009,9 @@ static void cik_rlc_stop(struct radeon_device *rdev)
4796 5009  */
4797 5010 static void cik_rlc_start(struct radeon_device *rdev)
4798 5011 {
4799 u32 tmp;
4800
4801 5012 	WREG32(RLC_CNTL, RLC_ENABLE);
4802 5013
4803	tmp = RREG32(CP_INT_CNTL_RING0);
5014	cik_enable_gui_idle_interrupt(rdev, true);
4804	tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
4805	WREG32(CP_INT_CNTL_RING0, tmp);
4806 5015
4807 5016 	udelay(50);
4808 5017 }
@@ -4818,8 +5027,7 @@ static void cik_rlc_start(struct radeon_device *rdev)
4818 5027  */
4819 5028 static int cik_rlc_resume(struct radeon_device *rdev)
4820 5029 {
4821	u32 i, size;
5030	u32 i, size, tmp;
4822 u32 clear_state_info[3];
4823 5031 	const __be32 *fw_data;
4824 5032
4825 5033 	if (!rdev->rlc_fw)
@@ -4840,12 +5048,15 @@ static int cik_rlc_resume(struct radeon_device *rdev)
4840 5048
4841 5049 	cik_rlc_stop(rdev);
4842 5050
4843	WREG32(GRBM_SOFT_RESET, SOFT_RESET_RLC);
5051	/* disable CG */
4844	RREG32(GRBM_SOFT_RESET);
5052	tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc;
4845	udelay(50);
5053	WREG32(RLC_CGCG_CGLS_CTRL, tmp);
4846	WREG32(GRBM_SOFT_RESET, 0);
5054
4847	RREG32(GRBM_SOFT_RESET);
5055	si_rlc_reset(rdev);
4848	udelay(50);
5056
5057 cik_init_pg(rdev);
5058
5059 cik_init_cg(rdev);
4849 5060
4850 5061 	WREG32(RLC_LB_CNTR_INIT, 0);
4851 5062 	WREG32(RLC_LB_CNTR_MAX, 0x00008000);
@@ -4864,20 +5075,757 @@ static int cik_rlc_resume(struct radeon_device *rdev)
4864 5075 	WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
4865 5076 	WREG32(RLC_GPM_UCODE_ADDR, 0);
4866 5077
4867	/* XXX */
5078	/* XXX - find out what chips support lbpw */
4868	clear_state_info[0] = 0;//upper_32_bits(rdev->rlc.save_restore_gpu_addr);
5079	cik_enable_lbpw(rdev, false);
4869	clear_state_info[1] = 0;//rdev->rlc.save_restore_gpu_addr;
5080
4870	clear_state_info[2] = 0;//cik_default_size;
5081	if (rdev->family == CHIP_BONAIRE)
4871	WREG32(RLC_GPM_SCRATCH_ADDR, 0x3d);
5082		WREG32(RLC_DRIVER_DMA_STATUS, 0);
4872 for (i = 0; i < 3; i++)
4873 WREG32(RLC_GPM_SCRATCH_DATA, clear_state_info[i]);
4874 WREG32(RLC_DRIVER_DMA_STATUS, 0);
4875 5083
4876 5084 	cik_rlc_start(rdev);
4877 5085
4878 5086 	return 0;
4879 5087 }
4880 5088
5089static void cik_enable_cgcg(struct radeon_device *rdev, bool enable)
5090{
5091 u32 data, orig, tmp, tmp2;
5092
5093 orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
5094
5095 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
5096 cik_enable_gui_idle_interrupt(rdev, true);
5097
5098 tmp = cik_halt_rlc(rdev);
5099
5100 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5101 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
5102 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
5103 tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE;
5104 WREG32(RLC_SERDES_WR_CTRL, tmp2);
5105
5106 cik_update_rlc(rdev, tmp);
5107
5108 data |= CGCG_EN | CGLS_EN;
5109 } else {
5110 cik_enable_gui_idle_interrupt(rdev, false);
5111
5112 RREG32(CB_CGTT_SCLK_CTRL);
5113 RREG32(CB_CGTT_SCLK_CTRL);
5114 RREG32(CB_CGTT_SCLK_CTRL);
5115 RREG32(CB_CGTT_SCLK_CTRL);
5116
5117 data &= ~(CGCG_EN | CGLS_EN);
5118 }
5119
5120 if (orig != data)
5121 WREG32(RLC_CGCG_CGLS_CTRL, data);
5122
5123}
5124
5125static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
5126{
5127 u32 data, orig, tmp = 0;
5128
5129 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
5130 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) {
5131 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
5132 orig = data = RREG32(CP_MEM_SLP_CNTL);
5133 data |= CP_MEM_LS_EN;
5134 if (orig != data)
5135 WREG32(CP_MEM_SLP_CNTL, data);
5136 }
5137 }
5138
5139 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
5140 data &= 0xfffffffd;
5141 if (orig != data)
5142 WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
5143
5144 tmp = cik_halt_rlc(rdev);
5145
5146 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5147 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
5148 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
5149 data = BPM_ADDR_MASK | MGCG_OVERRIDE_0;
5150 WREG32(RLC_SERDES_WR_CTRL, data);
5151
5152 cik_update_rlc(rdev, tmp);
5153
5154 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS) {
5155 orig = data = RREG32(CGTS_SM_CTRL_REG);
5156 data &= ~SM_MODE_MASK;
5157 data |= SM_MODE(0x2);
5158 data |= SM_MODE_ENABLE;
5159 data &= ~CGTS_OVERRIDE;
5160 if ((rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) &&
5161 (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS_LS))
5162 data &= ~CGTS_LS_OVERRIDE;
5163 data &= ~ON_MONITOR_ADD_MASK;
5164 data |= ON_MONITOR_ADD_EN;
5165 data |= ON_MONITOR_ADD(0x96);
5166 if (orig != data)
5167 WREG32(CGTS_SM_CTRL_REG, data);
5168 }
5169 } else {
5170 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
5171 data |= 0x00000002;
5172 if (orig != data)
5173 WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
5174
5175 data = RREG32(RLC_MEM_SLP_CNTL);
5176 if (data & RLC_MEM_LS_EN) {
5177 data &= ~RLC_MEM_LS_EN;
5178 WREG32(RLC_MEM_SLP_CNTL, data);
5179 }
5180
5181 data = RREG32(CP_MEM_SLP_CNTL);
5182 if (data & CP_MEM_LS_EN) {
5183 data &= ~CP_MEM_LS_EN;
5184 WREG32(CP_MEM_SLP_CNTL, data);
5185 }
5186
5187 orig = data = RREG32(CGTS_SM_CTRL_REG);
5188 data |= CGTS_OVERRIDE | CGTS_LS_OVERRIDE;
5189 if (orig != data)
5190 WREG32(CGTS_SM_CTRL_REG, data);
5191
5192 tmp = cik_halt_rlc(rdev);
5193
5194 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5195 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
5196 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
5197 data = BPM_ADDR_MASK | MGCG_OVERRIDE_1;
5198 WREG32(RLC_SERDES_WR_CTRL, data);
5199
5200 cik_update_rlc(rdev, tmp);
5201 }
5202}
5203
5204static const u32 mc_cg_registers[] =
5205{
5206 MC_HUB_MISC_HUB_CG,
5207 MC_HUB_MISC_SIP_CG,
5208 MC_HUB_MISC_VM_CG,
5209 MC_XPB_CLK_GAT,
5210 ATC_MISC_CG,
5211 MC_CITF_MISC_WR_CG,
5212 MC_CITF_MISC_RD_CG,
5213 MC_CITF_MISC_VM_CG,
5214 VM_L2_CG,
5215};
5216
5217static void cik_enable_mc_ls(struct radeon_device *rdev,
5218 bool enable)
5219{
5220 int i;
5221 u32 orig, data;
5222
5223 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5224 orig = data = RREG32(mc_cg_registers[i]);
5225 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
5226 data |= MC_LS_ENABLE;
5227 else
5228 data &= ~MC_LS_ENABLE;
5229 if (data != orig)
5230 WREG32(mc_cg_registers[i], data);
5231 }
5232}
5233
5234static void cik_enable_mc_mgcg(struct radeon_device *rdev,
5235 bool enable)
5236{
5237 int i;
5238 u32 orig, data;
5239
5240 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5241 orig = data = RREG32(mc_cg_registers[i]);
5242 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
5243 data |= MC_CG_ENABLE;
5244 else
5245 data &= ~MC_CG_ENABLE;
5246 if (data != orig)
5247 WREG32(mc_cg_registers[i], data);
5248 }
5249}
5250
5251static void cik_enable_sdma_mgcg(struct radeon_device *rdev,
5252 bool enable)
5253{
5254 u32 orig, data;
5255
5256 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
5257 WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
5258 WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
5259 } else {
5260 orig = data = RREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
5261 data |= 0xff000000;
5262 if (data != orig)
5263 WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);
5264
5265 orig = data = RREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
5266 data |= 0xff000000;
5267 if (data != orig)
5268 WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
5269 }
5270}
5271
5272static void cik_enable_sdma_mgls(struct radeon_device *rdev,
5273 bool enable)
5274{
5275 u32 orig, data;
5276
5277 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_LS)) {
5278 orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
5279 data |= 0x100;
5280 if (orig != data)
5281 WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
5282
5283 orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
5284 data |= 0x100;
5285 if (orig != data)
5286 WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
5287 } else {
5288 orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
5289 data &= ~0x100;
5290 if (orig != data)
5291 WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
5292
5293 orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
5294 data &= ~0x100;
5295 if (orig != data)
5296 WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
5297 }
5298}
5299
5300static void cik_enable_uvd_mgcg(struct radeon_device *rdev,
5301 bool enable)
5302{
5303 u32 orig, data;
5304
5305 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
5306 data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
5307 data = 0xfff;
5308 WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
5309
5310 orig = data = RREG32(UVD_CGC_CTRL);
5311 data |= DCM;
5312 if (orig != data)
5313 WREG32(UVD_CGC_CTRL, data);
5314 } else {
5315 data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
5316 data &= ~0xfff;
5317 WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
5318
5319 orig = data = RREG32(UVD_CGC_CTRL);
5320 data &= ~DCM;
5321 if (orig != data)
5322 WREG32(UVD_CGC_CTRL, data);
5323 }
5324}
5325
5326static void cik_enable_bif_mgls(struct radeon_device *rdev,
5327 bool enable)
5328{
5329 u32 orig, data;
5330
5331 orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
5332
5333 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
5334 data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
5335 REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
5336 else
5337 data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
5338 REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
5339
5340 if (orig != data)
5341 WREG32_PCIE_PORT(PCIE_CNTL2, data);
5342}
5343
5344static void cik_enable_hdp_mgcg(struct radeon_device *rdev,
5345 bool enable)
5346{
5347 u32 orig, data;
5348
5349 orig = data = RREG32(HDP_HOST_PATH_CNTL);
5350
5351 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
5352 data &= ~CLOCK_GATING_DIS;
5353 else
5354 data |= CLOCK_GATING_DIS;
5355
5356 if (orig != data)
5357 WREG32(HDP_HOST_PATH_CNTL, data);
5358}
5359
5360static void cik_enable_hdp_ls(struct radeon_device *rdev,
5361 bool enable)
5362{
5363 u32 orig, data;
5364
5365 orig = data = RREG32(HDP_MEM_POWER_LS);
5366
5367 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
5368 data |= HDP_LS_ENABLE;
5369 else
5370 data &= ~HDP_LS_ENABLE;
5371
5372 if (orig != data)
5373 WREG32(HDP_MEM_POWER_LS, data);
5374}
5375
5376void cik_update_cg(struct radeon_device *rdev,
5377 u32 block, bool enable)
5378{
5379 if (block & RADEON_CG_BLOCK_GFX) {
5380 /* order matters! */
5381 if (enable) {
5382 cik_enable_mgcg(rdev, true);
5383 cik_enable_cgcg(rdev, true);
5384 } else {
5385 cik_enable_cgcg(rdev, false);
5386 cik_enable_mgcg(rdev, false);
5387 }
5388 }
5389
5390 if (block & RADEON_CG_BLOCK_MC) {
5391 if (!(rdev->flags & RADEON_IS_IGP)) {
5392 cik_enable_mc_mgcg(rdev, enable);
5393 cik_enable_mc_ls(rdev, enable);
5394 }
5395 }
5396
5397 if (block & RADEON_CG_BLOCK_SDMA) {
5398 cik_enable_sdma_mgcg(rdev, enable);
5399 cik_enable_sdma_mgls(rdev, enable);
5400 }
5401
5402 if (block & RADEON_CG_BLOCK_BIF) {
5403 cik_enable_bif_mgls(rdev, enable);
5404 }
5405
5406 if (block & RADEON_CG_BLOCK_UVD) {
5407 if (rdev->has_uvd)
5408 cik_enable_uvd_mgcg(rdev, enable);
5409 }
5410
5411 if (block & RADEON_CG_BLOCK_HDP) {
5412 cik_enable_hdp_mgcg(rdev, enable);
5413 cik_enable_hdp_ls(rdev, enable);
5414 }
5415}
5416
5417static void cik_init_cg(struct radeon_device *rdev)
5418{
5419
5420 cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, true);
5421
5422 if (rdev->has_uvd)
5423 si_init_uvd_internal_cg(rdev);
5424
5425 cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
5426 RADEON_CG_BLOCK_SDMA |
5427 RADEON_CG_BLOCK_BIF |
5428 RADEON_CG_BLOCK_UVD |
5429 RADEON_CG_BLOCK_HDP), true);
5430}
5431
5432static void cik_fini_cg(struct radeon_device *rdev)
5433{
5434 cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
5435 RADEON_CG_BLOCK_SDMA |
5436 RADEON_CG_BLOCK_BIF |
5437 RADEON_CG_BLOCK_UVD |
5438 RADEON_CG_BLOCK_HDP), false);
5439
5440 cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, false);
5441}
5442
5443static void cik_enable_sck_slowdown_on_pu(struct radeon_device *rdev,
5444 bool enable)
5445{
5446 u32 data, orig;
5447
5448 orig = data = RREG32(RLC_PG_CNTL);
5449 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
5450 data |= SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
5451 else
5452 data &= ~SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
5453 if (orig != data)
5454 WREG32(RLC_PG_CNTL, data);
5455}
5456
5457static void cik_enable_sck_slowdown_on_pd(struct radeon_device *rdev,
5458 bool enable)
5459{
5460 u32 data, orig;
5461
5462 orig = data = RREG32(RLC_PG_CNTL);
5463 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
5464 data |= SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
5465 else
5466 data &= ~SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
5467 if (orig != data)
5468 WREG32(RLC_PG_CNTL, data);
5469}
5470
5471static void cik_enable_cp_pg(struct radeon_device *rdev, bool enable)
5472{
5473 u32 data, orig;
5474
5475 orig = data = RREG32(RLC_PG_CNTL);
5476 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_CP))
5477 data &= ~DISABLE_CP_PG;
5478 else
5479 data |= DISABLE_CP_PG;
5480 if (orig != data)
5481 WREG32(RLC_PG_CNTL, data);
5482}
5483
5484static void cik_enable_gds_pg(struct radeon_device *rdev, bool enable)
5485{
5486 u32 data, orig;
5487
5488 orig = data = RREG32(RLC_PG_CNTL);
5489 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GDS))
5490 data &= ~DISABLE_GDS_PG;
5491 else
5492 data |= DISABLE_GDS_PG;
5493 if (orig != data)
5494 WREG32(RLC_PG_CNTL, data);
5495}
5496
5497#define CP_ME_TABLE_SIZE 96
5498#define CP_ME_TABLE_OFFSET 2048
5499#define CP_MEC_TABLE_OFFSET 4096
5500
5501void cik_init_cp_pg_table(struct radeon_device *rdev)
5502{
5503 const __be32 *fw_data;
5504 volatile u32 *dst_ptr;
5505 int me, i, max_me = 4;
5506 u32 bo_offset = 0;
5507 u32 table_offset;
5508
5509 if (rdev->family == CHIP_KAVERI)
5510 max_me = 5;
5511
5512 if (rdev->rlc.cp_table_ptr == NULL)
5513 return;
5514
5515 /* write the cp table buffer */
5516 dst_ptr = rdev->rlc.cp_table_ptr;
5517 for (me = 0; me < max_me; me++) {
5518 if (me == 0) {
5519 fw_data = (const __be32 *)rdev->ce_fw->data;
5520 table_offset = CP_ME_TABLE_OFFSET;
5521 } else if (me == 1) {
5522 fw_data = (const __be32 *)rdev->pfp_fw->data;
5523 table_offset = CP_ME_TABLE_OFFSET;
5524 } else if (me == 2) {
5525 fw_data = (const __be32 *)rdev->me_fw->data;
5526 table_offset = CP_ME_TABLE_OFFSET;
5527 } else {
5528 fw_data = (const __be32 *)rdev->mec_fw->data;
5529 table_offset = CP_MEC_TABLE_OFFSET;
5530 }
5531
5532 for (i = 0; i < CP_ME_TABLE_SIZE; i ++) {
5533 dst_ptr[bo_offset + i] = be32_to_cpu(fw_data[table_offset + i]);
5534 }
5535 bo_offset += CP_ME_TABLE_SIZE;
5536 }
5537}
5538
5539static void cik_enable_gfx_cgpg(struct radeon_device *rdev,
5540 bool enable)
5541{
5542 u32 data, orig;
5543
5544 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) {
5545 orig = data = RREG32(RLC_PG_CNTL);
5546 data |= GFX_PG_ENABLE;
5547 if (orig != data)
5548 WREG32(RLC_PG_CNTL, data);
5549
5550 orig = data = RREG32(RLC_AUTO_PG_CTRL);
5551 data |= AUTO_PG_EN;
5552 if (orig != data)
5553 WREG32(RLC_AUTO_PG_CTRL, data);
5554 } else {
5555 orig = data = RREG32(RLC_PG_CNTL);
5556 data &= ~GFX_PG_ENABLE;
5557 if (orig != data)
5558 WREG32(RLC_PG_CNTL, data);
5559
5560 orig = data = RREG32(RLC_AUTO_PG_CTRL);
5561 data &= ~AUTO_PG_EN;
5562 if (orig != data)
5563 WREG32(RLC_AUTO_PG_CTRL, data);
5564
5565 data = RREG32(DB_RENDER_CONTROL);
5566 }
5567}
5568
5569static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
5570{
5571 u32 mask = 0, tmp, tmp1;
5572 int i;
5573
5574 cik_select_se_sh(rdev, se, sh);
5575 tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
5576 tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
5577 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5578
5579 tmp &= 0xffff0000;
5580
5581 tmp |= tmp1;
5582 tmp >>= 16;
5583
5584 for (i = 0; i < rdev->config.cik.max_cu_per_sh; i ++) {
5585 mask <<= 1;
5586 mask |= 1;
5587 }
5588
5589 return (~tmp) & mask;
5590}
5591
5592static void cik_init_ao_cu_mask(struct radeon_device *rdev)
5593{
5594 u32 i, j, k, active_cu_number = 0;
5595 u32 mask, counter, cu_bitmap;
5596 u32 tmp = 0;
5597
5598 for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
5599 for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
5600 mask = 1;
5601 cu_bitmap = 0;
5602 counter = 0;
5603 for (k = 0; k < rdev->config.cik.max_cu_per_sh; k ++) {
5604 if (cik_get_cu_active_bitmap(rdev, i, j) & mask) {
5605 if (counter < 2)
5606 cu_bitmap |= mask;
5607 counter ++;
5608 }
5609 mask <<= 1;
5610 }
5611
5612 active_cu_number += counter;
5613 tmp |= (cu_bitmap << (i * 16 + j * 8));
5614 }
5615 }
5616
5617 WREG32(RLC_PG_AO_CU_MASK, tmp);
5618
5619 tmp = RREG32(RLC_MAX_PG_CU);
5620 tmp &= ~MAX_PU_CU_MASK;
5621 tmp |= MAX_PU_CU(active_cu_number);
5622 WREG32(RLC_MAX_PG_CU, tmp);
5623}
5624
5625static void cik_enable_gfx_static_mgpg(struct radeon_device *rdev,
5626 bool enable)
5627{
5628 u32 data, orig;
5629
5630 orig = data = RREG32(RLC_PG_CNTL);
5631 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_SMG))
5632 data |= STATIC_PER_CU_PG_ENABLE;
5633 else
5634 data &= ~STATIC_PER_CU_PG_ENABLE;
5635 if (orig != data)
5636 WREG32(RLC_PG_CNTL, data);
5637}
5638
5639static void cik_enable_gfx_dynamic_mgpg(struct radeon_device *rdev,
5640 bool enable)
5641{
5642 u32 data, orig;
5643
5644 orig = data = RREG32(RLC_PG_CNTL);
5645 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_DMG))
5646 data |= DYN_PER_CU_PG_ENABLE;
5647 else
5648 data &= ~DYN_PER_CU_PG_ENABLE;
5649 if (orig != data)
5650 WREG32(RLC_PG_CNTL, data);
5651}
5652
5653#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
5654#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
5655
5656static void cik_init_gfx_cgpg(struct radeon_device *rdev)
5657{
5658 u32 data, orig;
5659 u32 i;
5660
5661 if (rdev->rlc.cs_data) {
5662 WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
5663 WREG32(RLC_GPM_SCRATCH_DATA, upper_32_bits(rdev->rlc.clear_state_gpu_addr));
5664 WREG32(RLC_GPM_SCRATCH_DATA, lower_32_bits(rdev->rlc.clear_state_gpu_addr));
5665 WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.clear_state_size);
5666 } else {
5667 WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
5668 for (i = 0; i < 3; i++)
5669 WREG32(RLC_GPM_SCRATCH_DATA, 0);
5670 }
5671 if (rdev->rlc.reg_list) {
5672 WREG32(RLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
5673 for (i = 0; i < rdev->rlc.reg_list_size; i++)
5674 WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.reg_list[i]);
5675 }
5676
5677 orig = data = RREG32(RLC_PG_CNTL);
5678 data |= GFX_PG_SRC;
5679 if (orig != data)
5680 WREG32(RLC_PG_CNTL, data);
5681
5682 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
5683 WREG32(RLC_CP_TABLE_RESTORE, rdev->rlc.cp_table_gpu_addr >> 8);
5684
5685 data = RREG32(CP_RB_WPTR_POLL_CNTL);
5686 data &= ~IDLE_POLL_COUNT_MASK;
5687 data |= IDLE_POLL_COUNT(0x60);
5688 WREG32(CP_RB_WPTR_POLL_CNTL, data);
5689
5690 data = 0x10101010;
5691 WREG32(RLC_PG_DELAY, data);
5692
5693 data = RREG32(RLC_PG_DELAY_2);
5694 data &= ~0xff;
5695 data |= 0x3;
5696 WREG32(RLC_PG_DELAY_2, data);
5697
5698 data = RREG32(RLC_AUTO_PG_CTRL);
5699 data &= ~GRBM_REG_SGIT_MASK;
5700 data |= GRBM_REG_SGIT(0x700);
5701 WREG32(RLC_AUTO_PG_CTRL, data);
5702
5703}
5704
5705static void cik_update_gfx_pg(struct radeon_device *rdev, bool enable)
5706{
5707 cik_enable_gfx_cgpg(rdev, enable);
5708 cik_enable_gfx_static_mgpg(rdev, enable);
5709 cik_enable_gfx_dynamic_mgpg(rdev, enable);
5710}
5711
5712u32 cik_get_csb_size(struct radeon_device *rdev)
5713{
5714 u32 count = 0;
5715 const struct cs_section_def *sect = NULL;
5716 const struct cs_extent_def *ext = NULL;
5717
5718 if (rdev->rlc.cs_data == NULL)
5719 return 0;
5720
5721 /* begin clear state */
5722 count += 2;
5723 /* context control state */
5724 count += 3;
5725
5726 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5727 for (ext = sect->section; ext->extent != NULL; ++ext) {
5728 if (sect->id == SECT_CONTEXT)
5729 count += 2 + ext->reg_count;
5730 else
5731 return 0;
5732 }
5733 }
5734 /* pa_sc_raster_config/pa_sc_raster_config1 */
5735 count += 4;
5736 /* end clear state */
5737 count += 2;
5738 /* clear state */
5739 count += 2;
5740
5741 return count;
5742}
5743
5744void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
5745{
5746 u32 count = 0, i;
5747 const struct cs_section_def *sect = NULL;
5748 const struct cs_extent_def *ext = NULL;
5749
5750 if (rdev->rlc.cs_data == NULL)
5751 return;
5752 if (buffer == NULL)
5753 return;
5754
5755 buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
5756 buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE;
5757
5758 buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1);
5759 buffer[count++] = 0x80000000;
5760 buffer[count++] = 0x80000000;
5761
5762 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5763 for (ext = sect->section; ext->extent != NULL; ++ext) {
5764 if (sect->id == SECT_CONTEXT) {
5765 buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count);
5766 buffer[count++] = ext->reg_index - 0xa000;
5767 for (i = 0; i < ext->reg_count; i++)
5768 buffer[count++] = ext->extent[i];
5769 } else {
5770 return;
5771 }
5772 }
5773 }
5774
5775 buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 2);
5776 buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START;
5777 switch (rdev->family) {
5778 case CHIP_BONAIRE:
5779 buffer[count++] = 0x16000012;
5780 buffer[count++] = 0x00000000;
5781 break;
5782 case CHIP_KAVERI:
5783 buffer[count++] = 0x00000000; /* XXX */
5784 buffer[count++] = 0x00000000;
5785 break;
5786 case CHIP_KABINI:
5787 buffer[count++] = 0x00000000; /* XXX */
5788 buffer[count++] = 0x00000000;
5789 break;
5790 default:
5791 buffer[count++] = 0x00000000;
5792 buffer[count++] = 0x00000000;
5793 break;
5794 }
5795
5796 buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
5797 buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE;
5798
5799 buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0);
5800 buffer[count++] = 0;
5801}
5802
5803static void cik_init_pg(struct radeon_device *rdev)
5804{
5805 if (rdev->pg_flags) {
5806 cik_enable_sck_slowdown_on_pu(rdev, true);
5807 cik_enable_sck_slowdown_on_pd(rdev, true);
5808 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
5809 cik_init_gfx_cgpg(rdev);
5810 cik_enable_cp_pg(rdev, true);
5811 cik_enable_gds_pg(rdev, true);
5812 }
5813 cik_init_ao_cu_mask(rdev);
5814 cik_update_gfx_pg(rdev, true);
5815 }
5816}
5817
5818static void cik_fini_pg(struct radeon_device *rdev)
5819{
5820 if (rdev->pg_flags) {
5821 cik_update_gfx_pg(rdev, false);
5822 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
5823 cik_enable_cp_pg(rdev, false);
5824 cik_enable_gds_pg(rdev, false);
5825 }
5826 }
5827}
5828
4881 5829 /*
4882 5830  * Interrupts
4883 5831  * Starting with r6xx, interrupts are handled via a ring buffer.
@@ -5040,7 +5988,7 @@ static int cik_irq_init(struct radeon_device *rdev)
5040 5988 	WREG32(INTERRUPT_CNTL, interrupt_cntl);
5041 5989
5042 5990 	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
5043	rb_bufsz = drm_order(rdev->ih.ring_size / 4);
5991	rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
5044 5992
5045 5993 	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
5046 5994 		      IH_WPTR_OVERFLOW_CLEAR |
@@ -5096,6 +6044,7 @@ int cik_irq_set(struct radeon_device *rdev)
5096 6044 	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
5097 6045 	u32 grbm_int_cntl = 0;
5098 6046 	u32 dma_cntl, dma_cntl1;
6047 u32 thermal_int;
5099 6048
5100 6049 	if (!rdev->irq.installed) {
5101 6050 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -5128,6 +6077,13 @@ int cik_irq_set(struct radeon_device *rdev)
5128 6077 	cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
5129 6078 	cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
5130 6079
6080 if (rdev->flags & RADEON_IS_IGP)
6081 thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
6082 ~(THERM_INTH_MASK | THERM_INTL_MASK);
6083 else
6084 thermal_int = RREG32_SMC(CG_THERMAL_INT) &
6085 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
6086
5131 6087 	/* enable CP interrupts on all rings */
5132 6088 	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
5133 6089 		DRM_DEBUG("cik_irq_set: sw int gfx\n");
@@ -5285,6 +6241,14 @@ int cik_irq_set(struct radeon_device *rdev)
5285 6241 		hpd6 |= DC_HPDx_INT_EN;
5286 6242 	}
5287 6243
6244 if (rdev->irq.dpm_thermal) {
6245 DRM_DEBUG("dpm thermal\n");
6246 if (rdev->flags & RADEON_IS_IGP)
6247 thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
6248 else
6249 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
6250 }
6251
5288 6252 	WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
5289 6253
5290 6254 	WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
@@ -5319,6 +6283,11 @@ int cik_irq_set(struct radeon_device *rdev)
5319 6283 	WREG32(DC_HPD5_INT_CONTROL, hpd5);
5320 6284 	WREG32(DC_HPD6_INT_CONTROL, hpd6);
5321 6285
6286 if (rdev->flags & RADEON_IS_IGP)
6287 WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
6288 else
6289 WREG32_SMC(CG_THERMAL_INT, thermal_int);
6290
5322 6291 	return 0;
5323 6292 }
5324 6293
@@ -5530,6 +6499,7 @@ int cik_irq_process(struct radeon_device *rdev)
5530 6499 	bool queue_hotplug = false;
5531 6500 	bool queue_reset = false;
5532 6501 	u32 addr, status, mc_client;
6502 bool queue_thermal = false;
5533 6503
5534 6504 	if (!rdev->ih.enabled || rdev->shutdown)
5535 6505 		return IRQ_NONE;
@@ -5763,6 +6733,10 @@ restart_ih:
5763 6733 			break;
5764 6734 		}
5765 6735 		break;
6736 case 124: /* UVD */
6737 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
6738 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
6739 break;
5766 6740 	case 146:
5767 6741 	case 147:
5768 6742 		addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
@@ -5880,6 +6854,19 @@ restart_ih:
5880 6854 			break;
5881 6855 		}
5882 6856 		break;
6857 case 230: /* thermal low to high */
6858 DRM_DEBUG("IH: thermal low to high\n");
6859 rdev->pm.dpm.thermal.high_to_low = false;
6860 queue_thermal = true;
6861 break;
6862 case 231: /* thermal high to low */
6863 DRM_DEBUG("IH: thermal high to low\n");
6864 rdev->pm.dpm.thermal.high_to_low = true;
6865 queue_thermal = true;
6866 break;
6867 case 233: /* GUI IDLE */
6868 DRM_DEBUG("IH: GUI idle\n");
6869 break;
5883 6870 	case 241: /* SDMA Privileged inst */
5884 6871 	case 247: /* SDMA Privileged inst */
5885 6872 		DRM_ERROR("Illegal instruction in SDMA command stream\n");
@@ -5919,9 +6906,6 @@ restart_ih:
5919 6906 			break;
5920 6907 		}
5921 6908 		break;
5922 case 233: /* GUI IDLE */
5923 DRM_DEBUG("IH: GUI idle\n");
5924 break;
5925 6909 	default:
5926 6910 		DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
5927 6911 		break;
@@ -5935,6 +6919,8 @@ restart_ih:
5935 6919 		schedule_work(&rdev->hotplug_work);
5936 6920 	if (queue_reset)
5937 6921 		schedule_work(&rdev->reset_work);
6922 if (queue_thermal)
6923 schedule_work(&rdev->pm.dpm.thermal.work);
5938 6924 	rdev->ih.rptr = rptr;
5939 6925 	WREG32(IH_RB_RPTR, rdev->ih.rptr);
5940 6926 	atomic_set(&rdev->ih.lock, 0);
@@ -5964,6 +6950,16 @@ static int cik_startup(struct radeon_device *rdev)
5964 6950 	struct radeon_ring *ring;
5965 6951 	int r;
5966 6952
6953 /* enable pcie gen2/3 link */
6954 cik_pcie_gen3_enable(rdev);
6955 /* enable aspm */
6956 cik_program_aspm(rdev);
6957
6958 /* scratch needs to be initialized before MC */
6959 r = r600_vram_scratch_init(rdev);
6960 if (r)
6961 return r;
6962
5967 6963 	cik_mc_program(rdev);
5968 6964
5969 6965 	if (rdev->flags & RADEON_IS_IGP) {
@@ -5993,17 +6989,26 @@ static int cik_startup(struct radeon_device *rdev)
5993 6989 	}
5994 6990 	}
5995 6991
5996 r = r600_vram_scratch_init(rdev);
5997 if (r)
5998 return r;
5999
6000 6992 	r = cik_pcie_gart_enable(rdev);
6001 6993 	if (r)
6002 6994 		return r;
6003 6995 	cik_gpu_init(rdev);
6004 6996
6005 6997 	/* allocate rlc buffers */
6006	r = si_rlc_init(rdev);
6998	if (rdev->flags & RADEON_IS_IGP) {
6999 if (rdev->family == CHIP_KAVERI) {
7000 rdev->rlc.reg_list = spectre_rlc_save_restore_register_list;
7001 rdev->rlc.reg_list_size =
7002 (u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
7003 } else {
7004 rdev->rlc.reg_list = kalindi_rlc_save_restore_register_list;
7005 rdev->rlc.reg_list_size =
7006 (u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
7007 }
7008 }
7009 rdev->rlc.cs_data = ci_cs_data;
7010 rdev->rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
7011 r = sumo_rlc_init(rdev);
6007 7012 	if (r) {
6008 7013 		DRM_ERROR("Failed to init rlc BOs!\n");
6009 7014 		return r;
@@ -6051,12 +7056,15 @@ static int cik_startup(struct radeon_device *rdev)
6051 7056 		return r;
6052 7057 	}
6053 7058
6054	r = cik_uvd_resume(rdev);
7059	r = radeon_uvd_resume(rdev);
6055 7060 	if (!r) {
6056		r = radeon_fence_driver_start_ring(rdev,
7061		r = uvd_v4_2_resume(rdev);
6057					   R600_RING_TYPE_UVD_INDEX);
7062		if (!r) {
6058		if (r)
7063			r = radeon_fence_driver_start_ring(rdev,
6059			dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
7064						   R600_RING_TYPE_UVD_INDEX);
7065 if (r)
7066 dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
7067 }
6060 7068 	}
6061 7069 	if (r)
6062 7070 		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
@@ -6079,7 +7087,7 @@ static int cik_startup(struct radeon_device *rdev)
6079 7087 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
6080 7088 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
6081 7089 			     CP_RB0_RPTR, CP_RB0_WPTR,
6082			     0, 0xfffff, RADEON_CP_PACKET2);
7090			     RADEON_CP_PACKET2);
6083 7091 	if (r)
6084 7092 		return r;
6085 7093
@@ -6088,7 +7096,7 @@ static int cik_startup(struct radeon_device *rdev)
6088 7096 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
6089 7097 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
6090 7098 			     CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
6091			     0, 0xfffff, PACKET3(PACKET3_NOP, 0x3FFF));
7099			     PACKET3(PACKET3_NOP, 0x3FFF));
6092 7100 	if (r)
6093 7101 		return r;
6094 7102 	ring->me = 1; /* first MEC */
@@ -6100,7 +7108,7 @@ static int cik_startup(struct radeon_device *rdev)
6100 7108 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
6101 7109 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
6102 7110 			     CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
6103			     0, 0xffffffff, PACKET3(PACKET3_NOP, 0x3FFF));
7111			     PACKET3(PACKET3_NOP, 0x3FFF));
6104 7112 	if (r)
6105 7113 		return r;
6106 7114 	/* dGPU only have 1 MEC */
@@ -6113,7 +7121,7 @@ static int cik_startup(struct radeon_device *rdev)
6113 7121 	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
6114 7122 			     SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET,
6115 7123 			     SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET,
6116			     2, 0xfffffffc, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
7124			     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
6117 7125 	if (r)
6118 7126 		return r;
6119 7127
@@ -6121,7 +7129,7 @@ static int cik_startup(struct radeon_device *rdev)
6121 7129 	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
6122 7130 			     SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET,
6123 7131 			     SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET,
6124			     2, 0xfffffffc, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
7132			     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
6125 7133 	if (r)
6126 7134 		return r;
6127 7135
@@ -6135,12 +7143,11 @@ static int cik_startup(struct radeon_device *rdev)
6135 7143
6136 7144 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
6137 7145 	if (ring->ring_size) {
6138		r = radeon_ring_init(rdev, ring, ring->ring_size,
7146		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
6139				     R600_WB_UVD_RPTR_OFFSET,
6140 7147 				     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
6141				     0, 0xfffff, RADEON_CP_PACKET2);
7148				     RADEON_CP_PACKET2);
6142 7149 		if (!r)
6143			r = r600_uvd_init(rdev);
7150			r = uvd_v1_0_init(rdev);
6144 7151 		if (r)
6145 7152 			DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
6146 7153 	}
@@ -6157,6 +7164,10 @@ static int cik_startup(struct radeon_device *rdev)
6157 7164 		return r;
6158 7165 	}
6159 7166
7167 r = dce6_audio_init(rdev);
7168 if (r)
7169 return r;
7170
6160 7171 	return 0;
6161 7172 }
6162 7173
@@ -6202,11 +7213,14 @@ int cik_resume(struct radeon_device *rdev)
6202 7213  */
6203 7214 int cik_suspend(struct radeon_device *rdev)
6204 7215 {
7216 dce6_audio_fini(rdev);
6205 7217 	radeon_vm_manager_fini(rdev);
6206 7218 	cik_cp_enable(rdev, false);
6207 7219 	cik_sdma_enable(rdev, false);
6208	r600_uvd_stop(rdev);
7220	uvd_v1_0_fini(rdev);
6209 7221 	radeon_uvd_suspend(rdev);
7222 cik_fini_pg(rdev);
7223 cik_fini_cg(rdev);
6210 7224 	cik_irq_suspend(rdev);
6211 7225 	radeon_wb_disable(rdev);
6212 7226 	cik_pcie_gart_disable(rdev);
@@ -6327,7 +7341,7 @@ int cik_init(struct radeon_device *rdev)
6327 7341 	cik_cp_fini(rdev);
6328 7342 	cik_sdma_fini(rdev);
6329 7343 	cik_irq_fini(rdev);
6330	si_rlc_fini(rdev);
7344	sumo_rlc_fini(rdev);
6331 7345 	cik_mec_fini(rdev);
6332 7346 	radeon_wb_fini(rdev);
6333 7347 	radeon_ib_pool_fini(rdev);
@@ -6362,14 +7376,16 @@ void cik_fini(struct radeon_device *rdev)
6362 7376 {
6363 7377 	cik_cp_fini(rdev);
6364 7378 	cik_sdma_fini(rdev);
7379 cik_fini_pg(rdev);
7380 cik_fini_cg(rdev);
6365 7381 	cik_irq_fini(rdev);
6366	si_rlc_fini(rdev);
7382	sumo_rlc_fini(rdev);
6367 7383 	cik_mec_fini(rdev);
6368 7384 	radeon_wb_fini(rdev);
6369 7385 	radeon_vm_manager_fini(rdev);
6370 7386 	radeon_ib_pool_fini(rdev);
6371 7387 	radeon_irq_kms_fini(rdev);
6372	r600_uvd_stop(rdev);
7388	uvd_v1_0_fini(rdev);
6373 7389 	radeon_uvd_fini(rdev);
6374 7390 	cik_pcie_gart_fini(rdev);
6375 7391 	r600_vram_scratch_fini(rdev);
@@ -6398,8 +7414,8 @@ static u32 dce8_line_buffer_adjust(struct radeon_device *rdev,
6398 7414 				      struct radeon_crtc *radeon_crtc,
6399 7415 				      struct drm_display_mode *mode)
6400 7416 {
6401	u32 tmp;
7417	u32 tmp, buffer_alloc, i;
6402
7418	u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
6403 7419 	/*
6404 7420 	 * Line Buffer Setup
6405 7421 	 * There are 6 line buffers, one for each display controllers.
@@ -6409,22 +7425,37 @@ static u32 dce8_line_buffer_adjust(struct radeon_device *rdev,
6409 7425 	 * them using the stereo blender.
6410 7426 	 */
6411 7427 	if (radeon_crtc->base.enabled && mode) {
6412		if (mode->crtc_hdisplay < 1920)
7428		if (mode->crtc_hdisplay < 1920) {
6413 7429 			tmp = 1;
6414		else if (mode->crtc_hdisplay < 2560)
7430			buffer_alloc = 2;
7431		} else if (mode->crtc_hdisplay < 2560) {
6415 7432 			tmp = 2;
6416		else if (mode->crtc_hdisplay < 4096)
7433			buffer_alloc = 2;
7434		} else if (mode->crtc_hdisplay < 4096) {
6417 7435 			tmp = 0;
6418		else {
7436			buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
7437		} else {
6419 7438 			DRM_DEBUG_KMS("Mode too big for LB!\n");
6420 7439 			tmp = 0;
7440			buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
6421 7441 		}
6422	} else
7442	} else {
6423 7443 		tmp = 1;
7444 buffer_alloc = 0;
7445 }
6424 7446
6425 7447 	WREG32(LB_MEMORY_CTRL + radeon_crtc->crtc_offset,
6426 7448 	       LB_MEMORY_CONFIG(tmp) | LB_MEMORY_SIZE(0x6B0));
6427 7449
7450 WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
7451 DMIF_BUFFERS_ALLOCATED(buffer_alloc));
7452 for (i = 0; i < rdev->usec_timeout; i++) {
7453 if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
7454 DMIF_BUFFERS_ALLOCATED_COMPLETED)
7455 break;
7456 udelay(1);
7457 }
7458
6428 7459 	if (radeon_crtc->base.enabled && mode) {
6429 7460 		switch (tmp) {
6430 7461 		case 0:
@@ -6826,7 +7857,7 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
6826 7857 				      u32 lb_size, u32 num_heads)
6827 7858 {
6828 7859 	struct drm_display_mode *mode = &radeon_crtc->base.mode;
6829	struct dce8_wm_params wm;
7860	struct dce8_wm_params wm_low, wm_high;
6830 7861 	u32 pixel_period;
6831 7862 	u32 line_time = 0;
6832 7863 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
@@ -6836,35 +7867,82 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
6836 7867 		pixel_period = 1000000 / (u32)mode->clock;
6837 7868 		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
6838 7869
6839		wm.yclk = rdev->pm.current_mclk * 10;
7870		/* watermark for high clocks */
6840		wm.sclk = rdev->pm.current_sclk * 10;
7871		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
6841		wm.disp_clk = mode->clock;
7872		    rdev->pm.dpm_enabled) {
6842		wm.src_width = mode->crtc_hdisplay;
7873			wm_high.yclk =
6843		wm.active_time = mode->crtc_hdisplay * pixel_period;
7874				radeon_dpm_get_mclk(rdev, false) * 10;
6844		wm.blank_time = line_time - wm.active_time;
7875			wm_high.sclk =
6845		wm.interlaced = false;
7876				radeon_dpm_get_sclk(rdev, false) * 10;
7877 } else {
7878 wm_high.yclk = rdev->pm.current_mclk * 10;
7879 wm_high.sclk = rdev->pm.current_sclk * 10;
7880 }
7881
7882 wm_high.disp_clk = mode->clock;
7883 wm_high.src_width = mode->crtc_hdisplay;
7884 wm_high.active_time = mode->crtc_hdisplay * pixel_period;
7885 wm_high.blank_time = line_time - wm_high.active_time;
7886 wm_high.interlaced = false;
6846 7887 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
6847			wm.interlaced = true;
7888			wm_high.interlaced = true;
6848		wm.vsc = radeon_crtc->vsc;
7889		wm_high.vsc = radeon_crtc->vsc;
6849		wm.vtaps = 1;
7890		wm_high.vtaps = 1;
6850 7891 		if (radeon_crtc->rmx_type != RMX_OFF)
6851			wm.vtaps = 2;
7892			wm_high.vtaps = 2;
6852		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
7893		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
6853		wm.lb_size = lb_size;
7894		wm_high.lb_size = lb_size;
6854		wm.dram_channels = cik_get_number_of_dram_channels(rdev);
7895		wm_high.dram_channels = cik_get_number_of_dram_channels(rdev);
6855		wm.num_heads = num_heads;
7896		wm_high.num_heads = num_heads;
6856 7897
6857 7898 		/* set for high clocks */
6858		latency_watermark_a = min(dce8_latency_watermark(&wm), (u32)65535);
7899		latency_watermark_a = min(dce8_latency_watermark(&wm_high), (u32)65535);
7900
7901 /* possibly force display priority to high */
7902 /* should really do this at mode validation time... */
7903 if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
7904 !dce8_average_bandwidth_vs_available_bandwidth(&wm_high) ||
7905 !dce8_check_latency_hiding(&wm_high) ||
7906 (rdev->disp_priority == 2)) {
7907 DRM_DEBUG_KMS("force priority to high\n");
7908 }
7909
7910 /* watermark for low clocks */
7911 if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
7912 rdev->pm.dpm_enabled) {
7913 wm_low.yclk =
7914 radeon_dpm_get_mclk(rdev, true) * 10;
7915 wm_low.sclk =
7916 radeon_dpm_get_sclk(rdev, true) * 10;
7917 } else {
7918 wm_low.yclk = rdev->pm.current_mclk * 10;
7919 wm_low.sclk = rdev->pm.current_sclk * 10;
7920 }
7921
7922 wm_low.disp_clk = mode->clock;
7923 wm_low.src_width = mode->crtc_hdisplay;
7924 wm_low.active_time = mode->crtc_hdisplay * pixel_period;
7925 wm_low.blank_time = line_time - wm_low.active_time;
7926 wm_low.interlaced = false;
7927 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
7928 wm_low.interlaced = true;
7929 wm_low.vsc = radeon_crtc->vsc;
7930 wm_low.vtaps = 1;
7931 if (radeon_crtc->rmx_type != RMX_OFF)
7932 wm_low.vtaps = 2;
7933 wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
7934 wm_low.lb_size = lb_size;
7935 wm_low.dram_channels = cik_get_number_of_dram_channels(rdev);
7936 wm_low.num_heads = num_heads;
7937
6859 7938 		/* set for low clocks */
6860		/* wm.yclk = low clk; wm.sclk = low clk */
7939		latency_watermark_b = min(dce8_latency_watermark(&wm_low), (u32)65535);
6861		latency_watermark_b = min(dce8_latency_watermark(&wm), (u32)65535);
6862 7940
6863 7941 		/* possibly force display priority to high */
6864 7942 		/* should really do this at mode validation time... */
6865		if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
7943		if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
6866		    !dce8_average_bandwidth_vs_available_bandwidth(&wm) ||
7944		    !dce8_average_bandwidth_vs_available_bandwidth(&wm_low) ||
6867		    !dce8_check_latency_hiding(&wm) ||
7945		    !dce8_check_latency_hiding(&wm_low) ||
6868 7946 		    (rdev->disp_priority == 2)) {
6869 7947 			DRM_DEBUG_KMS("force priority to high\n");
6870 7948 		}
@@ -6889,6 +7967,11 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
6889 7967 		       LATENCY_HIGH_WATERMARK(line_time)));
6890 7968 	/* restore original selection */
6891 7969 	WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, wm_mask);
7970
7971 /* save values for DPM */
7972 radeon_crtc->line_time = line_time;
7973 radeon_crtc->wm_high = latency_watermark_a;
7974 radeon_crtc->wm_low = latency_watermark_b;
6892 7975 }
6893 7976
6894 7977 /**
@@ -6978,39 +8061,307 @@ int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
6978 8061 	return r;
6979 8062 }
6980 8063
6981int cik_uvd_resume(struct radeon_device *rdev)
8064static void cik_pcie_gen3_enable(struct radeon_device *rdev)
6982 8065 {
6983	uint64_t addr;
8066	struct pci_dev *root = rdev->pdev->bus->self;
6984	uint32_t size;
8067	int bridge_pos, gpu_pos;
6985	int r;
8068	u32 speed_cntl, mask, current_data_rate;
8069	int ret, i;
8070	u16 tmp16;
6986 8071
6987	r = radeon_uvd_resume(rdev);
8072	if (radeon_pcie_gen2 == 0)
6988	if (r)
8073		return;
6989		return r;
8074
8075 if (rdev->flags & RADEON_IS_IGP)
8076 return;
6990 8077
6991	/* programm the VCPU memory controller bits 0-27 */
8078	if (!(rdev->flags & RADEON_IS_PCIE))
6992	addr = rdev->uvd.gpu_addr >> 3;
8079		return;
6993	size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
8080
6994	WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
8081	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
6995	WREG32(UVD_VCPU_CACHE_SIZE0, size);
8082	if (ret != 0)
8083		return;
6996 8084
6997	addr += size;
8085	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
6998	size = RADEON_UVD_STACK_SIZE >> 3;
8086		return;
6999	WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
8087
7000	WREG32(UVD_VCPU_CACHE_SIZE1, size);
8088	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
8089 current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
8090 LC_CURRENT_DATA_RATE_SHIFT;
8091 if (mask & DRM_PCIE_SPEED_80) {
8092 if (current_data_rate == 2) {
8093 DRM_INFO("PCIE gen 3 link speeds already enabled\n");
8094 return;
8095 }
8096 DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
8097 } else if (mask & DRM_PCIE_SPEED_50) {
8098 if (current_data_rate == 1) {
8099 DRM_INFO("PCIE gen 2 link speeds already enabled\n");
8100 return;
8101 }
8102 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
8103 }
7001 8104
7002	addr += size;
8105	bridge_pos = pci_pcie_cap(root);
7003	size = RADEON_UVD_HEAP_SIZE >> 3;
8106	if (!bridge_pos)
7004	WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
8107		return;
7005	WREG32(UVD_VCPU_CACHE_SIZE2, size);
7006 8108
7007	/* bits 28-31 */
8109	gpu_pos = pci_pcie_cap(rdev->pdev);
7008	addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
8110	if (!gpu_pos)
7009	WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
8111		return;
7010 8112
7011 /* bits 32-39 */ 8113 if (mask & DRM_PCIE_SPEED_80) {
7012 addr = (rdev->uvd.gpu_addr >> 32) & 0xFF; 8114 /* re-try equalization if gen3 is not already enabled */
7013 WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31)); 8115 if (current_data_rate != 2) {
8116 u16 bridge_cfg, gpu_cfg;
8117 u16 bridge_cfg2, gpu_cfg2;
8118 u32 max_lw, current_lw, tmp;
8119
8120 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
8121 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
8122
8123 tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
8124 pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
8125
8126 tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
8127 pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
8128
8129 tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
8130 max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
8131 current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
8132
8133 if (current_lw < max_lw) {
8134 tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
8135 if (tmp & LC_RENEGOTIATION_SUPPORT) {
8136 tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
8137 tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
8138 tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
8139 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
8140 }
8141 }
7014 8142
7015 return 0; 8143 for (i = 0; i < 10; i++) {
8144 /* check status */
8145 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
8146 if (tmp16 & PCI_EXP_DEVSTA_TRPND)
8147 break;
8148
8149 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
8150 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
8151
8152 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
8153 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
8154
8155 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
8156 tmp |= LC_SET_QUIESCE;
8157 WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
8158
8159 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
8160 tmp |= LC_REDO_EQ;
8161 WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
8162
8163 mdelay(100);
8164
8165 /* linkctl */
8166 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
8167 tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
8168 tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
8169 pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
8170
8171 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
8172 tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
8173 tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
8174 pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
8175
8176 /* linkctl2 */
8177 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
8178 tmp16 &= ~((1 << 4) | (7 << 9));
8179 tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
8180 pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
8181
8182 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
8183 tmp16 &= ~((1 << 4) | (7 << 9));
8184 tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
8185 pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
8186
8187 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
8188 tmp &= ~LC_SET_QUIESCE;
8189 WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
8190 }
8191 }
8192 }
8193
8194 /* set the link speed */
8195 speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
8196 speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
8197 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
8198
8199 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
8200 tmp16 &= ~0xf;
8201 if (mask & DRM_PCIE_SPEED_80)
8202 tmp16 |= 3; /* gen3 */
8203 else if (mask & DRM_PCIE_SPEED_50)
8204 tmp16 |= 2; /* gen2 */
8205 else
8206 tmp16 |= 1; /* gen1 */
8207 pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
8208
8209 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
8210 speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
8211 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
8212
8213 for (i = 0; i < rdev->usec_timeout; i++) {
8214 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
8215 if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
8216 break;
8217 udelay(1);
8218 }
8219}
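The final LNKCTL2 write in the function above only touches the low four bits of the register before kicking off the speed change. To the editor's understanding this is the Target Link Speed field of the PCIe Link Control 2 register; the values 1/2/3 match the gen1/gen2/gen3 comments in the code. A minimal stand-alone sketch of just that mask-and-set step, using plain integers rather than real config-space accesses:

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch, not part of the patch: mirrors the LNKCTL2 update
 * above.  The low 4 bits select the target link speed (1 = gen1, 2 = gen2,
 * 3 = gen3, matching the comments in cik_pcie_gen3_enable()); all other
 * bits of the word are preserved. */
static uint16_t set_target_link_speed(uint16_t lnkctl2, int gen)
{
        lnkctl2 &= ~0xf;          /* clear the target link speed field */
        lnkctl2 |= (uint16_t)gen; /* 1, 2 or 3 as in the code above */
        return lnkctl2;
}

int main(void)
{
        printf("0x%04x\n", set_target_link_speed(0x0042, 3)); /* -> 0x0043 */
        return 0;
}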
8220
8221static void cik_program_aspm(struct radeon_device *rdev)
8222{
8223 u32 data, orig;
8224 bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
8225 bool disable_clkreq = false;
8226
8227 if (radeon_aspm == 0)
8228 return;
8229
8230 /* XXX double check IGPs */
8231 if (rdev->flags & RADEON_IS_IGP)
8232 return;
8233
8234 if (!(rdev->flags & RADEON_IS_PCIE))
8235 return;
8236
8237 orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
8238 data &= ~LC_XMIT_N_FTS_MASK;
8239 data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
8240 if (orig != data)
8241 WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
8242
8243 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
8244 data |= LC_GO_TO_RECOVERY;
8245 if (orig != data)
8246 WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
8247
8248 orig = data = RREG32_PCIE_PORT(PCIE_P_CNTL);
8249 data |= P_IGNORE_EDB_ERR;
8250 if (orig != data)
8251 WREG32_PCIE_PORT(PCIE_P_CNTL, data);
8252
8253 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
8254 data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
8255 data |= LC_PMI_TO_L1_DIS;
8256 if (!disable_l0s)
8257 data |= LC_L0S_INACTIVITY(7);
8258
8259 if (!disable_l1) {
8260 data |= LC_L1_INACTIVITY(7);
8261 data &= ~LC_PMI_TO_L1_DIS;
8262 if (orig != data)
8263 WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
8264
8265 if (!disable_plloff_in_l1) {
8266 bool clk_req_support;
8267
8268 orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0);
8269 data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
8270 data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
8271 if (orig != data)
8272 WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0, data);
8273
8274 orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1);
8275 data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
8276 data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
8277 if (orig != data)
8278 WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1, data);
8279
8280 orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0);
8281 data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
8282 data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
8283 if (orig != data)
8284 WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0, data);
8285
8286 orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1);
8287 data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
8288 data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
8289 if (orig != data)
8290 WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1, data);
8291
8292 orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
8293 data &= ~LC_DYN_LANES_PWR_STATE_MASK;
8294 data |= LC_DYN_LANES_PWR_STATE(3);
8295 if (orig != data)
8296 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
8297
8298 if (!disable_clkreq) {
8299 struct pci_dev *root = rdev->pdev->bus->self;
8300 u32 lnkcap;
8301
8302 clk_req_support = false;
8303 pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
8304 if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
8305 clk_req_support = true;
8306 } else {
8307 clk_req_support = false;
8308 }
8309
8310 if (clk_req_support) {
8311 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
8312 data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
8313 if (orig != data)
8314 WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
8315
8316 orig = data = RREG32_SMC(THM_CLK_CNTL);
8317 data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
8318 data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
8319 if (orig != data)
8320 WREG32_SMC(THM_CLK_CNTL, data);
8321
8322 orig = data = RREG32_SMC(MISC_CLK_CTRL);
8323 data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
8324 data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
8325 if (orig != data)
8326 WREG32_SMC(MISC_CLK_CTRL, data);
8327
8328 orig = data = RREG32_SMC(CG_CLKPIN_CNTL);
8329 data &= ~BCLK_AS_XCLK;
8330 if (orig != data)
8331 WREG32_SMC(CG_CLKPIN_CNTL, data);
8332
8333 orig = data = RREG32_SMC(CG_CLKPIN_CNTL_2);
8334 data &= ~FORCE_BIF_REFCLK_EN;
8335 if (orig != data)
8336 WREG32_SMC(CG_CLKPIN_CNTL_2, data);
8337
8338 orig = data = RREG32_SMC(MPLL_BYPASSCLK_SEL);
8339 data &= ~MPLL_CLKOUT_SEL_MASK;
8340 data |= MPLL_CLKOUT_SEL(4);
8341 if (orig != data)
8342 WREG32_SMC(MPLL_BYPASSCLK_SEL, data);
8343 }
8344 }
8345 } else {
8346 if (orig != data)
8347 WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
8348 }
8349
8350 orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
8351 data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
8352 if (orig != data)
8353 WREG32_PCIE_PORT(PCIE_CNTL2, data);
8354
8355 if (!disable_l0s) {
8356 data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
8357 if((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
8358 data = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
8359 if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
8360 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
8361 data &= ~LC_L0S_INACTIVITY_MASK;
8362 if (orig != data)
8363 WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
8364 }
8365 }
8366 }
7016} 8367}
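cik_program_aspm() leans on one idiom throughout: read a register into both orig and data, modify data, and only write back when the value actually changed, so untouched registers never see a redundant (and potentially slow) indirect write. A small stand-alone sketch of that pattern, where reg_read/reg_write are stand-ins for RREG32_PCIE_PORT/WREG32_PCIE_PORT:

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch, not part of the patch: the read-modify-write idiom
 * used throughout cik_program_aspm().  reg_read/reg_write are hypothetical
 * stand-ins for the indirect PCIE port register accessors. */
static uint32_t fake_reg = 0x00000010;

static uint32_t reg_read(void)        { return fake_reg; }
static void     reg_write(uint32_t v) { fake_reg = v; printf("write 0x%08x\n", v); }

static void rmw_set_bits(uint32_t set, uint32_t clear)
{
        uint32_t orig, data;

        orig = data = reg_read();
        data &= ~clear;
        data |= set;
        if (orig != data)       /* skip the write entirely if nothing changed */
                reg_write(data);
}

int main(void)
{
        rmw_set_bits(0x10, 0);  /* bit already set: no write issued */
        rmw_set_bits(0x01, 0);  /* new bit: one write issued */
        return 0;
}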
diff --git a/drivers/gpu/drm/radeon/cik_reg.h b/drivers/gpu/drm/radeon/cik_reg.h
index d71e46d571f5..ca1bb6133580 100644
--- a/drivers/gpu/drm/radeon/cik_reg.h
+++ b/drivers/gpu/drm/radeon/cik_reg.h
@@ -24,6 +24,9 @@
24#ifndef __CIK_REG_H__ 24#ifndef __CIK_REG_H__
25#define __CIK_REG_H__ 25#define __CIK_REG_H__
26 26
27#define CIK_DIDT_IND_INDEX 0xca00
28#define CIK_DIDT_IND_DATA 0xca04
29
27#define CIK_DC_GPIO_HPD_MASK 0x65b0 30#define CIK_DC_GPIO_HPD_MASK 0x65b0
28#define CIK_DC_GPIO_HPD_A 0x65b4 31#define CIK_DC_GPIO_HPD_A 0x65b4
29#define CIK_DC_GPIO_HPD_EN 0x65b8 32#define CIK_DC_GPIO_HPD_EN 0x65b8
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
new file mode 100644
index 000000000000..b6286068e111
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -0,0 +1,785 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <linux/firmware.h>
25#include <drm/drmP.h>
26#include "radeon.h"
27#include "radeon_asic.h"
28#include "cikd.h"
29
30/* sdma */
31#define CIK_SDMA_UCODE_SIZE 1050
32#define CIK_SDMA_UCODE_VERSION 64
33
34u32 cik_gpu_check_soft_reset(struct radeon_device *rdev);
35
36/*
37 * sDMA - System DMA
38 * Starting with CIK, the GPU has new asynchronous
39 * DMA engines. These engines are used for compute
40 * and gfx. There are two DMA engines (SDMA0, SDMA1)
41 * and each one supports 1 ring buffer used for gfx
42 * and 2 queues used for compute.
43 *
44 * The programming model is very similar to the CP
 45 * (ring buffer, IBs, etc.), but sDMA has its own
46 * packet format that is different from the PM4 format
47 * used by the CP. sDMA supports copying data, writing
48 * embedded data, solid fills, and a number of other
49 * things. It also has support for tiling/detiling of
50 * buffers.
51 */
52
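The ring and IB tests further down in this file emit the simplest sDMA packet, a 5-dword linear write. As a rough illustration of that layout, here is a stand-alone sketch; note that the header encoding of SDMA_PACKET() (opcode in bits 7:0, sub-opcode in 15:8, extra bits in 31:16) and the TOY_OPCODE_WRITE value are assumptions based on how the macro is used here, not quotes of cikd.h:

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch, not part of the patch: the 5-dword linear-write
 * packet emitted by cik_sdma_ring_test()/cik_sdma_ib_test().  The header
 * bit layout and opcode values below are assumptions. */
#define TOY_SDMA_PACKET(op, sub_op, extra) \
        ((((uint32_t)(extra) & 0xffff) << 16) | \
         (((uint32_t)(sub_op) & 0xff) << 8)   | \
         ((uint32_t)(op) & 0xff))

#define TOY_OPCODE_WRITE      2   /* hypothetical value */
#define TOY_SUB_OPCODE_LINEAR 0

static int build_write_packet(uint32_t *buf, uint64_t dst, uint32_t value)
{
        buf[0] = TOY_SDMA_PACKET(TOY_OPCODE_WRITE, TOY_SUB_OPCODE_LINEAR, 0);
        buf[1] = (uint32_t)(dst & 0xfffffffc);  /* dword-aligned low address bits */
        buf[2] = (uint32_t)(dst >> 32);         /* high 32 bits of the address */
        buf[3] = 1;                             /* number of data dwords to follow */
        buf[4] = value;
        return 5;                               /* dwords written */
}

int main(void)
{
        uint32_t pkt[5];
        int i, n = build_write_packet(pkt, 0x123456789cULL, 0xDEADBEEF);

        for (i = 0; i < n; i++)
                printf("dw%d: 0x%08x\n", i, pkt[i]);
        return 0;
}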
53/**
54 * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
55 *
56 * @rdev: radeon_device pointer
57 * @ib: IB object to schedule
58 *
59 * Schedule an IB in the DMA ring (CIK).
60 */
61void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
62 struct radeon_ib *ib)
63{
64 struct radeon_ring *ring = &rdev->ring[ib->ring];
65 u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;
66
67 if (rdev->wb.enabled) {
68 u32 next_rptr = ring->wptr + 5;
69 while ((next_rptr & 7) != 4)
70 next_rptr++;
71 next_rptr += 4;
72 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
73 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
74 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
75 radeon_ring_write(ring, 1); /* number of DWs to follow */
76 radeon_ring_write(ring, next_rptr);
77 }
78
 79	/* IB packet must end on an 8 DW boundary */
80 while ((ring->wptr & 7) != 4)
81 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
82 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
83 radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
84 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
85 radeon_ring_write(ring, ib->length_dw);
86
87}
88
89/**
90 * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
91 *
92 * @rdev: radeon_device pointer
93 * @fence: radeon fence object
94 *
95 * Add a DMA fence packet to the ring to write
 96 * the fence seq number and a DMA trap packet to generate
97 * an interrupt if needed (CIK).
98 */
99void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
100 struct radeon_fence *fence)
101{
102 struct radeon_ring *ring = &rdev->ring[fence->ring];
103 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
104 u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
105 SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
106 u32 ref_and_mask;
107
108 if (fence->ring == R600_RING_TYPE_DMA_INDEX)
109 ref_and_mask = SDMA0;
110 else
111 ref_and_mask = SDMA1;
112
113 /* write the fence */
114 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
115 radeon_ring_write(ring, addr & 0xffffffff);
116 radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
117 radeon_ring_write(ring, fence->seq);
118 /* generate an interrupt */
119 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
120 /* flush HDP */
121 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
122 radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
123 radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
124 radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
125 radeon_ring_write(ring, ref_and_mask); /* MASK */
126 radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
127}
128
129/**
130 * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
131 *
132 * @rdev: radeon_device pointer
133 * @ring: radeon_ring structure holding ring information
134 * @semaphore: radeon semaphore object
135 * @emit_wait: wait or signal semaphore
136 *
137 * Add a DMA semaphore packet to the ring to wait on or signal
138 * other rings (CIK).
139 */
140void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
141 struct radeon_ring *ring,
142 struct radeon_semaphore *semaphore,
143 bool emit_wait)
144{
145 u64 addr = semaphore->gpu_addr;
146 u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;
147
148 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
149 radeon_ring_write(ring, addr & 0xfffffff8);
150 radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
151}
152
153/**
154 * cik_sdma_gfx_stop - stop the gfx async dma engines
155 *
156 * @rdev: radeon_device pointer
157 *
158 * Stop the gfx async dma ring buffers (CIK).
159 */
160static void cik_sdma_gfx_stop(struct radeon_device *rdev)
161{
162 u32 rb_cntl, reg_offset;
163 int i;
164
165 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
166
167 for (i = 0; i < 2; i++) {
168 if (i == 0)
169 reg_offset = SDMA0_REGISTER_OFFSET;
170 else
171 reg_offset = SDMA1_REGISTER_OFFSET;
172 rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
173 rb_cntl &= ~SDMA_RB_ENABLE;
174 WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
175 WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
176 }
177}
178
179/**
180 * cik_sdma_rlc_stop - stop the compute async dma engines
181 *
182 * @rdev: radeon_device pointer
183 *
184 * Stop the compute async dma queues (CIK).
185 */
186static void cik_sdma_rlc_stop(struct radeon_device *rdev)
187{
188 /* XXX todo */
189}
190
191/**
192 * cik_sdma_enable - halt or unhalt the async dma engines
193 *
194 * @rdev: radeon_device pointer
195 * @enable: enable/disable the DMA MEs.
196 *
197 * Halt or unhalt the async dma engines (CIK).
198 */
199void cik_sdma_enable(struct radeon_device *rdev, bool enable)
200{
201 u32 me_cntl, reg_offset;
202 int i;
203
204 for (i = 0; i < 2; i++) {
205 if (i == 0)
206 reg_offset = SDMA0_REGISTER_OFFSET;
207 else
208 reg_offset = SDMA1_REGISTER_OFFSET;
209 me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
210 if (enable)
211 me_cntl &= ~SDMA_HALT;
212 else
213 me_cntl |= SDMA_HALT;
214 WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
215 }
216}
217
218/**
219 * cik_sdma_gfx_resume - setup and start the async dma engines
220 *
221 * @rdev: radeon_device pointer
222 *
223 * Set up the gfx DMA ring buffers and enable them (CIK).
224 * Returns 0 for success, error for failure.
225 */
226static int cik_sdma_gfx_resume(struct radeon_device *rdev)
227{
228 struct radeon_ring *ring;
229 u32 rb_cntl, ib_cntl;
230 u32 rb_bufsz;
231 u32 reg_offset, wb_offset;
232 int i, r;
233
234 for (i = 0; i < 2; i++) {
235 if (i == 0) {
236 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
237 reg_offset = SDMA0_REGISTER_OFFSET;
238 wb_offset = R600_WB_DMA_RPTR_OFFSET;
239 } else {
240 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
241 reg_offset = SDMA1_REGISTER_OFFSET;
242 wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
243 }
244
245 WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
246 WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
247
248 /* Set ring buffer size in dwords */
249 rb_bufsz = order_base_2(ring->ring_size / 4);
250 rb_cntl = rb_bufsz << 1;
251#ifdef __BIG_ENDIAN
252 rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
253#endif
254 WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
255
256 /* Initialize the ring buffer's read and write pointers */
257 WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
258 WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);
259
260 /* set the wb address whether it's enabled or not */
261 WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
262 upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
263 WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
264 ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
265
266 if (rdev->wb.enabled)
267 rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;
268
269 WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
270 WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);
271
272 ring->wptr = 0;
273 WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);
274
275 ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2;
276
277 /* enable DMA RB */
278 WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);
279
280 ib_cntl = SDMA_IB_ENABLE;
281#ifdef __BIG_ENDIAN
282 ib_cntl |= SDMA_IB_SWAP_ENABLE;
283#endif
284 /* enable DMA IBs */
285 WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);
286
287 ring->ready = true;
288
289 r = radeon_ring_test(rdev, ring->idx, ring);
290 if (r) {
291 ring->ready = false;
292 return r;
293 }
294 }
295
296 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
297
298 return 0;
299}
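The ring-buffer size field written into SDMA0_GFX_RB_CNTL above is not the byte size: it is order_base_2(ring_size / 4), i.e. the log2 of the ring size in dwords, shifted into place. A stand-alone equivalent, under the assumption that the kernel's order_base_2() rounds up to the next power of two before taking the log:

#include <stdio.h>

/* Illustrative sketch, not part of the patch: equivalent of the
 * order_base_2(ring->ring_size / 4) computation in cik_sdma_gfx_resume(). */
static unsigned int toy_order_base_2(unsigned long n)
{
        unsigned int order = 0;

        while ((1UL << order) < n)
                order++;
        return order;
}

int main(void)
{
        /* a 256KB ring is 65536 dwords -> order 16 */
        printf("order = %u\n", toy_order_base_2((256 * 1024) / 4));
        return 0;
}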
300
301/**
302 * cik_sdma_rlc_resume - setup and start the async dma engines
303 *
304 * @rdev: radeon_device pointer
305 *
306 * Set up the compute DMA queues and enable them (CIK).
307 * Returns 0 for success, error for failure.
308 */
309static int cik_sdma_rlc_resume(struct radeon_device *rdev)
310{
311 /* XXX todo */
312 return 0;
313}
314
315/**
316 * cik_sdma_load_microcode - load the sDMA ME ucode
317 *
318 * @rdev: radeon_device pointer
319 *
320 * Loads the sDMA0/1 ucode.
321 * Returns 0 for success, -EINVAL if the ucode is not available.
322 */
323static int cik_sdma_load_microcode(struct radeon_device *rdev)
324{
325 const __be32 *fw_data;
326 int i;
327
328 if (!rdev->sdma_fw)
329 return -EINVAL;
330
331 /* stop the gfx rings and rlc compute queues */
332 cik_sdma_gfx_stop(rdev);
333 cik_sdma_rlc_stop(rdev);
334
335 /* halt the MEs */
336 cik_sdma_enable(rdev, false);
337
338 /* sdma0 */
339 fw_data = (const __be32 *)rdev->sdma_fw->data;
340 WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
341 for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
342 WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
343 WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
344
345 /* sdma1 */
346 fw_data = (const __be32 *)rdev->sdma_fw->data;
347 WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
348 for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
349 WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
350 WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
351
352 WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
353 WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
354 return 0;
355}
356
357/**
358 * cik_sdma_resume - setup and start the async dma engines
359 *
360 * @rdev: radeon_device pointer
361 *
362 * Set up the DMA engines and enable them (CIK).
363 * Returns 0 for success, error for failure.
364 */
365int cik_sdma_resume(struct radeon_device *rdev)
366{
367 int r;
368
369 /* Reset dma */
370 WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
371 RREG32(SRBM_SOFT_RESET);
372 udelay(50);
373 WREG32(SRBM_SOFT_RESET, 0);
374 RREG32(SRBM_SOFT_RESET);
375
376 r = cik_sdma_load_microcode(rdev);
377 if (r)
378 return r;
379
380 /* unhalt the MEs */
381 cik_sdma_enable(rdev, true);
382
383 /* start the gfx rings and rlc compute queues */
384 r = cik_sdma_gfx_resume(rdev);
385 if (r)
386 return r;
387 r = cik_sdma_rlc_resume(rdev);
388 if (r)
389 return r;
390
391 return 0;
392}
393
394/**
395 * cik_sdma_fini - tear down the async dma engines
396 *
397 * @rdev: radeon_device pointer
398 *
399 * Stop the async dma engines and free the rings (CIK).
400 */
401void cik_sdma_fini(struct radeon_device *rdev)
402{
403 /* stop the gfx rings and rlc compute queues */
404 cik_sdma_gfx_stop(rdev);
405 cik_sdma_rlc_stop(rdev);
406 /* halt the MEs */
407 cik_sdma_enable(rdev, false);
408 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
409 radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
410 /* XXX - compute dma queue tear down */
411}
412
413/**
414 * cik_copy_dma - copy pages using the DMA engine
415 *
416 * @rdev: radeon_device pointer
417 * @src_offset: src GPU address
418 * @dst_offset: dst GPU address
419 * @num_gpu_pages: number of GPU pages to xfer
420 * @fence: radeon fence object
421 *
422 * Copy GPU pages using the DMA engine (CIK).
423 * Used by the radeon ttm implementation to move pages if
424 * registered as the asic copy callback.
425 */
426int cik_copy_dma(struct radeon_device *rdev,
427 uint64_t src_offset, uint64_t dst_offset,
428 unsigned num_gpu_pages,
429 struct radeon_fence **fence)
430{
431 struct radeon_semaphore *sem = NULL;
432 int ring_index = rdev->asic->copy.dma_ring_index;
433 struct radeon_ring *ring = &rdev->ring[ring_index];
434 u32 size_in_bytes, cur_size_in_bytes;
435 int i, num_loops;
436 int r = 0;
437
438 r = radeon_semaphore_create(rdev, &sem);
439 if (r) {
440 DRM_ERROR("radeon: moving bo (%d).\n", r);
441 return r;
442 }
443
444 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
445 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
446 r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
447 if (r) {
448 DRM_ERROR("radeon: moving bo (%d).\n", r);
449 radeon_semaphore_free(rdev, &sem, NULL);
450 return r;
451 }
452
453 if (radeon_fence_need_sync(*fence, ring->idx)) {
454 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
455 ring->idx);
456 radeon_fence_note_sync(*fence, ring->idx);
457 } else {
458 radeon_semaphore_free(rdev, &sem, NULL);
459 }
460
461 for (i = 0; i < num_loops; i++) {
462 cur_size_in_bytes = size_in_bytes;
463 if (cur_size_in_bytes > 0x1fffff)
464 cur_size_in_bytes = 0x1fffff;
465 size_in_bytes -= cur_size_in_bytes;
466 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
467 radeon_ring_write(ring, cur_size_in_bytes);
468 radeon_ring_write(ring, 0); /* src/dst endian swap */
469 radeon_ring_write(ring, src_offset & 0xffffffff);
470 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
471 radeon_ring_write(ring, dst_offset & 0xfffffffc);
472 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
473 src_offset += cur_size_in_bytes;
474 dst_offset += cur_size_in_bytes;
475 }
476
477 r = radeon_fence_emit(rdev, fence, ring->idx);
478 if (r) {
479 radeon_ring_unlock_undo(rdev, ring);
480 return r;
481 }
482
483 radeon_ring_unlock_commit(rdev, ring);
484 radeon_semaphore_free(rdev, &sem, *fence);
485
486 return r;
487}
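The sizing math in cik_copy_dma() is worth spelling out: the copy is split into chunks of at most 0x1fffff bytes, each chunk emits 7 ring dwords, and the ring lock reserves an extra 14 dwords of headroom for the semaphore and fence packets (the "num_loops * 7 + 14" above). A small sketch of that calculation, assuming 4KB GPU pages:

#include <stdio.h>

/* Illustrative sketch, not part of the patch: ring-space math used by
 * cik_copy_dma().  GPU_PAGE_SHIFT of 12 (4KB pages) is an assumption. */
#define MAX_COPY_BYTES 0x1fffff
#define GPU_PAGE_SHIFT 12

static unsigned int copy_ring_dwords(unsigned int num_gpu_pages)
{
        unsigned long bytes = (unsigned long)num_gpu_pages << GPU_PAGE_SHIFT;
        unsigned int num_loops = (bytes + MAX_COPY_BYTES - 1) / MAX_COPY_BYTES;

        return num_loops * 7 + 14;
}

int main(void)
{
        /* 1024 pages = 4MB, just over two maximum-size chunks -> 3 loops -> 35 dwords */
        printf("%u dwords\n", copy_ring_dwords(1024));
        return 0;
}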
488
489/**
490 * cik_sdma_ring_test - simple async dma engine test
491 *
492 * @rdev: radeon_device pointer
493 * @ring: radeon_ring structure holding ring information
494 *
495 * Test the DMA engine by using it to write a value
496 * to memory (CIK).
497 * Returns 0 for success, error for failure.
498 */
499int cik_sdma_ring_test(struct radeon_device *rdev,
500 struct radeon_ring *ring)
501{
502 unsigned i;
503 int r;
504 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
505 u32 tmp;
506
507 if (!ptr) {
508 DRM_ERROR("invalid vram scratch pointer\n");
509 return -EINVAL;
510 }
511
512 tmp = 0xCAFEDEAD;
513 writel(tmp, ptr);
514
515 r = radeon_ring_lock(rdev, ring, 4);
516 if (r) {
517 DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
518 return r;
519 }
520 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
521 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
522 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
523 radeon_ring_write(ring, 1); /* number of DWs to follow */
524 radeon_ring_write(ring, 0xDEADBEEF);
525 radeon_ring_unlock_commit(rdev, ring);
526
527 for (i = 0; i < rdev->usec_timeout; i++) {
528 tmp = readl(ptr);
529 if (tmp == 0xDEADBEEF)
530 break;
531 DRM_UDELAY(1);
532 }
533
534 if (i < rdev->usec_timeout) {
535 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
536 } else {
537 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
538 ring->idx, tmp);
539 r = -EINVAL;
540 }
541 return r;
542}
543
544/**
545 * cik_sdma_ib_test - test an IB on the DMA engine
546 *
547 * @rdev: radeon_device pointer
548 * @ring: radeon_ring structure holding ring information
549 *
550 * Test a simple IB in the DMA ring (CIK).
551 * Returns 0 on success, error on failure.
552 */
553int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
554{
555 struct radeon_ib ib;
556 unsigned i;
557 int r;
558 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
559 u32 tmp = 0;
560
561 if (!ptr) {
562 DRM_ERROR("invalid vram scratch pointer\n");
563 return -EINVAL;
564 }
565
566 tmp = 0xCAFEDEAD;
567 writel(tmp, ptr);
568
569 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
570 if (r) {
571 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
572 return r;
573 }
574
575 ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
576 ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
577 ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
578 ib.ptr[3] = 1;
579 ib.ptr[4] = 0xDEADBEEF;
580 ib.length_dw = 5;
581
582 r = radeon_ib_schedule(rdev, &ib, NULL);
583 if (r) {
584 radeon_ib_free(rdev, &ib);
585 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
586 return r;
587 }
588 r = radeon_fence_wait(ib.fence, false);
589 if (r) {
590 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
591 return r;
592 }
593 for (i = 0; i < rdev->usec_timeout; i++) {
594 tmp = readl(ptr);
595 if (tmp == 0xDEADBEEF)
596 break;
597 DRM_UDELAY(1);
598 }
599 if (i < rdev->usec_timeout) {
600 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
601 } else {
602 DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
603 r = -EINVAL;
604 }
605 radeon_ib_free(rdev, &ib);
606 return r;
607}
608
609/**
610 * cik_sdma_is_lockup - Check if the DMA engine is locked up
611 *
612 * @rdev: radeon_device pointer
613 * @ring: radeon_ring structure holding ring information
614 *
615 * Check if the async DMA engine is locked up (CIK).
616 * Returns true if the engine appears to be locked up, false if not.
617 */
618bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
619{
620 u32 reset_mask = cik_gpu_check_soft_reset(rdev);
621 u32 mask;
622
623 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
624 mask = RADEON_RESET_DMA;
625 else
626 mask = RADEON_RESET_DMA1;
627
628 if (!(reset_mask & mask)) {
629 radeon_ring_lockup_update(ring);
630 return false;
631 }
632 /* force ring activities */
633 radeon_ring_force_activity(rdev, ring);
634 return radeon_ring_test_lockup(rdev, ring);
635}
636
637/**
638 * cik_sdma_vm_set_page - update the page tables using sDMA
639 *
640 * @rdev: radeon_device pointer
641 * @ib: indirect buffer to fill with commands
642 * @pe: addr of the page entry
643 * @addr: dst addr to write into pe
644 * @count: number of page entries to update
645 * @incr: increase next addr by incr bytes
646 * @flags: access flags
647 *
648 * Update the page tables using sDMA (CIK).
649 */
650void cik_sdma_vm_set_page(struct radeon_device *rdev,
651 struct radeon_ib *ib,
652 uint64_t pe,
653 uint64_t addr, unsigned count,
654 uint32_t incr, uint32_t flags)
655{
656 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
657 uint64_t value;
658 unsigned ndw;
659
660 if (flags & RADEON_VM_PAGE_SYSTEM) {
661 while (count) {
662 ndw = count * 2;
663 if (ndw > 0xFFFFE)
664 ndw = 0xFFFFE;
665
666 /* for non-physically contiguous pages (system) */
667 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
668 ib->ptr[ib->length_dw++] = pe;
669 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
670 ib->ptr[ib->length_dw++] = ndw;
671 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
672 if (flags & RADEON_VM_PAGE_SYSTEM) {
673 value = radeon_vm_map_gart(rdev, addr);
674 value &= 0xFFFFFFFFFFFFF000ULL;
675 } else if (flags & RADEON_VM_PAGE_VALID) {
676 value = addr;
677 } else {
678 value = 0;
679 }
680 addr += incr;
681 value |= r600_flags;
682 ib->ptr[ib->length_dw++] = value;
683 ib->ptr[ib->length_dw++] = upper_32_bits(value);
684 }
685 }
686 } else {
687 while (count) {
688 ndw = count;
689 if (ndw > 0x7FFFF)
690 ndw = 0x7FFFF;
691
692 if (flags & RADEON_VM_PAGE_VALID)
693 value = addr;
694 else
695 value = 0;
696 /* for physically contiguous pages (vram) */
697 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
698 ib->ptr[ib->length_dw++] = pe; /* dst addr */
699 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
700 ib->ptr[ib->length_dw++] = r600_flags; /* mask */
701 ib->ptr[ib->length_dw++] = 0;
702 ib->ptr[ib->length_dw++] = value; /* value */
703 ib->ptr[ib->length_dw++] = upper_32_bits(value);
704 ib->ptr[ib->length_dw++] = incr; /* increment size */
705 ib->ptr[ib->length_dw++] = 0;
706 ib->ptr[ib->length_dw++] = ndw; /* number of entries */
707 pe += ndw * 8;
708 addr += ndw * incr;
709 count -= ndw;
710 }
711 }
712 while (ib->length_dw & 0x7)
713 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
714}
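The two branches of cik_sdma_vm_set_page() have very different dword budgets: scattered (system) pages use a linear write packet, a 4-dword header plus 2 dwords per PTE per run of up to 0x7FFFF entries, while physically contiguous runs use a fixed 10-dword GENERATE_PTE_PDE packet per run; in both cases the IB is then NOP-padded to a multiple of 8 dwords. A sketch of that budget, assuming the interpretation above is right:

#include <stdio.h>

/* Illustrative sketch, not part of the patch: IB dword budget implied by
 * cik_sdma_vm_set_page(). */
static unsigned long vm_set_page_dwords(unsigned long count, int contiguous)
{
        unsigned long dw = 0;

        while (count) {
                unsigned long chunk = count > 0x7FFFF ? 0x7FFFF : count;

                if (contiguous)
                        dw += 10;            /* one GENERATE_PTE_PDE packet */
                else
                        dw += 4 + chunk * 2; /* write header + 2 dwords per PTE */
                count -= chunk;
        }
        while (dw & 0x7)
                dw++;                        /* NOP padding to 8-dword alignment */
        return dw;
}

int main(void)
{
        printf("scattered, 512 PTEs:  %lu dwords\n", vm_set_page_dwords(512, 0));
        printf("contiguous, 512 PTEs: %lu dwords\n", vm_set_page_dwords(512, 1));
        return 0;
}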
715
716/**
717 * cik_dma_vm_flush - cik vm flush using sDMA
718 *
719 * @rdev: radeon_device pointer
720 *
721 * Update the page table base and flush the VM TLB
722 * using sDMA (CIK).
723 */
724void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
725{
726 struct radeon_ring *ring = &rdev->ring[ridx];
727 u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
728 SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
729 u32 ref_and_mask;
730
731 if (vm == NULL)
732 return;
733
734 if (ridx == R600_RING_TYPE_DMA_INDEX)
735 ref_and_mask = SDMA0;
736 else
737 ref_and_mask = SDMA1;
738
739 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
740 if (vm->id < 8) {
741 radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
742 } else {
743 radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
744 }
745 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
746
747 /* update SH_MEM_* regs */
748 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
749 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
750 radeon_ring_write(ring, VMID(vm->id));
751
752 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
753 radeon_ring_write(ring, SH_MEM_BASES >> 2);
754 radeon_ring_write(ring, 0);
755
756 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
757 radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
758 radeon_ring_write(ring, 0);
759
760 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
761 radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
762 radeon_ring_write(ring, 1);
763
764 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
765 radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
766 radeon_ring_write(ring, 0);
767
768 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
769 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
770 radeon_ring_write(ring, VMID(0));
771
772 /* flush HDP */
773 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
774 radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
775 radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
776 radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
777 radeon_ring_write(ring, ref_and_mask); /* MASK */
778 radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
779
780 /* flush TLB */
781 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
782 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
783 radeon_ring_write(ring, 1 << vm->id);
784}
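The first SRBM write in cik_dma_vm_flush() picks the page-table base register from one of two banks: VMIDs 0-7 index off VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, VMIDs 8-15 off VM_CONTEXT8_PAGE_TABLE_BASE_ADDR, each slot one dword apart; the value then gets shifted right by 2, apparently because the SRBM_WRITE packet addresses registers by dword index. A sketch of that selection with placeholder base offsets (the real cikd.h values are not shown in this hunk):

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch, not part of the patch: page-table base register
 * selection as in cik_dma_vm_flush().  The two base offsets below are
 * hypothetical placeholders. */
#define TOY_VM_CONTEXT0_PT_BASE 0x1538  /* hypothetical */
#define TOY_VM_CONTEXT8_PT_BASE 0x1438  /* hypothetical */

static uint32_t pt_base_reg(unsigned int vmid)
{
        if (vmid < 8)
                return TOY_VM_CONTEXT0_PT_BASE + (vmid << 2);
        return TOY_VM_CONTEXT8_PT_BASE + ((vmid - 8) << 2);
}

int main(void)
{
        printf("vmid 3  -> reg 0x%04x\n", pt_base_reg(3));
        printf("vmid 11 -> reg 0x%04x\n", pt_base_reg(11));
        return 0;
}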
785
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 7e9275eaef80..203d2a09a1f5 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -28,21 +28,375 @@
28 28
29#define CIK_RB_BITMAP_WIDTH_PER_SH 2 29#define CIK_RB_BITMAP_WIDTH_PER_SH 2
30 30
31/* DIDT IND registers */
32#define DIDT_SQ_CTRL0 0x0
33# define DIDT_CTRL_EN (1 << 0)
34#define DIDT_DB_CTRL0 0x20
35#define DIDT_TD_CTRL0 0x40
36#define DIDT_TCP_CTRL0 0x60
37
31/* SMC IND registers */ 38/* SMC IND registers */
39#define DPM_TABLE_475 0x3F768
40# define SamuBootLevel(x) ((x) << 0)
41# define SamuBootLevel_MASK 0x000000ff
42# define SamuBootLevel_SHIFT 0
43# define AcpBootLevel(x) ((x) << 8)
44# define AcpBootLevel_MASK 0x0000ff00
45# define AcpBootLevel_SHIFT 8
46# define VceBootLevel(x) ((x) << 16)
47# define VceBootLevel_MASK 0x00ff0000
48# define VceBootLevel_SHIFT 16
49# define UvdBootLevel(x) ((x) << 24)
50# define UvdBootLevel_MASK 0xff000000
51# define UvdBootLevel_SHIFT 24
52
53#define FIRMWARE_FLAGS 0x3F800
54# define INTERRUPTS_ENABLED (1 << 0)
55
56#define NB_DPM_CONFIG_1 0x3F9E8
57# define Dpm0PgNbPsLo(x) ((x) << 0)
58# define Dpm0PgNbPsLo_MASK 0x000000ff
59# define Dpm0PgNbPsLo_SHIFT 0
60# define Dpm0PgNbPsHi(x) ((x) << 8)
61# define Dpm0PgNbPsHi_MASK 0x0000ff00
62# define Dpm0PgNbPsHi_SHIFT 8
63# define DpmXNbPsLo(x) ((x) << 16)
64# define DpmXNbPsLo_MASK 0x00ff0000
65# define DpmXNbPsLo_SHIFT 16
66# define DpmXNbPsHi(x) ((x) << 24)
67# define DpmXNbPsHi_MASK 0xff000000
68# define DpmXNbPsHi_SHIFT 24
69
70#define SMC_SYSCON_RESET_CNTL 0x80000000
71# define RST_REG (1 << 0)
72#define SMC_SYSCON_CLOCK_CNTL_0 0x80000004
73# define CK_DISABLE (1 << 0)
74# define CKEN (1 << 24)
75
76#define SMC_SYSCON_MISC_CNTL 0x80000010
77
78#define SMC_SYSCON_MSG_ARG_0 0x80000068
79
80#define SMC_PC_C 0x80000370
81
82#define SMC_SCRATCH9 0x80000424
83
84#define RCU_UC_EVENTS 0xC0000004
85# define BOOT_SEQ_DONE (1 << 7)
86
32#define GENERAL_PWRMGT 0xC0200000 87#define GENERAL_PWRMGT 0xC0200000
88# define GLOBAL_PWRMGT_EN (1 << 0)
89# define STATIC_PM_EN (1 << 1)
90# define THERMAL_PROTECTION_DIS (1 << 2)
91# define THERMAL_PROTECTION_TYPE (1 << 3)
92# define SW_SMIO_INDEX(x) ((x) << 6)
93# define SW_SMIO_INDEX_MASK (1 << 6)
94# define SW_SMIO_INDEX_SHIFT 6
95# define VOLT_PWRMGT_EN (1 << 10)
33# define GPU_COUNTER_CLK (1 << 15) 96# define GPU_COUNTER_CLK (1 << 15)
34 97# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
98
99#define CNB_PWRMGT_CNTL 0xC0200004
100# define GNB_SLOW_MODE(x) ((x) << 0)
101# define GNB_SLOW_MODE_MASK (3 << 0)
102# define GNB_SLOW_MODE_SHIFT 0
103# define GNB_SLOW (1 << 2)
104# define FORCE_NB_PS1 (1 << 3)
105# define DPM_ENABLED (1 << 4)
106
107#define SCLK_PWRMGT_CNTL 0xC0200008
108# define SCLK_PWRMGT_OFF (1 << 0)
109# define RESET_BUSY_CNT (1 << 4)
110# define RESET_SCLK_CNT (1 << 5)
111# define DYNAMIC_PM_EN (1 << 21)
112
113#define TARGET_AND_CURRENT_PROFILE_INDEX 0xC0200014
114# define CURRENT_STATE_MASK (0xf << 4)
115# define CURRENT_STATE_SHIFT 4
116# define CURR_MCLK_INDEX_MASK (0xf << 8)
117# define CURR_MCLK_INDEX_SHIFT 8
118# define CURR_SCLK_INDEX_MASK (0x1f << 16)
119# define CURR_SCLK_INDEX_SHIFT 16
120
121#define CG_SSP 0xC0200044
122# define SST(x) ((x) << 0)
123# define SST_MASK (0xffff << 0)
124# define SSTU(x) ((x) << 16)
125# define SSTU_MASK (0xf << 16)
126
127#define CG_DISPLAY_GAP_CNTL 0xC0200060
128# define DISP_GAP(x) ((x) << 0)
129# define DISP_GAP_MASK (3 << 0)
130# define VBI_TIMER_COUNT(x) ((x) << 4)
131# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
132# define VBI_TIMER_UNIT(x) ((x) << 20)
133# define VBI_TIMER_UNIT_MASK (7 << 20)
134# define DISP_GAP_MCHG(x) ((x) << 24)
135# define DISP_GAP_MCHG_MASK (3 << 24)
136
137#define SMU_VOLTAGE_STATUS 0xC0200094
138# define SMU_VOLTAGE_CURRENT_LEVEL_MASK (0xff << 1)
139# define SMU_VOLTAGE_CURRENT_LEVEL_SHIFT 1
140
141#define TARGET_AND_CURRENT_PROFILE_INDEX_1 0xC02000F0
142# define CURR_PCIE_INDEX_MASK (0xf << 24)
143# define CURR_PCIE_INDEX_SHIFT 24
144
145#define CG_ULV_PARAMETER 0xC0200158
146
147#define CG_FTV_0 0xC02001A8
148#define CG_FTV_1 0xC02001AC
149#define CG_FTV_2 0xC02001B0
150#define CG_FTV_3 0xC02001B4
151#define CG_FTV_4 0xC02001B8
152#define CG_FTV_5 0xC02001BC
153#define CG_FTV_6 0xC02001C0
154#define CG_FTV_7 0xC02001C4
155
156#define CG_DISPLAY_GAP_CNTL2 0xC0200230
157
158#define LCAC_SX0_OVR_SEL 0xC0400D04
159#define LCAC_SX0_OVR_VAL 0xC0400D08
160
161#define LCAC_MC0_CNTL 0xC0400D30
162#define LCAC_MC0_OVR_SEL 0xC0400D34
163#define LCAC_MC0_OVR_VAL 0xC0400D38
164#define LCAC_MC1_CNTL 0xC0400D3C
165#define LCAC_MC1_OVR_SEL 0xC0400D40
166#define LCAC_MC1_OVR_VAL 0xC0400D44
167
168#define LCAC_MC2_OVR_SEL 0xC0400D4C
169#define LCAC_MC2_OVR_VAL 0xC0400D50
170
171#define LCAC_MC3_OVR_SEL 0xC0400D58
172#define LCAC_MC3_OVR_VAL 0xC0400D5C
173
174#define LCAC_CPL_CNTL 0xC0400D80
175#define LCAC_CPL_OVR_SEL 0xC0400D84
176#define LCAC_CPL_OVR_VAL 0xC0400D88
177
178/* dGPU */
179#define CG_THERMAL_CTRL 0xC0300004
180#define DPM_EVENT_SRC(x) ((x) << 0)
181#define DPM_EVENT_SRC_MASK (7 << 0)
182#define DIG_THERM_DPM(x) ((x) << 14)
183#define DIG_THERM_DPM_MASK 0x003FC000
184#define DIG_THERM_DPM_SHIFT 14
185
186#define CG_THERMAL_INT 0xC030000C
187#define CI_DIG_THERM_INTH(x) ((x) << 8)
188#define CI_DIG_THERM_INTH_MASK 0x0000FF00
189#define CI_DIG_THERM_INTH_SHIFT 8
190#define CI_DIG_THERM_INTL(x) ((x) << 16)
191#define CI_DIG_THERM_INTL_MASK 0x00FF0000
192#define CI_DIG_THERM_INTL_SHIFT 16
193#define THERM_INT_MASK_HIGH (1 << 24)
194#define THERM_INT_MASK_LOW (1 << 25)
195
196#define CG_MULT_THERMAL_STATUS 0xC0300014
197#define ASIC_MAX_TEMP(x) ((x) << 0)
198#define ASIC_MAX_TEMP_MASK 0x000001ff
199#define ASIC_MAX_TEMP_SHIFT 0
200#define CTF_TEMP(x) ((x) << 9)
201#define CTF_TEMP_MASK 0x0003fe00
202#define CTF_TEMP_SHIFT 9
203
204#define CG_SPLL_FUNC_CNTL 0xC0500140
205#define SPLL_RESET (1 << 0)
206#define SPLL_PWRON (1 << 1)
207#define SPLL_BYPASS_EN (1 << 3)
208#define SPLL_REF_DIV(x) ((x) << 5)
209#define SPLL_REF_DIV_MASK (0x3f << 5)
210#define SPLL_PDIV_A(x) ((x) << 20)
211#define SPLL_PDIV_A_MASK (0x7f << 20)
212#define SPLL_PDIV_A_SHIFT 20
213#define CG_SPLL_FUNC_CNTL_2 0xC0500144
214#define SCLK_MUX_SEL(x) ((x) << 0)
215#define SCLK_MUX_SEL_MASK (0x1ff << 0)
216#define CG_SPLL_FUNC_CNTL_3 0xC0500148
217#define SPLL_FB_DIV(x) ((x) << 0)
218#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
219#define SPLL_FB_DIV_SHIFT 0
220#define SPLL_DITHEN (1 << 28)
221#define CG_SPLL_FUNC_CNTL_4 0xC050014C
222
223#define CG_SPLL_SPREAD_SPECTRUM 0xC0500164
224#define SSEN (1 << 0)
225#define CLK_S(x) ((x) << 4)
226#define CLK_S_MASK (0xfff << 4)
227#define CLK_S_SHIFT 4
228#define CG_SPLL_SPREAD_SPECTRUM_2 0xC0500168
229#define CLK_V(x) ((x) << 0)
230#define CLK_V_MASK (0x3ffffff << 0)
231#define CLK_V_SHIFT 0
232
233#define MPLL_BYPASSCLK_SEL 0xC050019C
234# define MPLL_CLKOUT_SEL(x) ((x) << 8)
235# define MPLL_CLKOUT_SEL_MASK 0xFF00
35#define CG_CLKPIN_CNTL 0xC05001A0 236#define CG_CLKPIN_CNTL 0xC05001A0
36# define XTALIN_DIVIDE (1 << 1) 237# define XTALIN_DIVIDE (1 << 1)
37 238# define BCLK_AS_XCLK (1 << 2)
239#define CG_CLKPIN_CNTL_2 0xC05001A4
240# define FORCE_BIF_REFCLK_EN (1 << 3)
241# define MUX_TCLK_TO_XCLK (1 << 8)
242#define THM_CLK_CNTL 0xC05001A8
243# define CMON_CLK_SEL(x) ((x) << 0)
244# define CMON_CLK_SEL_MASK 0xFF
245# define TMON_CLK_SEL(x) ((x) << 8)
246# define TMON_CLK_SEL_MASK 0xFF00
247#define MISC_CLK_CTRL 0xC05001AC
248# define DEEP_SLEEP_CLK_SEL(x) ((x) << 0)
249# define DEEP_SLEEP_CLK_SEL_MASK 0xFF
250# define ZCLK_SEL(x) ((x) << 8)
251# define ZCLK_SEL_MASK 0xFF00
252
253/* KV/KB */
254#define CG_THERMAL_INT_CTRL 0xC2100028
255#define DIG_THERM_INTH(x) ((x) << 0)
256#define DIG_THERM_INTH_MASK 0x000000FF
257#define DIG_THERM_INTH_SHIFT 0
258#define DIG_THERM_INTL(x) ((x) << 8)
259#define DIG_THERM_INTL_MASK 0x0000FF00
260#define DIG_THERM_INTL_SHIFT 8
261#define THERM_INTH_MASK (1 << 24)
262#define THERM_INTL_MASK (1 << 25)
263
264/* PCIE registers idx/data 0x38/0x3c */
265#define PB0_PIF_PWRDOWN_0 0x1100012 /* PCIE */
266# define PLL_POWER_STATE_IN_TXS2_0(x) ((x) << 7)
267# define PLL_POWER_STATE_IN_TXS2_0_MASK (0x7 << 7)
268# define PLL_POWER_STATE_IN_TXS2_0_SHIFT 7
269# define PLL_POWER_STATE_IN_OFF_0(x) ((x) << 10)
270# define PLL_POWER_STATE_IN_OFF_0_MASK (0x7 << 10)
271# define PLL_POWER_STATE_IN_OFF_0_SHIFT 10
272# define PLL_RAMP_UP_TIME_0(x) ((x) << 24)
273# define PLL_RAMP_UP_TIME_0_MASK (0x7 << 24)
274# define PLL_RAMP_UP_TIME_0_SHIFT 24
275#define PB0_PIF_PWRDOWN_1 0x1100013 /* PCIE */
276# define PLL_POWER_STATE_IN_TXS2_1(x) ((x) << 7)
277# define PLL_POWER_STATE_IN_TXS2_1_MASK (0x7 << 7)
278# define PLL_POWER_STATE_IN_TXS2_1_SHIFT 7
279# define PLL_POWER_STATE_IN_OFF_1(x) ((x) << 10)
280# define PLL_POWER_STATE_IN_OFF_1_MASK (0x7 << 10)
281# define PLL_POWER_STATE_IN_OFF_1_SHIFT 10
282# define PLL_RAMP_UP_TIME_1(x) ((x) << 24)
283# define PLL_RAMP_UP_TIME_1_MASK (0x7 << 24)
284# define PLL_RAMP_UP_TIME_1_SHIFT 24
285
286#define PCIE_CNTL2 0x1001001c /* PCIE */
287# define SLV_MEM_LS_EN (1 << 16)
288# define SLV_MEM_AGGRESSIVE_LS_EN (1 << 17)
289# define MST_MEM_LS_EN (1 << 18)
290# define REPLAY_MEM_LS_EN (1 << 19)
291
292#define PCIE_LC_STATUS1 0x1400028 /* PCIE */
293# define LC_REVERSE_RCVR (1 << 0)
294# define LC_REVERSE_XMIT (1 << 1)
295# define LC_OPERATING_LINK_WIDTH_MASK (0x7 << 2)
296# define LC_OPERATING_LINK_WIDTH_SHIFT 2
297# define LC_DETECTED_LINK_WIDTH_MASK (0x7 << 5)
298# define LC_DETECTED_LINK_WIDTH_SHIFT 5
299
300#define PCIE_P_CNTL 0x1400040 /* PCIE */
301# define P_IGNORE_EDB_ERR (1 << 6)
302
303#define PB1_PIF_PWRDOWN_0 0x2100012 /* PCIE */
304#define PB1_PIF_PWRDOWN_1 0x2100013 /* PCIE */
305
306#define PCIE_LC_CNTL 0x100100A0 /* PCIE */
307# define LC_L0S_INACTIVITY(x) ((x) << 8)
308# define LC_L0S_INACTIVITY_MASK (0xf << 8)
309# define LC_L0S_INACTIVITY_SHIFT 8
310# define LC_L1_INACTIVITY(x) ((x) << 12)
311# define LC_L1_INACTIVITY_MASK (0xf << 12)
312# define LC_L1_INACTIVITY_SHIFT 12
313# define LC_PMI_TO_L1_DIS (1 << 16)
314# define LC_ASPM_TO_L1_DIS (1 << 24)
315
316#define PCIE_LC_LINK_WIDTH_CNTL 0x100100A2 /* PCIE */
317# define LC_LINK_WIDTH_SHIFT 0
318# define LC_LINK_WIDTH_MASK 0x7
319# define LC_LINK_WIDTH_X0 0
320# define LC_LINK_WIDTH_X1 1
321# define LC_LINK_WIDTH_X2 2
322# define LC_LINK_WIDTH_X4 3
323# define LC_LINK_WIDTH_X8 4
324# define LC_LINK_WIDTH_X16 6
325# define LC_LINK_WIDTH_RD_SHIFT 4
326# define LC_LINK_WIDTH_RD_MASK 0x70
327# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
328# define LC_RECONFIG_NOW (1 << 8)
329# define LC_RENEGOTIATION_SUPPORT (1 << 9)
330# define LC_RENEGOTIATE_EN (1 << 10)
331# define LC_SHORT_RECONFIG_EN (1 << 11)
332# define LC_UPCONFIGURE_SUPPORT (1 << 12)
333# define LC_UPCONFIGURE_DIS (1 << 13)
334# define LC_DYN_LANES_PWR_STATE(x) ((x) << 21)
335# define LC_DYN_LANES_PWR_STATE_MASK (0x3 << 21)
336# define LC_DYN_LANES_PWR_STATE_SHIFT 21
337#define PCIE_LC_N_FTS_CNTL 0x100100a3 /* PCIE */
338# define LC_XMIT_N_FTS(x) ((x) << 0)
339# define LC_XMIT_N_FTS_MASK (0xff << 0)
340# define LC_XMIT_N_FTS_SHIFT 0
341# define LC_XMIT_N_FTS_OVERRIDE_EN (1 << 8)
342# define LC_N_FTS_MASK (0xff << 24)
343#define PCIE_LC_SPEED_CNTL 0x100100A4 /* PCIE */
344# define LC_GEN2_EN_STRAP (1 << 0)
345# define LC_GEN3_EN_STRAP (1 << 1)
346# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 2)
347# define LC_TARGET_LINK_SPEED_OVERRIDE_MASK (0x3 << 3)
348# define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT 3
349# define LC_FORCE_EN_SW_SPEED_CHANGE (1 << 5)
350# define LC_FORCE_DIS_SW_SPEED_CHANGE (1 << 6)
351# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 7)
352# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 8)
353# define LC_INITIATE_LINK_SPEED_CHANGE (1 << 9)
354# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 10)
355# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 10
356# define LC_CURRENT_DATA_RATE_MASK (0x3 << 13) /* 0/1/2 = gen1/2/3 */
357# define LC_CURRENT_DATA_RATE_SHIFT 13
358# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 16)
359# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 18)
360# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 19)
361# define LC_OTHER_SIDE_EVER_SENT_GEN3 (1 << 20)
362# define LC_OTHER_SIDE_SUPPORTS_GEN3 (1 << 21)
363
364#define PCIE_LC_CNTL2 0x100100B1 /* PCIE */
365# define LC_ALLOW_PDWN_IN_L1 (1 << 17)
366# define LC_ALLOW_PDWN_IN_L23 (1 << 18)
367
368#define PCIE_LC_CNTL3 0x100100B5 /* PCIE */
369# define LC_GO_TO_RECOVERY (1 << 30)
370#define PCIE_LC_CNTL4 0x100100B6 /* PCIE */
371# define LC_REDO_EQ (1 << 5)
372# define LC_SET_QUIESCE (1 << 13)
373
374/* direct registers */
38#define PCIE_INDEX 0x38 375#define PCIE_INDEX 0x38
39#define PCIE_DATA 0x3C 376#define PCIE_DATA 0x3C
40 377
378#define SMC_IND_INDEX_0 0x200
379#define SMC_IND_DATA_0 0x204
380
381#define SMC_IND_ACCESS_CNTL 0x240
382#define AUTO_INCREMENT_IND_0 (1 << 0)
383
384#define SMC_MESSAGE_0 0x250
385#define SMC_MSG_MASK 0xffff
386#define SMC_RESP_0 0x254
387#define SMC_RESP_MASK 0xffff
388
389#define SMC_MSG_ARG_0 0x290
390
41#define VGA_HDP_CONTROL 0x328 391#define VGA_HDP_CONTROL 0x328
42#define VGA_MEMORY_DISABLE (1 << 4) 392#define VGA_MEMORY_DISABLE (1 << 4)
43 393
44#define DMIF_ADDR_CALC 0xC00 394#define DMIF_ADDR_CALC 0xC00
45 395
396#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
397# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
398# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
399
46#define SRBM_GFX_CNTL 0xE44 400#define SRBM_GFX_CNTL 0xE44
47#define PIPEID(x) ((x) << 0) 401#define PIPEID(x) ((x) << 0)
48#define MEID(x) ((x) << 2) 402#define MEID(x) ((x) << 2)
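The new register definitions above all follow the same convention: a FIELD(x) macro that shifts a value into place, plus FIELD_MASK (and sometimes FIELD_SHIFT) to isolate it. As a quick illustration of how these are meant to be combined, here is a sketch using the VBI_TIMER_COUNT field of CG_DISPLAY_GAP_CNTL defined earlier in this hunk; the starting register value is made up, not hardware state:

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch, not part of the patch: read-modify-write of a field
 * using the FIELD(x)/FIELD_MASK macro convention from cikd.h. */
#define VBI_TIMER_COUNT(x)   ((x) << 4)
#define VBI_TIMER_COUNT_MASK (0x3fff << 4)

static uint32_t set_vbi_timer_count(uint32_t reg, uint32_t count)
{
        reg &= ~VBI_TIMER_COUNT_MASK;                   /* clear the old field */
        reg |= VBI_TIMER_COUNT(count) & VBI_TIMER_COUNT_MASK;
        return reg;
}

int main(void)
{
        uint32_t reg = 0x01000003;                      /* hypothetical value */
        printf("0x%08x\n", set_vbi_timer_count(reg, 0x123));
        return 0;
}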
@@ -172,6 +526,10 @@
172#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C 526#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
173#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x1580 527#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x1580
174 528
529#define VM_L2_CG 0x15c0
530#define MC_CG_ENABLE (1 << 18)
531#define MC_LS_ENABLE (1 << 19)
532
175#define MC_SHARED_CHMAP 0x2004 533#define MC_SHARED_CHMAP 0x2004
176#define NOOFCHAN_SHIFT 12 534#define NOOFCHAN_SHIFT 12
177#define NOOFCHAN_MASK 0x0000f000 535#define NOOFCHAN_MASK 0x0000f000
@@ -201,6 +559,17 @@
201 559
202#define MC_SHARED_BLACKOUT_CNTL 0x20ac 560#define MC_SHARED_BLACKOUT_CNTL 0x20ac
203 561
562#define MC_HUB_MISC_HUB_CG 0x20b8
563#define MC_HUB_MISC_VM_CG 0x20bc
564
565#define MC_HUB_MISC_SIP_CG 0x20c0
566
567#define MC_XPB_CLK_GAT 0x2478
568
569#define MC_CITF_MISC_RD_CG 0x2648
570#define MC_CITF_MISC_WR_CG 0x264c
571#define MC_CITF_MISC_VM_CG 0x2650
572
204#define MC_ARB_RAMCFG 0x2760 573#define MC_ARB_RAMCFG 0x2760
205#define NOOFBANK_SHIFT 0 574#define NOOFBANK_SHIFT 0
206#define NOOFBANK_MASK 0x00000003 575#define NOOFBANK_MASK 0x00000003
@@ -215,9 +584,37 @@
215#define NOOFGROUPS_SHIFT 12 584#define NOOFGROUPS_SHIFT 12
216#define NOOFGROUPS_MASK 0x00001000 585#define NOOFGROUPS_MASK 0x00001000
217 586
587#define MC_ARB_DRAM_TIMING 0x2774
588#define MC_ARB_DRAM_TIMING2 0x2778
589
590#define MC_ARB_BURST_TIME 0x2808
591#define STATE0(x) ((x) << 0)
592#define STATE0_MASK (0x1f << 0)
593#define STATE0_SHIFT 0
594#define STATE1(x) ((x) << 5)
595#define STATE1_MASK (0x1f << 5)
596#define STATE1_SHIFT 5
597#define STATE2(x) ((x) << 10)
598#define STATE2_MASK (0x1f << 10)
599#define STATE2_SHIFT 10
600#define STATE3(x) ((x) << 15)
601#define STATE3_MASK (0x1f << 15)
602#define STATE3_SHIFT 15
603
604#define MC_SEQ_RAS_TIMING 0x28a0
605#define MC_SEQ_CAS_TIMING 0x28a4
606#define MC_SEQ_MISC_TIMING 0x28a8
607#define MC_SEQ_MISC_TIMING2 0x28ac
608#define MC_SEQ_PMG_TIMING 0x28b0
609#define MC_SEQ_RD_CTL_D0 0x28b4
610#define MC_SEQ_RD_CTL_D1 0x28b8
611#define MC_SEQ_WR_CTL_D0 0x28bc
612#define MC_SEQ_WR_CTL_D1 0x28c0
613
218#define MC_SEQ_SUP_CNTL 0x28c8 614#define MC_SEQ_SUP_CNTL 0x28c8
219#define RUN_MASK (1 << 0) 615#define RUN_MASK (1 << 0)
220#define MC_SEQ_SUP_PGM 0x28cc 616#define MC_SEQ_SUP_PGM 0x28cc
617#define MC_PMG_AUTO_CMD 0x28d0
221 618
222#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x28e8 619#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x28e8
223#define TRAIN_DONE_D0 (1 << 30) 620#define TRAIN_DONE_D0 (1 << 30)
@@ -226,10 +623,92 @@
226#define MC_IO_PAD_CNTL_D0 0x29d0 623#define MC_IO_PAD_CNTL_D0 0x29d0
227#define MEM_FALL_OUT_CMD (1 << 8) 624#define MEM_FALL_OUT_CMD (1 << 8)
228 625
626#define MC_SEQ_MISC0 0x2a00
627#define MC_SEQ_MISC0_VEN_ID_SHIFT 8
628#define MC_SEQ_MISC0_VEN_ID_MASK 0x00000f00
629#define MC_SEQ_MISC0_VEN_ID_VALUE 3
630#define MC_SEQ_MISC0_REV_ID_SHIFT 12
631#define MC_SEQ_MISC0_REV_ID_MASK 0x0000f000
632#define MC_SEQ_MISC0_REV_ID_VALUE 1
633#define MC_SEQ_MISC0_GDDR5_SHIFT 28
634#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
635#define MC_SEQ_MISC0_GDDR5_VALUE 5
636#define MC_SEQ_MISC1 0x2a04
637#define MC_SEQ_RESERVE_M 0x2a08
638#define MC_PMG_CMD_EMRS 0x2a0c
639
229#define MC_SEQ_IO_DEBUG_INDEX 0x2a44 640#define MC_SEQ_IO_DEBUG_INDEX 0x2a44
230#define MC_SEQ_IO_DEBUG_DATA 0x2a48 641#define MC_SEQ_IO_DEBUG_DATA 0x2a48
231 642
643#define MC_SEQ_MISC5 0x2a54
644#define MC_SEQ_MISC6 0x2a58
645
646#define MC_SEQ_MISC7 0x2a64
647
648#define MC_SEQ_RAS_TIMING_LP 0x2a6c
649#define MC_SEQ_CAS_TIMING_LP 0x2a70
650#define MC_SEQ_MISC_TIMING_LP 0x2a74
651#define MC_SEQ_MISC_TIMING2_LP 0x2a78
652#define MC_SEQ_WR_CTL_D0_LP 0x2a7c
653#define MC_SEQ_WR_CTL_D1_LP 0x2a80
654#define MC_SEQ_PMG_CMD_EMRS_LP 0x2a84
655#define MC_SEQ_PMG_CMD_MRS_LP 0x2a88
656
657#define MC_PMG_CMD_MRS 0x2aac
658
659#define MC_SEQ_RD_CTL_D0_LP 0x2b1c
660#define MC_SEQ_RD_CTL_D1_LP 0x2b20
661
662#define MC_PMG_CMD_MRS1 0x2b44
663#define MC_SEQ_PMG_CMD_MRS1_LP 0x2b48
664#define MC_SEQ_PMG_TIMING_LP 0x2b4c
665
666#define MC_SEQ_WR_CTL_2 0x2b54
667#define MC_SEQ_WR_CTL_2_LP 0x2b58
668#define MC_PMG_CMD_MRS2 0x2b5c
669#define MC_SEQ_PMG_CMD_MRS2_LP 0x2b60
670
671#define MCLK_PWRMGT_CNTL 0x2ba0
672# define DLL_SPEED(x) ((x) << 0)
673# define DLL_SPEED_MASK (0x1f << 0)
674# define DLL_READY (1 << 6)
675# define MC_INT_CNTL (1 << 7)
676# define MRDCK0_PDNB (1 << 8)
677# define MRDCK1_PDNB (1 << 9)
678# define MRDCK0_RESET (1 << 16)
679# define MRDCK1_RESET (1 << 17)
680# define DLL_READY_READ (1 << 24)
681#define DLL_CNTL 0x2ba4
682# define MRDCK0_BYPASS (1 << 24)
683# define MRDCK1_BYPASS (1 << 25)
684
685#define MPLL_FUNC_CNTL 0x2bb4
686#define BWCTRL(x) ((x) << 20)
687#define BWCTRL_MASK (0xff << 20)
688#define MPLL_FUNC_CNTL_1 0x2bb8
689#define VCO_MODE(x) ((x) << 0)
690#define VCO_MODE_MASK (3 << 0)
691#define CLKFRAC(x) ((x) << 4)
692#define CLKFRAC_MASK (0xfff << 4)
693#define CLKF(x) ((x) << 16)
694#define CLKF_MASK (0xfff << 16)
695#define MPLL_FUNC_CNTL_2 0x2bbc
696#define MPLL_AD_FUNC_CNTL 0x2bc0
697#define YCLK_POST_DIV(x) ((x) << 0)
698#define YCLK_POST_DIV_MASK (7 << 0)
699#define MPLL_DQ_FUNC_CNTL 0x2bc4
700#define YCLK_SEL(x) ((x) << 4)
701#define YCLK_SEL_MASK (1 << 4)
702
703#define MPLL_SS1 0x2bcc
704#define CLKV(x) ((x) << 0)
705#define CLKV_MASK (0x3ffffff << 0)
706#define MPLL_SS2 0x2bd0
707#define CLKS(x) ((x) << 0)
708#define CLKS_MASK (0xfff << 0)
709
232#define HDP_HOST_PATH_CNTL 0x2C00 710#define HDP_HOST_PATH_CNTL 0x2C00
711#define CLOCK_GATING_DIS (1 << 23)
233#define HDP_NONSURFACE_BASE 0x2C04 712#define HDP_NONSURFACE_BASE 0x2C04
234#define HDP_NONSURFACE_INFO 0x2C08 713#define HDP_NONSURFACE_INFO 0x2C08
235#define HDP_NONSURFACE_SIZE 0x2C0C 714#define HDP_NONSURFACE_SIZE 0x2C0C
@@ -237,6 +716,26 @@
237#define HDP_ADDR_CONFIG 0x2F48 716#define HDP_ADDR_CONFIG 0x2F48
238#define HDP_MISC_CNTL 0x2F4C 717#define HDP_MISC_CNTL 0x2F4C
239#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0) 718#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
719#define HDP_MEM_POWER_LS 0x2F50
720#define HDP_LS_ENABLE (1 << 0)
721
722#define ATC_MISC_CG 0x3350
723
724#define MC_SEQ_CNTL_3 0x3600
725# define CAC_EN (1 << 31)
726#define MC_SEQ_G5PDX_CTRL 0x3604
727#define MC_SEQ_G5PDX_CTRL_LP 0x3608
728#define MC_SEQ_G5PDX_CMD0 0x360c
729#define MC_SEQ_G5PDX_CMD0_LP 0x3610
730#define MC_SEQ_G5PDX_CMD1 0x3614
731#define MC_SEQ_G5PDX_CMD1_LP 0x3618
732
733#define MC_SEQ_PMG_DVS_CTL 0x3628
734#define MC_SEQ_PMG_DVS_CTL_LP 0x362c
735#define MC_SEQ_PMG_DVS_CMD 0x3630
736#define MC_SEQ_PMG_DVS_CMD_LP 0x3634
737#define MC_SEQ_DLL_STBY 0x3638
738#define MC_SEQ_DLL_STBY_LP 0x363c
240 739
241#define IH_RB_CNTL 0x3e00 740#define IH_RB_CNTL 0x3e00
242# define IH_RB_ENABLE (1 << 0) 741# define IH_RB_ENABLE (1 << 0)
@@ -265,6 +764,9 @@
265# define MC_WR_CLEAN_CNT(x) ((x) << 20) 764# define MC_WR_CLEAN_CNT(x) ((x) << 20)
266# define MC_VMID(x) ((x) << 25) 765# define MC_VMID(x) ((x) << 25)
267 766
767#define BIF_LNCNT_RESET 0x5220
768# define RESET_LNCNT_EN (1 << 0)
769
268#define CONFIG_MEMSIZE 0x5428 770#define CONFIG_MEMSIZE 0x5428
269 771
270#define INTERRUPT_CNTL 0x5468 772#define INTERRUPT_CNTL 0x5468
@@ -401,6 +903,9 @@
401# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) 903# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
402# define DC_HPDx_EN (1 << 28) 904# define DC_HPDx_EN (1 << 28)
403 905
906#define DPG_PIPE_STUTTER_CONTROL 0x6cd4
907# define STUTTER_ENABLE (1 << 0)
908
404#define GRBM_CNTL 0x8000 909#define GRBM_CNTL 0x8000
405#define GRBM_READ_TIMEOUT(x) ((x) << 0) 910#define GRBM_READ_TIMEOUT(x) ((x) << 0)
406 911
@@ -504,6 +1009,9 @@
504 1009
505#define CP_RB0_RPTR 0x8700 1010#define CP_RB0_RPTR 0x8700
506#define CP_RB_WPTR_DELAY 0x8704 1011#define CP_RB_WPTR_DELAY 0x8704
1012#define CP_RB_WPTR_POLL_CNTL 0x8708
1013#define IDLE_POLL_COUNT(x) ((x) << 16)
1014#define IDLE_POLL_COUNT_MASK (0xffff << 16)
507 1015
508#define CP_MEQ_THRESHOLDS 0x8764 1016#define CP_MEQ_THRESHOLDS 0x8764
509#define MEQ1_START(x) ((x) << 0) 1017#define MEQ1_START(x) ((x) << 0)
@@ -730,6 +1238,9 @@
730# define CP_RINGID1_INT_STAT (1 << 30) 1238# define CP_RINGID1_INT_STAT (1 << 30)
731# define CP_RINGID0_INT_STAT (1 << 31) 1239# define CP_RINGID0_INT_STAT (1 << 31)
732 1240
1241#define CP_MEM_SLP_CNTL 0xC1E4
1242# define CP_MEM_LS_EN (1 << 0)
1243
733#define CP_CPF_DEBUG 0xC200 1244#define CP_CPF_DEBUG 0xC200
734 1245
735#define CP_PQ_WPTR_POLL_CNTL 0xC20C 1246#define CP_PQ_WPTR_POLL_CNTL 0xC20C
@@ -775,14 +1286,20 @@
775 1286
776#define RLC_MC_CNTL 0xC30C 1287#define RLC_MC_CNTL 0xC30C
777 1288
1289#define RLC_MEM_SLP_CNTL 0xC318
1290# define RLC_MEM_LS_EN (1 << 0)
1291
778#define RLC_LB_CNTR_MAX 0xC348 1292#define RLC_LB_CNTR_MAX 0xC348
779 1293
780#define RLC_LB_CNTL 0xC364 1294#define RLC_LB_CNTL 0xC364
1295# define LOAD_BALANCE_ENABLE (1 << 0)
781 1296
782#define RLC_LB_CNTR_INIT 0xC36C 1297#define RLC_LB_CNTR_INIT 0xC36C
783 1298
784#define RLC_SAVE_AND_RESTORE_BASE 0xC374 1299#define RLC_SAVE_AND_RESTORE_BASE 0xC374
785#define RLC_DRIVER_DMA_STATUS 0xC378 1300#define RLC_DRIVER_DMA_STATUS 0xC378 /* dGPU */
1301#define RLC_CP_TABLE_RESTORE 0xC378 /* APU */
1302#define RLC_PG_DELAY_2 0xC37C
786 1303
787#define RLC_GPM_UCODE_ADDR 0xC388 1304#define RLC_GPM_UCODE_ADDR 0xC388
788#define RLC_GPM_UCODE_DATA 0xC38C 1305#define RLC_GPM_UCODE_DATA 0xC38C
@@ -791,12 +1308,52 @@
791#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC398 1308#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC398
792#define RLC_UCODE_CNTL 0xC39C 1309#define RLC_UCODE_CNTL 0xC39C
793 1310
1311#define RLC_GPM_STAT 0xC400
1312# define RLC_GPM_BUSY (1 << 0)
1313# define GFX_POWER_STATUS (1 << 1)
1314# define GFX_CLOCK_STATUS (1 << 2)
1315
1316#define RLC_PG_CNTL 0xC40C
1317# define GFX_PG_ENABLE (1 << 0)
1318# define GFX_PG_SRC (1 << 1)
1319# define DYN_PER_CU_PG_ENABLE (1 << 2)
1320# define STATIC_PER_CU_PG_ENABLE (1 << 3)
1321# define DISABLE_GDS_PG (1 << 13)
1322# define DISABLE_CP_PG (1 << 15)
1323# define SMU_CLK_SLOWDOWN_ON_PU_ENABLE (1 << 17)
1324# define SMU_CLK_SLOWDOWN_ON_PD_ENABLE (1 << 18)
1325
1326#define RLC_CGTT_MGCG_OVERRIDE 0xC420
794#define RLC_CGCG_CGLS_CTRL 0xC424 1327#define RLC_CGCG_CGLS_CTRL 0xC424
1328# define CGCG_EN (1 << 0)
1329# define CGLS_EN (1 << 1)
1330
1331#define RLC_PG_DELAY 0xC434
795 1332
796#define RLC_LB_INIT_CU_MASK 0xC43C 1333#define RLC_LB_INIT_CU_MASK 0xC43C
797 1334
798#define RLC_LB_PARAMS 0xC444 1335#define RLC_LB_PARAMS 0xC444
799 1336
1337#define RLC_PG_AO_CU_MASK 0xC44C
1338
1339#define RLC_MAX_PG_CU 0xC450
1340# define MAX_PU_CU(x) ((x) << 0)
1341# define MAX_PU_CU_MASK (0xff << 0)
1342#define RLC_AUTO_PG_CTRL 0xC454
1343# define AUTO_PG_EN (1 << 0)
1344# define GRBM_REG_SGIT(x) ((x) << 3)
1345# define GRBM_REG_SGIT_MASK (0xffff << 3)
1346
1347#define RLC_SERDES_WR_CU_MASTER_MASK 0xC474
1348#define RLC_SERDES_WR_NONCU_MASTER_MASK 0xC478
1349#define RLC_SERDES_WR_CTRL 0xC47C
1350#define BPM_ADDR(x) ((x) << 0)
1351#define BPM_ADDR_MASK (0xff << 0)
1352#define CGLS_ENABLE (1 << 16)
1353#define CGCG_OVERRIDE_0 (1 << 20)
1354#define MGCG_OVERRIDE_0 (1 << 22)
1355#define MGCG_OVERRIDE_1 (1 << 23)
1356
800#define RLC_SERDES_CU_MASTER_BUSY 0xC484 1357#define RLC_SERDES_CU_MASTER_BUSY 0xC484
801#define RLC_SERDES_NONCU_MASTER_BUSY 0xC488 1358#define RLC_SERDES_NONCU_MASTER_BUSY 0xC488
802# define SE_MASTER_BUSY_MASK 0x0000ffff 1359# define SE_MASTER_BUSY_MASK 0x0000ffff
@@ -807,6 +1364,13 @@
807#define RLC_GPM_SCRATCH_ADDR 0xC4B0 1364#define RLC_GPM_SCRATCH_ADDR 0xC4B0
808#define RLC_GPM_SCRATCH_DATA 0xC4B4 1365#define RLC_GPM_SCRATCH_DATA 0xC4B4
809 1366
1367#define RLC_GPR_REG2 0xC4E8
1368#define REQ 0x00000001
1369#define MESSAGE(x) ((x) << 1)
1370#define MESSAGE_MASK 0x0000001e
1371#define MSG_ENTER_RLC_SAFE_MODE 1
1372#define MSG_EXIT_RLC_SAFE_MODE 0
1373
810#define CP_HPD_EOP_BASE_ADDR 0xC904 1374#define CP_HPD_EOP_BASE_ADDR 0xC904
811#define CP_HPD_EOP_BASE_ADDR_HI 0xC908 1375#define CP_HPD_EOP_BASE_ADDR_HI 0xC908
812#define CP_HPD_EOP_VMID 0xC90C 1376#define CP_HPD_EOP_VMID 0xC90C
@@ -851,6 +1415,8 @@
851#define MQD_VMID(x) ((x) << 0) 1415#define MQD_VMID(x) ((x) << 0)
852#define MQD_VMID_MASK (0xf << 0) 1416#define MQD_VMID_MASK (0xf << 0)
853 1417
1418#define DB_RENDER_CONTROL 0x28000
1419
854#define PA_SC_RASTER_CONFIG 0x28350 1420#define PA_SC_RASTER_CONFIG 0x28350
855# define RASTER_CONFIG_RB_MAP_0 0 1421# define RASTER_CONFIG_RB_MAP_0 0
856# define RASTER_CONFIG_RB_MAP_1 1 1422# define RASTER_CONFIG_RB_MAP_1 1
@@ -944,6 +1510,16 @@
944 1510
945#define CP_PERFMON_CNTL 0x36020 1511#define CP_PERFMON_CNTL 0x36020
946 1512
1513#define CGTS_SM_CTRL_REG 0x3c000
1514#define SM_MODE(x) ((x) << 17)
1515#define SM_MODE_MASK (0x7 << 17)
1516#define SM_MODE_ENABLE (1 << 20)
1517#define CGTS_OVERRIDE (1 << 21)
1518#define CGTS_LS_OVERRIDE (1 << 22)
1519#define ON_MONITOR_ADD_EN (1 << 23)
1520#define ON_MONITOR_ADD(x) ((x) << 24)
1521#define ON_MONITOR_ADD_MASK (0xff << 24)
1522
947#define CGTS_TCC_DISABLE 0x3c00c 1523#define CGTS_TCC_DISABLE 0x3c00c
948#define CGTS_USER_TCC_DISABLE 0x3c010 1524#define CGTS_USER_TCC_DISABLE 0x3c010
949#define TCC_DISABLE_MASK 0xFFFF0000 1525#define TCC_DISABLE_MASK 0xFFFF0000
@@ -1176,6 +1752,8 @@
1176 1752
1177#define SDMA0_UCODE_ADDR 0xD000 1753#define SDMA0_UCODE_ADDR 0xD000
1178#define SDMA0_UCODE_DATA 0xD004 1754#define SDMA0_UCODE_DATA 0xD004
1755#define SDMA0_POWER_CNTL 0xD008
1756#define SDMA0_CLK_CTRL 0xD00C
1179 1757
1180#define SDMA0_CNTL 0xD010 1758#define SDMA0_CNTL 0xD010
1181# define TRAP_ENABLE (1 << 0) 1759# define TRAP_ENABLE (1 << 0)
@@ -1300,6 +1878,13 @@
1300#define UVD_RBC_RB_RPTR 0xf690 1878#define UVD_RBC_RB_RPTR 0xf690
1301#define UVD_RBC_RB_WPTR 0xf694 1879#define UVD_RBC_RB_WPTR 0xf694
1302 1880
1881#define UVD_CGC_CTRL 0xF4B0
1882# define DCM (1 << 0)
1883# define CG_DT(x) ((x) << 2)
1884# define CG_DT_MASK (0xf << 2)
1885# define CLK_OD(x) ((x) << 6)
1886# define CLK_OD_MASK (0x1f << 6)
1887
1303/* UVD clocks */ 1888/* UVD clocks */
1304 1889
1305#define CG_DCLK_CNTL 0xC050009C 1890#define CG_DCLK_CNTL 0xC050009C
@@ -1310,4 +1895,7 @@
1310#define CG_VCLK_CNTL 0xC05000A4 1895#define CG_VCLK_CNTL 0xC05000A4
1311#define CG_VCLK_STATUS 0xC05000A8 1896#define CG_VCLK_STATUS 0xC05000A8
1312 1897
1898/* UVD CTX indirect */
1899#define UVD_CGC_MEM_CTRL 0xC0
1900
1313#endif 1901#endif
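Note on the RLC_GPR_REG2 interface added above: the REQ bit plus the MESSAGE field carrying MSG_ENTER_RLC_SAFE_MODE / MSG_EXIT_RLC_SAFE_MODE form a simple request/acknowledge handshake. The sketch below only illustrates how such a register is typically driven, under the assumption that the hardware clears REQ once it has consumed the message; the function name is made up and this is not the helper the driver actually ships.

/* Illustrative only: send a safe-mode message via RLC_GPR_REG2 and wait
 * for the RLC to acknowledge it by clearing the REQ bit. */
static void example_rlc_send_msg(struct radeon_device *rdev, u32 msg)
{
	u32 i;

	WREG32(RLC_GPR_REG2, REQ | MESSAGE(msg));

	for (i = 0; i < rdev->usec_timeout; i++) {
		if ((RREG32(RLC_GPR_REG2) & REQ) == 0)
			break;		/* message accepted */
		udelay(1);
	}
}

A caller would typically bracket clock/power gating reprogramming with example_rlc_send_msg(rdev, MSG_ENTER_RLC_SAFE_MODE) and MSG_EXIT_RLC_SAFE_MODE.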
diff --git a/drivers/gpu/drm/radeon/clearstate_cayman.h b/drivers/gpu/drm/radeon/clearstate_cayman.h
index c00339440c5e..aa908c55a513 100644
--- a/drivers/gpu/drm/radeon/clearstate_cayman.h
+++ b/drivers/gpu/drm/radeon/clearstate_cayman.h
@@ -1073,7 +1073,7 @@ static const struct cs_extent_def SECT_CTRLCONST_defs[] =
1073 {SECT_CTRLCONST_def_1, 0x0000f3fc, 2 }, 1073 {SECT_CTRLCONST_def_1, 0x0000f3fc, 2 },
1074 { 0, 0, 0 } 1074 { 0, 0, 0 }
1075}; 1075};
1076struct cs_section_def cayman_cs_data[] = { 1076static const struct cs_section_def cayman_cs_data[] = {
1077 { SECT_CONTEXT_defs, SECT_CONTEXT }, 1077 { SECT_CONTEXT_defs, SECT_CONTEXT },
1078 { SECT_CLEAR_defs, SECT_CLEAR }, 1078 { SECT_CLEAR_defs, SECT_CLEAR },
1079 { SECT_CTRLCONST_defs, SECT_CTRLCONST }, 1079 { SECT_CTRLCONST_defs, SECT_CTRLCONST },
diff --git a/drivers/gpu/drm/radeon/clearstate_ci.h b/drivers/gpu/drm/radeon/clearstate_ci.h
new file mode 100644
index 000000000000..c3982f9475fb
--- /dev/null
+++ b/drivers/gpu/drm/radeon/clearstate_ci.h
@@ -0,0 +1,944 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24static const unsigned int ci_SECT_CONTEXT_def_1[] =
25{
26 0x00000000, // DB_RENDER_CONTROL
27 0x00000000, // DB_COUNT_CONTROL
28 0x00000000, // DB_DEPTH_VIEW
29 0x00000000, // DB_RENDER_OVERRIDE
30 0x00000000, // DB_RENDER_OVERRIDE2
31 0x00000000, // DB_HTILE_DATA_BASE
32 0, // HOLE
33 0, // HOLE
34 0x00000000, // DB_DEPTH_BOUNDS_MIN
35 0x00000000, // DB_DEPTH_BOUNDS_MAX
36 0x00000000, // DB_STENCIL_CLEAR
37 0x00000000, // DB_DEPTH_CLEAR
38 0x00000000, // PA_SC_SCREEN_SCISSOR_TL
39 0x40004000, // PA_SC_SCREEN_SCISSOR_BR
40 0, // HOLE
41 0x00000000, // DB_DEPTH_INFO
42 0x00000000, // DB_Z_INFO
43 0x00000000, // DB_STENCIL_INFO
44 0x00000000, // DB_Z_READ_BASE
45 0x00000000, // DB_STENCIL_READ_BASE
46 0x00000000, // DB_Z_WRITE_BASE
47 0x00000000, // DB_STENCIL_WRITE_BASE
48 0x00000000, // DB_DEPTH_SIZE
49 0x00000000, // DB_DEPTH_SLICE
50 0, // HOLE
51 0, // HOLE
52 0, // HOLE
53 0, // HOLE
54 0, // HOLE
55 0, // HOLE
56 0, // HOLE
57 0, // HOLE
58 0x00000000, // TA_BC_BASE_ADDR
59 0x00000000, // TA_BC_BASE_ADDR_HI
60 0, // HOLE
61 0, // HOLE
62 0, // HOLE
63 0, // HOLE
64 0, // HOLE
65 0, // HOLE
66 0, // HOLE
67 0, // HOLE
68 0, // HOLE
69 0, // HOLE
70 0, // HOLE
71 0, // HOLE
72 0, // HOLE
73 0, // HOLE
74 0, // HOLE
75 0, // HOLE
76 0, // HOLE
77 0, // HOLE
78 0, // HOLE
79 0, // HOLE
80 0, // HOLE
81 0, // HOLE
82 0, // HOLE
83 0, // HOLE
84 0, // HOLE
85 0, // HOLE
86 0, // HOLE
87 0, // HOLE
88 0, // HOLE
89 0, // HOLE
90 0, // HOLE
91 0, // HOLE
92 0, // HOLE
93 0, // HOLE
94 0, // HOLE
95 0, // HOLE
96 0, // HOLE
97 0, // HOLE
98 0, // HOLE
99 0, // HOLE
100 0, // HOLE
101 0, // HOLE
102 0, // HOLE
103 0, // HOLE
104 0, // HOLE
105 0, // HOLE
106 0, // HOLE
107 0, // HOLE
108 0, // HOLE
109 0, // HOLE
110 0, // HOLE
111 0, // HOLE
112 0, // HOLE
113 0, // HOLE
114 0, // HOLE
115 0, // HOLE
116 0, // HOLE
117 0, // HOLE
118 0, // HOLE
119 0, // HOLE
120 0, // HOLE
121 0, // HOLE
122 0, // HOLE
123 0, // HOLE
124 0, // HOLE
125 0, // HOLE
126 0, // HOLE
127 0, // HOLE
128 0, // HOLE
129 0, // HOLE
130 0, // HOLE
131 0, // HOLE
132 0, // HOLE
133 0, // HOLE
134 0, // HOLE
135 0, // HOLE
136 0, // HOLE
137 0, // HOLE
138 0, // HOLE
139 0, // HOLE
140 0, // HOLE
141 0, // HOLE
142 0, // HOLE
143 0, // HOLE
144 0, // HOLE
145 0, // HOLE
146 0, // HOLE
147 0, // HOLE
148 0x00000000, // COHER_DEST_BASE_HI_0
149 0x00000000, // COHER_DEST_BASE_HI_1
150 0x00000000, // COHER_DEST_BASE_HI_2
151 0x00000000, // COHER_DEST_BASE_HI_3
152 0x00000000, // COHER_DEST_BASE_2
153 0x00000000, // COHER_DEST_BASE_3
154 0x00000000, // PA_SC_WINDOW_OFFSET
155 0x80000000, // PA_SC_WINDOW_SCISSOR_TL
156 0x40004000, // PA_SC_WINDOW_SCISSOR_BR
157 0x0000ffff, // PA_SC_CLIPRECT_RULE
158 0x00000000, // PA_SC_CLIPRECT_0_TL
159 0x40004000, // PA_SC_CLIPRECT_0_BR
160 0x00000000, // PA_SC_CLIPRECT_1_TL
161 0x40004000, // PA_SC_CLIPRECT_1_BR
162 0x00000000, // PA_SC_CLIPRECT_2_TL
163 0x40004000, // PA_SC_CLIPRECT_2_BR
164 0x00000000, // PA_SC_CLIPRECT_3_TL
165 0x40004000, // PA_SC_CLIPRECT_3_BR
166 0xaa99aaaa, // PA_SC_EDGERULE
167 0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
168 0xffffffff, // CB_TARGET_MASK
169 0xffffffff, // CB_SHADER_MASK
170 0x80000000, // PA_SC_GENERIC_SCISSOR_TL
171 0x40004000, // PA_SC_GENERIC_SCISSOR_BR
172 0x00000000, // COHER_DEST_BASE_0
173 0x00000000, // COHER_DEST_BASE_1
174 0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
175 0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
176 0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
177 0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
178 0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
179 0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
180 0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
181 0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
182 0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
183 0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
184 0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
185 0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
186 0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
187 0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
188 0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
189 0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
190 0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
191 0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
192 0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
193 0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
194 0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
195 0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
196 0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
197 0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
198 0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
199 0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
200 0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
201 0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
202 0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
203 0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
204 0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
205 0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
206 0x00000000, // PA_SC_VPORT_ZMIN_0
207 0x3f800000, // PA_SC_VPORT_ZMAX_0
208 0x00000000, // PA_SC_VPORT_ZMIN_1
209 0x3f800000, // PA_SC_VPORT_ZMAX_1
210 0x00000000, // PA_SC_VPORT_ZMIN_2
211 0x3f800000, // PA_SC_VPORT_ZMAX_2
212 0x00000000, // PA_SC_VPORT_ZMIN_3
213 0x3f800000, // PA_SC_VPORT_ZMAX_3
214 0x00000000, // PA_SC_VPORT_ZMIN_4
215 0x3f800000, // PA_SC_VPORT_ZMAX_4
216 0x00000000, // PA_SC_VPORT_ZMIN_5
217 0x3f800000, // PA_SC_VPORT_ZMAX_5
218 0x00000000, // PA_SC_VPORT_ZMIN_6
219 0x3f800000, // PA_SC_VPORT_ZMAX_6
220 0x00000000, // PA_SC_VPORT_ZMIN_7
221 0x3f800000, // PA_SC_VPORT_ZMAX_7
222 0x00000000, // PA_SC_VPORT_ZMIN_8
223 0x3f800000, // PA_SC_VPORT_ZMAX_8
224 0x00000000, // PA_SC_VPORT_ZMIN_9
225 0x3f800000, // PA_SC_VPORT_ZMAX_9
226 0x00000000, // PA_SC_VPORT_ZMIN_10
227 0x3f800000, // PA_SC_VPORT_ZMAX_10
228 0x00000000, // PA_SC_VPORT_ZMIN_11
229 0x3f800000, // PA_SC_VPORT_ZMAX_11
230 0x00000000, // PA_SC_VPORT_ZMIN_12
231 0x3f800000, // PA_SC_VPORT_ZMAX_12
232 0x00000000, // PA_SC_VPORT_ZMIN_13
233 0x3f800000, // PA_SC_VPORT_ZMAX_13
234 0x00000000, // PA_SC_VPORT_ZMIN_14
235 0x3f800000, // PA_SC_VPORT_ZMAX_14
236 0x00000000, // PA_SC_VPORT_ZMIN_15
237 0x3f800000, // PA_SC_VPORT_ZMAX_15
238};
239static const unsigned int ci_SECT_CONTEXT_def_2[] =
240{
241 0x00000000, // PA_SC_SCREEN_EXTENT_CONTROL
242 0, // HOLE
243 0x00000000, // CP_PERFMON_CNTX_CNTL
244 0x00000000, // CP_RINGID
245 0x00000000, // CP_VMID
246 0, // HOLE
247 0, // HOLE
248 0, // HOLE
249 0, // HOLE
250 0, // HOLE
251 0, // HOLE
252 0, // HOLE
253 0, // HOLE
254 0, // HOLE
255 0, // HOLE
256 0, // HOLE
257 0, // HOLE
258 0, // HOLE
259 0, // HOLE
260 0, // HOLE
261 0, // HOLE
262 0, // HOLE
263 0, // HOLE
264 0, // HOLE
265 0, // HOLE
266 0, // HOLE
267 0, // HOLE
268 0, // HOLE
269 0, // HOLE
270 0, // HOLE
271 0, // HOLE
272 0, // HOLE
273 0, // HOLE
274 0, // HOLE
275 0, // HOLE
276 0, // HOLE
277 0, // HOLE
278 0, // HOLE
279 0, // HOLE
280 0, // HOLE
281 0, // HOLE
282 0, // HOLE
283 0xffffffff, // VGT_MAX_VTX_INDX
284 0x00000000, // VGT_MIN_VTX_INDX
285 0x00000000, // VGT_INDX_OFFSET
286 0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
287 0, // HOLE
288 0x00000000, // CB_BLEND_RED
289 0x00000000, // CB_BLEND_GREEN
290 0x00000000, // CB_BLEND_BLUE
291 0x00000000, // CB_BLEND_ALPHA
292 0, // HOLE
293 0, // HOLE
294 0x00000000, // DB_STENCIL_CONTROL
295 0x00000000, // DB_STENCILREFMASK
296 0x00000000, // DB_STENCILREFMASK_BF
297 0, // HOLE
298 0x00000000, // PA_CL_VPORT_XSCALE
299 0x00000000, // PA_CL_VPORT_XOFFSET
300 0x00000000, // PA_CL_VPORT_YSCALE
301 0x00000000, // PA_CL_VPORT_YOFFSET
302 0x00000000, // PA_CL_VPORT_ZSCALE
303 0x00000000, // PA_CL_VPORT_ZOFFSET
304 0x00000000, // PA_CL_VPORT_XSCALE_1
305 0x00000000, // PA_CL_VPORT_XOFFSET_1
306 0x00000000, // PA_CL_VPORT_YSCALE_1
307 0x00000000, // PA_CL_VPORT_YOFFSET_1
308 0x00000000, // PA_CL_VPORT_ZSCALE_1
309 0x00000000, // PA_CL_VPORT_ZOFFSET_1
310 0x00000000, // PA_CL_VPORT_XSCALE_2
311 0x00000000, // PA_CL_VPORT_XOFFSET_2
312 0x00000000, // PA_CL_VPORT_YSCALE_2
313 0x00000000, // PA_CL_VPORT_YOFFSET_2
314 0x00000000, // PA_CL_VPORT_ZSCALE_2
315 0x00000000, // PA_CL_VPORT_ZOFFSET_2
316 0x00000000, // PA_CL_VPORT_XSCALE_3
317 0x00000000, // PA_CL_VPORT_XOFFSET_3
318 0x00000000, // PA_CL_VPORT_YSCALE_3
319 0x00000000, // PA_CL_VPORT_YOFFSET_3
320 0x00000000, // PA_CL_VPORT_ZSCALE_3
321 0x00000000, // PA_CL_VPORT_ZOFFSET_3
322 0x00000000, // PA_CL_VPORT_XSCALE_4
323 0x00000000, // PA_CL_VPORT_XOFFSET_4
324 0x00000000, // PA_CL_VPORT_YSCALE_4
325 0x00000000, // PA_CL_VPORT_YOFFSET_4
326 0x00000000, // PA_CL_VPORT_ZSCALE_4
327 0x00000000, // PA_CL_VPORT_ZOFFSET_4
328 0x00000000, // PA_CL_VPORT_XSCALE_5
329 0x00000000, // PA_CL_VPORT_XOFFSET_5
330 0x00000000, // PA_CL_VPORT_YSCALE_5
331 0x00000000, // PA_CL_VPORT_YOFFSET_5
332 0x00000000, // PA_CL_VPORT_ZSCALE_5
333 0x00000000, // PA_CL_VPORT_ZOFFSET_5
334 0x00000000, // PA_CL_VPORT_XSCALE_6
335 0x00000000, // PA_CL_VPORT_XOFFSET_6
336 0x00000000, // PA_CL_VPORT_YSCALE_6
337 0x00000000, // PA_CL_VPORT_YOFFSET_6
338 0x00000000, // PA_CL_VPORT_ZSCALE_6
339 0x00000000, // PA_CL_VPORT_ZOFFSET_6
340 0x00000000, // PA_CL_VPORT_XSCALE_7
341 0x00000000, // PA_CL_VPORT_XOFFSET_7
342 0x00000000, // PA_CL_VPORT_YSCALE_7
343 0x00000000, // PA_CL_VPORT_YOFFSET_7
344 0x00000000, // PA_CL_VPORT_ZSCALE_7
345 0x00000000, // PA_CL_VPORT_ZOFFSET_7
346 0x00000000, // PA_CL_VPORT_XSCALE_8
347 0x00000000, // PA_CL_VPORT_XOFFSET_8
348 0x00000000, // PA_CL_VPORT_YSCALE_8
349 0x00000000, // PA_CL_VPORT_YOFFSET_8
350 0x00000000, // PA_CL_VPORT_ZSCALE_8
351 0x00000000, // PA_CL_VPORT_ZOFFSET_8
352 0x00000000, // PA_CL_VPORT_XSCALE_9
353 0x00000000, // PA_CL_VPORT_XOFFSET_9
354 0x00000000, // PA_CL_VPORT_YSCALE_9
355 0x00000000, // PA_CL_VPORT_YOFFSET_9
356 0x00000000, // PA_CL_VPORT_ZSCALE_9
357 0x00000000, // PA_CL_VPORT_ZOFFSET_9
358 0x00000000, // PA_CL_VPORT_XSCALE_10
359 0x00000000, // PA_CL_VPORT_XOFFSET_10
360 0x00000000, // PA_CL_VPORT_YSCALE_10
361 0x00000000, // PA_CL_VPORT_YOFFSET_10
362 0x00000000, // PA_CL_VPORT_ZSCALE_10
363 0x00000000, // PA_CL_VPORT_ZOFFSET_10
364 0x00000000, // PA_CL_VPORT_XSCALE_11
365 0x00000000, // PA_CL_VPORT_XOFFSET_11
366 0x00000000, // PA_CL_VPORT_YSCALE_11
367 0x00000000, // PA_CL_VPORT_YOFFSET_11
368 0x00000000, // PA_CL_VPORT_ZSCALE_11
369 0x00000000, // PA_CL_VPORT_ZOFFSET_11
370 0x00000000, // PA_CL_VPORT_XSCALE_12
371 0x00000000, // PA_CL_VPORT_XOFFSET_12
372 0x00000000, // PA_CL_VPORT_YSCALE_12
373 0x00000000, // PA_CL_VPORT_YOFFSET_12
374 0x00000000, // PA_CL_VPORT_ZSCALE_12
375 0x00000000, // PA_CL_VPORT_ZOFFSET_12
376 0x00000000, // PA_CL_VPORT_XSCALE_13
377 0x00000000, // PA_CL_VPORT_XOFFSET_13
378 0x00000000, // PA_CL_VPORT_YSCALE_13
379 0x00000000, // PA_CL_VPORT_YOFFSET_13
380 0x00000000, // PA_CL_VPORT_ZSCALE_13
381 0x00000000, // PA_CL_VPORT_ZOFFSET_13
382 0x00000000, // PA_CL_VPORT_XSCALE_14
383 0x00000000, // PA_CL_VPORT_XOFFSET_14
384 0x00000000, // PA_CL_VPORT_YSCALE_14
385 0x00000000, // PA_CL_VPORT_YOFFSET_14
386 0x00000000, // PA_CL_VPORT_ZSCALE_14
387 0x00000000, // PA_CL_VPORT_ZOFFSET_14
388 0x00000000, // PA_CL_VPORT_XSCALE_15
389 0x00000000, // PA_CL_VPORT_XOFFSET_15
390 0x00000000, // PA_CL_VPORT_YSCALE_15
391 0x00000000, // PA_CL_VPORT_YOFFSET_15
392 0x00000000, // PA_CL_VPORT_ZSCALE_15
393 0x00000000, // PA_CL_VPORT_ZOFFSET_15
394 0x00000000, // PA_CL_UCP_0_X
395 0x00000000, // PA_CL_UCP_0_Y
396 0x00000000, // PA_CL_UCP_0_Z
397 0x00000000, // PA_CL_UCP_0_W
398 0x00000000, // PA_CL_UCP_1_X
399 0x00000000, // PA_CL_UCP_1_Y
400 0x00000000, // PA_CL_UCP_1_Z
401 0x00000000, // PA_CL_UCP_1_W
402 0x00000000, // PA_CL_UCP_2_X
403 0x00000000, // PA_CL_UCP_2_Y
404 0x00000000, // PA_CL_UCP_2_Z
405 0x00000000, // PA_CL_UCP_2_W
406 0x00000000, // PA_CL_UCP_3_X
407 0x00000000, // PA_CL_UCP_3_Y
408 0x00000000, // PA_CL_UCP_3_Z
409 0x00000000, // PA_CL_UCP_3_W
410 0x00000000, // PA_CL_UCP_4_X
411 0x00000000, // PA_CL_UCP_4_Y
412 0x00000000, // PA_CL_UCP_4_Z
413 0x00000000, // PA_CL_UCP_4_W
414 0x00000000, // PA_CL_UCP_5_X
415 0x00000000, // PA_CL_UCP_5_Y
416 0x00000000, // PA_CL_UCP_5_Z
417 0x00000000, // PA_CL_UCP_5_W
418 0, // HOLE
419 0, // HOLE
420 0, // HOLE
421 0, // HOLE
422 0, // HOLE
423 0, // HOLE
424 0, // HOLE
425 0, // HOLE
426 0, // HOLE
427 0, // HOLE
428 0x00000000, // SPI_PS_INPUT_CNTL_0
429 0x00000000, // SPI_PS_INPUT_CNTL_1
430 0x00000000, // SPI_PS_INPUT_CNTL_2
431 0x00000000, // SPI_PS_INPUT_CNTL_3
432 0x00000000, // SPI_PS_INPUT_CNTL_4
433 0x00000000, // SPI_PS_INPUT_CNTL_5
434 0x00000000, // SPI_PS_INPUT_CNTL_6
435 0x00000000, // SPI_PS_INPUT_CNTL_7
436 0x00000000, // SPI_PS_INPUT_CNTL_8
437 0x00000000, // SPI_PS_INPUT_CNTL_9
438 0x00000000, // SPI_PS_INPUT_CNTL_10
439 0x00000000, // SPI_PS_INPUT_CNTL_11
440 0x00000000, // SPI_PS_INPUT_CNTL_12
441 0x00000000, // SPI_PS_INPUT_CNTL_13
442 0x00000000, // SPI_PS_INPUT_CNTL_14
443 0x00000000, // SPI_PS_INPUT_CNTL_15
444 0x00000000, // SPI_PS_INPUT_CNTL_16
445 0x00000000, // SPI_PS_INPUT_CNTL_17
446 0x00000000, // SPI_PS_INPUT_CNTL_18
447 0x00000000, // SPI_PS_INPUT_CNTL_19
448 0x00000000, // SPI_PS_INPUT_CNTL_20
449 0x00000000, // SPI_PS_INPUT_CNTL_21
450 0x00000000, // SPI_PS_INPUT_CNTL_22
451 0x00000000, // SPI_PS_INPUT_CNTL_23
452 0x00000000, // SPI_PS_INPUT_CNTL_24
453 0x00000000, // SPI_PS_INPUT_CNTL_25
454 0x00000000, // SPI_PS_INPUT_CNTL_26
455 0x00000000, // SPI_PS_INPUT_CNTL_27
456 0x00000000, // SPI_PS_INPUT_CNTL_28
457 0x00000000, // SPI_PS_INPUT_CNTL_29
458 0x00000000, // SPI_PS_INPUT_CNTL_30
459 0x00000000, // SPI_PS_INPUT_CNTL_31
460 0x00000000, // SPI_VS_OUT_CONFIG
461 0, // HOLE
462 0x00000000, // SPI_PS_INPUT_ENA
463 0x00000000, // SPI_PS_INPUT_ADDR
464 0x00000000, // SPI_INTERP_CONTROL_0
465 0x00000002, // SPI_PS_IN_CONTROL
466 0, // HOLE
467 0x00000000, // SPI_BARYC_CNTL
468 0, // HOLE
469 0x00000000, // SPI_TMPRING_SIZE
470 0, // HOLE
471 0, // HOLE
472 0, // HOLE
473 0, // HOLE
474 0, // HOLE
475 0, // HOLE
476 0, // HOLE
477 0, // HOLE
478 0x00000000, // SPI_SHADER_POS_FORMAT
479 0x00000000, // SPI_SHADER_Z_FORMAT
480 0x00000000, // SPI_SHADER_COL_FORMAT
481 0, // HOLE
482 0, // HOLE
483 0, // HOLE
484 0, // HOLE
485 0, // HOLE
486 0, // HOLE
487 0, // HOLE
488 0, // HOLE
489 0, // HOLE
490 0, // HOLE
491 0, // HOLE
492 0, // HOLE
493 0, // HOLE
494 0, // HOLE
495 0, // HOLE
496 0, // HOLE
497 0, // HOLE
498 0, // HOLE
499 0, // HOLE
500 0, // HOLE
501 0, // HOLE
502 0, // HOLE
503 0, // HOLE
504 0, // HOLE
505 0, // HOLE
506 0, // HOLE
507 0x00000000, // CB_BLEND0_CONTROL
508 0x00000000, // CB_BLEND1_CONTROL
509 0x00000000, // CB_BLEND2_CONTROL
510 0x00000000, // CB_BLEND3_CONTROL
511 0x00000000, // CB_BLEND4_CONTROL
512 0x00000000, // CB_BLEND5_CONTROL
513 0x00000000, // CB_BLEND6_CONTROL
514 0x00000000, // CB_BLEND7_CONTROL
515};
516static const unsigned int ci_SECT_CONTEXT_def_3[] =
517{
518 0x00000000, // PA_CL_POINT_X_RAD
519 0x00000000, // PA_CL_POINT_Y_RAD
520 0x00000000, // PA_CL_POINT_SIZE
521 0x00000000, // PA_CL_POINT_CULL_RAD
522 0x00000000, // VGT_DMA_BASE_HI
523 0x00000000, // VGT_DMA_BASE
524};
525static const unsigned int ci_SECT_CONTEXT_def_4[] =
526{
527 0x00000000, // DB_DEPTH_CONTROL
528 0x00000000, // DB_EQAA
529 0x00000000, // CB_COLOR_CONTROL
530 0x00000000, // DB_SHADER_CONTROL
531 0x00090000, // PA_CL_CLIP_CNTL
532 0x00000004, // PA_SU_SC_MODE_CNTL
533 0x00000000, // PA_CL_VTE_CNTL
534 0x00000000, // PA_CL_VS_OUT_CNTL
535 0x00000000, // PA_CL_NANINF_CNTL
536 0x00000000, // PA_SU_LINE_STIPPLE_CNTL
537 0x00000000, // PA_SU_LINE_STIPPLE_SCALE
538 0x00000000, // PA_SU_PRIM_FILTER_CNTL
539 0, // HOLE
540 0, // HOLE
541 0, // HOLE
542 0, // HOLE
543 0, // HOLE
544 0, // HOLE
545 0, // HOLE
546 0, // HOLE
547 0, // HOLE
548 0, // HOLE
549 0, // HOLE
550 0, // HOLE
551 0, // HOLE
552 0, // HOLE
553 0, // HOLE
554 0, // HOLE
555 0, // HOLE
556 0, // HOLE
557 0, // HOLE
558 0, // HOLE
559 0, // HOLE
560 0, // HOLE
561 0, // HOLE
562 0, // HOLE
563 0, // HOLE
564 0, // HOLE
565 0, // HOLE
566 0, // HOLE
567 0, // HOLE
568 0, // HOLE
569 0, // HOLE
570 0, // HOLE
571 0, // HOLE
572 0, // HOLE
573 0, // HOLE
574 0, // HOLE
575 0, // HOLE
576 0, // HOLE
577 0, // HOLE
578 0, // HOLE
579 0, // HOLE
580 0, // HOLE
581 0, // HOLE
582 0, // HOLE
583 0, // HOLE
584 0, // HOLE
585 0, // HOLE
586 0, // HOLE
587 0, // HOLE
588 0, // HOLE
589 0, // HOLE
590 0, // HOLE
591 0, // HOLE
592 0, // HOLE
593 0, // HOLE
594 0, // HOLE
595 0, // HOLE
596 0, // HOLE
597 0, // HOLE
598 0, // HOLE
599 0, // HOLE
600 0, // HOLE
601 0, // HOLE
602 0, // HOLE
603 0, // HOLE
604 0, // HOLE
605 0, // HOLE
606 0, // HOLE
607 0, // HOLE
608 0, // HOLE
609 0, // HOLE
610 0, // HOLE
611 0, // HOLE
612 0, // HOLE
613 0, // HOLE
614 0, // HOLE
615 0, // HOLE
616 0, // HOLE
617 0, // HOLE
618 0, // HOLE
619 0, // HOLE
620 0, // HOLE
621 0, // HOLE
622 0, // HOLE
623 0, // HOLE
624 0, // HOLE
625 0, // HOLE
626 0, // HOLE
627 0, // HOLE
628 0, // HOLE
629 0, // HOLE
630 0, // HOLE
631 0, // HOLE
632 0, // HOLE
633 0, // HOLE
634 0, // HOLE
635 0, // HOLE
636 0, // HOLE
637 0, // HOLE
638 0, // HOLE
639 0, // HOLE
640 0, // HOLE
641 0, // HOLE
642 0, // HOLE
643 0, // HOLE
644 0, // HOLE
645 0, // HOLE
646 0, // HOLE
647 0, // HOLE
648 0, // HOLE
649 0, // HOLE
650 0, // HOLE
651 0, // HOLE
652 0, // HOLE
653 0, // HOLE
654 0, // HOLE
655 0x00000000, // PA_SU_POINT_SIZE
656 0x00000000, // PA_SU_POINT_MINMAX
657 0x00000000, // PA_SU_LINE_CNTL
658 0x00000000, // PA_SC_LINE_STIPPLE
659 0x00000000, // VGT_OUTPUT_PATH_CNTL
660 0x00000000, // VGT_HOS_CNTL
661 0x00000000, // VGT_HOS_MAX_TESS_LEVEL
662 0x00000000, // VGT_HOS_MIN_TESS_LEVEL
663 0x00000000, // VGT_HOS_REUSE_DEPTH
664 0x00000000, // VGT_GROUP_PRIM_TYPE
665 0x00000000, // VGT_GROUP_FIRST_DECR
666 0x00000000, // VGT_GROUP_DECR
667 0x00000000, // VGT_GROUP_VECT_0_CNTL
668 0x00000000, // VGT_GROUP_VECT_1_CNTL
669 0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
670 0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
671 0x00000000, // VGT_GS_MODE
672 0x00000000, // VGT_GS_ONCHIP_CNTL
673 0x00000000, // PA_SC_MODE_CNTL_0
674 0x00000000, // PA_SC_MODE_CNTL_1
675 0x00000000, // VGT_ENHANCE
676 0x00000100, // VGT_GS_PER_ES
677 0x00000080, // VGT_ES_PER_GS
678 0x00000002, // VGT_GS_PER_VS
679 0x00000000, // VGT_GSVS_RING_OFFSET_1
680 0x00000000, // VGT_GSVS_RING_OFFSET_2
681 0x00000000, // VGT_GSVS_RING_OFFSET_3
682 0x00000000, // VGT_GS_OUT_PRIM_TYPE
683 0x00000000, // IA_ENHANCE
684};
685static const unsigned int ci_SECT_CONTEXT_def_5[] =
686{
687 0x00000000, // WD_ENHANCE
688 0x00000000, // VGT_PRIMITIVEID_EN
689};
690static const unsigned int ci_SECT_CONTEXT_def_6[] =
691{
692 0x00000000, // VGT_PRIMITIVEID_RESET
693};
694static const unsigned int ci_SECT_CONTEXT_def_7[] =
695{
696 0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
697 0, // HOLE
698 0, // HOLE
699 0x00000000, // VGT_INSTANCE_STEP_RATE_0
700 0x00000000, // VGT_INSTANCE_STEP_RATE_1
701 0x000000ff, // IA_MULTI_VGT_PARAM
702 0x00000000, // VGT_ESGS_RING_ITEMSIZE
703 0x00000000, // VGT_GSVS_RING_ITEMSIZE
704 0x00000000, // VGT_REUSE_OFF
705 0x00000000, // VGT_VTX_CNT_EN
706 0x00000000, // DB_HTILE_SURFACE
707 0x00000000, // DB_SRESULTS_COMPARE_STATE0
708 0x00000000, // DB_SRESULTS_COMPARE_STATE1
709 0x00000000, // DB_PRELOAD_CONTROL
710 0, // HOLE
711 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
712 0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
713 0, // HOLE
714 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
715 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
716 0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
717 0, // HOLE
718 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
719 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
720 0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
721 0, // HOLE
722 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
723 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
724 0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
725 0, // HOLE
726 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
727 0, // HOLE
728 0, // HOLE
729 0, // HOLE
730 0, // HOLE
731 0, // HOLE
732 0, // HOLE
733 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
734 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
735 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
736 0, // HOLE
737 0x00000000, // VGT_GS_MAX_VERT_OUT
738 0, // HOLE
739 0, // HOLE
740 0, // HOLE
741 0, // HOLE
742 0, // HOLE
743 0, // HOLE
744 0x00000000, // VGT_SHADER_STAGES_EN
745 0x00000000, // VGT_LS_HS_CONFIG
746 0x00000000, // VGT_GS_VERT_ITEMSIZE
747 0x00000000, // VGT_GS_VERT_ITEMSIZE_1
748 0x00000000, // VGT_GS_VERT_ITEMSIZE_2
749 0x00000000, // VGT_GS_VERT_ITEMSIZE_3
750 0x00000000, // VGT_TF_PARAM
751 0x00000000, // DB_ALPHA_TO_MASK
752 0, // HOLE
753 0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
754 0x00000000, // PA_SU_POLY_OFFSET_CLAMP
755 0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
756 0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
757 0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
758 0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
759 0x00000000, // VGT_GS_INSTANCE_CNT
760 0x00000000, // VGT_STRMOUT_CONFIG
761 0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
762 0, // HOLE
763 0, // HOLE
764 0, // HOLE
765 0, // HOLE
766 0, // HOLE
767 0, // HOLE
768 0, // HOLE
769 0, // HOLE
770 0, // HOLE
771 0, // HOLE
772 0, // HOLE
773 0, // HOLE
774 0, // HOLE
775 0, // HOLE
776 0x00000000, // PA_SC_CENTROID_PRIORITY_0
777 0x00000000, // PA_SC_CENTROID_PRIORITY_1
778 0x00001000, // PA_SC_LINE_CNTL
779 0x00000000, // PA_SC_AA_CONFIG
780 0x00000005, // PA_SU_VTX_CNTL
781 0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
782 0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
783 0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
784 0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
785 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
786 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
787 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
788 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
789 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
790 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
791 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
792 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
793 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
794 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
795 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
796 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
797 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
798 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
799 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
800 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
801 0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
802 0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
803 0, // HOLE
804 0, // HOLE
805 0, // HOLE
806 0, // HOLE
807 0, // HOLE
808 0, // HOLE
809 0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
810 0x00000010, // VGT_OUT_DEALLOC_CNTL
811 0x00000000, // CB_COLOR0_BASE
812 0x00000000, // CB_COLOR0_PITCH
813 0x00000000, // CB_COLOR0_SLICE
814 0x00000000, // CB_COLOR0_VIEW
815 0x00000000, // CB_COLOR0_INFO
816 0x00000000, // CB_COLOR0_ATTRIB
817 0, // HOLE
818 0x00000000, // CB_COLOR0_CMASK
819 0x00000000, // CB_COLOR0_CMASK_SLICE
820 0x00000000, // CB_COLOR0_FMASK
821 0x00000000, // CB_COLOR0_FMASK_SLICE
822 0x00000000, // CB_COLOR0_CLEAR_WORD0
823 0x00000000, // CB_COLOR0_CLEAR_WORD1
824 0, // HOLE
825 0, // HOLE
826 0x00000000, // CB_COLOR1_BASE
827 0x00000000, // CB_COLOR1_PITCH
828 0x00000000, // CB_COLOR1_SLICE
829 0x00000000, // CB_COLOR1_VIEW
830 0x00000000, // CB_COLOR1_INFO
831 0x00000000, // CB_COLOR1_ATTRIB
832 0, // HOLE
833 0x00000000, // CB_COLOR1_CMASK
834 0x00000000, // CB_COLOR1_CMASK_SLICE
835 0x00000000, // CB_COLOR1_FMASK
836 0x00000000, // CB_COLOR1_FMASK_SLICE
837 0x00000000, // CB_COLOR1_CLEAR_WORD0
838 0x00000000, // CB_COLOR1_CLEAR_WORD1
839 0, // HOLE
840 0, // HOLE
841 0x00000000, // CB_COLOR2_BASE
842 0x00000000, // CB_COLOR2_PITCH
843 0x00000000, // CB_COLOR2_SLICE
844 0x00000000, // CB_COLOR2_VIEW
845 0x00000000, // CB_COLOR2_INFO
846 0x00000000, // CB_COLOR2_ATTRIB
847 0, // HOLE
848 0x00000000, // CB_COLOR2_CMASK
849 0x00000000, // CB_COLOR2_CMASK_SLICE
850 0x00000000, // CB_COLOR2_FMASK
851 0x00000000, // CB_COLOR2_FMASK_SLICE
852 0x00000000, // CB_COLOR2_CLEAR_WORD0
853 0x00000000, // CB_COLOR2_CLEAR_WORD1
854 0, // HOLE
855 0, // HOLE
856 0x00000000, // CB_COLOR3_BASE
857 0x00000000, // CB_COLOR3_PITCH
858 0x00000000, // CB_COLOR3_SLICE
859 0x00000000, // CB_COLOR3_VIEW
860 0x00000000, // CB_COLOR3_INFO
861 0x00000000, // CB_COLOR3_ATTRIB
862 0, // HOLE
863 0x00000000, // CB_COLOR3_CMASK
864 0x00000000, // CB_COLOR3_CMASK_SLICE
865 0x00000000, // CB_COLOR3_FMASK
866 0x00000000, // CB_COLOR3_FMASK_SLICE
867 0x00000000, // CB_COLOR3_CLEAR_WORD0
868 0x00000000, // CB_COLOR3_CLEAR_WORD1
869 0, // HOLE
870 0, // HOLE
871 0x00000000, // CB_COLOR4_BASE
872 0x00000000, // CB_COLOR4_PITCH
873 0x00000000, // CB_COLOR4_SLICE
874 0x00000000, // CB_COLOR4_VIEW
875 0x00000000, // CB_COLOR4_INFO
876 0x00000000, // CB_COLOR4_ATTRIB
877 0, // HOLE
878 0x00000000, // CB_COLOR4_CMASK
879 0x00000000, // CB_COLOR4_CMASK_SLICE
880 0x00000000, // CB_COLOR4_FMASK
881 0x00000000, // CB_COLOR4_FMASK_SLICE
882 0x00000000, // CB_COLOR4_CLEAR_WORD0
883 0x00000000, // CB_COLOR4_CLEAR_WORD1
884 0, // HOLE
885 0, // HOLE
886 0x00000000, // CB_COLOR5_BASE
887 0x00000000, // CB_COLOR5_PITCH
888 0x00000000, // CB_COLOR5_SLICE
889 0x00000000, // CB_COLOR5_VIEW
890 0x00000000, // CB_COLOR5_INFO
891 0x00000000, // CB_COLOR5_ATTRIB
892 0, // HOLE
893 0x00000000, // CB_COLOR5_CMASK
894 0x00000000, // CB_COLOR5_CMASK_SLICE
895 0x00000000, // CB_COLOR5_FMASK
896 0x00000000, // CB_COLOR5_FMASK_SLICE
897 0x00000000, // CB_COLOR5_CLEAR_WORD0
898 0x00000000, // CB_COLOR5_CLEAR_WORD1
899 0, // HOLE
900 0, // HOLE
901 0x00000000, // CB_COLOR6_BASE
902 0x00000000, // CB_COLOR6_PITCH
903 0x00000000, // CB_COLOR6_SLICE
904 0x00000000, // CB_COLOR6_VIEW
905 0x00000000, // CB_COLOR6_INFO
906 0x00000000, // CB_COLOR6_ATTRIB
907 0, // HOLE
908 0x00000000, // CB_COLOR6_CMASK
909 0x00000000, // CB_COLOR6_CMASK_SLICE
910 0x00000000, // CB_COLOR6_FMASK
911 0x00000000, // CB_COLOR6_FMASK_SLICE
912 0x00000000, // CB_COLOR6_CLEAR_WORD0
913 0x00000000, // CB_COLOR6_CLEAR_WORD1
914 0, // HOLE
915 0, // HOLE
916 0x00000000, // CB_COLOR7_BASE
917 0x00000000, // CB_COLOR7_PITCH
918 0x00000000, // CB_COLOR7_SLICE
919 0x00000000, // CB_COLOR7_VIEW
920 0x00000000, // CB_COLOR7_INFO
921 0x00000000, // CB_COLOR7_ATTRIB
922 0, // HOLE
923 0x00000000, // CB_COLOR7_CMASK
924 0x00000000, // CB_COLOR7_CMASK_SLICE
925 0x00000000, // CB_COLOR7_FMASK
926 0x00000000, // CB_COLOR7_FMASK_SLICE
927 0x00000000, // CB_COLOR7_CLEAR_WORD0
928 0x00000000, // CB_COLOR7_CLEAR_WORD1
929};
930static const struct cs_extent_def ci_SECT_CONTEXT_defs[] =
931{
932 {ci_SECT_CONTEXT_def_1, 0x0000a000, 212 },
933 {ci_SECT_CONTEXT_def_2, 0x0000a0d6, 274 },
934 {ci_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
935 {ci_SECT_CONTEXT_def_4, 0x0000a200, 157 },
936 {ci_SECT_CONTEXT_def_5, 0x0000a2a0, 2 },
937 {ci_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
938 {ci_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
939 { 0, 0, 0 }
940};
941static const struct cs_section_def ci_cs_data[] = {
942 { ci_SECT_CONTEXT_defs, SECT_CONTEXT },
943 { 0, SECT_NONE }
944};
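Each cs_extent_def entry above pairs a block of default register values with the register offset it starts at and the number of dwords it covers; ci_cs_data then groups those extents into sections terminated by a NULL entry. A minimal sketch of how such a table can be walked to size the clear-state buffer, mirroring the loop used by sumo_rlc_init later in this series, is shown below; the function name is invented for illustration.

/* Sketch: count the register dwords described by a cs_section_def table.
 * The table is terminated by a NULL section pointer, and each section's
 * extent list is terminated by a NULL extent pointer. */
static u32 example_cs_reg_count(const struct cs_section_def *cs_data)
{
	u32 dws = 0;
	unsigned int i, j;

	for (i = 0; cs_data[i].section != NULL; i++)
		for (j = 0; cs_data[i].section[j].extent != NULL; j++)
			dws += cs_data[i].section[j].reg_count;

	return dws;
}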
diff --git a/drivers/gpu/drm/radeon/clearstate_evergreen.h b/drivers/gpu/drm/radeon/clearstate_evergreen.h
index 4791d856b7fd..63a1ffbb3ced 100644
--- a/drivers/gpu/drm/radeon/clearstate_evergreen.h
+++ b/drivers/gpu/drm/radeon/clearstate_evergreen.h
@@ -1072,7 +1072,7 @@ static const struct cs_extent_def SECT_CTRLCONST_defs[] =
1072 {SECT_CTRLCONST_def_1, 0x0000f3fc, 2 }, 1072 {SECT_CTRLCONST_def_1, 0x0000f3fc, 2 },
1073 { 0, 0, 0 } 1073 { 0, 0, 0 }
1074}; 1074};
1075struct cs_section_def evergreen_cs_data[] = { 1075static const struct cs_section_def evergreen_cs_data[] = {
1076 { SECT_CONTEXT_defs, SECT_CONTEXT }, 1076 { SECT_CONTEXT_defs, SECT_CONTEXT },
1077 { SECT_CLEAR_defs, SECT_CLEAR }, 1077 { SECT_CLEAR_defs, SECT_CLEAR },
1078 { SECT_CTRLCONST_defs, SECT_CTRLCONST }, 1078 { SECT_CTRLCONST_defs, SECT_CTRLCONST },
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 7e5d0b570a30..95a66db08d9b 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -2166,7 +2166,8 @@ bool cypress_dpm_vblank_too_short(struct radeon_device *rdev)
2166{ 2166{
2167 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 2167 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2168 u32 vblank_time = r600_dpm_get_vblank_time(rdev); 2168 u32 vblank_time = r600_dpm_get_vblank_time(rdev);
2169 u32 switch_limit = pi->mem_gddr5 ? 450 : 300; 2169 /* we never hit the non-gddr5 limit so disable it */
2170 u32 switch_limit = pi->mem_gddr5 ? 450 : 0;
2170 2171
2171 if (vblank_time < switch_limit) 2172 if (vblank_time < switch_limit)
2172 return true; 2173 return true;
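For reference, the limit is expressed in microseconds of vertical blanking: a standard 1080p60 mode (htotal 2200, vtotal 1125, 148.5 MHz pixel clock) leaves roughly (1125 - 1080) * 2200 / 148.5 MHz, about 667 us of vblank, comfortably above the 450 us GDDR5 threshold, while modes with much less blanking can fall below it and mark the vblank as too short for an mclk switch. With the non-GDDR5 limit set to 0, vblank_time (an unsigned value) can never be below it, so the check is effectively disabled on non-GDDR5 boards.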
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
new file mode 100644
index 000000000000..8953255e894b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -0,0 +1,278 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/hdmi.h>
24#include <drm/drmP.h>
25#include "radeon.h"
26#include "sid.h"
27
28static u32 dce6_endpoint_rreg(struct radeon_device *rdev,
29 u32 block_offset, u32 reg)
30{
31 u32 r;
32
33 WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
34 r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset);
35 return r;
36}
37
38static void dce6_endpoint_wreg(struct radeon_device *rdev,
39 u32 block_offset, u32 reg, u32 v)
40{
41 if (ASIC_IS_DCE8(rdev))
42 WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
43 else
44 WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset,
45 AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg));
46 WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v);
47}
48
49#define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg))
50#define WREG32_ENDPOINT(block, reg, v) dce6_endpoint_wreg(rdev, (block), (reg), (v))
51
52
53static void dce6_afmt_get_connected_pins(struct radeon_device *rdev)
54{
55 int i;
56 u32 offset, tmp;
57
58 for (i = 0; i < rdev->audio.num_pins; i++) {
59 offset = rdev->audio.pin[i].offset;
60 tmp = RREG32_ENDPOINT(offset,
61 AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
62 if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
63 rdev->audio.pin[i].connected = false;
64 else
65 rdev->audio.pin[i].connected = true;
66 }
67}
68
69struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev)
70{
71 int i;
72
73 dce6_afmt_get_connected_pins(rdev);
74
75 for (i = 0; i < rdev->audio.num_pins; i++) {
76 if (rdev->audio.pin[i].connected)
77 return &rdev->audio.pin[i];
78 }
79 DRM_ERROR("No connected audio pins found!\n");
80 return NULL;
81}
82
83void dce6_afmt_select_pin(struct drm_encoder *encoder)
84{
85 struct radeon_device *rdev = encoder->dev->dev_private;
86 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
87 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
88 u32 offset = dig->afmt->offset;
89 u32 id;
90
91 if (!dig->afmt->pin)
92 return;
93 id = dig->afmt->pin->id;
94 WREG32(AFMT_AUDIO_SRC_CONTROL + offset, AFMT_AUDIO_SRC_SELECT(id));
95}
96
97void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
98{
99 struct radeon_device *rdev = encoder->dev->dev_private;
100 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
101 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
102 struct drm_connector *connector;
103 struct radeon_connector *radeon_connector = NULL;
104 u32 offset, tmp;
105 u8 *sadb;
106 int sad_count;
107
108 if (!dig->afmt->pin)
109 return;
110
111 offset = dig->afmt->pin->offset;
112
113 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
114 if (connector->encoder == encoder)
115 radeon_connector = to_radeon_connector(connector);
116 }
117
118 if (!radeon_connector) {
119 DRM_ERROR("Couldn't find encoder's connector\n");
120 return;
121 }
122
123 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
124 if (sad_count < 0) {
125 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
126 return;
127 }
128
129 /* program the speaker allocation */
130 tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
131 tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
132 /* set HDMI mode */
133 tmp |= HDMI_CONNECTION;
134 if (sad_count)
135 tmp |= SPEAKER_ALLOCATION(sadb[0]);
136 else
137 tmp |= SPEAKER_ALLOCATION(5); /* stereo */
138 WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
139
140 kfree(sadb);
141}
142
143void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
144{
145 struct radeon_device *rdev = encoder->dev->dev_private;
146 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
147 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
148 u32 offset;
149 struct drm_connector *connector;
150 struct radeon_connector *radeon_connector = NULL;
151 struct cea_sad *sads;
152 int i, sad_count;
153
154 static const u16 eld_reg_to_type[][2] = {
155 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
156 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
157 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
158 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
159 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
160 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
161 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
162 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
163 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
164 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
165 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
166 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
167 };
168
169 if (!dig->afmt->pin)
170 return;
171
172 offset = dig->afmt->pin->offset;
173
174 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
175 if (connector->encoder == encoder)
176 radeon_connector = to_radeon_connector(connector);
177 }
178
179 if (!radeon_connector) {
180 DRM_ERROR("Couldn't find encoder's connector\n");
181 return;
182 }
183
184 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
185 if (sad_count < 0) {
186 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
187 return;
188 }
189 BUG_ON(!sads);
190
191 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
192 u32 value = 0;
193 int j;
194
195 for (j = 0; j < sad_count; j++) {
196 struct cea_sad *sad = &sads[j];
197
198 if (sad->format == eld_reg_to_type[i][1]) {
199 value = MAX_CHANNELS(sad->channels) |
200 DESCRIPTOR_BYTE_2(sad->byte2) |
201 SUPPORTED_FREQUENCIES(sad->freq);
202 if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
203 value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
204 break;
205 }
206 }
207 WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
208 }
209
210 kfree(sads);
211}
212
213static int dce6_audio_chipset_supported(struct radeon_device *rdev)
214{
215 return !ASIC_IS_NODCE(rdev);
216}
217
218static void dce6_audio_enable(struct radeon_device *rdev,
219 struct r600_audio_pin *pin,
220 bool enable)
221{
222 WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL,
223 enable ? AUDIO_ENABLED : 0);
224 DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
225}
226
227static const u32 pin_offsets[7] =
228{
229 (0x5e00 - 0x5e00),
230 (0x5e18 - 0x5e00),
231 (0x5e30 - 0x5e00),
232 (0x5e48 - 0x5e00),
233 (0x5e60 - 0x5e00),
234 (0x5e78 - 0x5e00),
235 (0x5e90 - 0x5e00),
236};
237
238int dce6_audio_init(struct radeon_device *rdev)
239{
240 int i;
241
242 if (!radeon_audio || !dce6_audio_chipset_supported(rdev))
243 return 0;
244
245 rdev->audio.enabled = true;
246
247 if (ASIC_IS_DCE8(rdev))
248 rdev->audio.num_pins = 7;
249 else
250 rdev->audio.num_pins = 6;
251
252 for (i = 0; i < rdev->audio.num_pins; i++) {
253 rdev->audio.pin[i].channels = -1;
254 rdev->audio.pin[i].rate = -1;
255 rdev->audio.pin[i].bits_per_sample = -1;
256 rdev->audio.pin[i].status_bits = 0;
257 rdev->audio.pin[i].category_code = 0;
258 rdev->audio.pin[i].connected = false;
259 rdev->audio.pin[i].offset = pin_offsets[i];
260 rdev->audio.pin[i].id = i;
261 dce6_audio_enable(rdev, &rdev->audio.pin[i], true);
262 }
263
264 return 0;
265}
266
267void dce6_audio_fini(struct radeon_device *rdev)
268{
269 int i;
270
271 if (!rdev->audio.enabled)
272 return;
273
274 for (i = 0; i < rdev->audio.num_pins; i++)
275 dce6_audio_enable(rdev, &rdev->audio.pin[i], false);
276
277 rdev->audio.enabled = false;
278}
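Taken together, the helpers above are meant to be driven from the encoder's HDMI mode-set path: pick a connected pin, route the AFMT block to it, then program the speaker allocation and SAD descriptors from the monitor's EDID. The sketch below shows one plausible ordering only; the real call sites live in the encoder/HDMI code elsewhere in the driver, and the function name is invented.

/* Illustrative ordering only; not a function from this file. */
static void example_dce6_afmt_audio_setup(struct drm_encoder *encoder)
{
	struct radeon_device *rdev = encoder->dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;

	dig->afmt->pin = dce6_audio_get_pin(rdev);   /* first connected pin */
	dce6_afmt_select_pin(encoder);               /* route AFMT to that pin */
	dce6_afmt_write_speaker_allocation(encoder); /* from the EDID speaker block */
	dce6_afmt_write_sad_regs(encoder);           /* supported audio formats */
}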
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index d5b49e33315e..555164e270a7 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -47,7 +47,7 @@ static const u32 crtc_offsets[6] =
47 47
48#include "clearstate_evergreen.h" 48#include "clearstate_evergreen.h"
49 49
50static u32 sumo_rlc_save_restore_register_list[] = 50static const u32 sumo_rlc_save_restore_register_list[] =
51{ 51{
52 0x98fc, 52 0x98fc,
53 0x9830, 53 0x9830,
@@ -131,7 +131,6 @@ static u32 sumo_rlc_save_restore_register_list[] =
131 0x9150, 131 0x9150,
132 0x802c, 132 0x802c,
133}; 133};
134static u32 sumo_rlc_save_restore_register_list_size = ARRAY_SIZE(sumo_rlc_save_restore_register_list);
135 134
136static void evergreen_gpu_init(struct radeon_device *rdev); 135static void evergreen_gpu_init(struct radeon_device *rdev);
137void evergreen_fini(struct radeon_device *rdev); 136void evergreen_fini(struct radeon_device *rdev);
@@ -141,6 +140,12 @@ extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
141 int ring, u32 cp_int_cntl); 140 int ring, u32 cp_int_cntl);
142extern void cayman_vm_decode_fault(struct radeon_device *rdev, 141extern void cayman_vm_decode_fault(struct radeon_device *rdev,
143 u32 status, u32 addr); 142 u32 status, u32 addr);
143void cik_init_cp_pg_table(struct radeon_device *rdev);
144
145extern u32 si_get_csb_size(struct radeon_device *rdev);
146extern void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
147extern u32 cik_get_csb_size(struct radeon_device *rdev);
148extern void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
144 149
145static const u32 evergreen_golden_registers[] = 150static const u32 evergreen_golden_registers[] =
146{ 151{
@@ -1807,7 +1812,8 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
1807 struct drm_display_mode *mode, 1812 struct drm_display_mode *mode,
1808 struct drm_display_mode *other_mode) 1813 struct drm_display_mode *other_mode)
1809{ 1814{
1810 u32 tmp; 1815 u32 tmp, buffer_alloc, i;
1816 u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
1811 /* 1817 /*
1812 * Line Buffer Setup 1818 * Line Buffer Setup
1813 * There are 3 line buffers, each one shared by 2 display controllers. 1819 * There are 3 line buffers, each one shared by 2 display controllers.
@@ -1830,18 +1836,34 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
1830 * non-linked crtcs for maximum line buffer allocation. 1836 * non-linked crtcs for maximum line buffer allocation.
1831 */ 1837 */
1832 if (radeon_crtc->base.enabled && mode) { 1838 if (radeon_crtc->base.enabled && mode) {
1833 if (other_mode) 1839 if (other_mode) {
1834 tmp = 0; /* 1/2 */ 1840 tmp = 0; /* 1/2 */
1835 else 1841 buffer_alloc = 1;
1842 } else {
1836 tmp = 2; /* whole */ 1843 tmp = 2; /* whole */
1837 } else 1844 buffer_alloc = 2;
1845 }
1846 } else {
1838 tmp = 0; 1847 tmp = 0;
1848 buffer_alloc = 0;
1849 }
1839 1850
1840 /* second controller of the pair uses second half of the lb */ 1851 /* second controller of the pair uses second half of the lb */
1841 if (radeon_crtc->crtc_id % 2) 1852 if (radeon_crtc->crtc_id % 2)
1842 tmp += 4; 1853 tmp += 4;
1843 WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp); 1854 WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
1844 1855
1856 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
1857 WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
1858 DMIF_BUFFERS_ALLOCATED(buffer_alloc));
1859 for (i = 0; i < rdev->usec_timeout; i++) {
1860 if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
1861 DMIF_BUFFERS_ALLOCATED_COMPLETED)
1862 break;
1863 udelay(1);
1864 }
1865 }
1866
1845 if (radeon_crtc->base.enabled && mode) { 1867 if (radeon_crtc->base.enabled && mode) {
1846 switch (tmp) { 1868 switch (tmp) {
1847 case 0: 1869 case 0:
@@ -2881,8 +2903,8 @@ static int evergreen_cp_resume(struct radeon_device *rdev)
2881 RREG32(GRBM_SOFT_RESET); 2903 RREG32(GRBM_SOFT_RESET);
2882 2904
2883 /* Set ring buffer size */ 2905 /* Set ring buffer size */
2884 rb_bufsz = drm_order(ring->ring_size / 8); 2906 rb_bufsz = order_base_2(ring->ring_size / 8);
2885 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 2907 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2886#ifdef __BIG_ENDIAN 2908#ifdef __BIG_ENDIAN
2887 tmp |= BUF_SWAP_32BIT; 2909 tmp |= BUF_SWAP_32BIT;
2888#endif 2910#endif
@@ -3613,7 +3635,7 @@ bool evergreen_is_display_hung(struct radeon_device *rdev)
3613 return true; 3635 return true;
3614} 3636}
3615 3637
3616static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev) 3638u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
3617{ 3639{
3618 u32 reset_mask = 0; 3640 u32 reset_mask = 0;
3619 u32 tmp; 3641 u32 tmp;
@@ -3839,28 +3861,6 @@ bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
3839 return radeon_ring_test_lockup(rdev, ring); 3861 return radeon_ring_test_lockup(rdev, ring);
3840} 3862}
3841 3863
3842/**
3843 * evergreen_dma_is_lockup - Check if the DMA engine is locked up
3844 *
3845 * @rdev: radeon_device pointer
3846 * @ring: radeon_ring structure holding ring information
3847 *
3848 * Check if the async DMA engine is locked up.
3849 * Returns true if the engine appears to be locked up, false if not.
3850 */
3851bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
3852{
3853 u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
3854
3855 if (!(reset_mask & RADEON_RESET_DMA)) {
3856 radeon_ring_lockup_update(ring);
3857 return false;
3858 }
3859 /* force ring activities */
3860 radeon_ring_force_activity(rdev, ring);
3861 return radeon_ring_test_lockup(rdev, ring);
3862}
3863
3864/* 3864/*
3865 * RLC 3865 * RLC
3866 */ 3866 */
@@ -3894,147 +3894,231 @@ void sumo_rlc_fini(struct radeon_device *rdev)
3894 radeon_bo_unref(&rdev->rlc.clear_state_obj); 3894 radeon_bo_unref(&rdev->rlc.clear_state_obj);
3895 rdev->rlc.clear_state_obj = NULL; 3895 rdev->rlc.clear_state_obj = NULL;
3896 } 3896 }
3897
3898 /* cp table block */
3899 if (rdev->rlc.cp_table_obj) {
3900 r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
3901 if (unlikely(r != 0))
3902 dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
3903 radeon_bo_unpin(rdev->rlc.cp_table_obj);
3904 radeon_bo_unreserve(rdev->rlc.cp_table_obj);
3905
3906 radeon_bo_unref(&rdev->rlc.cp_table_obj);
3907 rdev->rlc.cp_table_obj = NULL;
3908 }
3897} 3909}
3898 3910
3911#define CP_ME_TABLE_SIZE 96
3912
3899int sumo_rlc_init(struct radeon_device *rdev) 3913int sumo_rlc_init(struct radeon_device *rdev)
3900{ 3914{
3901 u32 *src_ptr; 3915 const u32 *src_ptr;
3902 volatile u32 *dst_ptr; 3916 volatile u32 *dst_ptr;
3903 u32 dws, data, i, j, k, reg_num; 3917 u32 dws, data, i, j, k, reg_num;
3904 u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index; 3918 u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index = 0;
3905 u64 reg_list_mc_addr; 3919 u64 reg_list_mc_addr;
3906 struct cs_section_def *cs_data; 3920 const struct cs_section_def *cs_data;
3907 int r; 3921 int r;
3908 3922
3909 src_ptr = rdev->rlc.reg_list; 3923 src_ptr = rdev->rlc.reg_list;
3910 dws = rdev->rlc.reg_list_size; 3924 dws = rdev->rlc.reg_list_size;
3925 if (rdev->family >= CHIP_BONAIRE) {
3926 dws += (5 * 16) + 48 + 48 + 64;
3927 }
3911 cs_data = rdev->rlc.cs_data; 3928 cs_data = rdev->rlc.cs_data;
3912 3929
3913 /* save restore block */ 3930 if (src_ptr) {
3914 if (rdev->rlc.save_restore_obj == NULL) { 3931 /* save restore block */
3915 r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, 3932 if (rdev->rlc.save_restore_obj == NULL) {
3916 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj); 3933 r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
3934 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj);
3935 if (r) {
3936 dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
3937 return r;
3938 }
3939 }
3940
3941 r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
3942 if (unlikely(r != 0)) {
3943 sumo_rlc_fini(rdev);
3944 return r;
3945 }
3946 r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
3947 &rdev->rlc.save_restore_gpu_addr);
3917         if (r) {
3918             dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
3919             return r;
3920         }
3921     }
3922 
3923     r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
3924     if (unlikely(r != 0)) {
3925         sumo_rlc_fini(rdev);
3926         return r;
3927     }
3928     r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
3929               &rdev->rlc.save_restore_gpu_addr);
3930     if (r) {
3931         radeon_bo_unreserve(rdev->rlc.save_restore_obj);
3932         dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
3933         sumo_rlc_fini(rdev);
3934         return r;
3935     }
3936     r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
3937     if (r) {
3938         dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
3939         sumo_rlc_fini(rdev);
3940         return r;
3941     }
3942     /* write the sr buffer */
3943     dst_ptr = rdev->rlc.sr_ptr;
3944     /* format:
3945      * dw0: (reg2 << 16) | reg1
3946      * dw1: reg1 save space
3947      * dw2: reg2 save space
3948      */
3949     for (i = 0; i < dws; i++) {
3950         data = src_ptr[i] >> 2;
3951         i++;
3952         if (i < dws)
3953             data |= (src_ptr[i] >> 2) << 16;
3954         j = (((i - 1) * 3) / 2);
3955         dst_ptr[j] = data;
3956     }
3957     j = ((i * 3) / 2);
3958     dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER;
3959 
3960     radeon_bo_kunmap(rdev->rlc.save_restore_obj);
3961     radeon_bo_unreserve(rdev->rlc.save_restore_obj);
3962 
3963     /* clear state block */
3964     reg_list_num = 0;
3965     dws = 0;
3966     for (i = 0; cs_data[i].section != NULL; i++) {
3967         for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
3968             reg_list_num++;
3969             dws += cs_data[i].section[j].reg_count;
3970         }
3971     }
3972     reg_list_blk_index = (3 * reg_list_num + 2);
3973     dws += reg_list_blk_index;
3974 
3975     if (rdev->rlc.clear_state_obj == NULL) {
3976         r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
3977                      RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
3978         if (r) {
3979             dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
3980             sumo_rlc_fini(rdev);
3981             return r;
3982         }
3983     }
3984     r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
3985     if (unlikely(r != 0)) {
3986         sumo_rlc_fini(rdev);
3987         return r;
3988     }
3989     r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
3990               &rdev->rlc.clear_state_gpu_addr);
3991     if (r) {
3992 
3993         radeon_bo_unreserve(rdev->rlc.clear_state_obj);
3994         dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
3995         sumo_rlc_fini(rdev);
3996         return r;
3997     }
3998     r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
3999     if (r) {
4000         dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
4001         sumo_rlc_fini(rdev);
4002         return r;
4003     }
4004     /* set up the cs buffer */
4005     dst_ptr = rdev->rlc.cs_ptr;
4006     reg_list_hdr_blk_index = 0;
4007     reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
4008     data = upper_32_bits(reg_list_mc_addr);
4009     dst_ptr[reg_list_hdr_blk_index] = data;
4010     reg_list_hdr_blk_index++;
4011     for (i = 0; cs_data[i].section != NULL; i++) {
4012         for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
4013             reg_num = cs_data[i].section[j].reg_count;
4014             data = reg_list_mc_addr & 0xffffffff;
4015             dst_ptr[reg_list_hdr_blk_index] = data;
4016             reg_list_hdr_blk_index++;
4017 
4018             data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
4019             dst_ptr[reg_list_hdr_blk_index] = data;
4020             reg_list_hdr_blk_index++;
4021 
4022             data = 0x08000000 | (reg_num * 4);
4023             dst_ptr[reg_list_hdr_blk_index] = data;
4024             reg_list_hdr_blk_index++;
4025 
4026             for (k = 0; k < reg_num; k++) {
4027                 data = cs_data[i].section[j].extent[k];
4028                 dst_ptr[reg_list_blk_index + k] = data;
4029             }
4030             reg_list_mc_addr += reg_num * 4;
4031             reg_list_blk_index += reg_num;
4032         }
4033     }
4034     dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
4035 
4036     radeon_bo_kunmap(rdev->rlc.clear_state_obj);
4037     radeon_bo_unreserve(rdev->rlc.clear_state_obj);
3948         if (r) {
3949             radeon_bo_unreserve(rdev->rlc.save_restore_obj);
3950             dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
3951             sumo_rlc_fini(rdev);
3952             return r;
3953         }
3954 
3955         r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
3956         if (r) {
3957             dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
3958             sumo_rlc_fini(rdev);
3959             return r;
3960         }
3961         /* write the sr buffer */
3962         dst_ptr = rdev->rlc.sr_ptr;
3963         if (rdev->family >= CHIP_TAHITI) {
3964             /* SI */
3965             for (i = 0; i < rdev->rlc.reg_list_size; i++)
3966                 dst_ptr[i] = src_ptr[i];
3967         } else {
3968             /* ON/LN/TN */
3969             /* format:
3970              * dw0: (reg2 << 16) | reg1
3971              * dw1: reg1 save space
3972              * dw2: reg2 save space
3973              */
3974             for (i = 0; i < dws; i++) {
3975                 data = src_ptr[i] >> 2;
3976                 i++;
3977                 if (i < dws)
3978                     data |= (src_ptr[i] >> 2) << 16;
3979                 j = (((i - 1) * 3) / 2);
3980                 dst_ptr[j] = data;
3981             }
3982             j = ((i * 3) / 2);
3983             dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER;
3984         }
3985         radeon_bo_kunmap(rdev->rlc.save_restore_obj);
3986         radeon_bo_unreserve(rdev->rlc.save_restore_obj);
3987     }
3988 
3989     if (cs_data) {
3990         /* clear state block */
3991         if (rdev->family >= CHIP_BONAIRE) {
3992             rdev->rlc.clear_state_size = dws = cik_get_csb_size(rdev);
3993         } else if (rdev->family >= CHIP_TAHITI) {
3994             rdev->rlc.clear_state_size = si_get_csb_size(rdev);
3995             dws = rdev->rlc.clear_state_size + (256 / 4);
3996         } else {
3997             reg_list_num = 0;
3998             dws = 0;
3999             for (i = 0; cs_data[i].section != NULL; i++) {
4000                 for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
4001                     reg_list_num++;
4002                     dws += cs_data[i].section[j].reg_count;
4003                 }
4004             }
4005             reg_list_blk_index = (3 * reg_list_num + 2);
4006             dws += reg_list_blk_index;
4007             rdev->rlc.clear_state_size = dws;
4008         }
4009 
4010         if (rdev->rlc.clear_state_obj == NULL) {
4011             r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
4012                          RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
4013             if (r) {
4014                 dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
4015                 sumo_rlc_fini(rdev);
4016                 return r;
4017             }
4018         }
4019         r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
4020         if (unlikely(r != 0)) {
4021             sumo_rlc_fini(rdev);
4022             return r;
4023         }
4024         r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
4025                   &rdev->rlc.clear_state_gpu_addr);
4026         if (r) {
4027             radeon_bo_unreserve(rdev->rlc.clear_state_obj);
4028             dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
4029             sumo_rlc_fini(rdev);
4030             return r;
4031         }
4032 
4033         r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
4034         if (r) {
4035             dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
4036             sumo_rlc_fini(rdev);
4037             return r;
4038         }
4039         /* set up the cs buffer */
4040         dst_ptr = rdev->rlc.cs_ptr;
4041         if (rdev->family >= CHIP_BONAIRE) {
4042             cik_get_csb_buffer(rdev, dst_ptr);
4043         } else if (rdev->family >= CHIP_TAHITI) {
4044             reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + 256;
4045             dst_ptr[0] = upper_32_bits(reg_list_mc_addr);
4046             dst_ptr[1] = lower_32_bits(reg_list_mc_addr);
4047             dst_ptr[2] = rdev->rlc.clear_state_size;
4048             si_get_csb_buffer(rdev, &dst_ptr[(256/4)]);
4049         } else {
4050             reg_list_hdr_blk_index = 0;
4051             reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
4052             data = upper_32_bits(reg_list_mc_addr);
4053             dst_ptr[reg_list_hdr_blk_index] = data;
4054             reg_list_hdr_blk_index++;
4055             for (i = 0; cs_data[i].section != NULL; i++) {
4056                 for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
4057                     reg_num = cs_data[i].section[j].reg_count;
4058                     data = reg_list_mc_addr & 0xffffffff;
4059                     dst_ptr[reg_list_hdr_blk_index] = data;
4060                     reg_list_hdr_blk_index++;
4061 
4062                     data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
4063                     dst_ptr[reg_list_hdr_blk_index] = data;
4064                     reg_list_hdr_blk_index++;
4065 
4066                     data = 0x08000000 | (reg_num * 4);
4067                     dst_ptr[reg_list_hdr_blk_index] = data;
4068                     reg_list_hdr_blk_index++;
4069 
4070                     for (k = 0; k < reg_num; k++) {
4071                         data = cs_data[i].section[j].extent[k];
4072                         dst_ptr[reg_list_blk_index + k] = data;
4073                     }
4074                     reg_list_mc_addr += reg_num * 4;
4075                     reg_list_blk_index += reg_num;
4076                 }
4077             }
4078             dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
4079         }
4080         radeon_bo_kunmap(rdev->rlc.clear_state_obj);
4081         radeon_bo_unreserve(rdev->rlc.clear_state_obj);
4082     }
4083 
4084     if (rdev->rlc.cp_table_size) {
4085         if (rdev->rlc.cp_table_obj == NULL) {
4086 r = radeon_bo_create(rdev, rdev->rlc.cp_table_size, PAGE_SIZE, true,
4087 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.cp_table_obj);
4088 if (r) {
4089 dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
4090 sumo_rlc_fini(rdev);
4091 return r;
4092 }
4093 }
4094
4095 r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
4096 if (unlikely(r != 0)) {
4097 dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
4098 sumo_rlc_fini(rdev);
4099 return r;
4100 }
4101 r = radeon_bo_pin(rdev->rlc.cp_table_obj, RADEON_GEM_DOMAIN_VRAM,
4102 &rdev->rlc.cp_table_gpu_addr);
4103 if (r) {
4104 radeon_bo_unreserve(rdev->rlc.cp_table_obj);
4105 dev_warn(rdev->dev, "(%d) pin RLC cp_table bo failed\n", r);
4106 sumo_rlc_fini(rdev);
4107 return r;
4108 }
4109 r = radeon_bo_kmap(rdev->rlc.cp_table_obj, (void **)&rdev->rlc.cp_table_ptr);
4110 if (r) {
4111 dev_warn(rdev->dev, "(%d) map RLC cp table bo failed\n", r);
4112 sumo_rlc_fini(rdev);
4113 return r;
4114 }
4115
4116 cik_init_cp_pg_table(rdev);
4117
4118 radeon_bo_kunmap(rdev->rlc.cp_table_obj);
4119 radeon_bo_unreserve(rdev->rlc.cp_table_obj);
4120
4121 }
4038 4122
4039 return 0; 4123 return 0;
4040} 4124}
@@ -4959,143 +5043,6 @@ restart_ih:
4959 return IRQ_HANDLED; 5043 return IRQ_HANDLED;
4960} 5044}
4961 5045
4962/**
4963 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
4964 *
4965 * @rdev: radeon_device pointer
4966 * @fence: radeon fence object
4967 *
4968 * Add a DMA fence packet to the ring to write
4969 * the fence seq number and DMA trap packet to generate
4970 * an interrupt if needed (evergreen-SI).
4971 */
4972void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
4973 struct radeon_fence *fence)
4974{
4975 struct radeon_ring *ring = &rdev->ring[fence->ring];
4976 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
4977 /* write the fence */
4978 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
4979 radeon_ring_write(ring, addr & 0xfffffffc);
4980 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
4981 radeon_ring_write(ring, fence->seq);
4982 /* generate an interrupt */
4983 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
4984 /* flush HDP */
4985 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
4986 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
4987 radeon_ring_write(ring, 1);
4988}
4989
4990/**
4991 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
4992 *
4993 * @rdev: radeon_device pointer
4994 * @ib: IB object to schedule
4995 *
4996 * Schedule an IB in the DMA ring (evergreen).
4997 */
4998void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
4999 struct radeon_ib *ib)
5000{
5001 struct radeon_ring *ring = &rdev->ring[ib->ring];
5002
5003 if (rdev->wb.enabled) {
5004 u32 next_rptr = ring->wptr + 4;
5005 while ((next_rptr & 7) != 5)
5006 next_rptr++;
5007 next_rptr += 3;
5008 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
5009 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
5010 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
5011 radeon_ring_write(ring, next_rptr);
5012 }
5013
5014 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
5015 * Pad as necessary with NOPs.
5016 */
5017 while ((ring->wptr & 7) != 5)
5018 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
5019 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
5020 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
5021 radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
5022
5023}
5024
5025/**
5026 * evergreen_copy_dma - copy pages using the DMA engine
5027 *
5028 * @rdev: radeon_device pointer
5029 * @src_offset: src GPU address
5030 * @dst_offset: dst GPU address
5031 * @num_gpu_pages: number of GPU pages to xfer
5032 * @fence: radeon fence object
5033 *
5034 * Copy GPU paging using the DMA engine (evergreen-cayman).
5035 * Used by the radeon ttm implementation to move pages if
5036 * registered as the asic copy callback.
5037 */
5038int evergreen_copy_dma(struct radeon_device *rdev,
5039 uint64_t src_offset, uint64_t dst_offset,
5040 unsigned num_gpu_pages,
5041 struct radeon_fence **fence)
5042{
5043 struct radeon_semaphore *sem = NULL;
5044 int ring_index = rdev->asic->copy.dma_ring_index;
5045 struct radeon_ring *ring = &rdev->ring[ring_index];
5046 u32 size_in_dw, cur_size_in_dw;
5047 int i, num_loops;
5048 int r = 0;
5049
5050 r = radeon_semaphore_create(rdev, &sem);
5051 if (r) {
5052 DRM_ERROR("radeon: moving bo (%d).\n", r);
5053 return r;
5054 }
5055
5056 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
5057 num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
5058 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
5059 if (r) {
5060 DRM_ERROR("radeon: moving bo (%d).\n", r);
5061 radeon_semaphore_free(rdev, &sem, NULL);
5062 return r;
5063 }
5064
5065 if (radeon_fence_need_sync(*fence, ring->idx)) {
5066 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
5067 ring->idx);
5068 radeon_fence_note_sync(*fence, ring->idx);
5069 } else {
5070 radeon_semaphore_free(rdev, &sem, NULL);
5071 }
5072
5073 for (i = 0; i < num_loops; i++) {
5074 cur_size_in_dw = size_in_dw;
5075 if (cur_size_in_dw > 0xFFFFF)
5076 cur_size_in_dw = 0xFFFFF;
5077 size_in_dw -= cur_size_in_dw;
5078 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
5079 radeon_ring_write(ring, dst_offset & 0xfffffffc);
5080 radeon_ring_write(ring, src_offset & 0xfffffffc);
5081 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
5082 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
5083 src_offset += cur_size_in_dw * 4;
5084 dst_offset += cur_size_in_dw * 4;
5085 }
5086
5087 r = radeon_fence_emit(rdev, fence, ring->idx);
5088 if (r) {
5089 radeon_ring_unlock_undo(rdev, ring);
5090 return r;
5091 }
5092
5093 radeon_ring_unlock_commit(rdev, ring);
5094 radeon_semaphore_free(rdev, &sem, *fence);
5095
5096 return r;
5097}
5098
5099static int evergreen_startup(struct radeon_device *rdev) 5046static int evergreen_startup(struct radeon_device *rdev)
5100{ 5047{
5101 struct radeon_ring *ring; 5048 struct radeon_ring *ring;
@@ -5106,6 +5053,11 @@ static int evergreen_startup(struct radeon_device *rdev)
5106 /* enable aspm */ 5053 /* enable aspm */
5107 evergreen_program_aspm(rdev); 5054 evergreen_program_aspm(rdev);
5108 5055
5056 /* scratch needs to be initialized before MC */
5057 r = r600_vram_scratch_init(rdev);
5058 if (r)
5059 return r;
5060
5109 evergreen_mc_program(rdev); 5061 evergreen_mc_program(rdev);
5110 5062
5111 if (ASIC_IS_DCE5(rdev)) { 5063 if (ASIC_IS_DCE5(rdev)) {
@@ -5131,10 +5083,6 @@ static int evergreen_startup(struct radeon_device *rdev)
5131 } 5083 }
5132 } 5084 }
5133 5085
5134 r = r600_vram_scratch_init(rdev);
5135 if (r)
5136 return r;
5137
5138 if (rdev->flags & RADEON_IS_AGP) { 5086 if (rdev->flags & RADEON_IS_AGP) {
5139 evergreen_agp_enable(rdev); 5087 evergreen_agp_enable(rdev);
5140 } else { 5088 } else {
@@ -5144,17 +5092,11 @@ static int evergreen_startup(struct radeon_device *rdev)
5144 } 5092 }
5145 evergreen_gpu_init(rdev); 5093 evergreen_gpu_init(rdev);
5146 5094
5147 r = evergreen_blit_init(rdev);
5148 if (r) {
5149 r600_blit_fini(rdev);
5150 rdev->asic->copy.copy = NULL;
5151 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
5152 }
5153
5154 /* allocate rlc buffers */ 5095 /* allocate rlc buffers */
5155 if (rdev->flags & RADEON_IS_IGP) { 5096 if (rdev->flags & RADEON_IS_IGP) {
5156 rdev->rlc.reg_list = sumo_rlc_save_restore_register_list; 5097 rdev->rlc.reg_list = sumo_rlc_save_restore_register_list;
5157         rdev->rlc.reg_list_size = sumo_rlc_save_restore_register_list_size;
5098         rdev->rlc.reg_list_size =
5099             (u32)ARRAY_SIZE(sumo_rlc_save_restore_register_list);
5158 rdev->rlc.cs_data = evergreen_cs_data; 5100 rdev->rlc.cs_data = evergreen_cs_data;
5159 r = sumo_rlc_init(rdev); 5101 r = sumo_rlc_init(rdev);
5160 if (r) { 5102 if (r) {
@@ -5180,7 +5122,7 @@ static int evergreen_startup(struct radeon_device *rdev)
5180 return r; 5122 return r;
5181 } 5123 }
5182 5124
5183     r = rv770_uvd_resume(rdev);
5125     r = uvd_v2_2_resume(rdev);
5184 if (!r) { 5126 if (!r) {
5185 r = radeon_fence_driver_start_ring(rdev, 5127 r = radeon_fence_driver_start_ring(rdev,
5186 R600_RING_TYPE_UVD_INDEX); 5128 R600_RING_TYPE_UVD_INDEX);
@@ -5209,14 +5151,14 @@ static int evergreen_startup(struct radeon_device *rdev)
5209 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 5151 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
5210 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, 5152 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
5211 R600_CP_RB_RPTR, R600_CP_RB_WPTR, 5153 R600_CP_RB_RPTR, R600_CP_RB_WPTR,
5212 0, 0xfffff, RADEON_CP_PACKET2); 5154 RADEON_CP_PACKET2);
5213 if (r) 5155 if (r)
5214 return r; 5156 return r;
5215 5157
5216 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; 5158 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
5217 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, 5159 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
5218 DMA_RB_RPTR, DMA_RB_WPTR, 5160 DMA_RB_RPTR, DMA_RB_WPTR,
5219                  2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
5161                  DMA_PACKET(DMA_PACKET_NOP, 0, 0));
5220 if (r) 5162 if (r)
5221 return r; 5163 return r;
5222 5164
@@ -5232,12 +5174,11 @@ static int evergreen_startup(struct radeon_device *rdev)
5232 5174
5233 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; 5175 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
5234 if (ring->ring_size) { 5176 if (ring->ring_size) {
5235         r = radeon_ring_init(rdev, ring, ring->ring_size,
5236                      R600_WB_UVD_RPTR_OFFSET,
5237                      UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
5238                      0, 0xfffff, RADEON_CP_PACKET2);
5239         if (!r)
5240             r = r600_uvd_init(rdev);
5177         r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
5178                      UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
5179                      RADEON_CP_PACKET2);
5180         if (!r)
5181             r = uvd_v1_0_init(rdev);
5241 5182
5242 if (r) 5183 if (r)
5243 DRM_ERROR("radeon: error initializing UVD (%d).\n", r); 5184 DRM_ERROR("radeon: error initializing UVD (%d).\n", r);
@@ -5292,7 +5233,7 @@ int evergreen_resume(struct radeon_device *rdev)
5292int evergreen_suspend(struct radeon_device *rdev) 5233int evergreen_suspend(struct radeon_device *rdev)
5293{ 5234{
5294 r600_audio_fini(rdev); 5235 r600_audio_fini(rdev);
5295     r600_uvd_stop(rdev);
5236     uvd_v1_0_fini(rdev);
5296 radeon_uvd_suspend(rdev); 5237 radeon_uvd_suspend(rdev);
5297 r700_cp_stop(rdev); 5238 r700_cp_stop(rdev);
5298 r600_dma_stop(rdev); 5239 r600_dma_stop(rdev);
@@ -5420,7 +5361,6 @@ int evergreen_init(struct radeon_device *rdev)
5420void evergreen_fini(struct radeon_device *rdev) 5361void evergreen_fini(struct radeon_device *rdev)
5421{ 5362{
5422 r600_audio_fini(rdev); 5363 r600_audio_fini(rdev);
5423 r600_blit_fini(rdev);
5424 r700_cp_fini(rdev); 5364 r700_cp_fini(rdev);
5425 r600_dma_fini(rdev); 5365 r600_dma_fini(rdev);
5426 r600_irq_fini(rdev); 5366 r600_irq_fini(rdev);
@@ -5430,7 +5370,7 @@ void evergreen_fini(struct radeon_device *rdev)
5430 radeon_ib_pool_fini(rdev); 5370 radeon_ib_pool_fini(rdev);
5431 radeon_irq_kms_fini(rdev); 5371 radeon_irq_kms_fini(rdev);
5432 evergreen_pcie_gart_fini(rdev); 5372 evergreen_pcie_gart_fini(rdev);
5433     r600_uvd_stop(rdev);
5373     uvd_v1_0_fini(rdev);
5434 radeon_uvd_fini(rdev); 5374 radeon_uvd_fini(rdev);
5435 r600_vram_scratch_fini(rdev); 5375 r600_vram_scratch_fini(rdev);
5436 radeon_gem_fini(rdev); 5376 radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
deleted file mode 100644
index 057c87b6515a..000000000000
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ /dev/null
@@ -1,729 +0,0 @@
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Alex Deucher <alexander.deucher@amd.com>
25 */
26
27#include <drm/drmP.h>
28#include <drm/radeon_drm.h>
29#include "radeon.h"
30
31#include "evergreend.h"
32#include "evergreen_blit_shaders.h"
33#include "cayman_blit_shaders.h"
34#include "radeon_blit_common.h"
35
36/* emits 17 */
37static void
38set_render_target(struct radeon_device *rdev, int format,
39 int w, int h, u64 gpu_addr)
40{
41 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
42 u32 cb_color_info;
43 int pitch, slice;
44
45 h = ALIGN(h, 8);
46 if (h < 8)
47 h = 8;
48
49 cb_color_info = CB_FORMAT(format) |
50 CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
51 CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
52 pitch = (w / 8) - 1;
53 slice = ((w * h) / 64) - 1;
54
55 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
56 radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
57 radeon_ring_write(ring, gpu_addr >> 8);
58 radeon_ring_write(ring, pitch);
59 radeon_ring_write(ring, slice);
60 radeon_ring_write(ring, 0);
61 radeon_ring_write(ring, cb_color_info);
62 radeon_ring_write(ring, 0);
63 radeon_ring_write(ring, (w - 1) | ((h - 1) << 16));
64 radeon_ring_write(ring, 0);
65 radeon_ring_write(ring, 0);
66 radeon_ring_write(ring, 0);
67 radeon_ring_write(ring, 0);
68 radeon_ring_write(ring, 0);
69 radeon_ring_write(ring, 0);
70 radeon_ring_write(ring, 0);
71 radeon_ring_write(ring, 0);
72}
73
74/* emits 5dw */
75static void
76cp_set_surface_sync(struct radeon_device *rdev,
77 u32 sync_type, u32 size,
78 u64 mc_addr)
79{
80 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
81 u32 cp_coher_size;
82
83 if (size == 0xffffffff)
84 cp_coher_size = 0xffffffff;
85 else
86 cp_coher_size = ((size + 255) >> 8);
87
88 if (rdev->family >= CHIP_CAYMAN) {
89 /* CP_COHER_CNTL2 has to be set manually when submitting a surface_sync
90 * to the RB directly. For IBs, the CP programs this as part of the
91 * surface_sync packet.
92 */
93 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
94 radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
95 radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */
96 }
97 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
98 radeon_ring_write(ring, sync_type);
99 radeon_ring_write(ring, cp_coher_size);
100 radeon_ring_write(ring, mc_addr >> 8);
101 radeon_ring_write(ring, 10); /* poll interval */
102}
103
104/* emits 11dw + 1 surface sync = 16dw */
105static void
106set_shaders(struct radeon_device *rdev)
107{
108 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
109 u64 gpu_addr;
110
111 /* VS */
112 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
113 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
114 radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
115 radeon_ring_write(ring, gpu_addr >> 8);
116 radeon_ring_write(ring, 2);
117 radeon_ring_write(ring, 0);
118
119 /* PS */
120 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
121 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
122 radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
123 radeon_ring_write(ring, gpu_addr >> 8);
124 radeon_ring_write(ring, 1);
125 radeon_ring_write(ring, 0);
126 radeon_ring_write(ring, 2);
127
128 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
129 cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
130}
131
132/* emits 10 + 1 sync (5) = 15 */
133static void
134set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
135{
136 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
137 u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
138
139 /* high addr, stride */
140 sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
141 SQ_VTXC_STRIDE(16);
142#ifdef __BIG_ENDIAN
143 sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
144#endif
145 /* xyzw swizzles */
146 sq_vtx_constant_word3 = SQ_VTCX_SEL_X(SQ_SEL_X) |
147 SQ_VTCX_SEL_Y(SQ_SEL_Y) |
148 SQ_VTCX_SEL_Z(SQ_SEL_Z) |
149 SQ_VTCX_SEL_W(SQ_SEL_W);
150
151 radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
152 radeon_ring_write(ring, 0x580);
153 radeon_ring_write(ring, gpu_addr & 0xffffffff);
154 radeon_ring_write(ring, 48 - 1); /* size */
155 radeon_ring_write(ring, sq_vtx_constant_word2);
156 radeon_ring_write(ring, sq_vtx_constant_word3);
157 radeon_ring_write(ring, 0);
158 radeon_ring_write(ring, 0);
159 radeon_ring_write(ring, 0);
160 radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
161
162 if ((rdev->family == CHIP_CEDAR) ||
163 (rdev->family == CHIP_PALM) ||
164 (rdev->family == CHIP_SUMO) ||
165 (rdev->family == CHIP_SUMO2) ||
166 (rdev->family == CHIP_CAICOS))
167 cp_set_surface_sync(rdev,
168 PACKET3_TC_ACTION_ENA, 48, gpu_addr);
169 else
170 cp_set_surface_sync(rdev,
171 PACKET3_VC_ACTION_ENA, 48, gpu_addr);
172
173}
174
175/* emits 10 */
176static void
177set_tex_resource(struct radeon_device *rdev,
178 int format, int w, int h, int pitch,
179 u64 gpu_addr, u32 size)
180{
181 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
182 u32 sq_tex_resource_word0, sq_tex_resource_word1;
183 u32 sq_tex_resource_word4, sq_tex_resource_word7;
184
185 if (h < 1)
186 h = 1;
187
188 sq_tex_resource_word0 = TEX_DIM(SQ_TEX_DIM_2D);
189 sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
190 ((w - 1) << 18));
191 sq_tex_resource_word1 = ((h - 1) << 0) |
192 TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
193 /* xyzw swizzles */
194 sq_tex_resource_word4 = TEX_DST_SEL_X(SQ_SEL_X) |
195 TEX_DST_SEL_Y(SQ_SEL_Y) |
196 TEX_DST_SEL_Z(SQ_SEL_Z) |
197 TEX_DST_SEL_W(SQ_SEL_W);
198
199 sq_tex_resource_word7 = format |
200 S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE);
201
202 cp_set_surface_sync(rdev,
203 PACKET3_TC_ACTION_ENA, size, gpu_addr);
204
205 radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
206 radeon_ring_write(ring, 0);
207 radeon_ring_write(ring, sq_tex_resource_word0);
208 radeon_ring_write(ring, sq_tex_resource_word1);
209 radeon_ring_write(ring, gpu_addr >> 8);
210 radeon_ring_write(ring, gpu_addr >> 8);
211 radeon_ring_write(ring, sq_tex_resource_word4);
212 radeon_ring_write(ring, 0);
213 radeon_ring_write(ring, 0);
214 radeon_ring_write(ring, sq_tex_resource_word7);
215}
216
217/* emits 12 */
218static void
219set_scissors(struct radeon_device *rdev, int x1, int y1,
220 int x2, int y2)
221{
222 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
223 /* workaround some hw bugs */
224 if (x2 == 0)
225 x1 = 1;
226 if (y2 == 0)
227 y1 = 1;
228 if (rdev->family >= CHIP_CAYMAN) {
229 if ((x2 == 1) && (y2 == 1))
230 x2 = 2;
231 }
232
233 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
234 radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
235 radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
236 radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
237
238 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
239 radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
240 radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
241 radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
242
243 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
244 radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
245 radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
246 radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
247}
248
249/* emits 10 */
250static void
251draw_auto(struct radeon_device *rdev)
252{
253 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
254 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
255 radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
256 radeon_ring_write(ring, DI_PT_RECTLIST);
257
258 radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
259 radeon_ring_write(ring,
260#ifdef __BIG_ENDIAN
261 (2 << 2) |
262#endif
263 DI_INDEX_SIZE_16_BIT);
264
265 radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
266 radeon_ring_write(ring, 1);
267
268 radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
269 radeon_ring_write(ring, 3);
270 radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
271
272}
273
274/* emits 39 */
275static void
276set_default_state(struct radeon_device *rdev)
277{
278 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
279 u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
280 u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
281 u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
282 int num_ps_gprs, num_vs_gprs, num_temp_gprs;
283 int num_gs_gprs, num_es_gprs, num_hs_gprs, num_ls_gprs;
284 int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
285 int num_hs_threads, num_ls_threads;
286 int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
287 int num_hs_stack_entries, num_ls_stack_entries;
288 u64 gpu_addr;
289 int dwords;
290
291 /* set clear context state */
292 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
293 radeon_ring_write(ring, 0);
294
295 if (rdev->family < CHIP_CAYMAN) {
296 switch (rdev->family) {
297 case CHIP_CEDAR:
298 default:
299 num_ps_gprs = 93;
300 num_vs_gprs = 46;
301 num_temp_gprs = 4;
302 num_gs_gprs = 31;
303 num_es_gprs = 31;
304 num_hs_gprs = 23;
305 num_ls_gprs = 23;
306 num_ps_threads = 96;
307 num_vs_threads = 16;
308 num_gs_threads = 16;
309 num_es_threads = 16;
310 num_hs_threads = 16;
311 num_ls_threads = 16;
312 num_ps_stack_entries = 42;
313 num_vs_stack_entries = 42;
314 num_gs_stack_entries = 42;
315 num_es_stack_entries = 42;
316 num_hs_stack_entries = 42;
317 num_ls_stack_entries = 42;
318 break;
319 case CHIP_REDWOOD:
320 num_ps_gprs = 93;
321 num_vs_gprs = 46;
322 num_temp_gprs = 4;
323 num_gs_gprs = 31;
324 num_es_gprs = 31;
325 num_hs_gprs = 23;
326 num_ls_gprs = 23;
327 num_ps_threads = 128;
328 num_vs_threads = 20;
329 num_gs_threads = 20;
330 num_es_threads = 20;
331 num_hs_threads = 20;
332 num_ls_threads = 20;
333 num_ps_stack_entries = 42;
334 num_vs_stack_entries = 42;
335 num_gs_stack_entries = 42;
336 num_es_stack_entries = 42;
337 num_hs_stack_entries = 42;
338 num_ls_stack_entries = 42;
339 break;
340 case CHIP_JUNIPER:
341 num_ps_gprs = 93;
342 num_vs_gprs = 46;
343 num_temp_gprs = 4;
344 num_gs_gprs = 31;
345 num_es_gprs = 31;
346 num_hs_gprs = 23;
347 num_ls_gprs = 23;
348 num_ps_threads = 128;
349 num_vs_threads = 20;
350 num_gs_threads = 20;
351 num_es_threads = 20;
352 num_hs_threads = 20;
353 num_ls_threads = 20;
354 num_ps_stack_entries = 85;
355 num_vs_stack_entries = 85;
356 num_gs_stack_entries = 85;
357 num_es_stack_entries = 85;
358 num_hs_stack_entries = 85;
359 num_ls_stack_entries = 85;
360 break;
361 case CHIP_CYPRESS:
362 case CHIP_HEMLOCK:
363 num_ps_gprs = 93;
364 num_vs_gprs = 46;
365 num_temp_gprs = 4;
366 num_gs_gprs = 31;
367 num_es_gprs = 31;
368 num_hs_gprs = 23;
369 num_ls_gprs = 23;
370 num_ps_threads = 128;
371 num_vs_threads = 20;
372 num_gs_threads = 20;
373 num_es_threads = 20;
374 num_hs_threads = 20;
375 num_ls_threads = 20;
376 num_ps_stack_entries = 85;
377 num_vs_stack_entries = 85;
378 num_gs_stack_entries = 85;
379 num_es_stack_entries = 85;
380 num_hs_stack_entries = 85;
381 num_ls_stack_entries = 85;
382 break;
383 case CHIP_PALM:
384 num_ps_gprs = 93;
385 num_vs_gprs = 46;
386 num_temp_gprs = 4;
387 num_gs_gprs = 31;
388 num_es_gprs = 31;
389 num_hs_gprs = 23;
390 num_ls_gprs = 23;
391 num_ps_threads = 96;
392 num_vs_threads = 16;
393 num_gs_threads = 16;
394 num_es_threads = 16;
395 num_hs_threads = 16;
396 num_ls_threads = 16;
397 num_ps_stack_entries = 42;
398 num_vs_stack_entries = 42;
399 num_gs_stack_entries = 42;
400 num_es_stack_entries = 42;
401 num_hs_stack_entries = 42;
402 num_ls_stack_entries = 42;
403 break;
404 case CHIP_SUMO:
405 num_ps_gprs = 93;
406 num_vs_gprs = 46;
407 num_temp_gprs = 4;
408 num_gs_gprs = 31;
409 num_es_gprs = 31;
410 num_hs_gprs = 23;
411 num_ls_gprs = 23;
412 num_ps_threads = 96;
413 num_vs_threads = 25;
414 num_gs_threads = 25;
415 num_es_threads = 25;
416 num_hs_threads = 25;
417 num_ls_threads = 25;
418 num_ps_stack_entries = 42;
419 num_vs_stack_entries = 42;
420 num_gs_stack_entries = 42;
421 num_es_stack_entries = 42;
422 num_hs_stack_entries = 42;
423 num_ls_stack_entries = 42;
424 break;
425 case CHIP_SUMO2:
426 num_ps_gprs = 93;
427 num_vs_gprs = 46;
428 num_temp_gprs = 4;
429 num_gs_gprs = 31;
430 num_es_gprs = 31;
431 num_hs_gprs = 23;
432 num_ls_gprs = 23;
433 num_ps_threads = 96;
434 num_vs_threads = 25;
435 num_gs_threads = 25;
436 num_es_threads = 25;
437 num_hs_threads = 25;
438 num_ls_threads = 25;
439 num_ps_stack_entries = 85;
440 num_vs_stack_entries = 85;
441 num_gs_stack_entries = 85;
442 num_es_stack_entries = 85;
443 num_hs_stack_entries = 85;
444 num_ls_stack_entries = 85;
445 break;
446 case CHIP_BARTS:
447 num_ps_gprs = 93;
448 num_vs_gprs = 46;
449 num_temp_gprs = 4;
450 num_gs_gprs = 31;
451 num_es_gprs = 31;
452 num_hs_gprs = 23;
453 num_ls_gprs = 23;
454 num_ps_threads = 128;
455 num_vs_threads = 20;
456 num_gs_threads = 20;
457 num_es_threads = 20;
458 num_hs_threads = 20;
459 num_ls_threads = 20;
460 num_ps_stack_entries = 85;
461 num_vs_stack_entries = 85;
462 num_gs_stack_entries = 85;
463 num_es_stack_entries = 85;
464 num_hs_stack_entries = 85;
465 num_ls_stack_entries = 85;
466 break;
467 case CHIP_TURKS:
468 num_ps_gprs = 93;
469 num_vs_gprs = 46;
470 num_temp_gprs = 4;
471 num_gs_gprs = 31;
472 num_es_gprs = 31;
473 num_hs_gprs = 23;
474 num_ls_gprs = 23;
475 num_ps_threads = 128;
476 num_vs_threads = 20;
477 num_gs_threads = 20;
478 num_es_threads = 20;
479 num_hs_threads = 20;
480 num_ls_threads = 20;
481 num_ps_stack_entries = 42;
482 num_vs_stack_entries = 42;
483 num_gs_stack_entries = 42;
484 num_es_stack_entries = 42;
485 num_hs_stack_entries = 42;
486 num_ls_stack_entries = 42;
487 break;
488 case CHIP_CAICOS:
489 num_ps_gprs = 93;
490 num_vs_gprs = 46;
491 num_temp_gprs = 4;
492 num_gs_gprs = 31;
493 num_es_gprs = 31;
494 num_hs_gprs = 23;
495 num_ls_gprs = 23;
496 num_ps_threads = 128;
497 num_vs_threads = 10;
498 num_gs_threads = 10;
499 num_es_threads = 10;
500 num_hs_threads = 10;
501 num_ls_threads = 10;
502 num_ps_stack_entries = 42;
503 num_vs_stack_entries = 42;
504 num_gs_stack_entries = 42;
505 num_es_stack_entries = 42;
506 num_hs_stack_entries = 42;
507 num_ls_stack_entries = 42;
508 break;
509 }
510
511 if ((rdev->family == CHIP_CEDAR) ||
512 (rdev->family == CHIP_PALM) ||
513 (rdev->family == CHIP_SUMO) ||
514 (rdev->family == CHIP_SUMO2) ||
515 (rdev->family == CHIP_CAICOS))
516 sq_config = 0;
517 else
518 sq_config = VC_ENABLE;
519
520 sq_config |= (EXPORT_SRC_C |
521 CS_PRIO(0) |
522 LS_PRIO(0) |
523 HS_PRIO(0) |
524 PS_PRIO(0) |
525 VS_PRIO(1) |
526 GS_PRIO(2) |
527 ES_PRIO(3));
528
529 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
530 NUM_VS_GPRS(num_vs_gprs) |
531 NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
532 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
533 NUM_ES_GPRS(num_es_gprs));
534 sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
535 NUM_LS_GPRS(num_ls_gprs));
536 sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
537 NUM_VS_THREADS(num_vs_threads) |
538 NUM_GS_THREADS(num_gs_threads) |
539 NUM_ES_THREADS(num_es_threads));
540 sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
541 NUM_LS_THREADS(num_ls_threads));
542 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
543 NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
544 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
545 NUM_ES_STACK_ENTRIES(num_es_stack_entries));
546 sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
547 NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
548
549 /* disable dyn gprs */
550 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
551 radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
552 radeon_ring_write(ring, 0);
553
554 /* setup LDS */
555 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
556 radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
557 radeon_ring_write(ring, 0x10001000);
558
559 /* SQ config */
560 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11));
561 radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
562 radeon_ring_write(ring, sq_config);
563 radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
564 radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
565 radeon_ring_write(ring, sq_gpr_resource_mgmt_3);
566 radeon_ring_write(ring, 0);
567 radeon_ring_write(ring, 0);
568 radeon_ring_write(ring, sq_thread_resource_mgmt);
569 radeon_ring_write(ring, sq_thread_resource_mgmt_2);
570 radeon_ring_write(ring, sq_stack_resource_mgmt_1);
571 radeon_ring_write(ring, sq_stack_resource_mgmt_2);
572 radeon_ring_write(ring, sq_stack_resource_mgmt_3);
573 }
574
575 /* CONTEXT_CONTROL */
576 radeon_ring_write(ring, 0xc0012800);
577 radeon_ring_write(ring, 0x80000000);
578 radeon_ring_write(ring, 0x80000000);
579
580 /* SQ_VTX_BASE_VTX_LOC */
581 radeon_ring_write(ring, 0xc0026f00);
582 radeon_ring_write(ring, 0x00000000);
583 radeon_ring_write(ring, 0x00000000);
584 radeon_ring_write(ring, 0x00000000);
585
586 /* SET_SAMPLER */
587 radeon_ring_write(ring, 0xc0036e00);
588 radeon_ring_write(ring, 0x00000000);
589 radeon_ring_write(ring, 0x00000012);
590 radeon_ring_write(ring, 0x00000000);
591 radeon_ring_write(ring, 0x00000000);
592
593 /* set to DX10/11 mode */
594 radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
595 radeon_ring_write(ring, 1);
596
597 /* emit an IB pointing at default state */
598 dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
599 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
600 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
601 radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC);
602 radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
603 radeon_ring_write(ring, dwords);
604
605}
606
607int evergreen_blit_init(struct radeon_device *rdev)
608{
609 u32 obj_size;
610 int i, r, dwords;
611 void *ptr;
612 u32 packet2s[16];
613 int num_packet2s = 0;
614
615 rdev->r600_blit.primitives.set_render_target = set_render_target;
616 rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
617 rdev->r600_blit.primitives.set_shaders = set_shaders;
618 rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
619 rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
620 rdev->r600_blit.primitives.set_scissors = set_scissors;
621 rdev->r600_blit.primitives.draw_auto = draw_auto;
622 rdev->r600_blit.primitives.set_default_state = set_default_state;
623
624 rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
625 rdev->r600_blit.ring_size_common += 55; /* shaders + def state */
626 rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
627 rdev->r600_blit.ring_size_common += 5; /* done copy */
628 rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
629
630 rdev->r600_blit.ring_size_per_loop = 74;
631 if (rdev->family >= CHIP_CAYMAN)
632 rdev->r600_blit.ring_size_per_loop += 9; /* additional DWs for surface sync */
633
634 rdev->r600_blit.max_dim = 16384;
635
636 rdev->r600_blit.state_offset = 0;
637
638 if (rdev->family < CHIP_CAYMAN)
639 rdev->r600_blit.state_len = evergreen_default_size;
640 else
641 rdev->r600_blit.state_len = cayman_default_size;
642
643 dwords = rdev->r600_blit.state_len;
644 while (dwords & 0xf) {
645 packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
646 dwords++;
647 }
648
649 obj_size = dwords * 4;
650 obj_size = ALIGN(obj_size, 256);
651
652 rdev->r600_blit.vs_offset = obj_size;
653 if (rdev->family < CHIP_CAYMAN)
654 obj_size += evergreen_vs_size * 4;
655 else
656 obj_size += cayman_vs_size * 4;
657 obj_size = ALIGN(obj_size, 256);
658
659 rdev->r600_blit.ps_offset = obj_size;
660 if (rdev->family < CHIP_CAYMAN)
661 obj_size += evergreen_ps_size * 4;
662 else
663 obj_size += cayman_ps_size * 4;
664 obj_size = ALIGN(obj_size, 256);
665
666 /* pin copy shader into vram if not already initialized */
667 if (!rdev->r600_blit.shader_obj) {
668 r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
669 RADEON_GEM_DOMAIN_VRAM,
670 NULL, &rdev->r600_blit.shader_obj);
671 if (r) {
672 DRM_ERROR("evergreen failed to allocate shader\n");
673 return r;
674 }
675
676 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
677 if (unlikely(r != 0))
678 return r;
679 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
680 &rdev->r600_blit.shader_gpu_addr);
681 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
682 if (r) {
683 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
684 return r;
685 }
686 }
687
688 DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n",
689 obj_size,
690 rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
691
692 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
693 if (unlikely(r != 0))
694 return r;
695 r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
696 if (r) {
697 DRM_ERROR("failed to map blit object %d\n", r);
698 return r;
699 }
700
701 if (rdev->family < CHIP_CAYMAN) {
702 memcpy_toio(ptr + rdev->r600_blit.state_offset,
703 evergreen_default_state, rdev->r600_blit.state_len * 4);
704
705 if (num_packet2s)
706 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
707 packet2s, num_packet2s * 4);
708 for (i = 0; i < evergreen_vs_size; i++)
709 *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
710 for (i = 0; i < evergreen_ps_size; i++)
711 *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
712 } else {
713 memcpy_toio(ptr + rdev->r600_blit.state_offset,
714 cayman_default_state, rdev->r600_blit.state_len * 4);
715
716 if (num_packet2s)
717 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
718 packet2s, num_packet2s * 4);
719 for (i = 0; i < cayman_vs_size; i++)
720 *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(cayman_vs[i]);
721 for (i = 0; i < cayman_ps_size; i++)
722 *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(cayman_ps[i]);
723 }
724 radeon_bo_kunmap(rdev->r600_blit.shader_obj);
725 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
726
727 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
728 return 0;
729}
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
index f85c0af115b5..d43383470cdf 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
@@ -300,58 +300,4 @@ const u32 evergreen_default_state[] =
300 0x00000010, /* */ 300 0x00000010, /* */
301}; 301};
302 302
303const u32 evergreen_vs[] =
304{
305 0x00000004,
306 0x80800400,
307 0x0000a03c,
308 0x95000688,
309 0x00004000,
310 0x15200688,
311 0x00000000,
312 0x00000000,
313 0x3c000000,
314 0x67961001,
315#ifdef __BIG_ENDIAN
316 0x000a0000,
317#else
318 0x00080000,
319#endif
320 0x00000000,
321 0x1c000000,
322 0x67961000,
323#ifdef __BIG_ENDIAN
324 0x00020008,
325#else
326 0x00000008,
327#endif
328 0x00000000,
329};
330
331const u32 evergreen_ps[] =
332{
333 0x00000003,
334 0xa00c0000,
335 0x00000008,
336 0x80400000,
337 0x00000000,
338 0x95200688,
339 0x00380400,
340 0x00146b10,
341 0x00380000,
342 0x20146b10,
343 0x00380400,
344 0x40146b00,
345 0x80380000,
346 0x60146b00,
347 0x00000000,
348 0x00000000,
349 0x00000010,
350 0x000d1000,
351 0xb0800000,
352 0x00000000,
353};
354
355const u32 evergreen_ps_size = ARRAY_SIZE(evergreen_ps);
356const u32 evergreen_vs_size = ARRAY_SIZE(evergreen_vs);
357const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state); 303const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state);
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
new file mode 100644
index 000000000000..6a0656d00ed0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -0,0 +1,190 @@
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <drm/drmP.h>
25#include "radeon.h"
26#include "radeon_asic.h"
27#include "evergreend.h"
28
29u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev);
30
31/**
32 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
33 *
34 * @rdev: radeon_device pointer
35 * @fence: radeon fence object
36 *
37 * Add a DMA fence packet to the ring to write
38 * the fence seq number and DMA trap packet to generate
39 * an interrupt if needed (evergreen-SI).
40 */
41void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
42 struct radeon_fence *fence)
43{
44 struct radeon_ring *ring = &rdev->ring[fence->ring];
45 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
46 /* write the fence */
47 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
48 radeon_ring_write(ring, addr & 0xfffffffc);
49 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
50 radeon_ring_write(ring, fence->seq);
51 /* generate an interrupt */
52 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
53 /* flush HDP */
54 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
55 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
56 radeon_ring_write(ring, 1);
57}
58
59/**
60 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
61 *
62 * @rdev: radeon_device pointer
63 * @ib: IB object to schedule
64 *
65 * Schedule an IB in the DMA ring (evergreen).
66 */
67void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
68 struct radeon_ib *ib)
69{
70 struct radeon_ring *ring = &rdev->ring[ib->ring];
71
72 if (rdev->wb.enabled) {
73 u32 next_rptr = ring->wptr + 4;
74 while ((next_rptr & 7) != 5)
75 next_rptr++;
76 next_rptr += 3;
77 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
78 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
79 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
80 radeon_ring_write(ring, next_rptr);
81 }
82
83 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
84 * Pad as necessary with NOPs.
85 */
86 while ((ring->wptr & 7) != 5)
87 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
88 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
89 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
90 radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
91
92}
93
94/**
95 * evergreen_copy_dma - copy pages using the DMA engine
96 *
97 * @rdev: radeon_device pointer
98 * @src_offset: src GPU address
99 * @dst_offset: dst GPU address
100 * @num_gpu_pages: number of GPU pages to xfer
101 * @fence: radeon fence object
102 *
103 * Copy GPU paging using the DMA engine (evergreen-cayman).
104 * Used by the radeon ttm implementation to move pages if
105 * registered as the asic copy callback.
106 */
107int evergreen_copy_dma(struct radeon_device *rdev,
108 uint64_t src_offset, uint64_t dst_offset,
109 unsigned num_gpu_pages,
110 struct radeon_fence **fence)
111{
112 struct radeon_semaphore *sem = NULL;
113 int ring_index = rdev->asic->copy.dma_ring_index;
114 struct radeon_ring *ring = &rdev->ring[ring_index];
115 u32 size_in_dw, cur_size_in_dw;
116 int i, num_loops;
117 int r = 0;
118
119 r = radeon_semaphore_create(rdev, &sem);
120 if (r) {
121 DRM_ERROR("radeon: moving bo (%d).\n", r);
122 return r;
123 }
124
125 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
126 num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
127 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
128 if (r) {
129 DRM_ERROR("radeon: moving bo (%d).\n", r);
130 radeon_semaphore_free(rdev, &sem, NULL);
131 return r;
132 }
133
134 if (radeon_fence_need_sync(*fence, ring->idx)) {
135 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
136 ring->idx);
137 radeon_fence_note_sync(*fence, ring->idx);
138 } else {
139 radeon_semaphore_free(rdev, &sem, NULL);
140 }
141
142 for (i = 0; i < num_loops; i++) {
143 cur_size_in_dw = size_in_dw;
144 if (cur_size_in_dw > 0xFFFFF)
145 cur_size_in_dw = 0xFFFFF;
146 size_in_dw -= cur_size_in_dw;
147 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
148 radeon_ring_write(ring, dst_offset & 0xfffffffc);
149 radeon_ring_write(ring, src_offset & 0xfffffffc);
150 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
151 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
152 src_offset += cur_size_in_dw * 4;
153 dst_offset += cur_size_in_dw * 4;
154 }
155
156 r = radeon_fence_emit(rdev, fence, ring->idx);
157 if (r) {
158 radeon_ring_unlock_undo(rdev, ring);
159 return r;
160 }
161
162 radeon_ring_unlock_commit(rdev, ring);
163 radeon_semaphore_free(rdev, &sem, *fence);
164
165 return r;
166}
167
168/**
169 * evergreen_dma_is_lockup - Check if the DMA engine is locked up
170 *
171 * @rdev: radeon_device pointer
172 * @ring: radeon_ring structure holding ring information
173 *
174 * Check if the async DMA engine is locked up.
175 * Returns true if the engine appears to be locked up, false if not.
176 */
177bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
178{
179 u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
180
181 if (!(reset_mask & RADEON_RESET_DMA)) {
182 radeon_ring_lockup_update(ring);
183 return false;
184 }
185 /* force ring activities */
186 radeon_ring_force_activity(rdev, ring);
187 return radeon_ring_test_lockup(rdev, ring);
188}
189
190
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index b0e280058b9b..f71ce390aebe 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -32,6 +32,10 @@
32#include "evergreend.h" 32#include "evergreend.h"
33#include "atom.h" 33#include "atom.h"
34 34
35extern void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder);
36extern void dce6_afmt_write_sad_regs(struct drm_encoder *encoder);
37extern void dce6_afmt_select_pin(struct drm_encoder *encoder);
38
35/* 39/*
36 * update the N and CTS parameters for a given pixel clock rate 40 * update the N and CTS parameters for a given pixel clock rate
37 */ 41 */
@@ -54,6 +58,45 @@ static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t cloc
54 WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz); 58 WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
55} 59}
56 60
61static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
62{
63 struct radeon_device *rdev = encoder->dev->dev_private;
64 struct drm_connector *connector;
65 struct radeon_connector *radeon_connector = NULL;
66 u32 tmp;
67 u8 *sadb;
68 int sad_count;
69
70 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
71 if (connector->encoder == encoder)
72 radeon_connector = to_radeon_connector(connector);
73 }
74
75 if (!radeon_connector) {
76 DRM_ERROR("Couldn't find encoder's connector\n");
77 return;
78 }
79
80 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
81 if (sad_count < 0) {
82 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
83 return;
84 }
85
86 /* program the speaker allocation */
87 tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
88 tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
89 /* set HDMI mode */
90 tmp |= HDMI_CONNECTION;
91 if (sad_count)
92 tmp |= SPEAKER_ALLOCATION(sadb[0]);
93 else
94 tmp |= SPEAKER_ALLOCATION(5); /* stereo */
95 WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
96
97 kfree(sadb);
98}
99
57static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder) 100static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
58{ 101{
59 struct radeon_device *rdev = encoder->dev->dev_private; 102 struct radeon_device *rdev = encoder->dev->dev_private;
@@ -157,22 +200,26 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
157 if (!dig || !dig->afmt) 200 if (!dig || !dig->afmt)
158 return; 201 return;
159 202
160     if (max_ratio >= 8) {
161         dto_phase = 192 * 1000;
162         wallclock_ratio = 3;
163     } else if (max_ratio >= 4) {
164         dto_phase = 96 * 1000;
165         wallclock_ratio = 2;
166     } else if (max_ratio >= 2) {
167         dto_phase = 48 * 1000;
168         wallclock_ratio = 1;
169     } else {
170         dto_phase = 24 * 1000;
171         wallclock_ratio = 0;
172     }
173     dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
174     dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
175     WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
203     if (ASIC_IS_DCE6(rdev)) {
204         dto_phase = 24 * 1000;
205     } else {
206         if (max_ratio >= 8) {
207             dto_phase = 192 * 1000;
208             wallclock_ratio = 3;
209         } else if (max_ratio >= 4) {
210             dto_phase = 96 * 1000;
211             wallclock_ratio = 2;
212         } else if (max_ratio >= 2) {
213             dto_phase = 48 * 1000;
214             wallclock_ratio = 1;
215         } else {
216             dto_phase = 24 * 1000;
217             wallclock_ratio = 0;
218         }
219         dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
220         dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
221         WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
222     }
176 223
177 /* XXX two dtos; generally use dto0 for hdmi */ 224 /* XXX two dtos; generally use dto0 for hdmi */
178 /* Express [24MHz / target pixel clock] as an exact rational 225 /* Express [24MHz / target pixel clock] as an exact rational
@@ -260,13 +307,23 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
260 AFMT_60958_CS_CHANNEL_NUMBER_6(7) | 307 AFMT_60958_CS_CHANNEL_NUMBER_6(7) |
261 AFMT_60958_CS_CHANNEL_NUMBER_7(8)); 308 AFMT_60958_CS_CHANNEL_NUMBER_7(8));
262 309
263     /* fglrx sets 0x0001005f | (x & 0x00fc0000) in 0x5f78 here */
310     if (ASIC_IS_DCE6(rdev)) {
311 dce6_afmt_write_speaker_allocation(encoder);
312 } else {
313 dce4_afmt_write_speaker_allocation(encoder);
314 }
264 315
265 WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset, 316 WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
266 AFMT_AUDIO_CHANNEL_ENABLE(0xff)); 317 AFMT_AUDIO_CHANNEL_ENABLE(0xff));
267 318
268 /* fglrx sets 0x40 in 0x5f80 here */ 319 /* fglrx sets 0x40 in 0x5f80 here */
269     evergreen_hdmi_write_sad_regs(encoder);
320 
321 if (ASIC_IS_DCE6(rdev)) {
322 dce6_afmt_select_pin(encoder);
323 dce6_afmt_write_sad_regs(encoder);
324 } else {
325 evergreen_hdmi_write_sad_regs(encoder);
326 }
270 327
271 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); 328 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
272 if (err < 0) { 329 if (err < 0) {
@@ -302,6 +359,8 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
302 359
303void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) 360void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
304{ 361{
362 struct drm_device *dev = encoder->dev;
363 struct radeon_device *rdev = dev->dev_private;
305 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 364 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
306 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 365 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
307 366
@@ -314,6 +373,15 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
314 if (!enable && !dig->afmt->enabled) 373 if (!enable && !dig->afmt->enabled)
315 return; 374 return;
316 375
376 if (enable) {
377 if (ASIC_IS_DCE6(rdev))
378 dig->afmt->pin = dce6_audio_get_pin(rdev);
379 else
380 dig->afmt->pin = r600_audio_get_pin(rdev);
381 } else {
382 dig->afmt->pin = NULL;
383 }
384
317 dig->afmt->enabled = enable; 385 dig->afmt->enabled = enable;
318 386
319 DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n", 387 DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 0d582ac1dc31..8768fd6a1e27 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -714,6 +714,13 @@
714#define AFMT_GENERIC0_7 0x7138 714#define AFMT_GENERIC0_7 0x7138
715 715
716/* DCE4/5 ELD audio interface */ 716/* DCE4/5 ELD audio interface */
717#define AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER 0x5f78
718#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0)
719#define SPEAKER_ALLOCATION_MASK (0x7f << 0)
720#define SPEAKER_ALLOCATION_SHIFT 0
721#define HDMI_CONNECTION (1 << 16)
722#define DP_CONNECTION (1 << 17)
723
717#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x5f84 /* LPCM */ 724#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x5f84 /* LPCM */
718#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x5f88 /* AC3 */ 725#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x5f88 /* AC3 */
719#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x5f8c /* MPEG1 */ 726#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x5f8c /* MPEG1 */
@@ -1153,6 +1160,10 @@
 # define LATENCY_LOW_WATERMARK(x) ((x) << 0)
 # define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
 
+#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
+# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
+# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
+
 #define IH_RB_CNTL 0x3e00
 # define IH_RB_ENABLE (1 << 0)
 # define IH_IB_SIZE(x) ((x) << 1) /* log2 */
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
new file mode 100644
index 000000000000..ecd60809db4e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -0,0 +1,2645 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "drmP.h"
25#include "radeon.h"
26#include "cikd.h"
27#include "r600_dpm.h"
28#include "kv_dpm.h"
29#include "radeon_asic.h"
30#include <linux/seq_file.h>
31
32#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5
33#define KV_MINIMUM_ENGINE_CLOCK 800
34#define SMC_RAM_END 0x40000
35
36static void kv_init_graphics_levels(struct radeon_device *rdev);
37static int kv_calculate_ds_divider(struct radeon_device *rdev);
38static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
39static int kv_calculate_dpm_settings(struct radeon_device *rdev);
40static void kv_enable_new_levels(struct radeon_device *rdev);
41static void kv_program_nbps_index_settings(struct radeon_device *rdev,
42 struct radeon_ps *new_rps);
43static int kv_set_enabled_levels(struct radeon_device *rdev);
44static int kv_force_dpm_highest(struct radeon_device *rdev);
45static int kv_force_dpm_lowest(struct radeon_device *rdev);
46static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
47 struct radeon_ps *new_rps,
48 struct radeon_ps *old_rps);
49static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
50 int min_temp, int max_temp);
51static int kv_init_fps_limits(struct radeon_device *rdev);
52
53void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
54static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate);
55static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate);
56static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate);
57
58extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
59extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
60extern void cik_update_cg(struct radeon_device *rdev,
61 u32 block, bool enable);
62
63static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
64{
65 { 0, 4, 1 },
66 { 1, 4, 1 },
67 { 2, 5, 1 },
68 { 3, 4, 2 },
69 { 4, 1, 1 },
70 { 5, 5, 2 },
71 { 6, 6, 1 },
72 { 7, 9, 2 },
73 { 0xffffffff }
74};
75
76static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
77{
78 { 0, 4, 1 },
79 { 0xffffffff }
80};
81
82static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
83{
84 { 0, 4, 1 },
85 { 0xffffffff }
86};
87
88static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
89{
90 { 0, 4, 1 },
91 { 0xffffffff }
92};
93
94static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
95{
96 { 0, 4, 1 },
97 { 0xffffffff }
98};
99
100static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
101{
102 { 0, 4, 1 },
103 { 1, 4, 1 },
104 { 2, 5, 1 },
105 { 3, 4, 1 },
106 { 4, 1, 1 },
107 { 5, 5, 1 },
108 { 6, 6, 1 },
109 { 7, 9, 1 },
110 { 8, 4, 1 },
111 { 9, 2, 1 },
112 { 10, 3, 1 },
113 { 11, 6, 1 },
114 { 12, 8, 2 },
115 { 13, 1, 1 },
116 { 14, 2, 1 },
117 { 15, 3, 1 },
118 { 16, 1, 1 },
119 { 17, 4, 1 },
120 { 18, 3, 1 },
121 { 19, 1, 1 },
122 { 20, 8, 1 },
123 { 21, 5, 1 },
124 { 22, 1, 1 },
125 { 23, 1, 1 },
126 { 24, 4, 1 },
127 { 27, 6, 1 },
128 { 28, 1, 1 },
129 { 0xffffffff }
130};
131
132static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
133{
134 { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
135};
136
137static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
138{
139 { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
140};
141
142static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
143{
144 { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
145};
146
147static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
148{
149 { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
150};
151
152static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
153{
154 { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
155};
156
157static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
158{
159 { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
160};
161
162static const struct kv_pt_config_reg didt_config_kv[] =
163{
164 { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
165 { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
166 { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
167 { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
168 { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
169 { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
170 { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
171 { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
172 { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
173 { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
174 { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
175 { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
176 { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
177 { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
178 { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
179 { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
180 { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
181 { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
182 { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
183 { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
184 { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
185 { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
186 { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
187 { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
188 { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
189 { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
190 { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
191 { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
192 { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
193 { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
194 { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
195 { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
196 { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
197 { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
198 { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
199 { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
200 { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
201 { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
202 { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
203 { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
204 { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
205 { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
206 { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
207 { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
208 { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
209 { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
210 { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
211 { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
212 { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
213 { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
214 { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
215 { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
216 { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
217 { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
218 { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
219 { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
220 { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
221 { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
222 { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
223 { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
224 { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
225 { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
226 { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
227 { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
228 { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
229 { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
230 { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
231 { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
232 { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
233 { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
234 { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
235 { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
236 { 0xFFFFFFFF }
237};
238
239static struct kv_ps *kv_get_ps(struct radeon_ps *rps)
240{
241 struct kv_ps *ps = rps->ps_priv;
242
243 return ps;
244}
245
246static struct kv_power_info *kv_get_pi(struct radeon_device *rdev)
247{
248 struct kv_power_info *pi = rdev->pm.dpm.priv;
249
250 return pi;
251}
252
253#if 0
254static void kv_program_local_cac_table(struct radeon_device *rdev,
255 const struct kv_lcac_config_values *local_cac_table,
256 const struct kv_lcac_config_reg *local_cac_reg)
257{
258 u32 i, count, data;
259 const struct kv_lcac_config_values *values = local_cac_table;
260
261 while (values->block_id != 0xffffffff) {
262 count = values->signal_id;
263 for (i = 0; i < count; i++) {
264 data = ((values->block_id << local_cac_reg->block_shift) &
265 local_cac_reg->block_mask);
266 data |= ((i << local_cac_reg->signal_shift) &
267 local_cac_reg->signal_mask);
268 data |= ((values->t << local_cac_reg->t_shift) &
269 local_cac_reg->t_mask);
270 data |= ((1 << local_cac_reg->enable_shift) &
271 local_cac_reg->enable_mask);
272 WREG32_SMC(local_cac_reg->cntl, data);
273 }
274 values++;
275 }
276}
277#endif
278
279static int kv_program_pt_config_registers(struct radeon_device *rdev,
280 const struct kv_pt_config_reg *cac_config_regs)
281{
282 const struct kv_pt_config_reg *config_regs = cac_config_regs;
283 u32 data;
284 u32 cache = 0;
285
286 if (config_regs == NULL)
287 return -EINVAL;
288
289 while (config_regs->offset != 0xFFFFFFFF) {
290 if (config_regs->type == KV_CONFIGREG_CACHE) {
291 cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
292 } else {
293 switch (config_regs->type) {
294 case KV_CONFIGREG_SMC_IND:
295 data = RREG32_SMC(config_regs->offset);
296 break;
297 case KV_CONFIGREG_DIDT_IND:
298 data = RREG32_DIDT(config_regs->offset);
299 break;
300 default:
301 data = RREG32(config_regs->offset << 2);
302 break;
303 }
304
305 data &= ~config_regs->mask;
306 data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
307 data |= cache;
308 cache = 0;
309
310 switch (config_regs->type) {
311 case KV_CONFIGREG_SMC_IND:
312 WREG32_SMC(config_regs->offset, data);
313 break;
314 case KV_CONFIGREG_DIDT_IND:
315 WREG32_DIDT(config_regs->offset, data);
316 break;
317 default:
318 WREG32(config_regs->offset << 2, data);
319 break;
320 }
321 }
322 config_regs++;
323 }
324
325 return 0;
326}
327
328static void kv_do_enable_didt(struct radeon_device *rdev, bool enable)
329{
330 struct kv_power_info *pi = kv_get_pi(rdev);
331 u32 data;
332
333 if (pi->caps_sq_ramping) {
334 data = RREG32_DIDT(DIDT_SQ_CTRL0);
335 if (enable)
336 data |= DIDT_CTRL_EN;
337 else
338 data &= ~DIDT_CTRL_EN;
339 WREG32_DIDT(DIDT_SQ_CTRL0, data);
340 }
341
342 if (pi->caps_db_ramping) {
343 data = RREG32_DIDT(DIDT_DB_CTRL0);
344 if (enable)
345 data |= DIDT_CTRL_EN;
346 else
347 data &= ~DIDT_CTRL_EN;
348 WREG32_DIDT(DIDT_DB_CTRL0, data);
349 }
350
351 if (pi->caps_td_ramping) {
352 data = RREG32_DIDT(DIDT_TD_CTRL0);
353 if (enable)
354 data |= DIDT_CTRL_EN;
355 else
356 data &= ~DIDT_CTRL_EN;
357 WREG32_DIDT(DIDT_TD_CTRL0, data);
358 }
359
360 if (pi->caps_tcp_ramping) {
361 data = RREG32_DIDT(DIDT_TCP_CTRL0);
362 if (enable)
363 data |= DIDT_CTRL_EN;
364 else
365 data &= ~DIDT_CTRL_EN;
366 WREG32_DIDT(DIDT_TCP_CTRL0, data);
367 }
368}
369
370static int kv_enable_didt(struct radeon_device *rdev, bool enable)
371{
372 struct kv_power_info *pi = kv_get_pi(rdev);
373 int ret;
374
375 if (pi->caps_sq_ramping ||
376 pi->caps_db_ramping ||
377 pi->caps_td_ramping ||
378 pi->caps_tcp_ramping) {
379 cik_enter_rlc_safe_mode(rdev);
380
381 if (enable) {
382 ret = kv_program_pt_config_registers(rdev, didt_config_kv);
383 if (ret) {
384 cik_exit_rlc_safe_mode(rdev);
385 return ret;
386 }
387 }
388
389 kv_do_enable_didt(rdev, enable);
390
391 cik_exit_rlc_safe_mode(rdev);
392 }
393
394 return 0;
395}
396
397#if 0
398static void kv_initialize_hardware_cac_manager(struct radeon_device *rdev)
399{
400 struct kv_power_info *pi = kv_get_pi(rdev);
401
402 if (pi->caps_cac) {
403 WREG32_SMC(LCAC_SX0_OVR_SEL, 0);
404 WREG32_SMC(LCAC_SX0_OVR_VAL, 0);
405 kv_program_local_cac_table(rdev, sx_local_cac_cfg_kv, sx0_cac_config_reg);
406
407 WREG32_SMC(LCAC_MC0_OVR_SEL, 0);
408 WREG32_SMC(LCAC_MC0_OVR_VAL, 0);
409 kv_program_local_cac_table(rdev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);
410
411 WREG32_SMC(LCAC_MC1_OVR_SEL, 0);
412 WREG32_SMC(LCAC_MC1_OVR_VAL, 0);
413 kv_program_local_cac_table(rdev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);
414
415 WREG32_SMC(LCAC_MC2_OVR_SEL, 0);
416 WREG32_SMC(LCAC_MC2_OVR_VAL, 0);
417 kv_program_local_cac_table(rdev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);
418
419 WREG32_SMC(LCAC_MC3_OVR_SEL, 0);
420 WREG32_SMC(LCAC_MC3_OVR_VAL, 0);
421 kv_program_local_cac_table(rdev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);
422
423 WREG32_SMC(LCAC_CPL_OVR_SEL, 0);
424 WREG32_SMC(LCAC_CPL_OVR_VAL, 0);
425 kv_program_local_cac_table(rdev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
426 }
427}
428#endif
429
430static int kv_enable_smc_cac(struct radeon_device *rdev, bool enable)
431{
432 struct kv_power_info *pi = kv_get_pi(rdev);
433 int ret = 0;
434
435 if (pi->caps_cac) {
436 if (enable) {
437 ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableCac);
438 if (ret)
439 pi->cac_enabled = false;
440 else
441 pi->cac_enabled = true;
442 } else if (pi->cac_enabled) {
443 kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableCac);
444 pi->cac_enabled = false;
445 }
446 }
447
448 return ret;
449}
450
451static int kv_process_firmware_header(struct radeon_device *rdev)
452{
453 struct kv_power_info *pi = kv_get_pi(rdev);
454 u32 tmp;
455 int ret;
456
457 ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
458 offsetof(SMU7_Firmware_Header, DpmTable),
459 &tmp, pi->sram_end);
460
461 if (ret == 0)
462 pi->dpm_table_start = tmp;
463
464 ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
465 offsetof(SMU7_Firmware_Header, SoftRegisters),
466 &tmp, pi->sram_end);
467
468 if (ret == 0)
469 pi->soft_regs_start = tmp;
470
471 return ret;
472}
473
474static int kv_enable_dpm_voltage_scaling(struct radeon_device *rdev)
475{
476 struct kv_power_info *pi = kv_get_pi(rdev);
477 int ret;
478
479 pi->graphics_voltage_change_enable = 1;
480
481 ret = kv_copy_bytes_to_smc(rdev,
482 pi->dpm_table_start +
483 offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
484 &pi->graphics_voltage_change_enable,
485 sizeof(u8), pi->sram_end);
486
487 return ret;
488}
489
490static int kv_set_dpm_interval(struct radeon_device *rdev)
491{
492 struct kv_power_info *pi = kv_get_pi(rdev);
493 int ret;
494
495 pi->graphics_interval = 1;
496
497 ret = kv_copy_bytes_to_smc(rdev,
498 pi->dpm_table_start +
499 offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
500 &pi->graphics_interval,
501 sizeof(u8), pi->sram_end);
502
503 return ret;
504}
505
506static int kv_set_dpm_boot_state(struct radeon_device *rdev)
507{
508 struct kv_power_info *pi = kv_get_pi(rdev);
509 int ret;
510
511 ret = kv_copy_bytes_to_smc(rdev,
512 pi->dpm_table_start +
513 offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
514 &pi->graphics_boot_level,
515 sizeof(u8), pi->sram_end);
516
517 return ret;
518}
519
520static void kv_program_vc(struct radeon_device *rdev)
521{
522 WREG32_SMC(CG_FTV_0, 0x3FFFC000);
523}
524
525static void kv_clear_vc(struct radeon_device *rdev)
526{
527 WREG32_SMC(CG_FTV_0, 0);
528}
529
530static int kv_set_divider_value(struct radeon_device *rdev,
531 u32 index, u32 sclk)
532{
533 struct kv_power_info *pi = kv_get_pi(rdev);
534 struct atom_clock_dividers dividers;
535 int ret;
536
537 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
538 sclk, false, &dividers);
539 if (ret)
540 return ret;
541
542 pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
543 pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);
544
545 return 0;
546}
547
548static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
549 u16 voltage)
550{
551 return 6200 - (voltage * 25);
552}
553
554static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
555 u32 vid_2bit)
556{
557 struct kv_power_info *pi = kv_get_pi(rdev);
558 u32 vid_8bit = sumo_convert_vid2_to_vid7(rdev,
559 &pi->sys_info.vid_mapping_table,
560 vid_2bit);
561
562 return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
563}
564
565
566static int kv_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
567{
568 struct kv_power_info *pi = kv_get_pi(rdev);
569
570 pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
571 pi->graphics_level[index].MinVddNb =
572 cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev, vid));
573
574 return 0;
575}
576
577static int kv_set_at(struct radeon_device *rdev, u32 index, u32 at)
578{
579 struct kv_power_info *pi = kv_get_pi(rdev);
580
581 pi->graphics_level[index].AT = cpu_to_be16((u16)at);
582
583 return 0;
584}
585
586static void kv_dpm_power_level_enable(struct radeon_device *rdev,
587 u32 index, bool enable)
588{
589 struct kv_power_info *pi = kv_get_pi(rdev);
590
591 pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
592}
593
594static void kv_start_dpm(struct radeon_device *rdev)
595{
596 u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
597
598 tmp |= GLOBAL_PWRMGT_EN;
599 WREG32_SMC(GENERAL_PWRMGT, tmp);
600
601 kv_smc_dpm_enable(rdev, true);
602}
603
604static void kv_stop_dpm(struct radeon_device *rdev)
605{
606 kv_smc_dpm_enable(rdev, false);
607}
608
609static void kv_start_am(struct radeon_device *rdev)
610{
611 u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);
612
613 sclk_pwrmgt_cntl &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
614 sclk_pwrmgt_cntl |= DYNAMIC_PM_EN;
615
616 WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
617}
618
619static void kv_reset_am(struct radeon_device *rdev)
620{
621 u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);
622
623 sclk_pwrmgt_cntl |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
624
625 WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
626}
627
628static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze)
629{
630 return kv_notify_message_to_smu(rdev, freeze ?
631 PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
632}
633
634static int kv_force_lowest_valid(struct radeon_device *rdev)
635{
636 return kv_force_dpm_lowest(rdev);
637}
638
639static int kv_unforce_levels(struct radeon_device *rdev)
640{
641 return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
642}
643
644static int kv_update_sclk_t(struct radeon_device *rdev)
645{
646 struct kv_power_info *pi = kv_get_pi(rdev);
647 u32 low_sclk_interrupt_t = 0;
648 int ret = 0;
649
650 if (pi->caps_sclk_throttle_low_notification) {
651 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
652
653 ret = kv_copy_bytes_to_smc(rdev,
654 pi->dpm_table_start +
655 offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
656 (u8 *)&low_sclk_interrupt_t,
657 sizeof(u32), pi->sram_end);
658 }
659 return ret;
660}
661
662static int kv_program_bootup_state(struct radeon_device *rdev)
663{
664 struct kv_power_info *pi = kv_get_pi(rdev);
665 u32 i;
666 struct radeon_clock_voltage_dependency_table *table =
667 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
668
669 if (table && table->count) {
670 for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
671 if ((table->entries[i].clk == pi->boot_pl.sclk) ||
672 (i == 0))
673 break;
674 }
675
676 pi->graphics_boot_level = (u8)i;
677 kv_dpm_power_level_enable(rdev, i, true);
678 } else {
679 struct sumo_sclk_voltage_mapping_table *table =
680 &pi->sys_info.sclk_voltage_mapping_table;
681
682 if (table->num_max_dpm_entries == 0)
683 return -EINVAL;
684
685 for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
686 if ((table->entries[i].sclk_frequency == pi->boot_pl.sclk) ||
687 (i == 0))
688 break;
689 }
690
691 pi->graphics_boot_level = (u8)i;
692 kv_dpm_power_level_enable(rdev, i, true);
693 }
694 return 0;
695}
696
697static int kv_enable_auto_thermal_throttling(struct radeon_device *rdev)
698{
699 struct kv_power_info *pi = kv_get_pi(rdev);
700 int ret;
701
702 pi->graphics_therm_throttle_enable = 1;
703
704 ret = kv_copy_bytes_to_smc(rdev,
705 pi->dpm_table_start +
706 offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
707 &pi->graphics_therm_throttle_enable,
708 sizeof(u8), pi->sram_end);
709
710 return ret;
711}
712
713static int kv_upload_dpm_settings(struct radeon_device *rdev)
714{
715 struct kv_power_info *pi = kv_get_pi(rdev);
716 int ret;
717
718 ret = kv_copy_bytes_to_smc(rdev,
719 pi->dpm_table_start +
720 offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
721 (u8 *)&pi->graphics_level,
722 sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
723 pi->sram_end);
724
725 if (ret)
726 return ret;
727
728 ret = kv_copy_bytes_to_smc(rdev,
729 pi->dpm_table_start +
730 offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
731 &pi->graphics_dpm_level_count,
732 sizeof(u8), pi->sram_end);
733
734 return ret;
735}
736
737static u32 kv_get_clock_difference(u32 a, u32 b)
738{
739 return (a >= b) ? a - b : b - a;
740}
741
742static u32 kv_get_clk_bypass(struct radeon_device *rdev, u32 clk)
743{
744 struct kv_power_info *pi = kv_get_pi(rdev);
745 u32 value;
746
747 if (pi->caps_enable_dfs_bypass) {
748 if (kv_get_clock_difference(clk, 40000) < 200)
749 value = 3;
750 else if (kv_get_clock_difference(clk, 30000) < 200)
751 value = 2;
752 else if (kv_get_clock_difference(clk, 20000) < 200)
753 value = 7;
754 else if (kv_get_clock_difference(clk, 15000) < 200)
755 value = 6;
756 else if (kv_get_clock_difference(clk, 10000) < 200)
757 value = 8;
758 else
759 value = 0;
760 } else {
761 value = 0;
762 }
763
764 return value;
765}
766
767static int kv_populate_uvd_table(struct radeon_device *rdev)
768{
769 struct kv_power_info *pi = kv_get_pi(rdev);
770 struct radeon_uvd_clock_voltage_dependency_table *table =
771 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
772 struct atom_clock_dividers dividers;
773 int ret;
774 u32 i;
775
776 if (table == NULL || table->count == 0)
777 return 0;
778
779 pi->uvd_level_count = 0;
780 for (i = 0; i < table->count; i++) {
781 if (pi->high_voltage_t &&
782 (pi->high_voltage_t < table->entries[i].v))
783 break;
784
785 pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
786 pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
787 pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);
788
789 pi->uvd_level[i].VClkBypassCntl =
790 (u8)kv_get_clk_bypass(rdev, table->entries[i].vclk);
791 pi->uvd_level[i].DClkBypassCntl =
792 (u8)kv_get_clk_bypass(rdev, table->entries[i].dclk);
793
794 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
795 table->entries[i].vclk, false, &dividers);
796 if (ret)
797 return ret;
798 pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;
799
800 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
801 table->entries[i].dclk, false, &dividers);
802 if (ret)
803 return ret;
804 pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;
805
806 pi->uvd_level_count++;
807 }
808
809 ret = kv_copy_bytes_to_smc(rdev,
810 pi->dpm_table_start +
811 offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
812 (u8 *)&pi->uvd_level_count,
813 sizeof(u8), pi->sram_end);
814 if (ret)
815 return ret;
816
817 pi->uvd_interval = 1;
818
819 ret = kv_copy_bytes_to_smc(rdev,
820 pi->dpm_table_start +
821 offsetof(SMU7_Fusion_DpmTable, UVDInterval),
822 &pi->uvd_interval,
823 sizeof(u8), pi->sram_end);
824 if (ret)
825 return ret;
826
827 ret = kv_copy_bytes_to_smc(rdev,
828 pi->dpm_table_start +
829 offsetof(SMU7_Fusion_DpmTable, UvdLevel),
830 (u8 *)&pi->uvd_level,
831 sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
832 pi->sram_end);
833
834 return ret;
835
836}
837
838static int kv_populate_vce_table(struct radeon_device *rdev)
839{
840 struct kv_power_info *pi = kv_get_pi(rdev);
841 int ret;
842 u32 i;
843 struct radeon_vce_clock_voltage_dependency_table *table =
844 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
845 struct atom_clock_dividers dividers;
846
847 if (table == NULL || table->count == 0)
848 return 0;
849
850 pi->vce_level_count = 0;
851 for (i = 0; i < table->count; i++) {
852 if (pi->high_voltage_t &&
853 pi->high_voltage_t < table->entries[i].v)
854 break;
855
856 pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
857 pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
858
859 pi->vce_level[i].ClkBypassCntl =
860 (u8)kv_get_clk_bypass(rdev, table->entries[i].evclk);
861
862 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
863 table->entries[i].evclk, false, &dividers);
864 if (ret)
865 return ret;
866 pi->vce_level[i].Divider = (u8)dividers.post_div;
867
868 pi->vce_level_count++;
869 }
870
871 ret = kv_copy_bytes_to_smc(rdev,
872 pi->dpm_table_start +
873 offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
874 (u8 *)&pi->vce_level_count,
875 sizeof(u8),
876 pi->sram_end);
877 if (ret)
878 return ret;
879
880 pi->vce_interval = 1;
881
882 ret = kv_copy_bytes_to_smc(rdev,
883 pi->dpm_table_start +
884 offsetof(SMU7_Fusion_DpmTable, VCEInterval),
885 (u8 *)&pi->vce_interval,
886 sizeof(u8),
887 pi->sram_end);
888 if (ret)
889 return ret;
890
891 ret = kv_copy_bytes_to_smc(rdev,
892 pi->dpm_table_start +
893 offsetof(SMU7_Fusion_DpmTable, VceLevel),
894 (u8 *)&pi->vce_level,
895 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
896 pi->sram_end);
897
898 return ret;
899}
900
901static int kv_populate_samu_table(struct radeon_device *rdev)
902{
903 struct kv_power_info *pi = kv_get_pi(rdev);
904 struct radeon_clock_voltage_dependency_table *table =
905 &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
906 struct atom_clock_dividers dividers;
907 int ret;
908 u32 i;
909
910 if (table == NULL || table->count == 0)
911 return 0;
912
913 pi->samu_level_count = 0;
914 for (i = 0; i < table->count; i++) {
915 if (pi->high_voltage_t &&
916 pi->high_voltage_t < table->entries[i].v)
917 break;
918
919 pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
920 pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
921
922 pi->samu_level[i].ClkBypassCntl =
923 (u8)kv_get_clk_bypass(rdev, table->entries[i].clk);
924
925 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
926 table->entries[i].clk, false, &dividers);
927 if (ret)
928 return ret;
929 pi->samu_level[i].Divider = (u8)dividers.post_div;
930
931 pi->samu_level_count++;
932 }
933
934 ret = kv_copy_bytes_to_smc(rdev,
935 pi->dpm_table_start +
936 offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
937 (u8 *)&pi->samu_level_count,
938 sizeof(u8),
939 pi->sram_end);
940 if (ret)
941 return ret;
942
943 pi->samu_interval = 1;
944
945 ret = kv_copy_bytes_to_smc(rdev,
946 pi->dpm_table_start +
947 offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
948 (u8 *)&pi->samu_interval,
949 sizeof(u8),
950 pi->sram_end);
951 if (ret)
952 return ret;
953
954 ret = kv_copy_bytes_to_smc(rdev,
955 pi->dpm_table_start +
956 offsetof(SMU7_Fusion_DpmTable, SamuLevel),
957 (u8 *)&pi->samu_level,
958 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
959 pi->sram_end);
960 if (ret)
961 return ret;
962
963 return ret;
964}
965
966
967static int kv_populate_acp_table(struct radeon_device *rdev)
968{
969 struct kv_power_info *pi = kv_get_pi(rdev);
970 struct radeon_clock_voltage_dependency_table *table =
971 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
972 struct atom_clock_dividers dividers;
973 int ret;
974 u32 i;
975
976 if (table == NULL || table->count == 0)
977 return 0;
978
979 pi->acp_level_count = 0;
980 for (i = 0; i < table->count; i++) {
981 pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
982 pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
983
984 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
985 table->entries[i].clk, false, &dividers);
986 if (ret)
987 return ret;
988 pi->acp_level[i].Divider = (u8)dividers.post_div;
989
990 pi->acp_level_count++;
991 }
992
993 ret = kv_copy_bytes_to_smc(rdev,
994 pi->dpm_table_start +
995 offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
996 (u8 *)&pi->acp_level_count,
997 sizeof(u8),
998 pi->sram_end);
999 if (ret)
1000 return ret;
1001
1002 pi->acp_interval = 1;
1003
1004 ret = kv_copy_bytes_to_smc(rdev,
1005 pi->dpm_table_start +
1006 offsetof(SMU7_Fusion_DpmTable, ACPInterval),
1007 (u8 *)&pi->acp_interval,
1008 sizeof(u8),
1009 pi->sram_end);
1010 if (ret)
1011 return ret;
1012
1013 ret = kv_copy_bytes_to_smc(rdev,
1014 pi->dpm_table_start +
1015 offsetof(SMU7_Fusion_DpmTable, AcpLevel),
1016 (u8 *)&pi->acp_level,
1017 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
1018 pi->sram_end);
1019 if (ret)
1020 return ret;
1021
1022 return ret;
1023}
1024
1025static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev)
1026{
1027 struct kv_power_info *pi = kv_get_pi(rdev);
1028 u32 i;
1029 struct radeon_clock_voltage_dependency_table *table =
1030 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1031
1032 if (table && table->count) {
1033 for (i = 0; i < pi->graphics_dpm_level_count; i++) {
1034 if (pi->caps_enable_dfs_bypass) {
1035 if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
1036 pi->graphics_level[i].ClkBypassCntl = 3;
1037 else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
1038 pi->graphics_level[i].ClkBypassCntl = 2;
1039 else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
1040 pi->graphics_level[i].ClkBypassCntl = 7;
1041 else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200)
1042 pi->graphics_level[i].ClkBypassCntl = 6;
1043 else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200)
1044 pi->graphics_level[i].ClkBypassCntl = 8;
1045 else
1046 pi->graphics_level[i].ClkBypassCntl = 0;
1047 } else {
1048 pi->graphics_level[i].ClkBypassCntl = 0;
1049 }
1050 }
1051 } else {
1052 struct sumo_sclk_voltage_mapping_table *table =
1053 &pi->sys_info.sclk_voltage_mapping_table;
1054 for (i = 0; i < pi->graphics_dpm_level_count; i++) {
1055 if (pi->caps_enable_dfs_bypass) {
1056 if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
1057 pi->graphics_level[i].ClkBypassCntl = 3;
1058 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
1059 pi->graphics_level[i].ClkBypassCntl = 2;
1060 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
1061 pi->graphics_level[i].ClkBypassCntl = 7;
1062 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
1063 pi->graphics_level[i].ClkBypassCntl = 6;
1064 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
1065 pi->graphics_level[i].ClkBypassCntl = 8;
1066 else
1067 pi->graphics_level[i].ClkBypassCntl = 0;
1068 } else {
1069 pi->graphics_level[i].ClkBypassCntl = 0;
1070 }
1071 }
1072 }
1073}
1074
1075static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
1076{
1077 return kv_notify_message_to_smu(rdev, enable ?
1078 PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
1079}
1080
1081static void kv_update_current_ps(struct radeon_device *rdev,
1082 struct radeon_ps *rps)
1083{
1084 struct kv_ps *new_ps = kv_get_ps(rps);
1085 struct kv_power_info *pi = kv_get_pi(rdev);
1086
1087 pi->current_rps = *rps;
1088 pi->current_ps = *new_ps;
1089 pi->current_rps.ps_priv = &pi->current_ps;
1090}
1091
1092static void kv_update_requested_ps(struct radeon_device *rdev,
1093 struct radeon_ps *rps)
1094{
1095 struct kv_ps *new_ps = kv_get_ps(rps);
1096 struct kv_power_info *pi = kv_get_pi(rdev);
1097
1098 pi->requested_rps = *rps;
1099 pi->requested_ps = *new_ps;
1100 pi->requested_rps.ps_priv = &pi->requested_ps;
1101}
1102
1103int kv_dpm_enable(struct radeon_device *rdev)
1104{
1105 struct kv_power_info *pi = kv_get_pi(rdev);
1106 int ret;
1107
1108 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
1109 RADEON_CG_BLOCK_SDMA |
1110 RADEON_CG_BLOCK_BIF |
1111 RADEON_CG_BLOCK_HDP), false);
1112
1113 ret = kv_process_firmware_header(rdev);
1114 if (ret) {
1115 DRM_ERROR("kv_process_firmware_header failed\n");
1116 return ret;
1117 }
1118 kv_init_fps_limits(rdev);
1119 kv_init_graphics_levels(rdev);
1120 ret = kv_program_bootup_state(rdev);
1121 if (ret) {
1122 DRM_ERROR("kv_program_bootup_state failed\n");
1123 return ret;
1124 }
1125 kv_calculate_dfs_bypass_settings(rdev);
1126 ret = kv_upload_dpm_settings(rdev);
1127 if (ret) {
1128 DRM_ERROR("kv_upload_dpm_settings failed\n");
1129 return ret;
1130 }
1131 ret = kv_populate_uvd_table(rdev);
1132 if (ret) {
1133 DRM_ERROR("kv_populate_uvd_table failed\n");
1134 return ret;
1135 }
1136 ret = kv_populate_vce_table(rdev);
1137 if (ret) {
1138 DRM_ERROR("kv_populate_vce_table failed\n");
1139 return ret;
1140 }
1141 ret = kv_populate_samu_table(rdev);
1142 if (ret) {
1143 DRM_ERROR("kv_populate_samu_table failed\n");
1144 return ret;
1145 }
1146 ret = kv_populate_acp_table(rdev);
1147 if (ret) {
1148 DRM_ERROR("kv_populate_acp_table failed\n");
1149 return ret;
1150 }
1151 kv_program_vc(rdev);
1152#if 0
1153 kv_initialize_hardware_cac_manager(rdev);
1154#endif
1155 kv_start_am(rdev);
1156 if (pi->enable_auto_thermal_throttling) {
1157 ret = kv_enable_auto_thermal_throttling(rdev);
1158 if (ret) {
1159 DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
1160 return ret;
1161 }
1162 }
1163 ret = kv_enable_dpm_voltage_scaling(rdev);
1164 if (ret) {
1165 DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
1166 return ret;
1167 }
1168 ret = kv_set_dpm_interval(rdev);
1169 if (ret) {
1170 DRM_ERROR("kv_set_dpm_interval failed\n");
1171 return ret;
1172 }
1173 ret = kv_set_dpm_boot_state(rdev);
1174 if (ret) {
1175 DRM_ERROR("kv_set_dpm_boot_state failed\n");
1176 return ret;
1177 }
1178 ret = kv_enable_ulv(rdev, true);
1179 if (ret) {
1180 DRM_ERROR("kv_enable_ulv failed\n");
1181 return ret;
1182 }
1183 kv_start_dpm(rdev);
1184 ret = kv_enable_didt(rdev, true);
1185 if (ret) {
1186 DRM_ERROR("kv_enable_didt failed\n");
1187 return ret;
1188 }
1189 ret = kv_enable_smc_cac(rdev, true);
1190 if (ret) {
1191 DRM_ERROR("kv_enable_smc_cac failed\n");
1192 return ret;
1193 }
1194
1195 if (rdev->irq.installed &&
1196 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
1197 ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
1198 if (ret) {
1199 DRM_ERROR("kv_set_thermal_temperature_range failed\n");
1200 return ret;
1201 }
1202 rdev->irq.dpm_thermal = true;
1203 radeon_irq_set(rdev);
1204 }
1205
1206 /* powerdown unused blocks for now */
1207 kv_dpm_powergate_acp(rdev, true);
1208 kv_dpm_powergate_samu(rdev, true);
1209 kv_dpm_powergate_vce(rdev, true);
1210 kv_dpm_powergate_uvd(rdev, true);
1211
1212 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
1213 RADEON_CG_BLOCK_SDMA |
1214 RADEON_CG_BLOCK_BIF |
1215 RADEON_CG_BLOCK_HDP), true);
1216
1217 kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
1218
1219 return ret;
1220}
1221
1222void kv_dpm_disable(struct radeon_device *rdev)
1223{
1224 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
1225 RADEON_CG_BLOCK_SDMA |
1226 RADEON_CG_BLOCK_BIF |
1227 RADEON_CG_BLOCK_HDP), false);
1228
1229 /* powerup blocks */
1230 kv_dpm_powergate_acp(rdev, false);
1231 kv_dpm_powergate_samu(rdev, false);
1232 kv_dpm_powergate_vce(rdev, false);
1233 kv_dpm_powergate_uvd(rdev, false);
1234
1235 kv_enable_smc_cac(rdev, false);
1236 kv_enable_didt(rdev, false);
1237 kv_clear_vc(rdev);
1238 kv_stop_dpm(rdev);
1239 kv_enable_ulv(rdev, false);
1240 kv_reset_am(rdev);
1241
1242 kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
1243}
1244
1245#if 0
1246static int kv_write_smc_soft_register(struct radeon_device *rdev,
1247 u16 reg_offset, u32 value)
1248{
1249 struct kv_power_info *pi = kv_get_pi(rdev);
1250
1251 return kv_copy_bytes_to_smc(rdev, pi->soft_regs_start + reg_offset,
1252 (u8 *)&value, sizeof(u16), pi->sram_end);
1253}
1254
1255static int kv_read_smc_soft_register(struct radeon_device *rdev,
1256 u16 reg_offset, u32 *value)
1257{
1258 struct kv_power_info *pi = kv_get_pi(rdev);
1259
1260 return kv_read_smc_sram_dword(rdev, pi->soft_regs_start + reg_offset,
1261 value, pi->sram_end);
1262}
1263#endif
1264
1265static void kv_init_sclk_t(struct radeon_device *rdev)
1266{
1267 struct kv_power_info *pi = kv_get_pi(rdev);
1268
1269 pi->low_sclk_interrupt_t = 0;
1270}
1271
1272static int kv_init_fps_limits(struct radeon_device *rdev)
1273{
1274 struct kv_power_info *pi = kv_get_pi(rdev);
1275 int ret = 0;
1276
1277 if (pi->caps_fps) {
1278 u16 tmp;
1279
1280 tmp = 45;
1281 pi->fps_high_t = cpu_to_be16(tmp);
1282 ret = kv_copy_bytes_to_smc(rdev,
1283 pi->dpm_table_start +
1284 offsetof(SMU7_Fusion_DpmTable, FpsHighT),
1285 (u8 *)&pi->fps_high_t,
1286 sizeof(u16), pi->sram_end);
1287
1288 tmp = 30;
1289 pi->fps_low_t = cpu_to_be16(tmp);
1290
1291 ret = kv_copy_bytes_to_smc(rdev,
1292 pi->dpm_table_start +
1293 offsetof(SMU7_Fusion_DpmTable, FpsLowT),
1294 (u8 *)&pi->fps_low_t,
1295 sizeof(u16), pi->sram_end);
1296
1297 }
1298 return ret;
1299}
1300
1301static void kv_init_powergate_state(struct radeon_device *rdev)
1302{
1303 struct kv_power_info *pi = kv_get_pi(rdev);
1304
1305 pi->uvd_power_gated = false;
1306 pi->vce_power_gated = false;
1307 pi->samu_power_gated = false;
1308 pi->acp_power_gated = false;
1309
1310}
1311
1312static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
1313{
1314 return kv_notify_message_to_smu(rdev, enable ?
1315 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
1316}
1317
1318#if 0
1319static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable)
1320{
1321 return kv_notify_message_to_smu(rdev, enable ?
1322 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
1323}
1324#endif
1325
1326static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable)
1327{
1328 return kv_notify_message_to_smu(rdev, enable ?
1329 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
1330}
1331
1332static int kv_enable_acp_dpm(struct radeon_device *rdev, bool enable)
1333{
1334 return kv_notify_message_to_smu(rdev, enable ?
1335 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
1336}
1337
1338static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
1339{
1340 struct kv_power_info *pi = kv_get_pi(rdev);
1341 struct radeon_uvd_clock_voltage_dependency_table *table =
1342 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
1343 int ret;
1344
1345 if (!gate) {
1346 if (!pi->caps_uvd_dpm || table->count || pi->caps_stable_p_state)
1347 pi->uvd_boot_level = table->count - 1;
1348 else
1349 pi->uvd_boot_level = 0;
1350
1351 ret = kv_copy_bytes_to_smc(rdev,
1352 pi->dpm_table_start +
1353 offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
1354 (uint8_t *)&pi->uvd_boot_level,
1355 sizeof(u8), pi->sram_end);
1356 if (ret)
1357 return ret;
1358
1359 if (!pi->caps_uvd_dpm ||
1360 pi->caps_stable_p_state)
1361 kv_send_msg_to_smc_with_parameter(rdev,
1362 PPSMC_MSG_UVDDPM_SetEnabledMask,
1363 (1 << pi->uvd_boot_level));
1364 }
1365
1366 return kv_enable_uvd_dpm(rdev, !gate);
1367}
1368
1369#if 0
1370static u8 kv_get_vce_boot_level(struct radeon_device *rdev)
1371{
1372 u8 i;
1373 struct radeon_vce_clock_voltage_dependency_table *table =
1374 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
1375
1376 for (i = 0; i < table->count; i++) {
1377 if (table->entries[i].evclk >= 0) /* XXX */
1378 break;
1379 }
1380
1381 return i;
1382}
1383
1384static int kv_update_vce_dpm(struct radeon_device *rdev,
1385 struct radeon_ps *radeon_new_state,
1386 struct radeon_ps *radeon_current_state)
1387{
1388 struct kv_power_info *pi = kv_get_pi(rdev);
1389 struct radeon_vce_clock_voltage_dependency_table *table =
1390 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
1391 int ret;
1392
1393 if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) {
1394 if (pi->caps_stable_p_state)
1395 pi->vce_boot_level = table->count - 1;
1396 else
1397 pi->vce_boot_level = kv_get_vce_boot_level(rdev);
1398
1399 ret = kv_copy_bytes_to_smc(rdev,
1400 pi->dpm_table_start +
1401 offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
1402 (u8 *)&pi->vce_boot_level,
1403 sizeof(u8),
1404 pi->sram_end);
1405 if (ret)
1406 return ret;
1407
1408 if (pi->caps_stable_p_state)
1409 kv_send_msg_to_smc_with_parameter(rdev,
1410 PPSMC_MSG_VCEDPM_SetEnabledMask,
1411 (1 << pi->vce_boot_level));
1412
1413 kv_enable_vce_dpm(rdev, true);
1414 } else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) {
1415 kv_enable_vce_dpm(rdev, false);
1416 }
1417
1418 return 0;
1419}
1420#endif
1421
1422static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
1423{
1424 struct kv_power_info *pi = kv_get_pi(rdev);
1425 struct radeon_clock_voltage_dependency_table *table =
1426 &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
1427 int ret;
1428
1429 if (!gate) {
1430 if (pi->caps_stable_p_state)
1431 pi->samu_boot_level = table->count - 1;
1432 else
1433 pi->samu_boot_level = 0;
1434
1435 ret = kv_copy_bytes_to_smc(rdev,
1436 pi->dpm_table_start +
1437 offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
1438 (u8 *)&pi->samu_boot_level,
1439 sizeof(u8),
1440 pi->sram_end);
1441 if (ret)
1442 return ret;
1443
1444 if (pi->caps_stable_p_state)
1445 kv_send_msg_to_smc_with_parameter(rdev,
1446 PPSMC_MSG_SAMUDPM_SetEnabledMask,
1447 (1 << pi->samu_boot_level));
1448 }
1449
1450 return kv_enable_samu_dpm(rdev, !gate);
1451}
1452
1453static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
1454{
1455 struct kv_power_info *pi = kv_get_pi(rdev);
1456 struct radeon_clock_voltage_dependency_table *table =
1457 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
1458 int ret;
1459
1460 if (!gate) {
1461 if (pi->caps_stable_p_state)
1462 pi->acp_boot_level = table->count - 1;
1463 else
1464 pi->acp_boot_level = 0;
1465
1466 ret = kv_copy_bytes_to_smc(rdev,
1467 pi->dpm_table_start +
1468 offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
1469 (u8 *)&pi->acp_boot_level,
1470 sizeof(u8),
1471 pi->sram_end);
1472 if (ret)
1473 return ret;
1474
1475 if (pi->caps_stable_p_state)
1476 kv_send_msg_to_smc_with_parameter(rdev,
1477 PPSMC_MSG_ACPDPM_SetEnabledMask,
1478 (1 << pi->acp_boot_level));
1479 }
1480
1481 return kv_enable_acp_dpm(rdev, !gate);
1482}
1483
1484void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
1485{
1486 struct kv_power_info *pi = kv_get_pi(rdev);
1487
1488 if (pi->uvd_power_gated == gate)
1489 return;
1490
1491 pi->uvd_power_gated = gate;
1492
1493 if (gate) {
1494 if (pi->caps_uvd_pg) {
1495 uvd_v1_0_stop(rdev);
1496 cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
1497 }
1498 kv_update_uvd_dpm(rdev, gate);
1499 if (pi->caps_uvd_pg)
1500 kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF);
1501 } else {
1502 if (pi->caps_uvd_pg) {
1503 kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON);
1504 uvd_v4_2_resume(rdev);
1505 uvd_v1_0_start(rdev);
1506 cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
1507 }
1508 kv_update_uvd_dpm(rdev, gate);
1509 }
1510}
1511
1512static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate)
1513{
1514 struct kv_power_info *pi = kv_get_pi(rdev);
1515
1516 if (pi->vce_power_gated == gate)
1517 return;
1518
1519 pi->vce_power_gated = gate;
1520
1521 if (gate) {
1522 if (pi->caps_vce_pg)
1523 kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF);
1524 } else {
1525 if (pi->caps_vce_pg)
1526 kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON);
1527 }
1528}
1529
1530static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate)
1531{
1532 struct kv_power_info *pi = kv_get_pi(rdev);
1533
1534 if (pi->samu_power_gated == gate)
1535 return;
1536
1537 pi->samu_power_gated = gate;
1538
1539 if (gate) {
1540 kv_update_samu_dpm(rdev, true);
1541 if (pi->caps_samu_pg)
1542 kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerOFF);
1543 } else {
1544 if (pi->caps_samu_pg)
1545 kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerON);
1546 kv_update_samu_dpm(rdev, false);
1547 }
1548}
1549
1550static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
1551{
1552 struct kv_power_info *pi = kv_get_pi(rdev);
1553
1554 if (pi->acp_power_gated == gate)
1555 return;
1556
1557 if (rdev->family == CHIP_KABINI)
1558 return;
1559
1560 pi->acp_power_gated = gate;
1561
1562 if (gate) {
1563 kv_update_acp_dpm(rdev, true);
1564 if (pi->caps_acp_pg)
1565 kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerOFF);
1566 } else {
1567 if (pi->caps_acp_pg)
1568 kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerON);
1569 kv_update_acp_dpm(rdev, false);
1570 }
1571}
1572
1573static void kv_set_valid_clock_range(struct radeon_device *rdev,
1574 struct radeon_ps *new_rps)
1575{
1576 struct kv_ps *new_ps = kv_get_ps(new_rps);
1577 struct kv_power_info *pi = kv_get_pi(rdev);
1578 u32 i;
1579 struct radeon_clock_voltage_dependency_table *table =
1580 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1581
1582 if (table && table->count) {
1583 for (i = 0; i < pi->graphics_dpm_level_count; i++) {
1584 if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
1585 (i == (pi->graphics_dpm_level_count - 1))) {
1586 pi->lowest_valid = i;
1587 break;
1588 }
1589 }
1590
1591 for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
1592 if ((table->entries[i].clk <= new_ps->levels[new_ps->num_levels -1].sclk) ||
1593 (i == 0)) {
1594 pi->highest_valid = i;
1595 break;
1596 }
1597 }
1598
1599 if (pi->lowest_valid > pi->highest_valid) {
1600 if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
1601 (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
1602 pi->highest_valid = pi->lowest_valid;
1603 else
1604 pi->lowest_valid = pi->highest_valid;
1605 }
1606 } else {
1607 struct sumo_sclk_voltage_mapping_table *table =
1608 &pi->sys_info.sclk_voltage_mapping_table;
1609
1610 for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
1611 if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
1612 i == (int)(pi->graphics_dpm_level_count - 1)) {
1613 pi->lowest_valid = i;
1614 break;
1615 }
1616 }
1617
1618 for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
1619 if (table->entries[i].sclk_frequency <=
1620 new_ps->levels[new_ps->num_levels - 1].sclk ||
1621 i == 0) {
1622 pi->highest_valid = i;
1623 break;
1624 }
1625 }
1626
1627 if (pi->lowest_valid > pi->highest_valid) {
1628 if ((new_ps->levels[0].sclk -
1629 table->entries[pi->highest_valid].sclk_frequency) >
1630 (table->entries[pi->lowest_valid].sclk_frequency -
1631 new_ps->levels[new_ps->num_levels -1].sclk))
1632 pi->highest_valid = pi->lowest_valid;
1633 else
1634 pi->lowest_valid = pi->highest_valid;
1635 }
1636 }
1637}
1638
1639static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
1640 struct radeon_ps *new_rps)
1641{
1642 struct kv_ps *new_ps = kv_get_ps(new_rps);
1643 struct kv_power_info *pi = kv_get_pi(rdev);
1644 int ret = 0;
1645 u8 clk_bypass_cntl;
1646
1647 if (pi->caps_enable_dfs_bypass) {
1648 clk_bypass_cntl = new_ps->need_dfs_bypass ?
1649 pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
1650 ret = kv_copy_bytes_to_smc(rdev,
1651 (pi->dpm_table_start +
1652 offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
1653 (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
1654 offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
1655 &clk_bypass_cntl,
1656 sizeof(u8), pi->sram_end);
1657 }
1658
1659 return ret;
1660}
1661
1662static int kv_enable_nb_dpm(struct radeon_device *rdev)
1663{
1664 struct kv_power_info *pi = kv_get_pi(rdev);
1665 int ret = 0;
1666
1667 if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
1668 ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
1669 if (ret == 0)
1670 pi->nb_dpm_enabled = true;
1671 }
1672
1673 return ret;
1674}
1675
1676int kv_dpm_force_performance_level(struct radeon_device *rdev,
1677 enum radeon_dpm_forced_level level)
1678{
1679 int ret;
1680
1681 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
1682 ret = kv_force_dpm_highest(rdev);
1683 if (ret)
1684 return ret;
1685 } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
1686 ret = kv_force_dpm_lowest(rdev);
1687 if (ret)
1688 return ret;
1689 } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
1690 ret = kv_unforce_levels(rdev);
1691 if (ret)
1692 return ret;
1693 }
1694
1695 rdev->pm.dpm.forced_level = level;
1696
1697 return 0;
1698}
1699
1700int kv_dpm_pre_set_power_state(struct radeon_device *rdev)
1701{
1702 struct kv_power_info *pi = kv_get_pi(rdev);
1703 struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
1704 struct radeon_ps *new_ps = &requested_ps;
1705
1706 kv_update_requested_ps(rdev, new_ps);
1707
1708 kv_apply_state_adjust_rules(rdev,
1709 &pi->requested_rps,
1710 &pi->current_rps);
1711
1712 return 0;
1713}
1714
1715int kv_dpm_set_power_state(struct radeon_device *rdev)
1716{
1717 struct kv_power_info *pi = kv_get_pi(rdev);
1718 struct radeon_ps *new_ps = &pi->requested_rps;
1719 /*struct radeon_ps *old_ps = &pi->current_rps;*/
1720 int ret;
1721
1722 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
1723 RADEON_CG_BLOCK_SDMA |
1724 RADEON_CG_BLOCK_BIF |
1725 RADEON_CG_BLOCK_HDP), false);
1726
1727 if (rdev->family == CHIP_KABINI) {
1728 if (pi->enable_dpm) {
1729 kv_set_valid_clock_range(rdev, new_ps);
1730 kv_update_dfs_bypass_settings(rdev, new_ps);
1731 ret = kv_calculate_ds_divider(rdev);
1732 if (ret) {
1733 DRM_ERROR("kv_calculate_ds_divider failed\n");
1734 return ret;
1735 }
1736 kv_calculate_nbps_level_settings(rdev);
1737 kv_calculate_dpm_settings(rdev);
1738 kv_force_lowest_valid(rdev);
1739 kv_enable_new_levels(rdev);
1740 kv_upload_dpm_settings(rdev);
1741 kv_program_nbps_index_settings(rdev, new_ps);
1742 kv_unforce_levels(rdev);
1743 kv_set_enabled_levels(rdev);
1744 kv_force_lowest_valid(rdev);
1745 kv_unforce_levels(rdev);
1746#if 0
1747 ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
1748 if (ret) {
1749 DRM_ERROR("kv_update_vce_dpm failed\n");
1750 return ret;
1751 }
1752#endif
1753 kv_update_sclk_t(rdev);
1754 }
1755 } else {
1756 if (pi->enable_dpm) {
1757 kv_set_valid_clock_range(rdev, new_ps);
1758 kv_update_dfs_bypass_settings(rdev, new_ps);
1759 ret = kv_calculate_ds_divider(rdev);
1760 if (ret) {
1761 DRM_ERROR("kv_calculate_ds_divider failed\n");
1762 return ret;
1763 }
1764 kv_calculate_nbps_level_settings(rdev);
1765 kv_calculate_dpm_settings(rdev);
1766 kv_freeze_sclk_dpm(rdev, true);
1767 kv_upload_dpm_settings(rdev);
1768 kv_program_nbps_index_settings(rdev, new_ps);
1769 kv_freeze_sclk_dpm(rdev, false);
1770 kv_set_enabled_levels(rdev);
1771#if 0
1772 ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
1773 if (ret) {
1774 DRM_ERROR("kv_update_vce_dpm failed\n");
1775 return ret;
1776 }
1777#endif
1778 kv_update_sclk_t(rdev);
1779 kv_enable_nb_dpm(rdev);
1780 }
1781 }
1782
1783 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
1784 RADEON_CG_BLOCK_SDMA |
1785 RADEON_CG_BLOCK_BIF |
1786 RADEON_CG_BLOCK_HDP), true);
1787
1788 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1789 return 0;
1790}
1791
1792void kv_dpm_post_set_power_state(struct radeon_device *rdev)
1793{
1794 struct kv_power_info *pi = kv_get_pi(rdev);
1795 struct radeon_ps *new_ps = &pi->requested_rps;
1796
1797 kv_update_current_ps(rdev, new_ps);
1798}
1799
1800void kv_dpm_setup_asic(struct radeon_device *rdev)
1801{
1802 sumo_take_smu_control(rdev, true);
1803 kv_init_powergate_state(rdev);
1804 kv_init_sclk_t(rdev);
1805}
1806
1807void kv_dpm_reset_asic(struct radeon_device *rdev)
1808{
1809 kv_force_lowest_valid(rdev);
1810 kv_init_graphics_levels(rdev);
1811 kv_program_bootup_state(rdev);
1812 kv_upload_dpm_settings(rdev);
1813 kv_force_lowest_valid(rdev);
1814 kv_unforce_levels(rdev);
1815}
1816
1817//XXX use sumo_dpm_display_configuration_changed
1818
1819static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
1820 struct radeon_clock_and_voltage_limits *table)
1821{
1822 struct kv_power_info *pi = kv_get_pi(rdev);
1823
1824 if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
1825 int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
1826 table->sclk =
1827 pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
1828 table->vddc =
1829 kv_convert_2bit_index_to_voltage(rdev,
1830 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
1831 }
1832
1833 table->mclk = pi->sys_info.nbp_memory_clock[0];
1834}
1835
1836static void kv_patch_voltage_values(struct radeon_device *rdev)
1837{
1838 int i;
1839 struct radeon_uvd_clock_voltage_dependency_table *table =
1840 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
1841
1842 if (table->count) {
1843 for (i = 0; i < table->count; i++)
1844 table->entries[i].v =
1845 kv_convert_8bit_index_to_voltage(rdev,
1846 table->entries[i].v);
1847 }
1848
1849}
1850
1851static void kv_construct_boot_state(struct radeon_device *rdev)
1852{
1853 struct kv_power_info *pi = kv_get_pi(rdev);
1854
1855 pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
1856 pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
1857 pi->boot_pl.ds_divider_index = 0;
1858 pi->boot_pl.ss_divider_index = 0;
1859 pi->boot_pl.allow_gnb_slow = 1;
1860 pi->boot_pl.force_nbp_state = 0;
1861 pi->boot_pl.display_wm = 0;
1862 pi->boot_pl.vce_wm = 0;
1863}
1864
1865static int kv_force_dpm_highest(struct radeon_device *rdev)
1866{
1867 int ret;
1868 u32 enable_mask, i;
1869
1870 ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
1871 if (ret)
1872 return ret;
1873
1874 for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i >= 0; i--) {
1875 if (enable_mask & (1 << i))
1876 break;
1877 }
1878
1879 return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
1880}
1881
1882static int kv_force_dpm_lowest(struct radeon_device *rdev)
1883{
1884 int ret;
1885 u32 enable_mask, i;
1886
1887 ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
1888 if (ret)
1889 return ret;
1890
1891 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
1892 if (enable_mask & (1 << i))
1893 break;
1894 }
1895
1896 return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
1897}
1898
1899static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
1900 u32 sclk, u32 min_sclk_in_sr)
1901{
1902 struct kv_power_info *pi = kv_get_pi(rdev);
1903 u32 i;
1904 u32 temp;
1905 u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
1906 min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;
1907
1908 if (sclk < min)
1909 return 0;
1910
1911 if (!pi->caps_sclk_ds)
1912 return 0;
1913
 1914	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
1915 temp = sclk / sumo_get_sleep_divider_from_id(i);
1916 if ((temp >= min) || (i == 0))
1917 break;
1918 }
1919
1920 return (u8)i;
1921}
1922
1923static int kv_get_high_voltage_limit(struct radeon_device *rdev, int *limit)
1924{
1925 struct kv_power_info *pi = kv_get_pi(rdev);
1926 struct radeon_clock_voltage_dependency_table *table =
1927 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1928 int i;
1929
1930 if (table && table->count) {
1931 for (i = table->count - 1; i >= 0; i--) {
1932 if (pi->high_voltage_t &&
1933 (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <=
1934 pi->high_voltage_t)) {
1935 *limit = i;
1936 return 0;
1937 }
1938 }
1939 } else {
1940 struct sumo_sclk_voltage_mapping_table *table =
1941 &pi->sys_info.sclk_voltage_mapping_table;
1942
1943 for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
1944 if (pi->high_voltage_t &&
1945 (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <=
1946 pi->high_voltage_t)) {
1947 *limit = i;
1948 return 0;
1949 }
1950 }
1951 }
1952
1953 *limit = 0;
1954 return 0;
1955}
1956
1957static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
1958 struct radeon_ps *new_rps,
1959 struct radeon_ps *old_rps)
1960{
1961 struct kv_ps *ps = kv_get_ps(new_rps);
1962 struct kv_power_info *pi = kv_get_pi(rdev);
1963 u32 min_sclk = 10000; /* ??? */
1964 u32 sclk, mclk = 0;
1965 int i, limit;
1966 bool force_high;
1967 struct radeon_clock_voltage_dependency_table *table =
1968 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1969 u32 stable_p_state_sclk = 0;
1970 struct radeon_clock_and_voltage_limits *max_limits =
1971 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
1972
1973 mclk = max_limits->mclk;
1974 sclk = min_sclk;
1975
1976 if (pi->caps_stable_p_state) {
1977 stable_p_state_sclk = (max_limits->sclk * 75) / 100;
1978
1979 for (i = table->count - 1; i >= 0; i--) {
1980 if (stable_p_state_sclk >= table->entries[i].clk) {
1981 stable_p_state_sclk = table->entries[i].clk;
1982 break;
1983 }
1984 }
1985
1986 if (i > 0)
1987 stable_p_state_sclk = table->entries[0].clk;
1988
1989 sclk = stable_p_state_sclk;
1990 }
1991
1992 ps->need_dfs_bypass = true;
1993
1994 for (i = 0; i < ps->num_levels; i++) {
1995 if (ps->levels[i].sclk < sclk)
1996 ps->levels[i].sclk = sclk;
1997 }
1998
1999 if (table && table->count) {
2000 for (i = 0; i < ps->num_levels; i++) {
2001 if (pi->high_voltage_t &&
2002 (pi->high_voltage_t <
2003 kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
2004 kv_get_high_voltage_limit(rdev, &limit);
2005 ps->levels[i].sclk = table->entries[limit].clk;
2006 }
2007 }
2008 } else {
2009 struct sumo_sclk_voltage_mapping_table *table =
2010 &pi->sys_info.sclk_voltage_mapping_table;
2011
2012 for (i = 0; i < ps->num_levels; i++) {
2013 if (pi->high_voltage_t &&
2014 (pi->high_voltage_t <
2015 kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
2016 kv_get_high_voltage_limit(rdev, &limit);
2017 ps->levels[i].sclk = table->entries[limit].sclk_frequency;
2018 }
2019 }
2020 }
2021
2022 if (pi->caps_stable_p_state) {
2023 for (i = 0; i < ps->num_levels; i++) {
2024 ps->levels[i].sclk = stable_p_state_sclk;
2025 }
2026 }
2027
2028 pi->video_start = new_rps->dclk || new_rps->vclk;
2029
2030 if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
2031 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
2032 pi->battery_state = true;
2033 else
2034 pi->battery_state = false;
2035
2036 if (rdev->family == CHIP_KABINI) {
2037 ps->dpm0_pg_nb_ps_lo = 0x1;
2038 ps->dpm0_pg_nb_ps_hi = 0x0;
2039 ps->dpmx_nb_ps_lo = 0x1;
2040 ps->dpmx_nb_ps_hi = 0x0;
2041 } else {
2042 ps->dpm0_pg_nb_ps_lo = 0x1;
2043 ps->dpm0_pg_nb_ps_hi = 0x0;
2044 ps->dpmx_nb_ps_lo = 0x2;
2045 ps->dpmx_nb_ps_hi = 0x1;
2046
2047 if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
2048 force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
2049 pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
2050 pi->disable_nb_ps3_in_battery;
2051 ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
2052 ps->dpm0_pg_nb_ps_hi = 0x2;
2053 ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
2054 ps->dpmx_nb_ps_hi = 0x2;
2055 }
2056 }
2057}
2058
2059static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device *rdev,
2060 u32 index, bool enable)
2061{
2062 struct kv_power_info *pi = kv_get_pi(rdev);
2063
2064 pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
2065}
2066
2067static int kv_calculate_ds_divider(struct radeon_device *rdev)
2068{
2069 struct kv_power_info *pi = kv_get_pi(rdev);
2070 u32 sclk_in_sr = 10000; /* ??? */
2071 u32 i;
2072
2073 if (pi->lowest_valid > pi->highest_valid)
2074 return -EINVAL;
2075
2076 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2077 pi->graphics_level[i].DeepSleepDivId =
2078 kv_get_sleep_divider_id_from_clock(rdev,
2079 be32_to_cpu(pi->graphics_level[i].SclkFrequency),
2080 sclk_in_sr);
2081 }
2082 return 0;
2083}
2084
2085static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
2086{
2087 struct kv_power_info *pi = kv_get_pi(rdev);
2088 u32 i;
2089 bool force_high;
2090 struct radeon_clock_and_voltage_limits *max_limits =
2091 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
2092 u32 mclk = max_limits->mclk;
2093
2094 if (pi->lowest_valid > pi->highest_valid)
2095 return -EINVAL;
2096
2097 if (rdev->family == CHIP_KABINI) {
2098 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2099 pi->graphics_level[i].GnbSlow = 1;
2100 pi->graphics_level[i].ForceNbPs1 = 0;
2101 pi->graphics_level[i].UpH = 0;
2102 }
2103
2104 if (!pi->sys_info.nb_dpm_enable)
2105 return 0;
2106
2107 force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
2108 (rdev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
2109
2110 if (force_high) {
2111 for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2112 pi->graphics_level[i].GnbSlow = 0;
2113 } else {
2114 if (pi->battery_state)
2115 pi->graphics_level[0].ForceNbPs1 = 1;
2116
2117 pi->graphics_level[1].GnbSlow = 0;
2118 pi->graphics_level[2].GnbSlow = 0;
2119 pi->graphics_level[3].GnbSlow = 0;
2120 pi->graphics_level[4].GnbSlow = 0;
2121 }
2122 } else {
2123 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2124 pi->graphics_level[i].GnbSlow = 1;
2125 pi->graphics_level[i].ForceNbPs1 = 0;
2126 pi->graphics_level[i].UpH = 0;
2127 }
2128
2129 if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
2130 pi->graphics_level[pi->lowest_valid].UpH = 0x28;
2131 pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
2132 if (pi->lowest_valid != pi->highest_valid)
2133 pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
2134 }
2135 }
2136 return 0;
2137}
2138
2139static int kv_calculate_dpm_settings(struct radeon_device *rdev)
2140{
2141 struct kv_power_info *pi = kv_get_pi(rdev);
2142 u32 i;
2143
2144 if (pi->lowest_valid > pi->highest_valid)
2145 return -EINVAL;
2146
2147 for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2148 pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;
2149
2150 return 0;
2151}
2152
2153static void kv_init_graphics_levels(struct radeon_device *rdev)
2154{
2155 struct kv_power_info *pi = kv_get_pi(rdev);
2156 u32 i;
2157 struct radeon_clock_voltage_dependency_table *table =
2158 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2159
2160 if (table && table->count) {
2161 u32 vid_2bit;
2162
2163 pi->graphics_dpm_level_count = 0;
2164 for (i = 0; i < table->count; i++) {
2165 if (pi->high_voltage_t &&
2166 (pi->high_voltage_t <
2167 kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v)))
2168 break;
2169
2170 kv_set_divider_value(rdev, i, table->entries[i].clk);
2171 vid_2bit = sumo_convert_vid7_to_vid2(rdev,
2172 &pi->sys_info.vid_mapping_table,
2173 table->entries[i].v);
2174 kv_set_vid(rdev, i, vid_2bit);
2175 kv_set_at(rdev, i, pi->at[i]);
2176 kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
2177 pi->graphics_dpm_level_count++;
2178 }
2179 } else {
2180 struct sumo_sclk_voltage_mapping_table *table =
2181 &pi->sys_info.sclk_voltage_mapping_table;
2182
2183 pi->graphics_dpm_level_count = 0;
2184 for (i = 0; i < table->num_max_dpm_entries; i++) {
2185 if (pi->high_voltage_t &&
2186 pi->high_voltage_t <
2187 kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit))
2188 break;
2189
2190 kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency);
2191 kv_set_vid(rdev, i, table->entries[i].vid_2bit);
2192 kv_set_at(rdev, i, pi->at[i]);
2193 kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
2194 pi->graphics_dpm_level_count++;
2195 }
2196 }
2197
2198 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
2199 kv_dpm_power_level_enable(rdev, i, false);
2200}
2201
2202static void kv_enable_new_levels(struct radeon_device *rdev)
2203{
2204 struct kv_power_info *pi = kv_get_pi(rdev);
2205 u32 i;
2206
2207 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
2208 if (i >= pi->lowest_valid && i <= pi->highest_valid)
2209 kv_dpm_power_level_enable(rdev, i, true);
2210 }
2211}
2212
2213static int kv_set_enabled_levels(struct radeon_device *rdev)
2214{
2215 struct kv_power_info *pi = kv_get_pi(rdev);
2216 u32 i, new_mask = 0;
2217
2218 for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2219 new_mask |= (1 << i);
2220
2221 return kv_send_msg_to_smc_with_parameter(rdev,
2222 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2223 new_mask);
2224}
2225
2226static void kv_program_nbps_index_settings(struct radeon_device *rdev,
2227 struct radeon_ps *new_rps)
2228{
2229 struct kv_ps *new_ps = kv_get_ps(new_rps);
2230 struct kv_power_info *pi = kv_get_pi(rdev);
2231 u32 nbdpmconfig1;
2232
2233 if (rdev->family == CHIP_KABINI)
2234 return;
2235
2236 if (pi->sys_info.nb_dpm_enable) {
2237 nbdpmconfig1 = RREG32_SMC(NB_DPM_CONFIG_1);
2238 nbdpmconfig1 &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK |
2239 DpmXNbPsLo_MASK | DpmXNbPsHi_MASK);
2240 nbdpmconfig1 |= (Dpm0PgNbPsLo(new_ps->dpm0_pg_nb_ps_lo) |
2241 Dpm0PgNbPsHi(new_ps->dpm0_pg_nb_ps_hi) |
2242 DpmXNbPsLo(new_ps->dpmx_nb_ps_lo) |
2243 DpmXNbPsHi(new_ps->dpmx_nb_ps_hi));
2244 WREG32_SMC(NB_DPM_CONFIG_1, nbdpmconfig1);
2245 }
2246}
2247
2248static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
2249 int min_temp, int max_temp)
2250{
2251 int low_temp = 0 * 1000;
2252 int high_temp = 255 * 1000;
2253 u32 tmp;
2254
2255 if (low_temp < min_temp)
2256 low_temp = min_temp;
2257 if (high_temp > max_temp)
2258 high_temp = max_temp;
2259 if (high_temp < low_temp) {
2260 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
2261 return -EINVAL;
2262 }
2263
2264 tmp = RREG32_SMC(CG_THERMAL_INT_CTRL);
2265 tmp &= ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK);
2266 tmp |= (DIG_THERM_INTH(49 + (high_temp / 1000)) |
2267 DIG_THERM_INTL(49 + (low_temp / 1000)));
2268 WREG32_SMC(CG_THERMAL_INT_CTRL, tmp);
2269
2270 rdev->pm.dpm.thermal.min_temp = low_temp;
2271 rdev->pm.dpm.thermal.max_temp = high_temp;
2272
2273 return 0;
2274}
2275
2276union igp_info {
2277 struct _ATOM_INTEGRATED_SYSTEM_INFO info;
2278 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
2279 struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
2280 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
2281 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
2282 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
2283};
2284
2285static int kv_parse_sys_info_table(struct radeon_device *rdev)
2286{
2287 struct kv_power_info *pi = kv_get_pi(rdev);
2288 struct radeon_mode_info *mode_info = &rdev->mode_info;
2289 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
2290 union igp_info *igp_info;
2291 u8 frev, crev;
2292 u16 data_offset;
2293 int i;
2294
2295 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
2296 &frev, &crev, &data_offset)) {
2297 igp_info = (union igp_info *)(mode_info->atom_context->bios +
2298 data_offset);
2299
2300 if (crev != 8) {
2301 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
2302 return -EINVAL;
2303 }
2304 pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
2305 pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
2306 pi->sys_info.bootup_nb_voltage_index =
2307 le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
2308 if (igp_info->info_8.ucHtcTmpLmt == 0)
2309 pi->sys_info.htc_tmp_lmt = 203;
2310 else
2311 pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
2312 if (igp_info->info_8.ucHtcHystLmt == 0)
2313 pi->sys_info.htc_hyst_lmt = 5;
2314 else
2315 pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
2316 if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
2317 DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
2318 }
2319
2320 if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
2321 pi->sys_info.nb_dpm_enable = true;
2322 else
2323 pi->sys_info.nb_dpm_enable = false;
2324
2325 for (i = 0; i < KV_NUM_NBPSTATES; i++) {
2326 pi->sys_info.nbp_memory_clock[i] =
2327 le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
2328 pi->sys_info.nbp_n_clock[i] =
2329 le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
2330 }
2331 if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
2332 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
2333 pi->caps_enable_dfs_bypass = true;
2334
2335 sumo_construct_sclk_voltage_mapping_table(rdev,
2336 &pi->sys_info.sclk_voltage_mapping_table,
2337 igp_info->info_8.sAvail_SCLK);
2338
2339 sumo_construct_vid_mapping_table(rdev,
2340 &pi->sys_info.vid_mapping_table,
2341 igp_info->info_8.sAvail_SCLK);
2342
2343 kv_construct_max_power_limits_table(rdev,
2344 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
2345 }
2346 return 0;
2347}
2348
2349union power_info {
2350 struct _ATOM_POWERPLAY_INFO info;
2351 struct _ATOM_POWERPLAY_INFO_V2 info_2;
2352 struct _ATOM_POWERPLAY_INFO_V3 info_3;
2353 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
2354 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
2355 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
2356};
2357
2358union pplib_clock_info {
2359 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
2360 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
2361 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
2362 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
2363};
2364
2365union pplib_power_state {
2366 struct _ATOM_PPLIB_STATE v1;
2367 struct _ATOM_PPLIB_STATE_V2 v2;
2368};
2369
2370static void kv_patch_boot_state(struct radeon_device *rdev,
2371 struct kv_ps *ps)
2372{
2373 struct kv_power_info *pi = kv_get_pi(rdev);
2374
2375 ps->num_levels = 1;
2376 ps->levels[0] = pi->boot_pl;
2377}
2378
2379static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev,
2380 struct radeon_ps *rps,
2381 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
2382 u8 table_rev)
2383{
2384 struct kv_ps *ps = kv_get_ps(rps);
2385
2386 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
2387 rps->class = le16_to_cpu(non_clock_info->usClassification);
2388 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
2389
2390 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
2391 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
2392 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
2393 } else {
2394 rps->vclk = 0;
2395 rps->dclk = 0;
2396 }
2397
2398 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
2399 rdev->pm.dpm.boot_ps = rps;
2400 kv_patch_boot_state(rdev, ps);
2401 }
2402 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
2403 rdev->pm.dpm.uvd_ps = rps;
2404}
2405
2406static void kv_parse_pplib_clock_info(struct radeon_device *rdev,
2407 struct radeon_ps *rps, int index,
2408 union pplib_clock_info *clock_info)
2409{
2410 struct kv_power_info *pi = kv_get_pi(rdev);
2411 struct kv_ps *ps = kv_get_ps(rps);
2412 struct kv_pl *pl = &ps->levels[index];
2413 u32 sclk;
2414
2415 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
2416 sclk |= clock_info->sumo.ucEngineClockHigh << 16;
2417 pl->sclk = sclk;
2418 pl->vddc_index = clock_info->sumo.vddcIndex;
2419
2420 ps->num_levels = index + 1;
2421
2422 if (pi->caps_sclk_ds) {
2423 pl->ds_divider_index = 5;
2424 pl->ss_divider_index = 5;
2425 }
2426}
2427
2428static int kv_parse_power_table(struct radeon_device *rdev)
2429{
2430 struct radeon_mode_info *mode_info = &rdev->mode_info;
2431 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
2432 union pplib_power_state *power_state;
2433 int i, j, k, non_clock_array_index, clock_array_index;
2434 union pplib_clock_info *clock_info;
2435 struct _StateArray *state_array;
2436 struct _ClockInfoArray *clock_info_array;
2437 struct _NonClockInfoArray *non_clock_info_array;
2438 union power_info *power_info;
2439 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
2440 u16 data_offset;
2441 u8 frev, crev;
2442 u8 *power_state_offset;
2443 struct kv_ps *ps;
2444
2445 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
2446 &frev, &crev, &data_offset))
2447 return -EINVAL;
2448 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
2449
2450 state_array = (struct _StateArray *)
2451 (mode_info->atom_context->bios + data_offset +
2452 le16_to_cpu(power_info->pplib.usStateArrayOffset));
2453 clock_info_array = (struct _ClockInfoArray *)
2454 (mode_info->atom_context->bios + data_offset +
2455 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
2456 non_clock_info_array = (struct _NonClockInfoArray *)
2457 (mode_info->atom_context->bios + data_offset +
2458 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
2459
2460 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
2461 state_array->ucNumEntries, GFP_KERNEL);
2462 if (!rdev->pm.dpm.ps)
2463 return -ENOMEM;
2464 power_state_offset = (u8 *)state_array->states;
2465 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
2466 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
2467 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
2468 for (i = 0; i < state_array->ucNumEntries; i++) {
2469 u8 *idx;
2470 power_state = (union pplib_power_state *)power_state_offset;
2471 non_clock_array_index = power_state->v2.nonClockInfoIndex;
2472 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
2473 &non_clock_info_array->nonClockInfo[non_clock_array_index];
2474 if (!rdev->pm.power_state[i].clock_info)
2475 return -EINVAL;
2476 ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
2477 if (ps == NULL) {
2478 kfree(rdev->pm.dpm.ps);
2479 return -ENOMEM;
2480 }
2481 rdev->pm.dpm.ps[i].ps_priv = ps;
2482 k = 0;
2483 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
2484 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
2485 clock_array_index = idx[j];
2486 if (clock_array_index >= clock_info_array->ucNumEntries)
2487 continue;
2488 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
2489 break;
2490 clock_info = (union pplib_clock_info *)
2491 ((u8 *)&clock_info_array->clockInfo[0] +
2492 (clock_array_index * clock_info_array->ucEntrySize));
2493 kv_parse_pplib_clock_info(rdev,
2494 &rdev->pm.dpm.ps[i], k,
2495 clock_info);
2496 k++;
2497 }
2498 kv_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
2499 non_clock_info,
2500 non_clock_info_array->ucEntrySize);
2501 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
2502 }
2503 rdev->pm.dpm.num_ps = state_array->ucNumEntries;
2504 return 0;
2505}
2506
2507int kv_dpm_init(struct radeon_device *rdev)
2508{
2509 struct kv_power_info *pi;
2510 int ret, i;
2511
2512 pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
2513 if (pi == NULL)
2514 return -ENOMEM;
2515 rdev->pm.dpm.priv = pi;
2516
2517 ret = r600_parse_extended_power_table(rdev);
2518 if (ret)
2519 return ret;
2520
2521 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
2522 pi->at[i] = TRINITY_AT_DFLT;
2523
2524 pi->sram_end = SMC_RAM_END;
2525
2526 if (rdev->family == CHIP_KABINI)
2527 pi->high_voltage_t = 4001;
2528
2529 pi->enable_nb_dpm = true;
2530
2531 pi->caps_power_containment = true;
2532 pi->caps_cac = true;
2533 pi->enable_didt = false;
2534 if (pi->enable_didt) {
2535 pi->caps_sq_ramping = true;
2536 pi->caps_db_ramping = true;
2537 pi->caps_td_ramping = true;
2538 pi->caps_tcp_ramping = true;
2539 }
2540
2541 pi->caps_sclk_ds = true;
2542 pi->enable_auto_thermal_throttling = true;
2543 pi->disable_nb_ps3_in_battery = false;
2544 pi->bapm_enable = true;
2545 pi->voltage_drop_t = 0;
2546 pi->caps_sclk_throttle_low_notification = false;
2547 pi->caps_fps = false; /* true? */
2548 pi->caps_uvd_pg = true;
2549 pi->caps_uvd_dpm = true;
2550 pi->caps_vce_pg = false;
2551 pi->caps_samu_pg = false;
2552 pi->caps_acp_pg = false;
2553 pi->caps_stable_p_state = false;
2554
2555 ret = kv_parse_sys_info_table(rdev);
2556 if (ret)
2557 return ret;
2558
2559 kv_patch_voltage_values(rdev);
2560 kv_construct_boot_state(rdev);
2561
2562 ret = kv_parse_power_table(rdev);
2563 if (ret)
2564 return ret;
2565
2566 pi->enable_dpm = true;
2567
2568 return 0;
2569}
2570
2571void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
2572 struct seq_file *m)
2573{
2574 struct kv_power_info *pi = kv_get_pi(rdev);
2575 u32 current_index =
2576 (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
2577 CURR_SCLK_INDEX_SHIFT;
2578 u32 sclk, tmp;
2579 u16 vddc;
2580
2581 if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
2582 seq_printf(m, "invalid dpm profile %d\n", current_index);
2583 } else {
2584 sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
2585 tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
2586 SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
2587 vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
2588 seq_printf(m, "power level %d sclk: %u vddc: %u\n",
2589 current_index, sclk, vddc);
2590 }
2591}
2592
2593void kv_dpm_print_power_state(struct radeon_device *rdev,
2594 struct radeon_ps *rps)
2595{
2596 int i;
2597 struct kv_ps *ps = kv_get_ps(rps);
2598
2599 r600_dpm_print_class_info(rps->class, rps->class2);
2600 r600_dpm_print_cap_info(rps->caps);
2601 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
2602 for (i = 0; i < ps->num_levels; i++) {
2603 struct kv_pl *pl = &ps->levels[i];
2604 printk("\t\tpower level %d sclk: %u vddc: %u\n",
2605 i, pl->sclk,
2606 kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index));
2607 }
2608 r600_dpm_print_ps_status(rdev, rps);
2609}
2610
2611void kv_dpm_fini(struct radeon_device *rdev)
2612{
2613 int i;
2614
2615 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
2616 kfree(rdev->pm.dpm.ps[i].ps_priv);
2617 }
2618 kfree(rdev->pm.dpm.ps);
2619 kfree(rdev->pm.dpm.priv);
2620 r600_free_extended_power_table(rdev);
2621}
2622
2623void kv_dpm_display_configuration_changed(struct radeon_device *rdev)
2624{
2625
2626}
2627
2628u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low)
2629{
2630 struct kv_power_info *pi = kv_get_pi(rdev);
2631 struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
2632
2633 if (low)
2634 return requested_state->levels[0].sclk;
2635 else
2636 return requested_state->levels[requested_state->num_levels - 1].sclk;
2637}
2638
2639u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low)
2640{
2641 struct kv_power_info *pi = kv_get_pi(rdev);
2642
2643 return pi->sys_info.bootup_uma_clk;
2644}
2645
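For reference, the deep-sleep divider selection in kv_get_sleep_divider_id_from_clock() above amounts to walking from the deepest divider down and keeping the first one whose divided engine clock still clears the minimum. Below is a minimal userspace sketch of that walk; it assumes a power-of-two divider (1 << id) for sumo_get_sleep_divider_from_id(), and the constants are placeholders standing in for KV_MAX_DEEPSLEEP_DIVIDER_ID and KV_MINIMUM_ENGINE_CLOCK, so treat it as an illustration rather than the driver's exact values.

#include <stdio.h>
#include <stdint.h>

#define MAX_DEEPSLEEP_DIVIDER_ID 5   /* assumed placeholder */
#define MINIMUM_ENGINE_CLOCK     800 /* assumed placeholder floor */

static uint8_t sleep_divider_id_from_clock(uint32_t sclk, uint32_t min_sclk_in_sr)
{
	uint32_t min = (min_sclk_in_sr > MINIMUM_ENGINE_CLOCK) ?
		min_sclk_in_sr : MINIMUM_ENGINE_CLOCK;
	uint32_t i;

	if (sclk < min)
		return 0;

	/* walk from the deepest divider down; stop at the first one that
	 * still keeps the divided clock above the floor */
	for (i = MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
		if ((sclk >> i) >= min)
			break;
	}
	return (uint8_t)i;
}

int main(void)
{
	/* prints 1: 30000 >> 1 = 15000, which still meets the 10000 floor */
	printf("%u\n", (unsigned)sleep_divider_id_from_clock(30000, 10000));
	return 0;
}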
diff --git a/drivers/gpu/drm/radeon/kv_dpm.h b/drivers/gpu/drm/radeon/kv_dpm.h
new file mode 100644
index 000000000000..32bb079572d7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/kv_dpm.h
@@ -0,0 +1,199 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __KV_DPM_H__
24#define __KV_DPM_H__
25
26#define SMU__NUM_SCLK_DPM_STATE 8
27#define SMU__NUM_MCLK_DPM_LEVELS 4
28#define SMU__NUM_LCLK_DPM_LEVELS 8
29#define SMU__NUM_PCIE_DPM_LEVELS 0 /* ??? */
30#include "smu7_fusion.h"
31#include "trinity_dpm.h"
32#include "ppsmc.h"
33
34#define KV_NUM_NBPSTATES 4
35
36enum kv_pt_config_reg_type {
37 KV_CONFIGREG_MMR = 0,
38 KV_CONFIGREG_SMC_IND,
39 KV_CONFIGREG_DIDT_IND,
40 KV_CONFIGREG_CACHE,
41 KV_CONFIGREG_MAX
42};
43
44struct kv_pt_config_reg {
45 u32 offset;
46 u32 mask;
47 u32 shift;
48 u32 value;
49 enum kv_pt_config_reg_type type;
50};
51
52struct kv_lcac_config_values {
53 u32 block_id;
54 u32 signal_id;
55 u32 t;
56};
57
58struct kv_lcac_config_reg {
59 u32 cntl;
60 u32 block_mask;
61 u32 block_shift;
62 u32 signal_mask;
63 u32 signal_shift;
64 u32 t_mask;
65 u32 t_shift;
66 u32 enable_mask;
67 u32 enable_shift;
68};
69
70struct kv_pl {
71 u32 sclk;
72 u8 vddc_index;
73 u8 ds_divider_index;
74 u8 ss_divider_index;
75 u8 allow_gnb_slow;
76 u8 force_nbp_state;
77 u8 display_wm;
78 u8 vce_wm;
79};
80
81struct kv_ps {
82 struct kv_pl levels[SUMO_MAX_HARDWARE_POWERLEVELS];
83 u32 num_levels;
84 bool need_dfs_bypass;
85 u8 dpm0_pg_nb_ps_lo;
86 u8 dpm0_pg_nb_ps_hi;
87 u8 dpmx_nb_ps_lo;
88 u8 dpmx_nb_ps_hi;
89};
90
91struct kv_sys_info {
92 u32 bootup_uma_clk;
93 u32 bootup_sclk;
94 u32 dentist_vco_freq;
95 u32 nb_dpm_enable;
96 u32 nbp_memory_clock[KV_NUM_NBPSTATES];
97 u32 nbp_n_clock[KV_NUM_NBPSTATES];
98 u16 bootup_nb_voltage_index;
99 u8 htc_tmp_lmt;
100 u8 htc_hyst_lmt;
101 struct sumo_sclk_voltage_mapping_table sclk_voltage_mapping_table;
102 struct sumo_vid_mapping_table vid_mapping_table;
103 u32 uma_channel_number;
104};
105
106struct kv_power_info {
107 u32 at[SUMO_MAX_HARDWARE_POWERLEVELS];
108 u32 voltage_drop_t;
109 struct kv_sys_info sys_info;
110 struct kv_pl boot_pl;
111 bool enable_nb_ps_policy;
112 bool disable_nb_ps3_in_battery;
113 bool video_start;
114 bool battery_state;
115 u32 lowest_valid;
116 u32 highest_valid;
117 u16 high_voltage_t;
118 bool cac_enabled;
119 bool bapm_enable;
120 /* smc offsets */
121 u32 sram_end;
122 u32 dpm_table_start;
123 u32 soft_regs_start;
124 /* dpm SMU tables */
125 u8 graphics_dpm_level_count;
126 u8 uvd_level_count;
127 u8 vce_level_count;
128 u8 acp_level_count;
129 u8 samu_level_count;
130 u16 fps_high_t;
131 SMU7_Fusion_GraphicsLevel graphics_level[SMU__NUM_SCLK_DPM_STATE];
132 SMU7_Fusion_ACPILevel acpi_level;
133 SMU7_Fusion_UvdLevel uvd_level[SMU7_MAX_LEVELS_UVD];
134 SMU7_Fusion_ExtClkLevel vce_level[SMU7_MAX_LEVELS_VCE];
135 SMU7_Fusion_ExtClkLevel acp_level[SMU7_MAX_LEVELS_ACP];
136 SMU7_Fusion_ExtClkLevel samu_level[SMU7_MAX_LEVELS_SAMU];
137 u8 uvd_boot_level;
138 u8 vce_boot_level;
139 u8 acp_boot_level;
140 u8 samu_boot_level;
141 u8 uvd_interval;
142 u8 vce_interval;
143 u8 acp_interval;
144 u8 samu_interval;
145 u8 graphics_boot_level;
146 u8 graphics_interval;
147 u8 graphics_therm_throttle_enable;
148 u8 graphics_voltage_change_enable;
149 u8 graphics_clk_slow_enable;
150 u8 graphics_clk_slow_divider;
151 u8 fps_low_t;
152 u32 low_sclk_interrupt_t;
153 bool uvd_power_gated;
154 bool vce_power_gated;
155 bool acp_power_gated;
156 bool samu_power_gated;
157 bool nb_dpm_enabled;
158 /* flags */
159 bool enable_didt;
160 bool enable_dpm;
161 bool enable_auto_thermal_throttling;
162 bool enable_nb_dpm;
163 /* caps */
164 bool caps_cac;
165 bool caps_power_containment;
166 bool caps_sq_ramping;
167 bool caps_db_ramping;
168 bool caps_td_ramping;
169 bool caps_tcp_ramping;
170 bool caps_sclk_throttle_low_notification;
171 bool caps_fps;
172 bool caps_uvd_dpm;
173 bool caps_uvd_pg;
174 bool caps_vce_pg;
175 bool caps_samu_pg;
176 bool caps_acp_pg;
177 bool caps_stable_p_state;
178 bool caps_enable_dfs_bypass;
179 bool caps_sclk_ds;
180 struct radeon_ps current_rps;
181 struct kv_ps current_ps;
182 struct radeon_ps requested_rps;
183 struct kv_ps requested_ps;
184};
185
186
187/* kv_smc.c */
188int kv_notify_message_to_smu(struct radeon_device *rdev, u32 id);
189int kv_dpm_get_enable_mask(struct radeon_device *rdev, u32 *enable_mask);
190int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
191 PPSMC_Msg msg, u32 parameter);
192int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
193 u32 *value, u32 limit);
194int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable);
195int kv_copy_bytes_to_smc(struct radeon_device *rdev,
196 u32 smc_start_address,
197 const u8 *src, u32 byte_count, u32 limit);
198
199#endif
diff --git a/drivers/gpu/drm/radeon/kv_smc.c b/drivers/gpu/drm/radeon/kv_smc.c
new file mode 100644
index 000000000000..34a226d7e34a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/kv_smc.c
@@ -0,0 +1,207 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include "drmP.h"
26#include "radeon.h"
27#include "cikd.h"
28#include "kv_dpm.h"
29
30int kv_notify_message_to_smu(struct radeon_device *rdev, u32 id)
31{
32 u32 i;
33 u32 tmp = 0;
34
35 WREG32(SMC_MESSAGE_0, id & SMC_MSG_MASK);
36
37 for (i = 0; i < rdev->usec_timeout; i++) {
38 if ((RREG32(SMC_RESP_0) & SMC_RESP_MASK) != 0)
39 break;
40 udelay(1);
41 }
42 tmp = RREG32(SMC_RESP_0) & SMC_RESP_MASK;
43
44 if (tmp != 1) {
45 if (tmp == 0xFF)
46 return -EINVAL;
47 else if (tmp == 0xFE)
48 return -EINVAL;
49 }
50
51 return 0;
52}
53
54int kv_dpm_get_enable_mask(struct radeon_device *rdev, u32 *enable_mask)
55{
56 int ret;
57
58 ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_SCLKDPM_GetEnabledMask);
59
60 if (ret == 0)
61 *enable_mask = RREG32_SMC(SMC_SYSCON_MSG_ARG_0);
62
63 return ret;
64}
65
66int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
67 PPSMC_Msg msg, u32 parameter)
68{
69
70 WREG32(SMC_MSG_ARG_0, parameter);
71
72 return kv_notify_message_to_smu(rdev, msg);
73}
74
75static int kv_set_smc_sram_address(struct radeon_device *rdev,
76 u32 smc_address, u32 limit)
77{
78 if (smc_address & 3)
79 return -EINVAL;
80 if ((smc_address + 3) > limit)
81 return -EINVAL;
82
83 WREG32(SMC_IND_INDEX_0, smc_address);
84 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
85
86 return 0;
87}
88
89int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
90 u32 *value, u32 limit)
91{
92 int ret;
93
94 ret = kv_set_smc_sram_address(rdev, smc_address, limit);
95 if (ret)
96 return ret;
97
98 *value = RREG32(SMC_IND_DATA_0);
99 return 0;
100}
101
102int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable)
103{
104 if (enable)
105 return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Enable);
106 else
107 return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable);
108}
109
110int kv_copy_bytes_to_smc(struct radeon_device *rdev,
111 u32 smc_start_address,
112 const u8 *src, u32 byte_count, u32 limit)
113{
114 int ret;
115 u32 data, original_data, addr, extra_shift, t_byte, count, mask;
116
117 if ((smc_start_address + byte_count) > limit)
118 return -EINVAL;
119
120 addr = smc_start_address;
121 t_byte = addr & 3;
122
123 /* RMW for the initial bytes */
124 if (t_byte != 0) {
125 addr -= t_byte;
126
127 ret = kv_set_smc_sram_address(rdev, addr, limit);
128 if (ret)
129 return ret;
130
131 original_data = RREG32(SMC_IND_DATA_0);
132
133 data = 0;
134 mask = 0;
135 count = 4;
136 while (count > 0) {
137 if (t_byte > 0) {
138 mask = (mask << 8) | 0xff;
139 t_byte--;
140 } else if (byte_count > 0) {
141 data = (data << 8) + *src++;
142 byte_count--;
143 mask <<= 8;
144 } else {
145 data <<= 8;
146 mask = (mask << 8) | 0xff;
147 }
148 count--;
149 }
150
151 data |= original_data & mask;
152
153 ret = kv_set_smc_sram_address(rdev, addr, limit);
154 if (ret)
155 return ret;
156
157 WREG32(SMC_IND_DATA_0, data);
158
159 addr += 4;
160 }
161
162 while (byte_count >= 4) {
163 /* SMC address space is BE */
164 data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
165
166 ret = kv_set_smc_sram_address(rdev, addr, limit);
167 if (ret)
168 return ret;
169
170 WREG32(SMC_IND_DATA_0, data);
171
172 src += 4;
173 byte_count -= 4;
174 addr += 4;
175 }
176
177 /* RMW for the final bytes */
178 if (byte_count > 0) {
179 data = 0;
180
181 ret = kv_set_smc_sram_address(rdev, addr, limit);
182 if (ret)
183 return ret;
184
185 original_data = RREG32(SMC_IND_DATA_0);
186
187 extra_shift = 8 * (4 - byte_count);
188
189 while (byte_count > 0) {
190 /* SMC address space is BE */
191 data = (data << 8) + *src++;
192 byte_count--;
193 }
194
195 data <<= extra_shift;
196
197 data |= (original_data & ~((~0UL) << extra_shift));
198
199 ret = kv_set_smc_sram_address(rdev, addr, limit);
200 if (ret)
201 return ret;
202
203 WREG32(SMC_IND_DATA_0, data);
204 }
205 return 0;
206}
207
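The read-modify-write path in kv_copy_bytes_to_smc() above is the subtle part: when the destination address is not dword-aligned, the first word is rebuilt byte by byte in big-endian order, keeping the original leading bytes (and any trailing bytes beyond byte_count) from SMC SRAM and splicing the new source bytes in between. A standalone sketch of that merge follows; merge_head_word() and the test values are invented purely for illustration, but the loop mirrors the one in the driver code above.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static uint32_t merge_head_word(uint32_t original, const uint8_t *src,
				uint32_t t_byte, uint32_t byte_count)
{
	uint32_t data = 0, mask = 0, count = 4;

	while (count > 0) {
		if (t_byte > 0) {
			mask = (mask << 8) | 0xff;	/* keep original leading byte */
			t_byte--;
		} else if (byte_count > 0) {
			data = (data << 8) + *src++;	/* take a new source byte */
			byte_count--;
			mask <<= 8;
		} else {
			data <<= 8;			/* past the copy: keep original */
			mask = (mask << 8) | 0xff;
		}
		count--;
	}
	return data | (original & mask);
}

int main(void)
{
	const uint8_t src[] = { 0xAA, 0xBB };

	/* start one byte into the word, two new bytes: prints 0x11AABB44 */
	printf("0x%08" PRIX32 "\n",
	       merge_head_word(0x11223344, src, 1, (uint32_t)sizeof(src)));
	return 0;
}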
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index ccb4f8b54852..93c1f9ef5da9 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -35,7 +35,7 @@
35#include "radeon_ucode.h" 35#include "radeon_ucode.h"
36#include "clearstate_cayman.h" 36#include "clearstate_cayman.h"
37 37
38static u32 tn_rlc_save_restore_register_list[] = 38static const u32 tn_rlc_save_restore_register_list[] =
39{ 39{
40 0x98fc, 40 0x98fc,
41 0x98f0, 41 0x98f0,
@@ -160,7 +160,6 @@ static u32 tn_rlc_save_restore_register_list[] =
160 0x9830, 160 0x9830,
161 0x802c, 161 0x802c,
162}; 162};
163static u32 tn_rlc_save_restore_register_list_size = ARRAY_SIZE(tn_rlc_save_restore_register_list);
164 163
165extern bool evergreen_is_display_hung(struct radeon_device *rdev); 164extern bool evergreen_is_display_hung(struct radeon_device *rdev);
166extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev); 165extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
@@ -175,6 +174,11 @@ extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
175extern void evergreen_program_aspm(struct radeon_device *rdev); 174extern void evergreen_program_aspm(struct radeon_device *rdev);
176extern void sumo_rlc_fini(struct radeon_device *rdev); 175extern void sumo_rlc_fini(struct radeon_device *rdev);
177extern int sumo_rlc_init(struct radeon_device *rdev); 176extern int sumo_rlc_init(struct radeon_device *rdev);
177extern void cayman_dma_vm_set_page(struct radeon_device *rdev,
178 struct radeon_ib *ib,
179 uint64_t pe,
180 uint64_t addr, unsigned count,
181 uint32_t incr, uint32_t flags);
178 182
179/* Firmware Names */ 183/* Firmware Names */
180MODULE_FIRMWARE("radeon/BARTS_pfp.bin"); 184MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
@@ -1374,23 +1378,6 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1374 radeon_ring_write(ring, 10); /* poll interval */ 1378 radeon_ring_write(ring, 10); /* poll interval */
1375} 1379}
1376 1380
1377void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
1378 struct radeon_ring *ring,
1379 struct radeon_semaphore *semaphore,
1380 bool emit_wait)
1381{
1382 uint64_t addr = semaphore->gpu_addr;
1383
1384 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
1385 radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
1386
1387 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
1388 radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
1389
1390 radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
1391 radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
1392}
1393
1394static void cayman_cp_enable(struct radeon_device *rdev, bool enable) 1381static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
1395{ 1382{
1396 if (enable) 1383 if (enable)
@@ -1564,8 +1551,8 @@ static int cayman_cp_resume(struct radeon_device *rdev)
1564 1551
1565 /* Set ring buffer size */ 1552 /* Set ring buffer size */
1566 ring = &rdev->ring[ridx[i]]; 1553 ring = &rdev->ring[ridx[i]];
1567 rb_cntl = drm_order(ring->ring_size / 8); 1554 rb_cntl = order_base_2(ring->ring_size / 8);
1568 rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8; 1555 rb_cntl |= order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8;
1569#ifdef __BIG_ENDIAN 1556#ifdef __BIG_ENDIAN
1570 rb_cntl |= BUF_SWAP_32BIT; 1557 rb_cntl |= BUF_SWAP_32BIT;
1571#endif 1558#endif
@@ -1613,186 +1600,7 @@ static int cayman_cp_resume(struct radeon_device *rdev)
1613 return 0; 1600 return 0;
1614} 1601}
1615 1602
1616/* 1603u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
1617 * DMA
1618 * Starting with R600, the GPU has an asynchronous
1619 * DMA engine. The programming model is very similar
1620 * to the 3D engine (ring buffer, IBs, etc.), but the
1621 * DMA controller has its own packet format that is
1622 * different from the PM4 format used by the 3D engine.
1623 * It supports copying data, writing embedded data,
1624 * solid fills, and a number of other things. It also
1625 * has support for tiling/detiling of buffers.
1626 * Cayman and newer support two asynchronous DMA engines.
1627 */
1628/**
1629 * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
1630 *
1631 * @rdev: radeon_device pointer
1632 * @ib: IB object to schedule
1633 *
1634 * Schedule an IB in the DMA ring (cayman-SI).
1635 */
1636void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
1637 struct radeon_ib *ib)
1638{
1639 struct radeon_ring *ring = &rdev->ring[ib->ring];
1640
1641 if (rdev->wb.enabled) {
1642 u32 next_rptr = ring->wptr + 4;
1643 while ((next_rptr & 7) != 5)
1644 next_rptr++;
1645 next_rptr += 3;
1646 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
1647 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
1648 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
1649 radeon_ring_write(ring, next_rptr);
1650 }
1651
1652 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
1653 * Pad as necessary with NOPs.
1654 */
1655 while ((ring->wptr & 7) != 5)
1656 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
1657 radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
1658 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
1659 radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
1660
1661}
1662
1663/**
1664 * cayman_dma_stop - stop the async dma engines
1665 *
1666 * @rdev: radeon_device pointer
1667 *
1668 * Stop the async dma engines (cayman-SI).
1669 */
1670void cayman_dma_stop(struct radeon_device *rdev)
1671{
1672 u32 rb_cntl;
1673
1674 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1675
1676 /* dma0 */
1677 rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
1678 rb_cntl &= ~DMA_RB_ENABLE;
1679 WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
1680
1681 /* dma1 */
1682 rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
1683 rb_cntl &= ~DMA_RB_ENABLE;
1684 WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
1685
1686 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
1687 rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
1688}
1689
1690/**
1691 * cayman_dma_resume - setup and start the async dma engines
1692 *
1693 * @rdev: radeon_device pointer
1694 *
1695 * Set up the DMA ring buffers and enable them. (cayman-SI).
1696 * Returns 0 for success, error for failure.
1697 */
1698int cayman_dma_resume(struct radeon_device *rdev)
1699{
1700 struct radeon_ring *ring;
1701 u32 rb_cntl, dma_cntl, ib_cntl;
1702 u32 rb_bufsz;
1703 u32 reg_offset, wb_offset;
1704 int i, r;
1705
1706 /* Reset dma */
1707 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
1708 RREG32(SRBM_SOFT_RESET);
1709 udelay(50);
1710 WREG32(SRBM_SOFT_RESET, 0);
1711
1712 for (i = 0; i < 2; i++) {
1713 if (i == 0) {
1714 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
1715 reg_offset = DMA0_REGISTER_OFFSET;
1716 wb_offset = R600_WB_DMA_RPTR_OFFSET;
1717 } else {
1718 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
1719 reg_offset = DMA1_REGISTER_OFFSET;
1720 wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
1721 }
1722
1723 WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
1724 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
1725
1726 /* Set ring buffer size in dwords */
1727 rb_bufsz = drm_order(ring->ring_size / 4);
1728 rb_cntl = rb_bufsz << 1;
1729#ifdef __BIG_ENDIAN
1730 rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
1731#endif
1732 WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
1733
1734 /* Initialize the ring buffer's read and write pointers */
1735 WREG32(DMA_RB_RPTR + reg_offset, 0);
1736 WREG32(DMA_RB_WPTR + reg_offset, 0);
1737
1738 /* set the wb address whether it's enabled or not */
1739 WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
1740 upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
1741 WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
1742 ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
1743
1744 if (rdev->wb.enabled)
1745 rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
1746
1747 WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
1748
1749 /* enable DMA IBs */
1750 ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
1751#ifdef __BIG_ENDIAN
1752 ib_cntl |= DMA_IB_SWAP_ENABLE;
1753#endif
1754 WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
1755
1756 dma_cntl = RREG32(DMA_CNTL + reg_offset);
1757 dma_cntl &= ~CTXEMPTY_INT_ENABLE;
1758 WREG32(DMA_CNTL + reg_offset, dma_cntl);
1759
1760 ring->wptr = 0;
1761 WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
1762
1763 ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
1764
1765 WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
1766
1767 ring->ready = true;
1768
1769 r = radeon_ring_test(rdev, ring->idx, ring);
1770 if (r) {
1771 ring->ready = false;
1772 return r;
1773 }
1774 }
1775
1776 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
1777
1778 return 0;
1779}
1780
1781/**
1782 * cayman_dma_fini - tear down the async dma engines
1783 *
1784 * @rdev: radeon_device pointer
1785 *
1786 * Stop the async dma engines and free the rings (cayman-SI).
1787 */
1788void cayman_dma_fini(struct radeon_device *rdev)
1789{
1790 cayman_dma_stop(rdev);
1791 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
1792 radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
1793}
1794
1795static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
1796{ 1604{
1797 u32 reset_mask = 0; 1605 u32 reset_mask = 0;
1798 u32 tmp; 1606 u32 tmp;
@@ -2045,34 +1853,6 @@ bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2045 return radeon_ring_test_lockup(rdev, ring); 1853 return radeon_ring_test_lockup(rdev, ring);
2046} 1854}
2047 1855
2048/**
2049 * cayman_dma_is_lockup - Check if the DMA engine is locked up
2050 *
2051 * @rdev: radeon_device pointer
2052 * @ring: radeon_ring structure holding ring information
2053 *
2054 * Check if the async DMA engine is locked up.
2055 * Returns true if the engine appears to be locked up, false if not.
2056 */
2057bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2058{
2059 u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
2060 u32 mask;
2061
2062 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
2063 mask = RADEON_RESET_DMA;
2064 else
2065 mask = RADEON_RESET_DMA1;
2066
2067 if (!(reset_mask & mask)) {
2068 radeon_ring_lockup_update(ring);
2069 return false;
2070 }
2071 /* force ring activities */
2072 radeon_ring_force_activity(rdev, ring);
2073 return radeon_ring_test_lockup(rdev, ring);
2074}
2075
2076static int cayman_startup(struct radeon_device *rdev) 1856static int cayman_startup(struct radeon_device *rdev)
2077{ 1857{
2078 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 1858 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -2083,6 +1863,11 @@ static int cayman_startup(struct radeon_device *rdev)
2083 /* enable aspm */ 1863 /* enable aspm */
2084 evergreen_program_aspm(rdev); 1864 evergreen_program_aspm(rdev);
2085 1865
1866 /* scratch needs to be initialized before MC */
1867 r = r600_vram_scratch_init(rdev);
1868 if (r)
1869 return r;
1870
2086 evergreen_mc_program(rdev); 1871 evergreen_mc_program(rdev);
2087 1872
2088 if (rdev->flags & RADEON_IS_IGP) { 1873 if (rdev->flags & RADEON_IS_IGP) {
@@ -2109,26 +1894,16 @@ static int cayman_startup(struct radeon_device *rdev)
2109 } 1894 }
2110 } 1895 }
2111 1896
2112 r = r600_vram_scratch_init(rdev);
2113 if (r)
2114 return r;
2115
2116 r = cayman_pcie_gart_enable(rdev); 1897 r = cayman_pcie_gart_enable(rdev);
2117 if (r) 1898 if (r)
2118 return r; 1899 return r;
2119 cayman_gpu_init(rdev); 1900 cayman_gpu_init(rdev);
2120 1901
2121 r = evergreen_blit_init(rdev);
2122 if (r) {
2123 r600_blit_fini(rdev);
2124 rdev->asic->copy.copy = NULL;
2125 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
2126 }
2127
2128 /* allocate rlc buffers */ 1902 /* allocate rlc buffers */
2129 if (rdev->flags & RADEON_IS_IGP) { 1903 if (rdev->flags & RADEON_IS_IGP) {
2130 rdev->rlc.reg_list = tn_rlc_save_restore_register_list; 1904 rdev->rlc.reg_list = tn_rlc_save_restore_register_list;
2131 rdev->rlc.reg_list_size = tn_rlc_save_restore_register_list_size; 1905 rdev->rlc.reg_list_size =
1906 (u32)ARRAY_SIZE(tn_rlc_save_restore_register_list);
2132 rdev->rlc.cs_data = cayman_cs_data; 1907 rdev->rlc.cs_data = cayman_cs_data;
2133 r = sumo_rlc_init(rdev); 1908 r = sumo_rlc_init(rdev);
2134 if (r) { 1909 if (r) {
@@ -2148,7 +1923,7 @@ static int cayman_startup(struct radeon_device *rdev)
2148 return r; 1923 return r;
2149 } 1924 }
2150 1925
2151 r = rv770_uvd_resume(rdev); 1926 r = uvd_v2_2_resume(rdev);
2152 if (!r) { 1927 if (!r) {
2153 r = radeon_fence_driver_start_ring(rdev, 1928 r = radeon_fence_driver_start_ring(rdev,
2154 R600_RING_TYPE_UVD_INDEX); 1929 R600_RING_TYPE_UVD_INDEX);
@@ -2199,7 +1974,7 @@ static int cayman_startup(struct radeon_device *rdev)
2199 1974
2200 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, 1975 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
2201 CP_RB0_RPTR, CP_RB0_WPTR, 1976 CP_RB0_RPTR, CP_RB0_WPTR,
2202 0, 0xfffff, RADEON_CP_PACKET2); 1977 RADEON_CP_PACKET2);
2203 if (r) 1978 if (r)
2204 return r; 1979 return r;
2205 1980
@@ -2207,7 +1982,7 @@ static int cayman_startup(struct radeon_device *rdev)
2207 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, 1982 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
2208 DMA_RB_RPTR + DMA0_REGISTER_OFFSET, 1983 DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
2209 DMA_RB_WPTR + DMA0_REGISTER_OFFSET, 1984 DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
2210 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); 1985 DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
2211 if (r) 1986 if (r)
2212 return r; 1987 return r;
2213 1988
@@ -2215,7 +1990,7 @@ static int cayman_startup(struct radeon_device *rdev)
2215 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, 1990 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
2216 DMA_RB_RPTR + DMA1_REGISTER_OFFSET, 1991 DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
2217 DMA_RB_WPTR + DMA1_REGISTER_OFFSET, 1992 DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
2218 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); 1993 DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
2219 if (r) 1994 if (r)
2220 return r; 1995 return r;
2221 1996
@@ -2232,12 +2007,11 @@ static int cayman_startup(struct radeon_device *rdev)
2232 2007
2233 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; 2008 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
2234 if (ring->ring_size) { 2009 if (ring->ring_size) {
2235 r = radeon_ring_init(rdev, ring, ring->ring_size, 2010 r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
2236 R600_WB_UVD_RPTR_OFFSET,
2237 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, 2011 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
2238 0, 0xfffff, RADEON_CP_PACKET2); 2012 RADEON_CP_PACKET2);
2239 if (!r) 2013 if (!r)
2240 r = r600_uvd_init(rdev); 2014 r = uvd_v1_0_init(rdev);
2241 if (r) 2015 if (r)
2242 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); 2016 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
2243 } 2017 }
@@ -2254,9 +2028,15 @@ static int cayman_startup(struct radeon_device *rdev)
2254 return r; 2028 return r;
2255 } 2029 }
2256 2030
2257 r = r600_audio_init(rdev); 2031 if (ASIC_IS_DCE6(rdev)) {
2258 if (r) 2032 r = dce6_audio_init(rdev);
2259 return r; 2033 if (r)
2034 return r;
2035 } else {
2036 r = r600_audio_init(rdev);
2037 if (r)
2038 return r;
2039 }
2260 2040
2261 return 0; 2041 return 0;
2262} 2042}
@@ -2287,11 +2067,14 @@ int cayman_resume(struct radeon_device *rdev)
2287 2067
2288int cayman_suspend(struct radeon_device *rdev) 2068int cayman_suspend(struct radeon_device *rdev)
2289{ 2069{
2290 r600_audio_fini(rdev); 2070 if (ASIC_IS_DCE6(rdev))
2071 dce6_audio_fini(rdev);
2072 else
2073 r600_audio_fini(rdev);
2291 radeon_vm_manager_fini(rdev); 2074 radeon_vm_manager_fini(rdev);
2292 cayman_cp_enable(rdev, false); 2075 cayman_cp_enable(rdev, false);
2293 cayman_dma_stop(rdev); 2076 cayman_dma_stop(rdev);
2294 r600_uvd_stop(rdev); 2077 uvd_v1_0_fini(rdev);
2295 radeon_uvd_suspend(rdev); 2078 radeon_uvd_suspend(rdev);
2296 evergreen_irq_suspend(rdev); 2079 evergreen_irq_suspend(rdev);
2297 radeon_wb_disable(rdev); 2080 radeon_wb_disable(rdev);
@@ -2413,7 +2196,6 @@ int cayman_init(struct radeon_device *rdev)
2413 2196
2414void cayman_fini(struct radeon_device *rdev) 2197void cayman_fini(struct radeon_device *rdev)
2415{ 2198{
2416 r600_blit_fini(rdev);
2417 cayman_cp_fini(rdev); 2199 cayman_cp_fini(rdev);
2418 cayman_dma_fini(rdev); 2200 cayman_dma_fini(rdev);
2419 r600_irq_fini(rdev); 2201 r600_irq_fini(rdev);
@@ -2423,7 +2205,7 @@ void cayman_fini(struct radeon_device *rdev)
2423 radeon_vm_manager_fini(rdev); 2205 radeon_vm_manager_fini(rdev);
2424 radeon_ib_pool_fini(rdev); 2206 radeon_ib_pool_fini(rdev);
2425 radeon_irq_kms_fini(rdev); 2207 radeon_irq_kms_fini(rdev);
2426 r600_uvd_stop(rdev); 2208 uvd_v1_0_fini(rdev);
2427 radeon_uvd_fini(rdev); 2209 radeon_uvd_fini(rdev);
2428 cayman_pcie_gart_fini(rdev); 2210 cayman_pcie_gart_fini(rdev);
2429 r600_vram_scratch_fini(rdev); 2211 r600_vram_scratch_fini(rdev);
@@ -2684,61 +2466,7 @@ void cayman_vm_set_page(struct radeon_device *rdev,
2684 } 2466 }
2685 } 2467 }
2686 } else { 2468 } else {
2687 if ((flags & RADEON_VM_PAGE_SYSTEM) || 2469 cayman_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
2688 (count == 1)) {
2689 while (count) {
2690 ndw = count * 2;
2691 if (ndw > 0xFFFFE)
2692 ndw = 0xFFFFE;
2693
2694 /* for non-physically contiguous pages (system) */
2695 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
2696 ib->ptr[ib->length_dw++] = pe;
2697 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
2698 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
2699 if (flags & RADEON_VM_PAGE_SYSTEM) {
2700 value = radeon_vm_map_gart(rdev, addr);
2701 value &= 0xFFFFFFFFFFFFF000ULL;
2702 } else if (flags & RADEON_VM_PAGE_VALID) {
2703 value = addr;
2704 } else {
2705 value = 0;
2706 }
2707 addr += incr;
2708 value |= r600_flags;
2709 ib->ptr[ib->length_dw++] = value;
2710 ib->ptr[ib->length_dw++] = upper_32_bits(value);
2711 }
2712 }
2713 while (ib->length_dw & 0x7)
2714 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
2715 } else {
2716 while (count) {
2717 ndw = count * 2;
2718 if (ndw > 0xFFFFE)
2719 ndw = 0xFFFFE;
2720
2721 if (flags & RADEON_VM_PAGE_VALID)
2722 value = addr;
2723 else
2724 value = 0;
2725 /* for physically contiguous pages (vram) */
2726 ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
2727 ib->ptr[ib->length_dw++] = pe; /* dst addr */
2728 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
2729 ib->ptr[ib->length_dw++] = r600_flags; /* mask */
2730 ib->ptr[ib->length_dw++] = 0;
2731 ib->ptr[ib->length_dw++] = value; /* value */
2732 ib->ptr[ib->length_dw++] = upper_32_bits(value);
2733 ib->ptr[ib->length_dw++] = incr; /* increment size */
2734 ib->ptr[ib->length_dw++] = 0;
2735 pe += ndw * 4;
2736 addr += (ndw / 2) * incr;
2737 count -= ndw / 2;
2738 }
2739 }
2740 while (ib->length_dw & 0x7)
2741 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
2742 } 2470 }
2743} 2471}
2744 2472
@@ -2772,26 +2500,3 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
2772 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 2500 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
2773 radeon_ring_write(ring, 0x0); 2501 radeon_ring_write(ring, 0x0);
2774} 2502}
2775
2776void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
2777{
2778 struct radeon_ring *ring = &rdev->ring[ridx];
2779
2780 if (vm == NULL)
2781 return;
2782
2783 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
2784 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
2785 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
2786
2787 /* flush hdp cache */
2788 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
2789 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
2790 radeon_ring_write(ring, 1);
2791
2792 /* bits 0-7 are the VM contexts0-7 */
2793 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
2794 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
2795 radeon_ring_write(ring, 1 << vm->id);
2796}
2797
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
new file mode 100644
index 000000000000..dd6e9688fbef
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -0,0 +1,338 @@
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <drm/drmP.h>
25#include "radeon.h"
26#include "radeon_asic.h"
27#include "nid.h"
28
29u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev);
30
31/*
32 * DMA
33 * Starting with R600, the GPU has an asynchronous
34 * DMA engine. The programming model is very similar
35 * to the 3D engine (ring buffer, IBs, etc.), but the
36 * DMA controller has its own packet format that is
37 * different from the PM4 format used by the 3D engine.
38 * It supports copying data, writing embedded data,
39 * solid fills, and a number of other things. It also
40 * has support for tiling/detiling of buffers.
41 * Cayman and newer support two asynchronous DMA engines.
42 */
43
44/**
45 * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
46 *
47 * @rdev: radeon_device pointer
48 * @ib: IB object to schedule
49 *
50 * Schedule an IB in the DMA ring (cayman-SI).
51 */
52void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
53 struct radeon_ib *ib)
54{
55 struct radeon_ring *ring = &rdev->ring[ib->ring];
56
57 if (rdev->wb.enabled) {
58 u32 next_rptr = ring->wptr + 4;
59 while ((next_rptr & 7) != 5)
60 next_rptr++;
61 next_rptr += 3;
62 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
63 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
64 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
65 radeon_ring_write(ring, next_rptr);
66 }
67
68 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
69 * Pad as necessary with NOPs.
70 */
71 while ((ring->wptr & 7) != 5)
72 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
73 radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
74 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
75 radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
76
77}
78
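The '(wptr & 7) != 5' test above follows from the packet sizes: the INDIRECT_BUFFER packet is three dwords, so NOPs are emitted until the write pointer is 5 modulo 8, which makes the packet end exactly on an 8-dword boundary (the next_rptr value in the write-back branch performs the same rounding after first accounting for its own four-dword WRITE packet). A standalone check of that arithmetic, using a hypothetical helper rather than the driver's ring API:

#include <assert.h>
#include <stdint.h>

/* Pad a hypothetical write pointer the way the driver does before the
 * 3-dword INDIRECT_BUFFER packet; returns the pointer after the packet. */
static uint32_t pad_for_ib(uint32_t wptr, uint32_t *nops_emitted)
{
        *nops_emitted = 0;
        while ((wptr & 7) != 5) {        /* one NOP dword per iteration */
                wptr++;
                (*nops_emitted)++;
        }
        return wptr + 3;                 /* the IB packet itself */
}

int main(void)
{
        uint32_t nops, end = pad_for_ib(18, &nops);
        /* 18 is padded to 21 with 3 NOPs; the packet then ends at 24. */
        assert(nops == 3 && end == 24 && (end & 7) == 0);
        return 0;
}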
79/**
80 * cayman_dma_stop - stop the async dma engines
81 *
82 * @rdev: radeon_device pointer
83 *
84 * Stop the async dma engines (cayman-SI).
85 */
86void cayman_dma_stop(struct radeon_device *rdev)
87{
88 u32 rb_cntl;
89
90 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
91
92 /* dma0 */
93 rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
94 rb_cntl &= ~DMA_RB_ENABLE;
95 WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
96
97 /* dma1 */
98 rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
99 rb_cntl &= ~DMA_RB_ENABLE;
100 WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
101
102 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
103 rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
104}
105
106/**
107 * cayman_dma_resume - setup and start the async dma engines
108 *
109 * @rdev: radeon_device pointer
110 *
111 * Set up the DMA ring buffers and enable them. (cayman-SI).
112 * Returns 0 for success, error for failure.
113 */
114int cayman_dma_resume(struct radeon_device *rdev)
115{
116 struct radeon_ring *ring;
117 u32 rb_cntl, dma_cntl, ib_cntl;
118 u32 rb_bufsz;
119 u32 reg_offset, wb_offset;
120 int i, r;
121
122 /* Reset dma */
123 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
124 RREG32(SRBM_SOFT_RESET);
125 udelay(50);
126 WREG32(SRBM_SOFT_RESET, 0);
127
128 for (i = 0; i < 2; i++) {
129 if (i == 0) {
130 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
131 reg_offset = DMA0_REGISTER_OFFSET;
132 wb_offset = R600_WB_DMA_RPTR_OFFSET;
133 } else {
134 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
135 reg_offset = DMA1_REGISTER_OFFSET;
136 wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
137 }
138
139 WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
140 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
141
142 /* Set ring buffer size in dwords */
143 rb_bufsz = order_base_2(ring->ring_size / 4);
144 rb_cntl = rb_bufsz << 1;
145#ifdef __BIG_ENDIAN
146 rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
147#endif
148 WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
149
150 /* Initialize the ring buffer's read and write pointers */
151 WREG32(DMA_RB_RPTR + reg_offset, 0);
152 WREG32(DMA_RB_WPTR + reg_offset, 0);
153
154 /* set the wb address whether it's enabled or not */
155 WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
156 upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
157 WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
158 ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
159
160 if (rdev->wb.enabled)
161 rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
162
163 WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
164
165 /* enable DMA IBs */
166 ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
167#ifdef __BIG_ENDIAN
168 ib_cntl |= DMA_IB_SWAP_ENABLE;
169#endif
170 WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
171
172 dma_cntl = RREG32(DMA_CNTL + reg_offset);
173 dma_cntl &= ~CTXEMPTY_INT_ENABLE;
174 WREG32(DMA_CNTL + reg_offset, dma_cntl);
175
176 ring->wptr = 0;
177 WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
178
179 ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
180
181 WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
182
183 ring->ready = true;
184
185 r = radeon_ring_test(rdev, ring->idx, ring);
186 if (r) {
187 ring->ready = false;
188 return r;
189 }
190 }
191
192 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
193
194 return 0;
195}
196
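order_base_2(), which replaces drm_order() throughout this pull, is the kernel's ceil(log2(n)) helper, so rb_bufsz above is simply the log2 of the ring size in dwords before being shifted into DMA_RB_CNTL. A standalone check of that arithmetic; the helper below is a sketch of the semantics, not the kernel implementation:

#include <assert.h>

/* Stand-in for order_base_2(): smallest k such that (1 << k) >= n. */
static unsigned int order_base_2_sketch(unsigned long n)
{
        unsigned int k = 0;
        while ((1UL << k) < n)
                k++;
        return k;
}

int main(void)
{
        /* A 64 KiB ring is 16384 dwords, i.e. 2^14, so rb_bufsz == 14. */
        assert(order_base_2_sketch(64 * 1024 / 4) == 14);
        return 0;
}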
197/**
198 * cayman_dma_fini - tear down the async dma engines
199 *
200 * @rdev: radeon_device pointer
201 *
202 * Stop the async dma engines and free the rings (cayman-SI).
203 */
204void cayman_dma_fini(struct radeon_device *rdev)
205{
206 cayman_dma_stop(rdev);
207 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
208 radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
209}
210
211/**
212 * cayman_dma_is_lockup - Check if the DMA engine is locked up
213 *
214 * @rdev: radeon_device pointer
215 * @ring: radeon_ring structure holding ring information
216 *
217 * Check if the async DMA engine is locked up.
218 * Returns true if the engine appears to be locked up, false if not.
219 */
220bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
221{
222 u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
223 u32 mask;
224
225 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
226 mask = RADEON_RESET_DMA;
227 else
228 mask = RADEON_RESET_DMA1;
229
230 if (!(reset_mask & mask)) {
231 radeon_ring_lockup_update(ring);
232 return false;
233 }
234 /* force ring activities */
235 radeon_ring_force_activity(rdev, ring);
236 return radeon_ring_test_lockup(rdev, ring);
237}
238
239/**
240 * cayman_dma_vm_set_page - update the page tables using the DMA
241 *
242 * @rdev: radeon_device pointer
243 * @ib: indirect buffer to fill with commands
244 * @pe: addr of the page entry
245 * @addr: dst addr to write into pe
246 * @count: number of page entries to update
247 * @incr: increase next addr by incr bytes
248 * @flags: access flags
249 * @r600_flags: hw access flags
250 *
251 * Update the page tables using the DMA (cayman/TN).
252 */
253void cayman_dma_vm_set_page(struct radeon_device *rdev,
254 struct radeon_ib *ib,
255 uint64_t pe,
256 uint64_t addr, unsigned count,
257 uint32_t incr, uint32_t flags)
258{
259 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
260 uint64_t value;
261 unsigned ndw;
262
263 if ((flags & RADEON_VM_PAGE_SYSTEM) || (count == 1)) {
264 while (count) {
265 ndw = count * 2;
266 if (ndw > 0xFFFFE)
267 ndw = 0xFFFFE;
268
269 /* for non-physically contiguous pages (system) */
270 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
271 ib->ptr[ib->length_dw++] = pe;
272 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
273 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
274 if (flags & RADEON_VM_PAGE_SYSTEM) {
275 value = radeon_vm_map_gart(rdev, addr);
276 value &= 0xFFFFFFFFFFFFF000ULL;
277 } else if (flags & RADEON_VM_PAGE_VALID) {
278 value = addr;
279 } else {
280 value = 0;
281 }
282 addr += incr;
283 value |= r600_flags;
284 ib->ptr[ib->length_dw++] = value;
285 ib->ptr[ib->length_dw++] = upper_32_bits(value);
286 }
287 }
288 } else {
289 while (count) {
290 ndw = count * 2;
291 if (ndw > 0xFFFFE)
292 ndw = 0xFFFFE;
293
294 if (flags & RADEON_VM_PAGE_VALID)
295 value = addr;
296 else
297 value = 0;
298 /* for physically contiguous pages (vram) */
299 ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
300 ib->ptr[ib->length_dw++] = pe; /* dst addr */
301 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
302 ib->ptr[ib->length_dw++] = r600_flags; /* mask */
303 ib->ptr[ib->length_dw++] = 0;
304 ib->ptr[ib->length_dw++] = value; /* value */
305 ib->ptr[ib->length_dw++] = upper_32_bits(value);
306 ib->ptr[ib->length_dw++] = incr; /* increment size */
307 ib->ptr[ib->length_dw++] = 0;
308 pe += ndw * 4;
309 addr += (ndw / 2) * incr;
310 count -= ndw / 2;
311 }
312 }
313 while (ib->length_dw & 0x7)
314 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
315}
316
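A worked example of the dword budgeting in cayman_dma_vm_set_page(): the scattered (system-page) path costs a 3-dword WRITE header plus two dwords per page-table entry per chunk, with ndw clamped to 0xFFFFE so it fits the packet's count field, while the contiguous path is a fixed 9-dword PTE_PDE packet per chunk. The helper below only mirrors that accounting with assumed numbers; it is not driver code.

#include <assert.h>

/* IB dwords consumed for 'count' scattered (system) PTEs, mirroring the
 * clamped-chunk loop in cayman_dma_vm_set_page(). */
static unsigned long ib_dwords_for_system_ptes(unsigned long count)
{
        unsigned long dw = 0, ndw;

        while (count) {
                ndw = count * 2;
                if (ndw > 0xFFFFE)
                        ndw = 0xFFFFE;
                dw += 3 + ndw;           /* header + (ndw / 2) 2-dword PTEs */
                count -= ndw / 2;
        }
        return dw;
}

int main(void)
{
        assert(ib_dwords_for_system_ptes(4) == 11);       /* 3 + 8 */
        /* A very large update is split into 0x7FFFF-entry chunks. */
        assert(ib_dwords_for_system_ptes(0x80000) == (3 + 0xFFFFE) + (3 + 2));
        return 0;
}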
317void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
318{
319 struct radeon_ring *ring = &rdev->ring[ridx];
320
321 if (vm == NULL)
322 return;
323
324 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
325 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
326 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
327
328 /* flush hdp cache */
329 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
330 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
331 radeon_ring_write(ring, 1);
332
333 /* bits 0-7 are the VM contexts0-7 */
334 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
335 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
336 radeon_ring_write(ring, 1 << vm->id);
337}
338
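One note on the SRBM_WRITE packets in cayman_dma_vm_flush() above: registers are addressed by dword offset, which is why each register define is shifted right by two, and the 0xf placed in bits [19:16] appears to be a byte-enable mask selecting all four bytes. That reading of the second dword is an assumption to verify against nid.h and the register specification; the sketch below just spells out the arithmetic.

#include <stdint.h>

/* Second dword of an SRBM_WRITE DMA packet: assumed byte enables in
 * bits [19:16], register dword offset in the low bits. */
static inline uint32_t srbm_write_addr(uint32_t reg_byte_offset)
{
        return (0xf << 16) | (reg_byte_offset >> 2);
}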
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index f0f5f748938a..f7b625c9e0e9 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -769,7 +769,8 @@ bool ni_dpm_vblank_too_short(struct radeon_device *rdev)
769{ 769{
770 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 770 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
771 u32 vblank_time = r600_dpm_get_vblank_time(rdev); 771 u32 vblank_time = r600_dpm_get_vblank_time(rdev);
772 u32 switch_limit = pi->mem_gddr5 ? 450 : 300; 772 /* we never hit the non-gddr5 limit so disable it */
773 u32 switch_limit = pi->mem_gddr5 ? 450 : 0;
773 774
774 if (vblank_time < switch_limit) 775 if (vblank_time < switch_limit)
775 return true; 776 return true;
@@ -4037,6 +4038,7 @@ static int ni_parse_power_table(struct radeon_device *rdev)
4037 (power_state->v1.ucNonClockStateIndex * 4038 (power_state->v1.ucNonClockStateIndex *
4038 power_info->pplib.ucNonClockSize)); 4039 power_info->pplib.ucNonClockSize));
4039 if (power_info->pplib.ucStateEntrySize - 1) { 4040 if (power_info->pplib.ucStateEntrySize - 1) {
4041 u8 *idx;
4040 ps = kzalloc(sizeof(struct ni_ps), GFP_KERNEL); 4042 ps = kzalloc(sizeof(struct ni_ps), GFP_KERNEL);
4041 if (ps == NULL) { 4043 if (ps == NULL) {
4042 kfree(rdev->pm.dpm.ps); 4044 kfree(rdev->pm.dpm.ps);
@@ -4046,12 +4048,12 @@ static int ni_parse_power_table(struct radeon_device *rdev)
4046 ni_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], 4048 ni_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
4047 non_clock_info, 4049 non_clock_info,
4048 power_info->pplib.ucNonClockSize); 4050 power_info->pplib.ucNonClockSize);
4051 idx = (u8 *)&power_state->v1.ucClockStateIndices[0];
4049 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { 4052 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
4050 clock_info = (union pplib_clock_info *) 4053 clock_info = (union pplib_clock_info *)
4051 (mode_info->atom_context->bios + data_offset + 4054 (mode_info->atom_context->bios + data_offset +
4052 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + 4055 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
4053 (power_state->v1.ucClockStateIndices[j] * 4056 (idx[j] * power_info->pplib.ucClockInfoSize));
4054 power_info->pplib.ucClockInfoSize));
4055 ni_parse_pplib_clock_info(rdev, 4057 ni_parse_pplib_clock_info(rdev,
4056 &rdev->pm.dpm.ps[i], j, 4058 &rdev->pm.dpm.ps[i], j,
4057 clock_info); 4059 clock_info);
@@ -4270,6 +4272,12 @@ int ni_dpm_init(struct radeon_device *rdev)
4270 4272
4271 ni_pi->use_power_boost_limit = true; 4273 ni_pi->use_power_boost_limit = true;
4272 4274
4275 /* make sure dc limits are valid */
4276 if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
4277 (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
4278 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
4279 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4280
4273 return 0; 4281 return 0;
4274} 4282}
4275 4283
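The ni_parse_power_table() change above stops indexing power_state->v1.ucClockStateIndices[j] directly and instead walks a plain u8 pointer. ucClockStateIndices is declared with a single element but is really variable length (see ATOM_PPLIB_STATE in the new pptable.h below), so the byte pointer makes the stride explicit and avoids leaning on the fixed-size array declaration. A minimal illustration of the idiom with a hypothetical stand-in struct, not the ATOM headers:

#include <stdint.h>
#include <stdio.h>

struct state {                           /* stand-in for ATOM_PPLIB_STATE */
        uint8_t non_clock_index;
        uint8_t clock_indices[1];        /* actually entry_size - 1 bytes */
};

static void dump_clock_indices(const struct state *s, unsigned int entry_size)
{
        const uint8_t *idx = &s->clock_indices[0];   /* byte-wise walk */
        unsigned int j;

        for (j = 0; j < entry_size - 1; j++)
                printf("clock info index %u: %u\n", j, idx[j]);
}

int main(void)
{
        uint8_t raw[] = { 0, 2, 5, 7 };              /* one state, 3 levels */
        dump_clock_indices((const struct state *)raw, sizeof(raw));
        return 0;
}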
diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h
index b5564a3645d2..682842804bce 100644
--- a/drivers/gpu/drm/radeon/ppsmc.h
+++ b/drivers/gpu/drm/radeon/ppsmc.h
@@ -99,11 +99,68 @@ typedef uint8_t PPSMC_Result;
99#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96) 99#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96)
100#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97) 100#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97)
101 101
102/* CI/KV/KB */
103#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D)
104#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E)
105#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F)
106#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130)
107#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131)
108#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132)
109#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135)
110#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136)
111#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
112#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137)
113#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138)
114#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139)
115#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a)
116#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
117#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140)
118#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141)
119#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145)
120#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146)
121#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147)
122#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148)
123#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a)
124#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e)
125#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f)
126#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150)
127#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151)
128#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154)
129#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155)
130#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156)
131#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157)
132#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158)
133#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159)
134#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a)
135#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b)
136#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f)
137#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162)
138#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167)
139#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169)
140#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a)
141#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185)
142#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186)
143#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187)
144#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188)
145#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189)
146#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A)
147#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B)
148#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C)
149#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F)
150#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
151#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
152
153#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200)
154#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201)
155
102/* TN */ 156/* TN */
103#define PPSMC_MSG_DPM_Config ((uint32_t) 0x102) 157#define PPSMC_MSG_DPM_Config ((uint32_t) 0x102)
104#define PPSMC_MSG_DPM_ForceState ((uint32_t) 0x104) 158#define PPSMC_MSG_DPM_ForceState ((uint32_t) 0x104)
105#define PPSMC_MSG_PG_SIMD_Config ((uint32_t) 0x108) 159#define PPSMC_MSG_PG_SIMD_Config ((uint32_t) 0x108)
106#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112) 160#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112)
161#define PPSMC_MSG_Voltage_Cntl_Enable ((uint32_t) 0x109)
162#define PPSMC_MSG_VCEPowerOFF ((uint32_t) 0x10e)
163#define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f)
107#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d) 164#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d)
108#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e) 165#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e)
109#define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124) 166#define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124)
diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h
new file mode 100644
index 000000000000..da43ab328833
--- /dev/null
+++ b/drivers/gpu/drm/radeon/pptable.h
@@ -0,0 +1,682 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef _PPTABLE_H
24#define _PPTABLE_H
25
26#pragma pack(push, 1)
27
28typedef struct _ATOM_PPLIB_THERMALCONTROLLER
29
30{
31 UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_*
32 UCHAR ucI2cLine; // as interpreted by DAL I2C
33 UCHAR ucI2cAddress;
34 UCHAR ucFanParameters; // Fan Control Parameters.
35 UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only.
36 UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only.
37 UCHAR ucReserved; // ----
38 UCHAR ucFlags; // to be defined
39} ATOM_PPLIB_THERMALCONTROLLER;
40
41#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
42#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller.
43
44#define ATOM_PP_THERMALCONTROLLER_NONE 0
45#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib
46#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib
47#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib
48#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib
49#define ATOM_PP_THERMALCONTROLLER_LM64 5
50#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib
51#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
52#define ATOM_PP_THERMALCONTROLLER_RV770 8
53#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
54#define ATOM_PP_THERMALCONTROLLER_KONG 10
55#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
56#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
57#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
58#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally
59#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15
60#define ATOM_PP_THERMALCONTROLLER_SISLANDS 16
61#define ATOM_PP_THERMALCONTROLLER_LM96163 17
62#define ATOM_PP_THERMALCONTROLLER_CISLANDS 18
63#define ATOM_PP_THERMALCONTROLLER_KAVERI 19
64
65
66// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
67// We probably should reserve the bit 0x80 for this use.
68// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
69// The driver can pick the correct internal controller based on the ASIC.
70
71#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
72#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller
73
74typedef struct _ATOM_PPLIB_STATE
75{
76 UCHAR ucNonClockStateIndex;
77 UCHAR ucClockStateIndices[1]; // variable-sized
78} ATOM_PPLIB_STATE;
79
80
81typedef struct _ATOM_PPLIB_FANTABLE
82{
83 UCHAR ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same.
84 UCHAR ucTHyst; // Temperature hysteresis. Integer.
85 USHORT usTMin; // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM.
86 USHORT usTMed; // The middle temperature where we change slopes.
87 USHORT usTHigh; // The high point above TMed for adjusting the second slope.
88 USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments).
89 USHORT usPWMMed; // The PWM value (in percent) at TMed.
90 USHORT usPWMHigh; // The PWM value at THigh.
91} ATOM_PPLIB_FANTABLE;
92
93typedef struct _ATOM_PPLIB_FANTABLE2
94{
95 ATOM_PPLIB_FANTABLE basicTable;
96 USHORT usTMax; // The max temperature
97} ATOM_PPLIB_FANTABLE2;
98
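The fan table above describes a two-slope PWM curve: minimum PWM below TMin, one slope from TMin to TMed and a second from TMed to THigh, with temperatures in 0.01 degC and PWM duty in 0.01% units. How a driver consumes the table is outside this diff; the interpolation below is only an illustrative sketch of that reading of the comments, assuming TMin < TMed < THigh.

#include <stdint.h>

/* Piecewise-linear PWM lookup; all arguments are in the table's 0.01 units
 * and the caller guarantees tmin < tmed < thigh. */
static uint32_t fan_pwm_sketch(uint32_t t, uint32_t tmin, uint32_t tmed,
                               uint32_t thigh, uint32_t pwm_min,
                               uint32_t pwm_med, uint32_t pwm_high)
{
        if (t <= tmin)
                return pwm_min;
        if (t <= tmed)
                return pwm_min + (pwm_med - pwm_min) * (t - tmin) / (tmed - tmin);
        if (t <= thigh)
                return pwm_med + (pwm_high - pwm_med) * (t - tmed) / (thigh - tmed);
        return pwm_high;
}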
99typedef struct _ATOM_PPLIB_EXTENDEDHEADER
100{
101 USHORT usSize;
102 ULONG ulMaxEngineClock; // For Overdrive.
103 ULONG ulMaxMemoryClock; // For Overdrive.
104 // Add extra system parameters here, always adjust size to include all fields.
105 USHORT usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
106 USHORT usUVDTableOffset; //points to ATOM_PPLIB_UVD_Table
107 USHORT usSAMUTableOffset; //points to ATOM_PPLIB_SAMU_Table
108 USHORT usPPMTableOffset; //points to ATOM_PPLIB_PPM_Table
109 USHORT usACPTableOffset; //points to ATOM_PPLIB_ACP_Table
110 USHORT usPowerTuneTableOffset; //points to ATOM_PPLIB_POWERTUNE_Table
111} ATOM_PPLIB_EXTENDEDHEADER;
112
113//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
114#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
115#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
116#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
117#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
118#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
119#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
120#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
121#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
122#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
123#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
124#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
125#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
126#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
127#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition.
128#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
129#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC.
130#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
131#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver support BACO state.
132#define ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE 0x00040000 // Does the driver support the new CAC voltage table.
133#define ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY 0x00080000 // Does the driver support reverting GPIO5 polarity.
134#define ATOM_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17 0x00100000 // Does the driver support thermal2GPIO17.
135#define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE 0x00200000 // Does the driver support a configurable VR HOT GPIO.
136#define ATOM_PP_PLATFORM_CAP_TEMP_INVERSION 0x00400000 // Does the driver support the Temp Inversion feature.
137#define ATOM_PP_PLATFORM_CAP_EVV 0x00800000
138
139typedef struct _ATOM_PPLIB_POWERPLAYTABLE
140{
141 ATOM_COMMON_TABLE_HEADER sHeader;
142
143 UCHAR ucDataRevision;
144
145 UCHAR ucNumStates;
146 UCHAR ucStateEntrySize;
147 UCHAR ucClockInfoSize;
148 UCHAR ucNonClockSize;
149
150 // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
151 USHORT usStateArrayOffset;
152
153 // offset from start of this table to array of ASIC-specific structures,
154 // currently ATOM_PPLIB_CLOCK_INFO.
155 USHORT usClockInfoArrayOffset;
156
157 // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
158 USHORT usNonClockInfoArrayOffset;
159
160 USHORT usBackbiasTime; // in microseconds
161 USHORT usVoltageTime; // in microseconds
162 USHORT usTableSize; //the size of this structure, or the extended structure
163
164 ULONG ulPlatformCaps; // See ATOM_PPLIB_CAPS_*
165
166 ATOM_PPLIB_THERMALCONTROLLER sThermalController;
167
168 USHORT usBootClockInfoOffset;
169 USHORT usBootNonClockInfoOffset;
170
171} ATOM_PPLIB_POWERPLAYTABLE;
172
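All of the offsets in this table are relative to the start of the PowerPlay table itself, with per-entry strides given by the uc*Size fields; that is exactly how ni_parse_power_table() in the ni_dpm.c hunk above walks it. A hedged sketch of the address arithmetic with a hypothetical helper, not driver code:

#include <stddef.h>
#include <stdint.h>

/* Address of the i-th ATOM_PPLIB_STATE entry given a pointer to the start
 * of the PowerPlay table (offsets are little-endian in the BIOS image). */
static const uint8_t *pplib_state_entry(const uint8_t *table,
                                        uint16_t state_array_offset,
                                        uint8_t state_entry_size,
                                        unsigned int i)
{
        return table + state_array_offset + (size_t)i * state_entry_size;
}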
173typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
174{
175 ATOM_PPLIB_POWERPLAYTABLE basicTable;
176 UCHAR ucNumCustomThermalPolicy;
177 USHORT usCustomThermalPolicyArrayOffset;
178}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
179
180typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
181{
182 ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
183 USHORT usFormatID; // To be used ONLY by PPGen.
184 USHORT usFanTableOffset;
185 USHORT usExtendendedHeaderOffset;
186} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
187
188typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
189{
190 ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
191 ULONG ulGoldenPPID; // PPGen use only
192 ULONG ulGoldenRevision; // PPGen use only
193 USHORT usVddcDependencyOnSCLKOffset;
194 USHORT usVddciDependencyOnMCLKOffset;
195 USHORT usVddcDependencyOnMCLKOffset;
196 USHORT usMaxClockVoltageOnDCOffset;
197 USHORT usVddcPhaseShedLimitsTableOffset; // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
198 USHORT usMvddDependencyOnMCLKOffset;
199} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
200
201typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
202{
203 ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
204 ULONG ulTDPLimit;
205 ULONG ulNearTDPLimit;
206 ULONG ulSQRampingThreshold;
207 USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table
208 ULONG ulCACLeakage; // The iLeakage for driver calculated CAC leakage table
209 USHORT usTDPODLimit;
210 USHORT usLoadLineSlope; // in milliOhms * 100
211} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
212
213//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
214#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
215#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
216#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0
217#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1
218#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3
219#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5
220// 2, 4, 6, 7 are reserved
221
222#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008
223#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010
224#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020
225#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040
226#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080
227#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100
228#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200
229#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
230#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
231#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
232#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000
233#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
234#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
235
236//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
237#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
238#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002
239#define ATOM_PPLIB_CLASSIFICATION2_MVC 0x0004 //Multi-View Codec (BD-3D)
240
241//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
242#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
243#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
244
245// 0 is 2.5Gb/s, 1 is 5Gb/s
246#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004
247#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2
248
249// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec
250#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8
251#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3
252
253// lookup into reduced refresh-rate table
254#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00
255#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
256
257#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0
258#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1
259// 2-15 TBD as needed.
260
261#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
262#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
263
264#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
265
266#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
267
268//memory related flags
269#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x000010000
270
271//M3 Arb //2bits, current 3 sets of parameters in total
272#define ATOM_PPLIB_M3ARB_MASK 0x00060000
273#define ATOM_PPLIB_M3ARB_SHIFT 17
274
275#define ATOM_PPLIB_ENABLE_DRR 0x00080000
276
277// remaining 16 bits are reserved
278typedef struct _ATOM_PPLIB_THERMAL_STATE
279{
280 UCHAR ucMinTemperature;
281 UCHAR ucMaxTemperature;
282 UCHAR ucThermalAction;
283}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
284
285// Contained in an array starting at the offset
286// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
287// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
288#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
289#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
290typedef struct _ATOM_PPLIB_NONCLOCK_INFO
291{
292 USHORT usClassification;
293 UCHAR ucMinTemperature;
294 UCHAR ucMaxTemperature;
295 ULONG ulCapsAndSettings;
296 UCHAR ucRequiredPower;
297 USHORT usClassification2;
298 ULONG ulVCLK;
299 ULONG ulDCLK;
300 UCHAR ucUnused[5];
301} ATOM_PPLIB_NONCLOCK_INFO;
302
303// Contained in an array starting at the offset
304// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
305// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
306typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
307{
308 USHORT usEngineClockLow;
309 UCHAR ucEngineClockHigh;
310
311 USHORT usMemoryClockLow;
312 UCHAR ucMemoryClockHigh;
313
314 USHORT usVDDC;
315 USHORT usUnused1;
316 USHORT usUnused2;
317
318 ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
319
320} ATOM_PPLIB_R600_CLOCK_INFO;
321
322// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
323#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1
324#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2
325#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
326#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
327#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
328#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0).
329
330typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
331
332{
333 USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600).
334 UCHAR ucLowEngineClockHigh;
335 USHORT usHighEngineClockLow; // High Engine clock in MHz.
336 UCHAR ucHighEngineClockHigh;
337 USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
338 UCHAR ucMemoryClockHigh; // Currently unused.
339 UCHAR ucPadding; // For proper alignment and size.
340 USHORT usVDDC; // For the 780, use: None, Low, High, Variable
341 UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
342 UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could
343 USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
344 ULONG ulFlags;
345} ATOM_PPLIB_RS780_CLOCK_INFO;
346
347#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
348#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
349#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
350#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
351
352#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
353#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
354#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
355
356#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
357#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
358#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
359
360typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
361{
362 USHORT usEngineClockLow;
363 UCHAR ucEngineClockHigh;
364
365 USHORT usMemoryClockLow;
366 UCHAR ucMemoryClockHigh;
367
368 USHORT usVDDC;
369 USHORT usVDDCI;
370 USHORT usUnused;
371
372 ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
373
374} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
375
376typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
377{
378 USHORT usEngineClockLow;
379 UCHAR ucEngineClockHigh;
380
381 USHORT usMemoryClockLow;
382 UCHAR ucMemoryClockHigh;
383
384 USHORT usVDDC;
385 USHORT usVDDCI;
386 UCHAR ucPCIEGen;
387 UCHAR ucUnused1;
388
389 ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
390
391} ATOM_PPLIB_SI_CLOCK_INFO;
392
393typedef struct _ATOM_PPLIB_CI_CLOCK_INFO
394{
395 USHORT usEngineClockLow;
396 UCHAR ucEngineClockHigh;
397
398 USHORT usMemoryClockLow;
399 UCHAR ucMemoryClockHigh;
400
401 UCHAR ucPCIEGen;
402 USHORT usPCIELane;
403} ATOM_PPLIB_CI_CLOCK_INFO;
404
405typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
406 USHORT usEngineClockLow; //clockfrequency & 0xFFFF. The unit is in 10khz
407 UCHAR ucEngineClockHigh; //clockfrequency >> 16.
408 UCHAR vddcIndex; //2-bit vddc index;
409 USHORT tdpLimit;
410 //please initialize to 0
411 USHORT rsv1;
412 //please initialize to 0s
413 ULONG rsv2[2];
414}ATOM_PPLIB_SUMO_CLOCK_INFO;
415
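Throughout these *_CLOCK_INFO structures a clock is split into a 16-bit low word and an 8-bit high byte forming a 24-bit value in 10 kHz units, as the usEngineClockLow/ucEngineClockHigh comments above state. A worked example of reassembling one (hypothetical helper name):

#include <assert.h>
#include <stdint.h>

/* Reassemble a 24-bit clock stored in 10 kHz units and convert to kHz. */
static uint32_t pplib_clock_khz(uint16_t low, uint8_t high)
{
        return (((uint32_t)high << 16) | low) * 10;
}

int main(void)
{
        /* high 0x01, low 0x3880 -> 0x13880 = 80000 * 10 kHz = 800 MHz. */
        assert(pplib_clock_khz(0x3880, 0x01) == 800000);
        return 0;
}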
416typedef struct _ATOM_PPLIB_STATE_V2
417{
418 //number of valid dpm levels in this state; Driver uses it to calculate the whole
419 //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
420 UCHAR ucNumDPMLevels;
421
422 //an index into the array of nonClockInfos
423 UCHAR nonClockInfoIndex;
424 /**
425 * Driver will read the first ucNumDPMLevels in this array
426 */
427 UCHAR clockInfoIndex[1];
428} ATOM_PPLIB_STATE_V2;
429
430typedef struct _StateArray{
431 //how many states we have
432 UCHAR ucNumEntries;
433
434 ATOM_PPLIB_STATE_V2 states[1];
435}StateArray;
436
437
438typedef struct _ClockInfoArray{
439 //how many clock levels we have
440 UCHAR ucNumEntries;
441
442 //sizeof(ATOM_PPLIB_CLOCK_INFO)
443 UCHAR ucEntrySize;
444
445 UCHAR clockInfo[1];
446}ClockInfoArray;
447
448typedef struct _NonClockInfoArray{
449
450 //how many non-clock levels we have. normally the same as the number of states
451 UCHAR ucNumEntries;
452 //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
453 UCHAR ucEntrySize;
454
455 ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
456}NonClockInfoArray;
457
458typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
459{
460 USHORT usClockLow;
461 UCHAR ucClockHigh;
462 USHORT usVoltage;
463}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
464
465typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
466{
467 UCHAR ucNumEntries; // Number of entries.
468 ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries.
469}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
470
471typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
472{
473 USHORT usSclkLow;
474 UCHAR ucSclkHigh;
475 USHORT usMclkLow;
476 UCHAR ucMclkHigh;
477 USHORT usVddc;
478 USHORT usVddci;
479}ATOM_PPLIB_Clock_Voltage_Limit_Record;
480
481typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
482{
483 UCHAR ucNumEntries; // Number of entries.
484 ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
485}ATOM_PPLIB_Clock_Voltage_Limit_Table;
486
487union _ATOM_PPLIB_CAC_Leakage_Record
488{
489 struct
490 {
491 USHORT usVddc; // We use this field for the "fake" standardized VDDC for power calculations; For CI and newer, we use this as the real VDDC value. In CI we read it as StdVoltageHiSidd
492 ULONG ulLeakageValue; // For CI and newer we use this as the "fake" standard VDDC value. In CI we read it as StdVoltageLoSidd
493
494 };
495 struct
496 {
497 USHORT usVddc1;
498 USHORT usVddc2;
499 USHORT usVddc3;
500 };
501};
502
503typedef union _ATOM_PPLIB_CAC_Leakage_Record ATOM_PPLIB_CAC_Leakage_Record;
504
505typedef struct _ATOM_PPLIB_CAC_Leakage_Table
506{
507 UCHAR ucNumEntries; // Number of entries.
508 ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries.
509}ATOM_PPLIB_CAC_Leakage_Table;
510
511typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
512{
513 USHORT usVoltage;
514 USHORT usSclkLow;
515 UCHAR ucSclkHigh;
516 USHORT usMclkLow;
517 UCHAR ucMclkHigh;
518}ATOM_PPLIB_PhaseSheddingLimits_Record;
519
520typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
521{
522 UCHAR ucNumEntries; // Number of entries.
523 ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries.
524}ATOM_PPLIB_PhaseSheddingLimits_Table;
525
526typedef struct _VCEClockInfo{
527 USHORT usEVClkLow;
528 UCHAR ucEVClkHigh;
529 USHORT usECClkLow;
530 UCHAR ucECClkHigh;
531}VCEClockInfo;
532
533typedef struct _VCEClockInfoArray{
534 UCHAR ucNumEntries;
535 VCEClockInfo entries[1];
536}VCEClockInfoArray;
537
538typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
539{
540 USHORT usVoltage;
541 UCHAR ucVCEClockInfoIndex;
542}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
543
544typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
545{
546 UCHAR numEntries;
547 ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
548}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
549
550typedef struct _ATOM_PPLIB_VCE_State_Record
551{
552 UCHAR ucVCEClockInfoIndex;
553 UCHAR ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
554}ATOM_PPLIB_VCE_State_Record;
555
556typedef struct _ATOM_PPLIB_VCE_State_Table
557{
558 UCHAR numEntries;
559 ATOM_PPLIB_VCE_State_Record entries[1];
560}ATOM_PPLIB_VCE_State_Table;
561
562
563typedef struct _ATOM_PPLIB_VCE_Table
564{
565 UCHAR revid;
566// VCEClockInfoArray array;
567// ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
568// ATOM_PPLIB_VCE_State_Table states;
569}ATOM_PPLIB_VCE_Table;
570
571
572typedef struct _UVDClockInfo{
573 USHORT usVClkLow;
574 UCHAR ucVClkHigh;
575 USHORT usDClkLow;
576 UCHAR ucDClkHigh;
577}UVDClockInfo;
578
579typedef struct _UVDClockInfoArray{
580 UCHAR ucNumEntries;
581 UVDClockInfo entries[1];
582}UVDClockInfoArray;
583
584typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
585{
586 USHORT usVoltage;
587 UCHAR ucUVDClockInfoIndex;
588}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
589
590typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
591{
592 UCHAR numEntries;
593 ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
594}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
595
596typedef struct _ATOM_PPLIB_UVD_Table
597{
598 UCHAR revid;
599// UVDClockInfoArray array;
600// ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
601}ATOM_PPLIB_UVD_Table;
602
603typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
604{
605 USHORT usVoltage;
606 USHORT usSAMClockLow;
607 UCHAR ucSAMClockHigh;
608}ATOM_PPLIB_SAMClk_Voltage_Limit_Record;
609
610typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
611 UCHAR numEntries;
612 ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1];
613}ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
614
615typedef struct _ATOM_PPLIB_SAMU_Table
616{
617 UCHAR revid;
618 ATOM_PPLIB_SAMClk_Voltage_Limit_Table limits;
619}ATOM_PPLIB_SAMU_Table;
620
621typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Record
622{
623 USHORT usVoltage;
624 USHORT usACPClockLow;
625 UCHAR ucACPClockHigh;
626}ATOM_PPLIB_ACPClk_Voltage_Limit_Record;
627
628typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Table{
629 UCHAR numEntries;
630 ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1];
631}ATOM_PPLIB_ACPClk_Voltage_Limit_Table;
632
633typedef struct _ATOM_PPLIB_ACP_Table
634{
635 UCHAR revid;
636 ATOM_PPLIB_ACPClk_Voltage_Limit_Table limits;
637}ATOM_PPLIB_ACP_Table;
638
639typedef struct _ATOM_PowerTune_Table{
640 USHORT usTDP;
641 USHORT usConfigurableTDP;
642 USHORT usTDC;
643 USHORT usBatteryPowerLimit;
644 USHORT usSmallPowerLimit;
645 USHORT usLowCACLeakage;
646 USHORT usHighCACLeakage;
647}ATOM_PowerTune_Table;
648
649typedef struct _ATOM_PPLIB_POWERTUNE_Table
650{
651 UCHAR revid;
652 ATOM_PowerTune_Table power_tune_table;
653}ATOM_PPLIB_POWERTUNE_Table;
654
655typedef struct _ATOM_PPLIB_POWERTUNE_Table_V1
656{
657 UCHAR revid;
658 ATOM_PowerTune_Table power_tune_table;
659 USHORT usMaximumPowerDeliveryLimit;
660 USHORT usReserve[7];
661} ATOM_PPLIB_POWERTUNE_Table_V1;
662
663#define ATOM_PPM_A_A 1
664#define ATOM_PPM_A_I 2
665typedef struct _ATOM_PPLIB_PPM_Table
666{
667 UCHAR ucRevId;
668 UCHAR ucPpmDesign; //A+I or A+A
669 USHORT usCpuCoreNumber;
670 ULONG ulPlatformTDP;
671 ULONG ulSmallACPlatformTDP;
672 ULONG ulPlatformTDC;
673 ULONG ulSmallACPlatformTDC;
674 ULONG ulApuTDP;
675 ULONG ulDGpuTDP;
676 ULONG ulDGpuUlvPower;
677 ULONG ulTjmax;
678} ATOM_PPLIB_PPM_Table;
679
680#pragma pack(pop)
681
682#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 75349cdaa84b..9fc61dd68bc0 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1097,12 +1097,12 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
1097 } 1097 }
1098 1098
1099 /* Align ring size */ 1099 /* Align ring size */
1100 rb_bufsz = drm_order(ring_size / 8); 1100 rb_bufsz = order_base_2(ring_size / 8);
1101 ring_size = (1 << (rb_bufsz + 1)) * 4; 1101 ring_size = (1 << (rb_bufsz + 1)) * 4;
1102 r100_cp_load_microcode(rdev); 1102 r100_cp_load_microcode(rdev);
1103 r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET, 1103 r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
1104 RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR, 1104 RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
1105 0, 0x7fffff, RADEON_CP_PACKET2); 1105 RADEON_CP_PACKET2);
1106 if (r) { 1106 if (r) {
1107 return r; 1107 return r;
1108 } 1108 }
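The ring-size alignment in r100_cp_init() (and the identical code in r600_ring_init() in the next file) rounds the requested byte size up to a power of two: with k = order_base_2(size / 8), the final size is (1 << (k + 1)) * 4, i.e. 2^(k+3) bytes. A quick standalone check of that arithmetic with a sketched order_base_2():

#include <assert.h>

static unsigned int order_base_2_sketch(unsigned long n)    /* ceil(log2) */
{
        unsigned int k = 0;
        while ((1UL << k) < n)
                k++;
        return k;
}

static unsigned long aligned_ring_size(unsigned long bytes)
{
        unsigned int rb_bufsz = order_base_2_sketch(bytes / 8);
        return (1UL << (rb_bufsz + 1)) * 4;                  /* 2^(k+3) */
}

int main(void)
{
        assert(aligned_ring_size(60 * 1024) == 64 * 1024);   /* rounds up */
        assert(aligned_ring_size(64 * 1024) == 64 * 1024);   /* unchanged */
        return 0;
}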
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index e66e72077350..ea4d3734e6d9 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1374,7 +1374,7 @@ static bool r600_is_display_hung(struct radeon_device *rdev)
1374 return true; 1374 return true;
1375} 1375}
1376 1376
1377static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev) 1377u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
1378{ 1378{
1379 u32 reset_mask = 0; 1379 u32 reset_mask = 0;
1380 u32 tmp; 1380 u32 tmp;
@@ -1622,28 +1622,6 @@ bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1622 return radeon_ring_test_lockup(rdev, ring); 1622 return radeon_ring_test_lockup(rdev, ring);
1623} 1623}
1624 1624
1625/**
1626 * r600_dma_is_lockup - Check if the DMA engine is locked up
1627 *
1628 * @rdev: radeon_device pointer
1629 * @ring: radeon_ring structure holding ring information
1630 *
1631 * Check if the async DMA engine is locked up.
1632 * Returns true if the engine appears to be locked up, false if not.
1633 */
1634bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1635{
1636 u32 reset_mask = r600_gpu_check_soft_reset(rdev);
1637
1638 if (!(reset_mask & RADEON_RESET_DMA)) {
1639 radeon_ring_lockup_update(ring);
1640 return false;
1641 }
1642 /* force ring activities */
1643 radeon_ring_force_activity(rdev, ring);
1644 return radeon_ring_test_lockup(rdev, ring);
1645}
1646
1647u32 r6xx_remap_render_backend(struct radeon_device *rdev, 1625u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1648 u32 tiling_pipe_num, 1626 u32 tiling_pipe_num,
1649 u32 max_rb_num, 1627 u32 max_rb_num,
@@ -2417,8 +2395,8 @@ int r600_cp_resume(struct radeon_device *rdev)
2417 WREG32(GRBM_SOFT_RESET, 0); 2395 WREG32(GRBM_SOFT_RESET, 0);
2418 2396
2419 /* Set ring buffer size */ 2397 /* Set ring buffer size */
2420 rb_bufsz = drm_order(ring->ring_size / 8); 2398 rb_bufsz = order_base_2(ring->ring_size / 8);
2421 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 2399 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2422#ifdef __BIG_ENDIAN 2400#ifdef __BIG_ENDIAN
2423 tmp |= BUF_SWAP_32BIT; 2401 tmp |= BUF_SWAP_32BIT;
2424#endif 2402#endif
@@ -2471,7 +2449,7 @@ void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsign
2471 int r; 2449 int r;
2472 2450
2473 /* Align ring size */ 2451 /* Align ring size */
2474 rb_bufsz = drm_order(ring_size / 8); 2452 rb_bufsz = order_base_2(ring_size / 8);
2475 ring_size = (1 << (rb_bufsz + 1)) * 4; 2453 ring_size = (1 << (rb_bufsz + 1)) * 4;
2476 ring->ring_size = ring_size; 2454 ring->ring_size = ring_size;
2477 ring->align_mask = 16 - 1; 2455 ring->align_mask = 16 - 1;
@@ -2494,345 +2472,6 @@ void r600_cp_fini(struct radeon_device *rdev)
2494} 2472}
2495 2473
2496/* 2474/*
2497 * DMA
2498 * Starting with R600, the GPU has an asynchronous
2499 * DMA engine. The programming model is very similar
2500 * to the 3D engine (ring buffer, IBs, etc.), but the
2501 * DMA controller has its own packet format that is
2502 * different from the PM4 format used by the 3D engine.
2503 * It supports copying data, writing embedded data,
2504 * solid fills, and a number of other things. It also
2505 * has support for tiling/detiling of buffers.
2506 */
2507/**
2508 * r600_dma_stop - stop the async dma engine
2509 *
2510 * @rdev: radeon_device pointer
2511 *
2512 * Stop the async dma engine (r6xx-evergreen).
2513 */
2514void r600_dma_stop(struct radeon_device *rdev)
2515{
2516 u32 rb_cntl = RREG32(DMA_RB_CNTL);
2517
2518 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
2519
2520 rb_cntl &= ~DMA_RB_ENABLE;
2521 WREG32(DMA_RB_CNTL, rb_cntl);
2522
2523 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
2524}
2525
2526/**
2527 * r600_dma_resume - setup and start the async dma engine
2528 *
2529 * @rdev: radeon_device pointer
2530 *
2531 * Set up the DMA ring buffer and enable it. (r6xx-evergreen).
2532 * Returns 0 for success, error for failure.
2533 */
2534int r600_dma_resume(struct radeon_device *rdev)
2535{
2536 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
2537 u32 rb_cntl, dma_cntl, ib_cntl;
2538 u32 rb_bufsz;
2539 int r;
2540
2541 /* Reset dma */
2542 if (rdev->family >= CHIP_RV770)
2543 WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
2544 else
2545 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
2546 RREG32(SRBM_SOFT_RESET);
2547 udelay(50);
2548 WREG32(SRBM_SOFT_RESET, 0);
2549
2550 WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
2551 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
2552
2553 /* Set ring buffer size in dwords */
2554 rb_bufsz = drm_order(ring->ring_size / 4);
2555 rb_cntl = rb_bufsz << 1;
2556#ifdef __BIG_ENDIAN
2557 rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
2558#endif
2559 WREG32(DMA_RB_CNTL, rb_cntl);
2560
2561 /* Initialize the ring buffer's read and write pointers */
2562 WREG32(DMA_RB_RPTR, 0);
2563 WREG32(DMA_RB_WPTR, 0);
2564
2565 /* set the wb address whether it's enabled or not */
2566 WREG32(DMA_RB_RPTR_ADDR_HI,
2567 upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
2568 WREG32(DMA_RB_RPTR_ADDR_LO,
2569 ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
2570
2571 if (rdev->wb.enabled)
2572 rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
2573
2574 WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
2575
2576 /* enable DMA IBs */
2577 ib_cntl = DMA_IB_ENABLE;
2578#ifdef __BIG_ENDIAN
2579 ib_cntl |= DMA_IB_SWAP_ENABLE;
2580#endif
2581 WREG32(DMA_IB_CNTL, ib_cntl);
2582
2583 dma_cntl = RREG32(DMA_CNTL);
2584 dma_cntl &= ~CTXEMPTY_INT_ENABLE;
2585 WREG32(DMA_CNTL, dma_cntl);
2586
2587 if (rdev->family >= CHIP_RV770)
2588 WREG32(DMA_MODE, 1);
2589
2590 ring->wptr = 0;
2591 WREG32(DMA_RB_WPTR, ring->wptr << 2);
2592
2593 ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
2594
2595 WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
2596
2597 ring->ready = true;
2598
2599 r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
2600 if (r) {
2601 ring->ready = false;
2602 return r;
2603 }
2604
2605 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
2606
2607 return 0;
2608}
2609
2610/**
2611 * r600_dma_fini - tear down the async dma engine
2612 *
2613 * @rdev: radeon_device pointer
2614 *
2615 * Stop the async dma engine and free the ring (r6xx-evergreen).
2616 */
2617void r600_dma_fini(struct radeon_device *rdev)
2618{
2619 r600_dma_stop(rdev);
2620 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
2621}
2622
2623/*
2624 * UVD
2625 */
2626int r600_uvd_rbc_start(struct radeon_device *rdev)
2627{
2628 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
2629 uint64_t rptr_addr;
2630 uint32_t rb_bufsz, tmp;
2631 int r;
2632
2633 rptr_addr = rdev->wb.gpu_addr + R600_WB_UVD_RPTR_OFFSET;
2634
2635 if (upper_32_bits(rptr_addr) != upper_32_bits(ring->gpu_addr)) {
2636 DRM_ERROR("UVD ring and rptr not in the same 4GB segment!\n");
2637 return -EINVAL;
2638 }
2639
2640 /* force RBC into idle state */
2641 WREG32(UVD_RBC_RB_CNTL, 0x11010101);
2642
2643 /* Set the write pointer delay */
2644 WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
2645
2646 /* set the wb address */
2647 WREG32(UVD_RBC_RB_RPTR_ADDR, rptr_addr >> 2);
2648
2649 /* program the 4GB memory segment for rptr and ring buffer */
2650 WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(rptr_addr) |
2651 (0x7 << 16) | (0x1 << 31));
2652
2653 /* Initialize the ring buffer's read and write pointers */
2654 WREG32(UVD_RBC_RB_RPTR, 0x0);
2655
2656 ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
2657 WREG32(UVD_RBC_RB_WPTR, ring->wptr);
2658
2659 /* set the ring address */
2660 WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
2661
2662 /* Set ring buffer size */
2663 rb_bufsz = drm_order(ring->ring_size);
2664 rb_bufsz = (0x1 << 8) | rb_bufsz;
2665 WREG32(UVD_RBC_RB_CNTL, rb_bufsz);
2666
2667 ring->ready = true;
2668 r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
2669 if (r) {
2670 ring->ready = false;
2671 return r;
2672 }
2673
2674 r = radeon_ring_lock(rdev, ring, 10);
2675 if (r) {
2676 DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r);
2677 return r;
2678 }
2679
2680 tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
2681 radeon_ring_write(ring, tmp);
2682 radeon_ring_write(ring, 0xFFFFF);
2683
2684 tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
2685 radeon_ring_write(ring, tmp);
2686 radeon_ring_write(ring, 0xFFFFF);
2687
2688 tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
2689 radeon_ring_write(ring, tmp);
2690 radeon_ring_write(ring, 0xFFFFF);
2691
2692 /* Clear timeout status bits */
2693 radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
2694 radeon_ring_write(ring, 0x8);
2695
2696 radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
2697 radeon_ring_write(ring, 3);
2698
2699 radeon_ring_unlock_commit(rdev, ring);
2700
2701 return 0;
2702}
2703
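The 4GB-segment check in r600_uvd_rbc_start() exists because the ring buffer and the rptr write-back location share one upper-address register (the UVD_LMI_EXT40_ADDR write inside the function only carries a single set of upper bits), so both must sit in the same 4 GiB window. The comparison is simply the upper 32 bits of the two GPU addresses:

#include <stdbool.h>
#include <stdint.h>

/* Equivalent of the upper_32_bits() comparison in r600_uvd_rbc_start(). */
static inline bool same_4gb_segment(uint64_t a, uint64_t b)
{
        return (a >> 32) == (b >> 32);
}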
2704void r600_uvd_stop(struct radeon_device *rdev)
2705{
2706 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
2707
2708 /* force RBC into idle state */
2709 WREG32(UVD_RBC_RB_CNTL, 0x11010101);
2710
2711 /* Stall UMC and register bus before resetting VCPU */
2712 WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
2713 WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
2714 mdelay(1);
2715
2716 /* put VCPU into reset */
2717 WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
2718 mdelay(5);
2719
2720 /* disable VCPU clock */
2721 WREG32(UVD_VCPU_CNTL, 0x0);
2722
2723 /* Unstall UMC and register bus */
2724 WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
2725 WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
2726
2727 ring->ready = false;
2728}
2729
2730int r600_uvd_init(struct radeon_device *rdev)
2731{
2732 int i, j, r;
2733 /* disable byte swapping */
2734 u32 lmi_swap_cntl = 0;
2735 u32 mp_swap_cntl = 0;
2736
2737 /* raise clocks while booting up the VCPU */
2738 radeon_set_uvd_clocks(rdev, 53300, 40000);
2739
2740 /* disable clock gating */
2741 WREG32(UVD_CGC_GATE, 0);
2742
2743 /* disable interrupt */
2744 WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
2745
2746 /* Stall UMC and register bus before resetting VCPU */
2747 WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
2748 WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
2749 mdelay(1);
2750
2751 /* put LMI, VCPU, RBC etc... into reset */
2752 WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
2753 LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
2754 CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
2755 mdelay(5);
2756
2757 /* take UVD block out of reset */
2758 WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
2759 mdelay(5);
2760
2761 /* initialize UVD memory controller */
2762 WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
2763 (1 << 21) | (1 << 9) | (1 << 20));
2764
2765#ifdef __BIG_ENDIAN
2766 /* swap (8 in 32) RB and IB */
2767 lmi_swap_cntl = 0xa;
2768 mp_swap_cntl = 0;
2769#endif
2770 WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
2771 WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
2772
2773 WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
2774 WREG32(UVD_MPC_SET_MUXA1, 0x0);
2775 WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
2776 WREG32(UVD_MPC_SET_MUXB1, 0x0);
2777 WREG32(UVD_MPC_SET_ALU, 0);
2778 WREG32(UVD_MPC_SET_MUX, 0x88);
2779
2780 /* take all subblocks out of reset, except VCPU */
2781 WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
2782 mdelay(5);
2783
2784 /* enable VCPU clock */
2785 WREG32(UVD_VCPU_CNTL, 1 << 9);
2786
2787 /* enable UMC */
2788 WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
2789
2790 /* boot up the VCPU */
2791 WREG32(UVD_SOFT_RESET, 0);
2792 mdelay(10);
2793
2794 WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
2795
2796 for (i = 0; i < 10; ++i) {
2797 uint32_t status;
2798 for (j = 0; j < 100; ++j) {
2799 status = RREG32(UVD_STATUS);
2800 if (status & 2)
2801 break;
2802 mdelay(10);
2803 }
2804 r = 0;
2805 if (status & 2)
2806 break;
2807
2808 DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
2809 WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
2810 mdelay(10);
2811 WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
2812 mdelay(10);
2813 r = -1;
2814 }
2815
2816 if (r) {
2817 DRM_ERROR("UVD not responding, giving up!!!\n");
2818 radeon_set_uvd_clocks(rdev, 0, 0);
2819 return r;
2820 }
2821
2822 /* enable interrupt */
2823 WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1));
2824
2825 r = r600_uvd_rbc_start(rdev);
2826 if (!r)
2827 DRM_INFO("UVD initialized successfully.\n");
2828
2829 /* lower clocks again */
2830 radeon_set_uvd_clocks(rdev, 0, 0);
2831
2832 return r;
2833}
2834
2835/*
2836 * GPU scratch registers helpers function. 2475 * GPU scratch registers helpers function.
2837 */ 2476 */
2838void r600_scratch_init(struct radeon_device *rdev) 2477void r600_scratch_init(struct radeon_device *rdev)
@@ -2887,94 +2526,6 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2887 return r; 2526 return r;
2888} 2527}
2889 2528
2890/**
2891 * r600_dma_ring_test - simple async dma engine test
2892 *
2893 * @rdev: radeon_device pointer
2894 * @ring: radeon_ring structure holding ring information
2895 *
2896 * Test the DMA engine by using it to write a
2897 * value to memory (r6xx-SI).
2898 * Returns 0 for success, error for failure.
2899 */
2900int r600_dma_ring_test(struct radeon_device *rdev,
2901 struct radeon_ring *ring)
2902{
2903 unsigned i;
2904 int r;
2905 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
2906 u32 tmp;
2907
2908 if (!ptr) {
2909 DRM_ERROR("invalid vram scratch pointer\n");
2910 return -EINVAL;
2911 }
2912
2913 tmp = 0xCAFEDEAD;
2914 writel(tmp, ptr);
2915
2916 r = radeon_ring_lock(rdev, ring, 4);
2917 if (r) {
2918 DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
2919 return r;
2920 }
2921 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
2922 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
2923 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
2924 radeon_ring_write(ring, 0xDEADBEEF);
2925 radeon_ring_unlock_commit(rdev, ring);
2926
2927 for (i = 0; i < rdev->usec_timeout; i++) {
2928 tmp = readl(ptr);
2929 if (tmp == 0xDEADBEEF)
2930 break;
2931 DRM_UDELAY(1);
2932 }
2933
2934 if (i < rdev->usec_timeout) {
2935 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
2936 } else {
2937 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
2938 ring->idx, tmp);
2939 r = -EINVAL;
2940 }
2941 return r;
2942}
2943
2944int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2945{
2946 uint32_t tmp = 0;
2947 unsigned i;
2948 int r;
2949
2950 WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
2951 r = radeon_ring_lock(rdev, ring, 3);
2952 if (r) {
2953 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
2954 ring->idx, r);
2955 return r;
2956 }
2957 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
2958 radeon_ring_write(ring, 0xDEADBEEF);
2959 radeon_ring_unlock_commit(rdev, ring);
2960 for (i = 0; i < rdev->usec_timeout; i++) {
2961 tmp = RREG32(UVD_CONTEXT_ID);
2962 if (tmp == 0xDEADBEEF)
2963 break;
2964 DRM_UDELAY(1);
2965 }
2966
2967 if (i < rdev->usec_timeout) {
2968 DRM_INFO("ring test on %d succeeded in %d usecs\n",
2969 ring->idx, i);
2970 } else {
2971 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
2972 ring->idx, tmp);
2973 r = -EINVAL;
2974 }
2975 return r;
2976}
2977
2978/* 2529/*
2979 * CP fences/semaphores 2530 * CP fences/semaphores
2980 */ 2531 */
@@ -3026,30 +2577,6 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
3026 } 2577 }
3027} 2578}
3028 2579
3029void r600_uvd_fence_emit(struct radeon_device *rdev,
3030 struct radeon_fence *fence)
3031{
3032 struct radeon_ring *ring = &rdev->ring[fence->ring];
3033 uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
3034
3035 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
3036 radeon_ring_write(ring, fence->seq);
3037 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
3038 radeon_ring_write(ring, addr & 0xffffffff);
3039 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
3040 radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
3041 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
3042 radeon_ring_write(ring, 0);
3043
3044 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
3045 radeon_ring_write(ring, 0);
3046 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
3047 radeon_ring_write(ring, 0);
3048 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
3049 radeon_ring_write(ring, 2);
3050 return;
3051}
3052
3053void r600_semaphore_ring_emit(struct radeon_device *rdev, 2580void r600_semaphore_ring_emit(struct radeon_device *rdev,
3054 struct radeon_ring *ring, 2581 struct radeon_ring *ring,
3055 struct radeon_semaphore *semaphore, 2582 struct radeon_semaphore *semaphore,
@@ -3066,95 +2593,6 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
3066 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); 2593 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
3067} 2594}
3068 2595
3069/*
3070 * DMA fences/semaphores
3071 */
3072
3073/**
3074 * r600_dma_fence_ring_emit - emit a fence on the DMA ring
3075 *
3076 * @rdev: radeon_device pointer
3077 * @fence: radeon fence object
3078 *
3079 * Add a DMA fence packet to the ring to write
3080 * the fence seq number and DMA trap packet to generate
3081 * an interrupt if needed (r6xx-r7xx).
3082 */
3083void r600_dma_fence_ring_emit(struct radeon_device *rdev,
3084 struct radeon_fence *fence)
3085{
3086 struct radeon_ring *ring = &rdev->ring[fence->ring];
3087 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
3088
3089 /* write the fence */
3090 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
3091 radeon_ring_write(ring, addr & 0xfffffffc);
3092 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
3093 radeon_ring_write(ring, lower_32_bits(fence->seq));
3094 /* generate an interrupt */
3095 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
3096}
3097
3098/**
3099 * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
3100 *
3101 * @rdev: radeon_device pointer
3102 * @ring: radeon_ring structure holding ring information
3103 * @semaphore: radeon semaphore object
3104 * @emit_wait: wait or signal semaphore
3105 *
 3106 * Add a DMA semaphore packet to the ring to wait on or signal
3107 * other rings (r6xx-SI).
3108 */
3109void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
3110 struct radeon_ring *ring,
3111 struct radeon_semaphore *semaphore,
3112 bool emit_wait)
3113{
3114 u64 addr = semaphore->gpu_addr;
3115 u32 s = emit_wait ? 0 : 1;
3116
3117 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
3118 radeon_ring_write(ring, addr & 0xfffffffc);
3119 radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
3120}
3121
3122void r600_uvd_semaphore_emit(struct radeon_device *rdev,
3123 struct radeon_ring *ring,
3124 struct radeon_semaphore *semaphore,
3125 bool emit_wait)
3126{
3127 uint64_t addr = semaphore->gpu_addr;
3128
3129 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
3130 radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
3131
3132 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
3133 radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
3134
3135 radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
3136 radeon_ring_write(ring, emit_wait ? 1 : 0);
3137}
3138
3139int r600_copy_blit(struct radeon_device *rdev,
3140 uint64_t src_offset,
3141 uint64_t dst_offset,
3142 unsigned num_gpu_pages,
3143 struct radeon_fence **fence)
3144{
3145 struct radeon_semaphore *sem = NULL;
3146 struct radeon_sa_bo *vb = NULL;
3147 int r;
3148
3149 r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
3150 if (r) {
3151 return r;
3152 }
3153 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
3154 r600_blit_done_copy(rdev, fence, vb, sem);
3155 return 0;
3156}
3157
3158/** 2596/**
3159 * r600_copy_cpdma - copy pages using the CP DMA engine 2597 * r600_copy_cpdma - copy pages using the CP DMA engine
3160 * 2598 *
@@ -3239,80 +2677,6 @@ int r600_copy_cpdma(struct radeon_device *rdev,
3239 return r; 2677 return r;
3240} 2678}
3241 2679
3242/**
3243 * r600_copy_dma - copy pages using the DMA engine
3244 *
3245 * @rdev: radeon_device pointer
3246 * @src_offset: src GPU address
3247 * @dst_offset: dst GPU address
3248 * @num_gpu_pages: number of GPU pages to xfer
3249 * @fence: radeon fence object
3250 *
3251 * Copy GPU paging using the DMA engine (r6xx).
3252 * Used by the radeon ttm implementation to move pages if
3253 * registered as the asic copy callback.
3254 */
3255int r600_copy_dma(struct radeon_device *rdev,
3256 uint64_t src_offset, uint64_t dst_offset,
3257 unsigned num_gpu_pages,
3258 struct radeon_fence **fence)
3259{
3260 struct radeon_semaphore *sem = NULL;
3261 int ring_index = rdev->asic->copy.dma_ring_index;
3262 struct radeon_ring *ring = &rdev->ring[ring_index];
3263 u32 size_in_dw, cur_size_in_dw;
3264 int i, num_loops;
3265 int r = 0;
3266
3267 r = radeon_semaphore_create(rdev, &sem);
3268 if (r) {
3269 DRM_ERROR("radeon: moving bo (%d).\n", r);
3270 return r;
3271 }
3272
3273 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
3274 num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
3275 r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
3276 if (r) {
3277 DRM_ERROR("radeon: moving bo (%d).\n", r);
3278 radeon_semaphore_free(rdev, &sem, NULL);
3279 return r;
3280 }
3281
3282 if (radeon_fence_need_sync(*fence, ring->idx)) {
3283 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
3284 ring->idx);
3285 radeon_fence_note_sync(*fence, ring->idx);
3286 } else {
3287 radeon_semaphore_free(rdev, &sem, NULL);
3288 }
3289
3290 for (i = 0; i < num_loops; i++) {
3291 cur_size_in_dw = size_in_dw;
3292 if (cur_size_in_dw > 0xFFFE)
3293 cur_size_in_dw = 0xFFFE;
3294 size_in_dw -= cur_size_in_dw;
3295 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
3296 radeon_ring_write(ring, dst_offset & 0xfffffffc);
3297 radeon_ring_write(ring, src_offset & 0xfffffffc);
3298 radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
3299 (upper_32_bits(src_offset) & 0xff)));
3300 src_offset += cur_size_in_dw * 4;
3301 dst_offset += cur_size_in_dw * 4;
3302 }
3303
3304 r = radeon_fence_emit(rdev, fence, ring->idx);
3305 if (r) {
3306 radeon_ring_unlock_undo(rdev, ring);
3307 return r;
3308 }
3309
3310 radeon_ring_unlock_commit(rdev, ring);
3311 radeon_semaphore_free(rdev, &sem, *fence);
3312
3313 return r;
3314}
3315
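For reference, the chunking in the removed r600_copy_dma() above splits a transfer into DMA copy packets of at most 0xFFFE dwords each and locks num_loops * 4 + 8 ring dwords for them plus the fence/semaphore overhead. A worked example of that arithmetic, assuming the usual 4 KiB GPU page (RADEON_GPU_PAGE_SHIFT = 12):

    /* Worked example of the copy chunking; RADEON_GPU_PAGE_SHIFT = 12 (4 KiB
     * GPU pages) is assumed here. */
    #include <stdio.h>

    #define RADEON_GPU_PAGE_SHIFT 12
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long num_gpu_pages = 1024;   /* a 4 MiB move */
            unsigned long size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
            unsigned long num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);

            printf("%lu pages -> %lu dwords -> %lu copy packets, %lu ring dwords locked\n",
                   num_gpu_pages, size_in_dw, num_loops, num_loops * 4 + 8);
            /* 1024 pages -> 1048576 dwords -> 17 packets -> 76 ring dwords */
            return 0;
    }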
3316int r600_set_surface_reg(struct radeon_device *rdev, int reg, 2680int r600_set_surface_reg(struct radeon_device *rdev, int reg,
3317 uint32_t tiling_flags, uint32_t pitch, 2681 uint32_t tiling_flags, uint32_t pitch,
3318 uint32_t offset, uint32_t obj_size) 2682 uint32_t offset, uint32_t obj_size)
@@ -3334,6 +2698,11 @@ static int r600_startup(struct radeon_device *rdev)
3334 /* enable pcie gen2 link */ 2698 /* enable pcie gen2 link */
3335 r600_pcie_gen2_enable(rdev); 2699 r600_pcie_gen2_enable(rdev);
3336 2700
2701 /* scratch needs to be initialized before MC */
2702 r = r600_vram_scratch_init(rdev);
2703 if (r)
2704 return r;
2705
3337 r600_mc_program(rdev); 2706 r600_mc_program(rdev);
3338 2707
3339 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { 2708 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
@@ -3344,10 +2713,6 @@ static int r600_startup(struct radeon_device *rdev)
3344 } 2713 }
3345 } 2714 }
3346 2715
3347 r = r600_vram_scratch_init(rdev);
3348 if (r)
3349 return r;
3350
3351 if (rdev->flags & RADEON_IS_AGP) { 2716 if (rdev->flags & RADEON_IS_AGP) {
3352 r600_agp_enable(rdev); 2717 r600_agp_enable(rdev);
3353 } else { 2718 } else {
@@ -3356,12 +2721,6 @@ static int r600_startup(struct radeon_device *rdev)
3356 return r; 2721 return r;
3357 } 2722 }
3358 r600_gpu_init(rdev); 2723 r600_gpu_init(rdev);
3359 r = r600_blit_init(rdev);
3360 if (r) {
3361 r600_blit_fini(rdev);
3362 rdev->asic->copy.copy = NULL;
3363 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
3364 }
3365 2724
3366 /* allocate wb buffer */ 2725 /* allocate wb buffer */
3367 r = radeon_wb_init(rdev); 2726 r = radeon_wb_init(rdev);
@@ -3398,14 +2757,14 @@ static int r600_startup(struct radeon_device *rdev)
3398 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2757 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3399 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, 2758 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
3400 R600_CP_RB_RPTR, R600_CP_RB_WPTR, 2759 R600_CP_RB_RPTR, R600_CP_RB_WPTR,
3401 0, 0xfffff, RADEON_CP_PACKET2); 2760 RADEON_CP_PACKET2);
3402 if (r) 2761 if (r)
3403 return r; 2762 return r;
3404 2763
3405 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; 2764 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
3406 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, 2765 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
3407 DMA_RB_RPTR, DMA_RB_WPTR, 2766 DMA_RB_RPTR, DMA_RB_WPTR,
3408 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); 2767 DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
3409 if (r) 2768 if (r)
3410 return r; 2769 return r;
3411 2770
@@ -3574,7 +2933,6 @@ int r600_init(struct radeon_device *rdev)
3574void r600_fini(struct radeon_device *rdev) 2933void r600_fini(struct radeon_device *rdev)
3575{ 2934{
3576 r600_audio_fini(rdev); 2935 r600_audio_fini(rdev);
3577 r600_blit_fini(rdev);
3578 r600_cp_fini(rdev); 2936 r600_cp_fini(rdev);
3579 r600_dma_fini(rdev); 2937 r600_dma_fini(rdev);
3580 r600_irq_fini(rdev); 2938 r600_irq_fini(rdev);
@@ -3626,16 +2984,6 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3626 radeon_ring_write(ring, ib->length_dw); 2984 radeon_ring_write(ring, ib->length_dw);
3627} 2985}
3628 2986
3629void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3630{
3631 struct radeon_ring *ring = &rdev->ring[ib->ring];
3632
3633 radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
3634 radeon_ring_write(ring, ib->gpu_addr);
3635 radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
3636 radeon_ring_write(ring, ib->length_dw);
3637}
3638
3639int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) 2987int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3640{ 2988{
3641 struct radeon_ib ib; 2989 struct radeon_ib ib;
@@ -3689,139 +3037,6 @@ free_scratch:
3689 return r; 3037 return r;
3690} 3038}
3691 3039
3692/**
3693 * r600_dma_ib_test - test an IB on the DMA engine
3694 *
3695 * @rdev: radeon_device pointer
3696 * @ring: radeon_ring structure holding ring information
3697 *
3698 * Test a simple IB in the DMA ring (r6xx-SI).
3699 * Returns 0 on success, error on failure.
3700 */
3701int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3702{
3703 struct radeon_ib ib;
3704 unsigned i;
3705 int r;
3706 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3707 u32 tmp = 0;
3708
3709 if (!ptr) {
3710 DRM_ERROR("invalid vram scratch pointer\n");
3711 return -EINVAL;
3712 }
3713
3714 tmp = 0xCAFEDEAD;
3715 writel(tmp, ptr);
3716
3717 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
3718 if (r) {
3719 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3720 return r;
3721 }
3722
3723 ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
3724 ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
3725 ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
3726 ib.ptr[3] = 0xDEADBEEF;
3727 ib.length_dw = 4;
3728
3729 r = radeon_ib_schedule(rdev, &ib, NULL);
3730 if (r) {
3731 radeon_ib_free(rdev, &ib);
3732 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3733 return r;
3734 }
3735 r = radeon_fence_wait(ib.fence, false);
3736 if (r) {
3737 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3738 return r;
3739 }
3740 for (i = 0; i < rdev->usec_timeout; i++) {
3741 tmp = readl(ptr);
3742 if (tmp == 0xDEADBEEF)
3743 break;
3744 DRM_UDELAY(1);
3745 }
3746 if (i < rdev->usec_timeout) {
3747 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
3748 } else {
3749 DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
3750 r = -EINVAL;
3751 }
3752 radeon_ib_free(rdev, &ib);
3753 return r;
3754}
3755
3756int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3757{
3758 struct radeon_fence *fence = NULL;
3759 int r;
3760
3761 r = radeon_set_uvd_clocks(rdev, 53300, 40000);
3762 if (r) {
3763 DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
3764 return r;
3765 }
3766
3767 r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
3768 if (r) {
3769 DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
3770 goto error;
3771 }
3772
3773 r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
3774 if (r) {
3775 DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
3776 goto error;
3777 }
3778
3779 r = radeon_fence_wait(fence, false);
3780 if (r) {
3781 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3782 goto error;
3783 }
3784 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
3785error:
3786 radeon_fence_unref(&fence);
3787 radeon_set_uvd_clocks(rdev, 0, 0);
3788 return r;
3789}
3790
3791/**
3792 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
3793 *
3794 * @rdev: radeon_device pointer
3795 * @ib: IB object to schedule
3796 *
3797 * Schedule an IB in the DMA ring (r6xx-r7xx).
3798 */
3799void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3800{
3801 struct radeon_ring *ring = &rdev->ring[ib->ring];
3802
3803 if (rdev->wb.enabled) {
3804 u32 next_rptr = ring->wptr + 4;
3805 while ((next_rptr & 7) != 5)
3806 next_rptr++;
3807 next_rptr += 3;
3808 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
3809 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3810 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
3811 radeon_ring_write(ring, next_rptr);
3812 }
3813
3814 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
3815 * Pad as necessary with NOPs.
3816 */
3817 while ((ring->wptr & 7) != 5)
3818 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
3819 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
3820 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
3821 radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
3822
3823}
3824
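The padding rule in the removed r600_dma_ring_ib_execute() above is worth spelling out: NOPs are emitted until (wptr & 7) == 5, so the three-dword INDIRECT_BUFFER packet always finishes exactly on an 8-dword boundary. A small arithmetic-only sketch (no hardware, no driver headers) that walks every starting alignment:

    /* Arithmetic-only sketch of the "(wptr & 7) != 5" padding rule. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int wptr, p, nops;

            for (wptr = 0; wptr < 8; wptr++) {
                    p = wptr;
                    nops = 0;
                    while ((p & 7) != 5) {  /* same condition as the driver loop */
                            p++;
                            nops++;
                    }
                    p += 3;                 /* the INDIRECT_BUFFER packet is 3 dwords */
                    printf("wptr=%u: %u NOP dword(s), wptr after packet=%u (8-aligned: %s)\n",
                           wptr, nops, p, (p & 7) == 0 ? "yes" : "no");
            }
            return 0;
    }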
3825/* 3040/*
3826 * Interrupts 3041 * Interrupts
3827 * 3042 *
@@ -3838,7 +3053,7 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
3838 u32 rb_bufsz; 3053 u32 rb_bufsz;
3839 3054
3840 /* Align ring size */ 3055 /* Align ring size */
3841 rb_bufsz = drm_order(ring_size / 4); 3056 rb_bufsz = order_base_2(ring_size / 4);
3842 ring_size = (1 << rb_bufsz) * 4; 3057 ring_size = (1 << rb_bufsz) * 4;
3843 rdev->ih.ring_size = ring_size; 3058 rdev->ih.ring_size = ring_size;
3844 rdev->ih.ptr_mask = rdev->ih.ring_size - 1; 3059 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
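This hunk and the one below swap drm_order() for order_base_2(); both are expected to return ceil(log2(n)), so rb_bufsz still rounds the requested IH ring up to a power-of-two number of dwords. A minimal sketch of that arithmetic, using a hypothetical order_base_2_ref() stand-in for the kernel helper:

    /* Reference arithmetic only; order_base_2_ref() is a hypothetical
     * stand-in for the kernel's order_base_2() == ceil(log2(n)). */
    #include <stdio.h>

    static unsigned int order_base_2_ref(unsigned long n)
    {
            unsigned int order = 0;

            while ((1UL << order) < n)
                    order++;
            return order;
    }

    int main(void)
    {
            unsigned long ring_size = 96 * 1024;   /* requested IH ring, in bytes */
            unsigned int rb_bufsz = order_base_2_ref(ring_size / 4);

            ring_size = (1UL << rb_bufsz) * 4;     /* as in r600_ih_ring_init() */
            printf("rb_bufsz=%u ring_size=%lu\n", rb_bufsz, ring_size);
            /* prints rb_bufsz=15 ring_size=131072, i.e. 96K rounds up to 128K */
            return 0;
    }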
@@ -4075,7 +3290,7 @@ int r600_irq_init(struct radeon_device *rdev)
4075 WREG32(INTERRUPT_CNTL, interrupt_cntl); 3290 WREG32(INTERRUPT_CNTL, interrupt_cntl);
4076 3291
4077 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); 3292 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
4078 rb_bufsz = drm_order(rdev->ih.ring_size / 4); 3293 rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
4079 3294
4080 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | 3295 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
4081 IH_WPTR_OVERFLOW_CLEAR | 3296 IH_WPTR_OVERFLOW_CLEAR |
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index c92eb86a8e55..47fc2b886979 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -57,12 +57,12 @@ static bool radeon_dig_encoder(struct drm_encoder *encoder)
57 */ 57 */
58static int r600_audio_chipset_supported(struct radeon_device *rdev) 58static int r600_audio_chipset_supported(struct radeon_device *rdev)
59{ 59{
60 return ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE6(rdev); 60 return ASIC_IS_DCE2(rdev) && !ASIC_IS_NODCE(rdev);
61} 61}
62 62
63struct r600_audio r600_audio_status(struct radeon_device *rdev) 63struct r600_audio_pin r600_audio_status(struct radeon_device *rdev)
64{ 64{
65 struct r600_audio status; 65 struct r600_audio_pin status;
66 uint32_t value; 66 uint32_t value;
67 67
68 value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL); 68 value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
@@ -120,16 +120,16 @@ void r600_audio_update_hdmi(struct work_struct *work)
120 struct radeon_device *rdev = container_of(work, struct radeon_device, 120 struct radeon_device *rdev = container_of(work, struct radeon_device,
121 audio_work); 121 audio_work);
122 struct drm_device *dev = rdev->ddev; 122 struct drm_device *dev = rdev->ddev;
123 struct r600_audio audio_status = r600_audio_status(rdev); 123 struct r600_audio_pin audio_status = r600_audio_status(rdev);
124 struct drm_encoder *encoder; 124 struct drm_encoder *encoder;
125 bool changed = false; 125 bool changed = false;
126 126
127 if (rdev->audio_status.channels != audio_status.channels || 127 if (rdev->audio.pin[0].channels != audio_status.channels ||
128 rdev->audio_status.rate != audio_status.rate || 128 rdev->audio.pin[0].rate != audio_status.rate ||
129 rdev->audio_status.bits_per_sample != audio_status.bits_per_sample || 129 rdev->audio.pin[0].bits_per_sample != audio_status.bits_per_sample ||
130 rdev->audio_status.status_bits != audio_status.status_bits || 130 rdev->audio.pin[0].status_bits != audio_status.status_bits ||
131 rdev->audio_status.category_code != audio_status.category_code) { 131 rdev->audio.pin[0].category_code != audio_status.category_code) {
132 rdev->audio_status = audio_status; 132 rdev->audio.pin[0] = audio_status;
133 changed = true; 133 changed = true;
134 } 134 }
135 135
@@ -141,13 +141,13 @@ void r600_audio_update_hdmi(struct work_struct *work)
141 } 141 }
142} 142}
143 143
144/* 144/* enable the audio stream */
145 * turn on/off audio engine 145static void r600_audio_enable(struct radeon_device *rdev,
146 */ 146 struct r600_audio_pin *pin,
147static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable) 147 bool enable)
148{ 148{
149 u32 value = 0; 149 u32 value = 0;
150 DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling"); 150
151 if (ASIC_IS_DCE4(rdev)) { 151 if (ASIC_IS_DCE4(rdev)) {
152 if (enable) { 152 if (enable) {
153 value |= 0x81000000; /* Required to enable audio */ 153 value |= 0x81000000; /* Required to enable audio */
@@ -158,7 +158,7 @@ static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
158 WREG32_P(R600_AUDIO_ENABLE, 158 WREG32_P(R600_AUDIO_ENABLE,
159 enable ? 0x81000000 : 0x0, ~0x81000000); 159 enable ? 0x81000000 : 0x0, ~0x81000000);
160 } 160 }
161 rdev->audio_enabled = enable; 161 DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
162} 162}
163 163
164/* 164/*
@@ -169,13 +169,17 @@ int r600_audio_init(struct radeon_device *rdev)
169 if (!radeon_audio || !r600_audio_chipset_supported(rdev)) 169 if (!radeon_audio || !r600_audio_chipset_supported(rdev))
170 return 0; 170 return 0;
171 171
172 r600_audio_engine_enable(rdev, true); 172 rdev->audio.enabled = true;
173
174 rdev->audio.num_pins = 1;
175 rdev->audio.pin[0].channels = -1;
176 rdev->audio.pin[0].rate = -1;
177 rdev->audio.pin[0].bits_per_sample = -1;
178 rdev->audio.pin[0].status_bits = 0;
179 rdev->audio.pin[0].category_code = 0;
180 rdev->audio.pin[0].id = 0;
173 181
174 rdev->audio_status.channels = -1; 182 r600_audio_enable(rdev, &rdev->audio.pin[0], true);
175 rdev->audio_status.rate = -1;
176 rdev->audio_status.bits_per_sample = -1;
177 rdev->audio_status.status_bits = 0;
178 rdev->audio_status.category_code = 0;
179 183
180 return 0; 184 return 0;
181} 185}
@@ -186,8 +190,16 @@ int r600_audio_init(struct radeon_device *rdev)
186 */ 190 */
187void r600_audio_fini(struct radeon_device *rdev) 191void r600_audio_fini(struct radeon_device *rdev)
188{ 192{
189 if (!rdev->audio_enabled) 193 if (!rdev->audio.enabled)
190 return; 194 return;
191 195
192 r600_audio_engine_enable(rdev, false); 196 r600_audio_enable(rdev, &rdev->audio.pin[0], false);
197
198 rdev->audio.enabled = false;
199}
200
201struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev)
202{
203 /* only one pin on 6xx-NI */
204 return &rdev->audio.pin[0];
193} 205}
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index f651881eb0ae..daf7572be976 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -31,6 +31,37 @@
31 31
32#include "r600_blit_shaders.h" 32#include "r600_blit_shaders.h"
33 33
34/* 23 bits of float fractional data */
35#define I2F_FRAC_BITS 23
36#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
37
38/*
39 * Converts unsigned integer into 32-bit IEEE floating point representation.
40 * Will be exact from 0 to 2^24. Above that, we round towards zero
41 * as the fractional bits will not fit in a float. (It would be better to
42 * round towards even as the fpu does, but that is slower.)
43 */
44static __pure uint32_t int2float(uint32_t x)
45{
46 uint32_t msb, exponent, fraction;
47
48 /* Zero is special */
49 if (!x) return 0;
50
51 /* Get location of the most significant bit */
52 msb = __fls(x);
53
54 /*
55 * Use a rotate instead of a shift because that works both leftwards
56 * and rightwards due to the mod(32) behaviour. This means we don't
57 * need to check to see if we are above 2^24 or not.
58 */
59 fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
60 exponent = (127 + msb) << I2F_FRAC_BITS;
61
62 return fraction + exponent;
63}
64
34#define DI_PT_RECTLIST 0x11 65#define DI_PT_RECTLIST 0x11
35#define DI_INDEX_SIZE_16_BIT 0x0 66#define DI_INDEX_SIZE_16_BIT 0x0
36#define DI_SRC_SEL_AUTO_INDEX 0x2 67#define DI_SRC_SEL_AUTO_INDEX 0x2
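The int2float() helper moved above is easy to sanity-check in user space, since every integer below 2^24 must map to the exact IEEE-754 single-precision bit pattern. The sketch below is a hypothetical stand-alone harness, not driver code: it reimplements the same logic with __builtin_clz() and a local rotate in place of the kernel's __fls() and ror32(), then compares the result against the CPU's own float encoding.

    /*
     * Hypothetical user-space harness for int2float(); __builtin_clz() and
     * the local ror32() stand in for the kernel's __fls() and ror32().
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define I2F_FRAC_BITS 23
    #define I2F_MASK ((1u << I2F_FRAC_BITS) - 1)

    static uint32_t ror32(uint32_t x, unsigned int n)
    {
            n &= 31;
            return n ? (x >> n) | (x << (32 - n)) : x;
    }

    static uint32_t int2float(uint32_t x)
    {
            uint32_t msb, exponent, fraction;

            if (!x)
                    return 0;
            msb = 31 - __builtin_clz(x);   /* location of the most significant bit */
            fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
            exponent = (127 + msb) << I2F_FRAC_BITS;
            return fraction + exponent;
    }

    int main(void)
    {
            uint32_t x, bits;
            float f;

            for (x = 0; x < (1u << 24); x += 4099) {
                    f = (float)x;
                    memcpy(&bits, &f, sizeof(bits));
                    if (bits != int2float(x)) {
                            printf("mismatch at %u\n", x);
                            return 1;
                    }
            }
            printf("int2float() matches the CPU encoding for all tested values below 2^24\n");
            return 0;
    }

Above 2^24 the two encodings start to differ by rounding mode, which matches the round-towards-zero caveat in the comment.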
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
deleted file mode 100644
index 9fb5780a552f..000000000000
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ /dev/null
@@ -1,785 +0,0 @@
1/*
2 * Copyright 2009 Advanced Micro Devices, Inc.
3 * Copyright 2009 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26#include <drm/drmP.h>
27#include <drm/radeon_drm.h>
28#include "radeon.h"
29
30#include "r600d.h"
31#include "r600_blit_shaders.h"
32#include "radeon_blit_common.h"
33
34/* 23 bits of float fractional data */
35#define I2F_FRAC_BITS 23
36#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
37
38/*
39 * Converts unsigned integer into 32-bit IEEE floating point representation.
40 * Will be exact from 0 to 2^24. Above that, we round towards zero
41 * as the fractional bits will not fit in a float. (It would be better to
42 * round towards even as the fpu does, but that is slower.)
43 */
44__pure uint32_t int2float(uint32_t x)
45{
46 uint32_t msb, exponent, fraction;
47
48 /* Zero is special */
49 if (!x) return 0;
50
51 /* Get location of the most significant bit */
52 msb = __fls(x);
53
54 /*
55 * Use a rotate instead of a shift because that works both leftwards
56 * and rightwards due to the mod(32) behaviour. This means we don't
57 * need to check to see if we are above 2^24 or not.
58 */
59 fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
60 exponent = (127 + msb) << I2F_FRAC_BITS;
61
62 return fraction + exponent;
63}
64
65/* emits 21 on rv770+, 23 on r600 */
66static void
67set_render_target(struct radeon_device *rdev, int format,
68 int w, int h, u64 gpu_addr)
69{
70 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
71 u32 cb_color_info;
72 int pitch, slice;
73
74 h = ALIGN(h, 8);
75 if (h < 8)
76 h = 8;
77
78 cb_color_info = CB_FORMAT(format) |
79 CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
80 CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
81 pitch = (w / 8) - 1;
82 slice = ((w * h) / 64) - 1;
83
84 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
85 radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
86 radeon_ring_write(ring, gpu_addr >> 8);
87
88 if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
89 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
90 radeon_ring_write(ring, 2 << 0);
91 }
92
93 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
94 radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
95 radeon_ring_write(ring, (pitch << 0) | (slice << 10));
96
97 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
98 radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
99 radeon_ring_write(ring, 0);
100
101 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
102 radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
103 radeon_ring_write(ring, cb_color_info);
104
105 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
106 radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
107 radeon_ring_write(ring, 0);
108
109 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
110 radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
111 radeon_ring_write(ring, 0);
112
113 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
114 radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
115 radeon_ring_write(ring, 0);
116}
117
118/* emits 5dw */
119static void
120cp_set_surface_sync(struct radeon_device *rdev,
121 u32 sync_type, u32 size,
122 u64 mc_addr)
123{
124 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
125 u32 cp_coher_size;
126
127 if (size == 0xffffffff)
128 cp_coher_size = 0xffffffff;
129 else
130 cp_coher_size = ((size + 255) >> 8);
131
132 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
133 radeon_ring_write(ring, sync_type);
134 radeon_ring_write(ring, cp_coher_size);
135 radeon_ring_write(ring, mc_addr >> 8);
136 radeon_ring_write(ring, 10); /* poll interval */
137}
138
139/* emits 21dw + 1 surface sync = 26dw */
140static void
141set_shaders(struct radeon_device *rdev)
142{
143 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
144 u64 gpu_addr;
145 u32 sq_pgm_resources;
146
147 /* setup shader regs */
148 sq_pgm_resources = (1 << 0);
149
150 /* VS */
151 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
152 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
153 radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
154 radeon_ring_write(ring, gpu_addr >> 8);
155
156 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
157 radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
158 radeon_ring_write(ring, sq_pgm_resources);
159
160 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
161 radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
162 radeon_ring_write(ring, 0);
163
164 /* PS */
165 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
166 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
167 radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
168 radeon_ring_write(ring, gpu_addr >> 8);
169
170 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
171 radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
172 radeon_ring_write(ring, sq_pgm_resources | (1 << 28));
173
174 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
175 radeon_ring_write(ring, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
176 radeon_ring_write(ring, 2);
177
178 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
179 radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
180 radeon_ring_write(ring, 0);
181
182 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
183 cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
184}
185
186/* emits 9 + 1 sync (5) = 14*/
187static void
188set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
189{
190 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
191 u32 sq_vtx_constant_word2;
192
193 sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
194 SQ_VTXC_STRIDE(16);
195#ifdef __BIG_ENDIAN
196 sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
197#endif
198
199 radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
200 radeon_ring_write(ring, 0x460);
201 radeon_ring_write(ring, gpu_addr & 0xffffffff);
202 radeon_ring_write(ring, 48 - 1);
203 radeon_ring_write(ring, sq_vtx_constant_word2);
204 radeon_ring_write(ring, 1 << 0);
205 radeon_ring_write(ring, 0);
206 radeon_ring_write(ring, 0);
207 radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30);
208
209 if ((rdev->family == CHIP_RV610) ||
210 (rdev->family == CHIP_RV620) ||
211 (rdev->family == CHIP_RS780) ||
212 (rdev->family == CHIP_RS880) ||
213 (rdev->family == CHIP_RV710))
214 cp_set_surface_sync(rdev,
215 PACKET3_TC_ACTION_ENA, 48, gpu_addr);
216 else
217 cp_set_surface_sync(rdev,
218 PACKET3_VC_ACTION_ENA, 48, gpu_addr);
219}
220
221/* emits 9 */
222static void
223set_tex_resource(struct radeon_device *rdev,
224 int format, int w, int h, int pitch,
225 u64 gpu_addr, u32 size)
226{
227 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
228 uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
229
230 if (h < 1)
231 h = 1;
232
233 sq_tex_resource_word0 = S_038000_DIM(V_038000_SQ_TEX_DIM_2D) |
234 S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
235 sq_tex_resource_word0 |= S_038000_PITCH((pitch >> 3) - 1) |
236 S_038000_TEX_WIDTH(w - 1);
237
238 sq_tex_resource_word1 = S_038004_DATA_FORMAT(format);
239 sq_tex_resource_word1 |= S_038004_TEX_HEIGHT(h - 1);
240
241 sq_tex_resource_word4 = S_038010_REQUEST_SIZE(1) |
242 S_038010_DST_SEL_X(SQ_SEL_X) |
243 S_038010_DST_SEL_Y(SQ_SEL_Y) |
244 S_038010_DST_SEL_Z(SQ_SEL_Z) |
245 S_038010_DST_SEL_W(SQ_SEL_W);
246
247 cp_set_surface_sync(rdev,
248 PACKET3_TC_ACTION_ENA, size, gpu_addr);
249
250 radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
251 radeon_ring_write(ring, 0);
252 radeon_ring_write(ring, sq_tex_resource_word0);
253 radeon_ring_write(ring, sq_tex_resource_word1);
254 radeon_ring_write(ring, gpu_addr >> 8);
255 radeon_ring_write(ring, gpu_addr >> 8);
256 radeon_ring_write(ring, sq_tex_resource_word4);
257 radeon_ring_write(ring, 0);
258 radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30);
259}
260
261/* emits 12 */
262static void
263set_scissors(struct radeon_device *rdev, int x1, int y1,
264 int x2, int y2)
265{
266 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
267 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
268 radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
269 radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
270 radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
271
272 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
273 radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
274 radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
275 radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
276
277 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
278 radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
279 radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
280 radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
281}
282
283/* emits 10 */
284static void
285draw_auto(struct radeon_device *rdev)
286{
287 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
288 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
289 radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
290 radeon_ring_write(ring, DI_PT_RECTLIST);
291
292 radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
293 radeon_ring_write(ring,
294#ifdef __BIG_ENDIAN
295 (2 << 2) |
296#endif
297 DI_INDEX_SIZE_16_BIT);
298
299 radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
300 radeon_ring_write(ring, 1);
301
302 radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
303 radeon_ring_write(ring, 3);
304 radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
305
306}
307
308/* emits 14 */
309static void
310set_default_state(struct radeon_device *rdev)
311{
312 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
313 u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
314 u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
315 int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
316 int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
317 int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
318 u64 gpu_addr;
319 int dwords;
320
321 switch (rdev->family) {
322 case CHIP_R600:
323 num_ps_gprs = 192;
324 num_vs_gprs = 56;
325 num_temp_gprs = 4;
326 num_gs_gprs = 0;
327 num_es_gprs = 0;
328 num_ps_threads = 136;
329 num_vs_threads = 48;
330 num_gs_threads = 4;
331 num_es_threads = 4;
332 num_ps_stack_entries = 128;
333 num_vs_stack_entries = 128;
334 num_gs_stack_entries = 0;
335 num_es_stack_entries = 0;
336 break;
337 case CHIP_RV630:
338 case CHIP_RV635:
339 num_ps_gprs = 84;
340 num_vs_gprs = 36;
341 num_temp_gprs = 4;
342 num_gs_gprs = 0;
343 num_es_gprs = 0;
344 num_ps_threads = 144;
345 num_vs_threads = 40;
346 num_gs_threads = 4;
347 num_es_threads = 4;
348 num_ps_stack_entries = 40;
349 num_vs_stack_entries = 40;
350 num_gs_stack_entries = 32;
351 num_es_stack_entries = 16;
352 break;
353 case CHIP_RV610:
354 case CHIP_RV620:
355 case CHIP_RS780:
356 case CHIP_RS880:
357 default:
358 num_ps_gprs = 84;
359 num_vs_gprs = 36;
360 num_temp_gprs = 4;
361 num_gs_gprs = 0;
362 num_es_gprs = 0;
363 num_ps_threads = 136;
364 num_vs_threads = 48;
365 num_gs_threads = 4;
366 num_es_threads = 4;
367 num_ps_stack_entries = 40;
368 num_vs_stack_entries = 40;
369 num_gs_stack_entries = 32;
370 num_es_stack_entries = 16;
371 break;
372 case CHIP_RV670:
373 num_ps_gprs = 144;
374 num_vs_gprs = 40;
375 num_temp_gprs = 4;
376 num_gs_gprs = 0;
377 num_es_gprs = 0;
378 num_ps_threads = 136;
379 num_vs_threads = 48;
380 num_gs_threads = 4;
381 num_es_threads = 4;
382 num_ps_stack_entries = 40;
383 num_vs_stack_entries = 40;
384 num_gs_stack_entries = 32;
385 num_es_stack_entries = 16;
386 break;
387 case CHIP_RV770:
388 num_ps_gprs = 192;
389 num_vs_gprs = 56;
390 num_temp_gprs = 4;
391 num_gs_gprs = 0;
392 num_es_gprs = 0;
393 num_ps_threads = 188;
394 num_vs_threads = 60;
395 num_gs_threads = 0;
396 num_es_threads = 0;
397 num_ps_stack_entries = 256;
398 num_vs_stack_entries = 256;
399 num_gs_stack_entries = 0;
400 num_es_stack_entries = 0;
401 break;
402 case CHIP_RV730:
403 case CHIP_RV740:
404 num_ps_gprs = 84;
405 num_vs_gprs = 36;
406 num_temp_gprs = 4;
407 num_gs_gprs = 0;
408 num_es_gprs = 0;
409 num_ps_threads = 188;
410 num_vs_threads = 60;
411 num_gs_threads = 0;
412 num_es_threads = 0;
413 num_ps_stack_entries = 128;
414 num_vs_stack_entries = 128;
415 num_gs_stack_entries = 0;
416 num_es_stack_entries = 0;
417 break;
418 case CHIP_RV710:
419 num_ps_gprs = 192;
420 num_vs_gprs = 56;
421 num_temp_gprs = 4;
422 num_gs_gprs = 0;
423 num_es_gprs = 0;
424 num_ps_threads = 144;
425 num_vs_threads = 48;
426 num_gs_threads = 0;
427 num_es_threads = 0;
428 num_ps_stack_entries = 128;
429 num_vs_stack_entries = 128;
430 num_gs_stack_entries = 0;
431 num_es_stack_entries = 0;
432 break;
433 }
434
435 if ((rdev->family == CHIP_RV610) ||
436 (rdev->family == CHIP_RV620) ||
437 (rdev->family == CHIP_RS780) ||
438 (rdev->family == CHIP_RS880) ||
439 (rdev->family == CHIP_RV710))
440 sq_config = 0;
441 else
442 sq_config = VC_ENABLE;
443
444 sq_config |= (DX9_CONSTS |
445 ALU_INST_PREFER_VECTOR |
446 PS_PRIO(0) |
447 VS_PRIO(1) |
448 GS_PRIO(2) |
449 ES_PRIO(3));
450
451 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
452 NUM_VS_GPRS(num_vs_gprs) |
453 NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
454 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
455 NUM_ES_GPRS(num_es_gprs));
456 sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
457 NUM_VS_THREADS(num_vs_threads) |
458 NUM_GS_THREADS(num_gs_threads) |
459 NUM_ES_THREADS(num_es_threads));
460 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
461 NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
462 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
463 NUM_ES_STACK_ENTRIES(num_es_stack_entries));
464
465 /* emit an IB pointing at default state */
466 dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
467 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
468 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
469 radeon_ring_write(ring,
470#ifdef __BIG_ENDIAN
471 (2 << 0) |
472#endif
473 (gpu_addr & 0xFFFFFFFC));
474 radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
475 radeon_ring_write(ring, dwords);
476
477 /* SQ config */
478 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6));
479 radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
480 radeon_ring_write(ring, sq_config);
481 radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
482 radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
483 radeon_ring_write(ring, sq_thread_resource_mgmt);
484 radeon_ring_write(ring, sq_stack_resource_mgmt_1);
485 radeon_ring_write(ring, sq_stack_resource_mgmt_2);
486}
487
488int r600_blit_init(struct radeon_device *rdev)
489{
490 u32 obj_size;
491 int i, r, dwords;
492 void *ptr;
493 u32 packet2s[16];
494 int num_packet2s = 0;
495
496 rdev->r600_blit.primitives.set_render_target = set_render_target;
497 rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
498 rdev->r600_blit.primitives.set_shaders = set_shaders;
499 rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
500 rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
501 rdev->r600_blit.primitives.set_scissors = set_scissors;
502 rdev->r600_blit.primitives.draw_auto = draw_auto;
503 rdev->r600_blit.primitives.set_default_state = set_default_state;
504
505 rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
506 rdev->r600_blit.ring_size_common += 40; /* shaders + def state */
507 rdev->r600_blit.ring_size_common += 5; /* done copy */
508 rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
509
510 rdev->r600_blit.ring_size_per_loop = 76;
511 /* set_render_target emits 2 extra dwords on rv6xx */
512 if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
513 rdev->r600_blit.ring_size_per_loop += 2;
514
515 rdev->r600_blit.max_dim = 8192;
516
517 rdev->r600_blit.state_offset = 0;
518
519 if (rdev->family >= CHIP_RV770)
520 rdev->r600_blit.state_len = r7xx_default_size;
521 else
522 rdev->r600_blit.state_len = r6xx_default_size;
523
524 dwords = rdev->r600_blit.state_len;
525 while (dwords & 0xf) {
526 packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
527 dwords++;
528 }
529
530 obj_size = dwords * 4;
531 obj_size = ALIGN(obj_size, 256);
532
533 rdev->r600_blit.vs_offset = obj_size;
534 obj_size += r6xx_vs_size * 4;
535 obj_size = ALIGN(obj_size, 256);
536
537 rdev->r600_blit.ps_offset = obj_size;
538 obj_size += r6xx_ps_size * 4;
539 obj_size = ALIGN(obj_size, 256);
540
541 /* pin copy shader into vram if not already initialized */
542 if (rdev->r600_blit.shader_obj == NULL) {
543 r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
544 RADEON_GEM_DOMAIN_VRAM,
545 NULL, &rdev->r600_blit.shader_obj);
546 if (r) {
547 DRM_ERROR("r600 failed to allocate shader\n");
548 return r;
549 }
550
551 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
552 if (unlikely(r != 0))
553 return r;
554 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
555 &rdev->r600_blit.shader_gpu_addr);
556 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
557 if (r) {
558 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
559 return r;
560 }
561 }
562
563 DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n",
564 obj_size,
565 rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
566
567 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
568 if (unlikely(r != 0))
569 return r;
570 r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
571 if (r) {
572 DRM_ERROR("failed to map blit object %d\n", r);
573 return r;
574 }
575 if (rdev->family >= CHIP_RV770)
576 memcpy_toio(ptr + rdev->r600_blit.state_offset,
577 r7xx_default_state, rdev->r600_blit.state_len * 4);
578 else
579 memcpy_toio(ptr + rdev->r600_blit.state_offset,
580 r6xx_default_state, rdev->r600_blit.state_len * 4);
581 if (num_packet2s)
582 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
583 packet2s, num_packet2s * 4);
584 for (i = 0; i < r6xx_vs_size; i++)
585 *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]);
586 for (i = 0; i < r6xx_ps_size; i++)
587 *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]);
588 radeon_bo_kunmap(rdev->r600_blit.shader_obj);
589 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
590
591 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
592 return 0;
593}
594
595void r600_blit_fini(struct radeon_device *rdev)
596{
597 int r;
598
599 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
600 if (rdev->r600_blit.shader_obj == NULL)
601 return;
602 /* If we can't reserve the bo, unref should be enough to destroy
603 * it when it becomes idle.
604 */
605 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
606 if (!r) {
607 radeon_bo_unpin(rdev->r600_blit.shader_obj);
608 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
609 }
610 radeon_bo_unref(&rdev->r600_blit.shader_obj);
611}
612
613static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
614 int *width, int *height, int max_dim)
615{
616 unsigned max_pages;
617 unsigned pages = num_gpu_pages;
618 int w, h;
619
620 if (num_gpu_pages == 0) {
621 /* not supposed to be called with no pages, but just in case */
622 h = 0;
623 w = 0;
624 pages = 0;
625 WARN_ON(1);
626 } else {
627 int rect_order = 2;
628 h = RECT_UNIT_H;
629 while (num_gpu_pages / rect_order) {
630 h *= 2;
631 rect_order *= 4;
632 if (h >= max_dim) {
633 h = max_dim;
634 break;
635 }
636 }
637 max_pages = (max_dim * h) / (RECT_UNIT_W * RECT_UNIT_H);
638 if (pages > max_pages)
639 pages = max_pages;
640 w = (pages * RECT_UNIT_W * RECT_UNIT_H) / h;
641 w = (w / RECT_UNIT_W) * RECT_UNIT_W;
642 pages = (w * h) / (RECT_UNIT_W * RECT_UNIT_H);
643 BUG_ON(pages == 0);
644 }
645
646
647 DRM_DEBUG("blit_rectangle: h=%d, w=%d, pages=%d\n", h, w, pages);
648
 649	/* return width and height only if the caller wants it */

650 if (height)
651 *height = h;
652 if (width)
653 *width = w;
654
655 return pages;
656}
657
658
659int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
660 struct radeon_fence **fence, struct radeon_sa_bo **vb,
661 struct radeon_semaphore **sem)
662{
663 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
664 int r;
665 int ring_size;
666 int num_loops = 0;
667 int dwords_per_loop = rdev->r600_blit.ring_size_per_loop;
668
669 /* num loops */
670 while (num_gpu_pages) {
671 num_gpu_pages -=
672 r600_blit_create_rect(num_gpu_pages, NULL, NULL,
673 rdev->r600_blit.max_dim);
674 num_loops++;
675 }
676
677 /* 48 bytes for vertex per loop */
678 r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, vb,
679 (num_loops*48)+256, 256, true);
680 if (r) {
681 return r;
682 }
683
684 r = radeon_semaphore_create(rdev, sem);
685 if (r) {
686 radeon_sa_bo_free(rdev, vb, NULL);
687 return r;
688 }
689
690 /* calculate number of loops correctly */
691 ring_size = num_loops * dwords_per_loop;
692 ring_size += rdev->r600_blit.ring_size_common;
693 r = radeon_ring_lock(rdev, ring, ring_size);
694 if (r) {
695 radeon_sa_bo_free(rdev, vb, NULL);
696 radeon_semaphore_free(rdev, sem, NULL);
697 return r;
698 }
699
700 if (radeon_fence_need_sync(*fence, RADEON_RING_TYPE_GFX_INDEX)) {
701 radeon_semaphore_sync_rings(rdev, *sem, (*fence)->ring,
702 RADEON_RING_TYPE_GFX_INDEX);
703 radeon_fence_note_sync(*fence, RADEON_RING_TYPE_GFX_INDEX);
704 } else {
705 radeon_semaphore_free(rdev, sem, NULL);
706 }
707
708 rdev->r600_blit.primitives.set_default_state(rdev);
709 rdev->r600_blit.primitives.set_shaders(rdev);
710 return 0;
711}
712
713void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
714 struct radeon_sa_bo *vb, struct radeon_semaphore *sem)
715{
716 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
717 int r;
718
719 r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
720 if (r) {
721 radeon_ring_unlock_undo(rdev, ring);
722 return;
723 }
724
725 radeon_ring_unlock_commit(rdev, ring);
726 radeon_sa_bo_free(rdev, &vb, *fence);
727 radeon_semaphore_free(rdev, &sem, *fence);
728}
729
730void r600_kms_blit_copy(struct radeon_device *rdev,
731 u64 src_gpu_addr, u64 dst_gpu_addr,
732 unsigned num_gpu_pages,
733 struct radeon_sa_bo *vb)
734{
735 u64 vb_gpu_addr;
736 u32 *vb_cpu_addr;
737
738 DRM_DEBUG("emitting copy %16llx %16llx %d\n",
739 src_gpu_addr, dst_gpu_addr, num_gpu_pages);
740 vb_cpu_addr = (u32 *)radeon_sa_bo_cpu_addr(vb);
741 vb_gpu_addr = radeon_sa_bo_gpu_addr(vb);
742
743 while (num_gpu_pages) {
744 int w, h;
745 unsigned size_in_bytes;
746 unsigned pages_per_loop =
747 r600_blit_create_rect(num_gpu_pages, &w, &h,
748 rdev->r600_blit.max_dim);
749
750 size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE;
751 DRM_DEBUG("rectangle w=%d h=%d\n", w, h);
752
753 vb_cpu_addr[0] = 0;
754 vb_cpu_addr[1] = 0;
755 vb_cpu_addr[2] = 0;
756 vb_cpu_addr[3] = 0;
757
758 vb_cpu_addr[4] = 0;
759 vb_cpu_addr[5] = int2float(h);
760 vb_cpu_addr[6] = 0;
761 vb_cpu_addr[7] = int2float(h);
762
763 vb_cpu_addr[8] = int2float(w);
764 vb_cpu_addr[9] = int2float(h);
765 vb_cpu_addr[10] = int2float(w);
766 vb_cpu_addr[11] = int2float(h);
767
768 rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
769 w, h, w, src_gpu_addr, size_in_bytes);
770 rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8,
771 w, h, dst_gpu_addr);
772 rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h);
773 rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr);
774 rdev->r600_blit.primitives.draw_auto(rdev);
775 rdev->r600_blit.primitives.cp_set_surface_sync(rdev,
776 PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
777 size_in_bytes, dst_gpu_addr);
778
779 vb_cpu_addr += 12;
780 vb_gpu_addr += 4*12;
781 src_gpu_addr += size_in_bytes;
782 dst_gpu_addr += size_in_bytes;
783 num_gpu_pages -= pages_per_loop;
784 }
785}
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h
index 2f3ce7a75976..f437d36dd98c 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.h
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h
@@ -35,5 +35,4 @@ extern const u32 r6xx_default_state[];
35extern const u32 r6xx_ps_size, r6xx_vs_size; 35extern const u32 r6xx_ps_size, r6xx_vs_size;
36extern const u32 r6xx_default_size, r7xx_default_size; 36extern const u32 r6xx_default_size, r7xx_default_size;
37 37
38__pure uint32_t int2float(uint32_t x);
39#endif 38#endif
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 1c51c08b1fde..d8eb48bff0ed 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -2200,13 +2200,13 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
2200 dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle 2200 dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
2201 + init->ring_size / sizeof(u32)); 2201 + init->ring_size / sizeof(u32));
2202 dev_priv->ring.size = init->ring_size; 2202 dev_priv->ring.size = init->ring_size;
2203 dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8); 2203 dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
2204 2204
2205 dev_priv->ring.rptr_update = /* init->rptr_update */ 4096; 2205 dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
2206 dev_priv->ring.rptr_update_l2qw = drm_order(/* init->rptr_update */ 4096 / 8); 2206 dev_priv->ring.rptr_update_l2qw = order_base_2(/* init->rptr_update */ 4096 / 8);
2207 2207
2208 dev_priv->ring.fetch_size = /* init->fetch_size */ 32; 2208 dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
2209 dev_priv->ring.fetch_size_l2ow = drm_order(/* init->fetch_size */ 32 / 16); 2209 dev_priv->ring.fetch_size_l2ow = order_base_2(/* init->fetch_size */ 32 / 16);
2210 2210
2211 dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; 2211 dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
2212 2212
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
new file mode 100644
index 000000000000..3b317456512a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -0,0 +1,497 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <drm/drmP.h>
25#include "radeon.h"
26#include "radeon_asic.h"
27#include "r600d.h"
28
29u32 r600_gpu_check_soft_reset(struct radeon_device *rdev);
30
31/*
32 * DMA
33 * Starting with R600, the GPU has an asynchronous
34 * DMA engine. The programming model is very similar
35 * to the 3D engine (ring buffer, IBs, etc.), but the
 36 * DMA controller has its own packet format that is
 37 * different from the PM4 format used by the 3D engine.
38 * It supports copying data, writing embedded data,
39 * solid fills, and a number of other things. It also
40 * has support for tiling/detiling of buffers.
41 */
42
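The simplest concrete view of that packet format is the four-dword write emitted by r600_dma_ring_test() later in this file: a header built with DMA_PACKET(DMA_PACKET_WRITE, ...), the low destination address, the high address bits, and the payload. The sketch below replays that sequence into a plain array standing in for the ring; the DMA_PACKET() bit packing and the opcode value are assumptions modeled on r600d.h, included only so the example compiles.

    /* Illustrative only: the DMA_PACKET() packing and the 0x2 opcode below are
     * assumptions modeled on r600d.h, just so the sketch compiles. */
    #include <stdint.h>
    #include <stdio.h>

    #define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | (((t) & 0x1) << 23) | \
                                      (((s) & 0x1) << 22) | ((n) & 0xFFFF))
    #define DMA_PACKET_WRITE 0x2    /* assumed opcode value */

    int main(void)
    {
            uint32_t ring[4];                   /* stands in for the DMA ring */
            uint64_t gpu_addr = 0x100000fcULL;  /* hypothetical scratch address */
            unsigned int w = 0, i;

            ring[w++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);  /* header: write 1 dword */
            ring[w++] = gpu_addr & 0xfffffffc;                  /* destination, low 32 bits */
            ring[w++] = (gpu_addr >> 32) & 0xff;                /* destination, high 8 bits */
            ring[w++] = 0xDEADBEEF;                             /* payload */

            for (i = 0; i < w; i++)
                    printf("dw%u: 0x%08x\n", i, ring[i]);
            return 0;
    }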
43/**
44 * r600_dma_get_rptr - get the current read pointer
45 *
46 * @rdev: radeon_device pointer
47 * @ring: radeon ring pointer
48 *
49 * Get the current rptr from the hardware (r6xx+).
50 */
51uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
52 struct radeon_ring *ring)
53{
54 return (radeon_ring_generic_get_rptr(rdev, ring) & 0x3fffc) >> 2;
55}
56
57/**
58 * r600_dma_get_wptr - get the current write pointer
59 *
60 * @rdev: radeon_device pointer
61 * @ring: radeon ring pointer
62 *
63 * Get the current wptr from the hardware (r6xx+).
64 */
65uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
66 struct radeon_ring *ring)
67{
68 return (RREG32(ring->wptr_reg) & 0x3fffc) >> 2;
69}
70
71/**
72 * r600_dma_set_wptr - commit the write pointer
73 *
74 * @rdev: radeon_device pointer
75 * @ring: radeon ring pointer
76 *
77 * Write the wptr back to the hardware (r6xx+).
78 */
79void r600_dma_set_wptr(struct radeon_device *rdev,
80 struct radeon_ring *ring)
81{
82 WREG32(ring->wptr_reg, (ring->wptr << 2) & 0x3fffc);
83}
84
85/**
86 * r600_dma_stop - stop the async dma engine
87 *
88 * @rdev: radeon_device pointer
89 *
90 * Stop the async dma engine (r6xx-evergreen).
91 */
92void r600_dma_stop(struct radeon_device *rdev)
93{
94 u32 rb_cntl = RREG32(DMA_RB_CNTL);
95
96 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
97
98 rb_cntl &= ~DMA_RB_ENABLE;
99 WREG32(DMA_RB_CNTL, rb_cntl);
100
101 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
102}
103
104/**
105 * r600_dma_resume - setup and start the async dma engine
106 *
107 * @rdev: radeon_device pointer
108 *
109 * Set up the DMA ring buffer and enable it. (r6xx-evergreen).
110 * Returns 0 for success, error for failure.
111 */
112int r600_dma_resume(struct radeon_device *rdev)
113{
114 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
115 u32 rb_cntl, dma_cntl, ib_cntl;
116 u32 rb_bufsz;
117 int r;
118
119 /* Reset dma */
120 if (rdev->family >= CHIP_RV770)
121 WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
122 else
123 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
124 RREG32(SRBM_SOFT_RESET);
125 udelay(50);
126 WREG32(SRBM_SOFT_RESET, 0);
127
128 WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
129 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
130
131 /* Set ring buffer size in dwords */
132 rb_bufsz = order_base_2(ring->ring_size / 4);
133 rb_cntl = rb_bufsz << 1;
134#ifdef __BIG_ENDIAN
135 rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
136#endif
137 WREG32(DMA_RB_CNTL, rb_cntl);
138
139 /* Initialize the ring buffer's read and write pointers */
140 WREG32(DMA_RB_RPTR, 0);
141 WREG32(DMA_RB_WPTR, 0);
142
143 /* set the wb address whether it's enabled or not */
144 WREG32(DMA_RB_RPTR_ADDR_HI,
145 upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
146 WREG32(DMA_RB_RPTR_ADDR_LO,
147 ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
148
149 if (rdev->wb.enabled)
150 rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
151
152 WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
153
154 /* enable DMA IBs */
155 ib_cntl = DMA_IB_ENABLE;
156#ifdef __BIG_ENDIAN
157 ib_cntl |= DMA_IB_SWAP_ENABLE;
158#endif
159 WREG32(DMA_IB_CNTL, ib_cntl);
160
161 dma_cntl = RREG32(DMA_CNTL);
162 dma_cntl &= ~CTXEMPTY_INT_ENABLE;
163 WREG32(DMA_CNTL, dma_cntl);
164
165 if (rdev->family >= CHIP_RV770)
166 WREG32(DMA_MODE, 1);
167
168 ring->wptr = 0;
169 WREG32(DMA_RB_WPTR, ring->wptr << 2);
170
171 ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
172
173 WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
174
175 ring->ready = true;
176
177 r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
178 if (r) {
179 ring->ready = false;
180 return r;
181 }
182
183 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
184
185 return 0;
186}
187
188/**
189 * r600_dma_fini - tear down the async dma engine
190 *
191 * @rdev: radeon_device pointer
192 *
193 * Stop the async dma engine and free the ring (r6xx-evergreen).
194 */
195void r600_dma_fini(struct radeon_device *rdev)
196{
197 r600_dma_stop(rdev);
198 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
199}
200
201/**
 202 * r600_dma_is_lockup - check if the DMA engine is locked up
203 *
204 * @rdev: radeon_device pointer
205 * @ring: radeon_ring structure holding ring information
206 *
207 * Check if the async DMA engine is locked up.
208 * Returns true if the engine appears to be locked up, false if not.
209 */
210bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
211{
212 u32 reset_mask = r600_gpu_check_soft_reset(rdev);
213
214 if (!(reset_mask & RADEON_RESET_DMA)) {
215 radeon_ring_lockup_update(ring);
216 return false;
217 }
 218	/* force ring activity */
219 radeon_ring_force_activity(rdev, ring);
220 return radeon_ring_test_lockup(rdev, ring);
221}
222
223
224/**
225 * r600_dma_ring_test - simple async dma engine test
226 *
227 * @rdev: radeon_device pointer
228 * @ring: radeon_ring structure holding ring information
229 *
 230 * Test the DMA engine by using it to write a
 231 * value to memory (r6xx-SI).
232 * Returns 0 for success, error for failure.
233 */
234int r600_dma_ring_test(struct radeon_device *rdev,
235 struct radeon_ring *ring)
236{
237 unsigned i;
238 int r;
239 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
240 u32 tmp;
241
242 if (!ptr) {
243 DRM_ERROR("invalid vram scratch pointer\n");
244 return -EINVAL;
245 }
246
247 tmp = 0xCAFEDEAD;
248 writel(tmp, ptr);
249
250 r = radeon_ring_lock(rdev, ring, 4);
251 if (r) {
252 DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
253 return r;
254 }
255 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
256 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
257 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
258 radeon_ring_write(ring, 0xDEADBEEF);
259 radeon_ring_unlock_commit(rdev, ring);
260
261 for (i = 0; i < rdev->usec_timeout; i++) {
262 tmp = readl(ptr);
263 if (tmp == 0xDEADBEEF)
264 break;
265 DRM_UDELAY(1);
266 }
267
268 if (i < rdev->usec_timeout) {
269 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
270 } else {
271 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
272 ring->idx, tmp);
273 r = -EINVAL;
274 }
275 return r;
276}
277
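/* Editorial example (not part of r600_dma.c): the WRITE packet above carries
 * the destination as a dword-aligned low word plus an 8-bit high word, i.e.
 * the engine appears to address 40 bits.  The split used by the ring test:
 */
static void example_split_gpu_addr(unsigned long long addr,
				   unsigned int *lo, unsigned int *hi)
{
	/* low dword, forced to dword alignment as in the packet payload */
	*lo = (unsigned int)(addr & 0xfffffffc);
	/* bits 39:32 only, matching the 0xff mask used above */
	*hi = (unsigned int)((addr >> 32) & 0xff);
}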
278/**
279 * r600_dma_fence_ring_emit - emit a fence on the DMA ring
280 *
281 * @rdev: radeon_device pointer
282 * @fence: radeon fence object
283 *
284 * Add a DMA fence packet to the ring to write
 285 * the fence seq number and a DMA trap packet to generate
286 * an interrupt if needed (r6xx-r7xx).
287 */
288void r600_dma_fence_ring_emit(struct radeon_device *rdev,
289 struct radeon_fence *fence)
290{
291 struct radeon_ring *ring = &rdev->ring[fence->ring];
292 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
293
294 /* write the fence */
295 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
296 radeon_ring_write(ring, addr & 0xfffffffc);
297 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
298 radeon_ring_write(ring, lower_32_bits(fence->seq));
299 /* generate an interrupt */
300 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
301}
302
303/**
304 * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
305 *
306 * @rdev: radeon_device pointer
307 * @ring: radeon_ring structure holding ring information
308 * @semaphore: radeon semaphore object
309 * @emit_wait: wait or signal semaphore
310 *
 311 * Add a DMA semaphore packet to the ring to wait on or signal
312 * other rings (r6xx-SI).
313 */
314void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
315 struct radeon_ring *ring,
316 struct radeon_semaphore *semaphore,
317 bool emit_wait)
318{
319 u64 addr = semaphore->gpu_addr;
320 u32 s = emit_wait ? 0 : 1;
321
322 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
323 radeon_ring_write(ring, addr & 0xfffffffc);
324 radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
325}
326
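/* Editorial sketch (not part of r600_dma.c): the "s" field of the SEMAPHORE
 * packet selects signal (1) versus wait (0).  To make one DMA ring wait for
 * work submitted on another, a caller would emit a signal on the producer
 * and a wait on the consumer against the same semaphore; in practice
 * radeon_semaphore_sync_rings() picks the right per-ring emitter.
 */
static void example_sync_dma_rings(struct radeon_device *rdev,
				   struct radeon_ring *producer,
				   struct radeon_ring *consumer,
				   struct radeon_semaphore *sem)
{
	/* signal on the ring that produced the work ... */
	r600_dma_semaphore_ring_emit(rdev, producer, sem, false);
	/* ... and wait on the ring that consumes it */
	r600_dma_semaphore_ring_emit(rdev, consumer, sem, true);
}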
327/**
328 * r600_dma_ib_test - test an IB on the DMA engine
329 *
330 * @rdev: radeon_device pointer
331 * @ring: radeon_ring structure holding ring information
332 *
333 * Test a simple IB in the DMA ring (r6xx-SI).
334 * Returns 0 on success, error on failure.
335 */
336int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
337{
338 struct radeon_ib ib;
339 unsigned i;
340 int r;
341 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
342 u32 tmp = 0;
343
344 if (!ptr) {
345 DRM_ERROR("invalid vram scratch pointer\n");
346 return -EINVAL;
347 }
348
349 tmp = 0xCAFEDEAD;
350 writel(tmp, ptr);
351
352 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
353 if (r) {
354 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
355 return r;
356 }
357
358 ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
359 ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
360 ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
361 ib.ptr[3] = 0xDEADBEEF;
362 ib.length_dw = 4;
363
364 r = radeon_ib_schedule(rdev, &ib, NULL);
365 if (r) {
366 radeon_ib_free(rdev, &ib);
367 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
368 return r;
369 }
370 r = radeon_fence_wait(ib.fence, false);
371 if (r) {
372 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
373 return r;
374 }
375 for (i = 0; i < rdev->usec_timeout; i++) {
376 tmp = readl(ptr);
377 if (tmp == 0xDEADBEEF)
378 break;
379 DRM_UDELAY(1);
380 }
381 if (i < rdev->usec_timeout) {
382 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
383 } else {
384 DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
385 r = -EINVAL;
386 }
387 radeon_ib_free(rdev, &ib);
388 return r;
389}
390
391/**
392 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
393 *
394 * @rdev: radeon_device pointer
395 * @ib: IB object to schedule
396 *
397 * Schedule an IB in the DMA ring (r6xx-r7xx).
398 */
399void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
400{
401 struct radeon_ring *ring = &rdev->ring[ib->ring];
402
403 if (rdev->wb.enabled) {
404 u32 next_rptr = ring->wptr + 4;
405 while ((next_rptr & 7) != 5)
406 next_rptr++;
407 next_rptr += 3;
408 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
409 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
410 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
411 radeon_ring_write(ring, next_rptr);
412 }
413
414 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
415 * Pad as necessary with NOPs.
416 */
417 while ((ring->wptr & 7) != 5)
418 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
419 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
420 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
421 radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
422
423}
424
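/* Editorial example (not part of r600_dma.c): the INDIRECT_BUFFER packet is
 * 3 dwords and must end on an 8-dword boundary, so it has to start at a
 * slot where (wptr & 7) == 5.  The next_rptr arithmetic above mirrors this:
 * 4 dwords of write-back packet, NOP padding up to the ...5 slot, then the
 * 3-dword IB packet.  A standalone version of that calculation:
 */
static unsigned int example_wptr_after_ib(unsigned int wptr)
{
	unsigned int p = wptr + 4;	/* the optional 4-dword WRITE packet */

	while ((p & 7) != 5)		/* NOP padding */
		p++;
	return p + 3;			/* the IB packet; always a multiple of 8 */
}
/* example_wptr_after_ib(16) == 24: the IB packet occupies slots 21..23. */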
425/**
426 * r600_copy_dma - copy pages using the DMA engine
427 *
428 * @rdev: radeon_device pointer
429 * @src_offset: src GPU address
430 * @dst_offset: dst GPU address
431 * @num_gpu_pages: number of GPU pages to xfer
432 * @fence: radeon fence object
433 *
 434 * Copy GPU pages using the DMA engine (r6xx).
435 * Used by the radeon ttm implementation to move pages if
436 * registered as the asic copy callback.
437 */
438int r600_copy_dma(struct radeon_device *rdev,
439 uint64_t src_offset, uint64_t dst_offset,
440 unsigned num_gpu_pages,
441 struct radeon_fence **fence)
442{
443 struct radeon_semaphore *sem = NULL;
444 int ring_index = rdev->asic->copy.dma_ring_index;
445 struct radeon_ring *ring = &rdev->ring[ring_index];
446 u32 size_in_dw, cur_size_in_dw;
447 int i, num_loops;
448 int r = 0;
449
450 r = radeon_semaphore_create(rdev, &sem);
451 if (r) {
452 DRM_ERROR("radeon: moving bo (%d).\n", r);
453 return r;
454 }
455
456 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
457 num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
458 r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
459 if (r) {
460 DRM_ERROR("radeon: moving bo (%d).\n", r);
461 radeon_semaphore_free(rdev, &sem, NULL);
462 return r;
463 }
464
465 if (radeon_fence_need_sync(*fence, ring->idx)) {
466 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
467 ring->idx);
468 radeon_fence_note_sync(*fence, ring->idx);
469 } else {
470 radeon_semaphore_free(rdev, &sem, NULL);
471 }
472
473 for (i = 0; i < num_loops; i++) {
474 cur_size_in_dw = size_in_dw;
475 if (cur_size_in_dw > 0xFFFE)
476 cur_size_in_dw = 0xFFFE;
477 size_in_dw -= cur_size_in_dw;
478 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
479 radeon_ring_write(ring, dst_offset & 0xfffffffc);
480 radeon_ring_write(ring, src_offset & 0xfffffffc);
481 radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
482 (upper_32_bits(src_offset) & 0xff)));
483 src_offset += cur_size_in_dw * 4;
484 dst_offset += cur_size_in_dw * 4;
485 }
486
487 r = radeon_fence_emit(rdev, fence, ring->idx);
488 if (r) {
489 radeon_ring_unlock_undo(rdev, ring);
490 return r;
491 }
492
493 radeon_ring_unlock_commit(rdev, ring);
494 radeon_semaphore_free(rdev, &sem, *fence);
495
496 return r;
497}
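/* Editorial example (not part of r600_dma.c): each DMA_PACKET_COPY above
 * moves at most 0xFFFE dwords, so a copy is split into
 * DIV_ROUND_UP(size_in_dw, 0xFFFE) loops and the ring is locked for
 * 4 dwords per loop plus 8 dwords of headroom.  A standalone version of
 * that sizing calculation:
 */
static unsigned int example_dma_copy_ring_dwords(unsigned int size_in_bytes)
{
	unsigned int size_in_dw = size_in_bytes / 4;
	unsigned int num_loops = (size_in_dw + 0xFFFE - 1) / 0xFFFE;

	return num_loops * 4 + 8;
}
/* example_dma_copy_ring_dwords(1 << 20) == 5 * 4 + 8 == 28 ring dwords
 * for a 1 MiB (262144-dword) copy. */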
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index e5c860f4ccbe..fa0de46fcc0d 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -174,6 +174,24 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
174 return vblank_time_us; 174 return vblank_time_us;
175} 175}
176 176
177u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
178{
179 struct drm_device *dev = rdev->ddev;
180 struct drm_crtc *crtc;
181 struct radeon_crtc *radeon_crtc;
182 u32 vrefresh = 0;
183
184 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
185 radeon_crtc = to_radeon_crtc(crtc);
186 if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
187 vrefresh = radeon_crtc->hw_mode.vrefresh;
188 break;
189 }
190 }
191
192 return vrefresh;
193}
194
177void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b, 195void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
178 u32 *p, u32 *u) 196 u32 *p, u32 *u)
179{ 197{
@@ -745,6 +763,8 @@ bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
745 case THERMAL_TYPE_SUMO: 763 case THERMAL_TYPE_SUMO:
746 case THERMAL_TYPE_NI: 764 case THERMAL_TYPE_NI:
747 case THERMAL_TYPE_SI: 765 case THERMAL_TYPE_SI:
766 case THERMAL_TYPE_CI:
767 case THERMAL_TYPE_KV:
748 return true; 768 return true;
749 case THERMAL_TYPE_ADT7473_WITH_INTERNAL: 769 case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
750 case THERMAL_TYPE_EMC2103_WITH_INTERNAL: 770 case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
@@ -779,15 +799,19 @@ static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependen
779 u32 size = atom_table->ucNumEntries * 799 u32 size = atom_table->ucNumEntries *
780 sizeof(struct radeon_clock_voltage_dependency_entry); 800 sizeof(struct radeon_clock_voltage_dependency_entry);
781 int i; 801 int i;
802 ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
782 803
783 radeon_table->entries = kzalloc(size, GFP_KERNEL); 804 radeon_table->entries = kzalloc(size, GFP_KERNEL);
784 if (!radeon_table->entries) 805 if (!radeon_table->entries)
785 return -ENOMEM; 806 return -ENOMEM;
786 807
808 entry = &atom_table->entries[0];
787 for (i = 0; i < atom_table->ucNumEntries; i++) { 809 for (i = 0; i < atom_table->ucNumEntries; i++) {
788 radeon_table->entries[i].clk = le16_to_cpu(atom_table->entries[i].usClockLow) | 810 radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
789 (atom_table->entries[i].ucClockHigh << 16); 811 (entry->ucClockHigh << 16);
790 radeon_table->entries[i].v = le16_to_cpu(atom_table->entries[i].usVoltage); 812 radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage);
813 entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
814 ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
791 } 815 }
792 radeon_table->count = atom_table->ucNumEntries; 816 radeon_table->count = atom_table->ucNumEntries;
793 817
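/* Editorial sketch (not part of this patch): the hunk above stops indexing
 * atom_table->entries[i] directly and instead walks a byte cursor forward by
 * the record size, so the stride matches the packed records in the BIOS
 * image rather than whatever padding the compiler might add.  The same
 * pattern with a hypothetical record type:
 */
struct example_packed_rec {
	unsigned short clk_low;
	unsigned char  clk_high;
	unsigned short voltage;
} __attribute__((packed));

static void example_walk_packed(const void *blob, unsigned int count)
{
	const struct example_packed_rec *rec = blob;
	unsigned int i;

	for (i = 0; i < count; i++) {
		unsigned int clk = rec->clk_low | (rec->clk_high << 16);

		(void)clk;	/* a real parser would store this */
		/* advance by the record size, not by array indexing */
		rec = (const struct example_packed_rec *)
			((const unsigned char *)rec + sizeof(*rec));
	}
}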
@@ -875,6 +899,19 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
875 return ret; 899 return ret;
876 } 900 }
877 } 901 }
902 if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
903 dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
904 (mode_info->atom_context->bios + data_offset +
905 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
906 ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
907 dep_table);
908 if (ret) {
909 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
910 kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
911 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
912 return ret;
913 }
914 }
878 if (power_info->pplib4.usMaxClockVoltageOnDCOffset) { 915 if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
879 ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v = 916 ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
880 (ATOM_PPLIB_Clock_Voltage_Limit_Table *) 917 (ATOM_PPLIB_Clock_Voltage_Limit_Table *)
@@ -898,27 +935,27 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
898 (ATOM_PPLIB_PhaseSheddingLimits_Table *) 935 (ATOM_PPLIB_PhaseSheddingLimits_Table *)
899 (mode_info->atom_context->bios + data_offset + 936 (mode_info->atom_context->bios + data_offset +
900 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset)); 937 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
938 ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
901 939
902 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries = 940 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
903 kzalloc(psl->ucNumEntries * 941 kzalloc(psl->ucNumEntries *
904 sizeof(struct radeon_phase_shedding_limits_entry), 942 sizeof(struct radeon_phase_shedding_limits_entry),
905 GFP_KERNEL); 943 GFP_KERNEL);
906 if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) { 944 if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
907 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); 945 r600_free_extended_power_table(rdev);
908 kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
909 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
910 return -ENOMEM; 946 return -ENOMEM;
911 } 947 }
912 948
949 entry = &psl->entries[0];
913 for (i = 0; i < psl->ucNumEntries; i++) { 950 for (i = 0; i < psl->ucNumEntries; i++) {
914 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk = 951 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
915 le16_to_cpu(psl->entries[i].usSclkLow) | 952 le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
916 (psl->entries[i].ucSclkHigh << 16);
917 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk = 953 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
918 le16_to_cpu(psl->entries[i].usMclkLow) | 954 le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
919 (psl->entries[i].ucMclkHigh << 16);
920 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage = 955 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
921 le16_to_cpu(psl->entries[i].usVoltage); 956 le16_to_cpu(entry->usVoltage);
957 entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
958 ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
922 } 959 }
923 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count = 960 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
924 psl->ucNumEntries; 961 psl->ucNumEntries;
@@ -945,30 +982,140 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
945 (ATOM_PPLIB_CAC_Leakage_Table *) 982 (ATOM_PPLIB_CAC_Leakage_Table *)
946 (mode_info->atom_context->bios + data_offset + 983 (mode_info->atom_context->bios + data_offset +
947 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset)); 984 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
985 ATOM_PPLIB_CAC_Leakage_Record *entry;
948 u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table); 986 u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);
949 rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL); 987 rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
950 if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) { 988 if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
951 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); 989 r600_free_extended_power_table(rdev);
952 kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
953 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
954 return -ENOMEM; 990 return -ENOMEM;
955 } 991 }
992 entry = &cac_table->entries[0];
956 for (i = 0; i < cac_table->ucNumEntries; i++) { 993 for (i = 0; i < cac_table->ucNumEntries; i++) {
957 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc = 994 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
958 le16_to_cpu(cac_table->entries[i].usVddc); 995 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
959 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage = 996 le16_to_cpu(entry->usVddc1);
960 le32_to_cpu(cac_table->entries[i].ulLeakageValue); 997 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
998 le16_to_cpu(entry->usVddc2);
999 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
1000 le16_to_cpu(entry->usVddc3);
1001 } else {
1002 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
1003 le16_to_cpu(entry->usVddc);
1004 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
1005 le32_to_cpu(entry->ulLeakageValue);
1006 }
1007 entry = (ATOM_PPLIB_CAC_Leakage_Record *)
1008 ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
961 } 1009 }
962 rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries; 1010 rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
963 } 1011 }
964 } 1012 }
965 1013
966 /* ppm table */ 1014 /* ext tables */
967 if (le16_to_cpu(power_info->pplib.usTableSize) >= 1015 if (le16_to_cpu(power_info->pplib.usTableSize) >=
968 sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { 1016 sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
969 ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *) 1017 ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
970 (mode_info->atom_context->bios + data_offset + 1018 (mode_info->atom_context->bios + data_offset +
971 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset)); 1019 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
1020 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
1021 ext_hdr->usVCETableOffset) {
1022 VCEClockInfoArray *array = (VCEClockInfoArray *)
1023 (mode_info->atom_context->bios + data_offset +
1024 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
1025 ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
1026 (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
1027 (mode_info->atom_context->bios + data_offset +
1028 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
1029 1 + array->ucNumEntries * sizeof(VCEClockInfo));
1030 ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
1031 u32 size = limits->numEntries *
1032 sizeof(struct radeon_vce_clock_voltage_dependency_entry);
1033 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
1034 kzalloc(size, GFP_KERNEL);
1035 if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
1036 r600_free_extended_power_table(rdev);
1037 return -ENOMEM;
1038 }
1039 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
1040 limits->numEntries;
1041 entry = &limits->entries[0];
1042 for (i = 0; i < limits->numEntries; i++) {
1043 VCEClockInfo *vce_clk = (VCEClockInfo *)
1044 ((u8 *)&array->entries[0] +
1045 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
1046 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
1047 le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
1048 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
1049 le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
1050 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
1051 le16_to_cpu(entry->usVoltage);
1052 entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
1053 ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
1054 }
1055 }
1056 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
1057 ext_hdr->usUVDTableOffset) {
1058 UVDClockInfoArray *array = (UVDClockInfoArray *)
1059 (mode_info->atom_context->bios + data_offset +
1060 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
1061 ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
1062 (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
1063 (mode_info->atom_context->bios + data_offset +
1064 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
1065 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
1066 ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
1067 u32 size = limits->numEntries *
1068 sizeof(struct radeon_uvd_clock_voltage_dependency_entry);
1069 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
1070 kzalloc(size, GFP_KERNEL);
1071 if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
1072 r600_free_extended_power_table(rdev);
1073 return -ENOMEM;
1074 }
1075 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
1076 limits->numEntries;
1077 entry = &limits->entries[0];
1078 for (i = 0; i < limits->numEntries; i++) {
1079 UVDClockInfo *uvd_clk = (UVDClockInfo *)
1080 ((u8 *)&array->entries[0] +
1081 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
1082 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
1083 le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
1084 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
1085 le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
1086 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
1087 le16_to_cpu(limits->entries[i].usVoltage);
1088 entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
1089 ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
1090 }
1091 }
1092 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
1093 ext_hdr->usSAMUTableOffset) {
1094 ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
1095 (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
1096 (mode_info->atom_context->bios + data_offset +
1097 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
1098 ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
1099 u32 size = limits->numEntries *
1100 sizeof(struct radeon_clock_voltage_dependency_entry);
1101 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
1102 kzalloc(size, GFP_KERNEL);
1103 if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
1104 r600_free_extended_power_table(rdev);
1105 return -ENOMEM;
1106 }
1107 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
1108 limits->numEntries;
1109 entry = &limits->entries[0];
1110 for (i = 0; i < limits->numEntries; i++) {
1111 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
1112 le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
1113 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
1114 le16_to_cpu(entry->usVoltage);
1115 entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
1116 ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
1117 }
1118 }
972 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) && 1119 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
973 ext_hdr->usPPMTableOffset) { 1120 ext_hdr->usPPMTableOffset) {
974 ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *) 1121 ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
@@ -977,10 +1124,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
977 rdev->pm.dpm.dyn_state.ppm_table = 1124 rdev->pm.dpm.dyn_state.ppm_table =
978 kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL); 1125 kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
979 if (!rdev->pm.dpm.dyn_state.ppm_table) { 1126 if (!rdev->pm.dpm.dyn_state.ppm_table) {
980 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); 1127 r600_free_extended_power_table(rdev);
981 kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
982 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
983 kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
984 return -ENOMEM; 1128 return -ENOMEM;
985 } 1129 }
986 rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign; 1130 rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
@@ -1003,6 +1147,71 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
1003 rdev->pm.dpm.dyn_state.ppm_table->tj_max = 1147 rdev->pm.dpm.dyn_state.ppm_table->tj_max =
1004 le32_to_cpu(ppm->ulTjmax); 1148 le32_to_cpu(ppm->ulTjmax);
1005 } 1149 }
1150 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
1151 ext_hdr->usACPTableOffset) {
1152 ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
1153 (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
1154 (mode_info->atom_context->bios + data_offset +
1155 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
1156 ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
1157 u32 size = limits->numEntries *
1158 sizeof(struct radeon_clock_voltage_dependency_entry);
1159 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
1160 kzalloc(size, GFP_KERNEL);
1161 if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
1162 r600_free_extended_power_table(rdev);
1163 return -ENOMEM;
1164 }
1165 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
1166 limits->numEntries;
1167 entry = &limits->entries[0];
1168 for (i = 0; i < limits->numEntries; i++) {
1169 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
1170 le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
1171 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
1172 le16_to_cpu(entry->usVoltage);
1173 entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
1174 ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
1175 }
1176 }
1177 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
1178 ext_hdr->usPowerTuneTableOffset) {
1179 u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
1180 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
1181 ATOM_PowerTune_Table *pt;
1182 rdev->pm.dpm.dyn_state.cac_tdp_table =
1183 kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL);
1184 if (!rdev->pm.dpm.dyn_state.cac_tdp_table) {
1185 r600_free_extended_power_table(rdev);
1186 return -ENOMEM;
1187 }
1188 if (rev > 0) {
1189 ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
1190 (mode_info->atom_context->bios + data_offset +
1191 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
1192 rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
1193 ppt->usMaximumPowerDeliveryLimit;
1194 pt = &ppt->power_tune_table;
1195 } else {
1196 ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
1197 (mode_info->atom_context->bios + data_offset +
1198 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
1199 rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
1200 pt = &ppt->power_tune_table;
1201 }
1202 rdev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
1203 rdev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
1204 le16_to_cpu(pt->usConfigurableTDP);
1205 rdev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
1206 rdev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
1207 le16_to_cpu(pt->usBatteryPowerLimit);
1208 rdev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
1209 le16_to_cpu(pt->usSmallPowerLimit);
1210 rdev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
1211 le16_to_cpu(pt->usLowCACLeakage);
1212 rdev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
1213 le16_to_cpu(pt->usHighCACLeakage);
1214 }
1006 } 1215 }
1007 1216
1008 return 0; 1217 return 0;
@@ -1016,12 +1225,24 @@ void r600_free_extended_power_table(struct radeon_device *rdev)
1016 kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); 1225 kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
1017 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) 1226 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries)
1018 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); 1227 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
1228 if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries)
1229 kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries);
1019 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) 1230 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries)
1020 kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries); 1231 kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
1021 if (rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) 1232 if (rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
1022 kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries); 1233 kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries);
1023 if (rdev->pm.dpm.dyn_state.ppm_table) 1234 if (rdev->pm.dpm.dyn_state.ppm_table)
1024 kfree(rdev->pm.dpm.dyn_state.ppm_table); 1235 kfree(rdev->pm.dpm.dyn_state.ppm_table);
1236 if (rdev->pm.dpm.dyn_state.cac_tdp_table)
1237 kfree(rdev->pm.dpm.dyn_state.cac_tdp_table);
1238 if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
1239 kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries);
1240 if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
1241 kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries);
1242 if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
1243 kfree(rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries);
1244 if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
1245 kfree(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries);
1025} 1246}
1026 1247
1027enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev, 1248enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
@@ -1046,3 +1267,36 @@ enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
1046 } 1267 }
1047 return RADEON_PCIE_GEN1; 1268 return RADEON_PCIE_GEN1;
1048} 1269}
1270
1271u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
1272 u16 asic_lanes,
1273 u16 default_lanes)
1274{
1275 switch (asic_lanes) {
1276 case 0:
1277 default:
1278 return default_lanes;
1279 case 1:
1280 return 1;
1281 case 2:
1282 return 2;
1283 case 4:
1284 return 4;
1285 case 8:
1286 return 8;
1287 case 12:
1288 return 12;
1289 case 16:
1290 return 16;
1291 }
1292}
1293
1294u8 r600_encode_pci_lane_width(u32 lanes)
1295{
1296 u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };
1297
1298 if (lanes > 16)
1299 return 0;
1300
1301 return encoded_lanes[lanes];
1302}
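/* Editorial sketch (not part of this patch): r600_encode_pci_lane_width()
 * maps a physical lane count to the register field via the lookup table
 * above (1->1, 2->2, 4->3, 8->4, 12->5, 16->6), and
 * r600_get_pcie_lane_support() falls back to a default when the ASIC
 * reports nothing.  A hypothetical caller combining the two:
 */
static unsigned char example_lane_width_field(struct radeon_device *rdev,
					      unsigned short asic_lanes,
					      unsigned short default_lanes)
{
	unsigned short lanes = r600_get_pcie_lane_support(rdev, asic_lanes,
							  default_lanes);

	/* e.g. lanes == 8 encodes to 4 per the table above */
	return r600_encode_pci_lane_width(lanes);
}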
diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h
index 7c822d9ae53d..1000bf9719f2 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.h
+++ b/drivers/gpu/drm/radeon/r600_dpm.h
@@ -130,6 +130,7 @@ void r600_dpm_print_cap_info(u32 caps);
130void r600_dpm_print_ps_status(struct radeon_device *rdev, 130void r600_dpm_print_ps_status(struct radeon_device *rdev,
131 struct radeon_ps *rps); 131 struct radeon_ps *rps);
132u32 r600_dpm_get_vblank_time(struct radeon_device *rdev); 132u32 r600_dpm_get_vblank_time(struct radeon_device *rdev);
133u32 r600_dpm_get_vrefresh(struct radeon_device *rdev);
133bool r600_is_uvd_state(u32 class, u32 class2); 134bool r600_is_uvd_state(u32 class, u32 class2);
134void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b, 135void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
135 u32 *p, u32 *u); 136 u32 *p, u32 *u);
@@ -224,4 +225,9 @@ enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
224 enum radeon_pcie_gen asic_gen, 225 enum radeon_pcie_gen asic_gen,
225 enum radeon_pcie_gen default_gen); 226 enum radeon_pcie_gen default_gen);
226 227
228u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
229 u16 asic_lanes,
230 u16 default_lanes);
231u8 r600_encode_pci_lane_width(u32 lanes);
232
227#endif 233#endif
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index f264df5470f7..f443010ce90b 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -283,6 +283,107 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
283 } 283 }
284} 284}
285 285
286static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
287{
288 struct radeon_device *rdev = encoder->dev->dev_private;
289 struct drm_connector *connector;
290 struct radeon_connector *radeon_connector = NULL;
291 u32 tmp;
292 u8 *sadb;
293 int sad_count;
294
295 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
296 if (connector->encoder == encoder)
297 radeon_connector = to_radeon_connector(connector);
298 }
299
300 if (!radeon_connector) {
301 DRM_ERROR("Couldn't find encoder's connector\n");
302 return;
303 }
304
305 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
306 if (sad_count < 0) {
307 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
308 return;
309 }
310
311 /* program the speaker allocation */
312 tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
313 tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
314 /* set HDMI mode */
315 tmp |= HDMI_CONNECTION;
316 if (sad_count)
317 tmp |= SPEAKER_ALLOCATION(sadb[0]);
318 else
319 tmp |= SPEAKER_ALLOCATION(5); /* stereo */
320 WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
321
322 kfree(sadb);
323}
324
325static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
326{
327 struct radeon_device *rdev = encoder->dev->dev_private;
328 struct drm_connector *connector;
329 struct radeon_connector *radeon_connector = NULL;
330 struct cea_sad *sads;
331 int i, sad_count;
332
333 static const u16 eld_reg_to_type[][2] = {
334 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
335 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
336 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
337 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
338 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
339 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
340 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
341 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
342 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
343 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
344 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
345 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
346 };
347
348 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
349 if (connector->encoder == encoder)
350 radeon_connector = to_radeon_connector(connector);
351 }
352
353 if (!radeon_connector) {
354 DRM_ERROR("Couldn't find encoder's connector\n");
355 return;
356 }
357
358 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
359 if (sad_count < 0) {
360 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
361 return;
362 }
363 BUG_ON(!sads);
364
365 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
366 u32 value = 0;
367 int j;
368
369 for (j = 0; j < sad_count; j++) {
370 struct cea_sad *sad = &sads[j];
371
372 if (sad->format == eld_reg_to_type[i][1]) {
373 value = MAX_CHANNELS(sad->channels) |
374 DESCRIPTOR_BYTE_2(sad->byte2) |
375 SUPPORTED_FREQUENCIES(sad->freq);
376 if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
377 value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
378 break;
379 }
380 }
381 WREG32(eld_reg_to_type[i][0], value);
382 }
383
384 kfree(sads);
385}
386
286/* 387/*
287 * update the info frames with the data from the current display mode 388 * update the info frames with the data from the current display mode
288 */ 389 */
@@ -327,6 +428,11 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
327 HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ 428 HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
328 } 429 }
329 430
431 if (ASIC_IS_DCE32(rdev)) {
432 dce3_2_afmt_write_speaker_allocation(encoder);
433 dce3_2_afmt_write_sad_regs(encoder);
434 }
435
330 WREG32(HDMI0_ACR_PACKET_CONTROL + offset, 436 WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
331 HDMI0_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */ 437 HDMI0_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
332 HDMI0_ACR_SOURCE); /* select SW CTS value */ 438 HDMI0_ACR_SOURCE); /* select SW CTS value */
@@ -382,7 +488,7 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
382 struct radeon_device *rdev = dev->dev_private; 488 struct radeon_device *rdev = dev->dev_private;
383 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 489 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
384 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 490 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
385 struct r600_audio audio = r600_audio_status(rdev); 491 struct r600_audio_pin audio = r600_audio_status(rdev);
386 uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE]; 492 uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
387 struct hdmi_audio_infoframe frame; 493 struct hdmi_audio_infoframe frame;
388 uint32_t offset; 494 uint32_t offset;
@@ -491,6 +597,11 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
491 if (!enable && !dig->afmt->enabled) 597 if (!enable && !dig->afmt->enabled)
492 return; 598 return;
493 599
600 if (enable)
601 dig->afmt->pin = r600_audio_get_pin(rdev);
602 else
603 dig->afmt->pin = NULL;
604
494 /* Older chipsets require setting HDMI and routing manually */ 605 /* Older chipsets require setting HDMI and routing manually */
495 if (!ASIC_IS_DCE3(rdev)) { 606 if (!ASIC_IS_DCE3(rdev)) {
496 if (enable) 607 if (enable)
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 7c780839a7f4..454f90a849e4 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -960,6 +960,42 @@
960# define DIG_MODE_SDVO 4 960# define DIG_MODE_SDVO 4
961#define DIG1_CNTL 0x79a0 961#define DIG1_CNTL 0x79a0
962 962
963#define AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER 0x71bc
964#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0)
965#define SPEAKER_ALLOCATION_MASK (0x7f << 0)
966#define SPEAKER_ALLOCATION_SHIFT 0
967#define HDMI_CONNECTION (1 << 16)
968#define DP_CONNECTION (1 << 17)
969
970#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x71c8 /* LPCM */
971#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x71cc /* AC3 */
972#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x71d0 /* MPEG1 */
973#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x71d4 /* MP3 */
974#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x71d8 /* MPEG2 */
975#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x71dc /* AAC */
976#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x71e0 /* DTS */
977#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x71e4 /* ATRAC */
978#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x71e8 /* one bit audio - leave at 0 (default) */
979#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x71ec /* Dolby Digital */
980#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x71f0 /* DTS-HD */
981#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x71f4 /* MAT-MLP */
982#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x71f8 /* DTS */
983#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x71fc /* WMA Pro */
984# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
985/* max channels minus one. 7 = 8 channels */
986# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
987# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
988# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
989/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
990 * bit0 = 32 kHz
991 * bit1 = 44.1 kHz
992 * bit2 = 48 kHz
993 * bit3 = 88.2 kHz
994 * bit4 = 96 kHz
995 * bit5 = 176.4 kHz
996 * bit6 = 192 kHz
997 */
998
963/* rs6xx/rs740 and r6xx share the same HDMI blocks, however, rs6xx has only one 999/* rs6xx/rs740 and r6xx share the same HDMI blocks, however, rs6xx has only one
964 * instance of the blocks while r6xx has 2. DCE 3.0 cards are slightly 1000 * instance of the blocks while r6xx has 2. DCE 3.0 cards are slightly
965 * different due to the new DIG blocks, but also have 2 instances. 1001 * different due to the new DIG blocks, but also have 2 instances.
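/* Editorial example (not part of this patch): a worked encoding of one audio
 * descriptor register using the macros added above -- a 2-channel LPCM SAD
 * that supports 32/44.1/48 kHz.  DESCRIPTOR_BYTE_2() would additionally carry
 * the SAD's third byte (sample sizes for LPCM) and is left at 0 here.
 */
static unsigned int example_lpcm_descriptor(void)
{
	unsigned int freq_bits = 0x07;	/* bit0=32 kHz, bit1=44.1 kHz, bit2=48 kHz */

	/* "max channels minus one": 2 channels -> field value 1 */
	return MAX_CHANNELS(1) |
	       SUPPORTED_FREQUENCIES(freq_bits) |
	       SUPPORTED_FREQUENCIES_STEREO(freq_bits);	/* == 0x07000701 */
}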
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 9f19259667df..ff8b564ce2b2 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -152,6 +152,47 @@ extern int radeon_aspm;
152#define RADEON_RESET_MC (1 << 10) 152#define RADEON_RESET_MC (1 << 10)
153#define RADEON_RESET_DISPLAY (1 << 11) 153#define RADEON_RESET_DISPLAY (1 << 11)
154 154
155/* CG block flags */
156#define RADEON_CG_BLOCK_GFX (1 << 0)
157#define RADEON_CG_BLOCK_MC (1 << 1)
158#define RADEON_CG_BLOCK_SDMA (1 << 2)
159#define RADEON_CG_BLOCK_UVD (1 << 3)
160#define RADEON_CG_BLOCK_VCE (1 << 4)
161#define RADEON_CG_BLOCK_HDP (1 << 5)
162#define RADEON_CG_BLOCK_BIF (1 << 6)
163
164/* CG flags */
165#define RADEON_CG_SUPPORT_GFX_MGCG (1 << 0)
166#define RADEON_CG_SUPPORT_GFX_MGLS (1 << 1)
167#define RADEON_CG_SUPPORT_GFX_CGCG (1 << 2)
168#define RADEON_CG_SUPPORT_GFX_CGLS (1 << 3)
169#define RADEON_CG_SUPPORT_GFX_CGTS (1 << 4)
170#define RADEON_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
171#define RADEON_CG_SUPPORT_GFX_CP_LS (1 << 6)
172#define RADEON_CG_SUPPORT_GFX_RLC_LS (1 << 7)
173#define RADEON_CG_SUPPORT_MC_LS (1 << 8)
174#define RADEON_CG_SUPPORT_MC_MGCG (1 << 9)
175#define RADEON_CG_SUPPORT_SDMA_LS (1 << 10)
176#define RADEON_CG_SUPPORT_SDMA_MGCG (1 << 11)
177#define RADEON_CG_SUPPORT_BIF_LS (1 << 12)
178#define RADEON_CG_SUPPORT_UVD_MGCG (1 << 13)
179#define RADEON_CG_SUPPORT_VCE_MGCG (1 << 14)
180#define RADEON_CG_SUPPORT_HDP_LS (1 << 15)
181#define RADEON_CG_SUPPORT_HDP_MGCG (1 << 16)
182
183/* PG flags */
184#define RADEON_PG_SUPPORT_GFX_CG (1 << 0)
185#define RADEON_PG_SUPPORT_GFX_SMG (1 << 1)
186#define RADEON_PG_SUPPORT_GFX_DMG (1 << 2)
187#define RADEON_PG_SUPPORT_UVD (1 << 3)
188#define RADEON_PG_SUPPORT_VCE (1 << 4)
189#define RADEON_PG_SUPPORT_CP (1 << 5)
190#define RADEON_PG_SUPPORT_GDS (1 << 6)
191#define RADEON_PG_SUPPORT_RLC_SMU_HS (1 << 7)
192#define RADEON_PG_SUPPORT_SDMA (1 << 8)
193#define RADEON_PG_SUPPORT_ACP (1 << 9)
194#define RADEON_PG_SUPPORT_SAMU (1 << 10)
195
155/* max cursor sizes (in pixels) */ 196/* max cursor sizes (in pixels) */
156#define CURSOR_WIDTH 64 197#define CURSOR_WIDTH 64
157#define CURSOR_HEIGHT 64 198#define CURSOR_HEIGHT 64
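/* Editorial sketch (not part of this patch): the RADEON_CG_* and RADEON_PG_*
 * masks are meant to be OR'd into per-ASIC capability words and tested before
 * the corresponding clock- or power-gating registers are touched.  The field
 * name below (cg_flags) is assumed for illustration:
 */
static int example_mc_light_sleep_supported(struct radeon_device *rdev)
{
	/* per-ASIC init code would populate cg_flags with RADEON_CG_SUPPORT_* bits */
	return (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS) != 0;
}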
@@ -238,6 +279,12 @@ int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
238int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev, 279int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev,
239 u16 *voltage, 280 u16 *voltage,
240 u16 leakage_idx); 281 u16 leakage_idx);
282int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
283 u16 *leakage_id);
284int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev,
285 u16 *vddc, u16 *vddci,
286 u16 virtual_voltage_id,
287 u16 vbios_voltage_id);
241int radeon_atom_round_to_true_voltage(struct radeon_device *rdev, 288int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
242 u8 voltage_type, 289 u8 voltage_type,
243 u16 nominal_voltage, 290 u16 nominal_voltage,
@@ -492,9 +539,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
492int radeon_mode_dumb_mmap(struct drm_file *filp, 539int radeon_mode_dumb_mmap(struct drm_file *filp,
493 struct drm_device *dev, 540 struct drm_device *dev,
494 uint32_t handle, uint64_t *offset_p); 541 uint32_t handle, uint64_t *offset_p);
495int radeon_mode_dumb_destroy(struct drm_file *file_priv,
496 struct drm_device *dev,
497 uint32_t handle);
498 542
499/* 543/*
500 * Semaphores. 544 * Semaphores.
@@ -682,7 +726,7 @@ union radeon_irq_stat_regs {
682 726
683#define RADEON_MAX_HPD_PINS 6 727#define RADEON_MAX_HPD_PINS 6
684#define RADEON_MAX_CRTCS 6 728#define RADEON_MAX_CRTCS 6
685#define RADEON_MAX_AFMT_BLOCKS 6 729#define RADEON_MAX_AFMT_BLOCKS 7
686 730
687struct radeon_irq { 731struct radeon_irq {
688 bool installed; 732 bool installed;
@@ -746,8 +790,6 @@ struct radeon_ring {
746 uint32_t align_mask; 790 uint32_t align_mask;
747 uint32_t ptr_mask; 791 uint32_t ptr_mask;
748 bool ready; 792 bool ready;
749 u32 ptr_reg_shift;
750 u32 ptr_reg_mask;
751 u32 nop; 793 u32 nop;
752 u32 idx; 794 u32 idx;
753 u64 last_semaphore_signal_addr; 795 u64 last_semaphore_signal_addr;
@@ -844,35 +886,6 @@ struct r600_ih {
844 bool enabled; 886 bool enabled;
845}; 887};
846 888
847struct r600_blit_cp_primitives {
848 void (*set_render_target)(struct radeon_device *rdev, int format,
849 int w, int h, u64 gpu_addr);
850 void (*cp_set_surface_sync)(struct radeon_device *rdev,
851 u32 sync_type, u32 size,
852 u64 mc_addr);
853 void (*set_shaders)(struct radeon_device *rdev);
854 void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
855 void (*set_tex_resource)(struct radeon_device *rdev,
856 int format, int w, int h, int pitch,
857 u64 gpu_addr, u32 size);
858 void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
859 int x2, int y2);
860 void (*draw_auto)(struct radeon_device *rdev);
861 void (*set_default_state)(struct radeon_device *rdev);
862};
863
864struct r600_blit {
865 struct radeon_bo *shader_obj;
866 struct r600_blit_cp_primitives primitives;
867 int max_dim;
868 int ring_size_common;
869 int ring_size_per_loop;
870 u64 shader_gpu_addr;
871 u32 vs_offset, ps_offset;
872 u32 state_offset;
873 u32 state_len;
874};
875
876/* 889/*
877 * RLC stuff 890 * RLC stuff
878 */ 891 */
@@ -883,13 +896,19 @@ struct radeon_rlc {
883 struct radeon_bo *save_restore_obj; 896 struct radeon_bo *save_restore_obj;
884 uint64_t save_restore_gpu_addr; 897 uint64_t save_restore_gpu_addr;
885 volatile uint32_t *sr_ptr; 898 volatile uint32_t *sr_ptr;
886 u32 *reg_list; 899 const u32 *reg_list;
887 u32 reg_list_size; 900 u32 reg_list_size;
888 /* for clear state */ 901 /* for clear state */
889 struct radeon_bo *clear_state_obj; 902 struct radeon_bo *clear_state_obj;
890 uint64_t clear_state_gpu_addr; 903 uint64_t clear_state_gpu_addr;
891 volatile uint32_t *cs_ptr; 904 volatile uint32_t *cs_ptr;
892 struct cs_section_def *cs_data; 905 const struct cs_section_def *cs_data;
906 u32 clear_state_size;
907 /* for cp tables */
908 struct radeon_bo *cp_table_obj;
909 uint64_t cp_table_gpu_addr;
910 volatile uint32_t *cp_table_ptr;
911 u32 cp_table_size;
893}; 912};
894 913
895int radeon_ib_get(struct radeon_device *rdev, int ring, 914int radeon_ib_get(struct radeon_device *rdev, int ring,
@@ -921,8 +940,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
921int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring, 940int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
922 unsigned size, uint32_t *data); 941 unsigned size, uint32_t *data);
923int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size, 942int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
924 unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, 943 unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop);
925 u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
926void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp); 944void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
927 945
928 946
@@ -1036,7 +1054,6 @@ struct radeon_wb {
1036#define R600_WB_DMA_RPTR_OFFSET 1792 1054#define R600_WB_DMA_RPTR_OFFSET 1792
1037#define R600_WB_IH_WPTR_OFFSET 2048 1055#define R600_WB_IH_WPTR_OFFSET 2048
1038#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304 1056#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304
1039#define R600_WB_UVD_RPTR_OFFSET 2560
1040#define R600_WB_EVENT_OFFSET 3072 1057#define R600_WB_EVENT_OFFSET 3072
1041#define CIK_WB_CP1_WPTR_OFFSET 3328 1058#define CIK_WB_CP1_WPTR_OFFSET 3328
1042#define CIK_WB_CP2_WPTR_OFFSET 3584 1059#define CIK_WB_CP2_WPTR_OFFSET 3584
@@ -1147,6 +1164,7 @@ enum radeon_int_thermal_type {
1147 THERMAL_TYPE_SI, 1164 THERMAL_TYPE_SI,
1148 THERMAL_TYPE_EMC2103_WITH_INTERNAL, 1165 THERMAL_TYPE_EMC2103_WITH_INTERNAL,
1149 THERMAL_TYPE_CI, 1166 THERMAL_TYPE_CI,
1167 THERMAL_TYPE_KV,
1150}; 1168};
1151 1169
1152struct radeon_voltage { 1170struct radeon_voltage {
@@ -1220,6 +1238,9 @@ struct radeon_ps {
1220 /* UVD clocks */ 1238 /* UVD clocks */
1221 u32 vclk; 1239 u32 vclk;
1222 u32 dclk; 1240 u32 dclk;
1241 /* VCE clocks */
1242 u32 evclk;
1243 u32 ecclk;
1223 /* asic priv */ 1244 /* asic priv */
1224 void *ps_priv; 1245 void *ps_priv;
1225}; 1246};
@@ -1270,14 +1291,21 @@ struct radeon_clock_voltage_dependency_table {
1270 struct radeon_clock_voltage_dependency_entry *entries; 1291 struct radeon_clock_voltage_dependency_entry *entries;
1271}; 1292};
1272 1293
1273struct radeon_cac_leakage_entry { 1294union radeon_cac_leakage_entry {
1274 u16 vddc; 1295 struct {
1275 u32 leakage; 1296 u16 vddc;
1297 u32 leakage;
1298 };
1299 struct {
1300 u16 vddc1;
1301 u16 vddc2;
1302 u16 vddc3;
1303 };
1276}; 1304};
1277 1305
1278struct radeon_cac_leakage_table { 1306struct radeon_cac_leakage_table {
1279 u32 count; 1307 u32 count;
1280 struct radeon_cac_leakage_entry *entries; 1308 union radeon_cac_leakage_entry *entries;
1281}; 1309};
1282 1310
1283struct radeon_phase_shedding_limits_entry { 1311struct radeon_phase_shedding_limits_entry {
@@ -1291,6 +1319,28 @@ struct radeon_phase_shedding_limits_table {
1291 struct radeon_phase_shedding_limits_entry *entries; 1319 struct radeon_phase_shedding_limits_entry *entries;
1292}; 1320};
1293 1321
1322struct radeon_uvd_clock_voltage_dependency_entry {
1323 u32 vclk;
1324 u32 dclk;
1325 u16 v;
1326};
1327
1328struct radeon_uvd_clock_voltage_dependency_table {
1329 u8 count;
1330 struct radeon_uvd_clock_voltage_dependency_entry *entries;
1331};
1332
1333struct radeon_vce_clock_voltage_dependency_entry {
1334 u32 ecclk;
1335 u32 evclk;
1336 u16 v;
1337};
1338
1339struct radeon_vce_clock_voltage_dependency_table {
1340 u8 count;
1341 struct radeon_vce_clock_voltage_dependency_entry *entries;
1342};
1343
1294struct radeon_ppm_table { 1344struct radeon_ppm_table {
1295 u8 ppm_design; 1345 u8 ppm_design;
1296 u16 cpu_core_number; 1346 u16 cpu_core_number;
@@ -1304,11 +1354,27 @@ struct radeon_ppm_table {
1304 u32 tj_max; 1354 u32 tj_max;
1305}; 1355};
1306 1356
1357struct radeon_cac_tdp_table {
1358 u16 tdp;
1359 u16 configurable_tdp;
1360 u16 tdc;
1361 u16 battery_power_limit;
1362 u16 small_power_limit;
1363 u16 low_cac_leakage;
1364 u16 high_cac_leakage;
1365 u16 maximum_power_delivery_limit;
1366};
1367
1307struct radeon_dpm_dynamic_state { 1368struct radeon_dpm_dynamic_state {
1308 struct radeon_clock_voltage_dependency_table vddc_dependency_on_sclk; 1369 struct radeon_clock_voltage_dependency_table vddc_dependency_on_sclk;
1309 struct radeon_clock_voltage_dependency_table vddci_dependency_on_mclk; 1370 struct radeon_clock_voltage_dependency_table vddci_dependency_on_mclk;
1310 struct radeon_clock_voltage_dependency_table vddc_dependency_on_mclk; 1371 struct radeon_clock_voltage_dependency_table vddc_dependency_on_mclk;
1372 struct radeon_clock_voltage_dependency_table mvdd_dependency_on_mclk;
1311 struct radeon_clock_voltage_dependency_table vddc_dependency_on_dispclk; 1373 struct radeon_clock_voltage_dependency_table vddc_dependency_on_dispclk;
1374 struct radeon_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
1375 struct radeon_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
1376 struct radeon_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
1377 struct radeon_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
1312 struct radeon_clock_array valid_sclk_values; 1378 struct radeon_clock_array valid_sclk_values;
1313 struct radeon_clock_array valid_mclk_values; 1379 struct radeon_clock_array valid_mclk_values;
1314 struct radeon_clock_and_voltage_limits max_clock_voltage_on_dc; 1380 struct radeon_clock_and_voltage_limits max_clock_voltage_on_dc;
@@ -1320,6 +1386,7 @@ struct radeon_dpm_dynamic_state {
1320 struct radeon_cac_leakage_table cac_leakage_table; 1386 struct radeon_cac_leakage_table cac_leakage_table;
1321 struct radeon_phase_shedding_limits_table phase_shedding_limits_table; 1387 struct radeon_phase_shedding_limits_table phase_shedding_limits_table;
1322 struct radeon_ppm_table *ppm_table; 1388 struct radeon_ppm_table *ppm_table;
1389 struct radeon_cac_tdp_table *cac_tdp_table;
1323}; 1390};
1324 1391
1325struct radeon_dpm_fan { 1392struct radeon_dpm_fan {
@@ -1389,11 +1456,12 @@ struct radeon_dpm {
1389 struct radeon_dpm_thermal thermal; 1456 struct radeon_dpm_thermal thermal;
1390 /* forced levels */ 1457 /* forced levels */
1391 enum radeon_dpm_forced_level forced_level; 1458 enum radeon_dpm_forced_level forced_level;
1459 /* track UVD streams */
1460 unsigned sd;
1461 unsigned hd;
1392}; 1462};
1393 1463
1394void radeon_dpm_enable_power_state(struct radeon_device *rdev, 1464void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable);
1395 enum radeon_pm_state_type dpm_state);
1396
1397 1465
1398struct radeon_pm { 1466struct radeon_pm {
1399 struct mutex mutex; 1467 struct mutex mutex;
@@ -1470,6 +1538,7 @@ struct radeon_uvd {
1470 void *saved_bo; 1538 void *saved_bo;
1471 atomic_t handles[RADEON_MAX_UVD_HANDLES]; 1539 atomic_t handles[RADEON_MAX_UVD_HANDLES];
1472 struct drm_file *filp[RADEON_MAX_UVD_HANDLES]; 1540 struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
1541 unsigned img_size[RADEON_MAX_UVD_HANDLES];
1473 struct delayed_work idle_work; 1542 struct delayed_work idle_work;
1474}; 1543};
1475 1544
@@ -1498,12 +1567,21 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
1498int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev, 1567int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
1499 unsigned cg_upll_func_cntl); 1568 unsigned cg_upll_func_cntl);
1500 1569
1501struct r600_audio { 1570struct r600_audio_pin {
1502 int channels; 1571 int channels;
1503 int rate; 1572 int rate;
1504 int bits_per_sample; 1573 int bits_per_sample;
1505 u8 status_bits; 1574 u8 status_bits;
1506 u8 category_code; 1575 u8 category_code;
1576 u32 offset;
1577 bool connected;
1578 u32 id;
1579};
1580
1581struct r600_audio {
1582 bool enabled;
1583 struct r600_audio_pin pin[RADEON_MAX_AFMT_BLOCKS];
1584 int num_pins;
1507}; 1585};
1508 1586
1509/* 1587/*
@@ -1535,6 +1613,34 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
1535 unsigned nfiles); 1613 unsigned nfiles);
1536int radeon_debugfs_fence_init(struct radeon_device *rdev); 1614int radeon_debugfs_fence_init(struct radeon_device *rdev);
1537 1615
1616/*
1617 * ASIC ring specific functions.
1618 */
1619struct radeon_asic_ring {
1620 /* ring read/write ptr handling */
1621 u32 (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring);
1622 u32 (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
1623 void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
1624
1625 /* validating and patching of IBs */
1626 int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
1627 int (*cs_parse)(struct radeon_cs_parser *p);
1628
    1629 /* command emit functions */
1630 void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
1631 void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
1632 void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
1633 struct radeon_semaphore *semaphore, bool emit_wait);
1634 void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
1635
1636 /* testing functions */
1637 int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
1638 int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
1639 bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
1640
1641 /* deprecated */
1642 void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
1643};
1538 1644
1539/* 1645/*
1540 * ASIC specific functions. 1646 * ASIC specific functions.
@@ -1578,23 +1684,7 @@ struct radeon_asic {
1578 uint32_t incr, uint32_t flags); 1684 uint32_t incr, uint32_t flags);
1579 } vm; 1685 } vm;
1580 /* ring specific callbacks */ 1686 /* ring specific callbacks */
1581 struct { 1687 struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
1582 void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
1583 int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
1584 void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
1585 void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
1586 struct radeon_semaphore *semaphore, bool emit_wait);
1587 int (*cs_parse)(struct radeon_cs_parser *p);
1588 void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
1589 int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
1590 int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
1591 bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
1592 void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
1593
1594 u32 (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring);
1595 u32 (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
1596 void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
1597 } ring[RADEON_NUM_RINGS];
1598 /* irqs */ 1688 /* irqs */
1599 struct { 1689 struct {
1600 int (*set)(struct radeon_device *rdev); 1690 int (*set)(struct radeon_device *rdev);
@@ -1687,6 +1777,7 @@ struct radeon_asic {
1687 void (*debugfs_print_current_performance_level)(struct radeon_device *rdev, struct seq_file *m); 1777 void (*debugfs_print_current_performance_level)(struct radeon_device *rdev, struct seq_file *m);
1688 int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level); 1778 int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
1689 bool (*vblank_too_short)(struct radeon_device *rdev); 1779 bool (*vblank_too_short)(struct radeon_device *rdev);
1780 void (*powergate_uvd)(struct radeon_device *rdev, bool gate);
1690 } dpm; 1781 } dpm;
1691 /* pageflipping */ 1782 /* pageflipping */
1692 struct { 1783 struct {
@@ -2066,7 +2157,6 @@ struct radeon_device {
2066 const struct firmware *sdma_fw; /* CIK SDMA firmware */ 2157 const struct firmware *sdma_fw; /* CIK SDMA firmware */
2067 const struct firmware *smc_fw; /* SMC firmware */ 2158 const struct firmware *smc_fw; /* SMC firmware */
2068 const struct firmware *uvd_fw; /* UVD firmware */ 2159 const struct firmware *uvd_fw; /* UVD firmware */
2069 struct r600_blit r600_blit;
2070 struct r600_vram_scratch vram_scratch; 2160 struct r600_vram_scratch vram_scratch;
2071 int msi_enabled; /* msi enabled */ 2161 int msi_enabled; /* msi enabled */
2072 struct r600_ih ih; /* r6/700 interrupt ring */ 2162 struct r600_ih ih; /* r6/700 interrupt ring */
@@ -2077,9 +2167,8 @@ struct radeon_device {
2077 struct work_struct reset_work; 2167 struct work_struct reset_work;
2078 int num_crtc; /* number of crtcs */ 2168 int num_crtc; /* number of crtcs */
2079 struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ 2169 struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
2080 bool audio_enabled;
2081 bool has_uvd; 2170 bool has_uvd;
2082 struct r600_audio audio_status; /* audio stuff */ 2171 struct r600_audio audio; /* audio stuff */
2083 struct notifier_block acpi_nb; 2172 struct notifier_block acpi_nb;
2084 /* only one userspace can use Hyperz features or CMASK at a time */ 2173 /* only one userspace can use Hyperz features or CMASK at a time */
2085 struct drm_file *hyperz_filp; 2174 struct drm_file *hyperz_filp;
@@ -2097,6 +2186,9 @@ struct radeon_device {
2097 struct radeon_atcs atcs; 2186 struct radeon_atcs atcs;
2098 /* srbm instance registers */ 2187 /* srbm instance registers */
2099 struct mutex srbm_mutex; 2188 struct mutex srbm_mutex;
2189 /* clock, powergating flags */
2190 u32 cg_flags;
2191 u32 pg_flags;
2100}; 2192};
2101 2193
2102int radeon_device_init(struct radeon_device *rdev, 2194int radeon_device_init(struct radeon_device *rdev,
@@ -2155,6 +2247,8 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
2155#define WREG32_PIF_PHY1(reg, v) eg_pif_phy1_wreg(rdev, (reg), (v)) 2247#define WREG32_PIF_PHY1(reg, v) eg_pif_phy1_wreg(rdev, (reg), (v))
2156#define RREG32_UVD_CTX(reg) r600_uvd_ctx_rreg(rdev, (reg)) 2248#define RREG32_UVD_CTX(reg) r600_uvd_ctx_rreg(rdev, (reg))
2157#define WREG32_UVD_CTX(reg, v) r600_uvd_ctx_wreg(rdev, (reg), (v)) 2249#define WREG32_UVD_CTX(reg, v) r600_uvd_ctx_wreg(rdev, (reg), (v))
2250#define RREG32_DIDT(reg) cik_didt_rreg(rdev, (reg))
2251#define WREG32_DIDT(reg, v) cik_didt_wreg(rdev, (reg), (v))
2158#define WREG32_P(reg, val, mask) \ 2252#define WREG32_P(reg, val, mask) \
2159 do { \ 2253 do { \
2160 uint32_t tmp_ = RREG32(reg); \ 2254 uint32_t tmp_ = RREG32(reg); \
@@ -2286,6 +2380,22 @@ static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2286 WREG32(R600_UVD_CTX_DATA, (v)); 2380 WREG32(R600_UVD_CTX_DATA, (v));
2287} 2381}
2288 2382
2383
2384static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg)
2385{
2386 u32 r;
2387
2388 WREG32(CIK_DIDT_IND_INDEX, (reg));
2389 r = RREG32(CIK_DIDT_IND_DATA);
2390 return r;
2391}
2392
2393static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2394{
2395 WREG32(CIK_DIDT_IND_INDEX, (reg));
2396 WREG32(CIK_DIDT_IND_DATA, (v));
2397}
2398
2289void r100_pll_errata_after_index(struct radeon_device *rdev); 2399void r100_pll_errata_after_index(struct radeon_device *rdev);
2290 2400
2291 2401
@@ -2381,7 +2491,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
2381#define radeon_fini(rdev) (rdev)->asic->fini((rdev)) 2491#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
2382#define radeon_resume(rdev) (rdev)->asic->resume((rdev)) 2492#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
2383#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) 2493#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
2384#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p)) 2494#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)]->cs_parse((p))
2385#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) 2495#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
2386#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) 2496#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
2387#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) 2497#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
@@ -2389,16 +2499,16 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
2389#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) 2499#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
2390#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) 2500#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
2391#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags))) 2501#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
2392#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp)) 2502#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
2393#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp)) 2503#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
2394#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp)) 2504#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
2395#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib)) 2505#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_execute((rdev), (ib))
2396#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib)) 2506#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_parse((rdev), (ib))
2397#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp)) 2507#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)]->is_lockup((rdev), (cp))
2398#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm)) 2508#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)]->vm_flush((rdev), (r), (vm))
2399#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx].get_rptr((rdev), (r)) 2509#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r))
2400#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx].get_wptr((rdev), (r)) 2510#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r))
2401#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx].set_wptr((rdev), (r)) 2511#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r))
2402#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev)) 2512#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
2403#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev)) 2513#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
2404#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc)) 2514#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
@@ -2406,8 +2516,8 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
2406#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e)) 2516#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
2407#define radeon_hdmi_enable(rdev, e, b) (rdev)->asic->display.hdmi_enable((e), (b)) 2517#define radeon_hdmi_enable(rdev, e, b) (rdev)->asic->display.hdmi_enable((e), (b))
2408#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m)) 2518#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
2409#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence)) 2519#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence))
2410#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait)) 2520#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
2411#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f)) 2521#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
2412#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f)) 2522#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
2413#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f)) 2523#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
@@ -2458,6 +2568,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
2458#define radeon_dpm_debugfs_print_current_performance_level(rdev, m) rdev->asic->dpm.debugfs_print_current_performance_level((rdev), (m)) 2568#define radeon_dpm_debugfs_print_current_performance_level(rdev, m) rdev->asic->dpm.debugfs_print_current_performance_level((rdev), (m))
2459#define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l)) 2569#define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l))
2460#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev)) 2570#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
2571#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g))
2461 2572
2462/* Common functions */ 2573/* Common functions */
2463/* AGP */ 2574/* AGP */
@@ -2524,6 +2635,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
2524 2635
2525/* audio */ 2636/* audio */
2526void r600_audio_update_hdmi(struct work_struct *work); 2637void r600_audio_update_hdmi(struct work_struct *work);
2638struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
2639struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
2527 2640
2528/* 2641/*
2529 * R600 vram scratch functions 2642 * R600 vram scratch functions
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index f8f8b3113ddd..630853b96841 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -172,6 +172,21 @@ void radeon_agp_disable(struct radeon_device *rdev)
172/* 172/*
173 * ASIC 173 * ASIC
174 */ 174 */
175
176static struct radeon_asic_ring r100_gfx_ring = {
177 .ib_execute = &r100_ring_ib_execute,
178 .emit_fence = &r100_fence_ring_emit,
179 .emit_semaphore = &r100_semaphore_ring_emit,
180 .cs_parse = &r100_cs_parse,
181 .ring_start = &r100_ring_start,
182 .ring_test = &r100_ring_test,
183 .ib_test = &r100_ib_test,
184 .is_lockup = &r100_gpu_is_lockup,
185 .get_rptr = &radeon_ring_generic_get_rptr,
186 .get_wptr = &radeon_ring_generic_get_wptr,
187 .set_wptr = &radeon_ring_generic_set_wptr,
188};
189
175static struct radeon_asic r100_asic = { 190static struct radeon_asic r100_asic = {
176 .init = &r100_init, 191 .init = &r100_init,
177 .fini = &r100_fini, 192 .fini = &r100_fini,
@@ -187,19 +202,7 @@ static struct radeon_asic r100_asic = {
187 .set_page = &r100_pci_gart_set_page, 202 .set_page = &r100_pci_gart_set_page,
188 }, 203 },
189 .ring = { 204 .ring = {
190 [RADEON_RING_TYPE_GFX_INDEX] = { 205 [RADEON_RING_TYPE_GFX_INDEX] = &r100_gfx_ring
191 .ib_execute = &r100_ring_ib_execute,
192 .emit_fence = &r100_fence_ring_emit,
193 .emit_semaphore = &r100_semaphore_ring_emit,
194 .cs_parse = &r100_cs_parse,
195 .ring_start = &r100_ring_start,
196 .ring_test = &r100_ring_test,
197 .ib_test = &r100_ib_test,
198 .is_lockup = &r100_gpu_is_lockup,
199 .get_rptr = &radeon_ring_generic_get_rptr,
200 .get_wptr = &radeon_ring_generic_get_wptr,
201 .set_wptr = &radeon_ring_generic_set_wptr,
202 }
203 }, 206 },
204 .irq = { 207 .irq = {
205 .set = &r100_irq_set, 208 .set = &r100_irq_set,
@@ -266,19 +269,7 @@ static struct radeon_asic r200_asic = {
266 .set_page = &r100_pci_gart_set_page, 269 .set_page = &r100_pci_gart_set_page,
267 }, 270 },
268 .ring = { 271 .ring = {
269 [RADEON_RING_TYPE_GFX_INDEX] = { 272 [RADEON_RING_TYPE_GFX_INDEX] = &r100_gfx_ring
270 .ib_execute = &r100_ring_ib_execute,
271 .emit_fence = &r100_fence_ring_emit,
272 .emit_semaphore = &r100_semaphore_ring_emit,
273 .cs_parse = &r100_cs_parse,
274 .ring_start = &r100_ring_start,
275 .ring_test = &r100_ring_test,
276 .ib_test = &r100_ib_test,
277 .is_lockup = &r100_gpu_is_lockup,
278 .get_rptr = &radeon_ring_generic_get_rptr,
279 .get_wptr = &radeon_ring_generic_get_wptr,
280 .set_wptr = &radeon_ring_generic_set_wptr,
281 }
282 }, 273 },
283 .irq = { 274 .irq = {
284 .set = &r100_irq_set, 275 .set = &r100_irq_set,
@@ -330,6 +321,20 @@ static struct radeon_asic r200_asic = {
330 }, 321 },
331}; 322};
332 323
324static struct radeon_asic_ring r300_gfx_ring = {
325 .ib_execute = &r100_ring_ib_execute,
326 .emit_fence = &r300_fence_ring_emit,
327 .emit_semaphore = &r100_semaphore_ring_emit,
328 .cs_parse = &r300_cs_parse,
329 .ring_start = &r300_ring_start,
330 .ring_test = &r100_ring_test,
331 .ib_test = &r100_ib_test,
332 .is_lockup = &r100_gpu_is_lockup,
333 .get_rptr = &radeon_ring_generic_get_rptr,
334 .get_wptr = &radeon_ring_generic_get_wptr,
335 .set_wptr = &radeon_ring_generic_set_wptr,
336};
337
333static struct radeon_asic r300_asic = { 338static struct radeon_asic r300_asic = {
334 .init = &r300_init, 339 .init = &r300_init,
335 .fini = &r300_fini, 340 .fini = &r300_fini,
@@ -345,19 +350,7 @@ static struct radeon_asic r300_asic = {
345 .set_page = &r100_pci_gart_set_page, 350 .set_page = &r100_pci_gart_set_page,
346 }, 351 },
347 .ring = { 352 .ring = {
348 [RADEON_RING_TYPE_GFX_INDEX] = { 353 [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
349 .ib_execute = &r100_ring_ib_execute,
350 .emit_fence = &r300_fence_ring_emit,
351 .emit_semaphore = &r100_semaphore_ring_emit,
352 .cs_parse = &r300_cs_parse,
353 .ring_start = &r300_ring_start,
354 .ring_test = &r100_ring_test,
355 .ib_test = &r100_ib_test,
356 .is_lockup = &r100_gpu_is_lockup,
357 .get_rptr = &radeon_ring_generic_get_rptr,
358 .get_wptr = &radeon_ring_generic_get_wptr,
359 .set_wptr = &radeon_ring_generic_set_wptr,
360 }
361 }, 354 },
362 .irq = { 355 .irq = {
363 .set = &r100_irq_set, 356 .set = &r100_irq_set,
@@ -424,19 +417,7 @@ static struct radeon_asic r300_asic_pcie = {
424 .set_page = &rv370_pcie_gart_set_page, 417 .set_page = &rv370_pcie_gart_set_page,
425 }, 418 },
426 .ring = { 419 .ring = {
427 [RADEON_RING_TYPE_GFX_INDEX] = { 420 [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
428 .ib_execute = &r100_ring_ib_execute,
429 .emit_fence = &r300_fence_ring_emit,
430 .emit_semaphore = &r100_semaphore_ring_emit,
431 .cs_parse = &r300_cs_parse,
432 .ring_start = &r300_ring_start,
433 .ring_test = &r100_ring_test,
434 .ib_test = &r100_ib_test,
435 .is_lockup = &r100_gpu_is_lockup,
436 .get_rptr = &radeon_ring_generic_get_rptr,
437 .get_wptr = &radeon_ring_generic_get_wptr,
438 .set_wptr = &radeon_ring_generic_set_wptr,
439 }
440 }, 421 },
441 .irq = { 422 .irq = {
442 .set = &r100_irq_set, 423 .set = &r100_irq_set,
@@ -503,19 +484,7 @@ static struct radeon_asic r420_asic = {
503 .set_page = &rv370_pcie_gart_set_page, 484 .set_page = &rv370_pcie_gart_set_page,
504 }, 485 },
505 .ring = { 486 .ring = {
506 [RADEON_RING_TYPE_GFX_INDEX] = { 487 [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
507 .ib_execute = &r100_ring_ib_execute,
508 .emit_fence = &r300_fence_ring_emit,
509 .emit_semaphore = &r100_semaphore_ring_emit,
510 .cs_parse = &r300_cs_parse,
511 .ring_start = &r300_ring_start,
512 .ring_test = &r100_ring_test,
513 .ib_test = &r100_ib_test,
514 .is_lockup = &r100_gpu_is_lockup,
515 .get_rptr = &radeon_ring_generic_get_rptr,
516 .get_wptr = &radeon_ring_generic_get_wptr,
517 .set_wptr = &radeon_ring_generic_set_wptr,
518 }
519 }, 488 },
520 .irq = { 489 .irq = {
521 .set = &r100_irq_set, 490 .set = &r100_irq_set,
@@ -582,19 +551,7 @@ static struct radeon_asic rs400_asic = {
582 .set_page = &rs400_gart_set_page, 551 .set_page = &rs400_gart_set_page,
583 }, 552 },
584 .ring = { 553 .ring = {
585 [RADEON_RING_TYPE_GFX_INDEX] = { 554 [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
586 .ib_execute = &r100_ring_ib_execute,
587 .emit_fence = &r300_fence_ring_emit,
588 .emit_semaphore = &r100_semaphore_ring_emit,
589 .cs_parse = &r300_cs_parse,
590 .ring_start = &r300_ring_start,
591 .ring_test = &r100_ring_test,
592 .ib_test = &r100_ib_test,
593 .is_lockup = &r100_gpu_is_lockup,
594 .get_rptr = &radeon_ring_generic_get_rptr,
595 .get_wptr = &radeon_ring_generic_get_wptr,
596 .set_wptr = &radeon_ring_generic_set_wptr,
597 }
598 }, 555 },
599 .irq = { 556 .irq = {
600 .set = &r100_irq_set, 557 .set = &r100_irq_set,
@@ -661,19 +618,7 @@ static struct radeon_asic rs600_asic = {
661 .set_page = &rs600_gart_set_page, 618 .set_page = &rs600_gart_set_page,
662 }, 619 },
663 .ring = { 620 .ring = {
664 [RADEON_RING_TYPE_GFX_INDEX] = { 621 [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
665 .ib_execute = &r100_ring_ib_execute,
666 .emit_fence = &r300_fence_ring_emit,
667 .emit_semaphore = &r100_semaphore_ring_emit,
668 .cs_parse = &r300_cs_parse,
669 .ring_start = &r300_ring_start,
670 .ring_test = &r100_ring_test,
671 .ib_test = &r100_ib_test,
672 .is_lockup = &r100_gpu_is_lockup,
673 .get_rptr = &radeon_ring_generic_get_rptr,
674 .get_wptr = &radeon_ring_generic_get_wptr,
675 .set_wptr = &radeon_ring_generic_set_wptr,
676 }
677 }, 622 },
678 .irq = { 623 .irq = {
679 .set = &rs600_irq_set, 624 .set = &rs600_irq_set,
@@ -742,19 +687,7 @@ static struct radeon_asic rs690_asic = {
742 .set_page = &rs400_gart_set_page, 687 .set_page = &rs400_gart_set_page,
743 }, 688 },
744 .ring = { 689 .ring = {
745 [RADEON_RING_TYPE_GFX_INDEX] = { 690 [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
746 .ib_execute = &r100_ring_ib_execute,
747 .emit_fence = &r300_fence_ring_emit,
748 .emit_semaphore = &r100_semaphore_ring_emit,
749 .cs_parse = &r300_cs_parse,
750 .ring_start = &r300_ring_start,
751 .ring_test = &r100_ring_test,
752 .ib_test = &r100_ib_test,
753 .is_lockup = &r100_gpu_is_lockup,
754 .get_rptr = &radeon_ring_generic_get_rptr,
755 .get_wptr = &radeon_ring_generic_get_wptr,
756 .set_wptr = &radeon_ring_generic_set_wptr,
757 }
758 }, 691 },
759 .irq = { 692 .irq = {
760 .set = &rs600_irq_set, 693 .set = &rs600_irq_set,
@@ -823,19 +756,7 @@ static struct radeon_asic rv515_asic = {
823 .set_page = &rv370_pcie_gart_set_page, 756 .set_page = &rv370_pcie_gart_set_page,
824 }, 757 },
825 .ring = { 758 .ring = {
826 [RADEON_RING_TYPE_GFX_INDEX] = { 759 [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
827 .ib_execute = &r100_ring_ib_execute,
828 .emit_fence = &r300_fence_ring_emit,
829 .emit_semaphore = &r100_semaphore_ring_emit,
830 .cs_parse = &r300_cs_parse,
831 .ring_start = &rv515_ring_start,
832 .ring_test = &r100_ring_test,
833 .ib_test = &r100_ib_test,
834 .is_lockup = &r100_gpu_is_lockup,
835 .get_rptr = &radeon_ring_generic_get_rptr,
836 .get_wptr = &radeon_ring_generic_get_wptr,
837 .set_wptr = &radeon_ring_generic_set_wptr,
838 }
839 }, 760 },
840 .irq = { 761 .irq = {
841 .set = &rs600_irq_set, 762 .set = &rs600_irq_set,
@@ -902,19 +823,7 @@ static struct radeon_asic r520_asic = {
902 .set_page = &rv370_pcie_gart_set_page, 823 .set_page = &rv370_pcie_gart_set_page,
903 }, 824 },
904 .ring = { 825 .ring = {
905 [RADEON_RING_TYPE_GFX_INDEX] = { 826 [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
906 .ib_execute = &r100_ring_ib_execute,
907 .emit_fence = &r300_fence_ring_emit,
908 .emit_semaphore = &r100_semaphore_ring_emit,
909 .cs_parse = &r300_cs_parse,
910 .ring_start = &rv515_ring_start,
911 .ring_test = &r100_ring_test,
912 .ib_test = &r100_ib_test,
913 .is_lockup = &r100_gpu_is_lockup,
914 .get_rptr = &radeon_ring_generic_get_rptr,
915 .get_wptr = &radeon_ring_generic_get_wptr,
916 .set_wptr = &radeon_ring_generic_set_wptr,
917 }
918 }, 827 },
919 .irq = { 828 .irq = {
920 .set = &rs600_irq_set, 829 .set = &rs600_irq_set,
@@ -966,6 +875,32 @@ static struct radeon_asic r520_asic = {
966 }, 875 },
967}; 876};
968 877
878static struct radeon_asic_ring r600_gfx_ring = {
879 .ib_execute = &r600_ring_ib_execute,
880 .emit_fence = &r600_fence_ring_emit,
881 .emit_semaphore = &r600_semaphore_ring_emit,
882 .cs_parse = &r600_cs_parse,
883 .ring_test = &r600_ring_test,
884 .ib_test = &r600_ib_test,
885 .is_lockup = &r600_gfx_is_lockup,
886 .get_rptr = &radeon_ring_generic_get_rptr,
887 .get_wptr = &radeon_ring_generic_get_wptr,
888 .set_wptr = &radeon_ring_generic_set_wptr,
889};
890
891static struct radeon_asic_ring r600_dma_ring = {
892 .ib_execute = &r600_dma_ring_ib_execute,
893 .emit_fence = &r600_dma_fence_ring_emit,
894 .emit_semaphore = &r600_dma_semaphore_ring_emit,
895 .cs_parse = &r600_dma_cs_parse,
896 .ring_test = &r600_dma_ring_test,
897 .ib_test = &r600_dma_ib_test,
898 .is_lockup = &r600_dma_is_lockup,
899 .get_rptr = &r600_dma_get_rptr,
900 .get_wptr = &r600_dma_get_wptr,
901 .set_wptr = &r600_dma_set_wptr,
902};
903
969static struct radeon_asic r600_asic = { 904static struct radeon_asic r600_asic = {
970 .init = &r600_init, 905 .init = &r600_init,
971 .fini = &r600_fini, 906 .fini = &r600_fini,
@@ -983,30 +918,8 @@ static struct radeon_asic r600_asic = {
983 .set_page = &rs600_gart_set_page, 918 .set_page = &rs600_gart_set_page,
984 }, 919 },
985 .ring = { 920 .ring = {
986 [RADEON_RING_TYPE_GFX_INDEX] = { 921 [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
987 .ib_execute = &r600_ring_ib_execute, 922 [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
988 .emit_fence = &r600_fence_ring_emit,
989 .emit_semaphore = &r600_semaphore_ring_emit,
990 .cs_parse = &r600_cs_parse,
991 .ring_test = &r600_ring_test,
992 .ib_test = &r600_ib_test,
993 .is_lockup = &r600_gfx_is_lockup,
994 .get_rptr = &radeon_ring_generic_get_rptr,
995 .get_wptr = &radeon_ring_generic_get_wptr,
996 .set_wptr = &radeon_ring_generic_set_wptr,
997 },
998 [R600_RING_TYPE_DMA_INDEX] = {
999 .ib_execute = &r600_dma_ring_ib_execute,
1000 .emit_fence = &r600_dma_fence_ring_emit,
1001 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1002 .cs_parse = &r600_dma_cs_parse,
1003 .ring_test = &r600_dma_ring_test,
1004 .ib_test = &r600_dma_ib_test,
1005 .is_lockup = &r600_dma_is_lockup,
1006 .get_rptr = &radeon_ring_generic_get_rptr,
1007 .get_wptr = &radeon_ring_generic_get_wptr,
1008 .set_wptr = &radeon_ring_generic_set_wptr,
1009 }
1010 }, 923 },
1011 .irq = { 924 .irq = {
1012 .set = &r600_irq_set, 925 .set = &r600_irq_set,
@@ -1022,7 +935,7 @@ static struct radeon_asic r600_asic = {
1022 .hdmi_setmode = &r600_hdmi_setmode, 935 .hdmi_setmode = &r600_hdmi_setmode,
1023 }, 936 },
1024 .copy = { 937 .copy = {
1025 .blit = &r600_copy_blit, 938 .blit = &r600_copy_cpdma,
1026 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 939 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1027 .dma = &r600_copy_dma, 940 .dma = &r600_copy_dma,
1028 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 941 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1078,30 +991,8 @@ static struct radeon_asic rv6xx_asic = {
1078 .set_page = &rs600_gart_set_page, 991 .set_page = &rs600_gart_set_page,
1079 }, 992 },
1080 .ring = { 993 .ring = {
1081 [RADEON_RING_TYPE_GFX_INDEX] = { 994 [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
1082 .ib_execute = &r600_ring_ib_execute, 995 [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
1083 .emit_fence = &r600_fence_ring_emit,
1084 .emit_semaphore = &r600_semaphore_ring_emit,
1085 .cs_parse = &r600_cs_parse,
1086 .ring_test = &r600_ring_test,
1087 .ib_test = &r600_ib_test,
1088 .is_lockup = &r600_gfx_is_lockup,
1089 .get_rptr = &radeon_ring_generic_get_rptr,
1090 .get_wptr = &radeon_ring_generic_get_wptr,
1091 .set_wptr = &radeon_ring_generic_set_wptr,
1092 },
1093 [R600_RING_TYPE_DMA_INDEX] = {
1094 .ib_execute = &r600_dma_ring_ib_execute,
1095 .emit_fence = &r600_dma_fence_ring_emit,
1096 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1097 .cs_parse = &r600_dma_cs_parse,
1098 .ring_test = &r600_dma_ring_test,
1099 .ib_test = &r600_dma_ib_test,
1100 .is_lockup = &r600_dma_is_lockup,
1101 .get_rptr = &radeon_ring_generic_get_rptr,
1102 .get_wptr = &radeon_ring_generic_get_wptr,
1103 .set_wptr = &radeon_ring_generic_set_wptr,
1104 }
1105 }, 996 },
1106 .irq = { 997 .irq = {
1107 .set = &r600_irq_set, 998 .set = &r600_irq_set,
@@ -1115,7 +1006,7 @@ static struct radeon_asic rv6xx_asic = {
1115 .get_backlight_level = &atombios_get_backlight_level, 1006 .get_backlight_level = &atombios_get_backlight_level,
1116 }, 1007 },
1117 .copy = { 1008 .copy = {
1118 .blit = &r600_copy_blit, 1009 .blit = &r600_copy_cpdma,
1119 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1010 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1120 .dma = &r600_copy_dma, 1011 .dma = &r600_copy_dma,
1121 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1012 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1187,30 +1078,8 @@ static struct radeon_asic rs780_asic = {
1187 .set_page = &rs600_gart_set_page, 1078 .set_page = &rs600_gart_set_page,
1188 }, 1079 },
1189 .ring = { 1080 .ring = {
1190 [RADEON_RING_TYPE_GFX_INDEX] = { 1081 [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
1191 .ib_execute = &r600_ring_ib_execute, 1082 [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
1192 .emit_fence = &r600_fence_ring_emit,
1193 .emit_semaphore = &r600_semaphore_ring_emit,
1194 .cs_parse = &r600_cs_parse,
1195 .ring_test = &r600_ring_test,
1196 .ib_test = &r600_ib_test,
1197 .is_lockup = &r600_gfx_is_lockup,
1198 .get_rptr = &radeon_ring_generic_get_rptr,
1199 .get_wptr = &radeon_ring_generic_get_wptr,
1200 .set_wptr = &radeon_ring_generic_set_wptr,
1201 },
1202 [R600_RING_TYPE_DMA_INDEX] = {
1203 .ib_execute = &r600_dma_ring_ib_execute,
1204 .emit_fence = &r600_dma_fence_ring_emit,
1205 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1206 .cs_parse = &r600_dma_cs_parse,
1207 .ring_test = &r600_dma_ring_test,
1208 .ib_test = &r600_dma_ib_test,
1209 .is_lockup = &r600_dma_is_lockup,
1210 .get_rptr = &radeon_ring_generic_get_rptr,
1211 .get_wptr = &radeon_ring_generic_get_wptr,
1212 .set_wptr = &radeon_ring_generic_set_wptr,
1213 }
1214 }, 1083 },
1215 .irq = { 1084 .irq = {
1216 .set = &r600_irq_set, 1085 .set = &r600_irq_set,
@@ -1226,7 +1095,7 @@ static struct radeon_asic rs780_asic = {
1226 .hdmi_setmode = &r600_hdmi_setmode, 1095 .hdmi_setmode = &r600_hdmi_setmode,
1227 }, 1096 },
1228 .copy = { 1097 .copy = {
1229 .blit = &r600_copy_blit, 1098 .blit = &r600_copy_cpdma,
1230 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1099 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1231 .dma = &r600_copy_dma, 1100 .dma = &r600_copy_dma,
1232 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1101 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1280,6 +1149,19 @@ static struct radeon_asic rs780_asic = {
1280 }, 1149 },
1281}; 1150};
1282 1151
1152static struct radeon_asic_ring rv770_uvd_ring = {
1153 .ib_execute = &uvd_v1_0_ib_execute,
1154 .emit_fence = &uvd_v2_2_fence_emit,
1155 .emit_semaphore = &uvd_v1_0_semaphore_emit,
1156 .cs_parse = &radeon_uvd_cs_parse,
1157 .ring_test = &uvd_v1_0_ring_test,
1158 .ib_test = &uvd_v1_0_ib_test,
1159 .is_lockup = &radeon_ring_test_lockup,
1160 .get_rptr = &uvd_v1_0_get_rptr,
1161 .get_wptr = &uvd_v1_0_get_wptr,
1162 .set_wptr = &uvd_v1_0_set_wptr,
1163};
1164
1283static struct radeon_asic rv770_asic = { 1165static struct radeon_asic rv770_asic = {
1284 .init = &rv770_init, 1166 .init = &rv770_init,
1285 .fini = &rv770_fini, 1167 .fini = &rv770_fini,
@@ -1297,42 +1179,9 @@ static struct radeon_asic rv770_asic = {
1297 .set_page = &rs600_gart_set_page, 1179 .set_page = &rs600_gart_set_page,
1298 }, 1180 },
1299 .ring = { 1181 .ring = {
1300 [RADEON_RING_TYPE_GFX_INDEX] = { 1182 [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
1301 .ib_execute = &r600_ring_ib_execute, 1183 [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
1302 .emit_fence = &r600_fence_ring_emit, 1184 [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
1303 .emit_semaphore = &r600_semaphore_ring_emit,
1304 .cs_parse = &r600_cs_parse,
1305 .ring_test = &r600_ring_test,
1306 .ib_test = &r600_ib_test,
1307 .is_lockup = &r600_gfx_is_lockup,
1308 .get_rptr = &radeon_ring_generic_get_rptr,
1309 .get_wptr = &radeon_ring_generic_get_wptr,
1310 .set_wptr = &radeon_ring_generic_set_wptr,
1311 },
1312 [R600_RING_TYPE_DMA_INDEX] = {
1313 .ib_execute = &r600_dma_ring_ib_execute,
1314 .emit_fence = &r600_dma_fence_ring_emit,
1315 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1316 .cs_parse = &r600_dma_cs_parse,
1317 .ring_test = &r600_dma_ring_test,
1318 .ib_test = &r600_dma_ib_test,
1319 .is_lockup = &r600_dma_is_lockup,
1320 .get_rptr = &radeon_ring_generic_get_rptr,
1321 .get_wptr = &radeon_ring_generic_get_wptr,
1322 .set_wptr = &radeon_ring_generic_set_wptr,
1323 },
1324 [R600_RING_TYPE_UVD_INDEX] = {
1325 .ib_execute = &r600_uvd_ib_execute,
1326 .emit_fence = &r600_uvd_fence_emit,
1327 .emit_semaphore = &r600_uvd_semaphore_emit,
1328 .cs_parse = &radeon_uvd_cs_parse,
1329 .ring_test = &r600_uvd_ring_test,
1330 .ib_test = &r600_uvd_ib_test,
1331 .is_lockup = &radeon_ring_test_lockup,
1332 .get_rptr = &radeon_ring_generic_get_rptr,
1333 .get_wptr = &radeon_ring_generic_get_wptr,
1334 .set_wptr = &radeon_ring_generic_set_wptr,
1335 }
1336 }, 1185 },
1337 .irq = { 1186 .irq = {
1338 .set = &r600_irq_set, 1187 .set = &r600_irq_set,
@@ -1348,7 +1197,7 @@ static struct radeon_asic rv770_asic = {
1348 .hdmi_setmode = &r600_hdmi_setmode, 1197 .hdmi_setmode = &r600_hdmi_setmode,
1349 }, 1198 },
1350 .copy = { 1199 .copy = {
1351 .blit = &r600_copy_blit, 1200 .blit = &r600_copy_cpdma,
1352 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1201 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1353 .dma = &rv770_copy_dma, 1202 .dma = &rv770_copy_dma,
1354 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1203 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1405,6 +1254,32 @@ static struct radeon_asic rv770_asic = {
1405 }, 1254 },
1406}; 1255};
1407 1256
1257static struct radeon_asic_ring evergreen_gfx_ring = {
1258 .ib_execute = &evergreen_ring_ib_execute,
1259 .emit_fence = &r600_fence_ring_emit,
1260 .emit_semaphore = &r600_semaphore_ring_emit,
1261 .cs_parse = &evergreen_cs_parse,
1262 .ring_test = &r600_ring_test,
1263 .ib_test = &r600_ib_test,
1264 .is_lockup = &evergreen_gfx_is_lockup,
1265 .get_rptr = &radeon_ring_generic_get_rptr,
1266 .get_wptr = &radeon_ring_generic_get_wptr,
1267 .set_wptr = &radeon_ring_generic_set_wptr,
1268};
1269
1270static struct radeon_asic_ring evergreen_dma_ring = {
1271 .ib_execute = &evergreen_dma_ring_ib_execute,
1272 .emit_fence = &evergreen_dma_fence_ring_emit,
1273 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1274 .cs_parse = &evergreen_dma_cs_parse,
1275 .ring_test = &r600_dma_ring_test,
1276 .ib_test = &r600_dma_ib_test,
1277 .is_lockup = &evergreen_dma_is_lockup,
1278 .get_rptr = &r600_dma_get_rptr,
1279 .get_wptr = &r600_dma_get_wptr,
1280 .set_wptr = &r600_dma_set_wptr,
1281};
1282
1408static struct radeon_asic evergreen_asic = { 1283static struct radeon_asic evergreen_asic = {
1409 .init = &evergreen_init, 1284 .init = &evergreen_init,
1410 .fini = &evergreen_fini, 1285 .fini = &evergreen_fini,
@@ -1422,42 +1297,9 @@ static struct radeon_asic evergreen_asic = {
1422 .set_page = &rs600_gart_set_page, 1297 .set_page = &rs600_gart_set_page,
1423 }, 1298 },
1424 .ring = { 1299 .ring = {
1425 [RADEON_RING_TYPE_GFX_INDEX] = { 1300 [RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
1426 .ib_execute = &evergreen_ring_ib_execute, 1301 [R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
1427 .emit_fence = &r600_fence_ring_emit, 1302 [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
1428 .emit_semaphore = &r600_semaphore_ring_emit,
1429 .cs_parse = &evergreen_cs_parse,
1430 .ring_test = &r600_ring_test,
1431 .ib_test = &r600_ib_test,
1432 .is_lockup = &evergreen_gfx_is_lockup,
1433 .get_rptr = &radeon_ring_generic_get_rptr,
1434 .get_wptr = &radeon_ring_generic_get_wptr,
1435 .set_wptr = &radeon_ring_generic_set_wptr,
1436 },
1437 [R600_RING_TYPE_DMA_INDEX] = {
1438 .ib_execute = &evergreen_dma_ring_ib_execute,
1439 .emit_fence = &evergreen_dma_fence_ring_emit,
1440 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1441 .cs_parse = &evergreen_dma_cs_parse,
1442 .ring_test = &r600_dma_ring_test,
1443 .ib_test = &r600_dma_ib_test,
1444 .is_lockup = &evergreen_dma_is_lockup,
1445 .get_rptr = &radeon_ring_generic_get_rptr,
1446 .get_wptr = &radeon_ring_generic_get_wptr,
1447 .set_wptr = &radeon_ring_generic_set_wptr,
1448 },
1449 [R600_RING_TYPE_UVD_INDEX] = {
1450 .ib_execute = &r600_uvd_ib_execute,
1451 .emit_fence = &r600_uvd_fence_emit,
1452 .emit_semaphore = &r600_uvd_semaphore_emit,
1453 .cs_parse = &radeon_uvd_cs_parse,
1454 .ring_test = &r600_uvd_ring_test,
1455 .ib_test = &r600_uvd_ib_test,
1456 .is_lockup = &radeon_ring_test_lockup,
1457 .get_rptr = &radeon_ring_generic_get_rptr,
1458 .get_wptr = &radeon_ring_generic_get_wptr,
1459 .set_wptr = &radeon_ring_generic_set_wptr,
1460 }
1461 }, 1303 },
1462 .irq = { 1304 .irq = {
1463 .set = &evergreen_irq_set, 1305 .set = &evergreen_irq_set,
@@ -1473,7 +1315,7 @@ static struct radeon_asic evergreen_asic = {
1473 .hdmi_setmode = &evergreen_hdmi_setmode, 1315 .hdmi_setmode = &evergreen_hdmi_setmode,
1474 }, 1316 },
1475 .copy = { 1317 .copy = {
1476 .blit = &r600_copy_blit, 1318 .blit = &r600_copy_cpdma,
1477 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1319 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1478 .dma = &evergreen_copy_dma, 1320 .dma = &evergreen_copy_dma,
1479 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1321 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1547,42 +1389,9 @@ static struct radeon_asic sumo_asic = {
1547 .set_page = &rs600_gart_set_page, 1389 .set_page = &rs600_gart_set_page,
1548 }, 1390 },
1549 .ring = { 1391 .ring = {
1550 [RADEON_RING_TYPE_GFX_INDEX] = { 1392 [RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
1551 .ib_execute = &evergreen_ring_ib_execute, 1393 [R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
1552 .emit_fence = &r600_fence_ring_emit, 1394 [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
1553 .emit_semaphore = &r600_semaphore_ring_emit,
1554 .cs_parse = &evergreen_cs_parse,
1555 .ring_test = &r600_ring_test,
1556 .ib_test = &r600_ib_test,
1557 .is_lockup = &evergreen_gfx_is_lockup,
1558 .get_rptr = &radeon_ring_generic_get_rptr,
1559 .get_wptr = &radeon_ring_generic_get_wptr,
1560 .set_wptr = &radeon_ring_generic_set_wptr,
1561 },
1562 [R600_RING_TYPE_DMA_INDEX] = {
1563 .ib_execute = &evergreen_dma_ring_ib_execute,
1564 .emit_fence = &evergreen_dma_fence_ring_emit,
1565 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1566 .cs_parse = &evergreen_dma_cs_parse,
1567 .ring_test = &r600_dma_ring_test,
1568 .ib_test = &r600_dma_ib_test,
1569 .is_lockup = &evergreen_dma_is_lockup,
1570 .get_rptr = &radeon_ring_generic_get_rptr,
1571 .get_wptr = &radeon_ring_generic_get_wptr,
1572 .set_wptr = &radeon_ring_generic_set_wptr,
1573 },
1574 [R600_RING_TYPE_UVD_INDEX] = {
1575 .ib_execute = &r600_uvd_ib_execute,
1576 .emit_fence = &r600_uvd_fence_emit,
1577 .emit_semaphore = &r600_uvd_semaphore_emit,
1578 .cs_parse = &radeon_uvd_cs_parse,
1579 .ring_test = &r600_uvd_ring_test,
1580 .ib_test = &r600_uvd_ib_test,
1581 .is_lockup = &radeon_ring_test_lockup,
1582 .get_rptr = &radeon_ring_generic_get_rptr,
1583 .get_wptr = &radeon_ring_generic_get_wptr,
1584 .set_wptr = &radeon_ring_generic_set_wptr,
1585 }
1586 }, 1395 },
1587 .irq = { 1396 .irq = {
1588 .set = &evergreen_irq_set, 1397 .set = &evergreen_irq_set,
@@ -1598,7 +1407,7 @@ static struct radeon_asic sumo_asic = {
1598 .hdmi_setmode = &evergreen_hdmi_setmode, 1407 .hdmi_setmode = &evergreen_hdmi_setmode,
1599 }, 1408 },
1600 .copy = { 1409 .copy = {
1601 .blit = &r600_copy_blit, 1410 .blit = &r600_copy_cpdma,
1602 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1411 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1603 .dma = &evergreen_copy_dma, 1412 .dma = &evergreen_copy_dma,
1604 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1413 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1671,42 +1480,9 @@ static struct radeon_asic btc_asic = {
1671 .set_page = &rs600_gart_set_page, 1480 .set_page = &rs600_gart_set_page,
1672 }, 1481 },
1673 .ring = { 1482 .ring = {
1674 [RADEON_RING_TYPE_GFX_INDEX] = { 1483 [RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
1675 .ib_execute = &evergreen_ring_ib_execute, 1484 [R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
1676 .emit_fence = &r600_fence_ring_emit, 1485 [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
1677 .emit_semaphore = &r600_semaphore_ring_emit,
1678 .cs_parse = &evergreen_cs_parse,
1679 .ring_test = &r600_ring_test,
1680 .ib_test = &r600_ib_test,
1681 .is_lockup = &evergreen_gfx_is_lockup,
1682 .get_rptr = &radeon_ring_generic_get_rptr,
1683 .get_wptr = &radeon_ring_generic_get_wptr,
1684 .set_wptr = &radeon_ring_generic_set_wptr,
1685 },
1686 [R600_RING_TYPE_DMA_INDEX] = {
1687 .ib_execute = &evergreen_dma_ring_ib_execute,
1688 .emit_fence = &evergreen_dma_fence_ring_emit,
1689 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1690 .cs_parse = &evergreen_dma_cs_parse,
1691 .ring_test = &r600_dma_ring_test,
1692 .ib_test = &r600_dma_ib_test,
1693 .is_lockup = &evergreen_dma_is_lockup,
1694 .get_rptr = &radeon_ring_generic_get_rptr,
1695 .get_wptr = &radeon_ring_generic_get_wptr,
1696 .set_wptr = &radeon_ring_generic_set_wptr,
1697 },
1698 [R600_RING_TYPE_UVD_INDEX] = {
1699 .ib_execute = &r600_uvd_ib_execute,
1700 .emit_fence = &r600_uvd_fence_emit,
1701 .emit_semaphore = &r600_uvd_semaphore_emit,
1702 .cs_parse = &radeon_uvd_cs_parse,
1703 .ring_test = &r600_uvd_ring_test,
1704 .ib_test = &r600_uvd_ib_test,
1705 .is_lockup = &radeon_ring_test_lockup,
1706 .get_rptr = &radeon_ring_generic_get_rptr,
1707 .get_wptr = &radeon_ring_generic_get_wptr,
1708 .set_wptr = &radeon_ring_generic_set_wptr,
1709 }
1710 }, 1486 },
1711 .irq = { 1487 .irq = {
1712 .set = &evergreen_irq_set, 1488 .set = &evergreen_irq_set,
@@ -1722,7 +1498,7 @@ static struct radeon_asic btc_asic = {
1722 .hdmi_setmode = &evergreen_hdmi_setmode, 1498 .hdmi_setmode = &evergreen_hdmi_setmode,
1723 }, 1499 },
1724 .copy = { 1500 .copy = {
1725 .blit = &r600_copy_blit, 1501 .blit = &r600_copy_cpdma,
1726 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1502 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1727 .dma = &evergreen_copy_dma, 1503 .dma = &evergreen_copy_dma,
1728 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1504 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1779,6 +1555,49 @@ static struct radeon_asic btc_asic = {
1779 }, 1555 },
1780}; 1556};
1781 1557
1558static struct radeon_asic_ring cayman_gfx_ring = {
1559 .ib_execute = &cayman_ring_ib_execute,
1560 .ib_parse = &evergreen_ib_parse,
1561 .emit_fence = &cayman_fence_ring_emit,
1562 .emit_semaphore = &r600_semaphore_ring_emit,
1563 .cs_parse = &evergreen_cs_parse,
1564 .ring_test = &r600_ring_test,
1565 .ib_test = &r600_ib_test,
1566 .is_lockup = &cayman_gfx_is_lockup,
1567 .vm_flush = &cayman_vm_flush,
1568 .get_rptr = &radeon_ring_generic_get_rptr,
1569 .get_wptr = &radeon_ring_generic_get_wptr,
1570 .set_wptr = &radeon_ring_generic_set_wptr,
1571};
1572
1573static struct radeon_asic_ring cayman_dma_ring = {
1574 .ib_execute = &cayman_dma_ring_ib_execute,
1575 .ib_parse = &evergreen_dma_ib_parse,
1576 .emit_fence = &evergreen_dma_fence_ring_emit,
1577 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1578 .cs_parse = &evergreen_dma_cs_parse,
1579 .ring_test = &r600_dma_ring_test,
1580 .ib_test = &r600_dma_ib_test,
1581 .is_lockup = &cayman_dma_is_lockup,
1582 .vm_flush = &cayman_dma_vm_flush,
1583 .get_rptr = &r600_dma_get_rptr,
1584 .get_wptr = &r600_dma_get_wptr,
1585 .set_wptr = &r600_dma_set_wptr
1586};
1587
1588static struct radeon_asic_ring cayman_uvd_ring = {
1589 .ib_execute = &uvd_v1_0_ib_execute,
1590 .emit_fence = &uvd_v2_2_fence_emit,
1591 .emit_semaphore = &uvd_v3_1_semaphore_emit,
1592 .cs_parse = &radeon_uvd_cs_parse,
1593 .ring_test = &uvd_v1_0_ring_test,
1594 .ib_test = &uvd_v1_0_ib_test,
1595 .is_lockup = &radeon_ring_test_lockup,
1596 .get_rptr = &uvd_v1_0_get_rptr,
1597 .get_wptr = &uvd_v1_0_get_wptr,
1598 .set_wptr = &uvd_v1_0_set_wptr,
1599};
1600
1782static struct radeon_asic cayman_asic = { 1601static struct radeon_asic cayman_asic = {
1783 .init = &cayman_init, 1602 .init = &cayman_init,
1784 .fini = &cayman_fini, 1603 .fini = &cayman_fini,
@@ -1802,88 +1621,12 @@ static struct radeon_asic cayman_asic = {
1802 .set_page = &cayman_vm_set_page, 1621 .set_page = &cayman_vm_set_page,
1803 }, 1622 },
1804 .ring = { 1623 .ring = {
1805 [RADEON_RING_TYPE_GFX_INDEX] = { 1624 [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
1806 .ib_execute = &cayman_ring_ib_execute, 1625 [CAYMAN_RING_TYPE_CP1_INDEX] = &cayman_gfx_ring,
1807 .ib_parse = &evergreen_ib_parse, 1626 [CAYMAN_RING_TYPE_CP2_INDEX] = &cayman_gfx_ring,
1808 .emit_fence = &cayman_fence_ring_emit, 1627 [R600_RING_TYPE_DMA_INDEX] = &cayman_dma_ring,
1809 .emit_semaphore = &r600_semaphore_ring_emit, 1628 [CAYMAN_RING_TYPE_DMA1_INDEX] = &cayman_dma_ring,
1810 .cs_parse = &evergreen_cs_parse, 1629 [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
1811 .ring_test = &r600_ring_test,
1812 .ib_test = &r600_ib_test,
1813 .is_lockup = &cayman_gfx_is_lockup,
1814 .vm_flush = &cayman_vm_flush,
1815 .get_rptr = &radeon_ring_generic_get_rptr,
1816 .get_wptr = &radeon_ring_generic_get_wptr,
1817 .set_wptr = &radeon_ring_generic_set_wptr,
1818 },
1819 [CAYMAN_RING_TYPE_CP1_INDEX] = {
1820 .ib_execute = &cayman_ring_ib_execute,
1821 .ib_parse = &evergreen_ib_parse,
1822 .emit_fence = &cayman_fence_ring_emit,
1823 .emit_semaphore = &r600_semaphore_ring_emit,
1824 .cs_parse = &evergreen_cs_parse,
1825 .ring_test = &r600_ring_test,
1826 .ib_test = &r600_ib_test,
1827 .is_lockup = &cayman_gfx_is_lockup,
1828 .vm_flush = &cayman_vm_flush,
1829 .get_rptr = &radeon_ring_generic_get_rptr,
1830 .get_wptr = &radeon_ring_generic_get_wptr,
1831 .set_wptr = &radeon_ring_generic_set_wptr,
1832 },
1833 [CAYMAN_RING_TYPE_CP2_INDEX] = {
1834 .ib_execute = &cayman_ring_ib_execute,
1835 .ib_parse = &evergreen_ib_parse,
1836 .emit_fence = &cayman_fence_ring_emit,
1837 .emit_semaphore = &r600_semaphore_ring_emit,
1838 .cs_parse = &evergreen_cs_parse,
1839 .ring_test = &r600_ring_test,
1840 .ib_test = &r600_ib_test,
1841 .is_lockup = &cayman_gfx_is_lockup,
1842 .vm_flush = &cayman_vm_flush,
1843 .get_rptr = &radeon_ring_generic_get_rptr,
1844 .get_wptr = &radeon_ring_generic_get_wptr,
1845 .set_wptr = &radeon_ring_generic_set_wptr,
1846 },
1847 [R600_RING_TYPE_DMA_INDEX] = {
1848 .ib_execute = &cayman_dma_ring_ib_execute,
1849 .ib_parse = &evergreen_dma_ib_parse,
1850 .emit_fence = &evergreen_dma_fence_ring_emit,
1851 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1852 .cs_parse = &evergreen_dma_cs_parse,
1853 .ring_test = &r600_dma_ring_test,
1854 .ib_test = &r600_dma_ib_test,
1855 .is_lockup = &cayman_dma_is_lockup,
1856 .vm_flush = &cayman_dma_vm_flush,
1857 .get_rptr = &radeon_ring_generic_get_rptr,
1858 .get_wptr = &radeon_ring_generic_get_wptr,
1859 .set_wptr = &radeon_ring_generic_set_wptr,
1860 },
1861 [CAYMAN_RING_TYPE_DMA1_INDEX] = {
1862 .ib_execute = &cayman_dma_ring_ib_execute,
1863 .ib_parse = &evergreen_dma_ib_parse,
1864 .emit_fence = &evergreen_dma_fence_ring_emit,
1865 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1866 .cs_parse = &evergreen_dma_cs_parse,
1867 .ring_test = &r600_dma_ring_test,
1868 .ib_test = &r600_dma_ib_test,
1869 .is_lockup = &cayman_dma_is_lockup,
1870 .vm_flush = &cayman_dma_vm_flush,
1871 .get_rptr = &radeon_ring_generic_get_rptr,
1872 .get_wptr = &radeon_ring_generic_get_wptr,
1873 .set_wptr = &radeon_ring_generic_set_wptr,
1874 },
1875 [R600_RING_TYPE_UVD_INDEX] = {
1876 .ib_execute = &r600_uvd_ib_execute,
1877 .emit_fence = &r600_uvd_fence_emit,
1878 .emit_semaphore = &cayman_uvd_semaphore_emit,
1879 .cs_parse = &radeon_uvd_cs_parse,
1880 .ring_test = &r600_uvd_ring_test,
1881 .ib_test = &r600_uvd_ib_test,
1882 .is_lockup = &radeon_ring_test_lockup,
1883 .get_rptr = &radeon_ring_generic_get_rptr,
1884 .get_wptr = &radeon_ring_generic_get_wptr,
1885 .set_wptr = &radeon_ring_generic_set_wptr,
1886 }
1887 }, 1630 },
1888 .irq = { 1631 .irq = {
1889 .set = &evergreen_irq_set, 1632 .set = &evergreen_irq_set,
@@ -1899,7 +1642,7 @@ static struct radeon_asic cayman_asic = {
1899 .hdmi_setmode = &evergreen_hdmi_setmode, 1642 .hdmi_setmode = &evergreen_hdmi_setmode,
1900 }, 1643 },
1901 .copy = { 1644 .copy = {
1902 .blit = &r600_copy_blit, 1645 .blit = &r600_copy_cpdma,
1903 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1646 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1904 .dma = &evergreen_copy_dma, 1647 .dma = &evergreen_copy_dma,
1905 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1648 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1979,88 +1722,12 @@ static struct radeon_asic trinity_asic = {
1979 .set_page = &cayman_vm_set_page, 1722 .set_page = &cayman_vm_set_page,
1980 }, 1723 },
1981 .ring = { 1724 .ring = {
1982 [RADEON_RING_TYPE_GFX_INDEX] = { 1725 [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
1983 .ib_execute = &cayman_ring_ib_execute, 1726 [CAYMAN_RING_TYPE_CP1_INDEX] = &cayman_gfx_ring,
1984 .ib_parse = &evergreen_ib_parse, 1727 [CAYMAN_RING_TYPE_CP2_INDEX] = &cayman_gfx_ring,
1985 .emit_fence = &cayman_fence_ring_emit, 1728 [R600_RING_TYPE_DMA_INDEX] = &cayman_dma_ring,
1986 .emit_semaphore = &r600_semaphore_ring_emit, 1729 [CAYMAN_RING_TYPE_DMA1_INDEX] = &cayman_dma_ring,
1987 .cs_parse = &evergreen_cs_parse, 1730 [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
1988 .ring_test = &r600_ring_test,
1989 .ib_test = &r600_ib_test,
1990 .is_lockup = &cayman_gfx_is_lockup,
1991 .vm_flush = &cayman_vm_flush,
1992 .get_rptr = &radeon_ring_generic_get_rptr,
1993 .get_wptr = &radeon_ring_generic_get_wptr,
1994 .set_wptr = &radeon_ring_generic_set_wptr,
1995 },
1996 [CAYMAN_RING_TYPE_CP1_INDEX] = {
1997 .ib_execute = &cayman_ring_ib_execute,
1998 .ib_parse = &evergreen_ib_parse,
1999 .emit_fence = &cayman_fence_ring_emit,
2000 .emit_semaphore = &r600_semaphore_ring_emit,
2001 .cs_parse = &evergreen_cs_parse,
2002 .ring_test = &r600_ring_test,
2003 .ib_test = &r600_ib_test,
2004 .is_lockup = &cayman_gfx_is_lockup,
2005 .vm_flush = &cayman_vm_flush,
2006 .get_rptr = &radeon_ring_generic_get_rptr,
2007 .get_wptr = &radeon_ring_generic_get_wptr,
2008 .set_wptr = &radeon_ring_generic_set_wptr,
2009 },
2010 [CAYMAN_RING_TYPE_CP2_INDEX] = {
2011 .ib_execute = &cayman_ring_ib_execute,
2012 .ib_parse = &evergreen_ib_parse,
2013 .emit_fence = &cayman_fence_ring_emit,
2014 .emit_semaphore = &r600_semaphore_ring_emit,
2015 .cs_parse = &evergreen_cs_parse,
2016 .ring_test = &r600_ring_test,
2017 .ib_test = &r600_ib_test,
2018 .is_lockup = &cayman_gfx_is_lockup,
2019 .vm_flush = &cayman_vm_flush,
2020 .get_rptr = &radeon_ring_generic_get_rptr,
2021 .get_wptr = &radeon_ring_generic_get_wptr,
2022 .set_wptr = &radeon_ring_generic_set_wptr,
2023 },
2024 [R600_RING_TYPE_DMA_INDEX] = {
2025 .ib_execute = &cayman_dma_ring_ib_execute,
2026 .ib_parse = &evergreen_dma_ib_parse,
2027 .emit_fence = &evergreen_dma_fence_ring_emit,
2028 .emit_semaphore = &r600_dma_semaphore_ring_emit,
2029 .cs_parse = &evergreen_dma_cs_parse,
2030 .ring_test = &r600_dma_ring_test,
2031 .ib_test = &r600_dma_ib_test,
2032 .is_lockup = &cayman_dma_is_lockup,
2033 .vm_flush = &cayman_dma_vm_flush,
2034 .get_rptr = &radeon_ring_generic_get_rptr,
2035 .get_wptr = &radeon_ring_generic_get_wptr,
2036 .set_wptr = &radeon_ring_generic_set_wptr,
2037 },
2038 [CAYMAN_RING_TYPE_DMA1_INDEX] = {
2039 .ib_execute = &cayman_dma_ring_ib_execute,
2040 .ib_parse = &evergreen_dma_ib_parse,
2041 .emit_fence = &evergreen_dma_fence_ring_emit,
2042 .emit_semaphore = &r600_dma_semaphore_ring_emit,
2043 .cs_parse = &evergreen_dma_cs_parse,
2044 .ring_test = &r600_dma_ring_test,
2045 .ib_test = &r600_dma_ib_test,
2046 .is_lockup = &cayman_dma_is_lockup,
2047 .vm_flush = &cayman_dma_vm_flush,
2048 .get_rptr = &radeon_ring_generic_get_rptr,
2049 .get_wptr = &radeon_ring_generic_get_wptr,
2050 .set_wptr = &radeon_ring_generic_set_wptr,
2051 },
2052 [R600_RING_TYPE_UVD_INDEX] = {
2053 .ib_execute = &r600_uvd_ib_execute,
2054 .emit_fence = &r600_uvd_fence_emit,
2055 .emit_semaphore = &cayman_uvd_semaphore_emit,
2056 .cs_parse = &radeon_uvd_cs_parse,
2057 .ring_test = &r600_uvd_ring_test,
2058 .ib_test = &r600_uvd_ib_test,
2059 .is_lockup = &radeon_ring_test_lockup,
2060 .get_rptr = &radeon_ring_generic_get_rptr,
2061 .get_wptr = &radeon_ring_generic_get_wptr,
2062 .set_wptr = &radeon_ring_generic_set_wptr,
2063 }
2064 }, 1731 },
2065 .irq = { 1732 .irq = {
2066 .set = &evergreen_irq_set, 1733 .set = &evergreen_irq_set,
@@ -2072,9 +1739,11 @@ static struct radeon_asic trinity_asic = {
2072 .wait_for_vblank = &dce4_wait_for_vblank, 1739 .wait_for_vblank = &dce4_wait_for_vblank,
2073 .set_backlight_level = &atombios_set_backlight_level, 1740 .set_backlight_level = &atombios_set_backlight_level,
2074 .get_backlight_level = &atombios_get_backlight_level, 1741 .get_backlight_level = &atombios_get_backlight_level,
1742 .hdmi_enable = &evergreen_hdmi_enable,
1743 .hdmi_setmode = &evergreen_hdmi_setmode,
2075 }, 1744 },
2076 .copy = { 1745 .copy = {
2077 .blit = &r600_copy_blit, 1746 .blit = &r600_copy_cpdma,
2078 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1747 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
2079 .dma = &evergreen_copy_dma, 1748 .dma = &evergreen_copy_dma,
2080 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1749 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2130,6 +1799,36 @@ static struct radeon_asic trinity_asic = {
2130 }, 1799 },
2131}; 1800};
2132 1801
1802static struct radeon_asic_ring si_gfx_ring = {
1803 .ib_execute = &si_ring_ib_execute,
1804 .ib_parse = &si_ib_parse,
1805 .emit_fence = &si_fence_ring_emit,
1806 .emit_semaphore = &r600_semaphore_ring_emit,
1807 .cs_parse = NULL,
1808 .ring_test = &r600_ring_test,
1809 .ib_test = &r600_ib_test,
1810 .is_lockup = &si_gfx_is_lockup,
1811 .vm_flush = &si_vm_flush,
1812 .get_rptr = &radeon_ring_generic_get_rptr,
1813 .get_wptr = &radeon_ring_generic_get_wptr,
1814 .set_wptr = &radeon_ring_generic_set_wptr,
1815};
1816
1817static struct radeon_asic_ring si_dma_ring = {
1818 .ib_execute = &cayman_dma_ring_ib_execute,
1819 .ib_parse = &evergreen_dma_ib_parse,
1820 .emit_fence = &evergreen_dma_fence_ring_emit,
1821 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1822 .cs_parse = NULL,
1823 .ring_test = &r600_dma_ring_test,
1824 .ib_test = &r600_dma_ib_test,
1825 .is_lockup = &si_dma_is_lockup,
1826 .vm_flush = &si_dma_vm_flush,
1827 .get_rptr = &r600_dma_get_rptr,
1828 .get_wptr = &r600_dma_get_wptr,
1829 .set_wptr = &r600_dma_set_wptr,
1830};
1831
2133static struct radeon_asic si_asic = { 1832static struct radeon_asic si_asic = {
2134 .init = &si_init, 1833 .init = &si_init,
2135 .fini = &si_fini, 1834 .fini = &si_fini,
@@ -2153,88 +1852,12 @@ static struct radeon_asic si_asic = {
2153 .set_page = &si_vm_set_page, 1852 .set_page = &si_vm_set_page,
2154 }, 1853 },
2155 .ring = { 1854 .ring = {
2156 [RADEON_RING_TYPE_GFX_INDEX] = { 1855 [RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
2157 .ib_execute = &si_ring_ib_execute, 1856 [CAYMAN_RING_TYPE_CP1_INDEX] = &si_gfx_ring,
2158 .ib_parse = &si_ib_parse, 1857 [CAYMAN_RING_TYPE_CP2_INDEX] = &si_gfx_ring,
2159 .emit_fence = &si_fence_ring_emit, 1858 [R600_RING_TYPE_DMA_INDEX] = &si_dma_ring,
2160 .emit_semaphore = &r600_semaphore_ring_emit, 1859 [CAYMAN_RING_TYPE_DMA1_INDEX] = &si_dma_ring,
2161 .cs_parse = NULL, 1860 [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
2162 .ring_test = &r600_ring_test,
2163 .ib_test = &r600_ib_test,
2164 .is_lockup = &si_gfx_is_lockup,
2165 .vm_flush = &si_vm_flush,
2166 .get_rptr = &radeon_ring_generic_get_rptr,
2167 .get_wptr = &radeon_ring_generic_get_wptr,
2168 .set_wptr = &radeon_ring_generic_set_wptr,
2169 },
2170 [CAYMAN_RING_TYPE_CP1_INDEX] = {
2171 .ib_execute = &si_ring_ib_execute,
2172 .ib_parse = &si_ib_parse,
2173 .emit_fence = &si_fence_ring_emit,
2174 .emit_semaphore = &r600_semaphore_ring_emit,
2175 .cs_parse = NULL,
2176 .ring_test = &r600_ring_test,
2177 .ib_test = &r600_ib_test,
2178 .is_lockup = &si_gfx_is_lockup,
2179 .vm_flush = &si_vm_flush,
2180 .get_rptr = &radeon_ring_generic_get_rptr,
2181 .get_wptr = &radeon_ring_generic_get_wptr,
2182 .set_wptr = &radeon_ring_generic_set_wptr,
2183 },
2184 [CAYMAN_RING_TYPE_CP2_INDEX] = {
2185 .ib_execute = &si_ring_ib_execute,
2186 .ib_parse = &si_ib_parse,
2187 .emit_fence = &si_fence_ring_emit,
2188 .emit_semaphore = &r600_semaphore_ring_emit,
2189 .cs_parse = NULL,
2190 .ring_test = &r600_ring_test,
2191 .ib_test = &r600_ib_test,
2192 .is_lockup = &si_gfx_is_lockup,
2193 .vm_flush = &si_vm_flush,
2194 .get_rptr = &radeon_ring_generic_get_rptr,
2195 .get_wptr = &radeon_ring_generic_get_wptr,
2196 .set_wptr = &radeon_ring_generic_set_wptr,
2197 },
2198 [R600_RING_TYPE_DMA_INDEX] = {
2199 .ib_execute = &cayman_dma_ring_ib_execute,
2200 .ib_parse = &evergreen_dma_ib_parse,
2201 .emit_fence = &evergreen_dma_fence_ring_emit,
2202 .emit_semaphore = &r600_dma_semaphore_ring_emit,
2203 .cs_parse = NULL,
2204 .ring_test = &r600_dma_ring_test,
2205 .ib_test = &r600_dma_ib_test,
2206 .is_lockup = &si_dma_is_lockup,
2207 .vm_flush = &si_dma_vm_flush,
2208 .get_rptr = &radeon_ring_generic_get_rptr,
2209 .get_wptr = &radeon_ring_generic_get_wptr,
2210 .set_wptr = &radeon_ring_generic_set_wptr,
2211 },
2212 [CAYMAN_RING_TYPE_DMA1_INDEX] = {
2213 .ib_execute = &cayman_dma_ring_ib_execute,
2214 .ib_parse = &evergreen_dma_ib_parse,
2215 .emit_fence = &evergreen_dma_fence_ring_emit,
2216 .emit_semaphore = &r600_dma_semaphore_ring_emit,
2217 .cs_parse = NULL,
2218 .ring_test = &r600_dma_ring_test,
2219 .ib_test = &r600_dma_ib_test,
2220 .is_lockup = &si_dma_is_lockup,
2221 .vm_flush = &si_dma_vm_flush,
2222 .get_rptr = &radeon_ring_generic_get_rptr,
2223 .get_wptr = &radeon_ring_generic_get_wptr,
2224 .set_wptr = &radeon_ring_generic_set_wptr,
2225 },
2226 [R600_RING_TYPE_UVD_INDEX] = {
2227 .ib_execute = &r600_uvd_ib_execute,
2228 .emit_fence = &r600_uvd_fence_emit,
2229 .emit_semaphore = &cayman_uvd_semaphore_emit,
2230 .cs_parse = &radeon_uvd_cs_parse,
2231 .ring_test = &r600_uvd_ring_test,
2232 .ib_test = &r600_uvd_ib_test,
2233 .is_lockup = &radeon_ring_test_lockup,
2234 .get_rptr = &radeon_ring_generic_get_rptr,
2235 .get_wptr = &radeon_ring_generic_get_wptr,
2236 .set_wptr = &radeon_ring_generic_set_wptr,
2237 }
2238 }, 1861 },
2239 .irq = { 1862 .irq = {
2240 .set = &si_irq_set, 1863 .set = &si_irq_set,
@@ -2246,6 +1869,8 @@ static struct radeon_asic si_asic = {
2246 .wait_for_vblank = &dce4_wait_for_vblank, 1869 .wait_for_vblank = &dce4_wait_for_vblank,
2247 .set_backlight_level = &atombios_set_backlight_level, 1870 .set_backlight_level = &atombios_set_backlight_level,
2248 .get_backlight_level = &atombios_get_backlight_level, 1871 .get_backlight_level = &atombios_get_backlight_level,
1872 .hdmi_enable = &evergreen_hdmi_enable,
1873 .hdmi_setmode = &evergreen_hdmi_setmode,
2249 }, 1874 },
2250 .copy = { 1875 .copy = {
2251 .blit = NULL, 1876 .blit = NULL,
@@ -2305,6 +1930,51 @@ static struct radeon_asic si_asic = {
2305 }, 1930 },
2306}; 1931};
2307 1932
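The hunk above replaces si_asic's six inline ring callback tables with pointers to the shared si_gfx_ring and si_dma_ring definitions (plus a shared cayman_uvd_ring it references), and the same consolidation is applied to ci_asic and kv_asic below. A minimal standalone C sketch of that pattern follows; the types and index names are simplified stand-ins for illustration, not the kernel structures. Callers switch from '.' member access to '->' accordingly, as the radeon_cs.c hunk later in this diff shows.

        /* shared-ring-table sketch: several ring indices reuse one static table */
        #include <stdio.h>

        struct ring_ops {
                const char *name;
                void (*ring_test)(void);
        };

        static void gfx_ring_test(void) { puts("gfx ring test"); }
        static void dma_ring_test(void) { puts("dma ring test"); }

        /* one shared definition per distinct behaviour, instead of one copy per index */
        static const struct ring_ops gfx_ring = { .name = "gfx", .ring_test = gfx_ring_test };
        static const struct ring_ops dma_ring = { .name = "dma", .ring_test = dma_ring_test };

        enum { RING_GFX, RING_CP1, RING_CP2, RING_DMA0, RING_DMA1, NUM_RINGS };

        struct asic {
                const struct ring_ops *ring[NUM_RINGS];
        };

        static const struct asic si_like_asic = {
                .ring = {
                        [RING_GFX]  = &gfx_ring,
                        [RING_CP1]  = &gfx_ring,
                        [RING_CP2]  = &gfx_ring,
                        [RING_DMA0] = &dma_ring,
                        [RING_DMA1] = &dma_ring,
                },
        };

        int main(void)
        {
                int i;

                /* callers now dereference a pointer (->) instead of a member (.) */
                for (i = 0; i < NUM_RINGS; i++)
                        si_like_asic.ring[i]->ring_test();
                return 0;
        }

Besides removing roughly 400 duplicated lines per ASIC table, the shared tables make it harder for the per-index copies to drift apart when a callback changes.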
1933static struct radeon_asic_ring ci_gfx_ring = {
1934 .ib_execute = &cik_ring_ib_execute,
1935 .ib_parse = &cik_ib_parse,
1936 .emit_fence = &cik_fence_gfx_ring_emit,
1937 .emit_semaphore = &cik_semaphore_ring_emit,
1938 .cs_parse = NULL,
1939 .ring_test = &cik_ring_test,
1940 .ib_test = &cik_ib_test,
1941 .is_lockup = &cik_gfx_is_lockup,
1942 .vm_flush = &cik_vm_flush,
1943 .get_rptr = &radeon_ring_generic_get_rptr,
1944 .get_wptr = &radeon_ring_generic_get_wptr,
1945 .set_wptr = &radeon_ring_generic_set_wptr,
1946};
1947
1948static struct radeon_asic_ring ci_cp_ring = {
1949 .ib_execute = &cik_ring_ib_execute,
1950 .ib_parse = &cik_ib_parse,
1951 .emit_fence = &cik_fence_compute_ring_emit,
1952 .emit_semaphore = &cik_semaphore_ring_emit,
1953 .cs_parse = NULL,
1954 .ring_test = &cik_ring_test,
1955 .ib_test = &cik_ib_test,
1956 .is_lockup = &cik_gfx_is_lockup,
1957 .vm_flush = &cik_vm_flush,
1958 .get_rptr = &cik_compute_ring_get_rptr,
1959 .get_wptr = &cik_compute_ring_get_wptr,
1960 .set_wptr = &cik_compute_ring_set_wptr,
1961};
1962
1963static struct radeon_asic_ring ci_dma_ring = {
1964 .ib_execute = &cik_sdma_ring_ib_execute,
1965 .ib_parse = &cik_ib_parse,
1966 .emit_fence = &cik_sdma_fence_ring_emit,
1967 .emit_semaphore = &cik_sdma_semaphore_ring_emit,
1968 .cs_parse = NULL,
1969 .ring_test = &cik_sdma_ring_test,
1970 .ib_test = &cik_sdma_ib_test,
1971 .is_lockup = &cik_sdma_is_lockup,
1972 .vm_flush = &cik_dma_vm_flush,
1973 .get_rptr = &r600_dma_get_rptr,
1974 .get_wptr = &r600_dma_get_wptr,
1975 .set_wptr = &r600_dma_set_wptr,
1976};
1977
2308static struct radeon_asic ci_asic = { 1978static struct radeon_asic ci_asic = {
2309 .init = &cik_init, 1979 .init = &cik_init,
2310 .fini = &cik_fini, 1980 .fini = &cik_fini,
@@ -2328,88 +1998,12 @@ static struct radeon_asic ci_asic = {
2328 .set_page = &cik_vm_set_page, 1998 .set_page = &cik_vm_set_page,
2329 }, 1999 },
2330 .ring = { 2000 .ring = {
2331 [RADEON_RING_TYPE_GFX_INDEX] = { 2001 [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
2332 .ib_execute = &cik_ring_ib_execute, 2002 [CAYMAN_RING_TYPE_CP1_INDEX] = &ci_cp_ring,
2333 .ib_parse = &cik_ib_parse, 2003 [CAYMAN_RING_TYPE_CP2_INDEX] = &ci_cp_ring,
2334 .emit_fence = &cik_fence_gfx_ring_emit, 2004 [R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring,
2335 .emit_semaphore = &cik_semaphore_ring_emit, 2005 [CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring,
2336 .cs_parse = NULL, 2006 [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
2337 .ring_test = &cik_ring_test,
2338 .ib_test = &cik_ib_test,
2339 .is_lockup = &cik_gfx_is_lockup,
2340 .vm_flush = &cik_vm_flush,
2341 .get_rptr = &radeon_ring_generic_get_rptr,
2342 .get_wptr = &radeon_ring_generic_get_wptr,
2343 .set_wptr = &radeon_ring_generic_set_wptr,
2344 },
2345 [CAYMAN_RING_TYPE_CP1_INDEX] = {
2346 .ib_execute = &cik_ring_ib_execute,
2347 .ib_parse = &cik_ib_parse,
2348 .emit_fence = &cik_fence_compute_ring_emit,
2349 .emit_semaphore = &cik_semaphore_ring_emit,
2350 .cs_parse = NULL,
2351 .ring_test = &cik_ring_test,
2352 .ib_test = &cik_ib_test,
2353 .is_lockup = &cik_gfx_is_lockup,
2354 .vm_flush = &cik_vm_flush,
2355 .get_rptr = &cik_compute_ring_get_rptr,
2356 .get_wptr = &cik_compute_ring_get_wptr,
2357 .set_wptr = &cik_compute_ring_set_wptr,
2358 },
2359 [CAYMAN_RING_TYPE_CP2_INDEX] = {
2360 .ib_execute = &cik_ring_ib_execute,
2361 .ib_parse = &cik_ib_parse,
2362 .emit_fence = &cik_fence_compute_ring_emit,
2363 .emit_semaphore = &cik_semaphore_ring_emit,
2364 .cs_parse = NULL,
2365 .ring_test = &cik_ring_test,
2366 .ib_test = &cik_ib_test,
2367 .is_lockup = &cik_gfx_is_lockup,
2368 .vm_flush = &cik_vm_flush,
2369 .get_rptr = &cik_compute_ring_get_rptr,
2370 .get_wptr = &cik_compute_ring_get_wptr,
2371 .set_wptr = &cik_compute_ring_set_wptr,
2372 },
2373 [R600_RING_TYPE_DMA_INDEX] = {
2374 .ib_execute = &cik_sdma_ring_ib_execute,
2375 .ib_parse = &cik_ib_parse,
2376 .emit_fence = &cik_sdma_fence_ring_emit,
2377 .emit_semaphore = &cik_sdma_semaphore_ring_emit,
2378 .cs_parse = NULL,
2379 .ring_test = &cik_sdma_ring_test,
2380 .ib_test = &cik_sdma_ib_test,
2381 .is_lockup = &cik_sdma_is_lockup,
2382 .vm_flush = &cik_dma_vm_flush,
2383 .get_rptr = &radeon_ring_generic_get_rptr,
2384 .get_wptr = &radeon_ring_generic_get_wptr,
2385 .set_wptr = &radeon_ring_generic_set_wptr,
2386 },
2387 [CAYMAN_RING_TYPE_DMA1_INDEX] = {
2388 .ib_execute = &cik_sdma_ring_ib_execute,
2389 .ib_parse = &cik_ib_parse,
2390 .emit_fence = &cik_sdma_fence_ring_emit,
2391 .emit_semaphore = &cik_sdma_semaphore_ring_emit,
2392 .cs_parse = NULL,
2393 .ring_test = &cik_sdma_ring_test,
2394 .ib_test = &cik_sdma_ib_test,
2395 .is_lockup = &cik_sdma_is_lockup,
2396 .vm_flush = &cik_dma_vm_flush,
2397 .get_rptr = &radeon_ring_generic_get_rptr,
2398 .get_wptr = &radeon_ring_generic_get_wptr,
2399 .set_wptr = &radeon_ring_generic_set_wptr,
2400 },
2401 [R600_RING_TYPE_UVD_INDEX] = {
2402 .ib_execute = &r600_uvd_ib_execute,
2403 .emit_fence = &r600_uvd_fence_emit,
2404 .emit_semaphore = &cayman_uvd_semaphore_emit,
2405 .cs_parse = &radeon_uvd_cs_parse,
2406 .ring_test = &r600_uvd_ring_test,
2407 .ib_test = &r600_uvd_ib_test,
2408 .is_lockup = &radeon_ring_test_lockup,
2409 .get_rptr = &radeon_ring_generic_get_rptr,
2410 .get_wptr = &radeon_ring_generic_get_wptr,
2411 .set_wptr = &radeon_ring_generic_set_wptr,
2412 }
2413 }, 2007 },
2414 .irq = { 2008 .irq = {
2415 .set = &cik_irq_set, 2009 .set = &cik_irq_set,
@@ -2419,6 +2013,8 @@ static struct radeon_asic ci_asic = {
2419 .bandwidth_update = &dce8_bandwidth_update, 2013 .bandwidth_update = &dce8_bandwidth_update,
2420 .get_vblank_counter = &evergreen_get_vblank_counter, 2014 .get_vblank_counter = &evergreen_get_vblank_counter,
2421 .wait_for_vblank = &dce4_wait_for_vblank, 2015 .wait_for_vblank = &dce4_wait_for_vblank,
2016 .hdmi_enable = &evergreen_hdmi_enable,
2017 .hdmi_setmode = &evergreen_hdmi_setmode,
2422 }, 2018 },
2423 .copy = { 2019 .copy = {
2424 .blit = NULL, 2020 .blit = NULL,
@@ -2452,6 +2048,25 @@ static struct radeon_asic ci_asic = {
2452 .set_pcie_lanes = NULL, 2048 .set_pcie_lanes = NULL,
2453 .set_clock_gating = NULL, 2049 .set_clock_gating = NULL,
2454 .set_uvd_clocks = &cik_set_uvd_clocks, 2050 .set_uvd_clocks = &cik_set_uvd_clocks,
2051 .get_temperature = &ci_get_temp,
2052 },
2053 .dpm = {
2054 .init = &ci_dpm_init,
2055 .setup_asic = &ci_dpm_setup_asic,
2056 .enable = &ci_dpm_enable,
2057 .disable = &ci_dpm_disable,
2058 .pre_set_power_state = &ci_dpm_pre_set_power_state,
2059 .set_power_state = &ci_dpm_set_power_state,
2060 .post_set_power_state = &ci_dpm_post_set_power_state,
2061 .display_configuration_changed = &ci_dpm_display_configuration_changed,
2062 .fini = &ci_dpm_fini,
2063 .get_sclk = &ci_dpm_get_sclk,
2064 .get_mclk = &ci_dpm_get_mclk,
2065 .print_power_state = &ci_dpm_print_power_state,
2066 .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
2067 .force_performance_level = &ci_dpm_force_performance_level,
2068 .vblank_too_short = &ci_dpm_vblank_too_short,
2069 .powergate_uvd = &ci_dpm_powergate_uvd,
2455 }, 2070 },
2456 .pflip = { 2071 .pflip = {
2457 .pre_page_flip = &evergreen_pre_page_flip, 2072 .pre_page_flip = &evergreen_pre_page_flip,
@@ -2483,88 +2098,12 @@ static struct radeon_asic kv_asic = {
2483 .set_page = &cik_vm_set_page, 2098 .set_page = &cik_vm_set_page,
2484 }, 2099 },
2485 .ring = { 2100 .ring = {
2486 [RADEON_RING_TYPE_GFX_INDEX] = { 2101 [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
2487 .ib_execute = &cik_ring_ib_execute, 2102 [CAYMAN_RING_TYPE_CP1_INDEX] = &ci_cp_ring,
2488 .ib_parse = &cik_ib_parse, 2103 [CAYMAN_RING_TYPE_CP2_INDEX] = &ci_cp_ring,
2489 .emit_fence = &cik_fence_gfx_ring_emit, 2104 [R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring,
2490 .emit_semaphore = &cik_semaphore_ring_emit, 2105 [CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring,
2491 .cs_parse = NULL, 2106 [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
2492 .ring_test = &cik_ring_test,
2493 .ib_test = &cik_ib_test,
2494 .is_lockup = &cik_gfx_is_lockup,
2495 .vm_flush = &cik_vm_flush,
2496 .get_rptr = &radeon_ring_generic_get_rptr,
2497 .get_wptr = &radeon_ring_generic_get_wptr,
2498 .set_wptr = &radeon_ring_generic_set_wptr,
2499 },
2500 [CAYMAN_RING_TYPE_CP1_INDEX] = {
2501 .ib_execute = &cik_ring_ib_execute,
2502 .ib_parse = &cik_ib_parse,
2503 .emit_fence = &cik_fence_compute_ring_emit,
2504 .emit_semaphore = &cik_semaphore_ring_emit,
2505 .cs_parse = NULL,
2506 .ring_test = &cik_ring_test,
2507 .ib_test = &cik_ib_test,
2508 .is_lockup = &cik_gfx_is_lockup,
2509 .vm_flush = &cik_vm_flush,
2510 .get_rptr = &cik_compute_ring_get_rptr,
2511 .get_wptr = &cik_compute_ring_get_wptr,
2512 .set_wptr = &cik_compute_ring_set_wptr,
2513 },
2514 [CAYMAN_RING_TYPE_CP2_INDEX] = {
2515 .ib_execute = &cik_ring_ib_execute,
2516 .ib_parse = &cik_ib_parse,
2517 .emit_fence = &cik_fence_compute_ring_emit,
2518 .emit_semaphore = &cik_semaphore_ring_emit,
2519 .cs_parse = NULL,
2520 .ring_test = &cik_ring_test,
2521 .ib_test = &cik_ib_test,
2522 .is_lockup = &cik_gfx_is_lockup,
2523 .vm_flush = &cik_vm_flush,
2524 .get_rptr = &cik_compute_ring_get_rptr,
2525 .get_wptr = &cik_compute_ring_get_wptr,
2526 .set_wptr = &cik_compute_ring_set_wptr,
2527 },
2528 [R600_RING_TYPE_DMA_INDEX] = {
2529 .ib_execute = &cik_sdma_ring_ib_execute,
2530 .ib_parse = &cik_ib_parse,
2531 .emit_fence = &cik_sdma_fence_ring_emit,
2532 .emit_semaphore = &cik_sdma_semaphore_ring_emit,
2533 .cs_parse = NULL,
2534 .ring_test = &cik_sdma_ring_test,
2535 .ib_test = &cik_sdma_ib_test,
2536 .is_lockup = &cik_sdma_is_lockup,
2537 .vm_flush = &cik_dma_vm_flush,
2538 .get_rptr = &radeon_ring_generic_get_rptr,
2539 .get_wptr = &radeon_ring_generic_get_wptr,
2540 .set_wptr = &radeon_ring_generic_set_wptr,
2541 },
2542 [CAYMAN_RING_TYPE_DMA1_INDEX] = {
2543 .ib_execute = &cik_sdma_ring_ib_execute,
2544 .ib_parse = &cik_ib_parse,
2545 .emit_fence = &cik_sdma_fence_ring_emit,
2546 .emit_semaphore = &cik_sdma_semaphore_ring_emit,
2547 .cs_parse = NULL,
2548 .ring_test = &cik_sdma_ring_test,
2549 .ib_test = &cik_sdma_ib_test,
2550 .is_lockup = &cik_sdma_is_lockup,
2551 .vm_flush = &cik_dma_vm_flush,
2552 .get_rptr = &radeon_ring_generic_get_rptr,
2553 .get_wptr = &radeon_ring_generic_get_wptr,
2554 .set_wptr = &radeon_ring_generic_set_wptr,
2555 },
2556 [R600_RING_TYPE_UVD_INDEX] = {
2557 .ib_execute = &r600_uvd_ib_execute,
2558 .emit_fence = &r600_uvd_fence_emit,
2559 .emit_semaphore = &cayman_uvd_semaphore_emit,
2560 .cs_parse = &radeon_uvd_cs_parse,
2561 .ring_test = &r600_uvd_ring_test,
2562 .ib_test = &r600_uvd_ib_test,
2563 .is_lockup = &radeon_ring_test_lockup,
2564 .get_rptr = &radeon_ring_generic_get_rptr,
2565 .get_wptr = &radeon_ring_generic_get_wptr,
2566 .set_wptr = &radeon_ring_generic_set_wptr,
2567 }
2568 }, 2107 },
2569 .irq = { 2108 .irq = {
2570 .set = &cik_irq_set, 2109 .set = &cik_irq_set,
@@ -2574,6 +2113,8 @@ static struct radeon_asic kv_asic = {
2574 .bandwidth_update = &dce8_bandwidth_update, 2113 .bandwidth_update = &dce8_bandwidth_update,
2575 .get_vblank_counter = &evergreen_get_vblank_counter, 2114 .get_vblank_counter = &evergreen_get_vblank_counter,
2576 .wait_for_vblank = &dce4_wait_for_vblank, 2115 .wait_for_vblank = &dce4_wait_for_vblank,
2116 .hdmi_enable = &evergreen_hdmi_enable,
2117 .hdmi_setmode = &evergreen_hdmi_setmode,
2577 }, 2118 },
2578 .copy = { 2119 .copy = {
2579 .blit = NULL, 2120 .blit = NULL,
@@ -2607,6 +2148,24 @@ static struct radeon_asic kv_asic = {
2607 .set_pcie_lanes = NULL, 2148 .set_pcie_lanes = NULL,
2608 .set_clock_gating = NULL, 2149 .set_clock_gating = NULL,
2609 .set_uvd_clocks = &cik_set_uvd_clocks, 2150 .set_uvd_clocks = &cik_set_uvd_clocks,
2151 .get_temperature = &kv_get_temp,
2152 },
2153 .dpm = {
2154 .init = &kv_dpm_init,
2155 .setup_asic = &kv_dpm_setup_asic,
2156 .enable = &kv_dpm_enable,
2157 .disable = &kv_dpm_disable,
2158 .pre_set_power_state = &kv_dpm_pre_set_power_state,
2159 .set_power_state = &kv_dpm_set_power_state,
2160 .post_set_power_state = &kv_dpm_post_set_power_state,
2161 .display_configuration_changed = &kv_dpm_display_configuration_changed,
2162 .fini = &kv_dpm_fini,
2163 .get_sclk = &kv_dpm_get_sclk,
2164 .get_mclk = &kv_dpm_get_mclk,
2165 .print_power_state = &kv_dpm_print_power_state,
2166 .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
2167 .force_performance_level = &kv_dpm_force_performance_level,
2168 .powergate_uvd = &kv_dpm_powergate_uvd,
2610 }, 2169 },
2611 .pflip = { 2170 .pflip = {
2612 .pre_page_flip = &evergreen_pre_page_flip, 2171 .pre_page_flip = &evergreen_pre_page_flip,
@@ -2776,19 +2335,188 @@ int radeon_asic_init(struct radeon_device *rdev)
2776 rdev->has_uvd = false; 2335 rdev->has_uvd = false;
2777 else 2336 else
2778 rdev->has_uvd = true; 2337 rdev->has_uvd = true;
2338 switch (rdev->family) {
2339 case CHIP_TAHITI:
2340 rdev->cg_flags =
2341 RADEON_CG_SUPPORT_GFX_MGCG |
2342 RADEON_CG_SUPPORT_GFX_MGLS |
2343 /*RADEON_CG_SUPPORT_GFX_CGCG |*/
2344 RADEON_CG_SUPPORT_GFX_CGLS |
2345 RADEON_CG_SUPPORT_GFX_CGTS |
2346 RADEON_CG_SUPPORT_GFX_CP_LS |
2347 RADEON_CG_SUPPORT_MC_MGCG |
2348 RADEON_CG_SUPPORT_SDMA_MGCG |
2349 RADEON_CG_SUPPORT_BIF_LS |
2350 RADEON_CG_SUPPORT_VCE_MGCG |
2351 RADEON_CG_SUPPORT_UVD_MGCG |
2352 RADEON_CG_SUPPORT_HDP_LS |
2353 RADEON_CG_SUPPORT_HDP_MGCG;
2354 rdev->pg_flags = 0;
2355 break;
2356 case CHIP_PITCAIRN:
2357 rdev->cg_flags =
2358 RADEON_CG_SUPPORT_GFX_MGCG |
2359 RADEON_CG_SUPPORT_GFX_MGLS |
2360 /*RADEON_CG_SUPPORT_GFX_CGCG |*/
2361 RADEON_CG_SUPPORT_GFX_CGLS |
2362 RADEON_CG_SUPPORT_GFX_CGTS |
2363 RADEON_CG_SUPPORT_GFX_CP_LS |
2364 RADEON_CG_SUPPORT_GFX_RLC_LS |
2365 RADEON_CG_SUPPORT_MC_LS |
2366 RADEON_CG_SUPPORT_MC_MGCG |
2367 RADEON_CG_SUPPORT_SDMA_MGCG |
2368 RADEON_CG_SUPPORT_BIF_LS |
2369 RADEON_CG_SUPPORT_VCE_MGCG |
2370 RADEON_CG_SUPPORT_UVD_MGCG |
2371 RADEON_CG_SUPPORT_HDP_LS |
2372 RADEON_CG_SUPPORT_HDP_MGCG;
2373 rdev->pg_flags = 0;
2374 break;
2375 case CHIP_VERDE:
2376 rdev->cg_flags =
2377 RADEON_CG_SUPPORT_GFX_MGCG |
2378 RADEON_CG_SUPPORT_GFX_MGLS |
2379 /*RADEON_CG_SUPPORT_GFX_CGCG |*/
2380 RADEON_CG_SUPPORT_GFX_CGLS |
2381 RADEON_CG_SUPPORT_GFX_CGTS |
2382 RADEON_CG_SUPPORT_GFX_CP_LS |
2383 RADEON_CG_SUPPORT_GFX_RLC_LS |
2384 RADEON_CG_SUPPORT_MC_LS |
2385 RADEON_CG_SUPPORT_MC_MGCG |
2386 RADEON_CG_SUPPORT_SDMA_MGCG |
2387 RADEON_CG_SUPPORT_BIF_LS |
2388 RADEON_CG_SUPPORT_VCE_MGCG |
2389 RADEON_CG_SUPPORT_UVD_MGCG |
2390 RADEON_CG_SUPPORT_HDP_LS |
2391 RADEON_CG_SUPPORT_HDP_MGCG;
2392 rdev->pg_flags = 0 |
2393 /*RADEON_PG_SUPPORT_GFX_CG | */
2394 RADEON_PG_SUPPORT_SDMA;
2395 break;
2396 case CHIP_OLAND:
2397 rdev->cg_flags =
2398 RADEON_CG_SUPPORT_GFX_MGCG |
2399 RADEON_CG_SUPPORT_GFX_MGLS |
2400 /*RADEON_CG_SUPPORT_GFX_CGCG |*/
2401 RADEON_CG_SUPPORT_GFX_CGLS |
2402 RADEON_CG_SUPPORT_GFX_CGTS |
2403 RADEON_CG_SUPPORT_GFX_CP_LS |
2404 RADEON_CG_SUPPORT_GFX_RLC_LS |
2405 RADEON_CG_SUPPORT_MC_LS |
2406 RADEON_CG_SUPPORT_MC_MGCG |
2407 RADEON_CG_SUPPORT_SDMA_MGCG |
2408 RADEON_CG_SUPPORT_BIF_LS |
2409 RADEON_CG_SUPPORT_UVD_MGCG |
2410 RADEON_CG_SUPPORT_HDP_LS |
2411 RADEON_CG_SUPPORT_HDP_MGCG;
2412 rdev->pg_flags = 0;
2413 break;
2414 case CHIP_HAINAN:
2415 rdev->cg_flags =
2416 RADEON_CG_SUPPORT_GFX_MGCG |
2417 RADEON_CG_SUPPORT_GFX_MGLS |
2418 /*RADEON_CG_SUPPORT_GFX_CGCG |*/
2419 RADEON_CG_SUPPORT_GFX_CGLS |
2420 RADEON_CG_SUPPORT_GFX_CGTS |
2421 RADEON_CG_SUPPORT_GFX_CP_LS |
2422 RADEON_CG_SUPPORT_GFX_RLC_LS |
2423 RADEON_CG_SUPPORT_MC_LS |
2424 RADEON_CG_SUPPORT_MC_MGCG |
2425 RADEON_CG_SUPPORT_SDMA_MGCG |
2426 RADEON_CG_SUPPORT_BIF_LS |
2427 RADEON_CG_SUPPORT_HDP_LS |
2428 RADEON_CG_SUPPORT_HDP_MGCG;
2429 rdev->pg_flags = 0;
2430 break;
2431 default:
2432 rdev->cg_flags = 0;
2433 rdev->pg_flags = 0;
2434 break;
2435 }
2779 break; 2436 break;
2780 case CHIP_BONAIRE: 2437 case CHIP_BONAIRE:
2781 rdev->asic = &ci_asic; 2438 rdev->asic = &ci_asic;
2782 rdev->num_crtc = 6; 2439 rdev->num_crtc = 6;
2440 rdev->has_uvd = true;
2441 rdev->cg_flags =
2442 RADEON_CG_SUPPORT_GFX_MGCG |
2443 RADEON_CG_SUPPORT_GFX_MGLS |
2444 /*RADEON_CG_SUPPORT_GFX_CGCG |*/
2445 RADEON_CG_SUPPORT_GFX_CGLS |
2446 RADEON_CG_SUPPORT_GFX_CGTS |
2447 RADEON_CG_SUPPORT_GFX_CGTS_LS |
2448 RADEON_CG_SUPPORT_GFX_CP_LS |
2449 RADEON_CG_SUPPORT_MC_LS |
2450 RADEON_CG_SUPPORT_MC_MGCG |
2451 RADEON_CG_SUPPORT_SDMA_MGCG |
2452 RADEON_CG_SUPPORT_SDMA_LS |
2453 RADEON_CG_SUPPORT_BIF_LS |
2454 RADEON_CG_SUPPORT_VCE_MGCG |
2455 RADEON_CG_SUPPORT_UVD_MGCG |
2456 RADEON_CG_SUPPORT_HDP_LS |
2457 RADEON_CG_SUPPORT_HDP_MGCG;
2458 rdev->pg_flags = 0;
2783 break; 2459 break;
2784 case CHIP_KAVERI: 2460 case CHIP_KAVERI:
2785 case CHIP_KABINI: 2461 case CHIP_KABINI:
2786 rdev->asic = &kv_asic; 2462 rdev->asic = &kv_asic;
2787 /* set num crtcs */ 2463 /* set num crtcs */
2788 if (rdev->family == CHIP_KAVERI) 2464 if (rdev->family == CHIP_KAVERI) {
2789 rdev->num_crtc = 4; 2465 rdev->num_crtc = 4;
2790 else 2466 rdev->cg_flags =
2467 RADEON_CG_SUPPORT_GFX_MGCG |
2468 RADEON_CG_SUPPORT_GFX_MGLS |
2469 /*RADEON_CG_SUPPORT_GFX_CGCG |*/
2470 RADEON_CG_SUPPORT_GFX_CGLS |
2471 RADEON_CG_SUPPORT_GFX_CGTS |
2472 RADEON_CG_SUPPORT_GFX_CGTS_LS |
2473 RADEON_CG_SUPPORT_GFX_CP_LS |
2474 RADEON_CG_SUPPORT_SDMA_MGCG |
2475 RADEON_CG_SUPPORT_SDMA_LS |
2476 RADEON_CG_SUPPORT_BIF_LS |
2477 RADEON_CG_SUPPORT_VCE_MGCG |
2478 RADEON_CG_SUPPORT_UVD_MGCG |
2479 RADEON_CG_SUPPORT_HDP_LS |
2480 RADEON_CG_SUPPORT_HDP_MGCG;
2481 rdev->pg_flags = 0;
2482 /*RADEON_PG_SUPPORT_GFX_CG |
2483 RADEON_PG_SUPPORT_GFX_SMG |
2484 RADEON_PG_SUPPORT_GFX_DMG |
2485 RADEON_PG_SUPPORT_UVD |
2486 RADEON_PG_SUPPORT_VCE |
2487 RADEON_PG_SUPPORT_CP |
2488 RADEON_PG_SUPPORT_GDS |
2489 RADEON_PG_SUPPORT_RLC_SMU_HS |
2490 RADEON_PG_SUPPORT_ACP |
2491 RADEON_PG_SUPPORT_SAMU;*/
2492 } else {
2791 rdev->num_crtc = 2; 2493 rdev->num_crtc = 2;
2494 rdev->cg_flags =
2495 RADEON_CG_SUPPORT_GFX_MGCG |
2496 RADEON_CG_SUPPORT_GFX_MGLS |
2497 /*RADEON_CG_SUPPORT_GFX_CGCG |*/
2498 RADEON_CG_SUPPORT_GFX_CGLS |
2499 RADEON_CG_SUPPORT_GFX_CGTS |
2500 RADEON_CG_SUPPORT_GFX_CGTS_LS |
2501 RADEON_CG_SUPPORT_GFX_CP_LS |
2502 RADEON_CG_SUPPORT_SDMA_MGCG |
2503 RADEON_CG_SUPPORT_SDMA_LS |
2504 RADEON_CG_SUPPORT_BIF_LS |
2505 RADEON_CG_SUPPORT_VCE_MGCG |
2506 RADEON_CG_SUPPORT_UVD_MGCG |
2507 RADEON_CG_SUPPORT_HDP_LS |
2508 RADEON_CG_SUPPORT_HDP_MGCG;
2509 rdev->pg_flags = 0;
2510 /*RADEON_PG_SUPPORT_GFX_CG |
2511 RADEON_PG_SUPPORT_GFX_SMG |
2512 RADEON_PG_SUPPORT_UVD |
2513 RADEON_PG_SUPPORT_VCE |
2514 RADEON_PG_SUPPORT_CP |
2515 RADEON_PG_SUPPORT_GDS |
2516 RADEON_PG_SUPPORT_RLC_SMU_HS |
2517 RADEON_PG_SUPPORT_SAMU;*/
2518 }
2519 rdev->has_uvd = true;
2792 break; 2520 break;
2793 default: 2521 default:
2794 /* FIXME: not supported yet */ 2522 /* FIXME: not supported yet */
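The radeon_asic_init() additions above also seed per-family clock-gating (cg_flags) and power-gating (pg_flags) capability masks, OR-ing together only the features each chip supports and falling back to 0 for unknown families. A small sketch of the same pattern; the flag names and families below are invented for illustration and are not the kernel's RADEON_CG_SUPPORT_*/RADEON_PG_SUPPORT_* constants.

        #include <stdio.h>
        #include <stdint.h>

        /* illustrative capability bits only */
        #define CG_GFX_MGCG  (1u << 0)
        #define CG_MC_LS     (1u << 1)
        #define CG_SDMA_MGCG (1u << 2)
        #define CG_UVD_MGCG  (1u << 3)

        enum chip_family { CHIP_A, CHIP_B, CHIP_UNKNOWN };

        struct device_caps {
                uint32_t cg_flags;
                uint32_t pg_flags;
        };

        /* pick the supported-feature mask per family, defaulting to "nothing" */
        static void init_caps(struct device_caps *caps, enum chip_family family)
        {
                switch (family) {
                case CHIP_A:
                        caps->cg_flags = CG_GFX_MGCG | CG_SDMA_MGCG | CG_UVD_MGCG;
                        caps->pg_flags = 0;
                        break;
                case CHIP_B:
                        caps->cg_flags = CG_GFX_MGCG | CG_MC_LS | CG_SDMA_MGCG;
                        caps->pg_flags = 0;
                        break;
                default:
                        caps->cg_flags = 0;
                        caps->pg_flags = 0;
                        break;
                }
        }

        int main(void)
        {
                struct device_caps caps;

                init_caps(&caps, CHIP_A);
                /* later code tests individual bits before touching a feature */
                if (caps.cg_flags & CG_UVD_MGCG)
                        puts("UVD clockgating supported on this family");
                return 0;
        }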
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 3d61d5aac18f..818bbe6b884b 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -336,10 +336,6 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
336void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 336void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
337int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); 337int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
338int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); 338int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
339int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
340int r600_copy_blit(struct radeon_device *rdev,
341 uint64_t src_offset, uint64_t dst_offset,
342 unsigned num_gpu_pages, struct radeon_fence **fence);
343int r600_copy_cpdma(struct radeon_device *rdev, 339int r600_copy_cpdma(struct radeon_device *rdev,
344 uint64_t src_offset, uint64_t dst_offset, 340 uint64_t src_offset, uint64_t dst_offset,
345 unsigned num_gpu_pages, struct radeon_fence **fence); 341 unsigned num_gpu_pages, struct radeon_fence **fence);
@@ -371,8 +367,6 @@ int r600_count_pipe_bits(uint32_t val);
371int r600_mc_wait_for_idle(struct radeon_device *rdev); 367int r600_mc_wait_for_idle(struct radeon_device *rdev);
372int r600_pcie_gart_init(struct radeon_device *rdev); 368int r600_pcie_gart_init(struct radeon_device *rdev);
373void r600_scratch_init(struct radeon_device *rdev); 369void r600_scratch_init(struct radeon_device *rdev);
374int r600_blit_init(struct radeon_device *rdev);
375void r600_blit_fini(struct radeon_device *rdev);
376int r600_init_microcode(struct radeon_device *rdev); 370int r600_init_microcode(struct radeon_device *rdev);
377/* r600 irq */ 371/* r600 irq */
378int r600_irq_process(struct radeon_device *rdev); 372int r600_irq_process(struct radeon_device *rdev);
@@ -385,28 +379,25 @@ void r600_disable_interrupts(struct radeon_device *rdev);
385void r600_rlc_stop(struct radeon_device *rdev); 379void r600_rlc_stop(struct radeon_device *rdev);
386/* r600 audio */ 380/* r600 audio */
387int r600_audio_init(struct radeon_device *rdev); 381int r600_audio_init(struct radeon_device *rdev);
388struct r600_audio r600_audio_status(struct radeon_device *rdev); 382struct r600_audio_pin r600_audio_status(struct radeon_device *rdev);
389void r600_audio_fini(struct radeon_device *rdev); 383void r600_audio_fini(struct radeon_device *rdev);
390int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); 384int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
391void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); 385void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
392void r600_hdmi_enable(struct drm_encoder *encoder, bool enable); 386void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
393void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); 387void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
394/* r600 blit */
395int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
396 struct radeon_fence **fence, struct radeon_sa_bo **vb,
397 struct radeon_semaphore **sem);
398void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
399 struct radeon_sa_bo *vb, struct radeon_semaphore *sem);
400void r600_kms_blit_copy(struct radeon_device *rdev,
401 u64 src_gpu_addr, u64 dst_gpu_addr,
402 unsigned num_gpu_pages,
403 struct radeon_sa_bo *vb);
404int r600_mc_wait_for_idle(struct radeon_device *rdev); 388int r600_mc_wait_for_idle(struct radeon_device *rdev);
405u32 r600_get_xclk(struct radeon_device *rdev); 389u32 r600_get_xclk(struct radeon_device *rdev);
406uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev); 390uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
407int rv6xx_get_temp(struct radeon_device *rdev); 391int rv6xx_get_temp(struct radeon_device *rdev);
408int r600_dpm_pre_set_power_state(struct radeon_device *rdev); 392int r600_dpm_pre_set_power_state(struct radeon_device *rdev);
409void r600_dpm_post_set_power_state(struct radeon_device *rdev); 393void r600_dpm_post_set_power_state(struct radeon_device *rdev);
394/* r600 dma */
395uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
396 struct radeon_ring *ring);
397uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
398 struct radeon_ring *ring);
399void r600_dma_set_wptr(struct radeon_device *rdev,
400 struct radeon_ring *ring);
410/* rv6xx dpm */ 401/* rv6xx dpm */
411int rv6xx_dpm_init(struct radeon_device *rdev); 402int rv6xx_dpm_init(struct radeon_device *rdev);
412int rv6xx_dpm_enable(struct radeon_device *rdev); 403int rv6xx_dpm_enable(struct radeon_device *rdev);
@@ -438,19 +429,6 @@ void rs780_dpm_print_power_state(struct radeon_device *rdev,
438void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, 429void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
439 struct seq_file *m); 430 struct seq_file *m);
440 431
441/* uvd */
442int r600_uvd_init(struct radeon_device *rdev);
443int r600_uvd_rbc_start(struct radeon_device *rdev);
444void r600_uvd_stop(struct radeon_device *rdev);
445int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
446void r600_uvd_fence_emit(struct radeon_device *rdev,
447 struct radeon_fence *fence);
448void r600_uvd_semaphore_emit(struct radeon_device *rdev,
449 struct radeon_ring *ring,
450 struct radeon_semaphore *semaphore,
451 bool emit_wait);
452void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
453
454/* 432/*
455 * rv770,rv730,rv710,rv740 433 * rv770,rv730,rv710,rv740
456 */ 434 */
@@ -468,7 +446,6 @@ int rv770_copy_dma(struct radeon_device *rdev,
468 unsigned num_gpu_pages, 446 unsigned num_gpu_pages,
469 struct radeon_fence **fence); 447 struct radeon_fence **fence);
470u32 rv770_get_xclk(struct radeon_device *rdev); 448u32 rv770_get_xclk(struct radeon_device *rdev);
471int rv770_uvd_resume(struct radeon_device *rdev);
472int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); 449int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
473int rv770_get_temp(struct radeon_device *rdev); 450int rv770_get_temp(struct radeon_device *rdev);
474/* rv7xx pm */ 451/* rv7xx pm */
@@ -530,7 +507,6 @@ extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_ba
530extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); 507extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
531extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc); 508extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
532void evergreen_disable_interrupt_state(struct radeon_device *rdev); 509void evergreen_disable_interrupt_state(struct radeon_device *rdev);
533int evergreen_blit_init(struct radeon_device *rdev);
534int evergreen_mc_wait_for_idle(struct radeon_device *rdev); 510int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
535void evergreen_dma_fence_ring_emit(struct radeon_device *rdev, 511void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
536 struct radeon_fence *fence); 512 struct radeon_fence *fence);
@@ -652,6 +628,8 @@ int trinity_dpm_force_performance_level(struct radeon_device *rdev,
652 628
653/* DCE6 - SI */ 629/* DCE6 - SI */
654void dce6_bandwidth_update(struct radeon_device *rdev); 630void dce6_bandwidth_update(struct radeon_device *rdev);
631int dce6_audio_init(struct radeon_device *rdev);
632void dce6_audio_fini(struct radeon_device *rdev);
655 633
656/* 634/*
657 * si 635 * si
@@ -712,7 +690,6 @@ u32 cik_get_xclk(struct radeon_device *rdev);
712uint32_t cik_pciep_rreg(struct radeon_device *rdev, uint32_t reg); 690uint32_t cik_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
713void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 691void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
714int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); 692int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
715int cik_uvd_resume(struct radeon_device *rdev);
716void cik_sdma_fence_ring_emit(struct radeon_device *rdev, 693void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
717 struct radeon_fence *fence); 694 struct radeon_fence *fence);
718void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, 695void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
@@ -763,5 +740,81 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
763 struct radeon_ring *ring); 740 struct radeon_ring *ring);
764void cik_compute_ring_set_wptr(struct radeon_device *rdev, 741void cik_compute_ring_set_wptr(struct radeon_device *rdev,
765 struct radeon_ring *ring); 742 struct radeon_ring *ring);
743int ci_get_temp(struct radeon_device *rdev);
744int kv_get_temp(struct radeon_device *rdev);
745
746int ci_dpm_init(struct radeon_device *rdev);
747int ci_dpm_enable(struct radeon_device *rdev);
748void ci_dpm_disable(struct radeon_device *rdev);
749int ci_dpm_pre_set_power_state(struct radeon_device *rdev);
750int ci_dpm_set_power_state(struct radeon_device *rdev);
751void ci_dpm_post_set_power_state(struct radeon_device *rdev);
752void ci_dpm_setup_asic(struct radeon_device *rdev);
753void ci_dpm_display_configuration_changed(struct radeon_device *rdev);
754void ci_dpm_fini(struct radeon_device *rdev);
755u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low);
756u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low);
757void ci_dpm_print_power_state(struct radeon_device *rdev,
758 struct radeon_ps *ps);
759void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
760 struct seq_file *m);
761int ci_dpm_force_performance_level(struct radeon_device *rdev,
762 enum radeon_dpm_forced_level level);
763bool ci_dpm_vblank_too_short(struct radeon_device *rdev);
764void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
765
766int kv_dpm_init(struct radeon_device *rdev);
767int kv_dpm_enable(struct radeon_device *rdev);
768void kv_dpm_disable(struct radeon_device *rdev);
769int kv_dpm_pre_set_power_state(struct radeon_device *rdev);
770int kv_dpm_set_power_state(struct radeon_device *rdev);
771void kv_dpm_post_set_power_state(struct radeon_device *rdev);
772void kv_dpm_setup_asic(struct radeon_device *rdev);
773void kv_dpm_display_configuration_changed(struct radeon_device *rdev);
774void kv_dpm_fini(struct radeon_device *rdev);
775u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low);
776u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low);
777void kv_dpm_print_power_state(struct radeon_device *rdev,
778 struct radeon_ps *ps);
779void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
780 struct seq_file *m);
781int kv_dpm_force_performance_level(struct radeon_device *rdev,
782 enum radeon_dpm_forced_level level);
783void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
784
785/* uvd v1.0 */
786uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
787 struct radeon_ring *ring);
788uint32_t uvd_v1_0_get_wptr(struct radeon_device *rdev,
789 struct radeon_ring *ring);
790void uvd_v1_0_set_wptr(struct radeon_device *rdev,
791 struct radeon_ring *ring);
792
793int uvd_v1_0_init(struct radeon_device *rdev);
794void uvd_v1_0_fini(struct radeon_device *rdev);
795int uvd_v1_0_start(struct radeon_device *rdev);
796void uvd_v1_0_stop(struct radeon_device *rdev);
797
798int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
799int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
800void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
801 struct radeon_ring *ring,
802 struct radeon_semaphore *semaphore,
803 bool emit_wait);
804void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
805
806/* uvd v2.2 */
807int uvd_v2_2_resume(struct radeon_device *rdev);
808void uvd_v2_2_fence_emit(struct radeon_device *rdev,
809 struct radeon_fence *fence);
810
811/* uvd v3.1 */
812void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
813 struct radeon_ring *ring,
814 struct radeon_semaphore *semaphore,
815 bool emit_wait);
816
817/* uvd v4.2 */
818int uvd_v4_2_resume(struct radeon_device *rdev);
766 819
767#endif 820#endif
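The header changes above regroup the UVD helpers by hardware block revision (uvd_v1_0_*, uvd_v2_2_*, uvd_v3_1_*, uvd_v4_2_*) in place of the removed r600_uvd_*, rv770_uvd_resume and cik_uvd_resume prototypes, so each ASIC can combine whichever revisions its hardware actually carries. The following is a hedged, standalone sketch of composing such versioned callbacks into one ring description; the stub names and the field set are placeholders, not the real kernel interfaces.

        #include <stdio.h>

        /* stand-ins for versioned helpers like those declared in the header above */
        static int  uvd_v1_0_ring_test_stub(void)      { puts("v1.0 ring test"); return 0; }
        static void uvd_v2_2_fence_emit_stub(void)     { puts("v2.2 fence emit"); }
        static void uvd_v3_1_semaphore_emit_stub(void) { puts("v3.1 semaphore emit"); }

        /* simplified ring description, fields chosen for illustration only */
        struct uvd_ring_desc {
                int  (*ring_test)(void);
                void (*fence_emit)(void);
                void (*semaphore_emit)(void);
        };

        /* one ASIC generation picks whichever block revisions it has */
        static const struct uvd_ring_desc example_uvd_ring = {
                .ring_test      = uvd_v1_0_ring_test_stub,
                .fence_emit     = uvd_v2_2_fence_emit_stub,
                .semaphore_emit = uvd_v3_1_semaphore_emit_stub,
        };

        int main(void)
        {
                example_uvd_ring.ring_test();
                example_uvd_ring.fence_emit();
                example_uvd_ring.semaphore_emit();
                return 0;
        }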
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 4ccd61f60eb6..404e25d285ba 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -163,8 +163,8 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd
163 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 163 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
164 sizeof(ATOM_GPIO_I2C_ASSIGMENT); 164 sizeof(ATOM_GPIO_I2C_ASSIGMENT);
165 165
166 gpio = &i2c_info->asGPIO_Info[0];
166 for (i = 0; i < num_indices; i++) { 167 for (i = 0; i < num_indices; i++) {
167 gpio = &i2c_info->asGPIO_Info[i];
168 168
169 radeon_lookup_i2c_gpio_quirks(rdev, gpio, i); 169 radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
170 170
@@ -172,6 +172,8 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd
172 i2c = radeon_get_bus_rec_for_i2c_gpio(gpio); 172 i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
173 break; 173 break;
174 } 174 }
175 gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
176 ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
175 } 177 }
176 } 178 }
177 179
@@ -195,9 +197,8 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
195 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 197 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
196 sizeof(ATOM_GPIO_I2C_ASSIGMENT); 198 sizeof(ATOM_GPIO_I2C_ASSIGMENT);
197 199
200 gpio = &i2c_info->asGPIO_Info[0];
198 for (i = 0; i < num_indices; i++) { 201 for (i = 0; i < num_indices; i++) {
199 gpio = &i2c_info->asGPIO_Info[i];
200
201 radeon_lookup_i2c_gpio_quirks(rdev, gpio, i); 202 radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
202 203
203 i2c = radeon_get_bus_rec_for_i2c_gpio(gpio); 204 i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
@@ -206,12 +207,14 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
206 sprintf(stmp, "0x%x", i2c.i2c_id); 207 sprintf(stmp, "0x%x", i2c.i2c_id);
207 rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp); 208 rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
208 } 209 }
210 gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
211 ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
209 } 212 }
210 } 213 }
211} 214}
212 215
213static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev, 216static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
214 u8 id) 217 u8 id)
215{ 218{
216 struct atom_context *ctx = rdev->mode_info.atom_context; 219 struct atom_context *ctx = rdev->mode_info.atom_context;
217 struct radeon_gpio_rec gpio; 220 struct radeon_gpio_rec gpio;
@@ -230,8 +233,8 @@ static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
230 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 233 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
231 sizeof(ATOM_GPIO_PIN_ASSIGNMENT); 234 sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
232 235
236 pin = gpio_info->asGPIO_Pin;
233 for (i = 0; i < num_indices; i++) { 237 for (i = 0; i < num_indices; i++) {
234 pin = &gpio_info->asGPIO_Pin[i];
235 if (id == pin->ucGPIO_ID) { 238 if (id == pin->ucGPIO_ID) {
236 gpio.id = pin->ucGPIO_ID; 239 gpio.id = pin->ucGPIO_ID;
237 gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4; 240 gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
@@ -239,6 +242,8 @@ static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
239 gpio.valid = true; 242 gpio.valid = true;
240 break; 243 break;
241 } 244 }
245 pin = (ATOM_GPIO_PIN_ASSIGNMENT *)
246 ((u8 *)pin + sizeof(ATOM_GPIO_PIN_ASSIGNMENT));
242 } 247 }
243 } 248 }
244 249
@@ -711,13 +716,16 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
711 (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *) 716 (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
712 (ctx->bios + data_offset + 717 (ctx->bios + data_offset +
713 le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset)); 718 le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
719 u8 *num_dst_objs = (u8 *)
720 ((u8 *)router_src_dst_table + 1 +
721 (router_src_dst_table->ucNumberOfSrc * 2));
722 u16 *dst_objs = (u16 *)(num_dst_objs + 1);
714 int enum_id; 723 int enum_id;
715 724
716 router.router_id = router_obj_id; 725 router.router_id = router_obj_id;
717 for (enum_id = 0; enum_id < router_src_dst_table->ucNumberOfDst; 726 for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) {
718 enum_id++) {
719 if (le16_to_cpu(path->usConnObjectId) == 727 if (le16_to_cpu(path->usConnObjectId) ==
720 le16_to_cpu(router_src_dst_table->usDstObjectID[enum_id])) 728 le16_to_cpu(dst_objs[enum_id]))
721 break; 729 break;
722 } 730 }
723 731
@@ -1480,6 +1488,15 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1480 uint8_t frev, crev; 1488 uint8_t frev, crev;
1481 int i, num_indices; 1489 int i, num_indices;
1482 1490
1491 if (id == ASIC_INTERNAL_MEMORY_SS) {
1492 if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT))
1493 return false;
1494 }
1495 if (id == ASIC_INTERNAL_ENGINE_SS) {
1496 if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT))
1497 return false;
1498 }
1499
1483 memset(ss, 0, sizeof(struct radeon_atom_ss)); 1500 memset(ss, 0, sizeof(struct radeon_atom_ss));
1484 if (atom_parse_data_header(mode_info->atom_context, index, &size, 1501 if (atom_parse_data_header(mode_info->atom_context, index, &size,
1485 &frev, &crev, &data_offset)) { 1502 &frev, &crev, &data_offset)) {
@@ -1672,7 +1689,9 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
1672 kfree(edid); 1689 kfree(edid);
1673 } 1690 }
1674 } 1691 }
1675 record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD); 1692 record += fake_edid_record->ucFakeEDIDLength ?
1693 fake_edid_record->ucFakeEDIDLength + 2 :
1694 sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
1676 break; 1695 break;
1677 case LCD_PANEL_RESOLUTION_RECORD_TYPE: 1696 case LCD_PANEL_RESOLUTION_RECORD_TYPE:
1678 panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record; 1697 panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
@@ -2237,6 +2256,11 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
2237 (controller->ucFanParameters & 2256 (controller->ucFanParameters &
2238 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); 2257 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2239 rdev->pm.int_thermal_type = THERMAL_TYPE_CI; 2258 rdev->pm.int_thermal_type = THERMAL_TYPE_CI;
2259 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
2260 DRM_INFO("Internal thermal controller %s fan control\n",
2261 (controller->ucFanParameters &
2262 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2263 rdev->pm.int_thermal_type = THERMAL_TYPE_KV;
2240 } else if ((controller->ucType == 2264 } else if ((controller->ucType ==
2241 ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || 2265 ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
2242 (controller->ucType == 2266 (controller->ucType ==
@@ -3077,6 +3101,121 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev
3077 return radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage); 3101 return radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
3078} 3102}
3079 3103
3104int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
3105 u16 *leakage_id)
3106{
3107 union set_voltage args;
3108 int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
3109 u8 frev, crev;
3110
3111 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
3112 return -EINVAL;
3113
3114 switch (crev) {
3115 case 3:
3116 case 4:
3117 args.v3.ucVoltageType = 0;
3118 args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID;
3119 args.v3.usVoltageLevel = 0;
3120
3121 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
3122
3123 *leakage_id = le16_to_cpu(args.v3.usVoltageLevel);
3124 break;
3125 default:
3126 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
3127 return -EINVAL;
3128 }
3129
3130 return 0;
3131}
3132
3133int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev,
3134 u16 *vddc, u16 *vddci,
3135 u16 virtual_voltage_id,
3136 u16 vbios_voltage_id)
3137{
3138 int index = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
3139 u8 frev, crev;
3140 u16 data_offset, size;
3141 int i, j;
3142 ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
3143 u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
3144
3145 *vddc = 0;
3146 *vddci = 0;
3147
3148 if (!atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
3149 &frev, &crev, &data_offset))
3150 return -EINVAL;
3151
3152 profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
3153 (rdev->mode_info.atom_context->bios + data_offset);
3154
3155 switch (frev) {
3156 case 1:
3157 return -EINVAL;
3158 case 2:
3159 switch (crev) {
3160 case 1:
3161 if (size < sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))
3162 return -EINVAL;
3163 leakage_bin = (u16 *)
3164 (rdev->mode_info.atom_context->bios + data_offset +
3165 le16_to_cpu(profile->usLeakageBinArrayOffset));
3166 vddc_id_buf = (u16 *)
3167 (rdev->mode_info.atom_context->bios + data_offset +
3168 le16_to_cpu(profile->usElbVDDC_IdArrayOffset));
3169 vddc_buf = (u16 *)
3170 (rdev->mode_info.atom_context->bios + data_offset +
3171 le16_to_cpu(profile->usElbVDDC_LevelArrayOffset));
3172 vddci_id_buf = (u16 *)
3173 (rdev->mode_info.atom_context->bios + data_offset +
3174 le16_to_cpu(profile->usElbVDDCI_IdArrayOffset));
3175 vddci_buf = (u16 *)
3176 (rdev->mode_info.atom_context->bios + data_offset +
3177 le16_to_cpu(profile->usElbVDDCI_LevelArrayOffset));
3178
3179 if (profile->ucElbVDDC_Num > 0) {
3180 for (i = 0; i < profile->ucElbVDDC_Num; i++) {
3181 if (vddc_id_buf[i] == virtual_voltage_id) {
3182 for (j = 0; j < profile->ucLeakageBinNum; j++) {
3183 if (vbios_voltage_id <= leakage_bin[j]) {
3184 *vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
3185 break;
3186 }
3187 }
3188 break;
3189 }
3190 }
3191 }
3192 if (profile->ucElbVDDCI_Num > 0) {
3193 for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
3194 if (vddci_id_buf[i] == virtual_voltage_id) {
3195 for (j = 0; j < profile->ucLeakageBinNum; j++) {
3196 if (vbios_voltage_id <= leakage_bin[j]) {
3197 *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
3198 break;
3199 }
3200 }
3201 break;
3202 }
3203 }
3204 }
3205 break;
3206 default:
3207 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
3208 return -EINVAL;
3209 }
3210 break;
3211 default:
3212 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
3213 return -EINVAL;
3214 }
3215
3216 return 0;
3217}
3218
3080int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev, 3219int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
3081 u16 voltage_level, u8 voltage_type, 3220 u16 voltage_level, u8 voltage_type,
3082 u32 *gpio_value, u32 *gpio_mask) 3221 u32 *gpio_value, u32 *gpio_mask)
@@ -3279,10 +3418,11 @@ int radeon_atom_get_max_voltage(struct radeon_device *rdev,
3279 ATOM_VOLTAGE_FORMULA_V2 *formula = 3418 ATOM_VOLTAGE_FORMULA_V2 *formula =
3280 &voltage_object->v2.asFormula; 3419 &voltage_object->v2.asFormula;
3281 if (formula->ucNumOfVoltageEntries) { 3420 if (formula->ucNumOfVoltageEntries) {
3421 VOLTAGE_LUT_ENTRY *lut = (VOLTAGE_LUT_ENTRY *)
3422 ((u8 *)&formula->asVIDAdjustEntries[0] +
3423 (sizeof(VOLTAGE_LUT_ENTRY) * (formula->ucNumOfVoltageEntries - 1)));
3282 *max_voltage = 3424 *max_voltage =
3283 le16_to_cpu(formula->asVIDAdjustEntries[ 3425 le16_to_cpu(lut->usVoltageValue);
3284 formula->ucNumOfVoltageEntries - 1
3285 ].usVoltageValue);
3286 return 0; 3426 return 0;
3287 } 3427 }
3288 } 3428 }
@@ -3442,11 +3582,13 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev,
3442 if (voltage_object) { 3582 if (voltage_object) {
3443 ATOM_VOLTAGE_FORMULA_V2 *formula = 3583 ATOM_VOLTAGE_FORMULA_V2 *formula =
3444 &voltage_object->v2.asFormula; 3584 &voltage_object->v2.asFormula;
3585 VOLTAGE_LUT_ENTRY *lut;
3445 if (formula->ucNumOfVoltageEntries > MAX_VOLTAGE_ENTRIES) 3586 if (formula->ucNumOfVoltageEntries > MAX_VOLTAGE_ENTRIES)
3446 return -EINVAL; 3587 return -EINVAL;
3588 lut = &formula->asVIDAdjustEntries[0];
3447 for (i = 0; i < formula->ucNumOfVoltageEntries; i++) { 3589 for (i = 0; i < formula->ucNumOfVoltageEntries; i++) {
3448 voltage_table->entries[i].value = 3590 voltage_table->entries[i].value =
3449 le16_to_cpu(formula->asVIDAdjustEntries[i].usVoltageValue); 3591 le16_to_cpu(lut->usVoltageValue);
3450 ret = radeon_atom_get_voltage_gpio_settings(rdev, 3592 ret = radeon_atom_get_voltage_gpio_settings(rdev,
3451 voltage_table->entries[i].value, 3593 voltage_table->entries[i].value,
3452 voltage_type, 3594 voltage_type,
@@ -3454,6 +3596,8 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev,
3454 &voltage_table->mask_low); 3596 &voltage_table->mask_low);
3455 if (ret) 3597 if (ret)
3456 return ret; 3598 return ret;
3599 lut = (VOLTAGE_LUT_ENTRY *)
3600 ((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY));
3457 } 3601 }
3458 voltage_table->count = formula->ucNumOfVoltageEntries; 3602 voltage_table->count = formula->ucNumOfVoltageEntries;
3459 return 0; 3603 return 0;
@@ -3473,13 +3617,17 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev,
3473 if (voltage_object) { 3617 if (voltage_object) {
3474 ATOM_GPIO_VOLTAGE_OBJECT_V3 *gpio = 3618 ATOM_GPIO_VOLTAGE_OBJECT_V3 *gpio =
3475 &voltage_object->v3.asGpioVoltageObj; 3619 &voltage_object->v3.asGpioVoltageObj;
3620 VOLTAGE_LUT_ENTRY_V2 *lut;
3476 if (gpio->ucGpioEntryNum > MAX_VOLTAGE_ENTRIES) 3621 if (gpio->ucGpioEntryNum > MAX_VOLTAGE_ENTRIES)
3477 return -EINVAL; 3622 return -EINVAL;
3623 lut = &gpio->asVolGpioLut[0];
3478 for (i = 0; i < gpio->ucGpioEntryNum; i++) { 3624 for (i = 0; i < gpio->ucGpioEntryNum; i++) {
3479 voltage_table->entries[i].value = 3625 voltage_table->entries[i].value =
3480 le16_to_cpu(gpio->asVolGpioLut[i].usVoltageValue); 3626 le16_to_cpu(lut->usVoltageValue);
3481 voltage_table->entries[i].smio_low = 3627 voltage_table->entries[i].smio_low =
3482 le32_to_cpu(gpio->asVolGpioLut[i].ulVoltageId); 3628 le32_to_cpu(lut->ulVoltageId);
3629 lut = (VOLTAGE_LUT_ENTRY_V2 *)
3630 ((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY_V2));
3483 } 3631 }
3484 voltage_table->mask_low = le32_to_cpu(gpio->ulGpioMaskVal); 3632 voltage_table->mask_low = le32_to_cpu(gpio->ulGpioMaskVal);
3485 voltage_table->count = gpio->ucGpioEntryNum; 3633 voltage_table->count = gpio->ucGpioEntryNum;
@@ -3605,7 +3753,6 @@ int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
3605 union vram_info *vram_info; 3753 union vram_info *vram_info;
3606 u32 mem_timing_size = gddr5 ? 3754 u32 mem_timing_size = gddr5 ?
3607 sizeof(ATOM_MEMORY_TIMING_FORMAT_V2) : sizeof(ATOM_MEMORY_TIMING_FORMAT); 3755 sizeof(ATOM_MEMORY_TIMING_FORMAT_V2) : sizeof(ATOM_MEMORY_TIMING_FORMAT);
3608 u8 *p;
3609 3756
3610 memset(mclk_range_table, 0, sizeof(struct atom_memory_clock_range_table)); 3757 memset(mclk_range_table, 0, sizeof(struct atom_memory_clock_range_table));
3611 3758
@@ -3624,6 +3771,7 @@ int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
3624 if (module_index < vram_info->v1_4.ucNumOfVRAMModule) { 3771 if (module_index < vram_info->v1_4.ucNumOfVRAMModule) {
3625 ATOM_VRAM_MODULE_V4 *vram_module = 3772 ATOM_VRAM_MODULE_V4 *vram_module =
3626 (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo; 3773 (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo;
3774 ATOM_MEMORY_TIMING_FORMAT *format;
3627 3775
3628 for (i = 0; i < module_index; i++) { 3776 for (i = 0; i < module_index; i++) {
3629 if (le16_to_cpu(vram_module->usModuleSize) == 0) 3777 if (le16_to_cpu(vram_module->usModuleSize) == 0)
@@ -3634,11 +3782,11 @@ int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
3634 mclk_range_table->num_entries = (u8) 3782 mclk_range_table->num_entries = (u8)
3635 ((le16_to_cpu(vram_module->usModuleSize) - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) / 3783 ((le16_to_cpu(vram_module->usModuleSize) - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) /
3636 mem_timing_size); 3784 mem_timing_size);
3637 p = (u8 *)&vram_module->asMemTiming[0]; 3785 format = &vram_module->asMemTiming[0];
3638 for (i = 0; i < mclk_range_table->num_entries; i++) { 3786 for (i = 0; i < mclk_range_table->num_entries; i++) {
3639 ATOM_MEMORY_TIMING_FORMAT *format = (ATOM_MEMORY_TIMING_FORMAT *)p;
3640 mclk_range_table->mclk[i] = le32_to_cpu(format->ulClkRange); 3787 mclk_range_table->mclk[i] = le32_to_cpu(format->ulClkRange);
3641 p += mem_timing_size; 3788 format = (ATOM_MEMORY_TIMING_FORMAT *)
3789 ((u8 *)format + mem_timing_size);
3642 } 3790 }
3643 } else 3791 } else
3644 return -EINVAL; 3792 return -EINVAL;
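Several of the radeon_atombios.c hunks above replace array indexing into ATOM BIOS tables (i2c_info->asGPIO_Info[i], gpio->asVolGpioLut[i], vram_module->asMemTiming[i], and so on) with a pointer that is advanced by an explicit byte stride. The table entries live packed inside the VBIOS image, and some of them have variable size (the fake-EDID record, the GDDR5 versus non-GDDR5 timing format), so stepping by the table's own stride rather than the compiler's element size is the safer walk. A small self-contained sketch of the technique; the record layout below is invented for the example.

        #include <stdio.h>
        #include <stdint.h>
        #include <string.h>

        /* invented packed record: a 2-byte id followed by a 4-byte value */
        #define RECORD_STRIDE 6

        struct record_view {
                uint16_t id;
                uint32_t value;
        };

        /* decode one record from an arbitrary, possibly unaligned, offset */
        static struct record_view read_record(const uint8_t *p)
        {
                struct record_view r;

                memcpy(&r.id, p, sizeof(r.id));
                memcpy(&r.value, p + 2, sizeof(r.value));
                return r;
        }

        int main(void)
        {
                /* three packed records, stored little-endian as a BIOS table would be;
                 * the kernel code goes through le16_to_cpu()/le32_to_cpu() for the
                 * same reason */
                const uint8_t table[] = {
                        0x01, 0x00, 0x10, 0x00, 0x00, 0x00,
                        0x02, 0x00, 0x20, 0x00, 0x00, 0x00,
                        0x03, 0x00, 0x30, 0x00, 0x00, 0x00,
                };
                const uint8_t *p = table;
                int i;

                /* advance by the table's own stride, not by sizeof(struct record_view) */
                for (i = 0; i < 3; i++) {
                        struct record_view r = read_record(p);

                        printf("id %u value %u\n", (unsigned)r.id, (unsigned)r.value);
                        p += RECORD_STRIDE;
                }
                return 0;
        }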
diff --git a/drivers/gpu/drm/radeon/radeon_blit_common.h b/drivers/gpu/drm/radeon/radeon_blit_common.h
deleted file mode 100644
index 4ecbe72c9d2d..000000000000
--- a/drivers/gpu/drm/radeon/radeon_blit_common.h
+++ /dev/null
@@ -1,44 +0,0 @@
1/*
2 * Copyright 2009 Advanced Micro Devices, Inc.
3 * Copyright 2009 Red Hat Inc.
4 * Copyright 2012 Alcatel-Lucent, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __RADEON_BLIT_COMMON_H__
28
29#define DI_PT_RECTLIST 0x11
30#define DI_INDEX_SIZE_16_BIT 0x0
31#define DI_SRC_SEL_AUTO_INDEX 0x2
32
33#define FMT_8 0x1
34#define FMT_5_6_5 0x8
35#define FMT_8_8_8_8 0x1a
36#define COLOR_8 0x1
37#define COLOR_5_6_5 0x8
38#define COLOR_8_8_8_8 0x1a
39
40#define RECT_UNIT_H 32
41#define RECT_UNIT_W (RADEON_GPU_PAGE_SIZE / 4 / RECT_UNIT_H)
42
43#define __RADEON_BLIT_COMMON_H__
44#endif
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index efc4f6441ef4..3cae2bbc1854 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1444,13 +1444,13 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
1444 dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle 1444 dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
1445 + init->ring_size / sizeof(u32)); 1445 + init->ring_size / sizeof(u32));
1446 dev_priv->ring.size = init->ring_size; 1446 dev_priv->ring.size = init->ring_size;
1447 dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8); 1447 dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
1448 1448
1449 dev_priv->ring.rptr_update = /* init->rptr_update */ 4096; 1449 dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
1450 dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8); 1450 dev_priv->ring.rptr_update_l2qw = order_base_2( /* init->rptr_update */ 4096 / 8);
1451 1451
1452 dev_priv->ring.fetch_size = /* init->fetch_size */ 32; 1452 dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
1453 dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16); 1453 dev_priv->ring.fetch_size_l2ow = order_base_2( /* init->fetch_size */ 32 / 16);
1454 dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; 1454 dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
1455 1455
1456 dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; 1456 dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
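The radeon_cp.c hunk above swaps the DRM-local drm_order() for the generic order_base_2(); both yield ceil(log2(x)), the smallest n with (1 << n) >= x. A tiny userspace equivalent for reference, assuming those semantics.

        #include <stdio.h>

        /* userspace stand-in for order_base_2(): smallest n such that (1 << n) >= x;
         * 0 and 1 both map to 0 */
        static unsigned int order_base_2_demo(unsigned long x)
        {
                unsigned int order = 0;

                while ((1ul << order) < x)
                        order++;
                return order;
        }

        int main(void)
        {
                unsigned long ring_size = 1024 * 1024;

                /* ring_size / 8 quad-words, as in the hunk above */
                printf("size_l2qw = %u\n", order_base_2_demo(ring_size / 8));        /* 17 */
                printf("rptr_update_l2qw = %u\n", order_base_2_demo(4096 / 8));      /* 9 */
                return 0;
        }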
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 13a130fb3517..a56084410372 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -268,7 +268,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
268 return -EINVAL; 268 return -EINVAL;
269 269
270 /* we only support VM on some SI+ rings */ 270 /* we only support VM on some SI+ rings */
271 if ((p->rdev->asic->ring[p->ring].cs_parse == NULL) && 271 if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) &&
272 ((p->cs_flags & RADEON_CS_USE_VM) == 0)) { 272 ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
273 DRM_ERROR("Ring %d requires VM!\n", p->ring); 273 DRM_ERROR("Ring %d requires VM!\n", p->ring);
274 return -EINVAL; 274 return -EINVAL;
@@ -383,6 +383,10 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
383 DRM_ERROR("Invalid command stream !\n"); 383 DRM_ERROR("Invalid command stream !\n");
384 return r; 384 return r;
385 } 385 }
386
387 if (parser->ring == R600_RING_TYPE_UVD_INDEX)
388 radeon_uvd_note_usage(rdev);
389
386 radeon_cs_sync_rings(parser); 390 radeon_cs_sync_rings(parser);
387 r = radeon_ib_schedule(rdev, &parser->ib, NULL); 391 r = radeon_ib_schedule(rdev, &parser->ib, NULL);
388 if (r) { 392 if (r) {
@@ -474,6 +478,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
474 return r; 478 return r;
475 } 479 }
476 480
481 if (parser->ring == R600_RING_TYPE_UVD_INDEX)
482 radeon_uvd_note_usage(rdev);
483
477 mutex_lock(&rdev->vm_manager.lock); 484 mutex_lock(&rdev->vm_manager.lock);
478 mutex_lock(&vm->mutex); 485 mutex_lock(&vm->mutex);
479 r = radeon_vm_alloc_pt(rdev, vm); 486 r = radeon_vm_alloc_pt(rdev, vm);
@@ -552,10 +559,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
552 return r; 559 return r;
553 } 560 }
554 561
555 /* XXX pick SD/HD/MVC */
556 if (parser.ring == R600_RING_TYPE_UVD_INDEX)
557 radeon_uvd_note_usage(rdev);
558
559 r = radeon_cs_ib_chunk(rdev, &parser); 562 r = radeon_cs_ib_chunk(rdev, &parser);
560 if (r) { 563 if (r) {
561 goto out; 564 goto out;
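The radeon_cs.c hunks above move radeon_uvd_note_usage() out of the ioctl entry point and into both IB submission paths, after the command stream has been parsed successfully, so UVD activity is only recorded for streams that will actually be scheduled. A small sketch of that ordering with placeholder names, not the kernel functions.

        #include <stdio.h>
        #include <stdbool.h>

        static int uvd_submissions;

        /* placeholder for the usage bookkeeping call */
        static void note_uvd_usage(void) { uvd_submissions++; }

        static bool parse_stream(bool valid) { return valid; }

        /* each submission path notes usage only after its own validation passed */
        static int submit_direct(bool valid)
        {
                if (!parse_stream(valid))
                        return -1;              /* invalid stream: nothing recorded */
                note_uvd_usage();
                return 0;
        }

        static int submit_with_vm(bool valid)
        {
                if (!parse_stream(valid))
                        return -1;
                note_uvd_usage();
                return 0;
        }

        int main(void)
        {
                submit_direct(true);
                submit_with_vm(false);          /* rejected, so not counted */
                printf("UVD submissions noted: %d\n", uvd_submissions);    /* 1 */
                return 0;
        }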
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 63398ae1dbf5..16cb8792b1e6 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1003,16 +1003,28 @@ static void radeon_check_arguments(struct radeon_device *rdev)
1003 radeon_vram_limit = 0; 1003 radeon_vram_limit = 0;
1004 } 1004 }
1005 1005
1006 if (radeon_gart_size == -1) {
1007 /* default to a larger gart size on newer asics */
1008 if (rdev->family >= CHIP_RV770)
1009 radeon_gart_size = 1024;
1010 else
1011 radeon_gart_size = 512;
1012 }
1006 /* gtt size must be power of two and greater or equal to 32M */ 1013 /* gtt size must be power of two and greater or equal to 32M */
1007 if (radeon_gart_size < 32) { 1014 if (radeon_gart_size < 32) {
1008 dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n", 1015 dev_warn(rdev->dev, "gart size (%d) too small\n",
1009 radeon_gart_size); 1016 radeon_gart_size);
1010 radeon_gart_size = 512; 1017 if (rdev->family >= CHIP_RV770)
1011 1018 radeon_gart_size = 1024;
1019 else
1020 radeon_gart_size = 512;
1012 } else if (!radeon_check_pot_argument(radeon_gart_size)) { 1021 } else if (!radeon_check_pot_argument(radeon_gart_size)) {
1013 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n", 1022 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1014 radeon_gart_size); 1023 radeon_gart_size);
1015 radeon_gart_size = 512; 1024 if (rdev->family >= CHIP_RV770)
1025 radeon_gart_size = 1024;
1026 else
1027 radeon_gart_size = 512;
1016 } 1028 }
1017 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20; 1029 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1018 1030
@@ -1144,7 +1156,7 @@ int radeon_device_init(struct radeon_device *rdev,
1144 rdev->family = flags & RADEON_FAMILY_MASK; 1156 rdev->family = flags & RADEON_FAMILY_MASK;
1145 rdev->is_atom_bios = false; 1157 rdev->is_atom_bios = false;
1146 rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT; 1158 rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
1147 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; 1159 rdev->mc.gtt_size = 512 * 1024 * 1024;
1148 rdev->accel_working = false; 1160 rdev->accel_working = false;
1149 /* set up ring ids */ 1161 /* set up ring ids */
1150 for (i = 0; i < RADEON_NUM_RINGS; i++) { 1162 for (i = 0; i < RADEON_NUM_RINGS; i++) {
@@ -1270,7 +1282,7 @@ int radeon_device_init(struct radeon_device *rdev,
1270 /* this will fail for cards that aren't VGA class devices, just 1282 /* this will fail for cards that aren't VGA class devices, just
1271 * ignore it */ 1283 * ignore it */
1272 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); 1284 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
1273 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops); 1285 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, false);
1274 1286
1275 r = radeon_init(rdev); 1287 r = radeon_init(rdev);
1276 if (r) 1288 if (r)
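With radeon_gart_size now defaulting to -1, radeon_check_arguments() picks the GART size itself: 1024 MB on RV770 and newer, 512 MB on older parts, and the same per-family fallback is used when the user passes a value below 32 MB or one that is not a power of two. The hard-coded 512 MB in radeon_device_init() is only a placeholder that check_arguments later overwrites. A standalone sketch of the policy; pick_gart_size_mb() and pot() are illustrative helpers, not kernel functions:

	#include <stdbool.h>

	static bool pot(unsigned v) { return v && !(v & (v - 1)); }

	static unsigned pick_gart_size_mb(int requested, bool rv770_or_newer)
	{
		unsigned fallback = rv770_or_newer ? 1024u : 512u; /* new auto default */

		if (requested == -1)                     /* gartsize=-1: auto-select */
			return fallback;
		if (requested < 32 || !pot((unsigned)requested))
			return fallback;                 /* must be a power of two >= 32M */
		return (unsigned)requested;
	}
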
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index c2b67b4e1ac2..b055bddaa94c 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -345,7 +345,8 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
345 345
346static int radeon_crtc_page_flip(struct drm_crtc *crtc, 346static int radeon_crtc_page_flip(struct drm_crtc *crtc,
347 struct drm_framebuffer *fb, 347 struct drm_framebuffer *fb,
348 struct drm_pending_vblank_event *event) 348 struct drm_pending_vblank_event *event,
349 uint32_t page_flip_flags)
349{ 350{
350 struct drm_device *dev = crtc->dev; 351 struct drm_device *dev = crtc->dev;
351 struct radeon_device *rdev = dev->dev_private; 352 struct radeon_device *rdev = dev->dev_private;
@@ -1254,41 +1255,41 @@ static void radeon_afmt_init(struct radeon_device *rdev)
1254 for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) 1255 for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
1255 rdev->mode_info.afmt[i] = NULL; 1256 rdev->mode_info.afmt[i] = NULL;
1256 1257
1257 if (ASIC_IS_DCE6(rdev)) { 1258 if (ASIC_IS_NODCE(rdev)) {
1258 /* todo */ 1259 /* nothing to do */
1259 } else if (ASIC_IS_DCE4(rdev)) { 1260 } else if (ASIC_IS_DCE4(rdev)) {
1261 static uint32_t eg_offsets[] = {
1262 EVERGREEN_CRTC0_REGISTER_OFFSET,
1263 EVERGREEN_CRTC1_REGISTER_OFFSET,
1264 EVERGREEN_CRTC2_REGISTER_OFFSET,
1265 EVERGREEN_CRTC3_REGISTER_OFFSET,
1266 EVERGREEN_CRTC4_REGISTER_OFFSET,
1267 EVERGREEN_CRTC5_REGISTER_OFFSET,
1268 0x13830 - 0x7030,
1269 };
1270 int num_afmt;
1271
1272 /* DCE8 has 7 audio blocks tied to DIG encoders */
1273 /* DCE6 has 6 audio blocks tied to DIG encoders */
1260 /* DCE4/5 has 6 audio blocks tied to DIG encoders */ 1274 /* DCE4/5 has 6 audio blocks tied to DIG encoders */
1261 /* DCE4.1 has 2 audio blocks tied to DIG encoders */ 1275 /* DCE4.1 has 2 audio blocks tied to DIG encoders */
1262 rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL); 1276 if (ASIC_IS_DCE8(rdev))
1263 if (rdev->mode_info.afmt[0]) { 1277 num_afmt = 7;
1264 rdev->mode_info.afmt[0]->offset = EVERGREEN_CRTC0_REGISTER_OFFSET; 1278 else if (ASIC_IS_DCE6(rdev))
1265 rdev->mode_info.afmt[0]->id = 0; 1279 num_afmt = 6;
1266 } 1280 else if (ASIC_IS_DCE5(rdev))
1267 rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL); 1281 num_afmt = 6;
1268 if (rdev->mode_info.afmt[1]) { 1282 else if (ASIC_IS_DCE41(rdev))
1269 rdev->mode_info.afmt[1]->offset = EVERGREEN_CRTC1_REGISTER_OFFSET; 1283 num_afmt = 2;
1270 rdev->mode_info.afmt[1]->id = 1; 1284 else /* DCE4 */
1271 } 1285 num_afmt = 6;
1272 if (!ASIC_IS_DCE41(rdev)) { 1286
1273 rdev->mode_info.afmt[2] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL); 1287 BUG_ON(num_afmt > ARRAY_SIZE(eg_offsets));
1274 if (rdev->mode_info.afmt[2]) { 1288 for (i = 0; i < num_afmt; i++) {
1275 rdev->mode_info.afmt[2]->offset = EVERGREEN_CRTC2_REGISTER_OFFSET; 1289 rdev->mode_info.afmt[i] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
1276 rdev->mode_info.afmt[2]->id = 2; 1290 if (rdev->mode_info.afmt[i]) {
1277 } 1291 rdev->mode_info.afmt[i]->offset = eg_offsets[i];
1278 rdev->mode_info.afmt[3] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL); 1292 rdev->mode_info.afmt[i]->id = i;
1279 if (rdev->mode_info.afmt[3]) {
1280 rdev->mode_info.afmt[3]->offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
1281 rdev->mode_info.afmt[3]->id = 3;
1282 }
1283 rdev->mode_info.afmt[4] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
1284 if (rdev->mode_info.afmt[4]) {
1285 rdev->mode_info.afmt[4]->offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
1286 rdev->mode_info.afmt[4]->id = 4;
1287 }
1288 rdev->mode_info.afmt[5] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
1289 if (rdev->mode_info.afmt[5]) {
1290 rdev->mode_info.afmt[5]->offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
1291 rdev->mode_info.afmt[5]->id = 5;
1292 } 1293 }
1293 } 1294 }
1294 } else if (ASIC_IS_DCE3(rdev)) { 1295 } else if (ASIC_IS_DCE3(rdev)) {
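radeon_afmt_init() is reworked from six hand-unrolled allocations into a table of register offsets plus a loop, which also makes room for the seventh DCE8 audio block (hence afmt[] growing to seven entries in radeon_mode.h further down). The pattern, reduced to a hedged sketch of the loop body:

	/* Illustrative only; eg_offsets[] is the offset table added above. */
	for (i = 0; i < num_afmt; i++) {                 /* 7 on DCE8, 6 on DCE4/5/6, 2 on DCE4.1 */
		afmt[i] = kzalloc(sizeof(*afmt[i]), GFP_KERNEL);
		if (!afmt[i])
			continue;                        /* a failed allocation just skips that block */
		afmt[i]->offset = eg_offsets[i];         /* per-block register offset from the table */
		afmt[i]->id = i;
	}
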
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 29876b1be8ec..cb4445f55a96 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -81,7 +81,6 @@
81#define KMS_DRIVER_PATCHLEVEL 0 81#define KMS_DRIVER_PATCHLEVEL 0
82int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 82int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
83int radeon_driver_unload_kms(struct drm_device *dev); 83int radeon_driver_unload_kms(struct drm_device *dev);
84int radeon_driver_firstopen_kms(struct drm_device *dev);
85void radeon_driver_lastclose_kms(struct drm_device *dev); 84void radeon_driver_lastclose_kms(struct drm_device *dev);
86int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv); 85int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
87void radeon_driver_postclose_kms(struct drm_device *dev, 86void radeon_driver_postclose_kms(struct drm_device *dev,
@@ -101,8 +100,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
101int radeon_driver_irq_postinstall_kms(struct drm_device *dev); 100int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
102void radeon_driver_irq_uninstall_kms(struct drm_device *dev); 101void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
103irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS); 102irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
104int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
105 struct drm_file *file_priv);
106int radeon_gem_object_init(struct drm_gem_object *obj); 103int radeon_gem_object_init(struct drm_gem_object *obj);
107void radeon_gem_object_free(struct drm_gem_object *obj); 104void radeon_gem_object_free(struct drm_gem_object *obj);
108int radeon_gem_object_open(struct drm_gem_object *obj, 105int radeon_gem_object_open(struct drm_gem_object *obj,
@@ -111,7 +108,7 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
111 struct drm_file *file_priv); 108 struct drm_file *file_priv);
112extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, 109extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
113 int *vpos, int *hpos); 110 int *vpos, int *hpos);
114extern struct drm_ioctl_desc radeon_ioctls_kms[]; 111extern const struct drm_ioctl_desc radeon_ioctls_kms[];
115extern int radeon_max_kms_ioctl; 112extern int radeon_max_kms_ioctl;
116int radeon_mmap(struct file *filp, struct vm_area_struct *vma); 113int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
117int radeon_mode_dumb_mmap(struct drm_file *filp, 114int radeon_mode_dumb_mmap(struct drm_file *filp,
@@ -120,9 +117,6 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
120int radeon_mode_dumb_create(struct drm_file *file_priv, 117int radeon_mode_dumb_create(struct drm_file *file_priv,
121 struct drm_device *dev, 118 struct drm_device *dev,
122 struct drm_mode_create_dumb *args); 119 struct drm_mode_create_dumb *args);
123int radeon_mode_dumb_destroy(struct drm_file *file_priv,
124 struct drm_device *dev,
125 uint32_t handle);
126struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj); 120struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
127struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev, 121struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
128 size_t size, 122 size_t size,
@@ -154,7 +148,7 @@ int radeon_dynclks = -1;
154int radeon_r4xx_atom = 0; 148int radeon_r4xx_atom = 0;
155int radeon_agpmode = 0; 149int radeon_agpmode = 0;
156int radeon_vram_limit = 0; 150int radeon_vram_limit = 0;
157int radeon_gart_size = 512; /* default gart size */ 151int radeon_gart_size = -1; /* auto */
158int radeon_benchmarking = 0; 152int radeon_benchmarking = 0;
159int radeon_testing = 0; 153int radeon_testing = 0;
160int radeon_connector_table = 0; 154int radeon_connector_table = 0;
@@ -187,7 +181,7 @@ module_param_named(vramlimit, radeon_vram_limit, int, 0600);
187MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)"); 181MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)");
188module_param_named(agpmode, radeon_agpmode, int, 0444); 182module_param_named(agpmode, radeon_agpmode, int, 0444);
189 183
190MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc)"); 184MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)");
191module_param_named(gartsize, radeon_gart_size, int, 0600); 185module_param_named(gartsize, radeon_gart_size, int, 0600);
192 186
193MODULE_PARM_DESC(benchmark, "Run benchmark"); 187MODULE_PARM_DESC(benchmark, "Run benchmark");
@@ -272,7 +266,6 @@ static const struct file_operations radeon_driver_old_fops = {
272 .unlocked_ioctl = drm_ioctl, 266 .unlocked_ioctl = drm_ioctl,
273 .mmap = drm_mmap, 267 .mmap = drm_mmap,
274 .poll = drm_poll, 268 .poll = drm_poll,
275 .fasync = drm_fasync,
276 .read = drm_read, 269 .read = drm_read,
277#ifdef CONFIG_COMPAT 270#ifdef CONFIG_COMPAT
278 .compat_ioctl = radeon_compat_ioctl, 271 .compat_ioctl = radeon_compat_ioctl,
@@ -282,7 +275,7 @@ static const struct file_operations radeon_driver_old_fops = {
282 275
283static struct drm_driver driver_old = { 276static struct drm_driver driver_old = {
284 .driver_features = 277 .driver_features =
285 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 278 DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
286 DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED, 279 DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
287 .dev_priv_size = sizeof(drm_radeon_buf_priv_t), 280 .dev_priv_size = sizeof(drm_radeon_buf_priv_t),
288 .load = radeon_driver_load, 281 .load = radeon_driver_load,
@@ -381,7 +374,6 @@ static const struct file_operations radeon_driver_kms_fops = {
381 .unlocked_ioctl = drm_ioctl, 374 .unlocked_ioctl = drm_ioctl,
382 .mmap = radeon_mmap, 375 .mmap = radeon_mmap,
383 .poll = drm_poll, 376 .poll = drm_poll,
384 .fasync = drm_fasync,
385 .read = drm_read, 377 .read = drm_read,
386#ifdef CONFIG_COMPAT 378#ifdef CONFIG_COMPAT
387 .compat_ioctl = radeon_kms_compat_ioctl, 379 .compat_ioctl = radeon_kms_compat_ioctl,
@@ -390,12 +382,11 @@ static const struct file_operations radeon_driver_kms_fops = {
390 382
391static struct drm_driver kms_driver = { 383static struct drm_driver kms_driver = {
392 .driver_features = 384 .driver_features =
393 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 385 DRIVER_USE_AGP |
394 DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM | 386 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
395 DRIVER_PRIME, 387 DRIVER_PRIME | DRIVER_RENDER,
396 .dev_priv_size = 0, 388 .dev_priv_size = 0,
397 .load = radeon_driver_load_kms, 389 .load = radeon_driver_load_kms,
398 .firstopen = radeon_driver_firstopen_kms,
399 .open = radeon_driver_open_kms, 390 .open = radeon_driver_open_kms,
400 .preclose = radeon_driver_preclose_kms, 391 .preclose = radeon_driver_preclose_kms,
401 .postclose = radeon_driver_postclose_kms, 392 .postclose = radeon_driver_postclose_kms,
@@ -421,10 +412,9 @@ static struct drm_driver kms_driver = {
421 .gem_free_object = radeon_gem_object_free, 412 .gem_free_object = radeon_gem_object_free,
422 .gem_open_object = radeon_gem_object_open, 413 .gem_open_object = radeon_gem_object_open,
423 .gem_close_object = radeon_gem_object_close, 414 .gem_close_object = radeon_gem_object_close,
424 .dma_ioctl = radeon_dma_ioctl_kms,
425 .dumb_create = radeon_mode_dumb_create, 415 .dumb_create = radeon_mode_dumb_create,
426 .dumb_map_offset = radeon_mode_dumb_mmap, 416 .dumb_map_offset = radeon_mode_dumb_mmap,
427 .dumb_destroy = radeon_mode_dumb_destroy, 417 .dumb_destroy = drm_gem_dumb_destroy,
428 .fops = &radeon_driver_kms_fops, 418 .fops = &radeon_driver_kms_fops,
429 419
430 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 420 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index aa796031ab65..dce99c8a5835 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -570,13 +570,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
570 return 0; 570 return 0;
571} 571}
572 572
573int radeon_mode_dumb_destroy(struct drm_file *file_priv,
574 struct drm_device *dev,
575 uint32_t handle)
576{
577 return drm_gem_handle_delete(file_priv, handle);
578}
579
580#if defined(CONFIG_DEBUG_FS) 573#if defined(CONFIG_DEBUG_FS)
581static int radeon_debugfs_gem_info(struct seq_file *m, void *data) 574static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
582{ 575{
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 081886b0642d..cc9e8482cf30 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -275,17 +275,19 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
275 dev_info(rdev->dev, "radeon: using MSI.\n"); 275 dev_info(rdev->dev, "radeon: using MSI.\n");
276 } 276 }
277 } 277 }
278
279 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
280 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
281 INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
282
278 rdev->irq.installed = true; 283 rdev->irq.installed = true;
279 r = drm_irq_install(rdev->ddev); 284 r = drm_irq_install(rdev->ddev);
280 if (r) { 285 if (r) {
281 rdev->irq.installed = false; 286 rdev->irq.installed = false;
287 flush_work(&rdev->hotplug_work);
282 return r; 288 return r;
283 } 289 }
284 290
285 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
286 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
287 INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
288
289 DRM_INFO("radeon: irq initialized.\n"); 291 DRM_INFO("radeon: irq initialized.\n");
290 return 0; 292 return 0;
291} 293}
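The irq init hunk is an ordering fix: the hotplug, audio and reset work items are now initialised before drm_irq_install(), so an interrupt firing as soon as the handler is registered cannot queue work onto an uninitialised work_struct, and the error path flushes any hotplug work it may already have scheduled. The general shape, sketched rather than quoted:

	/* Hedged sketch of the init ordering, not the literal driver code. */
	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);   /* 1. work items first */

	rdev->irq.installed = true;
	r = drm_irq_install(rdev->ddev);                             /* 2. then enable IRQs */
	if (r) {
		rdev->irq.installed = false;
		flush_work(&rdev->hotplug_work);                     /* 3. drain anything already queued */
		return r;
	}
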
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 49ff3d1a6102..61580ddc4eb2 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -433,6 +433,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
433 return -EINVAL; 433 return -EINVAL;
434 } 434 }
435 break; 435 break;
436 case RADEON_INFO_SI_CP_DMA_COMPUTE:
437 *value = 1;
438 break;
436 default: 439 default:
437 DRM_DEBUG_KMS("Invalid request %d\n", info->request); 440 DRM_DEBUG_KMS("Invalid request %d\n", info->request);
438 return -EINVAL; 441 return -EINVAL;
@@ -449,19 +452,6 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
449 * Outdated mess for old drm with Xorg being in charge (void function now). 452 * Outdated mess for old drm with Xorg being in charge (void function now).
450 */ 453 */
451/** 454/**
452 * radeon_driver_firstopen_kms - drm callback for first open
453 *
454 * @dev: drm dev pointer
455 *
456 * Nothing to be done for KMS (all asics).
457 * Returns 0 on success.
458 */
459int radeon_driver_firstopen_kms(struct drm_device *dev)
460{
461 return 0;
462}
463
464/**
465 * radeon_driver_firstopen_kms - drm callback for last close 455 * radeon_driver_firstopen_kms - drm callback for last close
466 * 456 *
467 * @dev: drm dev pointer 457 * @dev: drm dev pointer
@@ -683,16 +673,6 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
683 drmcrtc); 673 drmcrtc);
684} 674}
685 675
686/*
687 * IOCTL.
688 */
689int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
690 struct drm_file *file_priv)
691{
692 /* Not valid in KMS. */
693 return -EINVAL;
694}
695
696#define KMS_INVALID_IOCTL(name) \ 676#define KMS_INVALID_IOCTL(name) \
697int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\ 677int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
698{ \ 678{ \
@@ -732,7 +712,7 @@ KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
732KMS_INVALID_IOCTL(radeon_surface_free_kms) 712KMS_INVALID_IOCTL(radeon_surface_free_kms)
733 713
734 714
735struct drm_ioctl_desc radeon_ioctls_kms[] = { 715const struct drm_ioctl_desc radeon_ioctls_kms[] = {
736 DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 716 DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
737 DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 717 DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
738 DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 718 DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -761,18 +741,18 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = {
761 DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH), 741 DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
762 DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH), 742 DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
763 /* KMS */ 743 /* KMS */
764 DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED), 744 DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
765 DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED), 745 DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
766 DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED), 746 DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
767 DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED), 747 DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
768 DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED), 748 DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
769 DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED), 749 DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
770 DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED), 750 DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
771 DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED), 751 DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
772 DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED), 752 DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
773 DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), 753 DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
774 DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), 754 DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
775 DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), 755 DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
776 DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED), 756 DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
777}; 757};
778int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); 758int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
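Two user-visible changes in radeon_kms.c: the ioctl table becomes const and most GEM/CS/info ioctls gain DRM_RENDER_ALLOW, making them callable through the new render nodes, and RADEON_INFO grows a SI_CP_DMA_COMPUTE query. A hedged userspace probe for the new query, assuming libdrm's drmCommandWriteRead() and a radeon_drm.h new enough to define RADEON_INFO_SI_CP_DMA_COMPUTE:

	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <radeon_drm.h>

	/* Returns nonzero if CP DMA is usable on compute rings (SI). */
	static int si_cp_dma_compute_supported(int fd)
	{
		uint32_t val = 0;
		struct drm_radeon_info info;

		memset(&info, 0, sizeof(info));
		info.request = RADEON_INFO_SI_CP_DMA_COMPUTE;
		info.value = (uintptr_t)&val;            /* kernel writes the answer here */
		if (drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info)) != 0)
			return 0;                        /* older kernel: query unknown */
		return val == 1;
	}
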
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 8296632a4235..d908d8d68f6b 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -225,6 +225,7 @@ struct radeon_afmt {
225 int offset; 225 int offset;
226 bool last_buffer_filled_status; 226 bool last_buffer_filled_status;
227 int id; 227 int id;
228 struct r600_audio_pin *pin;
228}; 229};
229 230
230struct radeon_mode_info { 231struct radeon_mode_info {
@@ -233,7 +234,7 @@ struct radeon_mode_info {
233 enum radeon_connector_table connector_table; 234 enum radeon_connector_table connector_table;
234 bool mode_config_initialized; 235 bool mode_config_initialized;
235 struct radeon_crtc *crtcs[6]; 236 struct radeon_crtc *crtcs[6];
236 struct radeon_afmt *afmt[6]; 237 struct radeon_afmt *afmt[7];
237 /* DVI-I properties */ 238 /* DVI-I properties */
238 struct drm_property *coherent_mode_property; 239 struct drm_property *coherent_mode_property;
239 /* DAC enable load detect */ 240 /* DAC enable load detect */
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 2020bf4a3830..c0fa4aa9ceea 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -142,7 +142,6 @@ int radeon_bo_create(struct radeon_device *rdev,
142 return r; 142 return r;
143 } 143 }
144 bo->rdev = rdev; 144 bo->rdev = rdev;
145 bo->gem_base.driver_private = NULL;
146 bo->surface_reg = -1; 145 bo->surface_reg = -1;
147 INIT_LIST_HEAD(&bo->list); 146 INIT_LIST_HEAD(&bo->list);
148 INIT_LIST_HEAD(&bo->va); 147 INIT_LIST_HEAD(&bo->va);
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 49c82c480013..209b11150263 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -113,13 +113,10 @@ static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
113 * @bo: radeon object for which we query the offset 113 * @bo: radeon object for which we query the offset
114 * 114 *
115 * Returns mmap offset of the object. 115 * Returns mmap offset of the object.
116 *
117 * Note: addr_space_offset is constant after ttm bo init thus isn't protected
118 * by any lock.
119 */ 116 */
120static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo) 117static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
121{ 118{
122 return bo->tbo.addr_space_offset; 119 return drm_vma_node_offset_addr(&bo->tbo.vma_node);
123} 120}
124 121
125extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, 122extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index c557850cd345..d7555369a3e5 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -569,6 +569,8 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
569 case THERMAL_TYPE_NI: 569 case THERMAL_TYPE_NI:
570 case THERMAL_TYPE_SUMO: 570 case THERMAL_TYPE_SUMO:
571 case THERMAL_TYPE_SI: 571 case THERMAL_TYPE_SI:
572 case THERMAL_TYPE_CI:
573 case THERMAL_TYPE_KV:
572 if (rdev->asic->pm.get_temperature == NULL) 574 if (rdev->asic->pm.get_temperature == NULL)
573 return err; 575 return err;
574 rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); 576 rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
@@ -624,7 +626,15 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work)
624 /* switch back the user state */ 626 /* switch back the user state */
625 dpm_state = rdev->pm.dpm.user_state; 627 dpm_state = rdev->pm.dpm.user_state;
626 } 628 }
627 radeon_dpm_enable_power_state(rdev, dpm_state); 629 mutex_lock(&rdev->pm.mutex);
630 if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
631 rdev->pm.dpm.thermal_active = true;
632 else
633 rdev->pm.dpm.thermal_active = false;
634 rdev->pm.dpm.state = dpm_state;
635 mutex_unlock(&rdev->pm.mutex);
636
637 radeon_pm_compute_clocks(rdev);
628} 638}
629 639
630static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, 640static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
@@ -687,7 +697,10 @@ restart_search:
687 break; 697 break;
688 /* internal states */ 698 /* internal states */
689 case POWER_STATE_TYPE_INTERNAL_UVD: 699 case POWER_STATE_TYPE_INTERNAL_UVD:
690 return rdev->pm.dpm.uvd_ps; 700 if (rdev->pm.dpm.uvd_ps)
701 return rdev->pm.dpm.uvd_ps;
702 else
703 break;
691 case POWER_STATE_TYPE_INTERNAL_UVD_SD: 704 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
692 if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) 705 if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
693 return ps; 706 return ps;
@@ -729,10 +742,17 @@ restart_search:
729 /* use a fallback state if we didn't match */ 742 /* use a fallback state if we didn't match */
730 switch (dpm_state) { 743 switch (dpm_state) {
731 case POWER_STATE_TYPE_INTERNAL_UVD_SD: 744 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
745 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
746 goto restart_search;
732 case POWER_STATE_TYPE_INTERNAL_UVD_HD: 747 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
733 case POWER_STATE_TYPE_INTERNAL_UVD_HD2: 748 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
734 case POWER_STATE_TYPE_INTERNAL_UVD_MVC: 749 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
735 return rdev->pm.dpm.uvd_ps; 750 if (rdev->pm.dpm.uvd_ps) {
751 return rdev->pm.dpm.uvd_ps;
752 } else {
753 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
754 goto restart_search;
755 }
736 case POWER_STATE_TYPE_INTERNAL_THERMAL: 756 case POWER_STATE_TYPE_INTERNAL_THERMAL:
737 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI; 757 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
738 goto restart_search; 758 goto restart_search;
@@ -850,38 +870,51 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
850 870
851 radeon_dpm_post_set_power_state(rdev); 871 radeon_dpm_post_set_power_state(rdev);
852 872
873 /* force low perf level for thermal */
874 if (rdev->pm.dpm.thermal_active &&
875 rdev->asic->dpm.force_performance_level) {
876 radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
877 }
878
853done: 879done:
854 mutex_unlock(&rdev->ring_lock); 880 mutex_unlock(&rdev->ring_lock);
855 up_write(&rdev->pm.mclk_lock); 881 up_write(&rdev->pm.mclk_lock);
856 mutex_unlock(&rdev->ddev->struct_mutex); 882 mutex_unlock(&rdev->ddev->struct_mutex);
857} 883}
858 884
859void radeon_dpm_enable_power_state(struct radeon_device *rdev, 885void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
860 enum radeon_pm_state_type dpm_state)
861{ 886{
862 if (!rdev->pm.dpm_enabled) 887 enum radeon_pm_state_type dpm_state;
863 return;
864 888
865 mutex_lock(&rdev->pm.mutex); 889 if (rdev->asic->dpm.powergate_uvd) {
866 switch (dpm_state) { 890 mutex_lock(&rdev->pm.mutex);
867 case POWER_STATE_TYPE_INTERNAL_THERMAL: 891 /* enable/disable UVD */
868 rdev->pm.dpm.thermal_active = true; 892 radeon_dpm_powergate_uvd(rdev, !enable);
869 break; 893 mutex_unlock(&rdev->pm.mutex);
870 case POWER_STATE_TYPE_INTERNAL_UVD: 894 } else {
871 case POWER_STATE_TYPE_INTERNAL_UVD_SD: 895 if (enable) {
872 case POWER_STATE_TYPE_INTERNAL_UVD_HD: 896 mutex_lock(&rdev->pm.mutex);
873 case POWER_STATE_TYPE_INTERNAL_UVD_HD2: 897 rdev->pm.dpm.uvd_active = true;
874 case POWER_STATE_TYPE_INTERNAL_UVD_MVC: 898 if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
875 rdev->pm.dpm.uvd_active = true; 899 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
876 break; 900 else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
877 default: 901 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
878 rdev->pm.dpm.thermal_active = false; 902 else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
879 rdev->pm.dpm.uvd_active = false; 903 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
880 break; 904 else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
905 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
906 else
907 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
908 rdev->pm.dpm.state = dpm_state;
909 mutex_unlock(&rdev->pm.mutex);
910 } else {
911 mutex_lock(&rdev->pm.mutex);
912 rdev->pm.dpm.uvd_active = false;
913 mutex_unlock(&rdev->pm.mutex);
914 }
915
916 radeon_pm_compute_clocks(rdev);
881 } 917 }
882 rdev->pm.dpm.state = dpm_state;
883 mutex_unlock(&rdev->pm.mutex);
884 radeon_pm_compute_clocks(rdev);
885} 918}
886 919
887static void radeon_pm_suspend_old(struct radeon_device *rdev) 920static void radeon_pm_suspend_old(struct radeon_device *rdev)
@@ -1176,6 +1209,9 @@ int radeon_pm_init(struct radeon_device *rdev)
1176 case CHIP_VERDE: 1209 case CHIP_VERDE:
1177 case CHIP_OLAND: 1210 case CHIP_OLAND:
1178 case CHIP_HAINAN: 1211 case CHIP_HAINAN:
1212 case CHIP_BONAIRE:
1213 case CHIP_KABINI:
1214 case CHIP_KAVERI:
1179 /* DPM requires the RLC, RV770+ dGPU requires SMC */ 1215 /* DPM requires the RLC, RV770+ dGPU requires SMC */
1180 if (!rdev->rlc_fw) 1216 if (!rdev->rlc_fw)
1181 rdev->pm.pm_method = PM_METHOD_PROFILE; 1217 rdev->pm.pm_method = PM_METHOD_PROFILE;
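radeon_dpm_enable_power_state() is replaced by radeon_dpm_enable_uvd(): on ASICs that can power-gate UVD the block is simply gated on or off, while on the rest the requested DPM state is derived from how many SD and HD streams are open (counted in radeon_uvd.c below). A compact sketch of that mapping; pick_uvd_state() is an illustrative name for logic that lives inline in the driver:

	/* Illustrative mapping from open stream counts to a DPM state. */
	static enum radeon_pm_state_type pick_uvd_state(unsigned sd, unsigned hd)
	{
		if (sd == 1 && hd == 0)
			return POWER_STATE_TYPE_INTERNAL_UVD_SD;
		if (sd == 2 && hd == 0)
			return POWER_STATE_TYPE_INTERNAL_UVD_HD;   /* two SD streams need the HD state */
		if (sd == 0 && hd == 1)
			return POWER_STATE_TYPE_INTERNAL_UVD_HD;
		if (sd == 0 && hd == 2)
			return POWER_STATE_TYPE_INTERNAL_UVD_HD2;
		return POWER_STATE_TYPE_INTERNAL_UVD;              /* anything heavier */
	}
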
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 65b9eabd5a2f..20074560fc25 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -68,7 +68,6 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
68 RADEON_GEM_DOMAIN_GTT, sg, &bo); 68 RADEON_GEM_DOMAIN_GTT, sg, &bo);
69 if (ret) 69 if (ret)
70 return ERR_PTR(ret); 70 return ERR_PTR(ret);
71 bo->gem_base.driver_private = bo;
72 71
73 mutex_lock(&rdev->gem.mutex); 72 mutex_lock(&rdev->gem.mutex);
74 list_add_tail(&bo->list, &rdev->gem.objects); 73 list_add_tail(&bo->list, &rdev->gem.objects);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index fb5ea6208970..46a25f037b84 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -363,11 +363,10 @@ u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev,
363{ 363{
364 u32 rptr; 364 u32 rptr;
365 365
366 if (rdev->wb.enabled && ring != &rdev->ring[R600_RING_TYPE_UVD_INDEX]) 366 if (rdev->wb.enabled)
367 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); 367 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
368 else 368 else
369 rptr = RREG32(ring->rptr_reg); 369 rptr = RREG32(ring->rptr_reg);
370 rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
371 370
372 return rptr; 371 return rptr;
373} 372}
@@ -378,7 +377,6 @@ u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev,
378 u32 wptr; 377 u32 wptr;
379 378
380 wptr = RREG32(ring->wptr_reg); 379 wptr = RREG32(ring->wptr_reg);
381 wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
382 380
383 return wptr; 381 return wptr;
384} 382}
@@ -386,7 +384,7 @@ u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev,
386void radeon_ring_generic_set_wptr(struct radeon_device *rdev, 384void radeon_ring_generic_set_wptr(struct radeon_device *rdev,
387 struct radeon_ring *ring) 385 struct radeon_ring *ring)
388{ 386{
389 WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask); 387 WREG32(ring->wptr_reg, ring->wptr);
390 (void)RREG32(ring->wptr_reg); 388 (void)RREG32(ring->wptr_reg);
391} 389}
392 390
@@ -719,16 +717,13 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
719 * @rptr_offs: offset of the rptr writeback location in the WB buffer 717 * @rptr_offs: offset of the rptr writeback location in the WB buffer
720 * @rptr_reg: MMIO offset of the rptr register 718 * @rptr_reg: MMIO offset of the rptr register
721 * @wptr_reg: MMIO offset of the wptr register 719 * @wptr_reg: MMIO offset of the wptr register
722 * @ptr_reg_shift: bit offset of the rptr/wptr values
723 * @ptr_reg_mask: bit mask of the rptr/wptr values
724 * @nop: nop packet for this ring 720 * @nop: nop packet for this ring
725 * 721 *
726 * Initialize the driver information for the selected ring (all asics). 722 * Initialize the driver information for the selected ring (all asics).
727 * Returns 0 on success, error on failure. 723 * Returns 0 on success, error on failure.
728 */ 724 */
729int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size, 725int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
730 unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, 726 unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop)
731 u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
732{ 727{
733 int r; 728 int r;
734 729
@@ -736,8 +731,6 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
736 ring->rptr_offs = rptr_offs; 731 ring->rptr_offs = rptr_offs;
737 ring->rptr_reg = rptr_reg; 732 ring->rptr_reg = rptr_reg;
738 ring->wptr_reg = wptr_reg; 733 ring->wptr_reg = wptr_reg;
739 ring->ptr_reg_shift = ptr_reg_shift;
740 ring->ptr_reg_mask = ptr_reg_mask;
741 ring->nop = nop; 734 ring->nop = nop;
742 /* Allocate ring buffer */ 735 /* Allocate ring buffer */
743 if (ring->ring_obj == NULL) { 736 if (ring->ring_obj == NULL) {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 6c0ce8915fac..71245d6f34a2 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -203,7 +203,9 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
203 203
204static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp) 204static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
205{ 205{
206 return 0; 206 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
207
208 return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
207} 209}
208 210
209static void radeon_move_null(struct ttm_buffer_object *bo, 211static void radeon_move_null(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
index d8b05f7bcf1a..33858364fe89 100644
--- a/drivers/gpu/drm/radeon/radeon_ucode.h
+++ b/drivers/gpu/drm/radeon/radeon_ucode.h
@@ -35,6 +35,12 @@
35#define SI_PFP_UCODE_SIZE 2144 35#define SI_PFP_UCODE_SIZE 2144
36#define SI_PM4_UCODE_SIZE 2144 36#define SI_PM4_UCODE_SIZE 2144
37#define SI_CE_UCODE_SIZE 2144 37#define SI_CE_UCODE_SIZE 2144
38#define CIK_PFP_UCODE_SIZE 2144
39#define CIK_ME_UCODE_SIZE 2144
40#define CIK_CE_UCODE_SIZE 2144
41
42/* MEC */
43#define CIK_MEC_UCODE_SIZE 4192
38 44
39/* RLC */ 45/* RLC */
40#define R600_RLC_UCODE_SIZE 768 46#define R600_RLC_UCODE_SIZE 768
@@ -43,12 +49,20 @@
43#define CAYMAN_RLC_UCODE_SIZE 1024 49#define CAYMAN_RLC_UCODE_SIZE 1024
44#define ARUBA_RLC_UCODE_SIZE 1536 50#define ARUBA_RLC_UCODE_SIZE 1536
45#define SI_RLC_UCODE_SIZE 2048 51#define SI_RLC_UCODE_SIZE 2048
52#define BONAIRE_RLC_UCODE_SIZE 2048
53#define KB_RLC_UCODE_SIZE 2560
54#define KV_RLC_UCODE_SIZE 2560
46 55
47/* MC */ 56/* MC */
48#define BTC_MC_UCODE_SIZE 6024 57#define BTC_MC_UCODE_SIZE 6024
49#define CAYMAN_MC_UCODE_SIZE 6037 58#define CAYMAN_MC_UCODE_SIZE 6037
50#define SI_MC_UCODE_SIZE 7769 59#define SI_MC_UCODE_SIZE 7769
51#define OLAND_MC_UCODE_SIZE 7863 60#define OLAND_MC_UCODE_SIZE 7863
61#define CIK_MC_UCODE_SIZE 7866
62
63/* SDMA */
64#define CIK_SDMA_UCODE_SIZE 1050
65#define CIK_SDMA_UCODE_VERSION 64
52 66
53/* SMC */ 67/* SMC */
54#define RV770_SMC_UCODE_START 0x0100 68#define RV770_SMC_UCODE_START 0x0100
@@ -126,4 +140,7 @@
126#define HAINAN_SMC_UCODE_START 0x10000 140#define HAINAN_SMC_UCODE_START 0x10000
127#define HAINAN_SMC_UCODE_SIZE 0xe67C 141#define HAINAN_SMC_UCODE_SIZE 0xe67C
128 142
143#define BONAIRE_SMC_UCODE_START 0x20000
144#define BONAIRE_SMC_UCODE_SIZE 0x1FDEC
145
129#endif 146#endif
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index b79f4f5cdd62..1a01bbff9bfa 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -147,6 +147,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
147 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { 147 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
148 atomic_set(&rdev->uvd.handles[i], 0); 148 atomic_set(&rdev->uvd.handles[i], 0);
149 rdev->uvd.filp[i] = NULL; 149 rdev->uvd.filp[i] = NULL;
150 rdev->uvd.img_size[i] = 0;
150 } 151 }
151 152
152 return 0; 153 return 0;
@@ -347,6 +348,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
347 unsigned offset, unsigned buf_sizes[]) 348 unsigned offset, unsigned buf_sizes[])
348{ 349{
349 int32_t *msg, msg_type, handle; 350 int32_t *msg, msg_type, handle;
351 unsigned img_size = 0;
350 void *ptr; 352 void *ptr;
351 353
352 int i, r; 354 int i, r;
@@ -383,6 +385,8 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
383 if (msg_type == 1) { 385 if (msg_type == 1) {
384 /* it's a decode msg, calc buffer sizes */ 386 /* it's a decode msg, calc buffer sizes */
385 r = radeon_uvd_cs_msg_decode(msg, buf_sizes); 387 r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
388 /* calc image size (width * height) */
389 img_size = msg[6] * msg[7];
386 radeon_bo_kunmap(bo); 390 radeon_bo_kunmap(bo);
387 if (r) 391 if (r)
388 return r; 392 return r;
@@ -394,6 +398,8 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
394 radeon_bo_kunmap(bo); 398 radeon_bo_kunmap(bo);
395 return 0; 399 return 0;
396 } else { 400 } else {
401 /* it's a create msg, calc image size (width * height) */
402 img_size = msg[7] * msg[8];
397 radeon_bo_kunmap(bo); 403 radeon_bo_kunmap(bo);
398 404
399 if (msg_type != 0) { 405 if (msg_type != 0) {
@@ -414,6 +420,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
414 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { 420 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
415 if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) { 421 if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
416 p->rdev->uvd.filp[i] = p->filp; 422 p->rdev->uvd.filp[i] = p->filp;
423 p->rdev->uvd.img_size[i] = img_size;
417 return 0; 424 return 0;
418 } 425 }
419 } 426 }
@@ -733,6 +740,34 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
733 return radeon_uvd_send_msg(rdev, ring, bo, fence); 740 return radeon_uvd_send_msg(rdev, ring, bo, fence);
734} 741}
735 742
743/**
744 * radeon_uvd_count_handles - count number of open streams
745 *
746 * @rdev: radeon_device pointer
747 * @sd: number of SD streams
748 * @hd: number of HD streams
749 *
750 * Count the number of open SD/HD streams as a hint for power management
751 */
752static void radeon_uvd_count_handles(struct radeon_device *rdev,
753 unsigned *sd, unsigned *hd)
754{
755 unsigned i;
756
757 *sd = 0;
758 *hd = 0;
759
760 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
761 if (!atomic_read(&rdev->uvd.handles[i]))
762 continue;
763
764 if (rdev->uvd.img_size[i] >= 720*576)
765 ++(*hd);
766 else
767 ++(*sd);
768 }
769}
770
736static void radeon_uvd_idle_work_handler(struct work_struct *work) 771static void radeon_uvd_idle_work_handler(struct work_struct *work)
737{ 772{
738 struct radeon_device *rdev = 773 struct radeon_device *rdev =
@@ -740,10 +775,7 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work)
740 775
741 if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) { 776 if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
742 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { 777 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
743 mutex_lock(&rdev->pm.mutex); 778 radeon_dpm_enable_uvd(rdev, false);
744 rdev->pm.dpm.uvd_active = false;
745 mutex_unlock(&rdev->pm.mutex);
746 radeon_pm_compute_clocks(rdev);
747 } else { 779 } else {
748 radeon_set_uvd_clocks(rdev, 0, 0); 780 radeon_set_uvd_clocks(rdev, 0, 0);
749 } 781 }
@@ -755,13 +787,25 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work)
755 787
756void radeon_uvd_note_usage(struct radeon_device *rdev) 788void radeon_uvd_note_usage(struct radeon_device *rdev)
757{ 789{
790 bool streams_changed = false;
758 bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work); 791 bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
759 set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work, 792 set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
760 msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS)); 793 msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
761 if (set_clocks) { 794
795 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
796 unsigned hd = 0, sd = 0;
797 radeon_uvd_count_handles(rdev, &sd, &hd);
798 if ((rdev->pm.dpm.sd != sd) ||
799 (rdev->pm.dpm.hd != hd)) {
800 rdev->pm.dpm.sd = sd;
801 rdev->pm.dpm.hd = hd;
802 streams_changed = true;
803 }
804 }
805
806 if (set_clocks || streams_changed) {
762 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { 807 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
763 /* XXX pick SD/HD/MVC */ 808 radeon_dpm_enable_uvd(rdev, true);
764 radeon_dpm_enable_power_state(rdev, POWER_STATE_TYPE_INTERNAL_UVD);
765 } else { 809 } else {
766 radeon_set_uvd_clocks(rdev, 53300, 40000); 810 radeon_set_uvd_clocks(rdev, 53300, 40000);
767 } 811 }
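radeon_uvd.c now records the decoded image size per handle and counts open streams, treating anything of 720x576 pixels or more as HD; radeon_uvd_note_usage() re-evaluates the counts on every submission and only pokes DPM when they change. The classification as a tiny standalone sketch (is_hd_stream() is an illustrative name):

	#include <stdbool.h>

	/* HD if the frame has at least as many pixels as 720x576 (PAL SD). */
	static bool is_hd_stream(unsigned width, unsigned height)
	{
		return (width * height) >= 720u * 576u;
	}
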
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 233a9b9fa1f7..b8074a8ec75a 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -174,10 +174,13 @@ int rs400_gart_enable(struct radeon_device *rdev)
174 /* FIXME: according to doc we should set HIDE_MMCFG_BAR=0, 174 /* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
175 * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */ 175 * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
176 if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) { 176 if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
177 WREG32_MC(RS480_MC_MISC_CNTL, 177 tmp = RREG32_MC(RS480_MC_MISC_CNTL);
178 (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN)); 178 tmp |= RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN;
179 WREG32_MC(RS480_MC_MISC_CNTL, tmp);
179 } else { 180 } else {
180 WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN); 181 tmp = RREG32_MC(RS480_MC_MISC_CNTL);
182 tmp |= RS480_GART_INDEX_REG_EN;
183 WREG32_MC(RS480_MC_MISC_CNTL, tmp);
181 } 184 }
182 /* Enable gart */ 185 /* Enable gart */
183 WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg)); 186 WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
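The rs400 GART hunk turns a blind register write into a read-modify-write, so setting RS480_GART_INDEX_REG_EN (plus RS690_BLOCK_GFX_D3_EN on RS690/RS740) no longer clears whatever other bits the BIOS left in RS480_MC_MISC_CNTL. The pattern in isolation:

	/* Read-modify-write sketch: preserve unrelated bits while setting ours. */
	tmp = RREG32_MC(RS480_MC_MISC_CNTL);
	tmp |= RS480_GART_INDEX_REG_EN;          /* also | RS690_BLOCK_GFX_D3_EN on RS690/RS740 */
	WREG32_MC(RS480_MC_MISC_CNTL, tmp);
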
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index bdd888b4db2b..ab1f2016f21e 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -1918,6 +1918,7 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
1918 (power_state->v1.ucNonClockStateIndex * 1918 (power_state->v1.ucNonClockStateIndex *
1919 power_info->pplib.ucNonClockSize)); 1919 power_info->pplib.ucNonClockSize));
1920 if (power_info->pplib.ucStateEntrySize - 1) { 1920 if (power_info->pplib.ucStateEntrySize - 1) {
1921 u8 *idx;
1921 ps = kzalloc(sizeof(struct rv6xx_ps), GFP_KERNEL); 1922 ps = kzalloc(sizeof(struct rv6xx_ps), GFP_KERNEL);
1922 if (ps == NULL) { 1923 if (ps == NULL) {
1923 kfree(rdev->pm.dpm.ps); 1924 kfree(rdev->pm.dpm.ps);
@@ -1926,12 +1927,12 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
1926 rdev->pm.dpm.ps[i].ps_priv = ps; 1927 rdev->pm.dpm.ps[i].ps_priv = ps;
1927 rv6xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], 1928 rv6xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
1928 non_clock_info); 1929 non_clock_info);
1930 idx = (u8 *)&power_state->v1.ucClockStateIndices[0];
1929 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { 1931 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
1930 clock_info = (union pplib_clock_info *) 1932 clock_info = (union pplib_clock_info *)
1931 (mode_info->atom_context->bios + data_offset + 1933 (mode_info->atom_context->bios + data_offset +
1932 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + 1934 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
1933 (power_state->v1.ucClockStateIndices[j] * 1935 (idx[j] * power_info->pplib.ucClockInfoSize));
1934 power_info->pplib.ucClockInfoSize));
1935 rv6xx_parse_pplib_clock_info(rdev, 1936 rv6xx_parse_pplib_clock_info(rdev,
1936 &rdev->pm.dpm.ps[i], j, 1937 &rdev->pm.dpm.ps[i], j,
1937 clock_info); 1938 clock_info);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index f5e92cfcc140..9f5846743c9e 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -801,103 +801,6 @@ u32 rv770_get_xclk(struct radeon_device *rdev)
801 return reference_clock; 801 return reference_clock;
802} 802}
803 803
804int rv770_uvd_resume(struct radeon_device *rdev)
805{
806 uint64_t addr;
807 uint32_t chip_id, size;
808 int r;
809
810 r = radeon_uvd_resume(rdev);
811 if (r)
812 return r;
813
814 /* programm the VCPU memory controller bits 0-27 */
815 addr = rdev->uvd.gpu_addr >> 3;
816 size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
817 WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
818 WREG32(UVD_VCPU_CACHE_SIZE0, size);
819
820 addr += size;
821 size = RADEON_UVD_STACK_SIZE >> 3;
822 WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
823 WREG32(UVD_VCPU_CACHE_SIZE1, size);
824
825 addr += size;
826 size = RADEON_UVD_HEAP_SIZE >> 3;
827 WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
828 WREG32(UVD_VCPU_CACHE_SIZE2, size);
829
830 /* bits 28-31 */
831 addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
832 WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
833
834 /* bits 32-39 */
835 addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
836 WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
837
838 /* tell firmware which hardware it is running on */
839 switch (rdev->family) {
840 default:
841 return -EINVAL;
842 case CHIP_RV710:
843 chip_id = 0x01000005;
844 break;
845 case CHIP_RV730:
846 chip_id = 0x01000006;
847 break;
848 case CHIP_RV740:
849 chip_id = 0x01000007;
850 break;
851 case CHIP_CYPRESS:
852 case CHIP_HEMLOCK:
853 chip_id = 0x01000008;
854 break;
855 case CHIP_JUNIPER:
856 chip_id = 0x01000009;
857 break;
858 case CHIP_REDWOOD:
859 chip_id = 0x0100000a;
860 break;
861 case CHIP_CEDAR:
862 chip_id = 0x0100000b;
863 break;
864 case CHIP_SUMO:
865 case CHIP_SUMO2:
866 chip_id = 0x0100000c;
867 break;
868 case CHIP_PALM:
869 chip_id = 0x0100000e;
870 break;
871 case CHIP_CAYMAN:
872 chip_id = 0x0100000f;
873 break;
874 case CHIP_BARTS:
875 chip_id = 0x01000010;
876 break;
877 case CHIP_TURKS:
878 chip_id = 0x01000011;
879 break;
880 case CHIP_CAICOS:
881 chip_id = 0x01000012;
882 break;
883 case CHIP_TAHITI:
884 chip_id = 0x01000014;
885 break;
886 case CHIP_VERDE:
887 chip_id = 0x01000015;
888 break;
889 case CHIP_PITCAIRN:
890 chip_id = 0x01000016;
891 break;
892 case CHIP_ARUBA:
893 chip_id = 0x01000017;
894 break;
895 }
896 WREG32(UVD_VCPU_CHIP_ID, chip_id);
897
898 return 0;
899}
900
901u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) 804u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
902{ 805{
903 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 806 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
@@ -1747,80 +1650,6 @@ static int rv770_mc_init(struct radeon_device *rdev)
1747 return 0; 1650 return 0;
1748} 1651}
1749 1652
1750/**
1751 * rv770_copy_dma - copy pages using the DMA engine
1752 *
1753 * @rdev: radeon_device pointer
1754 * @src_offset: src GPU address
1755 * @dst_offset: dst GPU address
1756 * @num_gpu_pages: number of GPU pages to xfer
1757 * @fence: radeon fence object
1758 *
1759 * Copy GPU paging using the DMA engine (r7xx).
1760 * Used by the radeon ttm implementation to move pages if
1761 * registered as the asic copy callback.
1762 */
1763int rv770_copy_dma(struct radeon_device *rdev,
1764 uint64_t src_offset, uint64_t dst_offset,
1765 unsigned num_gpu_pages,
1766 struct radeon_fence **fence)
1767{
1768 struct radeon_semaphore *sem = NULL;
1769 int ring_index = rdev->asic->copy.dma_ring_index;
1770 struct radeon_ring *ring = &rdev->ring[ring_index];
1771 u32 size_in_dw, cur_size_in_dw;
1772 int i, num_loops;
1773 int r = 0;
1774
1775 r = radeon_semaphore_create(rdev, &sem);
1776 if (r) {
1777 DRM_ERROR("radeon: moving bo (%d).\n", r);
1778 return r;
1779 }
1780
1781 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
1782 num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
1783 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
1784 if (r) {
1785 DRM_ERROR("radeon: moving bo (%d).\n", r);
1786 radeon_semaphore_free(rdev, &sem, NULL);
1787 return r;
1788 }
1789
1790 if (radeon_fence_need_sync(*fence, ring->idx)) {
1791 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
1792 ring->idx);
1793 radeon_fence_note_sync(*fence, ring->idx);
1794 } else {
1795 radeon_semaphore_free(rdev, &sem, NULL);
1796 }
1797
1798 for (i = 0; i < num_loops; i++) {
1799 cur_size_in_dw = size_in_dw;
1800 if (cur_size_in_dw > 0xFFFF)
1801 cur_size_in_dw = 0xFFFF;
1802 size_in_dw -= cur_size_in_dw;
1803 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
1804 radeon_ring_write(ring, dst_offset & 0xfffffffc);
1805 radeon_ring_write(ring, src_offset & 0xfffffffc);
1806 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
1807 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
1808 src_offset += cur_size_in_dw * 4;
1809 dst_offset += cur_size_in_dw * 4;
1810 }
1811
1812 r = radeon_fence_emit(rdev, fence, ring->idx);
1813 if (r) {
1814 radeon_ring_unlock_undo(rdev, ring);
1815 return r;
1816 }
1817
1818 radeon_ring_unlock_commit(rdev, ring);
1819 radeon_semaphore_free(rdev, &sem, *fence);
1820
1821 return r;
1822}
1823
1824static int rv770_startup(struct radeon_device *rdev) 1653static int rv770_startup(struct radeon_device *rdev)
1825{ 1654{
1826 struct radeon_ring *ring; 1655 struct radeon_ring *ring;
@@ -1829,6 +1658,11 @@ static int rv770_startup(struct radeon_device *rdev)
1829 /* enable pcie gen2 link */ 1658 /* enable pcie gen2 link */
1830 rv770_pcie_gen2_enable(rdev); 1659 rv770_pcie_gen2_enable(rdev);
1831 1660
1661 /* scratch needs to be initialized before MC */
1662 r = r600_vram_scratch_init(rdev);
1663 if (r)
1664 return r;
1665
1832 rv770_mc_program(rdev); 1666 rv770_mc_program(rdev);
1833 1667
1834 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { 1668 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
@@ -1839,10 +1673,6 @@ static int rv770_startup(struct radeon_device *rdev)
1839 } 1673 }
1840 } 1674 }
1841 1675
1842 r = r600_vram_scratch_init(rdev);
1843 if (r)
1844 return r;
1845
1846 if (rdev->flags & RADEON_IS_AGP) { 1676 if (rdev->flags & RADEON_IS_AGP) {
1847 rv770_agp_enable(rdev); 1677 rv770_agp_enable(rdev);
1848 } else { 1678 } else {
@@ -1852,12 +1682,6 @@ static int rv770_startup(struct radeon_device *rdev)
1852 } 1682 }
1853 1683
1854 rv770_gpu_init(rdev); 1684 rv770_gpu_init(rdev);
1855 r = r600_blit_init(rdev);
1856 if (r) {
1857 r600_blit_fini(rdev);
1858 rdev->asic->copy.copy = NULL;
1859 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
1860 }
1861 1685
1862 /* allocate wb buffer */ 1686 /* allocate wb buffer */
1863 r = radeon_wb_init(rdev); 1687 r = radeon_wb_init(rdev);
@@ -1876,7 +1700,7 @@ static int rv770_startup(struct radeon_device *rdev)
1876 return r; 1700 return r;
1877 } 1701 }
1878 1702
1879 r = rv770_uvd_resume(rdev); 1703 r = uvd_v2_2_resume(rdev);
1880 if (!r) { 1704 if (!r) {
1881 r = radeon_fence_driver_start_ring(rdev, 1705 r = radeon_fence_driver_start_ring(rdev,
1882 R600_RING_TYPE_UVD_INDEX); 1706 R600_RING_TYPE_UVD_INDEX);
@@ -1905,14 +1729,14 @@ static int rv770_startup(struct radeon_device *rdev)
1905 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 1729 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1906 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, 1730 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
1907 R600_CP_RB_RPTR, R600_CP_RB_WPTR, 1731 R600_CP_RB_RPTR, R600_CP_RB_WPTR,
1908 0, 0xfffff, RADEON_CP_PACKET2); 1732 RADEON_CP_PACKET2);
1909 if (r) 1733 if (r)
1910 return r; 1734 return r;
1911 1735
1912 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; 1736 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
1913 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, 1737 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
1914 DMA_RB_RPTR, DMA_RB_WPTR, 1738 DMA_RB_RPTR, DMA_RB_WPTR,
1915 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); 1739 DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
1916 if (r) 1740 if (r)
1917 return r; 1741 return r;
1918 1742
@@ -1929,12 +1753,11 @@ static int rv770_startup(struct radeon_device *rdev)
1929 1753
1930 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; 1754 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
1931 if (ring->ring_size) { 1755 if (ring->ring_size) {
1932 r = radeon_ring_init(rdev, ring, ring->ring_size, 1756 r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
1933 R600_WB_UVD_RPTR_OFFSET,
1934 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, 1757 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
1935 0, 0xfffff, RADEON_CP_PACKET2); 1758 RADEON_CP_PACKET2);
1936 if (!r) 1759 if (!r)
1937 r = r600_uvd_init(rdev); 1760 r = uvd_v1_0_init(rdev);
1938 1761
1939 if (r) 1762 if (r)
1940 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); 1763 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
@@ -1984,7 +1807,7 @@ int rv770_resume(struct radeon_device *rdev)
1984int rv770_suspend(struct radeon_device *rdev) 1807int rv770_suspend(struct radeon_device *rdev)
1985{ 1808{
1986 r600_audio_fini(rdev); 1809 r600_audio_fini(rdev);
1987 r600_uvd_stop(rdev); 1810 uvd_v1_0_fini(rdev);
1988 radeon_uvd_suspend(rdev); 1811 radeon_uvd_suspend(rdev);
1989 r700_cp_stop(rdev); 1812 r700_cp_stop(rdev);
1990 r600_dma_stop(rdev); 1813 r600_dma_stop(rdev);
@@ -2092,7 +1915,6 @@ int rv770_init(struct radeon_device *rdev)
2092 1915
2093void rv770_fini(struct radeon_device *rdev) 1916void rv770_fini(struct radeon_device *rdev)
2094{ 1917{
2095 r600_blit_fini(rdev);
2096 r700_cp_fini(rdev); 1918 r700_cp_fini(rdev);
2097 r600_dma_fini(rdev); 1919 r600_dma_fini(rdev);
2098 r600_irq_fini(rdev); 1920 r600_irq_fini(rdev);
@@ -2100,7 +1922,7 @@ void rv770_fini(struct radeon_device *rdev)
2100 radeon_ib_pool_fini(rdev); 1922 radeon_ib_pool_fini(rdev);
2101 radeon_irq_kms_fini(rdev); 1923 radeon_irq_kms_fini(rdev);
2102 rv770_pcie_gart_fini(rdev); 1924 rv770_pcie_gart_fini(rdev);
2103 r600_uvd_stop(rdev); 1925 uvd_v1_0_fini(rdev);
2104 radeon_uvd_fini(rdev); 1926 radeon_uvd_fini(rdev);
2105 r600_vram_scratch_fini(rdev); 1927 r600_vram_scratch_fini(rdev);
2106 radeon_gem_fini(rdev); 1928 radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
new file mode 100644
index 000000000000..f9b02e3d6830
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -0,0 +1,101 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <drm/drmP.h>
25#include "radeon.h"
26#include "radeon_asic.h"
27#include "rv770d.h"
28
29/**
30 * rv770_copy_dma - copy pages using the DMA engine
31 *
32 * @rdev: radeon_device pointer
33 * @src_offset: src GPU address
34 * @dst_offset: dst GPU address
35 * @num_gpu_pages: number of GPU pages to xfer
36 * @fence: radeon fence object
37 *
38 * Copy GPU paging using the DMA engine (r7xx).
39 * Used by the radeon ttm implementation to move pages if
40 * registered as the asic copy callback.
41 */
42int rv770_copy_dma(struct radeon_device *rdev,
43 uint64_t src_offset, uint64_t dst_offset,
44 unsigned num_gpu_pages,
45 struct radeon_fence **fence)
46{
47 struct radeon_semaphore *sem = NULL;
48 int ring_index = rdev->asic->copy.dma_ring_index;
49 struct radeon_ring *ring = &rdev->ring[ring_index];
50 u32 size_in_dw, cur_size_in_dw;
51 int i, num_loops;
52 int r = 0;
53
54 r = radeon_semaphore_create(rdev, &sem);
55 if (r) {
56 DRM_ERROR("radeon: moving bo (%d).\n", r);
57 return r;
58 }
59
60 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
61 num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
62 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
63 if (r) {
64 DRM_ERROR("radeon: moving bo (%d).\n", r);
65 radeon_semaphore_free(rdev, &sem, NULL);
66 return r;
67 }
68
69 if (radeon_fence_need_sync(*fence, ring->idx)) {
70 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
71 ring->idx);
72 radeon_fence_note_sync(*fence, ring->idx);
73 } else {
74 radeon_semaphore_free(rdev, &sem, NULL);
75 }
76
77 for (i = 0; i < num_loops; i++) {
78 cur_size_in_dw = size_in_dw;
79 if (cur_size_in_dw > 0xFFFF)
80 cur_size_in_dw = 0xFFFF;
81 size_in_dw -= cur_size_in_dw;
82 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
83 radeon_ring_write(ring, dst_offset & 0xfffffffc);
84 radeon_ring_write(ring, src_offset & 0xfffffffc);
85 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
86 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
87 src_offset += cur_size_in_dw * 4;
88 dst_offset += cur_size_in_dw * 4;
89 }
90
91 r = radeon_fence_emit(rdev, fence, ring->idx);
92 if (r) {
93 radeon_ring_unlock_undo(rdev, ring);
94 return r;
95 }
96
97 radeon_ring_unlock_commit(rdev, ring);
98 radeon_semaphore_free(rdev, &sem, *fence);
99
100 return r;
101}
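
Each COPY packet emitted by rv770_copy_dma() above carries at most 0xFFFF dwords, and radeon_ring_lock() reserves num_loops * 5 + 8 dwords up front (5 dwords per packet plus slack for the semaphore and fence). Below is a minimal user-space sketch of that accounting, assuming the usual 4 KiB GPU page (RADEON_GPU_PAGE_SHIFT == 12); it only mirrors the arithmetic in the function above and is not driver code.

/* Stand-alone sketch of the dword accounting used by rv770_copy_dma().
 * Assumes 4 KiB GPU pages and the 5-dword r7xx DMA COPY packet plus
 * 8 dwords of fence/semaphore slack, as reserved above. */
#include <stdio.h>

#define GPU_PAGE_SHIFT   12       /* assumed: RADEON_GPU_PAGE_SHIFT */
#define MAX_DW_PER_COPY  0xFFFFu  /* r7xx DMA copy limit, in dwords */

static unsigned ring_dwords_for_copy(unsigned num_gpu_pages)
{
    unsigned size_in_dw = (num_gpu_pages << GPU_PAGE_SHIFT) / 4;
    unsigned num_loops  = (size_in_dw + MAX_DW_PER_COPY - 1) / MAX_DW_PER_COPY;

    /* one 5-dword COPY packet per loop, plus 8 dwords of slack */
    return num_loops * 5 + 8;
}

int main(void)
{
    printf("1 page    -> %u ring dwords\n", ring_dwords_for_copy(1));
    printf("256 pages -> %u ring dwords\n", ring_dwords_for_copy(256));
    return 0;
}
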
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 094c67a29d0d..8cbb85dae5aa 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2294,6 +2294,7 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
2294 (power_state->v1.ucNonClockStateIndex * 2294 (power_state->v1.ucNonClockStateIndex *
2295 power_info->pplib.ucNonClockSize)); 2295 power_info->pplib.ucNonClockSize));
2296 if (power_info->pplib.ucStateEntrySize - 1) { 2296 if (power_info->pplib.ucStateEntrySize - 1) {
2297 u8 *idx;
2297 ps = kzalloc(sizeof(struct rv7xx_ps), GFP_KERNEL); 2298 ps = kzalloc(sizeof(struct rv7xx_ps), GFP_KERNEL);
2298 if (ps == NULL) { 2299 if (ps == NULL) {
2299 kfree(rdev->pm.dpm.ps); 2300 kfree(rdev->pm.dpm.ps);
@@ -2303,12 +2304,12 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
2303 rv7xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], 2304 rv7xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
2304 non_clock_info, 2305 non_clock_info,
2305 power_info->pplib.ucNonClockSize); 2306 power_info->pplib.ucNonClockSize);
2307 idx = (u8 *)&power_state->v1.ucClockStateIndices[0];
2306 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { 2308 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
2307 clock_info = (union pplib_clock_info *) 2309 clock_info = (union pplib_clock_info *)
2308 (mode_info->atom_context->bios + data_offset + 2310 (mode_info->atom_context->bios + data_offset +
2309 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + 2311 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
2310 (power_state->v1.ucClockStateIndices[j] * 2312 (idx[j] * power_info->pplib.ucClockInfoSize));
2311 power_info->pplib.ucClockInfoSize));
2312 rv7xx_parse_pplib_clock_info(rdev, 2313 rv7xx_parse_pplib_clock_info(rdev,
2313 &rdev->pm.dpm.ps[i], j, 2314 &rdev->pm.dpm.ps[i], j,
2314 clock_info); 2315 clock_info);
@@ -2517,8 +2518,16 @@ u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low)
2517bool rv770_dpm_vblank_too_short(struct radeon_device *rdev) 2518bool rv770_dpm_vblank_too_short(struct radeon_device *rdev)
2518{ 2519{
2519 u32 vblank_time = r600_dpm_get_vblank_time(rdev); 2520 u32 vblank_time = r600_dpm_get_vblank_time(rdev);
2521 u32 switch_limit = 300;
2520 2522
2521 if (vblank_time < 300) 2523 /* quirks */
2524 /* ASUS K70AF */
2525 if ((rdev->pdev->device == 0x9553) &&
2526 (rdev->pdev->subsystem_vendor == 0x1043) &&
2527 (rdev->pdev->subsystem_device == 0x1c42))
2528 switch_limit = 200;
2529
2530 if (vblank_time < switch_limit)
2522 return true; 2531 return true;
2523 else 2532 else
2524 return false; 2533 return false;
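
The hunk above turns the fixed 300 µs vblank threshold into a switch_limit that a board-specific quirk can lower; the ASUS K70AF (device 0x9553, subsystem 1043:1c42) gets 200 µs so memory-clock switching is still allowed there. Purely as an illustrative restatement of that logic, the same quirk could be written as a small PCI-ID table; this sketch is not part of the driver, only a different shape for the check above.

/* Illustrative only: the ASUS K70AF quirk as a lookup table. */
#include <stddef.h>
#include <stdint.h>

struct vblank_quirk {
    uint16_t device;
    uint16_t subsystem_vendor;
    uint16_t subsystem_device;
    uint32_t switch_limit_us;   /* minimum vblank time for an mclk switch */
};

static const struct vblank_quirk vblank_quirks[] = {
    { 0x9553, 0x1043, 0x1c42, 200 },  /* ASUS K70AF */
};

static uint32_t vblank_switch_limit(uint16_t dev, uint16_t subven, uint16_t subdev)
{
    for (size_t i = 0; i < sizeof(vblank_quirks) / sizeof(vblank_quirks[0]); i++) {
        const struct vblank_quirk *q = &vblank_quirks[i];
        if (q->device == dev && q->subsystem_vendor == subven &&
            q->subsystem_device == subdev)
            return q->switch_limit_us;
    }
    return 300;  /* default threshold in rv770_dpm_vblank_too_short() */
}
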
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 6bef2b7d601b..9fe60e542922 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -971,7 +971,21 @@
971# define TARGET_LINK_SPEED_MASK (0xf << 0) 971# define TARGET_LINK_SPEED_MASK (0xf << 0)
972# define SELECTABLE_DEEMPHASIS (1 << 6) 972# define SELECTABLE_DEEMPHASIS (1 << 6)
973 973
974/*
975 * PM4
976 */
977#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
978 (((reg) >> 2) & 0xFFFF) | \
979 ((n) & 0x3FFF) << 16)
980#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
981 (((op) & 0xFF) << 8) | \
982 ((n) & 0x3FFF) << 16)
983
974/* UVD */ 984/* UVD */
985#define UVD_GPCOM_VCPU_CMD 0xef0c
986#define UVD_GPCOM_VCPU_DATA0 0xef10
987#define UVD_GPCOM_VCPU_DATA1 0xef14
988
975#define UVD_LMI_EXT40_ADDR 0xf498 989#define UVD_LMI_EXT40_ADDR 0xf498
976#define UVD_VCPU_CHIP_ID 0xf4d4 990#define UVD_VCPU_CHIP_ID 0xf4d4
977#define UVD_VCPU_CACHE_OFFSET0 0xf4d8 991#define UVD_VCPU_CACHE_OFFSET0 0xf4d8
@@ -985,4 +999,6 @@
985#define UVD_RBC_RB_RPTR 0xf690 999#define UVD_RBC_RB_RPTR 0xf690
986#define UVD_RBC_RB_WPTR 0xf694 1000#define UVD_RBC_RB_WPTR 0xf694
987 1001
1002#define UVD_CONTEXT_ID 0xf6f4
1003
988#endif 1004#endif
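
rv770d.h now gains PM4 PACKET0/PACKET3 header macros alongside the new UVD register offsets. As a quick aid to reading them, here is a stand-alone sketch of what those macros encode; the packet-type values (0 and 3) and the 0x69 SET_CONTEXT_REG opcode in the example are recalled from the radeon headers rather than taken from this hunk, so treat them as assumptions.

#include <stdint.h>
#include <stdio.h>

#define PKT_TYPE0 0u   /* assumed RADEON_PACKET_TYPE0 */
#define PKT_TYPE3 3u   /* assumed RADEON_PACKET_TYPE3 */

static uint32_t packet0(uint32_t reg, uint32_t n)
{
    /* register dword offset in bits 0-15, count field in bits 16-29 */
    return (PKT_TYPE0 << 30) | ((reg >> 2) & 0xFFFF) | ((n & 0x3FFF) << 16);
}

static uint32_t packet3(uint32_t op, uint32_t n)
{
    /* opcode in bits 8-15, count field in bits 16-29 */
    return (PKT_TYPE3 << 30) | ((op & 0xFF) << 8) | ((n & 0x3FFF) << 16);
}

int main(void)
{
    /* SET_CONTEXT_REG (opcode 0x69, assumed) with count field 1 */
    printf("PACKET3(0x69, 1)   = 0x%08x\n", packet3(0x69, 1));
    /* type-0 write addressing UVD_RBC_RB_RPTR (0xf690) */
    printf("PACKET0(0xf690, 0) = 0x%08x\n", packet0(0xf690, 0));
    return 0;
}
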
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index daa8d2df8ec5..3e23b757dcfa 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -68,6 +68,8 @@ MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
68 68
69static void si_pcie_gen3_enable(struct radeon_device *rdev); 69static void si_pcie_gen3_enable(struct radeon_device *rdev);
70static void si_program_aspm(struct radeon_device *rdev); 70static void si_program_aspm(struct radeon_device *rdev);
71extern void sumo_rlc_fini(struct radeon_device *rdev);
72extern int sumo_rlc_init(struct radeon_device *rdev);
71extern int r600_ih_ring_alloc(struct radeon_device *rdev); 73extern int r600_ih_ring_alloc(struct radeon_device *rdev);
72extern void r600_ih_ring_fini(struct radeon_device *rdev); 74extern void r600_ih_ring_fini(struct radeon_device *rdev);
73extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev); 75extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
@@ -76,6 +78,11 @@ extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_
76extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev); 78extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
77extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev); 79extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
78extern bool evergreen_is_display_hung(struct radeon_device *rdev); 80extern bool evergreen_is_display_hung(struct radeon_device *rdev);
81extern void si_dma_vm_set_page(struct radeon_device *rdev,
82 struct radeon_ib *ib,
83 uint64_t pe,
84 uint64_t addr, unsigned count,
85 uint32_t incr, uint32_t flags);
79 86
80static const u32 verde_rlc_save_restore_register_list[] = 87static const u32 verde_rlc_save_restore_register_list[] =
81{ 88{
@@ -1704,7 +1711,8 @@ static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
1704 struct drm_display_mode *mode, 1711 struct drm_display_mode *mode,
1705 struct drm_display_mode *other_mode) 1712 struct drm_display_mode *other_mode)
1706{ 1713{
1707 u32 tmp; 1714 u32 tmp, buffer_alloc, i;
1715 u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
1708 /* 1716 /*
1709 * Line Buffer Setup 1717 * Line Buffer Setup
1710 * There are 3 line buffers, each one shared by 2 display controllers. 1718 * There are 3 line buffers, each one shared by 2 display controllers.
@@ -1719,16 +1727,30 @@ static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
1719 * non-linked crtcs for maximum line buffer allocation. 1727 * non-linked crtcs for maximum line buffer allocation.
1720 */ 1728 */
1721 if (radeon_crtc->base.enabled && mode) { 1729 if (radeon_crtc->base.enabled && mode) {
1722 if (other_mode) 1730 if (other_mode) {
1723 tmp = 0; /* 1/2 */ 1731 tmp = 0; /* 1/2 */
1724 else 1732 buffer_alloc = 1;
1733 } else {
1725 tmp = 2; /* whole */ 1734 tmp = 2; /* whole */
1726 } else 1735 buffer_alloc = 2;
1736 }
1737 } else {
1727 tmp = 0; 1738 tmp = 0;
1739 buffer_alloc = 0;
1740 }
1728 1741
1729 WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, 1742 WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
1730 DC_LB_MEMORY_CONFIG(tmp)); 1743 DC_LB_MEMORY_CONFIG(tmp));
1731 1744
1745 WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
1746 DMIF_BUFFERS_ALLOCATED(buffer_alloc));
1747 for (i = 0; i < rdev->usec_timeout; i++) {
1748 if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
1749 DMIF_BUFFERS_ALLOCATED_COMPLETED)
1750 break;
1751 udelay(1);
1752 }
1753
1732 if (radeon_crtc->base.enabled && mode) { 1754 if (radeon_crtc->base.enabled && mode) {
1733 switch (tmp) { 1755 switch (tmp) {
1734 case 0: 1756 case 0:
@@ -3364,17 +3386,6 @@ static int si_cp_resume(struct radeon_device *rdev)
3364 u32 rb_bufsz; 3386 u32 rb_bufsz;
3365 int r; 3387 int r;
3366 3388
3367 /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
3368 WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
3369 SOFT_RESET_PA |
3370 SOFT_RESET_VGT |
3371 SOFT_RESET_SPI |
3372 SOFT_RESET_SX));
3373 RREG32(GRBM_SOFT_RESET);
3374 mdelay(15);
3375 WREG32(GRBM_SOFT_RESET, 0);
3376 RREG32(GRBM_SOFT_RESET);
3377
3378 WREG32(CP_SEM_WAIT_TIMER, 0x0); 3389 WREG32(CP_SEM_WAIT_TIMER, 0x0);
3379 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); 3390 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
3380 3391
@@ -3387,8 +3398,8 @@ static int si_cp_resume(struct radeon_device *rdev)
3387 /* ring 0 - compute and gfx */ 3398 /* ring 0 - compute and gfx */
3388 /* Set ring buffer size */ 3399 /* Set ring buffer size */
3389 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 3400 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3390 rb_bufsz = drm_order(ring->ring_size / 8); 3401 rb_bufsz = order_base_2(ring->ring_size / 8);
3391 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 3402 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
3392#ifdef __BIG_ENDIAN 3403#ifdef __BIG_ENDIAN
3393 tmp |= BUF_SWAP_32BIT; 3404 tmp |= BUF_SWAP_32BIT;
3394#endif 3405#endif
@@ -3420,8 +3431,8 @@ static int si_cp_resume(struct radeon_device *rdev)
3420 /* ring1 - compute only */ 3431 /* ring1 - compute only */
3421 /* Set ring buffer size */ 3432 /* Set ring buffer size */
3422 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; 3433 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
3423 rb_bufsz = drm_order(ring->ring_size / 8); 3434 rb_bufsz = order_base_2(ring->ring_size / 8);
3424 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 3435 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
3425#ifdef __BIG_ENDIAN 3436#ifdef __BIG_ENDIAN
3426 tmp |= BUF_SWAP_32BIT; 3437 tmp |= BUF_SWAP_32BIT;
3427#endif 3438#endif
@@ -3446,8 +3457,8 @@ static int si_cp_resume(struct radeon_device *rdev)
3446 /* ring2 - compute only */ 3457 /* ring2 - compute only */
3447 /* Set ring buffer size */ 3458 /* Set ring buffer size */
3448 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; 3459 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
3449 rb_bufsz = drm_order(ring->ring_size / 8); 3460 rb_bufsz = order_base_2(ring->ring_size / 8);
3450 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 3461 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
3451#ifdef __BIG_ENDIAN 3462#ifdef __BIG_ENDIAN
3452 tmp |= BUF_SWAP_32BIT; 3463 tmp |= BUF_SWAP_32BIT;
3453#endif 3464#endif
@@ -3493,7 +3504,7 @@ static int si_cp_resume(struct radeon_device *rdev)
3493 return 0; 3504 return 0;
3494} 3505}
3495 3506
3496static u32 si_gpu_check_soft_reset(struct radeon_device *rdev) 3507u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
3497{ 3508{
3498 u32 reset_mask = 0; 3509 u32 reset_mask = 0;
3499 u32 tmp; 3510 u32 tmp;
@@ -3742,34 +3753,6 @@ bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
3742 return radeon_ring_test_lockup(rdev, ring); 3753 return radeon_ring_test_lockup(rdev, ring);
3743} 3754}
3744 3755
3745/**
3746 * si_dma_is_lockup - Check if the DMA engine is locked up
3747 *
3748 * @rdev: radeon_device pointer
3749 * @ring: radeon_ring structure holding ring information
3750 *
3751 * Check if the async DMA engine is locked up.
3752 * Returns true if the engine appears to be locked up, false if not.
3753 */
3754bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
3755{
3756 u32 reset_mask = si_gpu_check_soft_reset(rdev);
3757 u32 mask;
3758
3759 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
3760 mask = RADEON_RESET_DMA;
3761 else
3762 mask = RADEON_RESET_DMA1;
3763
3764 if (!(reset_mask & mask)) {
3765 radeon_ring_lockup_update(ring);
3766 return false;
3767 }
3768 /* force ring activities */
3769 radeon_ring_force_activity(rdev, ring);
3770 return radeon_ring_test_lockup(rdev, ring);
3771}
3772
3773/* MC */ 3756/* MC */
3774static void si_mc_program(struct radeon_device *rdev) 3757static void si_mc_program(struct radeon_device *rdev)
3775{ 3758{
@@ -4083,13 +4066,64 @@ static int si_vm_packet3_ce_check(struct radeon_device *rdev,
4083 return 0; 4066 return 0;
4084} 4067}
4085 4068
4069static int si_vm_packet3_cp_dma_check(u32 *ib, u32 idx)
4070{
4071 u32 start_reg, reg, i;
4072 u32 command = ib[idx + 4];
4073 u32 info = ib[idx + 1];
4074 u32 idx_value = ib[idx];
4075 if (command & PACKET3_CP_DMA_CMD_SAS) {
4076 /* src address space is register */
4077 if (((info & 0x60000000) >> 29) == 0) {
4078 start_reg = idx_value << 2;
4079 if (command & PACKET3_CP_DMA_CMD_SAIC) {
4080 reg = start_reg;
4081 if (!si_vm_reg_valid(reg)) {
4082 DRM_ERROR("CP DMA Bad SRC register\n");
4083 return -EINVAL;
4084 }
4085 } else {
4086 for (i = 0; i < (command & 0x1fffff); i++) {
4087 reg = start_reg + (4 * i);
4088 if (!si_vm_reg_valid(reg)) {
4089 DRM_ERROR("CP DMA Bad SRC register\n");
4090 return -EINVAL;
4091 }
4092 }
4093 }
4094 }
4095 }
4096 if (command & PACKET3_CP_DMA_CMD_DAS) {
4097 /* dst address space is register */
4098 if (((info & 0x00300000) >> 20) == 0) {
4099 start_reg = ib[idx + 2];
4100 if (command & PACKET3_CP_DMA_CMD_DAIC) {
4101 reg = start_reg;
4102 if (!si_vm_reg_valid(reg)) {
4103 DRM_ERROR("CP DMA Bad DST register\n");
4104 return -EINVAL;
4105 }
4106 } else {
4107 for (i = 0; i < (command & 0x1fffff); i++) {
4108 reg = start_reg + (4 * i);
4109 if (!si_vm_reg_valid(reg)) {
4110 DRM_ERROR("CP DMA Bad DST register\n");
4111 return -EINVAL;
4112 }
4113 }
4114 }
4115 }
4116 }
4117 return 0;
4118}
4119
4086static int si_vm_packet3_gfx_check(struct radeon_device *rdev, 4120static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
4087 u32 *ib, struct radeon_cs_packet *pkt) 4121 u32 *ib, struct radeon_cs_packet *pkt)
4088{ 4122{
4123 int r;
4089 u32 idx = pkt->idx + 1; 4124 u32 idx = pkt->idx + 1;
4090 u32 idx_value = ib[idx]; 4125 u32 idx_value = ib[idx];
4091 u32 start_reg, end_reg, reg, i; 4126 u32 start_reg, end_reg, reg, i;
4092 u32 command, info;
4093 4127
4094 switch (pkt->opcode) { 4128 switch (pkt->opcode) {
4095 case PACKET3_NOP: 4129 case PACKET3_NOP:
@@ -4190,50 +4224,9 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
4190 } 4224 }
4191 break; 4225 break;
4192 case PACKET3_CP_DMA: 4226 case PACKET3_CP_DMA:
4193 command = ib[idx + 4]; 4227 r = si_vm_packet3_cp_dma_check(ib, idx);
4194 info = ib[idx + 1]; 4228 if (r)
4195 if (command & PACKET3_CP_DMA_CMD_SAS) { 4229 return r;
4196 /* src address space is register */
4197 if (((info & 0x60000000) >> 29) == 0) {
4198 start_reg = idx_value << 2;
4199 if (command & PACKET3_CP_DMA_CMD_SAIC) {
4200 reg = start_reg;
4201 if (!si_vm_reg_valid(reg)) {
4202 DRM_ERROR("CP DMA Bad SRC register\n");
4203 return -EINVAL;
4204 }
4205 } else {
4206 for (i = 0; i < (command & 0x1fffff); i++) {
4207 reg = start_reg + (4 * i);
4208 if (!si_vm_reg_valid(reg)) {
4209 DRM_ERROR("CP DMA Bad SRC register\n");
4210 return -EINVAL;
4211 }
4212 }
4213 }
4214 }
4215 }
4216 if (command & PACKET3_CP_DMA_CMD_DAS) {
4217 /* dst address space is register */
4218 if (((info & 0x00300000) >> 20) == 0) {
4219 start_reg = ib[idx + 2];
4220 if (command & PACKET3_CP_DMA_CMD_DAIC) {
4221 reg = start_reg;
4222 if (!si_vm_reg_valid(reg)) {
4223 DRM_ERROR("CP DMA Bad DST register\n");
4224 return -EINVAL;
4225 }
4226 } else {
4227 for (i = 0; i < (command & 0x1fffff); i++) {
4228 reg = start_reg + (4 * i);
4229 if (!si_vm_reg_valid(reg)) {
4230 DRM_ERROR("CP DMA Bad DST register\n");
4231 return -EINVAL;
4232 }
4233 }
4234 }
4235 }
4236 }
4237 break; 4230 break;
4238 default: 4231 default:
4239 DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode); 4232 DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
@@ -4245,6 +4238,7 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
4245static int si_vm_packet3_compute_check(struct radeon_device *rdev, 4238static int si_vm_packet3_compute_check(struct radeon_device *rdev,
4246 u32 *ib, struct radeon_cs_packet *pkt) 4239 u32 *ib, struct radeon_cs_packet *pkt)
4247{ 4240{
4241 int r;
4248 u32 idx = pkt->idx + 1; 4242 u32 idx = pkt->idx + 1;
4249 u32 idx_value = ib[idx]; 4243 u32 idx_value = ib[idx];
4250 u32 start_reg, reg, i; 4244 u32 start_reg, reg, i;
@@ -4317,6 +4311,11 @@ static int si_vm_packet3_compute_check(struct radeon_device *rdev,
4317 return -EINVAL; 4311 return -EINVAL;
4318 } 4312 }
4319 break; 4313 break;
4314 case PACKET3_CP_DMA:
4315 r = si_vm_packet3_cp_dma_check(ib, idx);
4316 if (r)
4317 return r;
4318 break;
4320 default: 4319 default:
4321 DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode); 4320 DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
4322 return -EINVAL; 4321 return -EINVAL;
@@ -4708,58 +4707,7 @@ void si_vm_set_page(struct radeon_device *rdev,
4708 } 4707 }
4709 } else { 4708 } else {
4710 /* DMA */ 4709 /* DMA */
4711 if (flags & RADEON_VM_PAGE_SYSTEM) { 4710 si_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
4712 while (count) {
4713 ndw = count * 2;
4714 if (ndw > 0xFFFFE)
4715 ndw = 0xFFFFE;
4716
4717 /* for non-physically contiguous pages (system) */
4718 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
4719 ib->ptr[ib->length_dw++] = pe;
4720 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
4721 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
4722 if (flags & RADEON_VM_PAGE_SYSTEM) {
4723 value = radeon_vm_map_gart(rdev, addr);
4724 value &= 0xFFFFFFFFFFFFF000ULL;
4725 } else if (flags & RADEON_VM_PAGE_VALID) {
4726 value = addr;
4727 } else {
4728 value = 0;
4729 }
4730 addr += incr;
4731 value |= r600_flags;
4732 ib->ptr[ib->length_dw++] = value;
4733 ib->ptr[ib->length_dw++] = upper_32_bits(value);
4734 }
4735 }
4736 } else {
4737 while (count) {
4738 ndw = count * 2;
4739 if (ndw > 0xFFFFE)
4740 ndw = 0xFFFFE;
4741
4742 if (flags & RADEON_VM_PAGE_VALID)
4743 value = addr;
4744 else
4745 value = 0;
4746 /* for physically contiguous pages (vram) */
4747 ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
4748 ib->ptr[ib->length_dw++] = pe; /* dst addr */
4749 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
4750 ib->ptr[ib->length_dw++] = r600_flags; /* mask */
4751 ib->ptr[ib->length_dw++] = 0;
4752 ib->ptr[ib->length_dw++] = value; /* value */
4753 ib->ptr[ib->length_dw++] = upper_32_bits(value);
4754 ib->ptr[ib->length_dw++] = incr; /* increment size */
4755 ib->ptr[ib->length_dw++] = 0;
4756 pe += ndw * 4;
4757 addr += (ndw / 2) * incr;
4758 count -= ndw / 2;
4759 }
4760 }
4761 while (ib->length_dw & 0x7)
4762 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
4763 } 4711 }
4764} 4712}
4765 4713
@@ -4806,32 +4754,6 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
4806 radeon_ring_write(ring, 0x0); 4754 radeon_ring_write(ring, 0x0);
4807} 4755}
4808 4756
4809void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
4810{
4811 struct radeon_ring *ring = &rdev->ring[ridx];
4812
4813 if (vm == NULL)
4814 return;
4815
4816 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
4817 if (vm->id < 8) {
4818 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
4819 } else {
4820 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
4821 }
4822 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
4823
4824 /* flush hdp cache */
4825 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
4826 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
4827 radeon_ring_write(ring, 1);
4828
4829 /* bits 0-7 are the VM contexts0-7 */
4830 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
4831 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
4832 radeon_ring_write(ring, 1 << vm->id);
4833}
4834
4835/* 4757/*
4836 * Power and clock gating 4758 * Power and clock gating
4837 */ 4759 */
@@ -4899,7 +4821,7 @@ static void si_set_uvd_dcm(struct radeon_device *rdev,
4899 WREG32_UVD_CTX(UVD_CGC_CTRL2, tmp2); 4821 WREG32_UVD_CTX(UVD_CGC_CTRL2, tmp2);
4900} 4822}
4901 4823
4902static void si_init_uvd_internal_cg(struct radeon_device *rdev) 4824void si_init_uvd_internal_cg(struct radeon_device *rdev)
4903{ 4825{
4904 bool hw_mode = true; 4826 bool hw_mode = true;
4905 4827
@@ -4942,7 +4864,7 @@ static void si_enable_dma_pg(struct radeon_device *rdev, bool enable)
4942 u32 data, orig; 4864 u32 data, orig;
4943 4865
4944 orig = data = RREG32(DMA_PG); 4866 orig = data = RREG32(DMA_PG);
4945 if (enable) 4867 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA))
4946 data |= PG_CNTL_ENABLE; 4868 data |= PG_CNTL_ENABLE;
4947 else 4869 else
4948 data &= ~PG_CNTL_ENABLE; 4870 data &= ~PG_CNTL_ENABLE;
@@ -4966,7 +4888,7 @@ static void si_enable_gfx_cgpg(struct radeon_device *rdev,
4966{ 4888{
4967 u32 tmp; 4889 u32 tmp;
4968 4890
4969 if (enable) { 4891 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) {
4970 tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10); 4892 tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
4971 WREG32(RLC_TTOP_D, tmp); 4893 WREG32(RLC_TTOP_D, tmp);
4972 4894
@@ -5069,9 +4991,9 @@ static void si_enable_cgcg(struct radeon_device *rdev,
5069 4991
5070 orig = data = RREG32(RLC_CGCG_CGLS_CTRL); 4992 orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
5071 4993
5072 si_enable_gui_idle_interrupt(rdev, enable); 4994 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
4995 si_enable_gui_idle_interrupt(rdev, true);
5073 4996
5074 if (enable) {
5075 WREG32(RLC_GCPM_GENERAL_3, 0x00000080); 4997 WREG32(RLC_GCPM_GENERAL_3, 0x00000080);
5076 4998
5077 tmp = si_halt_rlc(rdev); 4999 tmp = si_halt_rlc(rdev);
@@ -5088,6 +5010,8 @@ static void si_enable_cgcg(struct radeon_device *rdev,
5088 5010
5089 data |= CGCG_EN | CGLS_EN; 5011 data |= CGCG_EN | CGLS_EN;
5090 } else { 5012 } else {
5013 si_enable_gui_idle_interrupt(rdev, false);
5014
5091 RREG32(CB_CGTT_SCLK_CTRL); 5015 RREG32(CB_CGTT_SCLK_CTRL);
5092 RREG32(CB_CGTT_SCLK_CTRL); 5016 RREG32(CB_CGTT_SCLK_CTRL);
5093 RREG32(CB_CGTT_SCLK_CTRL); 5017 RREG32(CB_CGTT_SCLK_CTRL);
@@ -5105,16 +5029,18 @@ static void si_enable_mgcg(struct radeon_device *rdev,
5105{ 5029{
5106 u32 data, orig, tmp = 0; 5030 u32 data, orig, tmp = 0;
5107 5031
5108 if (enable) { 5032 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
5109 orig = data = RREG32(CGTS_SM_CTRL_REG); 5033 orig = data = RREG32(CGTS_SM_CTRL_REG);
5110 data = 0x96940200; 5034 data = 0x96940200;
5111 if (orig != data) 5035 if (orig != data)
5112 WREG32(CGTS_SM_CTRL_REG, data); 5036 WREG32(CGTS_SM_CTRL_REG, data);
5113 5037
5114 orig = data = RREG32(CP_MEM_SLP_CNTL); 5038 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
5115 data |= CP_MEM_LS_EN; 5039 orig = data = RREG32(CP_MEM_SLP_CNTL);
5116 if (orig != data) 5040 data |= CP_MEM_LS_EN;
5117 WREG32(CP_MEM_SLP_CNTL, data); 5041 if (orig != data)
5042 WREG32(CP_MEM_SLP_CNTL, data);
5043 }
5118 5044
5119 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); 5045 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
5120 data &= 0xffffffc0; 5046 data &= 0xffffffc0;
@@ -5159,7 +5085,7 @@ static void si_enable_uvd_mgcg(struct radeon_device *rdev,
5159{ 5085{
5160 u32 orig, data, tmp; 5086 u32 orig, data, tmp;
5161 5087
5162 if (enable) { 5088 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
5163 tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL); 5089 tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
5164 tmp |= 0x3fff; 5090 tmp |= 0x3fff;
5165 WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp); 5091 WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);
@@ -5207,7 +5133,7 @@ static void si_enable_mc_ls(struct radeon_device *rdev,
5207 5133
5208 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { 5134 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5209 orig = data = RREG32(mc_cg_registers[i]); 5135 orig = data = RREG32(mc_cg_registers[i]);
5210 if (enable) 5136 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
5211 data |= MC_LS_ENABLE; 5137 data |= MC_LS_ENABLE;
5212 else 5138 else
5213 data &= ~MC_LS_ENABLE; 5139 data &= ~MC_LS_ENABLE;
@@ -5216,226 +5142,295 @@ static void si_enable_mc_ls(struct radeon_device *rdev,
5216 } 5142 }
5217} 5143}
5218 5144
5219 5145static void si_enable_mc_mgcg(struct radeon_device *rdev,
5220static void si_init_cg(struct radeon_device *rdev) 5146 bool enable)
5221{ 5147{
5222 si_enable_mgcg(rdev, true); 5148 int i;
5223 si_enable_cgcg(rdev, false); 5149 u32 orig, data;
5224 /* disable MC LS on Tahiti */ 5150
5225 if (rdev->family == CHIP_TAHITI) 5151 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5226 si_enable_mc_ls(rdev, false); 5152 orig = data = RREG32(mc_cg_registers[i]);
5227 if (rdev->has_uvd) { 5153 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
5228 si_enable_uvd_mgcg(rdev, true); 5154 data |= MC_CG_ENABLE;
5229 si_init_uvd_internal_cg(rdev); 5155 else
5156 data &= ~MC_CG_ENABLE;
5157 if (data != orig)
5158 WREG32(mc_cg_registers[i], data);
5230 } 5159 }
5231} 5160}
5232 5161
5233static void si_fini_cg(struct radeon_device *rdev) 5162static void si_enable_dma_mgcg(struct radeon_device *rdev,
5163 bool enable)
5234{ 5164{
5235 if (rdev->has_uvd) 5165 u32 orig, data, offset;
5236 si_enable_uvd_mgcg(rdev, false); 5166 int i;
5237 si_enable_cgcg(rdev, false);
5238 si_enable_mgcg(rdev, false);
5239}
5240 5167
5241static void si_init_pg(struct radeon_device *rdev) 5168 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
5242{ 5169 for (i = 0; i < 2; i++) {
5243 bool has_pg = false; 5170 if (i == 0)
5244#if 0 5171 offset = DMA0_REGISTER_OFFSET;
5245 /* only cape verde supports PG */ 5172 else
5246 if (rdev->family == CHIP_VERDE) 5173 offset = DMA1_REGISTER_OFFSET;
5247 has_pg = true; 5174 orig = data = RREG32(DMA_POWER_CNTL + offset);
5248#endif 5175 data &= ~MEM_POWER_OVERRIDE;
5249 if (has_pg) { 5176 if (data != orig)
5250 si_init_ao_cu_mask(rdev); 5177 WREG32(DMA_POWER_CNTL + offset, data);
5251 si_init_dma_pg(rdev); 5178 WREG32(DMA_CLK_CTRL + offset, 0x00000100);
5252 si_enable_dma_pg(rdev, true); 5179 }
5253 si_init_gfx_cgpg(rdev);
5254 si_enable_gfx_cgpg(rdev, true);
5255 } else { 5180 } else {
5256 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); 5181 for (i = 0; i < 2; i++) {
5257 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); 5182 if (i == 0)
5183 offset = DMA0_REGISTER_OFFSET;
5184 else
5185 offset = DMA1_REGISTER_OFFSET;
5186 orig = data = RREG32(DMA_POWER_CNTL + offset);
5187 data |= MEM_POWER_OVERRIDE;
5188 if (data != orig)
5189 WREG32(DMA_POWER_CNTL + offset, data);
5190
5191 orig = data = RREG32(DMA_CLK_CTRL + offset);
5192 data = 0xff000000;
5193 if (data != orig)
5194 WREG32(DMA_CLK_CTRL + offset, data);
5195 }
5258 } 5196 }
5259} 5197}
5260 5198
5261static void si_fini_pg(struct radeon_device *rdev) 5199static void si_enable_bif_mgls(struct radeon_device *rdev,
5200 bool enable)
5262{ 5201{
5263 bool has_pg = false; 5202 u32 orig, data;
5264 5203
5265 /* only cape verde supports PG */ 5204 orig = data = RREG32_PCIE(PCIE_CNTL2);
5266 if (rdev->family == CHIP_VERDE)
5267 has_pg = true;
5268 5205
5269 if (has_pg) { 5206 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
5270 si_enable_dma_pg(rdev, false); 5207 data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
5271 si_enable_gfx_cgpg(rdev, false); 5208 REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
5272 } 5209 else
5210 data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
5211 REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
5212
5213 if (orig != data)
5214 WREG32_PCIE(PCIE_CNTL2, data);
5273} 5215}
5274 5216
5275/* 5217static void si_enable_hdp_mgcg(struct radeon_device *rdev,
5276 * RLC 5218 bool enable)
5277 */
5278void si_rlc_fini(struct radeon_device *rdev)
5279{ 5219{
5280 int r; 5220 u32 orig, data;
5281
5282 /* save restore block */
5283 if (rdev->rlc.save_restore_obj) {
5284 r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
5285 if (unlikely(r != 0))
5286 dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r);
5287 radeon_bo_unpin(rdev->rlc.save_restore_obj);
5288 radeon_bo_unreserve(rdev->rlc.save_restore_obj);
5289 5221
5290 radeon_bo_unref(&rdev->rlc.save_restore_obj); 5222 orig = data = RREG32(HDP_HOST_PATH_CNTL);
5291 rdev->rlc.save_restore_obj = NULL;
5292 }
5293 5223
5294 /* clear state block */ 5224 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
5295 if (rdev->rlc.clear_state_obj) { 5225 data &= ~CLOCK_GATING_DIS;
5296 r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false); 5226 else
5297 if (unlikely(r != 0)) 5227 data |= CLOCK_GATING_DIS;
5298 dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r);
5299 radeon_bo_unpin(rdev->rlc.clear_state_obj);
5300 radeon_bo_unreserve(rdev->rlc.clear_state_obj);
5301 5228
5302 radeon_bo_unref(&rdev->rlc.clear_state_obj); 5229 if (orig != data)
5303 rdev->rlc.clear_state_obj = NULL; 5230 WREG32(HDP_HOST_PATH_CNTL, data);
5304 }
5305} 5231}
5306 5232
5307#define RLC_CLEAR_STATE_END_MARKER 0x00000001 5233static void si_enable_hdp_ls(struct radeon_device *rdev,
5308 5234 bool enable)
5309int si_rlc_init(struct radeon_device *rdev)
5310{ 5235{
5311 volatile u32 *dst_ptr; 5236 u32 orig, data;
5312 u32 dws, data, i, j, k, reg_num;
5313 u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index;
5314 u64 reg_list_mc_addr;
5315 const struct cs_section_def *cs_data = si_cs_data;
5316 int r;
5317 5237
5318 /* save restore block */ 5238 orig = data = RREG32(HDP_MEM_POWER_LS);
5319 if (rdev->rlc.save_restore_obj == NULL) { 5239
5320 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, 5240 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
5321 RADEON_GEM_DOMAIN_VRAM, NULL, 5241 data |= HDP_LS_ENABLE;
5322 &rdev->rlc.save_restore_obj); 5242 else
5323 if (r) { 5243 data &= ~HDP_LS_ENABLE;
5324 dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r); 5244
5325 return r; 5245 if (orig != data)
5246 WREG32(HDP_MEM_POWER_LS, data);
5247}
5248
5249void si_update_cg(struct radeon_device *rdev,
5250 u32 block, bool enable)
5251{
5252 if (block & RADEON_CG_BLOCK_GFX) {
5253 /* order matters! */
5254 if (enable) {
5255 si_enable_mgcg(rdev, true);
5256 si_enable_cgcg(rdev, true);
5257 } else {
5258 si_enable_cgcg(rdev, false);
5259 si_enable_mgcg(rdev, false);
5326 } 5260 }
5327 } 5261 }
5328 5262
5329 r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false); 5263 if (block & RADEON_CG_BLOCK_MC) {
5330 if (unlikely(r != 0)) { 5264 si_enable_mc_mgcg(rdev, enable);
5331 si_rlc_fini(rdev); 5265 si_enable_mc_ls(rdev, enable);
5332 return r;
5333 } 5266 }
5334 r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM, 5267
5335 &rdev->rlc.save_restore_gpu_addr); 5268 if (block & RADEON_CG_BLOCK_SDMA) {
5336 if (r) { 5269 si_enable_dma_mgcg(rdev, enable);
5337 radeon_bo_unreserve(rdev->rlc.save_restore_obj);
5338 dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
5339 si_rlc_fini(rdev);
5340 return r;
5341 } 5270 }
5342 5271
5343 if (rdev->family == CHIP_VERDE) { 5272 if (block & RADEON_CG_BLOCK_BIF) {
5344 r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr); 5273 si_enable_bif_mgls(rdev, enable);
5345 if (r) {
5346 dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
5347 si_rlc_fini(rdev);
5348 return r;
5349 }
5350 /* write the sr buffer */
5351 dst_ptr = rdev->rlc.sr_ptr;
5352 for (i = 0; i < ARRAY_SIZE(verde_rlc_save_restore_register_list); i++) {
5353 dst_ptr[i] = verde_rlc_save_restore_register_list[i];
5354 }
5355 radeon_bo_kunmap(rdev->rlc.save_restore_obj);
5356 } 5274 }
5357 radeon_bo_unreserve(rdev->rlc.save_restore_obj);
5358 5275
5359 /* clear state block */ 5276 if (block & RADEON_CG_BLOCK_UVD) {
5360 reg_list_num = 0; 5277 if (rdev->has_uvd) {
5361 dws = 0; 5278 si_enable_uvd_mgcg(rdev, enable);
5362 for (i = 0; cs_data[i].section != NULL; i++) {
5363 for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
5364 reg_list_num++;
5365 dws += cs_data[i].section[j].reg_count;
5366 } 5279 }
5367 } 5280 }
5368 reg_list_blk_index = (3 * reg_list_num + 2);
5369 dws += reg_list_blk_index;
5370 5281
5371 if (rdev->rlc.clear_state_obj == NULL) { 5282 if (block & RADEON_CG_BLOCK_HDP) {
5372 r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, 5283 si_enable_hdp_mgcg(rdev, enable);
5373 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj); 5284 si_enable_hdp_ls(rdev, enable);
5374 if (r) {
5375 dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
5376 si_rlc_fini(rdev);
5377 return r;
5378 }
5379 } 5285 }
5380 r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false); 5286}
5381 if (unlikely(r != 0)) { 5287
5382 si_rlc_fini(rdev); 5288static void si_init_cg(struct radeon_device *rdev)
5383 return r; 5289{
5290 si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
5291 RADEON_CG_BLOCK_MC |
5292 RADEON_CG_BLOCK_SDMA |
5293 RADEON_CG_BLOCK_BIF |
5294 RADEON_CG_BLOCK_HDP), true);
5295 if (rdev->has_uvd) {
5296 si_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
5297 si_init_uvd_internal_cg(rdev);
5384 } 5298 }
5385 r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM, 5299}
5386 &rdev->rlc.clear_state_gpu_addr);
5387 if (r) {
5388 5300
5389 radeon_bo_unreserve(rdev->rlc.clear_state_obj); 5301static void si_fini_cg(struct radeon_device *rdev)
5390 dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r); 5302{
5391 si_rlc_fini(rdev); 5303 if (rdev->has_uvd) {
5392 return r; 5304 si_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
5393 } 5305 }
5394 r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr); 5306 si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
5395 if (r) { 5307 RADEON_CG_BLOCK_MC |
5396 dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r); 5308 RADEON_CG_BLOCK_SDMA |
5397 si_rlc_fini(rdev); 5309 RADEON_CG_BLOCK_BIF |
5398 return r; 5310 RADEON_CG_BLOCK_HDP), false);
5311}
5312
5313u32 si_get_csb_size(struct radeon_device *rdev)
5314{
5315 u32 count = 0;
5316 const struct cs_section_def *sect = NULL;
5317 const struct cs_extent_def *ext = NULL;
5318
5319 if (rdev->rlc.cs_data == NULL)
5320 return 0;
5321
5322 /* begin clear state */
5323 count += 2;
5324 /* context control state */
5325 count += 3;
5326
5327 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5328 for (ext = sect->section; ext->extent != NULL; ++ext) {
5329 if (sect->id == SECT_CONTEXT)
5330 count += 2 + ext->reg_count;
5331 else
5332 return 0;
5333 }
5399 } 5334 }
5400 /* set up the cs buffer */ 5335 /* pa_sc_raster_config */
5401 dst_ptr = rdev->rlc.cs_ptr; 5336 count += 3;
5402 reg_list_hdr_blk_index = 0; 5337 /* end clear state */
5403 reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4); 5338 count += 2;
5404 data = upper_32_bits(reg_list_mc_addr); 5339 /* clear state */
5405 dst_ptr[reg_list_hdr_blk_index] = data; 5340 count += 2;
5406 reg_list_hdr_blk_index++; 5341
5407 for (i = 0; cs_data[i].section != NULL; i++) { 5342 return count;
5408 for (j = 0; cs_data[i].section[j].extent != NULL; j++) { 5343}
5409 reg_num = cs_data[i].section[j].reg_count; 5344
5410 data = reg_list_mc_addr & 0xffffffff; 5345void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
5411 dst_ptr[reg_list_hdr_blk_index] = data; 5346{
5412 reg_list_hdr_blk_index++; 5347 u32 count = 0, i;
5413 5348 const struct cs_section_def *sect = NULL;
5414 data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff; 5349 const struct cs_extent_def *ext = NULL;
5415 dst_ptr[reg_list_hdr_blk_index] = data; 5350
5416 reg_list_hdr_blk_index++; 5351 if (rdev->rlc.cs_data == NULL)
5417 5352 return;
5418 data = 0x08000000 | (reg_num * 4); 5353 if (buffer == NULL)
5419 dst_ptr[reg_list_hdr_blk_index] = data; 5354 return;
5420 reg_list_hdr_blk_index++; 5355
5421 5356 buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
5422 for (k = 0; k < reg_num; k++) { 5357 buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE;
5423 data = cs_data[i].section[j].extent[k]; 5358
5424 dst_ptr[reg_list_blk_index + k] = data; 5359 buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1);
5360 buffer[count++] = 0x80000000;
5361 buffer[count++] = 0x80000000;
5362
5363 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5364 for (ext = sect->section; ext->extent != NULL; ++ext) {
5365 if (sect->id == SECT_CONTEXT) {
5366 buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count);
5367 buffer[count++] = ext->reg_index - 0xa000;
5368 for (i = 0; i < ext->reg_count; i++)
5369 buffer[count++] = ext->extent[i];
5370 } else {
5371 return;
5425 } 5372 }
5426 reg_list_mc_addr += reg_num * 4;
5427 reg_list_blk_index += reg_num;
5428 } 5373 }
5429 } 5374 }
5430 dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
5431 5375
5432 radeon_bo_kunmap(rdev->rlc.clear_state_obj); 5376 buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
5433 radeon_bo_unreserve(rdev->rlc.clear_state_obj); 5377 buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START;
5378 switch (rdev->family) {
5379 case CHIP_TAHITI:
5380 case CHIP_PITCAIRN:
5381 buffer[count++] = 0x2a00126a;
5382 break;
5383 case CHIP_VERDE:
5384 buffer[count++] = 0x0000124a;
5385 break;
5386 case CHIP_OLAND:
5387 buffer[count++] = 0x00000082;
5388 break;
5389 case CHIP_HAINAN:
5390 buffer[count++] = 0x00000000;
5391 break;
5392 default:
5393 buffer[count++] = 0x00000000;
5394 break;
5395 }
5396
5397 buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
5398 buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE;
5434 5399
5435 return 0; 5400 buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0);
5401 buffer[count++] = 0;
5436} 5402}
5437 5403
5438static void si_rlc_reset(struct radeon_device *rdev) 5404static void si_init_pg(struct radeon_device *rdev)
5405{
5406 if (rdev->pg_flags) {
5407 if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) {
5408 si_init_dma_pg(rdev);
5409 }
5410 si_init_ao_cu_mask(rdev);
5411 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
5412 si_init_gfx_cgpg(rdev);
5413 }
5414 si_enable_dma_pg(rdev, true);
5415 si_enable_gfx_cgpg(rdev, true);
5416 } else {
5417 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
5418 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
5419 }
5420}
5421
5422static void si_fini_pg(struct radeon_device *rdev)
5423{
5424 if (rdev->pg_flags) {
5425 si_enable_dma_pg(rdev, false);
5426 si_enable_gfx_cgpg(rdev, false);
5427 }
5428}
5429
5430/*
5431 * RLC
5432 */
5433void si_rlc_reset(struct radeon_device *rdev)
5439{ 5434{
5440 u32 tmp = RREG32(GRBM_SOFT_RESET); 5435 u32 tmp = RREG32(GRBM_SOFT_RESET);
5441 5436
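
Within the hunk above, si_get_csb_size() and si_get_csb_buffer() work as a pair: the first reports how many dwords the clear-state preamble needs, the second fills a caller-supplied buffer with exactly that many (begin-clear-state, context control, one SET_CONTEXT_REG run per SECT_CONTEXT extent, PA_SC_RASTER_CONFIG, end-clear-state, clear-state). A stand-alone sketch of the same dword accounting follows; the two extent sizes are hypothetical, the real ones come from si_cs_data in the driver.

/* Mirrors the dword arithmetic of si_get_csb_size(); not driver code. */
#include <stdio.h>

int main(void)
{
    unsigned reg_counts[] = { 4, 17 };   /* hypothetical SECT_CONTEXT extents */
    unsigned count = 2 + 3;              /* begin-clear-state + context control */

    for (unsigned i = 0; i < sizeof(reg_counts) / sizeof(reg_counts[0]); i++)
        count += 2 + reg_counts[i];      /* packet header + offset + values */

    count += 3 + 2 + 2;                  /* raster config, end-clear-state, clear state */
    printf("CSB size: %u dwords (%u bytes)\n", count, count * 4);
    return 0;
}
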
@@ -5651,7 +5646,7 @@ static int si_irq_init(struct radeon_device *rdev)
5651 WREG32(INTERRUPT_CNTL, interrupt_cntl); 5646 WREG32(INTERRUPT_CNTL, interrupt_cntl);
5652 5647
5653 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); 5648 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
5654 rb_bufsz = drm_order(rdev->ih.ring_size / 4); 5649 rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
5655 5650
5656 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | 5651 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
5657 IH_WPTR_OVERFLOW_CLEAR | 5652 IH_WPTR_OVERFLOW_CLEAR |
@@ -6335,80 +6330,6 @@ restart_ih:
6335 return IRQ_HANDLED; 6330 return IRQ_HANDLED;
6336} 6331}
6337 6332
6338/**
6339 * si_copy_dma - copy pages using the DMA engine
6340 *
6341 * @rdev: radeon_device pointer
6342 * @src_offset: src GPU address
6343 * @dst_offset: dst GPU address
6344 * @num_gpu_pages: number of GPU pages to xfer
6345 * @fence: radeon fence object
6346 *
6347 * Copy GPU paging using the DMA engine (SI).
6348 * Used by the radeon ttm implementation to move pages if
6349 * registered as the asic copy callback.
6350 */
6351int si_copy_dma(struct radeon_device *rdev,
6352 uint64_t src_offset, uint64_t dst_offset,
6353 unsigned num_gpu_pages,
6354 struct radeon_fence **fence)
6355{
6356 struct radeon_semaphore *sem = NULL;
6357 int ring_index = rdev->asic->copy.dma_ring_index;
6358 struct radeon_ring *ring = &rdev->ring[ring_index];
6359 u32 size_in_bytes, cur_size_in_bytes;
6360 int i, num_loops;
6361 int r = 0;
6362
6363 r = radeon_semaphore_create(rdev, &sem);
6364 if (r) {
6365 DRM_ERROR("radeon: moving bo (%d).\n", r);
6366 return r;
6367 }
6368
6369 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
6370 num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
6371 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
6372 if (r) {
6373 DRM_ERROR("radeon: moving bo (%d).\n", r);
6374 radeon_semaphore_free(rdev, &sem, NULL);
6375 return r;
6376 }
6377
6378 if (radeon_fence_need_sync(*fence, ring->idx)) {
6379 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
6380 ring->idx);
6381 radeon_fence_note_sync(*fence, ring->idx);
6382 } else {
6383 radeon_semaphore_free(rdev, &sem, NULL);
6384 }
6385
6386 for (i = 0; i < num_loops; i++) {
6387 cur_size_in_bytes = size_in_bytes;
6388 if (cur_size_in_bytes > 0xFFFFF)
6389 cur_size_in_bytes = 0xFFFFF;
6390 size_in_bytes -= cur_size_in_bytes;
6391 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
6392 radeon_ring_write(ring, dst_offset & 0xffffffff);
6393 radeon_ring_write(ring, src_offset & 0xffffffff);
6394 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
6395 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
6396 src_offset += cur_size_in_bytes;
6397 dst_offset += cur_size_in_bytes;
6398 }
6399
6400 r = radeon_fence_emit(rdev, fence, ring->idx);
6401 if (r) {
6402 radeon_ring_unlock_undo(rdev, ring);
6403 return r;
6404 }
6405
6406 radeon_ring_unlock_commit(rdev, ring);
6407 radeon_semaphore_free(rdev, &sem, *fence);
6408
6409 return r;
6410}
6411
6412/* 6333/*
6413 * startup/shutdown callbacks 6334 * startup/shutdown callbacks
6414 */ 6335 */
@@ -6422,6 +6343,11 @@ static int si_startup(struct radeon_device *rdev)
6422 /* enable aspm */ 6343 /* enable aspm */
6423 si_program_aspm(rdev); 6344 si_program_aspm(rdev);
6424 6345
6346 /* scratch needs to be initialized before MC */
6347 r = r600_vram_scratch_init(rdev);
6348 if (r)
6349 return r;
6350
6425 si_mc_program(rdev); 6351 si_mc_program(rdev);
6426 6352
6427 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || 6353 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
@@ -6439,17 +6365,19 @@ static int si_startup(struct radeon_device *rdev)
6439 return r; 6365 return r;
6440 } 6366 }
6441 6367
6442 r = r600_vram_scratch_init(rdev);
6443 if (r)
6444 return r;
6445
6446 r = si_pcie_gart_enable(rdev); 6368 r = si_pcie_gart_enable(rdev);
6447 if (r) 6369 if (r)
6448 return r; 6370 return r;
6449 si_gpu_init(rdev); 6371 si_gpu_init(rdev);
6450 6372
6451 /* allocate rlc buffers */ 6373 /* allocate rlc buffers */
6452 r = si_rlc_init(rdev); 6374 if (rdev->family == CHIP_VERDE) {
6375 rdev->rlc.reg_list = verde_rlc_save_restore_register_list;
6376 rdev->rlc.reg_list_size =
6377 (u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);
6378 }
6379 rdev->rlc.cs_data = si_cs_data;
6380 r = sumo_rlc_init(rdev);
6453 if (r) { 6381 if (r) {
6454 DRM_ERROR("Failed to init rlc BOs!\n"); 6382 DRM_ERROR("Failed to init rlc BOs!\n");
6455 return r; 6383 return r;
@@ -6491,7 +6419,7 @@ static int si_startup(struct radeon_device *rdev)
6491 } 6419 }
6492 6420
6493 if (rdev->has_uvd) { 6421 if (rdev->has_uvd) {
6494 r = rv770_uvd_resume(rdev); 6422 r = uvd_v2_2_resume(rdev);
6495 if (!r) { 6423 if (!r) {
6496 r = radeon_fence_driver_start_ring(rdev, 6424 r = radeon_fence_driver_start_ring(rdev,
6497 R600_RING_TYPE_UVD_INDEX); 6425 R600_RING_TYPE_UVD_INDEX);
@@ -6520,21 +6448,21 @@ static int si_startup(struct radeon_device *rdev)
6520 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 6448 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
6521 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, 6449 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
6522 CP_RB0_RPTR, CP_RB0_WPTR, 6450 CP_RB0_RPTR, CP_RB0_WPTR,
6523 0, 0xfffff, RADEON_CP_PACKET2); 6451 RADEON_CP_PACKET2);
6524 if (r) 6452 if (r)
6525 return r; 6453 return r;
6526 6454
6527 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; 6455 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
6528 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET, 6456 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
6529 CP_RB1_RPTR, CP_RB1_WPTR, 6457 CP_RB1_RPTR, CP_RB1_WPTR,
6530 0, 0xfffff, RADEON_CP_PACKET2); 6458 RADEON_CP_PACKET2);
6531 if (r) 6459 if (r)
6532 return r; 6460 return r;
6533 6461
6534 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; 6462 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
6535 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET, 6463 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
6536 CP_RB2_RPTR, CP_RB2_WPTR, 6464 CP_RB2_RPTR, CP_RB2_WPTR,
6537 0, 0xfffff, RADEON_CP_PACKET2); 6465 RADEON_CP_PACKET2);
6538 if (r) 6466 if (r)
6539 return r; 6467 return r;
6540 6468
@@ -6542,7 +6470,7 @@ static int si_startup(struct radeon_device *rdev)
6542 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, 6470 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
6543 DMA_RB_RPTR + DMA0_REGISTER_OFFSET, 6471 DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
6544 DMA_RB_WPTR + DMA0_REGISTER_OFFSET, 6472 DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
6545 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); 6473 DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
6546 if (r) 6474 if (r)
6547 return r; 6475 return r;
6548 6476
@@ -6550,7 +6478,7 @@ static int si_startup(struct radeon_device *rdev)
6550 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, 6478 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
6551 DMA_RB_RPTR + DMA1_REGISTER_OFFSET, 6479 DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
6552 DMA_RB_WPTR + DMA1_REGISTER_OFFSET, 6480 DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
6553 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); 6481 DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
6554 if (r) 6482 if (r)
6555 return r; 6483 return r;
6556 6484
@@ -6568,12 +6496,11 @@ static int si_startup(struct radeon_device *rdev)
6568 if (rdev->has_uvd) { 6496 if (rdev->has_uvd) {
6569 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; 6497 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
6570 if (ring->ring_size) { 6498 if (ring->ring_size) {
6571 r = radeon_ring_init(rdev, ring, ring->ring_size, 6499 r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
6572 R600_WB_UVD_RPTR_OFFSET,
6573 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, 6500 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
6574 0, 0xfffff, RADEON_CP_PACKET2); 6501 RADEON_CP_PACKET2);
6575 if (!r) 6502 if (!r)
6576 r = r600_uvd_init(rdev); 6503 r = uvd_v1_0_init(rdev);
6577 if (r) 6504 if (r)
6578 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); 6505 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
6579 } 6506 }
@@ -6591,6 +6518,10 @@ static int si_startup(struct radeon_device *rdev)
6591 return r; 6518 return r;
6592 } 6519 }
6593 6520
6521 r = dce6_audio_init(rdev);
6522 if (r)
6523 return r;
6524
6594 return 0; 6525 return 0;
6595} 6526}
6596 6527
@@ -6622,13 +6553,16 @@ int si_resume(struct radeon_device *rdev)
6622 6553
6623int si_suspend(struct radeon_device *rdev) 6554int si_suspend(struct radeon_device *rdev)
6624{ 6555{
6556 dce6_audio_fini(rdev);
6625 radeon_vm_manager_fini(rdev); 6557 radeon_vm_manager_fini(rdev);
6626 si_cp_enable(rdev, false); 6558 si_cp_enable(rdev, false);
6627 cayman_dma_stop(rdev); 6559 cayman_dma_stop(rdev);
6628 if (rdev->has_uvd) { 6560 if (rdev->has_uvd) {
6629 r600_uvd_stop(rdev); 6561 uvd_v1_0_fini(rdev);
6630 radeon_uvd_suspend(rdev); 6562 radeon_uvd_suspend(rdev);
6631 } 6563 }
6564 si_fini_pg(rdev);
6565 si_fini_cg(rdev);
6632 si_irq_suspend(rdev); 6566 si_irq_suspend(rdev);
6633 radeon_wb_disable(rdev); 6567 radeon_wb_disable(rdev);
6634 si_pcie_gart_disable(rdev); 6568 si_pcie_gart_disable(rdev);
@@ -6735,7 +6669,7 @@ int si_init(struct radeon_device *rdev)
6735 si_cp_fini(rdev); 6669 si_cp_fini(rdev);
6736 cayman_dma_fini(rdev); 6670 cayman_dma_fini(rdev);
6737 si_irq_fini(rdev); 6671 si_irq_fini(rdev);
6738 si_rlc_fini(rdev); 6672 sumo_rlc_fini(rdev);
6739 radeon_wb_fini(rdev); 6673 radeon_wb_fini(rdev);
6740 radeon_ib_pool_fini(rdev); 6674 radeon_ib_pool_fini(rdev);
6741 radeon_vm_manager_fini(rdev); 6675 radeon_vm_manager_fini(rdev);
@@ -6760,16 +6694,16 @@ void si_fini(struct radeon_device *rdev)
6760{ 6694{
6761 si_cp_fini(rdev); 6695 si_cp_fini(rdev);
6762 cayman_dma_fini(rdev); 6696 cayman_dma_fini(rdev);
6763 si_irq_fini(rdev);
6764 si_rlc_fini(rdev);
6765 si_fini_cg(rdev);
6766 si_fini_pg(rdev); 6697 si_fini_pg(rdev);
6698 si_fini_cg(rdev);
6699 si_irq_fini(rdev);
6700 sumo_rlc_fini(rdev);
6767 radeon_wb_fini(rdev); 6701 radeon_wb_fini(rdev);
6768 radeon_vm_manager_fini(rdev); 6702 radeon_vm_manager_fini(rdev);
6769 radeon_ib_pool_fini(rdev); 6703 radeon_ib_pool_fini(rdev);
6770 radeon_irq_kms_fini(rdev); 6704 radeon_irq_kms_fini(rdev);
6771 if (rdev->has_uvd) { 6705 if (rdev->has_uvd) {
6772 r600_uvd_stop(rdev); 6706 uvd_v1_0_fini(rdev);
6773 radeon_uvd_fini(rdev); 6707 radeon_uvd_fini(rdev);
6774 } 6708 }
6775 si_pcie_gart_fini(rdev); 6709 si_pcie_gart_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
new file mode 100644
index 000000000000..49909d23dfce
--- /dev/null
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -0,0 +1,235 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <drm/drmP.h>
25#include "radeon.h"
26#include "radeon_asic.h"
27#include "sid.h"
28
29u32 si_gpu_check_soft_reset(struct radeon_device *rdev);
30
31/**
32 * si_dma_is_lockup - Check if the DMA engine is locked up
33 *
34 * @rdev: radeon_device pointer
35 * @ring: radeon_ring structure holding ring information
36 *
37 * Check if the async DMA engine is locked up.
38 * Returns true if the engine appears to be locked up, false if not.
39 */
40bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
41{
42 u32 reset_mask = si_gpu_check_soft_reset(rdev);
43 u32 mask;
44
45 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
46 mask = RADEON_RESET_DMA;
47 else
48 mask = RADEON_RESET_DMA1;
49
50 if (!(reset_mask & mask)) {
51 radeon_ring_lockup_update(ring);
52 return false;
53 }
54 /* force ring activities */
55 radeon_ring_force_activity(rdev, ring);
56 return radeon_ring_test_lockup(rdev, ring);
57}
58
59/**
60 * si_dma_vm_set_page - update the page tables using the DMA
61 *
62 * @rdev: radeon_device pointer
63 * @ib: indirect buffer to fill with commands
64 * @pe: addr of the page entry
65 * @addr: dst addr to write into pe
66 * @count: number of page entries to update
67 * @incr: increase next addr by incr bytes
68 * @flags: access flags
69 *
70 * Update the page tables using the DMA (SI).
71 */
72void si_dma_vm_set_page(struct radeon_device *rdev,
73 struct radeon_ib *ib,
74 uint64_t pe,
75 uint64_t addr, unsigned count,
76 uint32_t incr, uint32_t flags)
77{
78 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
79 uint64_t value;
80 unsigned ndw;
81
82 if (flags & RADEON_VM_PAGE_SYSTEM) {
83 while (count) {
84 ndw = count * 2;
85 if (ndw > 0xFFFFE)
86 ndw = 0xFFFFE;
87
88 /* for non-physically contiguous pages (system) */
89 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
90 ib->ptr[ib->length_dw++] = pe;
91 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
92 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
93 if (flags & RADEON_VM_PAGE_SYSTEM) {
94 value = radeon_vm_map_gart(rdev, addr);
95 value &= 0xFFFFFFFFFFFFF000ULL;
96 } else if (flags & RADEON_VM_PAGE_VALID) {
97 value = addr;
98 } else {
99 value = 0;
100 }
101 addr += incr;
102 value |= r600_flags;
103 ib->ptr[ib->length_dw++] = value;
104 ib->ptr[ib->length_dw++] = upper_32_bits(value);
105 }
106 }
107 } else {
108 while (count) {
109 ndw = count * 2;
110 if (ndw > 0xFFFFE)
111 ndw = 0xFFFFE;
112
113 if (flags & RADEON_VM_PAGE_VALID)
114 value = addr;
115 else
116 value = 0;
117 /* for physically contiguous pages (vram) */
118 ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
119 ib->ptr[ib->length_dw++] = pe; /* dst addr */
120 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
121 ib->ptr[ib->length_dw++] = r600_flags; /* mask */
122 ib->ptr[ib->length_dw++] = 0;
123 ib->ptr[ib->length_dw++] = value; /* value */
124 ib->ptr[ib->length_dw++] = upper_32_bits(value);
125 ib->ptr[ib->length_dw++] = incr; /* increment size */
126 ib->ptr[ib->length_dw++] = 0;
127 pe += ndw * 4;
128 addr += (ndw / 2) * incr;
129 count -= ndw / 2;
130 }
131 }
132 while (ib->length_dw & 0x7)
133 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
134}
135
136void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
137{
138 struct radeon_ring *ring = &rdev->ring[ridx];
139
140 if (vm == NULL)
141 return;
142
143 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
144 if (vm->id < 8) {
145 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
146 } else {
147 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
148 }
149 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
150
151 /* flush hdp cache */
152 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
153 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
154 radeon_ring_write(ring, 1);
155
156	/* bits 0-7 are the VM contexts 0-7 */
157 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
158 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
159 radeon_ring_write(ring, 1 << vm->id);
160}
161
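/*
 * Illustrative helper (an assumption, not part of this patch): the SRBM write
 * in si_dma_vm_flush() targets one of two register banks because the page
 * table base addresses for VM contexts 0-7 and 8-15 live at separate offsets.
 */
static u32 si_dma_vm_pd_base_reg(unsigned vm_id)
{
	if (vm_id < 8)
		return VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2);
	return VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2);
}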
162/**
163 * si_copy_dma - copy pages using the DMA engine
164 *
165 * @rdev: radeon_device pointer
166 * @src_offset: src GPU address
167 * @dst_offset: dst GPU address
168 * @num_gpu_pages: number of GPU pages to xfer
169 * @fence: radeon fence object
170 *
171 * Copy GPU pages using the DMA engine (SI).
172 * Used by the radeon ttm implementation to move pages if
173 * registered as the asic copy callback.
174 */
175int si_copy_dma(struct radeon_device *rdev,
176 uint64_t src_offset, uint64_t dst_offset,
177 unsigned num_gpu_pages,
178 struct radeon_fence **fence)
179{
180 struct radeon_semaphore *sem = NULL;
181 int ring_index = rdev->asic->copy.dma_ring_index;
182 struct radeon_ring *ring = &rdev->ring[ring_index];
183 u32 size_in_bytes, cur_size_in_bytes;
184 int i, num_loops;
185 int r = 0;
186
187 r = radeon_semaphore_create(rdev, &sem);
188 if (r) {
189 DRM_ERROR("radeon: moving bo (%d).\n", r);
190 return r;
191 }
192
193 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
194 num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
195 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
196 if (r) {
197 DRM_ERROR("radeon: moving bo (%d).\n", r);
198 radeon_semaphore_free(rdev, &sem, NULL);
199 return r;
200 }
201
202 if (radeon_fence_need_sync(*fence, ring->idx)) {
203 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
204 ring->idx);
205 radeon_fence_note_sync(*fence, ring->idx);
206 } else {
207 radeon_semaphore_free(rdev, &sem, NULL);
208 }
209
210 for (i = 0; i < num_loops; i++) {
211 cur_size_in_bytes = size_in_bytes;
212 if (cur_size_in_bytes > 0xFFFFF)
213 cur_size_in_bytes = 0xFFFFF;
214 size_in_bytes -= cur_size_in_bytes;
215 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
216 radeon_ring_write(ring, dst_offset & 0xffffffff);
217 radeon_ring_write(ring, src_offset & 0xffffffff);
218 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
219 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
220 src_offset += cur_size_in_bytes;
221 dst_offset += cur_size_in_bytes;
222 }
223
224 r = radeon_fence_emit(rdev, fence, ring->idx);
225 if (r) {
226 radeon_ring_unlock_undo(rdev, ring);
227 return r;
228 }
229
230 radeon_ring_unlock_commit(rdev, ring);
231 radeon_semaphore_free(rdev, &sem, *fence);
232
233 return r;
234}
235
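/*
 * Illustrative sketch (an assumption, not part of this patch): the ring space
 * si_copy_dma() reserves. Each chunk of at most 0xFFFFF bytes costs a 5-dword
 * COPY packet (header plus dst/src low and high address dwords); the extra 11
 * dwords cover the optional semaphore sync and the fence emission.
 */
static unsigned si_copy_dma_ring_dw(unsigned size_in_bytes)
{
	unsigned num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);

	return num_loops * 5 + 11;
}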
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 88699e3cd868..5be9b4e72350 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -1753,6 +1753,9 @@ static int si_calculate_sclk_params(struct radeon_device *rdev,
1753 u32 engine_clock, 1753 u32 engine_clock,
1754 SISLANDS_SMC_SCLK_VALUE *sclk); 1754 SISLANDS_SMC_SCLK_VALUE *sclk);
1755 1755
1756extern void si_update_cg(struct radeon_device *rdev,
1757 u32 block, bool enable);
1758
1756static struct si_power_info *si_get_pi(struct radeon_device *rdev) 1759static struct si_power_info *si_get_pi(struct radeon_device *rdev)
1757{ 1760{
1758 struct si_power_info *pi = rdev->pm.dpm.priv; 1761 struct si_power_info *pi = rdev->pm.dpm.priv;
@@ -3663,7 +3666,7 @@ static void si_clear_vc(struct radeon_device *rdev)
3663 WREG32(CG_FTV, 0); 3666 WREG32(CG_FTV, 0);
3664} 3667}
3665 3668
3666static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock) 3669u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
3667{ 3670{
3668 u8 mc_para_index; 3671 u8 mc_para_index;
3669 3672
@@ -3676,7 +3679,7 @@ static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
3676 return mc_para_index; 3679 return mc_para_index;
3677} 3680}
3678 3681
3679static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode) 3682u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
3680{ 3683{
3681 u8 mc_para_index; 3684 u8 mc_para_index;
3682 3685
@@ -3758,20 +3761,21 @@ static bool si_validate_phase_shedding_tables(struct radeon_device *rdev,
3758 return true; 3761 return true;
3759} 3762}
3760 3763
3761static void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev, 3764void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
3762 struct atom_voltage_table *voltage_table) 3765 u32 max_voltage_steps,
3766 struct atom_voltage_table *voltage_table)
3763{ 3767{
3764 unsigned int i, diff; 3768 unsigned int i, diff;
3765 3769
3766 if (voltage_table->count <= SISLANDS_MAX_NO_VREG_STEPS) 3770 if (voltage_table->count <= max_voltage_steps)
3767 return; 3771 return;
3768 3772
3769 diff = voltage_table->count - SISLANDS_MAX_NO_VREG_STEPS; 3773 diff = voltage_table->count - max_voltage_steps;
3770 3774
3771 for (i= 0; i < SISLANDS_MAX_NO_VREG_STEPS; i++) 3775 for (i= 0; i < max_voltage_steps; i++)
3772 voltage_table->entries[i] = voltage_table->entries[i + diff]; 3776 voltage_table->entries[i] = voltage_table->entries[i + diff];
3773 3777
3774 voltage_table->count = SISLANDS_MAX_NO_VREG_STEPS; 3778 voltage_table->count = max_voltage_steps;
3775} 3779}
3776 3780
3777static int si_construct_voltage_tables(struct radeon_device *rdev) 3781static int si_construct_voltage_tables(struct radeon_device *rdev)
@@ -3787,7 +3791,9 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
3787 return ret; 3791 return ret;
3788 3792
3789 if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS) 3793 if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
3790 si_trim_voltage_table_to_fit_state_table(rdev, &eg_pi->vddc_voltage_table); 3794 si_trim_voltage_table_to_fit_state_table(rdev,
3795 SISLANDS_MAX_NO_VREG_STEPS,
3796 &eg_pi->vddc_voltage_table);
3791 3797
3792 if (eg_pi->vddci_control) { 3798 if (eg_pi->vddci_control) {
3793 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI, 3799 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
@@ -3796,7 +3802,9 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
3796 return ret; 3802 return ret;
3797 3803
3798 if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS) 3804 if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
3799 si_trim_voltage_table_to_fit_state_table(rdev, &eg_pi->vddci_voltage_table); 3805 si_trim_voltage_table_to_fit_state_table(rdev,
3806 SISLANDS_MAX_NO_VREG_STEPS,
3807 &eg_pi->vddci_voltage_table);
3800 } 3808 }
3801 3809
3802 if (pi->mvdd_control) { 3810 if (pi->mvdd_control) {
@@ -3814,7 +3822,9 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
3814 } 3822 }
3815 3823
3816 if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS) 3824 if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
3817 si_trim_voltage_table_to_fit_state_table(rdev, &si_pi->mvdd_voltage_table); 3825 si_trim_voltage_table_to_fit_state_table(rdev,
3826 SISLANDS_MAX_NO_VREG_STEPS,
3827 &si_pi->mvdd_voltage_table);
3818 } 3828 }
3819 3829
3820 if (si_pi->vddc_phase_shed_control) { 3830 if (si_pi->vddc_phase_shed_control) {
@@ -5752,6 +5762,13 @@ int si_dpm_enable(struct radeon_device *rdev)
5752 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; 5762 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
5753 int ret; 5763 int ret;
5754 5764
5765 si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
5766 RADEON_CG_BLOCK_MC |
5767 RADEON_CG_BLOCK_SDMA |
5768 RADEON_CG_BLOCK_BIF |
5769 RADEON_CG_BLOCK_UVD |
5770 RADEON_CG_BLOCK_HDP), false);
5771
5755 if (si_is_smc_running(rdev)) 5772 if (si_is_smc_running(rdev))
5756 return -EINVAL; 5773 return -EINVAL;
5757 if (pi->voltage_control) 5774 if (pi->voltage_control)
@@ -5871,6 +5888,13 @@ int si_dpm_enable(struct radeon_device *rdev)
5871 5888
5872 si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); 5889 si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
5873 5890
5891 si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
5892 RADEON_CG_BLOCK_MC |
5893 RADEON_CG_BLOCK_SDMA |
5894 RADEON_CG_BLOCK_BIF |
5895 RADEON_CG_BLOCK_UVD |
5896 RADEON_CG_BLOCK_HDP), true);
5897
5874 ni_update_current_ps(rdev, boot_ps); 5898 ni_update_current_ps(rdev, boot_ps);
5875 5899
5876 return 0; 5900 return 0;
@@ -5881,6 +5905,13 @@ void si_dpm_disable(struct radeon_device *rdev)
5881 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 5905 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
5882 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; 5906 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
5883 5907
5908 si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
5909 RADEON_CG_BLOCK_MC |
5910 RADEON_CG_BLOCK_SDMA |
5911 RADEON_CG_BLOCK_BIF |
5912 RADEON_CG_BLOCK_UVD |
5913 RADEON_CG_BLOCK_HDP), false);
5914
5884 if (!si_is_smc_running(rdev)) 5915 if (!si_is_smc_running(rdev))
5885 return; 5916 return;
5886 si_disable_ulv(rdev); 5917 si_disable_ulv(rdev);
@@ -5945,6 +5976,13 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
5945 struct radeon_ps *old_ps = &eg_pi->current_rps; 5976 struct radeon_ps *old_ps = &eg_pi->current_rps;
5946 int ret; 5977 int ret;
5947 5978
5979 si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
5980 RADEON_CG_BLOCK_MC |
5981 RADEON_CG_BLOCK_SDMA |
5982 RADEON_CG_BLOCK_BIF |
5983 RADEON_CG_BLOCK_UVD |
5984 RADEON_CG_BLOCK_HDP), false);
5985
5948 ret = si_disable_ulv(rdev); 5986 ret = si_disable_ulv(rdev);
5949 if (ret) { 5987 if (ret) {
5950 DRM_ERROR("si_disable_ulv failed\n"); 5988 DRM_ERROR("si_disable_ulv failed\n");
@@ -6043,6 +6081,13 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
6043 return ret; 6081 return ret;
6044 } 6082 }
6045 6083
6084 si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
6085 RADEON_CG_BLOCK_MC |
6086 RADEON_CG_BLOCK_SDMA |
6087 RADEON_CG_BLOCK_BIF |
6088 RADEON_CG_BLOCK_UVD |
6089 RADEON_CG_BLOCK_HDP), true);
6090
6046 return 0; 6091 return 0;
6047} 6092}
6048 6093
@@ -6232,6 +6277,7 @@ static int si_parse_power_table(struct radeon_device *rdev)
6232 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); 6277 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
6233 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); 6278 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
6234 for (i = 0; i < state_array->ucNumEntries; i++) { 6279 for (i = 0; i < state_array->ucNumEntries; i++) {
6280 u8 *idx;
6235 power_state = (union pplib_power_state *)power_state_offset; 6281 power_state = (union pplib_power_state *)power_state_offset;
6236 non_clock_array_index = power_state->v2.nonClockInfoIndex; 6282 non_clock_array_index = power_state->v2.nonClockInfoIndex;
6237 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 6283 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
@@ -6248,14 +6294,16 @@ static int si_parse_power_table(struct radeon_device *rdev)
6248 non_clock_info, 6294 non_clock_info,
6249 non_clock_info_array->ucEntrySize); 6295 non_clock_info_array->ucEntrySize);
6250 k = 0; 6296 k = 0;
6297 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
6251 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { 6298 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
6252 clock_array_index = power_state->v2.clockInfoIndex[j]; 6299 clock_array_index = idx[j];
6253 if (clock_array_index >= clock_info_array->ucNumEntries) 6300 if (clock_array_index >= clock_info_array->ucNumEntries)
6254 continue; 6301 continue;
6255 if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS) 6302 if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS)
6256 break; 6303 break;
6257 clock_info = (union pplib_clock_info *) 6304 clock_info = (union pplib_clock_info *)
6258 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; 6305 ((u8 *)&clock_info_array->clockInfo[0] +
6306 (clock_array_index * clock_info_array->ucEntrySize));
6259 si_parse_pplib_clock_info(rdev, 6307 si_parse_pplib_clock_info(rdev,
6260 &rdev->pm.dpm.ps[i], k, 6308 &rdev->pm.dpm.ps[i], k,
6261 clock_info); 6309 clock_info);
@@ -6401,6 +6449,12 @@ int si_dpm_init(struct radeon_device *rdev)
6401 6449
6402 si_initialize_powertune_defaults(rdev); 6450 si_initialize_powertune_defaults(rdev);
6403 6451
6452 /* make sure dc limits are valid */
6453 if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
6454 (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
6455 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
6456 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
6457
6404 return 0; 6458 return 0;
6405} 6459}
6406 6460
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 2c8da27a929f..52d2ab6b67a0 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -282,6 +282,10 @@
282 282
283#define DMIF_ADDR_CALC 0xC00 283#define DMIF_ADDR_CALC 0xC00
284 284
285#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
286# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
287# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
288
285#define SRBM_STATUS 0xE50 289#define SRBM_STATUS 0xE50
286#define GRBM_RQ_PENDING (1 << 5) 290#define GRBM_RQ_PENDING (1 << 5)
287#define VMC_BUSY (1 << 8) 291#define VMC_BUSY (1 << 8)
@@ -581,6 +585,7 @@
581#define CLKS_MASK (0xfff << 0) 585#define CLKS_MASK (0xfff << 0)
582 586
583#define HDP_HOST_PATH_CNTL 0x2C00 587#define HDP_HOST_PATH_CNTL 0x2C00
588#define CLOCK_GATING_DIS (1 << 23)
584#define HDP_NONSURFACE_BASE 0x2C04 589#define HDP_NONSURFACE_BASE 0x2C04
585#define HDP_NONSURFACE_INFO 0x2C08 590#define HDP_NONSURFACE_INFO 0x2C08
586#define HDP_NONSURFACE_SIZE 0x2C0C 591#define HDP_NONSURFACE_SIZE 0x2C0C
@@ -588,6 +593,8 @@
588#define HDP_ADDR_CONFIG 0x2F48 593#define HDP_ADDR_CONFIG 0x2F48
589#define HDP_MISC_CNTL 0x2F4C 594#define HDP_MISC_CNTL 0x2F4C
590#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0) 595#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
596#define HDP_MEM_POWER_LS 0x2F50
597#define HDP_LS_ENABLE (1 << 0)
591 598
592#define ATC_MISC_CG 0x3350 599#define ATC_MISC_CG 0x3350
593 600
@@ -635,6 +642,54 @@
635 642
636#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 643#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
637 644
645/* DCE6 ELD audio interface */
646#define AZ_F0_CODEC_ENDPOINT_INDEX 0x5E00
647# define AZ_ENDPOINT_REG_INDEX(x) (((x) & 0xff) << 0)
648# define AZ_ENDPOINT_REG_WRITE_EN (1 << 8)
649#define AZ_F0_CODEC_ENDPOINT_DATA 0x5E04
650
651#define AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x25
652#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0)
653#define SPEAKER_ALLOCATION_MASK (0x7f << 0)
654#define SPEAKER_ALLOCATION_SHIFT 0
655#define HDMI_CONNECTION (1 << 16)
656#define DP_CONNECTION (1 << 17)
657
658#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x28 /* LPCM */
659#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x29 /* AC3 */
660#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x2A /* MPEG1 */
661#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x2B /* MP3 */
662#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x2C /* MPEG2 */
663#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x2D /* AAC */
664#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x2E /* DTS */
665#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x2F /* ATRAC */
666#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x30 /* one bit audio - leave at 0 (default) */
667#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x31 /* Dolby Digital */
668#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x32 /* DTS-HD */
669#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x33 /* MAT-MLP */
670#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x34 /* DTS */
671#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x35 /* WMA Pro */
672# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
673/* max channels minus one. 7 = 8 channels */
674# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
675# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
676# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
677/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
678 * bit0 = 32 kHz
679 * bit1 = 44.1 kHz
680 * bit2 = 48 kHz
681 * bit3 = 88.2 kHz
682 * bit4 = 96 kHz
683 * bit5 = 176.4 kHz
684 * bit6 = 192 kHz
685 */
686#define AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL 0x54
687# define AUDIO_ENABLED (1 << 31)
688
689#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x56
690#define PORT_CONNECTIVITY_MASK (3 << 30)
691#define PORT_CONNECTIVITY_SHIFT 30
692
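/*
 * Illustrative sketch (an assumption, not part of this patch): the AZ endpoint
 * registers above are indexed, so a descriptor write goes through the
 * INDEX/DATA pair. Example: advertise an 8-channel LPCM descriptor
 * (MAX_CHANNELS takes the channel count minus one) supporting every rate
 * from 32 kHz to 192 kHz.
 */
static void dce6_write_lpcm_descriptor_sketch(struct radeon_device *rdev)
{
	WREG32(AZ_F0_CODEC_ENDPOINT_INDEX,
	       AZ_ENDPOINT_REG_INDEX(AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0) |
	       AZ_ENDPOINT_REG_WRITE_EN);
	WREG32(AZ_F0_CODEC_ENDPOINT_DATA,
	       MAX_CHANNELS(7) | SUPPORTED_FREQUENCIES(0x7f));
}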
638#define DC_LB_MEMORY_SPLIT 0x6b0c 693#define DC_LB_MEMORY_SPLIT 0x6b0c
639#define DC_LB_MEMORY_CONFIG(x) ((x) << 20) 694#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
640 695
@@ -755,6 +810,17 @@
755/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */ 810/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
756#define CRTC_STATUS_FRAME_COUNT 0x6e98 811#define CRTC_STATUS_FRAME_COUNT 0x6e98
757 812
813#define AFMT_AUDIO_SRC_CONTROL 0x713c
814#define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)
815/* AFMT_AUDIO_SRC_SELECT
816 * 0 = stream0
817 * 1 = stream1
818 * 2 = stream2
819 * 3 = stream3
820 * 4 = stream4
821 * 5 = stream5
822 */
823
758#define GRBM_CNTL 0x8000 824#define GRBM_CNTL 0x8000
759#define GRBM_READ_TIMEOUT(x) ((x) << 0) 825#define GRBM_READ_TIMEOUT(x) ((x) << 0)
760 826
@@ -1295,6 +1361,7 @@
1295/* PCIE registers idx/data 0x30/0x34 */ 1361/* PCIE registers idx/data 0x30/0x34 */
1296#define PCIE_CNTL2 0x1c /* PCIE */ 1362#define PCIE_CNTL2 0x1c /* PCIE */
1297# define SLV_MEM_LS_EN (1 << 16) 1363# define SLV_MEM_LS_EN (1 << 16)
1364# define SLV_MEM_AGGRESSIVE_LS_EN (1 << 17)
1298# define MST_MEM_LS_EN (1 << 18) 1365# define MST_MEM_LS_EN (1 << 18)
1299# define REPLAY_MEM_LS_EN (1 << 19) 1366# define REPLAY_MEM_LS_EN (1 << 19)
1300#define PCIE_LC_STATUS1 0x28 /* PCIE */ 1367#define PCIE_LC_STATUS1 0x28 /* PCIE */
@@ -1644,6 +1711,10 @@
1644# define DMA_IDLE (1 << 0) 1711# define DMA_IDLE (1 << 0)
1645#define DMA_TILING_CONFIG 0xd0b8 1712#define DMA_TILING_CONFIG 0xd0b8
1646 1713
1714#define DMA_POWER_CNTL 0xd0bc
1715# define MEM_POWER_OVERRIDE (1 << 8)
1716#define DMA_CLK_CTRL 0xd0c0
1717
1647#define DMA_PG 0xd0d4 1718#define DMA_PG 0xd0d4
1648# define PG_CNTL_ENABLE (1 << 0) 1719# define PG_CNTL_ENABLE (1 << 0)
1649#define DMA_PGFSM_CONFIG 0xd0d8 1720#define DMA_PGFSM_CONFIG 0xd0d8
diff --git a/drivers/gpu/drm/radeon/smu7.h b/drivers/gpu/drm/radeon/smu7.h
new file mode 100644
index 000000000000..75a380a15292
--- /dev/null
+++ b/drivers/gpu/drm/radeon/smu7.h
@@ -0,0 +1,170 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef SMU7_H
25#define SMU7_H
26
27#pragma pack(push, 1)
28
29#define SMU7_CONTEXT_ID_SMC 1
30#define SMU7_CONTEXT_ID_VBIOS 2
31
32
33#define SMU7_CONTEXT_ID_SMC 1
34#define SMU7_CONTEXT_ID_VBIOS 2
35
36#define SMU7_MAX_LEVELS_VDDC 8
37#define SMU7_MAX_LEVELS_VDDCI 4
38#define SMU7_MAX_LEVELS_MVDD 4
39#define SMU7_MAX_LEVELS_VDDNB 8
40
41#define SMU7_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE // SCLK + SQ DPM + ULV
42#define SMU7_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS // MCLK Levels DPM
43#define SMU7_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS // LCLK Levels
44#define SMU7_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS // PCIe speed and number of lanes.
45#define SMU7_MAX_LEVELS_UVD 8 // VCLK/DCLK levels for UVD.
46#define SMU7_MAX_LEVELS_VCE 8 // ECLK levels for VCE.
47#define SMU7_MAX_LEVELS_ACP 8 // ACLK levels for ACP.
48#define SMU7_MAX_LEVELS_SAMU 8 // SAMCLK levels for SAMU.
49#define SMU7_MAX_ENTRIES_SMIO 32 // Number of entries in SMIO table.
50
51#define DPM_NO_LIMIT 0
52#define DPM_NO_UP 1
53#define DPM_GO_DOWN 2
54#define DPM_GO_UP 3
55
56#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0
57#define SMU7_FIRST_DPM_MEMORY_LEVEL 0
58
59#define GPIO_CLAMP_MODE_VRHOT 1
60#define GPIO_CLAMP_MODE_THERM 2
61#define GPIO_CLAMP_MODE_DC 4
62
63#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0
64#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT)
65#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3
66#define SCRATCH_B_CURR_PCIE_INDEX_MASK (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT)
67#define SCRATCH_B_TARG_UVD_INDEX_SHIFT 6
68#define SCRATCH_B_TARG_UVD_INDEX_MASK (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT)
69#define SCRATCH_B_CURR_UVD_INDEX_SHIFT 9
70#define SCRATCH_B_CURR_UVD_INDEX_MASK (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT)
71#define SCRATCH_B_TARG_VCE_INDEX_SHIFT 12
72#define SCRATCH_B_TARG_VCE_INDEX_MASK (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT)
73#define SCRATCH_B_CURR_VCE_INDEX_SHIFT 15
74#define SCRATCH_B_CURR_VCE_INDEX_MASK (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT)
75#define SCRATCH_B_TARG_ACP_INDEX_SHIFT 18
76#define SCRATCH_B_TARG_ACP_INDEX_MASK (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT)
77#define SCRATCH_B_CURR_ACP_INDEX_SHIFT 21
78#define SCRATCH_B_CURR_ACP_INDEX_MASK (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT)
79#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24
80#define SCRATCH_B_TARG_SAMU_INDEX_MASK (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT)
81#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27
82#define SCRATCH_B_CURR_SAMU_INDEX_MASK (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
83
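/*
 * Illustrative sketch (an assumption, not part of this patch): the SCRATCH_B
 * shift/mask pairs above pack the target and current DPM level indices for
 * each engine into a single scratch register; driver-side code would extract
 * a field like this.
 */
static inline u32 smu7_scratch_b_curr_pcie_index(u32 scratch_b)
{
	return (scratch_b & SCRATCH_B_CURR_PCIE_INDEX_MASK) >>
		SCRATCH_B_CURR_PCIE_INDEX_SHIFT;
}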
84
85struct SMU7_PIDController
86{
87 uint32_t Ki;
88 int32_t LFWindupUL;
89 int32_t LFWindupLL;
90 uint32_t StatePrecision;
91 uint32_t LfPrecision;
92 uint32_t LfOffset;
93 uint32_t MaxState;
94 uint32_t MaxLfFraction;
95 uint32_t StateShift;
96};
97
98typedef struct SMU7_PIDController SMU7_PIDController;
99
100// -------------------------------------------------------------------------------------------------------------------------
101#define SMU7_MAX_PCIE_LINK_SPEEDS 3 /* 0:Gen1 1:Gen2 2:Gen3 */
102
103#define SMU7_SCLK_DPM_CONFIG_MASK 0x01
104#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK 0x02
105#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK 0x04
106#define SMU7_MCLK_DPM_CONFIG_MASK 0x08
107#define SMU7_UVD_DPM_CONFIG_MASK 0x10
108#define SMU7_VCE_DPM_CONFIG_MASK 0x20
109#define SMU7_ACP_DPM_CONFIG_MASK 0x40
110#define SMU7_SAMU_DPM_CONFIG_MASK 0x80
111#define SMU7_PCIEGEN_DPM_CONFIG_MASK 0x100
112
113#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE 0x00000001
114#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE 0x00000002
115#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE 0x00000100
116#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE 0x00000200
117#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE 0x00010000
118#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000
119
120struct SMU7_Firmware_Header
121{
122 uint32_t Digest[5];
123 uint32_t Version;
124 uint32_t HeaderSize;
125 uint32_t Flags;
126 uint32_t EntryPoint;
127 uint32_t CodeSize;
128 uint32_t ImageSize;
129
130 uint32_t Rtos;
131 uint32_t SoftRegisters;
132 uint32_t DpmTable;
133 uint32_t FanTable;
134 uint32_t CacConfigTable;
135 uint32_t CacStatusTable;
136
137 uint32_t mcRegisterTable;
138
139 uint32_t mcArbDramTimingTable;
140
141 uint32_t PmFuseTable;
142 uint32_t Globals;
143 uint32_t Reserved[42];
144 uint32_t Signature;
145};
146
147typedef struct SMU7_Firmware_Header SMU7_Firmware_Header;
148
149#define SMU7_FIRMWARE_HEADER_LOCATION 0x20000
150
151enum DisplayConfig {
152 PowerDown = 1,
153 DP54x4,
154 DP54x2,
155 DP54x1,
156 DP27x4,
157 DP27x2,
158 DP27x1,
159 HDMI297,
160 HDMI162,
161 LVDS,
162 DP324x4,
163 DP324x2,
164 DP324x1
165};
166
167#pragma pack(pop)
168
169#endif
170
diff --git a/drivers/gpu/drm/radeon/smu7_discrete.h b/drivers/gpu/drm/radeon/smu7_discrete.h
new file mode 100644
index 000000000000..82f70c90a9ee
--- /dev/null
+++ b/drivers/gpu/drm/radeon/smu7_discrete.h
@@ -0,0 +1,486 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef SMU7_DISCRETE_H
25#define SMU7_DISCRETE_H
26
27#include "smu7.h"
28
29#pragma pack(push, 1)
30
31#define SMU7_DTE_ITERATIONS 5
32#define SMU7_DTE_SOURCES 3
33#define SMU7_DTE_SINKS 1
34#define SMU7_NUM_CPU_TES 0
35#define SMU7_NUM_GPU_TES 1
36#define SMU7_NUM_NON_TES 2
37
38struct SMU7_SoftRegisters
39{
40 uint32_t RefClockFrequency;
41 uint32_t PmTimerP;
42 uint32_t FeatureEnables;
43 uint32_t PreVBlankGap;
44 uint32_t VBlankTimeout;
45 uint32_t TrainTimeGap;
46
47 uint32_t MvddSwitchTime;
48 uint32_t LongestAcpiTrainTime;
49 uint32_t AcpiDelay;
50 uint32_t G5TrainTime;
51 uint32_t DelayMpllPwron;
52 uint32_t VoltageChangeTimeout;
53 uint32_t HandshakeDisables;
54
55 uint8_t DisplayPhy1Config;
56 uint8_t DisplayPhy2Config;
57 uint8_t DisplayPhy3Config;
58 uint8_t DisplayPhy4Config;
59
60 uint8_t DisplayPhy5Config;
61 uint8_t DisplayPhy6Config;
62 uint8_t DisplayPhy7Config;
63 uint8_t DisplayPhy8Config;
64
65 uint32_t AverageGraphicsA;
66 uint32_t AverageMemoryA;
67 uint32_t AverageGioA;
68
69 uint8_t SClkDpmEnabledLevels;
70 uint8_t MClkDpmEnabledLevels;
71 uint8_t LClkDpmEnabledLevels;
72 uint8_t PCIeDpmEnabledLevels;
73
74 uint8_t UVDDpmEnabledLevels;
75 uint8_t SAMUDpmEnabledLevels;
76 uint8_t ACPDpmEnabledLevels;
77 uint8_t VCEDpmEnabledLevels;
78
79 uint32_t DRAM_LOG_ADDR_H;
80 uint32_t DRAM_LOG_ADDR_L;
81 uint32_t DRAM_LOG_PHY_ADDR_H;
82 uint32_t DRAM_LOG_PHY_ADDR_L;
83 uint32_t DRAM_LOG_BUFF_SIZE;
84 uint32_t UlvEnterC;
85 uint32_t UlvTime;
86 uint32_t Reserved[3];
87
88};
89
90typedef struct SMU7_SoftRegisters SMU7_SoftRegisters;
91
92struct SMU7_Discrete_VoltageLevel
93{
94 uint16_t Voltage;
95 uint16_t StdVoltageHiSidd;
96 uint16_t StdVoltageLoSidd;
97 uint8_t Smio;
98 uint8_t padding;
99};
100
101typedef struct SMU7_Discrete_VoltageLevel SMU7_Discrete_VoltageLevel;
102
103struct SMU7_Discrete_GraphicsLevel
104{
105 uint32_t Flags;
106 uint32_t MinVddc;
107 uint32_t MinVddcPhases;
108
109 uint32_t SclkFrequency;
110
111 uint8_t padding1[2];
112 uint16_t ActivityLevel;
113
114 uint32_t CgSpllFuncCntl3;
115 uint32_t CgSpllFuncCntl4;
116 uint32_t SpllSpreadSpectrum;
117 uint32_t SpllSpreadSpectrum2;
118 uint32_t CcPwrDynRm;
119 uint32_t CcPwrDynRm1;
120 uint8_t SclkDid;
121 uint8_t DisplayWatermark;
122 uint8_t EnabledForActivity;
123 uint8_t EnabledForThrottle;
124 uint8_t UpH;
125 uint8_t DownH;
126 uint8_t VoltageDownH;
127 uint8_t PowerThrottle;
128 uint8_t DeepSleepDivId;
129 uint8_t padding[3];
130};
131
132typedef struct SMU7_Discrete_GraphicsLevel SMU7_Discrete_GraphicsLevel;
133
134struct SMU7_Discrete_ACPILevel
135{
136 uint32_t Flags;
137 uint32_t MinVddc;
138 uint32_t MinVddcPhases;
139 uint32_t SclkFrequency;
140 uint8_t SclkDid;
141 uint8_t DisplayWatermark;
142 uint8_t DeepSleepDivId;
143 uint8_t padding;
144 uint32_t CgSpllFuncCntl;
145 uint32_t CgSpllFuncCntl2;
146 uint32_t CgSpllFuncCntl3;
147 uint32_t CgSpllFuncCntl4;
148 uint32_t SpllSpreadSpectrum;
149 uint32_t SpllSpreadSpectrum2;
150 uint32_t CcPwrDynRm;
151 uint32_t CcPwrDynRm1;
152};
153
154typedef struct SMU7_Discrete_ACPILevel SMU7_Discrete_ACPILevel;
155
156struct SMU7_Discrete_Ulv
157{
158 uint32_t CcPwrDynRm;
159 uint32_t CcPwrDynRm1;
160 uint16_t VddcOffset;
161 uint8_t VddcOffsetVid;
162 uint8_t VddcPhase;
163 uint32_t Reserved;
164};
165
166typedef struct SMU7_Discrete_Ulv SMU7_Discrete_Ulv;
167
168struct SMU7_Discrete_MemoryLevel
169{
170 uint32_t MinVddc;
171 uint32_t MinVddcPhases;
172 uint32_t MinVddci;
173 uint32_t MinMvdd;
174
175 uint32_t MclkFrequency;
176
177 uint8_t EdcReadEnable;
178 uint8_t EdcWriteEnable;
179 uint8_t RttEnable;
180 uint8_t StutterEnable;
181
182 uint8_t StrobeEnable;
183 uint8_t StrobeRatio;
184 uint8_t EnabledForThrottle;
185 uint8_t EnabledForActivity;
186
187 uint8_t UpH;
188 uint8_t DownH;
189 uint8_t VoltageDownH;
190 uint8_t padding;
191
192 uint16_t ActivityLevel;
193 uint8_t DisplayWatermark;
194 uint8_t padding1;
195
196 uint32_t MpllFuncCntl;
197 uint32_t MpllFuncCntl_1;
198 uint32_t MpllFuncCntl_2;
199 uint32_t MpllAdFuncCntl;
200 uint32_t MpllDqFuncCntl;
201 uint32_t MclkPwrmgtCntl;
202 uint32_t DllCntl;
203 uint32_t MpllSs1;
204 uint32_t MpllSs2;
205};
206
207typedef struct SMU7_Discrete_MemoryLevel SMU7_Discrete_MemoryLevel;
208
209struct SMU7_Discrete_LinkLevel
210{
211 uint8_t PcieGenSpeed;
212 uint8_t PcieLaneCount;
213 uint8_t EnabledForActivity;
214 uint8_t Padding;
215 uint32_t DownT;
216 uint32_t UpT;
217 uint32_t Reserved;
218};
219
220typedef struct SMU7_Discrete_LinkLevel SMU7_Discrete_LinkLevel;
221
222
223struct SMU7_Discrete_MCArbDramTimingTableEntry
224{
225 uint32_t McArbDramTiming;
226 uint32_t McArbDramTiming2;
227 uint8_t McArbBurstTime;
228 uint8_t padding[3];
229};
230
231typedef struct SMU7_Discrete_MCArbDramTimingTableEntry SMU7_Discrete_MCArbDramTimingTableEntry;
232
233struct SMU7_Discrete_MCArbDramTimingTable
234{
235 SMU7_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
236};
237
238typedef struct SMU7_Discrete_MCArbDramTimingTable SMU7_Discrete_MCArbDramTimingTable;
239
240struct SMU7_Discrete_UvdLevel
241{
242 uint32_t VclkFrequency;
243 uint32_t DclkFrequency;
244 uint16_t MinVddc;
245 uint8_t MinVddcPhases;
246 uint8_t VclkDivider;
247 uint8_t DclkDivider;
248 uint8_t padding[3];
249};
250
251typedef struct SMU7_Discrete_UvdLevel SMU7_Discrete_UvdLevel;
252
253struct SMU7_Discrete_ExtClkLevel
254{
255 uint32_t Frequency;
256 uint16_t MinVoltage;
257 uint8_t MinPhases;
258 uint8_t Divider;
259};
260
261typedef struct SMU7_Discrete_ExtClkLevel SMU7_Discrete_ExtClkLevel;
262
263struct SMU7_Discrete_StateInfo
264{
265 uint32_t SclkFrequency;
266 uint32_t MclkFrequency;
267 uint32_t VclkFrequency;
268 uint32_t DclkFrequency;
269 uint32_t SamclkFrequency;
270 uint32_t AclkFrequency;
271 uint32_t EclkFrequency;
272 uint16_t MvddVoltage;
273 uint16_t padding16;
274 uint8_t DisplayWatermark;
275 uint8_t McArbIndex;
276 uint8_t McRegIndex;
277 uint8_t SeqIndex;
278 uint8_t SclkDid;
279 int8_t SclkIndex;
280 int8_t MclkIndex;
281 uint8_t PCIeGen;
282
283};
284
285typedef struct SMU7_Discrete_StateInfo SMU7_Discrete_StateInfo;
286
287
288struct SMU7_Discrete_DpmTable
289{
290 SMU7_PIDController GraphicsPIDController;
291 SMU7_PIDController MemoryPIDController;
292 SMU7_PIDController LinkPIDController;
293
294 uint32_t SystemFlags;
295
296
297 uint32_t SmioMaskVddcVid;
298 uint32_t SmioMaskVddcPhase;
299 uint32_t SmioMaskVddciVid;
300 uint32_t SmioMaskMvddVid;
301
302 uint32_t VddcLevelCount;
303 uint32_t VddciLevelCount;
304 uint32_t MvddLevelCount;
305
306 SMU7_Discrete_VoltageLevel VddcLevel [SMU7_MAX_LEVELS_VDDC];
307// SMU7_Discrete_VoltageLevel VddcStandardReference [SMU7_MAX_LEVELS_VDDC];
308 SMU7_Discrete_VoltageLevel VddciLevel [SMU7_MAX_LEVELS_VDDCI];
309 SMU7_Discrete_VoltageLevel MvddLevel [SMU7_MAX_LEVELS_MVDD];
310
311 uint8_t GraphicsDpmLevelCount;
312 uint8_t MemoryDpmLevelCount;
313 uint8_t LinkLevelCount;
314 uint8_t UvdLevelCount;
315 uint8_t VceLevelCount;
316 uint8_t AcpLevelCount;
317 uint8_t SamuLevelCount;
318 uint8_t MasterDeepSleepControl;
319 uint32_t Reserved[5];
320// uint32_t SamuDefaultLevel;
321
322 SMU7_Discrete_GraphicsLevel GraphicsLevel [SMU7_MAX_LEVELS_GRAPHICS];
323 SMU7_Discrete_MemoryLevel MemoryACPILevel;
324 SMU7_Discrete_MemoryLevel MemoryLevel [SMU7_MAX_LEVELS_MEMORY];
325 SMU7_Discrete_LinkLevel LinkLevel [SMU7_MAX_LEVELS_LINK];
326 SMU7_Discrete_ACPILevel ACPILevel;
327 SMU7_Discrete_UvdLevel UvdLevel [SMU7_MAX_LEVELS_UVD];
328 SMU7_Discrete_ExtClkLevel VceLevel [SMU7_MAX_LEVELS_VCE];
329 SMU7_Discrete_ExtClkLevel AcpLevel [SMU7_MAX_LEVELS_ACP];
330 SMU7_Discrete_ExtClkLevel SamuLevel [SMU7_MAX_LEVELS_SAMU];
331 SMU7_Discrete_Ulv Ulv;
332
333 uint32_t SclkStepSize;
334 uint32_t Smio [SMU7_MAX_ENTRIES_SMIO];
335
336 uint8_t UvdBootLevel;
337 uint8_t VceBootLevel;
338 uint8_t AcpBootLevel;
339 uint8_t SamuBootLevel;
340
341 uint8_t UVDInterval;
342 uint8_t VCEInterval;
343 uint8_t ACPInterval;
344 uint8_t SAMUInterval;
345
346 uint8_t GraphicsBootLevel;
347 uint8_t GraphicsVoltageChangeEnable;
348 uint8_t GraphicsThermThrottleEnable;
349 uint8_t GraphicsInterval;
350
351 uint8_t VoltageInterval;
352 uint8_t ThermalInterval;
353 uint16_t TemperatureLimitHigh;
354
355 uint16_t TemperatureLimitLow;
356 uint8_t MemoryBootLevel;
357 uint8_t MemoryVoltageChangeEnable;
358
359 uint8_t MemoryInterval;
360 uint8_t MemoryThermThrottleEnable;
361 uint16_t VddcVddciDelta;
362
363 uint16_t VoltageResponseTime;
364 uint16_t PhaseResponseTime;
365
366 uint8_t PCIeBootLinkLevel;
367 uint8_t PCIeGenInterval;
368 uint8_t DTEInterval;
369 uint8_t DTEMode;
370
371 uint8_t SVI2Enable;
372 uint8_t VRHotGpio;
373 uint8_t AcDcGpio;
374 uint8_t ThermGpio;
375
376 uint16_t PPM_PkgPwrLimit;
377 uint16_t PPM_TemperatureLimit;
378
379 uint16_t DefaultTdp;
380 uint16_t TargetTdp;
381
382 uint16_t FpsHighT;
383 uint16_t FpsLowT;
384
385 uint16_t BAPMTI_R [SMU7_DTE_ITERATIONS][SMU7_DTE_SOURCES][SMU7_DTE_SINKS];
386 uint16_t BAPMTI_RC [SMU7_DTE_ITERATIONS][SMU7_DTE_SOURCES][SMU7_DTE_SINKS];
387
388 uint8_t DTEAmbientTempBase;
389 uint8_t DTETjOffset;
390 uint8_t GpuTjMax;
391 uint8_t GpuTjHyst;
392
393 uint16_t BootVddc;
394 uint16_t BootVddci;
395
396 uint16_t BootMVdd;
397 uint16_t padding;
398
399 uint32_t BAPM_TEMP_GRADIENT;
400
401 uint32_t LowSclkInterruptT;
402};
403
404typedef struct SMU7_Discrete_DpmTable SMU7_Discrete_DpmTable;
405
406#define SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE 16
407#define SMU7_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT SMU7_MAX_LEVELS_MEMORY
408
409struct SMU7_Discrete_MCRegisterAddress
410{
411 uint16_t s0;
412 uint16_t s1;
413};
414
415typedef struct SMU7_Discrete_MCRegisterAddress SMU7_Discrete_MCRegisterAddress;
416
417struct SMU7_Discrete_MCRegisterSet
418{
419 uint32_t value[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
420};
421
422typedef struct SMU7_Discrete_MCRegisterSet SMU7_Discrete_MCRegisterSet;
423
424struct SMU7_Discrete_MCRegisters
425{
426 uint8_t last;
427 uint8_t reserved[3];
428 SMU7_Discrete_MCRegisterAddress address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
429 SMU7_Discrete_MCRegisterSet data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT];
430};
431
432typedef struct SMU7_Discrete_MCRegisters SMU7_Discrete_MCRegisters;
433
434struct SMU7_Discrete_PmFuses {
435 // dw0-dw1
436 uint8_t BapmVddCVidHiSidd[8];
437
438 // dw2-dw3
439 uint8_t BapmVddCVidLoSidd[8];
440
441 // dw4-dw5
442 uint8_t VddCVid[8];
443
444 // dw6
445 uint8_t SviLoadLineEn;
446 uint8_t SviLoadLineVddC;
447 uint8_t SviLoadLineTrimVddC;
448 uint8_t SviLoadLineOffsetVddC;
449
450 // dw7
451 uint16_t TDC_VDDC_PkgLimit;
452 uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
453 uint8_t TDC_MAWt;
454
455 // dw8
456 uint8_t TdcWaterfallCtl;
457 uint8_t LPMLTemperatureMin;
458 uint8_t LPMLTemperatureMax;
459 uint8_t Reserved;
460
461 // dw9-dw10
462 uint8_t BapmVddCVidHiSidd2[8];
463
464 // dw11-dw12
465 uint32_t Reserved6[2];
466
467 // dw13-dw16
468 uint8_t GnbLPML[16];
469
470 // dw17
471 uint8_t GnbLPMLMaxVid;
472 uint8_t GnbLPMLMinVid;
473 uint8_t Reserved1[2];
474
475 // dw18
476 uint16_t BapmVddCBaseLeakageHiSidd;
477 uint16_t BapmVddCBaseLeakageLoSidd;
478};
479
480typedef struct SMU7_Discrete_PmFuses SMU7_Discrete_PmFuses;
481
482
483#pragma pack(pop)
484
485#endif
486
diff --git a/drivers/gpu/drm/radeon/smu7_fusion.h b/drivers/gpu/drm/radeon/smu7_fusion.h
new file mode 100644
index 000000000000..78ada9ffd508
--- /dev/null
+++ b/drivers/gpu/drm/radeon/smu7_fusion.h
@@ -0,0 +1,300 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef SMU7_FUSION_H
25#define SMU7_FUSION_H
26
27#include "smu7.h"
28
29#pragma pack(push, 1)
30
31#define SMU7_DTE_ITERATIONS 5
32#define SMU7_DTE_SOURCES 5
33#define SMU7_DTE_SINKS 3
34#define SMU7_NUM_CPU_TES 2
35#define SMU7_NUM_GPU_TES 1
36#define SMU7_NUM_NON_TES 2
37
38// All 'soft registers' should be uint32_t.
39struct SMU7_SoftRegisters
40{
41 uint32_t RefClockFrequency;
42 uint32_t PmTimerP;
43 uint32_t FeatureEnables;
44 uint32_t HandshakeDisables;
45
46 uint8_t DisplayPhy1Config;
47 uint8_t DisplayPhy2Config;
48 uint8_t DisplayPhy3Config;
49 uint8_t DisplayPhy4Config;
50
51 uint8_t DisplayPhy5Config;
52 uint8_t DisplayPhy6Config;
53 uint8_t DisplayPhy7Config;
54 uint8_t DisplayPhy8Config;
55
56 uint32_t AverageGraphicsA;
57 uint32_t AverageMemoryA;
58 uint32_t AverageGioA;
59
60 uint8_t SClkDpmEnabledLevels;
61 uint8_t MClkDpmEnabledLevels;
62 uint8_t LClkDpmEnabledLevels;
63 uint8_t PCIeDpmEnabledLevels;
64
65 uint8_t UVDDpmEnabledLevels;
66 uint8_t SAMUDpmEnabledLevels;
67 uint8_t ACPDpmEnabledLevels;
68 uint8_t VCEDpmEnabledLevels;
69
70 uint32_t DRAM_LOG_ADDR_H;
71 uint32_t DRAM_LOG_ADDR_L;
72 uint32_t DRAM_LOG_PHY_ADDR_H;
73 uint32_t DRAM_LOG_PHY_ADDR_L;
74 uint32_t DRAM_LOG_BUFF_SIZE;
75 uint32_t UlvEnterC;
76 uint32_t UlvTime;
77 uint32_t Reserved[3];
78
79};
80
81typedef struct SMU7_SoftRegisters SMU7_SoftRegisters;
82
83struct SMU7_Fusion_GraphicsLevel
84{
85 uint32_t MinVddNb;
86
87 uint32_t SclkFrequency;
88
89 uint8_t Vid;
90 uint8_t VidOffset;
91 uint16_t AT;
92
93 uint8_t PowerThrottle;
94 uint8_t GnbSlow;
95 uint8_t ForceNbPs1;
96 uint8_t SclkDid;
97
98 uint8_t DisplayWatermark;
99 uint8_t EnabledForActivity;
100 uint8_t EnabledForThrottle;
101 uint8_t UpH;
102
103 uint8_t DownH;
104 uint8_t VoltageDownH;
105 uint8_t DeepSleepDivId;
106
107 uint8_t ClkBypassCntl;
108
109 uint32_t reserved;
110};
111
112typedef struct SMU7_Fusion_GraphicsLevel SMU7_Fusion_GraphicsLevel;
113
114struct SMU7_Fusion_GIOLevel
115{
116 uint8_t EnabledForActivity;
117 uint8_t LclkDid;
118 uint8_t Vid;
119 uint8_t VoltageDownH;
120
121 uint32_t MinVddNb;
122
123 uint16_t ResidencyCounter;
124 uint8_t UpH;
125 uint8_t DownH;
126
127 uint32_t LclkFrequency;
128
129 uint8_t ActivityLevel;
130 uint8_t EnabledForThrottle;
131
132 uint8_t ClkBypassCntl;
133
134 uint8_t padding;
135};
136
137typedef struct SMU7_Fusion_GIOLevel SMU7_Fusion_GIOLevel;
138
139// UVD VCLK/DCLK state (level) definition.
140struct SMU7_Fusion_UvdLevel
141{
142 uint32_t VclkFrequency;
143 uint32_t DclkFrequency;
144 uint16_t MinVddNb;
145 uint8_t VclkDivider;
146 uint8_t DclkDivider;
147
148 uint8_t VClkBypassCntl;
149 uint8_t DClkBypassCntl;
150
151 uint8_t padding[2];
152
153};
154
155typedef struct SMU7_Fusion_UvdLevel SMU7_Fusion_UvdLevel;
156
157// Clocks for other external blocks (VCE, ACP, SAMU).
158struct SMU7_Fusion_ExtClkLevel
159{
160 uint32_t Frequency;
161 uint16_t MinVoltage;
162 uint8_t Divider;
163 uint8_t ClkBypassCntl;
164
165 uint32_t Reserved;
166};
167typedef struct SMU7_Fusion_ExtClkLevel SMU7_Fusion_ExtClkLevel;
168
169struct SMU7_Fusion_ACPILevel
170{
171 uint32_t Flags;
172 uint32_t MinVddNb;
173 uint32_t SclkFrequency;
174 uint8_t SclkDid;
175 uint8_t GnbSlow;
176 uint8_t ForceNbPs1;
177 uint8_t DisplayWatermark;
178 uint8_t DeepSleepDivId;
179 uint8_t padding[3];
180};
181
182typedef struct SMU7_Fusion_ACPILevel SMU7_Fusion_ACPILevel;
183
184struct SMU7_Fusion_NbDpm
185{
186 uint8_t DpmXNbPsHi;
187 uint8_t DpmXNbPsLo;
188 uint8_t Dpm0PgNbPsHi;
189 uint8_t Dpm0PgNbPsLo;
190 uint8_t EnablePsi1;
191 uint8_t SkipDPM0;
192 uint8_t SkipPG;
193 uint8_t Hysteresis;
194 uint8_t EnableDpmPstatePoll;
195 uint8_t padding[3];
196};
197
198typedef struct SMU7_Fusion_NbDpm SMU7_Fusion_NbDpm;
199
200struct SMU7_Fusion_StateInfo
201{
202 uint32_t SclkFrequency;
203 uint32_t LclkFrequency;
204 uint32_t VclkFrequency;
205 uint32_t DclkFrequency;
206 uint32_t SamclkFrequency;
207 uint32_t AclkFrequency;
208 uint32_t EclkFrequency;
209 uint8_t DisplayWatermark;
210 uint8_t McArbIndex;
211 int8_t SclkIndex;
212 int8_t MclkIndex;
213};
214
215typedef struct SMU7_Fusion_StateInfo SMU7_Fusion_StateInfo;
216
217struct SMU7_Fusion_DpmTable
218{
219 uint32_t SystemFlags;
220
221 SMU7_PIDController GraphicsPIDController;
222 SMU7_PIDController GioPIDController;
223
224 uint8_t GraphicsDpmLevelCount;
225 uint8_t GIOLevelCount;
226 uint8_t UvdLevelCount;
227 uint8_t VceLevelCount;
228
229 uint8_t AcpLevelCount;
230 uint8_t SamuLevelCount;
231 uint16_t FpsHighT;
232
233 SMU7_Fusion_GraphicsLevel GraphicsLevel [SMU__NUM_SCLK_DPM_STATE];
234 SMU7_Fusion_ACPILevel ACPILevel;
235 SMU7_Fusion_UvdLevel UvdLevel [SMU7_MAX_LEVELS_UVD];
236 SMU7_Fusion_ExtClkLevel VceLevel [SMU7_MAX_LEVELS_VCE];
237 SMU7_Fusion_ExtClkLevel AcpLevel [SMU7_MAX_LEVELS_ACP];
238 SMU7_Fusion_ExtClkLevel SamuLevel [SMU7_MAX_LEVELS_SAMU];
239
240 uint8_t UvdBootLevel;
241 uint8_t VceBootLevel;
242 uint8_t AcpBootLevel;
243 uint8_t SamuBootLevel;
244 uint8_t UVDInterval;
245 uint8_t VCEInterval;
246 uint8_t ACPInterval;
247 uint8_t SAMUInterval;
248
249 uint8_t GraphicsBootLevel;
250 uint8_t GraphicsInterval;
251 uint8_t GraphicsThermThrottleEnable;
252 uint8_t GraphicsVoltageChangeEnable;
253
254 uint8_t GraphicsClkSlowEnable;
255 uint8_t GraphicsClkSlowDivider;
256 uint16_t FpsLowT;
257
258 uint32_t DisplayCac;
259 uint32_t LowSclkInterruptT;
260
261 uint32_t DRAM_LOG_ADDR_H;
262 uint32_t DRAM_LOG_ADDR_L;
263 uint32_t DRAM_LOG_PHY_ADDR_H;
264 uint32_t DRAM_LOG_PHY_ADDR_L;
265 uint32_t DRAM_LOG_BUFF_SIZE;
266
267};
268
269struct SMU7_Fusion_GIODpmTable
270{
271
272 SMU7_Fusion_GIOLevel GIOLevel [SMU7_MAX_LEVELS_GIO];
273
274 SMU7_PIDController GioPIDController;
275
276 uint32_t GIOLevelCount;
277
278 uint8_t Enable;
279 uint8_t GIOVoltageChangeEnable;
280 uint8_t GIOBootLevel;
281 uint8_t padding;
282 uint8_t padding1[2];
283 uint8_t TargetState;
284 uint8_t CurrenttState;
285 uint8_t ThrottleOnHtc;
286 uint8_t ThermThrottleStatus;
287 uint8_t ThermThrottleTempSelect;
288 uint8_t ThermThrottleEnable;
289 uint16_t TemperatureLimitHigh;
290 uint16_t TemperatureLimitLow;
291
292};
293
294typedef struct SMU7_Fusion_DpmTable SMU7_Fusion_DpmTable;
295typedef struct SMU7_Fusion_GIODpmTable SMU7_Fusion_GIODpmTable;
296
297#pragma pack(pop)
298
299#endif
300
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index c0a850319908..864761c0120e 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1483,6 +1483,7 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
1483 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); 1483 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
1484 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); 1484 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
1485 for (i = 0; i < state_array->ucNumEntries; i++) { 1485 for (i = 0; i < state_array->ucNumEntries; i++) {
1486 u8 *idx;
1486 power_state = (union pplib_power_state *)power_state_offset; 1487 power_state = (union pplib_power_state *)power_state_offset;
1487 non_clock_array_index = power_state->v2.nonClockInfoIndex; 1488 non_clock_array_index = power_state->v2.nonClockInfoIndex;
1488 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 1489 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
@@ -1496,12 +1497,15 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
1496 } 1497 }
1497 rdev->pm.dpm.ps[i].ps_priv = ps; 1498 rdev->pm.dpm.ps[i].ps_priv = ps;
1498 k = 0; 1499 k = 0;
1500 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
1499 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { 1501 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
1500 clock_array_index = power_state->v2.clockInfoIndex[j]; 1502 clock_array_index = idx[j];
1501 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS) 1503 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
1502 break; 1504 break;
1505
1503 clock_info = (union pplib_clock_info *) 1506 clock_info = (union pplib_clock_info *)
1504 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; 1507 ((u8 *)&clock_info_array->clockInfo[0] +
1508 (clock_array_index * clock_info_array->ucEntrySize));
1505 sumo_parse_pplib_clock_info(rdev, 1509 sumo_parse_pplib_clock_info(rdev,
1506 &rdev->pm.dpm.ps[i], k, 1510 &rdev->pm.dpm.ps[i], k,
1507 clock_info); 1511 clock_info);
@@ -1530,6 +1534,20 @@ u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev,
1530 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; 1534 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
1531} 1535}
1532 1536
1537u32 sumo_convert_vid7_to_vid2(struct radeon_device *rdev,
1538 struct sumo_vid_mapping_table *vid_mapping_table,
1539 u32 vid_7bit)
1540{
1541 u32 i;
1542
1543 for (i = 0; i < vid_mapping_table->num_entries; i++) {
1544 if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
1545 return vid_mapping_table->entries[i].vid_2bit;
1546 }
1547
1548 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
1549}
1550
1533static u16 sumo_convert_voltage_index_to_value(struct radeon_device *rdev, 1551static u16 sumo_convert_voltage_index_to_value(struct radeon_device *rdev,
1534 u32 vid_2bit) 1552 u32 vid_2bit)
1535{ 1553{
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.h b/drivers/gpu/drm/radeon/sumo_dpm.h
index 07dda299c784..db1ea32a907b 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.h
+++ b/drivers/gpu/drm/radeon/sumo_dpm.h
@@ -202,6 +202,9 @@ void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
202u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev, 202u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev,
203 struct sumo_vid_mapping_table *vid_mapping_table, 203 struct sumo_vid_mapping_table *vid_mapping_table,
204 u32 vid_2bit); 204 u32 vid_2bit);
205u32 sumo_convert_vid7_to_vid2(struct radeon_device *rdev,
206 struct sumo_vid_mapping_table *vid_mapping_table,
207 u32 vid_7bit);
205u32 sumo_get_sleep_divider_from_id(u32 id); 208u32 sumo_get_sleep_divider_from_id(u32 id);
206u32 sumo_get_sleep_divider_id_from_clock(struct radeon_device *rdev, 209u32 sumo_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
207 u32 sclk, 210 u32 sclk,
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index a1eb5f59939f..b07b7b8f1aff 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1675,6 +1675,7 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
1675 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); 1675 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
1676 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); 1676 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
1677 for (i = 0; i < state_array->ucNumEntries; i++) { 1677 for (i = 0; i < state_array->ucNumEntries; i++) {
1678 u8 *idx;
1678 power_state = (union pplib_power_state *)power_state_offset; 1679 power_state = (union pplib_power_state *)power_state_offset;
1679 non_clock_array_index = power_state->v2.nonClockInfoIndex; 1680 non_clock_array_index = power_state->v2.nonClockInfoIndex;
1680 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 1681 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
@@ -1688,14 +1689,16 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
1688 } 1689 }
1689 rdev->pm.dpm.ps[i].ps_priv = ps; 1690 rdev->pm.dpm.ps[i].ps_priv = ps;
1690 k = 0; 1691 k = 0;
1692 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
1691 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { 1693 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
1692 clock_array_index = power_state->v2.clockInfoIndex[j]; 1694 clock_array_index = idx[j];
1693 if (clock_array_index >= clock_info_array->ucNumEntries) 1695 if (clock_array_index >= clock_info_array->ucNumEntries)
1694 continue; 1696 continue;
1695 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS) 1697 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
1696 break; 1698 break;
1697 clock_info = (union pplib_clock_info *) 1699 clock_info = (union pplib_clock_info *)
1698 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; 1700 ((u8 *)&clock_info_array->clockInfo[0] +
1701 (clock_array_index * clock_info_array->ucEntrySize));
1699 trinity_parse_pplib_clock_info(rdev, 1702 trinity_parse_pplib_clock_info(rdev,
1700 &rdev->pm.dpm.ps[i], k, 1703 &rdev->pm.dpm.ps[i], k,
1701 clock_info); 1704 clock_info);
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
new file mode 100644
index 000000000000..7266805d9786
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -0,0 +1,436 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Christian König <christian.koenig@amd.com>
23 */
24
25#include <drm/drmP.h>
26#include "radeon.h"
27#include "radeon_asic.h"
28#include "r600d.h"
29
30/**
31 * uvd_v1_0_get_rptr - get read pointer
32 *
33 * @rdev: radeon_device pointer
34 * @ring: radeon_ring pointer
35 *
36 * Returns the current hardware read pointer
37 */
38uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
39 struct radeon_ring *ring)
40{
41 return RREG32(UVD_RBC_RB_RPTR);
42}
43
44/**
45 * uvd_v1_0_get_wptr - get write pointer
46 *
47 * @rdev: radeon_device pointer
48 * @ring: radeon_ring pointer
49 *
50 * Returns the current hardware write pointer
51 */
52uint32_t uvd_v1_0_get_wptr(struct radeon_device *rdev,
53 struct radeon_ring *ring)
54{
55 return RREG32(UVD_RBC_RB_WPTR);
56}
57
58/**
59 * uvd_v1_0_set_wptr - set write pointer
60 *
61 * @rdev: radeon_device pointer
62 * @ring: radeon_ring pointer
63 *
64 * Commits the write pointer to the hardware
65 */
66void uvd_v1_0_set_wptr(struct radeon_device *rdev,
67 struct radeon_ring *ring)
68{
69 WREG32(UVD_RBC_RB_WPTR, ring->wptr);
70}
71
72/**
73 * uvd_v1_0_init - start and test UVD block
74 *
75 * @rdev: radeon_device pointer
76 *
77 * Initialize the hardware, boot up the VCPU and do some testing
78 */
79int uvd_v1_0_init(struct radeon_device *rdev)
80{
81 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
82 uint32_t tmp;
83 int r;
84
85 /* raise clocks while booting up the VCPU */
86 radeon_set_uvd_clocks(rdev, 53300, 40000);
87
88 r = uvd_v1_0_start(rdev);
89 if (r)
90 goto done;
91
92 ring->ready = true;
93 r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
94 if (r) {
95 ring->ready = false;
96 goto done;
97 }
98
99 r = radeon_ring_lock(rdev, ring, 10);
100 if (r) {
101 DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r);
102 goto done;
103 }
104
105 tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
106 radeon_ring_write(ring, tmp);
107 radeon_ring_write(ring, 0xFFFFF);
108
109 tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
110 radeon_ring_write(ring, tmp);
111 radeon_ring_write(ring, 0xFFFFF);
112
113 tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
114 radeon_ring_write(ring, tmp);
115 radeon_ring_write(ring, 0xFFFFF);
116
117 /* Clear timeout status bits */
118 radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
119 radeon_ring_write(ring, 0x8);
120
121 radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
122 radeon_ring_write(ring, 3);
123
124 radeon_ring_unlock_commit(rdev, ring);
125
126done:
127 /* lower clocks again */
128 radeon_set_uvd_clocks(rdev, 0, 0);
129
130 if (!r)
131 DRM_INFO("UVD initialized successfully.\n");
132
133 return r;
134}
135
136/**
137 * uvd_v1_0_fini - stop the hardware block
138 *
139 * @rdev: radeon_device pointer
140 *
141 * Stop the UVD block, mark ring as not ready any more
142 */
143void uvd_v1_0_fini(struct radeon_device *rdev)
144{
145 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
146
147 uvd_v1_0_stop(rdev);
148 ring->ready = false;
149}
150
151/**
152 * uvd_v1_0_start - start UVD block
153 *
154 * @rdev: radeon_device pointer
155 *
156 * Setup and start the UVD block
157 */
158int uvd_v1_0_start(struct radeon_device *rdev)
159{
160 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
161 uint32_t rb_bufsz;
162 int i, j, r;
163
164 /* disable byte swapping */
165 u32 lmi_swap_cntl = 0;
166 u32 mp_swap_cntl = 0;
167
168 /* disable clock gating */
169 WREG32(UVD_CGC_GATE, 0);
170
171	/* disable interrupt */
172 WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
173
174 /* Stall UMC and register bus before resetting VCPU */
175 WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
176 WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
177 mdelay(1);
178
179 /* put LMI, VCPU, RBC etc... into reset */
180 WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
181 LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
182 CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
183 mdelay(5);
184
185 /* take UVD block out of reset */
186 WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
187 mdelay(5);
188
189 /* initialize UVD memory controller */
190 WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
191 (1 << 21) | (1 << 9) | (1 << 20));
192
193#ifdef __BIG_ENDIAN
194 /* swap (8 in 32) RB and IB */
195 lmi_swap_cntl = 0xa;
196 mp_swap_cntl = 0;
197#endif
198 WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
199 WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
200
201 WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
202 WREG32(UVD_MPC_SET_MUXA1, 0x0);
203 WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
204 WREG32(UVD_MPC_SET_MUXB1, 0x0);
205 WREG32(UVD_MPC_SET_ALU, 0);
206 WREG32(UVD_MPC_SET_MUX, 0x88);
207
208 /* take all subblocks out of reset, except VCPU */
209 WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
210 mdelay(5);
211
212 /* enable VCPU clock */
213 WREG32(UVD_VCPU_CNTL, 1 << 9);
214
215 /* enable UMC */
216 WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
217
218 /* boot up the VCPU */
219 WREG32(UVD_SOFT_RESET, 0);
220 mdelay(10);
221
222 WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
223
224 for (i = 0; i < 10; ++i) {
225 uint32_t status;
226 for (j = 0; j < 100; ++j) {
227 status = RREG32(UVD_STATUS);
228 if (status & 2)
229 break;
230 mdelay(10);
231 }
232 r = 0;
233 if (status & 2)
234 break;
235
236 DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
237 WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
238 mdelay(10);
239 WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
240 mdelay(10);
241 r = -1;
242 }
243
244 if (r) {
245 DRM_ERROR("UVD not responding, giving up!!!\n");
246 return r;
247 }
248
249	/* enable interrupt */
250 WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1));
251
252 /* force RBC into idle state */
253 WREG32(UVD_RBC_RB_CNTL, 0x11010101);
254
255 /* Set the write pointer delay */
256 WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
257
258	/* program the 4GB memory segment for rptr and ring buffer */
259 WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
260 (0x7 << 16) | (0x1 << 31));
261
262 /* Initialize the ring buffer's read and write pointers */
263 WREG32(UVD_RBC_RB_RPTR, 0x0);
264
265 ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
266 WREG32(UVD_RBC_RB_WPTR, ring->wptr);
267
268 /* set the ring address */
269 WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
270
271 /* Set ring buffer size */
272 rb_bufsz = order_base_2(ring->ring_size);
273 rb_bufsz = (0x1 << 8) | rb_bufsz;
274 WREG32_P(UVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);
275
276 return 0;
277}
278
279/**
280 * uvd_v1_0_stop - stop UVD block
281 *
282 * @rdev: radeon_device pointer
283 *
284 * stop the UVD block
285 */
286void uvd_v1_0_stop(struct radeon_device *rdev)
287{
288 /* force RBC into idle state */
289 WREG32(UVD_RBC_RB_CNTL, 0x11010101);
290
291 /* Stall UMC and register bus before resetting VCPU */
292 WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
293 WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
294 mdelay(1);
295
296 /* put VCPU into reset */
297 WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
298 mdelay(5);
299
300 /* disable VCPU clock */
301 WREG32(UVD_VCPU_CNTL, 0x0);
302
303 /* Unstall UMC and register bus */
304 WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
305 WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
306}
307
308/**
309 * uvd_v1_0_ring_test - register write test
310 *
311 * @rdev: radeon_device pointer
312 * @ring: radeon_ring pointer
313 *
314 * Test if we can successfully write to the context register
315 */
316int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
317{
318 uint32_t tmp = 0;
319 unsigned i;
320 int r;
321
322 WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
323 r = radeon_ring_lock(rdev, ring, 3);
324 if (r) {
325 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
326 ring->idx, r);
327 return r;
328 }
329 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
330 radeon_ring_write(ring, 0xDEADBEEF);
331 radeon_ring_unlock_commit(rdev, ring);
332 for (i = 0; i < rdev->usec_timeout; i++) {
333 tmp = RREG32(UVD_CONTEXT_ID);
334 if (tmp == 0xDEADBEEF)
335 break;
336 DRM_UDELAY(1);
337 }
338
339 if (i < rdev->usec_timeout) {
340 DRM_INFO("ring test on %d succeeded in %d usecs\n",
341 ring->idx, i);
342 } else {
343 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
344 ring->idx, tmp);
345 r = -EINVAL;
346 }
347 return r;
348}
349
350/**
351 * uvd_v1_0_semaphore_emit - emit semaphore command
352 *
353 * @rdev: radeon_device pointer
354 * @ring: radeon_ring pointer
355 * @semaphore: semaphore to emit commands for
356 * @emit_wait: true if we should emit a wait command
357 *
358 * Emit a semaphore command (either wait or signal) to the UVD ring.
359 */
360void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
361 struct radeon_ring *ring,
362 struct radeon_semaphore *semaphore,
363 bool emit_wait)
364{
365 uint64_t addr = semaphore->gpu_addr;
366
367 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
368 radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
369
370 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
371 radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
372
373 radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
374 radeon_ring_write(ring, emit_wait ? 1 : 0);
375}
376
377/**
378 * uvd_v1_0_ib_execute - execute indirect buffer
379 *
380 * @rdev: radeon_device pointer
381 * @ib: indirect buffer to execute
382 *
383 * Write ring commands to execute the indirect buffer
384 */
385void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
386{
387 struct radeon_ring *ring = &rdev->ring[ib->ring];
388
389 radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
390 radeon_ring_write(ring, ib->gpu_addr);
391 radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
392 radeon_ring_write(ring, ib->length_dw);
393}
394
395/**
396 * uvd_v1_0_ib_test - test ib execution
397 *
398 * @rdev: radeon_device pointer
399 * @ring: radeon_ring pointer
400 *
401 * Test if we can successfully execute an IB
402 */
403int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
404{
405 struct radeon_fence *fence = NULL;
406 int r;
407
408 r = radeon_set_uvd_clocks(rdev, 53300, 40000);
409 if (r) {
410 DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
411 return r;
412 }
413
414 r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
415 if (r) {
416 DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
417 goto error;
418 }
419
420 r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
421 if (r) {
422 DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
423 goto error;
424 }
425
426 r = radeon_fence_wait(fence, false);
427 if (r) {
428 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
429 goto error;
430 }
431 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
432error:
433 radeon_fence_unref(&fence);
434 radeon_set_uvd_clocks(rdev, 0, 0);
435 return r;
436}
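The ring test above follows a common bring-up pattern: poison a scratch register, ask the engine to write a sentinel through the command stream, then poll until the sentinel lands or a timeout expires. Below is a minimal user-space sketch of that pattern, not driver code; the "register", the ring, and the timeout value are stand-ins for UVD_CONTEXT_ID, the UVD ring, and rdev->usec_timeout.

/*
 * Illustrative model of the write-sentinel-and-poll ring test.
 * Everything here is a placeholder; a real ring executes asynchronously.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t fake_context_id;	/* stands in for UVD_CONTEXT_ID */

static void fake_ring_execute(uint32_t value)
{
	/* In this model the "ring" completes immediately. */
	fake_context_id = value;
}

int main(void)
{
	const unsigned usec_timeout = 1000000;	/* placeholder timeout */
	unsigned i;

	fake_context_id = 0xCAFEDEAD;	/* poison value before the test */
	fake_ring_execute(0xDEADBEEF);	/* sentinel written via the "ring" */

	for (i = 0; i < usec_timeout; i++) {
		if (fake_context_id == 0xDEADBEEF)
			break;
		/* the driver would delay 1 usec here before re-reading */
	}

	if (i < usec_timeout)
		printf("ring test succeeded after %u polls\n", i);
	else
		printf("ring test failed (0x%08x)\n", (unsigned)fake_context_id);
	return 0;
}

The same structure (poison, submit, poll, report elapsed iterations) is what makes the DRM_INFO/DRM_ERROR pair above meaningful: the loop counter doubles as a rough latency measurement.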
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
new file mode 100644
index 000000000000..b19ef4951085
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -0,0 +1,165 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Christian König <christian.koenig@amd.com>
23 */
24
25#include <linux/firmware.h>
26#include <drm/drmP.h>
27#include "radeon.h"
28#include "radeon_asic.h"
29#include "rv770d.h"
30
31/**
32 * uvd_v2_2_fence_emit - emit a fence & trap command
33 *
34 * @rdev: radeon_device pointer
35 * @fence: fence to emit
36 *
37 * Write a fence and a trap command to the ring.
38 */
39void uvd_v2_2_fence_emit(struct radeon_device *rdev,
40 struct radeon_fence *fence)
41{
42 struct radeon_ring *ring = &rdev->ring[fence->ring];
43 uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
44
45 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
46 radeon_ring_write(ring, fence->seq);
47 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
48 radeon_ring_write(ring, addr & 0xffffffff);
49 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
50 radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
51 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
52 radeon_ring_write(ring, 0);
53
54 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
55 radeon_ring_write(ring, 0);
56 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
57 radeon_ring_write(ring, 0);
58 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
59 radeon_ring_write(ring, 2);
60 return;
61}
62
63/**
64 * uvd_v2_2_resume - memory controller programming
65 *
66 * @rdev: radeon_device pointer
67 *
68 * Let the UVD memory controller know its offsets
69 */
70int uvd_v2_2_resume(struct radeon_device *rdev)
71{
72 uint64_t addr;
73 uint32_t chip_id, size;
74 int r;
75
76 r = radeon_uvd_resume(rdev);
77 if (r)
78 return r;
79
80	/* program the VCPU memory controller bits 0-27 */
81 addr = rdev->uvd.gpu_addr >> 3;
82 size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
83 WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
84 WREG32(UVD_VCPU_CACHE_SIZE0, size);
85
86 addr += size;
87 size = RADEON_UVD_STACK_SIZE >> 3;
88 WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
89 WREG32(UVD_VCPU_CACHE_SIZE1, size);
90
91 addr += size;
92 size = RADEON_UVD_HEAP_SIZE >> 3;
93 WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
94 WREG32(UVD_VCPU_CACHE_SIZE2, size);
95
96 /* bits 28-31 */
97 addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
98 WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
99
100 /* bits 32-39 */
101 addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
102 WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
103
104 /* tell firmware which hardware it is running on */
105 switch (rdev->family) {
106 default:
107 return -EINVAL;
108 case CHIP_RV710:
109 chip_id = 0x01000005;
110 break;
111 case CHIP_RV730:
112 chip_id = 0x01000006;
113 break;
114 case CHIP_RV740:
115 chip_id = 0x01000007;
116 break;
117 case CHIP_CYPRESS:
118 case CHIP_HEMLOCK:
119 chip_id = 0x01000008;
120 break;
121 case CHIP_JUNIPER:
122 chip_id = 0x01000009;
123 break;
124 case CHIP_REDWOOD:
125 chip_id = 0x0100000a;
126 break;
127 case CHIP_CEDAR:
128 chip_id = 0x0100000b;
129 break;
130 case CHIP_SUMO:
131 case CHIP_SUMO2:
132 chip_id = 0x0100000c;
133 break;
134 case CHIP_PALM:
135 chip_id = 0x0100000e;
136 break;
137 case CHIP_CAYMAN:
138 chip_id = 0x0100000f;
139 break;
140 case CHIP_BARTS:
141 chip_id = 0x01000010;
142 break;
143 case CHIP_TURKS:
144 chip_id = 0x01000011;
145 break;
146 case CHIP_CAICOS:
147 chip_id = 0x01000012;
148 break;
149 case CHIP_TAHITI:
150 chip_id = 0x01000014;
151 break;
152 case CHIP_VERDE:
153 chip_id = 0x01000015;
154 break;
155 case CHIP_PITCAIRN:
156 chip_id = 0x01000016;
157 break;
158 case CHIP_ARUBA:
159 chip_id = 0x01000017;
160 break;
161 }
162 WREG32(UVD_VCPU_CHIP_ID, chip_id);
163
164 return 0;
165}
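uvd_v2_2_resume() above carves the single UVD buffer object into three consecutive regions (firmware image, stack, heap) and programs each offset/size pair in 8-byte units, hence the >> 3 shifts. The stand-alone sketch below only reproduces that arithmetic; the sizes are made-up placeholders, not the real firmware size or RADEON_UVD_STACK_SIZE/RADEON_UVD_HEAP_SIZE values.

#include <stdio.h>
#include <stdint.h>

/* 4 KiB page alignment, analogous to RADEON_GPU_PAGE_ALIGN(). */
#define PAGE_ALIGN(x)	(((x) + 4095ull) & ~4095ull)

int main(void)
{
	uint64_t fw_size    = 200 * 1024;	/* placeholder firmware blob size */
	uint64_t stack_size = 128 * 1024;	/* placeholder stack size */
	uint64_t heap_size  = 1024 * 1024;	/* placeholder heap size */

	uint64_t addr = 0;	/* running offset into the UVD BO, in 8-byte units */
	uint64_t size;

	size = PAGE_ALIGN(fw_size + 4) >> 3;
	printf("CACHE_OFFSET0 = 0x%llx  SIZE0 = 0x%llx\n",
	       (unsigned long long)addr, (unsigned long long)size);
	addr += size;

	size = stack_size >> 3;
	printf("CACHE_OFFSET1 = 0x%llx  SIZE1 = 0x%llx\n",
	       (unsigned long long)addr, (unsigned long long)size);
	addr += size;

	size = heap_size >> 3;
	printf("CACHE_OFFSET2 = 0x%llx  SIZE2 = 0x%llx\n",
	       (unsigned long long)addr, (unsigned long long)size);
	return 0;
}

Each region simply starts where the previous one ended, which is why the driver only has to accumulate addr between the three WREG32 pairs.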
diff --git a/drivers/gpu/drm/radeon/uvd_v3_1.c b/drivers/gpu/drm/radeon/uvd_v3_1.c
new file mode 100644
index 000000000000..5b6fa1f62d4e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v3_1.c
@@ -0,0 +1,55 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Christian König <christian.koenig@amd.com>
23 */
24
25#include <drm/drmP.h>
26#include "radeon.h"
27#include "radeon_asic.h"
28#include "nid.h"
29
30/**
31 * uvd_v3_1_semaphore_emit - emit semaphore command
32 *
33 * @rdev: radeon_device pointer
34 * @ring: radeon_ring pointer
35 * @semaphore: semaphore to emit commands for
36 * @emit_wait: true if we should emit a wait command
37 *
38 * Emit a semaphore command (either wait or signal) to the UVD ring.
39 */
40void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
41 struct radeon_ring *ring,
42 struct radeon_semaphore *semaphore,
43 bool emit_wait)
44{
45 uint64_t addr = semaphore->gpu_addr;
46
47 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
48 radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
49
50 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
51 radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
52
53 radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
54 radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
55}
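Both semaphore emit functions split the 64-bit, 8-byte-aligned semaphore address into two 20-bit register fields: bits 3..22 go into UVD_SEMA_ADDR_LOW and bits 23..42 into UVD_SEMA_ADDR_HIGH, with v3.1 additionally setting bit 7 of the command word. The self-contained check below only verifies that this split reassembles correctly for a sample address; it is an illustration, not driver code.

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t addr = 0x123456780ULL;	/* sample 8-byte-aligned GPU address */
	uint32_t low  = (addr >> 3)  & 0x000FFFFF;	/* bits 3..22  */
	uint32_t high = (addr >> 23) & 0x000FFFFF;	/* bits 23..42 */
	uint64_t back = ((uint64_t)high << 23) | ((uint64_t)low << 3);

	printf("low=0x%05x high=0x%05x reassembled=0x%llx\n",
	       (unsigned)low, (unsigned)high, (unsigned long long)back);
	assert(back == addr);	/* holds for aligned addresses below 2^43 */
	return 0;
}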
diff --git a/drivers/gpu/drm/radeon/uvd_v4_2.c b/drivers/gpu/drm/radeon/uvd_v4_2.c
new file mode 100644
index 000000000000..d04d5073eef2
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v4_2.c
@@ -0,0 +1,68 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Christian König <christian.koenig@amd.com>
23 */
24
25#include <linux/firmware.h>
26#include <drm/drmP.h>
27#include "radeon.h"
28#include "radeon_asic.h"
29#include "cikd.h"
30
31/**
32 * uvd_v4_2_resume - memory controller programming
33 *
34 * @rdev: radeon_device pointer
35 *
36 * Let the UVD memory controller know its offsets
37 */
38int uvd_v4_2_resume(struct radeon_device *rdev)
39{
40 uint64_t addr;
41 uint32_t size;
42
43	/* program the VCPU memory controller bits 0-27 */
44 addr = rdev->uvd.gpu_addr >> 3;
45 size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
46 WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
47 WREG32(UVD_VCPU_CACHE_SIZE0, size);
48
49 addr += size;
50 size = RADEON_UVD_STACK_SIZE >> 3;
51 WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
52 WREG32(UVD_VCPU_CACHE_SIZE1, size);
53
54 addr += size;
55 size = RADEON_UVD_HEAP_SIZE >> 3;
56 WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
57 WREG32(UVD_VCPU_CACHE_SIZE2, size);
58
59 /* bits 28-31 */
60 addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
61 WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
62
63 /* bits 32-39 */
64 addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
65 WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
66
67 return 0;
68}
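Beyond the 28-bit cache offsets, uvd_v2_2_resume() and uvd_v4_2_resume() also program the upper address bits: bits 28-31 of the BO address are written into two fields of UVD_LMI_ADDR_EXT, and bits 32-39 into UVD_LMI_EXT40_ADDR together with the extra bits seen in the code above. The throwaway sketch below just shows that bit packing with a made-up GPU address.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t gpu_addr = 0x2C3456789ULL;	/* made-up UVD BO address */
	uint32_t ext, ext40;

	ext = (uint32_t)((gpu_addr >> 28) & 0xF);
	ext = (ext << 12) | ext;	/* same nibble placed in both register fields */

	ext40 = (uint32_t)((gpu_addr >> 32) & 0xFF);
	ext40 |= (0x9 << 16) | (0x1u << 31);	/* extra bits, copied from the driver code above */

	printf("LMI_ADDR_EXT   = 0x%08x\n", (unsigned)ext);
	printf("LMI_EXT40_ADDR = 0x%08x\n", (unsigned)ext40);
	return 0;
}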
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 72887df8dd76..c590cd9dca0b 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -7,3 +7,10 @@ config DRM_RCAR_DU
7 help 7 help
8 Choose this option if you have an R-Car chipset. 8 Choose this option if you have an R-Car chipset.
9 If M is selected the module will be called rcar-du-drm. 9 If M is selected the module will be called rcar-du-drm.
10
11config DRM_RCAR_LVDS
12 bool "R-Car DU LVDS Encoder Support"
13 depends on DRM_RCAR_DU
14 help
15	  Enable support for the R-Car Display Unit embedded LVDS encoders
16 (currently only on R8A7790).
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 7333c0094015..12b8d4477835 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -1,8 +1,12 @@
1rcar-du-drm-y := rcar_du_crtc.o \ 1rcar-du-drm-y := rcar_du_crtc.o \
2 rcar_du_drv.o \ 2 rcar_du_drv.o \
3 rcar_du_encoder.o \
4 rcar_du_group.o \
3 rcar_du_kms.o \ 5 rcar_du_kms.o \
4 rcar_du_lvds.o \ 6 rcar_du_lvdscon.o \
5 rcar_du_plane.o \ 7 rcar_du_plane.o \
6 rcar_du_vga.o 8 rcar_du_vgacon.o
7 9
8obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o 10rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_lvdsenc.o
11
12obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 24183fb93592..a9d24e4bf792 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -23,30 +23,26 @@
23#include "rcar_du_crtc.h" 23#include "rcar_du_crtc.h"
24#include "rcar_du_drv.h" 24#include "rcar_du_drv.h"
25#include "rcar_du_kms.h" 25#include "rcar_du_kms.h"
26#include "rcar_du_lvds.h"
27#include "rcar_du_plane.h" 26#include "rcar_du_plane.h"
28#include "rcar_du_regs.h" 27#include "rcar_du_regs.h"
29#include "rcar_du_vga.h"
30
31#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
32 28
33static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg) 29static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
34{ 30{
35 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; 31 struct rcar_du_device *rcdu = rcrtc->group->dev;
36 32
37 return rcar_du_read(rcdu, rcrtc->mmio_offset + reg); 33 return rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
38} 34}
39 35
40static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data) 36static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data)
41{ 37{
42 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; 38 struct rcar_du_device *rcdu = rcrtc->group->dev;
43 39
44 rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data); 40 rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data);
45} 41}
46 42
47static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr) 43static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
48{ 44{
49 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; 45 struct rcar_du_device *rcdu = rcrtc->group->dev;
50 46
51 rcar_du_write(rcdu, rcrtc->mmio_offset + reg, 47 rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
52 rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr); 48 rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr);
@@ -54,7 +50,7 @@ static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
54 50
55static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set) 51static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
56{ 52{
57 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; 53 struct rcar_du_device *rcdu = rcrtc->group->dev;
58 54
59 rcar_du_write(rcdu, rcrtc->mmio_offset + reg, 55 rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
60 rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set); 56 rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set);
@@ -63,29 +59,48 @@ static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
63static void rcar_du_crtc_clr_set(struct rcar_du_crtc *rcrtc, u32 reg, 59static void rcar_du_crtc_clr_set(struct rcar_du_crtc *rcrtc, u32 reg,
64 u32 clr, u32 set) 60 u32 clr, u32 set)
65{ 61{
66 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; 62 struct rcar_du_device *rcdu = rcrtc->group->dev;
67 u32 value = rcar_du_read(rcdu, rcrtc->mmio_offset + reg); 63 u32 value = rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
68 64
69 rcar_du_write(rcdu, rcrtc->mmio_offset + reg, (value & ~clr) | set); 65 rcar_du_write(rcdu, rcrtc->mmio_offset + reg, (value & ~clr) | set);
70} 66}
71 67
68static int rcar_du_crtc_get(struct rcar_du_crtc *rcrtc)
69{
70 int ret;
71
72 ret = clk_prepare_enable(rcrtc->clock);
73 if (ret < 0)
74 return ret;
75
76 ret = rcar_du_group_get(rcrtc->group);
77 if (ret < 0)
78 clk_disable_unprepare(rcrtc->clock);
79
80 return ret;
81}
82
83static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc)
84{
85 rcar_du_group_put(rcrtc->group);
86 clk_disable_unprepare(rcrtc->clock);
87}
88
72static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) 89static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
73{ 90{
74 struct drm_crtc *crtc = &rcrtc->crtc; 91 const struct drm_display_mode *mode = &rcrtc->crtc.mode;
75 struct rcar_du_device *rcdu = crtc->dev->dev_private;
76 const struct drm_display_mode *mode = &crtc->mode;
77 unsigned long clk; 92 unsigned long clk;
78 u32 value; 93 u32 value;
79 u32 div; 94 u32 div;
80 95
81 /* Dot clock */ 96 /* Dot clock */
82 clk = clk_get_rate(rcdu->clock); 97 clk = clk_get_rate(rcrtc->clock);
83 div = DIV_ROUND_CLOSEST(clk, mode->clock * 1000); 98 div = DIV_ROUND_CLOSEST(clk, mode->clock * 1000);
84 div = clamp(div, 1U, 64U) - 1; 99 div = clamp(div, 1U, 64U) - 1;
85 100
86 rcar_du_write(rcdu, rcrtc->index ? ESCR2 : ESCR, 101 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? ESCR2 : ESCR,
87 ESCR_DCLKSEL_CLKS | div); 102 ESCR_DCLKSEL_CLKS | div);
88 rcar_du_write(rcdu, rcrtc->index ? OTAR2 : OTAR, 0); 103 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0);
89 104
90 /* Signal polarities */ 105 /* Signal polarities */
91 value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL) 106 value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
@@ -112,68 +127,25 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
112 rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay); 127 rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay);
113} 128}
114 129
115static void rcar_du_crtc_set_routing(struct rcar_du_crtc *rcrtc) 130void rcar_du_crtc_route_output(struct drm_crtc *crtc,
116{ 131 enum rcar_du_output output)
117 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
118 u32 dorcr = rcar_du_read(rcdu, DORCR);
119
120 dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK);
121
122 /* Set the DU1 pins sources. Select CRTC 0 if explicitly requested and
123 * CRTC 1 in all other cases to avoid cloning CRTC 0 to DU0 and DU1 by
124 * default.
125 */
126 if (rcrtc->outputs & (1 << 1) && rcrtc->index == 0)
127 dorcr |= DORCR_PG2D_DS1;
128 else
129 dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2;
130
131 rcar_du_write(rcdu, DORCR, dorcr);
132}
133
134static void __rcar_du_start_stop(struct rcar_du_device *rcdu, bool start)
135{
136 rcar_du_write(rcdu, DSYSR,
137 (rcar_du_read(rcdu, DSYSR) & ~(DSYSR_DRES | DSYSR_DEN)) |
138 (start ? DSYSR_DEN : DSYSR_DRES));
139}
140
141static void rcar_du_start_stop(struct rcar_du_device *rcdu, bool start)
142{
143 /* Many of the configuration bits are only updated when the display
144 * reset (DRES) bit in DSYSR is set to 1, disabling *both* CRTCs. Some
145 * of those bits could be pre-configured, but others (especially the
146 * bits related to plane assignment to display timing controllers) need
147 * to be modified at runtime.
148 *
149 * Restart the display controller if a start is requested. Sorry for the
150 * flicker. It should be possible to move most of the "DRES-update" bits
151 * setup to driver initialization time and minimize the number of cases
152 * when the display controller will have to be restarted.
153 */
154 if (start) {
155 if (rcdu->used_crtcs++ != 0)
156 __rcar_du_start_stop(rcdu, false);
157 __rcar_du_start_stop(rcdu, true);
158 } else {
159 if (--rcdu->used_crtcs == 0)
160 __rcar_du_start_stop(rcdu, false);
161 }
162}
163
164void rcar_du_crtc_route_output(struct drm_crtc *crtc, unsigned int output)
165{ 132{
166 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 133 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
134 struct rcar_du_device *rcdu = rcrtc->group->dev;
167 135
168 /* Store the route from the CRTC output to the DU output. The DU will be 136 /* Store the route from the CRTC output to the DU output. The DU will be
169 * configured when starting the CRTC. 137 * configured when starting the CRTC.
170 */ 138 */
171 rcrtc->outputs |= 1 << output; 139 rcrtc->outputs |= BIT(output);
140
141 /* Store RGB routing to DPAD0 for R8A7790. */
142 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_DEFR8) &&
143 output == RCAR_DU_OUTPUT_DPAD0)
144 rcdu->dpad0_source = rcrtc->index;
172} 145}
173 146
174void rcar_du_crtc_update_planes(struct drm_crtc *crtc) 147void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
175{ 148{
176 struct rcar_du_device *rcdu = crtc->dev->dev_private;
177 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 149 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
178 struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES]; 150 struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
179 unsigned int num_planes = 0; 151 unsigned int num_planes = 0;
@@ -182,8 +154,8 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
182 u32 dptsr = 0; 154 u32 dptsr = 0;
183 u32 dspr = 0; 155 u32 dspr = 0;
184 156
185 for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) { 157 for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) {
186 struct rcar_du_plane *plane = &rcdu->planes.planes[i]; 158 struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];
187 unsigned int j; 159 unsigned int j;
188 160
189 if (plane->crtc != &rcrtc->crtc || !plane->enabled) 161 if (plane->crtc != &rcrtc->crtc || !plane->enabled)
@@ -220,8 +192,8 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
220 /* Select display timing and dot clock generator 2 for planes associated 192 /* Select display timing and dot clock generator 2 for planes associated
221 * with superposition controller 2. 193 * with superposition controller 2.
222 */ 194 */
223 if (rcrtc->index) { 195 if (rcrtc->index % 2) {
224 u32 value = rcar_du_read(rcdu, DPTSR); 196 u32 value = rcar_du_group_read(rcrtc->group, DPTSR);
225 197
226 /* The DPTSR register is updated when the display controller is 198 /* The DPTSR register is updated when the display controller is
227 * stopped. We thus need to restart the DU. Once again, sorry 199 * stopped. We thus need to restart the DU. Once again, sorry
@@ -231,21 +203,19 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
231 * occur only if we need to break the pre-association. 203 * occur only if we need to break the pre-association.
232 */ 204 */
233 if (value != dptsr) { 205 if (value != dptsr) {
234 rcar_du_write(rcdu, DPTSR, dptsr); 206 rcar_du_group_write(rcrtc->group, DPTSR, dptsr);
235 if (rcdu->used_crtcs) { 207 if (rcrtc->group->used_crtcs)
236 __rcar_du_start_stop(rcdu, false); 208 rcar_du_group_restart(rcrtc->group);
237 __rcar_du_start_stop(rcdu, true);
238 }
239 } 209 }
240 } 210 }
241 211
242 rcar_du_write(rcdu, rcrtc->index ? DS2PR : DS1PR, dspr); 212 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR,
213 dspr);
243} 214}
244 215
245static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) 216static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
246{ 217{
247 struct drm_crtc *crtc = &rcrtc->crtc; 218 struct drm_crtc *crtc = &rcrtc->crtc;
248 struct rcar_du_device *rcdu = crtc->dev->dev_private;
249 unsigned int i; 219 unsigned int i;
250 220
251 if (rcrtc->started) 221 if (rcrtc->started)
@@ -260,16 +230,16 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
260 230
261 /* Configure display timings and output routing */ 231 /* Configure display timings and output routing */
262 rcar_du_crtc_set_display_timing(rcrtc); 232 rcar_du_crtc_set_display_timing(rcrtc);
263 rcar_du_crtc_set_routing(rcrtc); 233 rcar_du_group_set_routing(rcrtc->group);
264 234
265 mutex_lock(&rcdu->planes.lock); 235 mutex_lock(&rcrtc->group->planes.lock);
266 rcrtc->plane->enabled = true; 236 rcrtc->plane->enabled = true;
267 rcar_du_crtc_update_planes(crtc); 237 rcar_du_crtc_update_planes(crtc);
268 mutex_unlock(&rcdu->planes.lock); 238 mutex_unlock(&rcrtc->group->planes.lock);
269 239
270 /* Setup planes. */ 240 /* Setup planes. */
271 for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) { 241 for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) {
272 struct rcar_du_plane *plane = &rcdu->planes.planes[i]; 242 struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];
273 243
274 if (plane->crtc != crtc || !plane->enabled) 244 if (plane->crtc != crtc || !plane->enabled)
275 continue; 245 continue;
@@ -283,7 +253,7 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
283 */ 253 */
284 rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_MASTER); 254 rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_MASTER);
285 255
286 rcar_du_start_stop(rcdu, true); 256 rcar_du_group_start_stop(rcrtc->group, true);
287 257
288 rcrtc->started = true; 258 rcrtc->started = true;
289} 259}
@@ -291,42 +261,37 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
291static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc) 261static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
292{ 262{
293 struct drm_crtc *crtc = &rcrtc->crtc; 263 struct drm_crtc *crtc = &rcrtc->crtc;
294 struct rcar_du_device *rcdu = crtc->dev->dev_private;
295 264
296 if (!rcrtc->started) 265 if (!rcrtc->started)
297 return; 266 return;
298 267
299 mutex_lock(&rcdu->planes.lock); 268 mutex_lock(&rcrtc->group->planes.lock);
300 rcrtc->plane->enabled = false; 269 rcrtc->plane->enabled = false;
301 rcar_du_crtc_update_planes(crtc); 270 rcar_du_crtc_update_planes(crtc);
302 mutex_unlock(&rcdu->planes.lock); 271 mutex_unlock(&rcrtc->group->planes.lock);
303 272
304 /* Select switch sync mode. This stops display operation and configures 273 /* Select switch sync mode. This stops display operation and configures
305 * the HSYNC and VSYNC signals as inputs. 274 * the HSYNC and VSYNC signals as inputs.
306 */ 275 */
307 rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_SWITCH); 276 rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_SWITCH);
308 277
309 rcar_du_start_stop(rcdu, false); 278 rcar_du_group_start_stop(rcrtc->group, false);
310 279
311 rcrtc->started = false; 280 rcrtc->started = false;
312} 281}
313 282
314void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc) 283void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc)
315{ 284{
316 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
317
318 rcar_du_crtc_stop(rcrtc); 285 rcar_du_crtc_stop(rcrtc);
319 rcar_du_put(rcdu); 286 rcar_du_crtc_put(rcrtc);
320} 287}
321 288
322void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc) 289void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc)
323{ 290{
324 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
325
326 if (rcrtc->dpms != DRM_MODE_DPMS_ON) 291 if (rcrtc->dpms != DRM_MODE_DPMS_ON)
327 return; 292 return;
328 293
329 rcar_du_get(rcdu); 294 rcar_du_crtc_get(rcrtc);
330 rcar_du_crtc_start(rcrtc); 295 rcar_du_crtc_start(rcrtc);
331} 296}
332 297
@@ -340,18 +305,17 @@ static void rcar_du_crtc_update_base(struct rcar_du_crtc *rcrtc)
340 305
341static void rcar_du_crtc_dpms(struct drm_crtc *crtc, int mode) 306static void rcar_du_crtc_dpms(struct drm_crtc *crtc, int mode)
342{ 307{
343 struct rcar_du_device *rcdu = crtc->dev->dev_private;
344 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 308 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
345 309
346 if (rcrtc->dpms == mode) 310 if (rcrtc->dpms == mode)
347 return; 311 return;
348 312
349 if (mode == DRM_MODE_DPMS_ON) { 313 if (mode == DRM_MODE_DPMS_ON) {
350 rcar_du_get(rcdu); 314 rcar_du_crtc_get(rcrtc);
351 rcar_du_crtc_start(rcrtc); 315 rcar_du_crtc_start(rcrtc);
352 } else { 316 } else {
353 rcar_du_crtc_stop(rcrtc); 317 rcar_du_crtc_stop(rcrtc);
354 rcar_du_put(rcdu); 318 rcar_du_crtc_put(rcrtc);
355 } 319 }
356 320
357 rcrtc->dpms = mode; 321 rcrtc->dpms = mode;
@@ -367,13 +331,12 @@ static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc,
367 331
368static void rcar_du_crtc_mode_prepare(struct drm_crtc *crtc) 332static void rcar_du_crtc_mode_prepare(struct drm_crtc *crtc)
369{ 333{
370 struct rcar_du_device *rcdu = crtc->dev->dev_private;
371 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 334 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
372 335
373 /* We need to access the hardware during mode set, acquire a reference 336 /* We need to access the hardware during mode set, acquire a reference
374 * to the DU. 337 * to the CRTC.
375 */ 338 */
376 rcar_du_get(rcdu); 339 rcar_du_crtc_get(rcrtc);
377 340
378 /* Stop the CRTC and release the plane. Force the DPMS mode to off as a 341 /* Stop the CRTC and release the plane. Force the DPMS mode to off as a
379 * result. 342 * result.
@@ -390,8 +353,8 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
390 int x, int y, 353 int x, int y,
391 struct drm_framebuffer *old_fb) 354 struct drm_framebuffer *old_fb)
392{ 355{
393 struct rcar_du_device *rcdu = crtc->dev->dev_private;
394 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 356 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
357 struct rcar_du_device *rcdu = rcrtc->group->dev;
395 const struct rcar_du_format_info *format; 358 const struct rcar_du_format_info *format;
396 int ret; 359 int ret;
397 360
@@ -423,10 +386,10 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
423 386
424error: 387error:
425 /* There's no rollback/abort operation to clean up in case of error. We 388 /* There's no rollback/abort operation to clean up in case of error. We
426 * thus need to release the reference to the DU acquired in prepare() 389 * thus need to release the reference to the CRTC acquired in prepare()
427 * here. 390 * here.
428 */ 391 */
429 rcar_du_put(rcdu); 392 rcar_du_crtc_put(rcrtc);
430 return ret; 393 return ret;
431} 394}
432 395
@@ -514,9 +477,28 @@ static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
514 drm_vblank_put(dev, rcrtc->index); 477 drm_vblank_put(dev, rcrtc->index);
515} 478}
516 479
480static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
481{
482 struct rcar_du_crtc *rcrtc = arg;
483 irqreturn_t ret = IRQ_NONE;
484 u32 status;
485
486 status = rcar_du_crtc_read(rcrtc, DSSR);
487 rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
488
489 if (status & DSSR_VBK) {
490 drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
491 rcar_du_crtc_finish_page_flip(rcrtc);
492 ret = IRQ_HANDLED;
493 }
494
495 return ret;
496}
497
517static int rcar_du_crtc_page_flip(struct drm_crtc *crtc, 498static int rcar_du_crtc_page_flip(struct drm_crtc *crtc,
518 struct drm_framebuffer *fb, 499 struct drm_framebuffer *fb,
519 struct drm_pending_vblank_event *event) 500 struct drm_pending_vblank_event *event,
501 uint32_t page_flip_flags)
520{ 502{
521 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 503 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
522 struct drm_device *dev = rcrtc->crtc.dev; 504 struct drm_device *dev = rcrtc->crtc.dev;
@@ -549,16 +531,41 @@ static const struct drm_crtc_funcs crtc_funcs = {
549 .page_flip = rcar_du_crtc_page_flip, 531 .page_flip = rcar_du_crtc_page_flip,
550}; 532};
551 533
552int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index) 534int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
553{ 535{
536 static const unsigned int mmio_offsets[] = {
537 DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET
538 };
539
540 struct rcar_du_device *rcdu = rgrp->dev;
541 struct platform_device *pdev = to_platform_device(rcdu->dev);
554 struct rcar_du_crtc *rcrtc = &rcdu->crtcs[index]; 542 struct rcar_du_crtc *rcrtc = &rcdu->crtcs[index];
555 struct drm_crtc *crtc = &rcrtc->crtc; 543 struct drm_crtc *crtc = &rcrtc->crtc;
544 unsigned int irqflags;
545 char clk_name[5];
546 char *name;
547 int irq;
556 int ret; 548 int ret;
557 549
558 rcrtc->mmio_offset = index ? DISP2_REG_OFFSET : 0; 550 /* Get the CRTC clock. */
551 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
552 sprintf(clk_name, "du.%u", index);
553 name = clk_name;
554 } else {
555 name = NULL;
556 }
557
558 rcrtc->clock = devm_clk_get(rcdu->dev, name);
559 if (IS_ERR(rcrtc->clock)) {
560 dev_err(rcdu->dev, "no clock for CRTC %u\n", index);
561 return PTR_ERR(rcrtc->clock);
562 }
563
564 rcrtc->group = rgrp;
565 rcrtc->mmio_offset = mmio_offsets[index];
559 rcrtc->index = index; 566 rcrtc->index = index;
560 rcrtc->dpms = DRM_MODE_DPMS_OFF; 567 rcrtc->dpms = DRM_MODE_DPMS_OFF;
561 rcrtc->plane = &rcdu->planes.planes[index]; 568 rcrtc->plane = &rgrp->planes.planes[index % 2];
562 569
563 rcrtc->plane->crtc = crtc; 570 rcrtc->plane->crtc = crtc;
564 571
@@ -568,6 +575,28 @@ int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index)
568 575
569 drm_crtc_helper_add(crtc, &crtc_helper_funcs); 576 drm_crtc_helper_add(crtc, &crtc_helper_funcs);
570 577
578 /* Register the interrupt handler. */
579 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
580 irq = platform_get_irq(pdev, index);
581 irqflags = 0;
582 } else {
583 irq = platform_get_irq(pdev, 0);
584 irqflags = IRQF_SHARED;
585 }
586
587 if (irq < 0) {
588 dev_err(rcdu->dev, "no IRQ for CRTC %u\n", index);
589			return irq;
590 }
591
592 ret = devm_request_irq(rcdu->dev, irq, rcar_du_crtc_irq, irqflags,
593 dev_name(rcdu->dev), rcrtc);
594 if (ret < 0) {
595 dev_err(rcdu->dev,
596 "failed to register IRQ for CRTC %u\n", index);
597 return ret;
598 }
599
571 return 0; 600 return 0;
572} 601}
573 602
@@ -580,16 +609,3 @@ void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable)
580 rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE); 609 rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE);
581 } 610 }
582} 611}
583
584void rcar_du_crtc_irq(struct rcar_du_crtc *rcrtc)
585{
586 u32 status;
587
588 status = rcar_du_crtc_read(rcrtc, DSSR);
589 rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
590
591 if (status & DSSR_VBK) {
592 drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
593 rcar_du_crtc_finish_page_flip(rcrtc);
594 }
595}
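rcar_du_crtc_set_display_timing() above derives the ESCR dot-clock divider from the CRTC clock and the requested pixel clock: round the ratio to the nearest integer, clamp it to the 1-64 range, and program the value minus one. A small stand-alone version of that arithmetic follows; the clock rates are illustrative, and the helpers are user-space re-implementations of the kernel's DIV_ROUND_CLOSEST() and clamp().

#include <stdio.h>

static unsigned int div_round_closest(unsigned long n, unsigned long d)
{
	return (unsigned int)((n + d / 2) / d);
}

static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned long du_clk_hz = 130000000;	/* example CRTC/module clock rate */
	unsigned long pixel_khz = 65000;	/* example mode->clock, in kHz */
	unsigned int div;

	div = div_round_closest(du_clk_hz, pixel_khz * 1000);
	div = clamp_uint(div, 1, 64) - 1;	/* the ESCR field holds divider - 1 */

	printf("ESCR divider field = %u (divide by %u)\n", div, div + 1);
	return 0;
}

The per-CRTC clock introduced by this patch is what makes the computation local to the CRTC: the divider is now derived from rcrtc->clock instead of a single device-wide clock.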
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 2a0365bcbd14..43e7575c700c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -15,16 +15,18 @@
15#define __RCAR_DU_CRTC_H__ 15#define __RCAR_DU_CRTC_H__
16 16
17#include <linux/mutex.h> 17#include <linux/mutex.h>
18#include <linux/platform_data/rcar-du.h>
18 19
19#include <drm/drmP.h> 20#include <drm/drmP.h>
20#include <drm/drm_crtc.h> 21#include <drm/drm_crtc.h>
21 22
22struct rcar_du_device; 23struct rcar_du_group;
23struct rcar_du_plane; 24struct rcar_du_plane;
24 25
25struct rcar_du_crtc { 26struct rcar_du_crtc {
26 struct drm_crtc crtc; 27 struct drm_crtc crtc;
27 28
29 struct clk *clock;
28 unsigned int mmio_offset; 30 unsigned int mmio_offset;
29 unsigned int index; 31 unsigned int index;
30 bool started; 32 bool started;
@@ -33,18 +35,21 @@ struct rcar_du_crtc {
33 unsigned int outputs; 35 unsigned int outputs;
34 int dpms; 36 int dpms;
35 37
38 struct rcar_du_group *group;
36 struct rcar_du_plane *plane; 39 struct rcar_du_plane *plane;
37}; 40};
38 41
39int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index); 42#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
43
44int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index);
40void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable); 45void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable);
41void rcar_du_crtc_irq(struct rcar_du_crtc *rcrtc);
42void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc, 46void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
43 struct drm_file *file); 47 struct drm_file *file);
44void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc); 48void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc);
45void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc); 49void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
46 50
47void rcar_du_crtc_route_output(struct drm_crtc *crtc, unsigned int output); 51void rcar_du_crtc_route_output(struct drm_crtc *crtc,
52 enum rcar_du_output output);
48void rcar_du_crtc_update_planes(struct drm_crtc *crtc); 53void rcar_du_crtc_update_planes(struct drm_crtc *crtc);
49 54
50#endif /* __RCAR_DU_CRTC_H__ */ 55#endif /* __RCAR_DU_CRTC_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index dc0fe09b2ba1..0023f9719cf1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -21,6 +21,7 @@
21 21
22#include <drm/drmP.h> 22#include <drm/drmP.h>
23#include <drm/drm_crtc_helper.h> 23#include <drm/drm_crtc_helper.h>
24#include <drm/drm_fb_cma_helper.h>
24#include <drm/drm_gem_cma_helper.h> 25#include <drm/drm_gem_cma_helper.h>
25 26
26#include "rcar_du_crtc.h" 27#include "rcar_du_crtc.h"
@@ -29,74 +30,21 @@
29#include "rcar_du_regs.h" 30#include "rcar_du_regs.h"
30 31
31/* ----------------------------------------------------------------------------- 32/* -----------------------------------------------------------------------------
32 * Core device operations
33 */
34
35/*
36 * rcar_du_get - Acquire a reference to the DU
37 *
38 * Acquiring a reference enables the device clock and setup core registers. A
39 * reference must be held before accessing any hardware registers.
40 *
41 * This function must be called with the DRM mode_config lock held.
42 *
43 * Return 0 in case of success or a negative error code otherwise.
44 */
45int rcar_du_get(struct rcar_du_device *rcdu)
46{
47 int ret;
48
49 if (rcdu->use_count)
50 goto done;
51
52 /* Enable clocks before accessing the hardware. */
53 ret = clk_prepare_enable(rcdu->clock);
54 if (ret < 0)
55 return ret;
56
57 /* Enable extended features */
58 rcar_du_write(rcdu, DEFR, DEFR_CODE | DEFR_DEFE);
59 rcar_du_write(rcdu, DEFR2, DEFR2_CODE | DEFR2_DEFE2G);
60 rcar_du_write(rcdu, DEFR3, DEFR3_CODE | DEFR3_DEFE3);
61 rcar_du_write(rcdu, DEFR4, DEFR4_CODE);
62 rcar_du_write(rcdu, DEFR5, DEFR5_CODE | DEFR5_DEFE5);
63
64 /* Use DS1PR and DS2PR to configure planes priorities and connects the
65 * superposition 0 to DU0 pins. DU1 pins will be configured dynamically.
66 */
67 rcar_du_write(rcdu, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS);
68
69done:
70 rcdu->use_count++;
71 return 0;
72}
73
74/*
75 * rcar_du_put - Release a reference to the DU
76 *
77 * Releasing the last reference disables the device clock.
78 *
79 * This function must be called with the DRM mode_config lock held.
80 */
81void rcar_du_put(struct rcar_du_device *rcdu)
82{
83 if (--rcdu->use_count)
84 return;
85
86 clk_disable_unprepare(rcdu->clock);
87}
88
89/* -----------------------------------------------------------------------------
90 * DRM operations 33 * DRM operations
91 */ 34 */
92 35
93static int rcar_du_unload(struct drm_device *dev) 36static int rcar_du_unload(struct drm_device *dev)
94{ 37{
38 struct rcar_du_device *rcdu = dev->dev_private;
39
40 if (rcdu->fbdev)
41 drm_fbdev_cma_fini(rcdu->fbdev);
42
95 drm_kms_helper_poll_fini(dev); 43 drm_kms_helper_poll_fini(dev);
96 drm_mode_config_cleanup(dev); 44 drm_mode_config_cleanup(dev);
97 drm_vblank_cleanup(dev); 45 drm_vblank_cleanup(dev);
98 drm_irq_uninstall(dev);
99 46
47 dev->irq_enabled = 0;
100 dev->dev_private = NULL; 48 dev->dev_private = NULL;
101 49
102 return 0; 50 return 0;
@@ -107,7 +55,6 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
107 struct platform_device *pdev = dev->platformdev; 55 struct platform_device *pdev = dev->platformdev;
108 struct rcar_du_platform_data *pdata = pdev->dev.platform_data; 56 struct rcar_du_platform_data *pdata = pdev->dev.platform_data;
109 struct rcar_du_device *rcdu; 57 struct rcar_du_device *rcdu;
110 struct resource *ioarea;
111 struct resource *mem; 58 struct resource *mem;
112 int ret; 59 int ret;
113 60
@@ -124,35 +71,15 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
124 71
125 rcdu->dev = &pdev->dev; 72 rcdu->dev = &pdev->dev;
126 rcdu->pdata = pdata; 73 rcdu->pdata = pdata;
74 rcdu->info = (struct rcar_du_device_info *)pdev->id_entry->driver_data;
127 rcdu->ddev = dev; 75 rcdu->ddev = dev;
128 dev->dev_private = rcdu; 76 dev->dev_private = rcdu;
129 77
130 /* I/O resources and clocks */ 78 /* I/O resources */
131 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 79 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
132 if (mem == NULL) { 80 rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
133 dev_err(&pdev->dev, "failed to get memory resource\n"); 81 if (IS_ERR(rcdu->mmio))
134 return -EINVAL; 82 return PTR_ERR(rcdu->mmio);
135 }
136
137 ioarea = devm_request_mem_region(&pdev->dev, mem->start,
138 resource_size(mem), pdev->name);
139 if (ioarea == NULL) {
140 dev_err(&pdev->dev, "failed to request memory region\n");
141 return -EBUSY;
142 }
143
144 rcdu->mmio = devm_ioremap_nocache(&pdev->dev, ioarea->start,
145 resource_size(ioarea));
146 if (rcdu->mmio == NULL) {
147 dev_err(&pdev->dev, "failed to remap memory resource\n");
148 return -ENOMEM;
149 }
150
151 rcdu->clock = devm_clk_get(&pdev->dev, NULL);
152 if (IS_ERR(rcdu->clock)) {
153 dev_err(&pdev->dev, "failed to get clock\n");
154 return -ENOENT;
155 }
156 83
157 /* DRM/KMS objects */ 84 /* DRM/KMS objects */
158 ret = rcar_du_modeset_init(rcdu); 85 ret = rcar_du_modeset_init(rcdu);
@@ -161,18 +88,14 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
161 goto done; 88 goto done;
162 } 89 }
163 90
164 /* IRQ and vblank handling */ 91 /* vblank handling */
165 ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1); 92 ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1);
166 if (ret < 0) { 93 if (ret < 0) {
167 dev_err(&pdev->dev, "failed to initialize vblank\n"); 94 dev_err(&pdev->dev, "failed to initialize vblank\n");
168 goto done; 95 goto done;
169 } 96 }
170 97
171 ret = drm_irq_install(dev); 98 dev->irq_enabled = 1;
172 if (ret < 0) {
173 dev_err(&pdev->dev, "failed to install IRQ handler\n");
174 goto done;
175 }
176 99
177 platform_set_drvdata(pdev, rcdu); 100 platform_set_drvdata(pdev, rcdu);
178 101
@@ -188,20 +111,15 @@ static void rcar_du_preclose(struct drm_device *dev, struct drm_file *file)
188 struct rcar_du_device *rcdu = dev->dev_private; 111 struct rcar_du_device *rcdu = dev->dev_private;
189 unsigned int i; 112 unsigned int i;
190 113
191 for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i) 114 for (i = 0; i < rcdu->num_crtcs; ++i)
192 rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file); 115 rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file);
193} 116}
194 117
195static irqreturn_t rcar_du_irq(int irq, void *arg) 118static void rcar_du_lastclose(struct drm_device *dev)
196{ 119{
197 struct drm_device *dev = arg;
198 struct rcar_du_device *rcdu = dev->dev_private; 120 struct rcar_du_device *rcdu = dev->dev_private;
199 unsigned int i;
200
201 for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i)
202 rcar_du_crtc_irq(&rcdu->crtcs[i]);
203 121
204 return IRQ_HANDLED; 122 drm_fbdev_cma_restore_mode(rcdu->fbdev);
205} 123}
206 124
207static int rcar_du_enable_vblank(struct drm_device *dev, int crtc) 125static int rcar_du_enable_vblank(struct drm_device *dev, int crtc)
@@ -230,18 +148,16 @@ static const struct file_operations rcar_du_fops = {
230#endif 148#endif
231 .poll = drm_poll, 149 .poll = drm_poll,
232 .read = drm_read, 150 .read = drm_read,
233 .fasync = drm_fasync,
234 .llseek = no_llseek, 151 .llseek = no_llseek,
235 .mmap = drm_gem_cma_mmap, 152 .mmap = drm_gem_cma_mmap,
236}; 153};
237 154
238static struct drm_driver rcar_du_driver = { 155static struct drm_driver rcar_du_driver = {
239 .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET 156 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
240 | DRIVER_PRIME,
241 .load = rcar_du_load, 157 .load = rcar_du_load,
242 .unload = rcar_du_unload, 158 .unload = rcar_du_unload,
243 .preclose = rcar_du_preclose, 159 .preclose = rcar_du_preclose,
244 .irq_handler = rcar_du_irq, 160 .lastclose = rcar_du_lastclose,
245 .get_vblank_counter = drm_vblank_count, 161 .get_vblank_counter = drm_vblank_count,
246 .enable_vblank = rcar_du_enable_vblank, 162 .enable_vblank = rcar_du_enable_vblank,
247 .disable_vblank = rcar_du_disable_vblank, 163 .disable_vblank = rcar_du_disable_vblank,
@@ -258,7 +174,7 @@ static struct drm_driver rcar_du_driver = {
258 .gem_prime_mmap = drm_gem_cma_prime_mmap, 174 .gem_prime_mmap = drm_gem_cma_prime_mmap,
259 .dumb_create = rcar_du_dumb_create, 175 .dumb_create = rcar_du_dumb_create,
260 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 176 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
261 .dumb_destroy = drm_gem_cma_dumb_destroy, 177 .dumb_destroy = drm_gem_dumb_destroy,
262 .fops = &rcar_du_fops, 178 .fops = &rcar_du_fops,
263 .name = "rcar-du", 179 .name = "rcar-du",
264 .desc = "Renesas R-Car Display Unit", 180 .desc = "Renesas R-Car Display Unit",
@@ -313,6 +229,57 @@ static int rcar_du_remove(struct platform_device *pdev)
313 return 0; 229 return 0;
314} 230}
315 231
232static const struct rcar_du_device_info rcar_du_r8a7779_info = {
233 .features = 0,
234 .num_crtcs = 2,
235 .routes = {
236 /* R8A7779 has two RGB outputs and one (currently unsupported)
237 * TCON output.
238 */
239 [RCAR_DU_OUTPUT_DPAD0] = {
240 .possible_crtcs = BIT(0),
241 .encoder_type = DRM_MODE_ENCODER_NONE,
242 },
243 [RCAR_DU_OUTPUT_DPAD1] = {
244 .possible_crtcs = BIT(1) | BIT(0),
245 .encoder_type = DRM_MODE_ENCODER_NONE,
246 },
247 },
248 .num_lvds = 0,
249};
250
251static const struct rcar_du_device_info rcar_du_r8a7790_info = {
252 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_ALIGN_128B
253 | RCAR_DU_FEATURE_DEFR8,
254 .num_crtcs = 3,
255 .routes = {
256 /* R8A7790 has one RGB output, two LVDS outputs and one
257 * (currently unsupported) TCON output.
258 */
259 [RCAR_DU_OUTPUT_DPAD0] = {
260 .possible_crtcs = BIT(2) | BIT(1) | BIT(0),
261 .encoder_type = DRM_MODE_ENCODER_NONE,
262 },
263 [RCAR_DU_OUTPUT_LVDS0] = {
264 .possible_crtcs = BIT(0),
265 .encoder_type = DRM_MODE_ENCODER_LVDS,
266 },
267 [RCAR_DU_OUTPUT_LVDS1] = {
268 .possible_crtcs = BIT(2) | BIT(1),
269 .encoder_type = DRM_MODE_ENCODER_LVDS,
270 },
271 },
272 .num_lvds = 2,
273};
274
275static const struct platform_device_id rcar_du_id_table[] = {
276 { "rcar-du-r8a7779", (kernel_ulong_t)&rcar_du_r8a7779_info },
277 { "rcar-du-r8a7790", (kernel_ulong_t)&rcar_du_r8a7790_info },
278 { }
279};
280
281MODULE_DEVICE_TABLE(platform, rcar_du_id_table);
282
316static struct platform_driver rcar_du_platform_driver = { 283static struct platform_driver rcar_du_platform_driver = {
317 .probe = rcar_du_probe, 284 .probe = rcar_du_probe,
318 .remove = rcar_du_remove, 285 .remove = rcar_du_remove,
@@ -321,6 +288,7 @@ static struct platform_driver rcar_du_platform_driver = {
321 .name = "rcar-du", 288 .name = "rcar-du",
322 .pm = &rcar_du_pm_ops, 289 .pm = &rcar_du_pm_ops,
323 }, 290 },
291 .id_table = rcar_du_id_table,
324}; 292};
325 293
326module_platform_driver(rcar_du_platform_driver); 294module_platform_driver(rcar_du_platform_driver);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 193cc59d495c..65d2d636b002 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -15,43 +15,74 @@
15#define __RCAR_DU_DRV_H__ 15#define __RCAR_DU_DRV_H__
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/mutex.h>
19#include <linux/platform_data/rcar-du.h> 18#include <linux/platform_data/rcar-du.h>
20 19
21#include "rcar_du_crtc.h" 20#include "rcar_du_crtc.h"
22#include "rcar_du_plane.h" 21#include "rcar_du_group.h"
23 22
24struct clk; 23struct clk;
25struct device; 24struct device;
26struct drm_device; 25struct drm_device;
26struct drm_fbdev_cma;
27struct rcar_du_device;
28struct rcar_du_lvdsenc;
29
30#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK (1 << 0) /* Per-CRTC IRQ and clock */
31#define RCAR_DU_FEATURE_ALIGN_128B (1 << 1) /* Align pitches to 128 bytes */
32#define RCAR_DU_FEATURE_DEFR8 (1 << 2) /* Has DEFR8 register */
33
34/*
35 * struct rcar_du_output_routing - Output routing specification
36 * @possible_crtcs: bitmask of possible CRTCs for the output
37 * @encoder_type: DRM type of the internal encoder associated with the output
38 *
39 * The DU has 5 possible outputs (DPAD0/1, LVDS0/1, TCON). Output routing data
40 * specify the valid SoC outputs, which CRTCs can drive the output, and the type
41 * of in-SoC encoder for the output.
42 */
43struct rcar_du_output_routing {
44 unsigned int possible_crtcs;
45 unsigned int encoder_type;
46};
47
48/*
49 * struct rcar_du_device_info - DU model-specific information
50 * @features: device features (RCAR_DU_FEATURE_*)
51 * @num_crtcs: total number of CRTCs
52 * @routes: array of CRTC to output routes, indexed by output (RCAR_DU_OUTPUT_*)
53 * @num_lvds: number of internal LVDS encoders
54 */
55struct rcar_du_device_info {
56 unsigned int features;
57 unsigned int num_crtcs;
58 struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX];
59 unsigned int num_lvds;
60};
27 61
28struct rcar_du_device { 62struct rcar_du_device {
29 struct device *dev; 63 struct device *dev;
30 const struct rcar_du_platform_data *pdata; 64 const struct rcar_du_platform_data *pdata;
65 const struct rcar_du_device_info *info;
31 66
32 void __iomem *mmio; 67 void __iomem *mmio;
33 struct clk *clock;
34 unsigned int use_count;
35 68
36 struct drm_device *ddev; 69 struct drm_device *ddev;
70 struct drm_fbdev_cma *fbdev;
37 71
38 struct rcar_du_crtc crtcs[2]; 72 struct rcar_du_crtc crtcs[3];
39 unsigned int used_crtcs;
40 unsigned int num_crtcs; 73 unsigned int num_crtcs;
41 74
42 struct { 75 struct rcar_du_group groups[2];
43 struct rcar_du_plane planes[RCAR_DU_NUM_SW_PLANES];
44 unsigned int free;
45 struct mutex lock;
46 76
47 struct drm_property *alpha; 77 unsigned int dpad0_source;
48 struct drm_property *colorkey; 78 struct rcar_du_lvdsenc *lvds[2];
49 struct drm_property *zpos;
50 } planes;
51}; 79};
52 80
53int rcar_du_get(struct rcar_du_device *rcdu); 81static inline bool rcar_du_has(struct rcar_du_device *rcdu,
54void rcar_du_put(struct rcar_du_device *rcdu); 82 unsigned int feature)
83{
84 return rcdu->info->features & feature;
85}
55 86
56static inline u32 rcar_du_read(struct rcar_du_device *rcdu, u32 reg) 87static inline u32 rcar_du_read(struct rcar_du_device *rcdu, u32 reg)
57{ 88{
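The new rcar_du_device_info structure above describes each SoC with a feature bitmask that rcar_du_has() tests at run time. The snippet below mimics that pattern outside the kernel: a per-model info table plus a helper that checks a flag. The names and values only loosely mirror the header and are not the driver's real definitions.

#include <stdbool.h>
#include <stdio.h>

#define FEATURE_CRTC_IRQ_CLOCK	(1u << 0)	/* per-CRTC IRQ and clock */
#define FEATURE_ALIGN_128B	(1u << 1)	/* pitches aligned to 128 bytes */
#define FEATURE_DEFR8		(1u << 2)	/* has the DEFR8 register */

struct model_info {
	const char *name;
	unsigned int features;
	unsigned int num_crtcs;
};

static bool model_has(const struct model_info *info, unsigned int feature)
{
	return (info->features & feature) != 0;
}

int main(void)
{
	static const struct model_info r8a7779 = { "r8a7779", 0, 2 };
	static const struct model_info r8a7790 = {
		"r8a7790",
		FEATURE_CRTC_IRQ_CLOCK | FEATURE_ALIGN_128B | FEATURE_DEFR8,
		3,
	};
	const struct model_info *models[] = { &r8a7779, &r8a7790 };
	unsigned int i;

	for (i = 0; i < 2; i++)
		printf("%s: %u CRTCs, per-CRTC IRQ/clock: %s\n",
		       models[i]->name, models[i]->num_crtcs,
		       model_has(models[i], FEATURE_CRTC_IRQ_CLOCK) ? "yes" : "no");
	return 0;
}

Keeping the per-SoC differences in a table like this is what lets the probe path stay generic: the driver only branches on feature bits, never on the SoC name.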
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
new file mode 100644
index 000000000000..3daa7a168dc6
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -0,0 +1,202 @@
1/*
2 * rcar_du_encoder.c -- R-Car Display Unit Encoder
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/export.h>
15
16#include <drm/drmP.h>
17#include <drm/drm_crtc.h>
18#include <drm/drm_crtc_helper.h>
19
20#include "rcar_du_drv.h"
21#include "rcar_du_encoder.h"
22#include "rcar_du_kms.h"
23#include "rcar_du_lvdscon.h"
24#include "rcar_du_lvdsenc.h"
25#include "rcar_du_vgacon.h"
26
27/* -----------------------------------------------------------------------------
28 * Common connector functions
29 */
30
31struct drm_encoder *
32rcar_du_connector_best_encoder(struct drm_connector *connector)
33{
34 struct rcar_du_connector *rcon = to_rcar_connector(connector);
35
36 return &rcon->encoder->encoder;
37}
38
39/* -----------------------------------------------------------------------------
40 * Encoder
41 */
42
43static void rcar_du_encoder_dpms(struct drm_encoder *encoder, int mode)
44{
45 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
46
47 if (renc->lvds)
48 rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc, mode);
49}
50
51static bool rcar_du_encoder_mode_fixup(struct drm_encoder *encoder,
52 const struct drm_display_mode *mode,
53 struct drm_display_mode *adjusted_mode)
54{
55 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
56 const struct drm_display_mode *panel_mode;
57 struct drm_device *dev = encoder->dev;
58 struct drm_connector *connector;
59 bool found = false;
60
 61 /* DAC encoders currently have no restrictions on the mode. */
62 if (encoder->encoder_type == DRM_MODE_ENCODER_DAC)
63 return true;
64
65 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
66 if (connector->encoder == encoder) {
67 found = true;
68 break;
69 }
70 }
71
72 if (!found) {
73 dev_dbg(dev->dev, "mode_fixup: no connector found\n");
74 return false;
75 }
76
77 if (list_empty(&connector->modes)) {
78 dev_dbg(dev->dev, "mode_fixup: empty modes list\n");
79 return false;
80 }
81
82 panel_mode = list_first_entry(&connector->modes,
83 struct drm_display_mode, head);
84
85 /* We're not allowed to modify the resolution. */
86 if (mode->hdisplay != panel_mode->hdisplay ||
87 mode->vdisplay != panel_mode->vdisplay)
88 return false;
89
90 /* The flat panel mode is fixed, just copy it to the adjusted mode. */
91 drm_mode_copy(adjusted_mode, panel_mode);
92
93 /* The internal LVDS encoder has a clock frequency operating range of
94 * 30MHz to 150MHz. Clamp the clock accordingly.
95 */
96 if (renc->lvds)
97 adjusted_mode->clock = clamp(adjusted_mode->clock,
98 30000, 150000);
99
100 return true;
101}
102
103static void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder)
104{
105 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
106
107 if (renc->lvds)
108 rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc,
109 DRM_MODE_DPMS_OFF);
110}
111
112static void rcar_du_encoder_mode_commit(struct drm_encoder *encoder)
113{
114 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
115
116 if (renc->lvds)
117 rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc,
118 DRM_MODE_DPMS_ON);
119}
120
121static void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
122 struct drm_display_mode *mode,
123 struct drm_display_mode *adjusted_mode)
124{
125 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
126
127 rcar_du_crtc_route_output(encoder->crtc, renc->output);
128}
129
130static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
131 .dpms = rcar_du_encoder_dpms,
132 .mode_fixup = rcar_du_encoder_mode_fixup,
133 .prepare = rcar_du_encoder_mode_prepare,
134 .commit = rcar_du_encoder_mode_commit,
135 .mode_set = rcar_du_encoder_mode_set,
136};
137
138static const struct drm_encoder_funcs encoder_funcs = {
139 .destroy = drm_encoder_cleanup,
140};
141
142int rcar_du_encoder_init(struct rcar_du_device *rcdu,
143 enum rcar_du_encoder_type type,
144 enum rcar_du_output output,
145 const struct rcar_du_encoder_data *data)
146{
147 struct rcar_du_encoder *renc;
148 unsigned int encoder_type;
149 int ret;
150
151 renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
152 if (renc == NULL)
153 return -ENOMEM;
154
155 renc->output = output;
156
157 switch (output) {
158 case RCAR_DU_OUTPUT_LVDS0:
159 renc->lvds = rcdu->lvds[0];
160 break;
161
162 case RCAR_DU_OUTPUT_LVDS1:
163 renc->lvds = rcdu->lvds[1];
164 break;
165
166 default:
167 break;
168 }
169
170 switch (type) {
171 case RCAR_DU_ENCODER_VGA:
172 encoder_type = DRM_MODE_ENCODER_DAC;
173 break;
174 case RCAR_DU_ENCODER_LVDS:
175 encoder_type = DRM_MODE_ENCODER_LVDS;
176 break;
177 case RCAR_DU_ENCODER_NONE:
178 default:
179 /* No external encoder, use the internal encoder type. */
180 encoder_type = rcdu->info->routes[output].encoder_type;
181 break;
182 }
183
184 ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
185 encoder_type);
186 if (ret < 0)
187 return ret;
188
189 drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
190
191 switch (encoder_type) {
192 case DRM_MODE_ENCODER_LVDS:
193 return rcar_du_lvds_connector_init(rcdu, renc,
194 &data->connector.lvds.panel);
195
196 case DRM_MODE_ENCODER_DAC:
197 return rcar_du_vga_connector_init(rcdu, renc);
198
199 default:
200 return -EINVAL;
201 }
202}
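The mode_fixup callback above pins the connector to its fixed panel mode and clamps the pixel clock to the internal LVDS encoder's 30-150 MHz operating range. Below is a standalone sketch of just that policy, using a hypothetical 1024x768 panel whose nominal clock exceeds the range.

#include <stdbool.h>
#include <stdio.h>

struct mode {
	int hdisplay;
	int vdisplay;
	int clock;	/* kHz, as in struct drm_display_mode */
};

#define CLAMP(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

/* Resolution changes are rejected; the panel mode is copied and its pixel
 * clock clamped to the 30-150 MHz LVDS range, mirroring the callback above.
 */
static bool fixup_lvds_mode(const struct mode *requested,
			    const struct mode *panel_mode,
			    struct mode *adjusted)
{
	if (requested->hdisplay != panel_mode->hdisplay ||
	    requested->vdisplay != panel_mode->vdisplay)
		return false;

	*adjusted = *panel_mode;
	adjusted->clock = CLAMP(adjusted->clock, 30000, 150000);
	return true;
}

int main(void)
{
	const struct mode panel = { 1024, 768, 165000 };	/* hypothetical panel */
	struct mode adjusted;

	if (fixup_lvds_mode(&panel, &panel, &adjusted))
		printf("clamped pixel clock: %d kHz\n", adjusted.clock);	/* 150000 */
	return 0;
}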
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
new file mode 100644
index 000000000000..0e5a65e45d0e
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -0,0 +1,49 @@
1/*
2 * rcar_du_encoder.h -- R-Car Display Unit Encoder
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __RCAR_DU_ENCODER_H__
15#define __RCAR_DU_ENCODER_H__
16
17#include <linux/platform_data/rcar-du.h>
18
19#include <drm/drm_crtc.h>
20
21struct rcar_du_device;
22struct rcar_du_lvdsenc;
23
24struct rcar_du_encoder {
25 struct drm_encoder encoder;
26 enum rcar_du_output output;
27 struct rcar_du_lvdsenc *lvds;
28};
29
30#define to_rcar_encoder(e) \
31 container_of(e, struct rcar_du_encoder, encoder)
32
33struct rcar_du_connector {
34 struct drm_connector connector;
35 struct rcar_du_encoder *encoder;
36};
37
38#define to_rcar_connector(c) \
39 container_of(c, struct rcar_du_connector, connector)
40
41struct drm_encoder *
42rcar_du_connector_best_encoder(struct drm_connector *connector);
43
44int rcar_du_encoder_init(struct rcar_du_device *rcdu,
45 enum rcar_du_encoder_type type,
46 enum rcar_du_output output,
47 const struct rcar_du_encoder_data *data);
48
49#endif /* __RCAR_DU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
new file mode 100644
index 000000000000..eb53cd97e8c6
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -0,0 +1,187 @@
1/*
2 * rcar_du_group.c -- R-Car Display Unit Channels Pair
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14/*
15 * The R8A7779 DU is split in per-CRTC resources (scan-out engine, blending
16 * unit, timings generator, ...) and device-global resources (start/stop
17 * control, planes, ...) shared between the two CRTCs.
18 *
19 * The R8A7790 introduced a third CRTC with its own set of global resources.
20 * This would be modeled as two separate DU device instances if it wasn't for
 21 * a handful of resources that are shared between the three CRTCs (mostly
22 * related to input and output routing). For this reason the R8A7790 DU must be
23 * modeled as a single device with three CRTCs, two sets of "semi-global"
24 * resources, and a few device-global resources.
25 *
 26 * The rcar_du_group object is a driver-specific object, with no real
 27 * counterpart in the DU documentation, that models those semi-global resources.
28 */
29
30#include <linux/clk.h>
31#include <linux/io.h>
32
33#include "rcar_du_drv.h"
34#include "rcar_du_group.h"
35#include "rcar_du_regs.h"
36
37u32 rcar_du_group_read(struct rcar_du_group *rgrp, u32 reg)
38{
39 return rcar_du_read(rgrp->dev, rgrp->mmio_offset + reg);
40}
41
42void rcar_du_group_write(struct rcar_du_group *rgrp, u32 reg, u32 data)
43{
44 rcar_du_write(rgrp->dev, rgrp->mmio_offset + reg, data);
45}
46
47static void rcar_du_group_setup_defr8(struct rcar_du_group *rgrp)
48{
49 u32 defr8 = DEFR8_CODE | DEFR8_DEFE8;
50
51 if (!rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_DEFR8))
52 return;
53
54 /* The DEFR8 register for the first group also controls RGB output
55 * routing to DPAD0
56 */
57 if (rgrp->index == 0)
58 defr8 |= DEFR8_DRGBS_DU(rgrp->dev->dpad0_source);
59
60 rcar_du_group_write(rgrp, DEFR8, defr8);
61}
62
63static void rcar_du_group_setup(struct rcar_du_group *rgrp)
64{
65 /* Enable extended features */
66 rcar_du_group_write(rgrp, DEFR, DEFR_CODE | DEFR_DEFE);
67 rcar_du_group_write(rgrp, DEFR2, DEFR2_CODE | DEFR2_DEFE2G);
68 rcar_du_group_write(rgrp, DEFR3, DEFR3_CODE | DEFR3_DEFE3);
69 rcar_du_group_write(rgrp, DEFR4, DEFR4_CODE);
70 rcar_du_group_write(rgrp, DEFR5, DEFR5_CODE | DEFR5_DEFE5);
71
72 rcar_du_group_setup_defr8(rgrp);
73
 74 /* Use DS1PR and DS2PR to configure plane priorities and connect
 75 * superposition 0 to the DU0 pins. The DU1 pins will be configured dynamically.
76 */
77 rcar_du_group_write(rgrp, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS);
78}
79
80/*
81 * rcar_du_group_get - Acquire a reference to the DU channels group
82 *
 83 * Acquiring the first reference sets up the core registers. A reference must be held
84 * before accessing any hardware registers.
85 *
86 * This function must be called with the DRM mode_config lock held.
87 *
88 * Return 0 in case of success or a negative error code otherwise.
89 */
90int rcar_du_group_get(struct rcar_du_group *rgrp)
91{
92 if (rgrp->use_count)
93 goto done;
94
95 rcar_du_group_setup(rgrp);
96
97done:
98 rgrp->use_count++;
99 return 0;
100}
101
102/*
 103 * rcar_du_group_put - Release a reference to the DU channels group
104 *
105 * This function must be called with the DRM mode_config lock held.
106 */
107void rcar_du_group_put(struct rcar_du_group *rgrp)
108{
109 --rgrp->use_count;
110}
111
112static void __rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
113{
114 rcar_du_group_write(rgrp, DSYSR,
115 (rcar_du_group_read(rgrp, DSYSR) & ~(DSYSR_DRES | DSYSR_DEN)) |
116 (start ? DSYSR_DEN : DSYSR_DRES));
117}
118
119void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
120{
121 /* Many of the configuration bits are only updated when the display
122 * reset (DRES) bit in DSYSR is set to 1, disabling *both* CRTCs. Some
123 * of those bits could be pre-configured, but others (especially the
124 * bits related to plane assignment to display timing controllers) need
125 * to be modified at runtime.
126 *
127 * Restart the display controller if a start is requested. Sorry for the
128 * flicker. It should be possible to move most of the "DRES-update" bits
129 * setup to driver initialization time and minimize the number of cases
130 * when the display controller will have to be restarted.
131 */
132 if (start) {
133 if (rgrp->used_crtcs++ != 0)
134 __rcar_du_group_start_stop(rgrp, false);
135 __rcar_du_group_start_stop(rgrp, true);
136 } else {
137 if (--rgrp->used_crtcs == 0)
138 __rcar_du_group_start_stop(rgrp, false);
139 }
140}
141
142void rcar_du_group_restart(struct rcar_du_group *rgrp)
143{
144 __rcar_du_group_start_stop(rgrp, false);
145 __rcar_du_group_start_stop(rgrp, true);
146}
147
148static int rcar_du_set_dpad0_routing(struct rcar_du_device *rcdu)
149{
150 int ret;
151
152 /* RGB output routing to DPAD0 is configured in the DEFR8 register of
153 * the first group. As this function can be called with the DU0 and DU1
154 * CRTCs disabled, we need to enable the first group clock before
155 * accessing the register.
156 */
157 ret = clk_prepare_enable(rcdu->crtcs[0].clock);
158 if (ret < 0)
159 return ret;
160
161 rcar_du_group_setup_defr8(&rcdu->groups[0]);
162
163 clk_disable_unprepare(rcdu->crtcs[0].clock);
164
165 return 0;
166}
167
168int rcar_du_group_set_routing(struct rcar_du_group *rgrp)
169{
170 struct rcar_du_crtc *crtc0 = &rgrp->dev->crtcs[rgrp->index * 2];
171 u32 dorcr = rcar_du_group_read(rgrp, DORCR);
172
173 dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK);
174
 175 /* Set the DPAD1 pin sources. Select CRTC 0 if explicitly requested and
176 * CRTC 1 in all other cases to avoid cloning CRTC 0 to DPAD0 and DPAD1
177 * by default.
178 */
179 if (crtc0->outputs & BIT(RCAR_DU_OUTPUT_DPAD1))
180 dorcr |= DORCR_PG2D_DS1;
181 else
182 dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2;
183
184 rcar_du_group_write(rgrp, DORCR, dorcr);
185
186 return rcar_du_set_dpad0_routing(rgrp->dev);
187}
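rcar_du_group_start_stop() above restarts a whole group whenever an additional CRTC is started, because many configuration bits are only latched while the DRES bit is set. Below is a standalone sketch of the DSYSR read-modify-write and the restart sequence; the DRES/DEN bit positions used here are placeholders, not the documented register layout.

#include <stdbool.h>
#include <stdio.h>

#define DSYSR_DRES (1u << 9)	/* placeholder bit positions, for illustration */
#define DSYSR_DEN  (1u << 8)

static unsigned int dsysr;	/* stands in for the group's DSYSR register */

/* Mirrors __rcar_du_group_start_stop(): clear both DRES and DEN, then set
 * exactly one of them depending on whether the group is started or stopped.
 */
static void group_start_stop(bool start)
{
	dsysr = (dsysr & ~(DSYSR_DRES | DSYSR_DEN)) |
		(start ? DSYSR_DEN : DSYSR_DRES);
}

int main(void)
{
	unsigned int used_crtcs = 0;

	/* First CRTC start: simply enable the group. */
	if (used_crtcs++ != 0)
		group_start_stop(false);
	group_start_stop(true);
	printf("DSYSR after first start: 0x%x\n", dsysr);

	/* Second CRTC start: restart the group so DRES-latched bits update. */
	if (used_crtcs++ != 0)
		group_start_stop(false);
	group_start_stop(true);
	printf("DSYSR after restart: 0x%x\n", dsysr);
	return 0;
}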
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h
new file mode 100644
index 000000000000..5025930972ec
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h
@@ -0,0 +1,50 @@
1/*
 2 * rcar_du_group.h -- R-Car Display Unit Planes and CRTCs Group
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __RCAR_DU_GROUP_H__
15#define __RCAR_DU_GROUP_H__
16
17#include "rcar_du_plane.h"
18
19struct rcar_du_device;
20
21/*
22 * struct rcar_du_group - CRTCs and planes group
23 * @dev: the DU device
24 * @mmio_offset: registers offset in the device memory map
25 * @index: group index
26 * @use_count: number of users of the group (rcar_du_group_(get|put))
27 * @used_crtcs: number of CRTCs currently in use
28 * @planes: planes handled by the group
29 */
30struct rcar_du_group {
31 struct rcar_du_device *dev;
32 unsigned int mmio_offset;
33 unsigned int index;
34
35 unsigned int use_count;
36 unsigned int used_crtcs;
37
38 struct rcar_du_planes planes;
39};
40
41u32 rcar_du_group_read(struct rcar_du_group *rgrp, u32 reg);
42void rcar_du_group_write(struct rcar_du_group *rgrp, u32 reg, u32 data);
43
44int rcar_du_group_get(struct rcar_du_group *rgrp);
45void rcar_du_group_put(struct rcar_du_group *rgrp);
46void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start);
47void rcar_du_group_restart(struct rcar_du_group *rgrp);
48int rcar_du_group_set_routing(struct rcar_du_group *rgrp);
49
50#endif /* __RCAR_DU_GROUP_H__ */
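The group abstraction maps CRTCs onto pairs: CRTC i belongs to group i / 2, and the number of groups is DIV_ROUND_UP(num_crtcs, 2), as used by rcar_du_modeset_init() below. A tiny standalone illustration of that mapping:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Each group drives a pair of CRTCs; CRTC i belongs to group i / 2. */
	unsigned int num_crtcs = 3;	/* e.g. a three-CRTC DU */
	unsigned int num_groups = DIV_ROUND_UP(num_crtcs, 2);

	printf("%u CRTCs -> %u group(s)\n", num_crtcs, num_groups);
	for (unsigned int i = 0; i < num_crtcs; ++i)
		printf("CRTC %u -> group %u\n", i, i / 2);
	return 0;
}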
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index d30c2e29bee2..b31ac080c4a7 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -19,10 +19,10 @@
19 19
20#include "rcar_du_crtc.h" 20#include "rcar_du_crtc.h"
21#include "rcar_du_drv.h" 21#include "rcar_du_drv.h"
22#include "rcar_du_encoder.h"
22#include "rcar_du_kms.h" 23#include "rcar_du_kms.h"
23#include "rcar_du_lvds.h" 24#include "rcar_du_lvdsenc.h"
24#include "rcar_du_regs.h" 25#include "rcar_du_regs.h"
25#include "rcar_du_vga.h"
26 26
27/* ----------------------------------------------------------------------------- 27/* -----------------------------------------------------------------------------
28 * Format helpers 28 * Format helpers
@@ -106,46 +106,24 @@ const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
106} 106}
107 107
108/* ----------------------------------------------------------------------------- 108/* -----------------------------------------------------------------------------
109 * Common connector and encoder functions
110 */
111
112struct drm_encoder *
113rcar_du_connector_best_encoder(struct drm_connector *connector)
114{
115 struct rcar_du_connector *rcon = to_rcar_connector(connector);
116
117 return &rcon->encoder->encoder;
118}
119
120void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder)
121{
122}
123
124void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
125 struct drm_display_mode *mode,
126 struct drm_display_mode *adjusted_mode)
127{
128 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
129
130 rcar_du_crtc_route_output(encoder->crtc, renc->output);
131}
132
133void rcar_du_encoder_mode_commit(struct drm_encoder *encoder)
134{
135}
136
137/* -----------------------------------------------------------------------------
138 * Frame buffer 109 * Frame buffer
139 */ 110 */
140 111
141int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev, 112int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
142 struct drm_mode_create_dumb *args) 113 struct drm_mode_create_dumb *args)
143{ 114{
115 struct rcar_du_device *rcdu = dev->dev_private;
144 unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); 116 unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
145 unsigned int align; 117 unsigned int align;
146 118
147 /* The pitch must be aligned to a 16 pixels boundary. */ 119 /* The R8A7779 DU requires a 16 pixels pitch alignment as documented,
148 align = 16 * args->bpp / 8; 120 * but the R8A7790 DU seems to require a 128 bytes pitch alignment.
121 */
122 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B))
123 align = 128;
124 else
125 align = 16 * args->bpp / 8;
126
149 args->pitch = roundup(max(args->pitch, min_pitch), align); 127 args->pitch = roundup(max(args->pitch, min_pitch), align);
150 128
151 return drm_gem_cma_dumb_create(file, dev, args); 129 return drm_gem_cma_dumb_create(file, dev, args);
@@ -155,6 +133,7 @@ static struct drm_framebuffer *
155rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, 133rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
156 struct drm_mode_fb_cmd2 *mode_cmd) 134 struct drm_mode_fb_cmd2 *mode_cmd)
157{ 135{
136 struct rcar_du_device *rcdu = dev->dev_private;
158 const struct rcar_du_format_info *format; 137 const struct rcar_du_format_info *format;
159 unsigned int align; 138 unsigned int align;
160 139
@@ -165,7 +144,10 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
165 return ERR_PTR(-EINVAL); 144 return ERR_PTR(-EINVAL);
166 } 145 }
167 146
168 align = 16 * format->bpp / 8; 147 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B))
148 align = 128;
149 else
150 align = 16 * format->bpp / 8;
169 151
170 if (mode_cmd->pitches[0] & (align - 1) || 152 if (mode_cmd->pitches[0] & (align - 1) ||
171 mode_cmd->pitches[0] >= 8192) { 153 mode_cmd->pitches[0] >= 8192) {
@@ -185,81 +167,124 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
185 return drm_fb_cma_create(dev, file_priv, mode_cmd); 167 return drm_fb_cma_create(dev, file_priv, mode_cmd);
186} 168}
187 169
170static void rcar_du_output_poll_changed(struct drm_device *dev)
171{
172 struct rcar_du_device *rcdu = dev->dev_private;
173
174 drm_fbdev_cma_hotplug_event(rcdu->fbdev);
175}
176
188static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = { 177static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
189 .fb_create = rcar_du_fb_create, 178 .fb_create = rcar_du_fb_create,
179 .output_poll_changed = rcar_du_output_poll_changed,
190}; 180};
191 181
192int rcar_du_modeset_init(struct rcar_du_device *rcdu) 182int rcar_du_modeset_init(struct rcar_du_device *rcdu)
193{ 183{
184 static const unsigned int mmio_offsets[] = {
185 DU0_REG_OFFSET, DU2_REG_OFFSET
186 };
187
194 struct drm_device *dev = rcdu->ddev; 188 struct drm_device *dev = rcdu->ddev;
195 struct drm_encoder *encoder; 189 struct drm_encoder *encoder;
190 struct drm_fbdev_cma *fbdev;
191 unsigned int num_groups;
196 unsigned int i; 192 unsigned int i;
197 int ret; 193 int ret;
198 194
199 drm_mode_config_init(rcdu->ddev); 195 drm_mode_config_init(dev);
200 196
201 rcdu->ddev->mode_config.min_width = 0; 197 dev->mode_config.min_width = 0;
202 rcdu->ddev->mode_config.min_height = 0; 198 dev->mode_config.min_height = 0;
203 rcdu->ddev->mode_config.max_width = 4095; 199 dev->mode_config.max_width = 4095;
204 rcdu->ddev->mode_config.max_height = 2047; 200 dev->mode_config.max_height = 2047;
205 rcdu->ddev->mode_config.funcs = &rcar_du_mode_config_funcs; 201 dev->mode_config.funcs = &rcar_du_mode_config_funcs;
206 202
207 ret = rcar_du_plane_init(rcdu); 203 rcdu->num_crtcs = rcdu->info->num_crtcs;
208 if (ret < 0) 204
209 return ret; 205 /* Initialize the groups. */
206 num_groups = DIV_ROUND_UP(rcdu->num_crtcs, 2);
207
208 for (i = 0; i < num_groups; ++i) {
209 struct rcar_du_group *rgrp = &rcdu->groups[i];
210
211 rgrp->dev = rcdu;
212 rgrp->mmio_offset = mmio_offsets[i];
213 rgrp->index = i;
214
215 ret = rcar_du_planes_init(rgrp);
216 if (ret < 0)
217 return ret;
218 }
219
220 /* Create the CRTCs. */
221 for (i = 0; i < rcdu->num_crtcs; ++i) {
222 struct rcar_du_group *rgrp = &rcdu->groups[i / 2];
210 223
211 for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i) { 224 ret = rcar_du_crtc_create(rgrp, i);
212 ret = rcar_du_crtc_create(rcdu, i);
213 if (ret < 0) 225 if (ret < 0)
214 return ret; 226 return ret;
215 } 227 }
216 228
217 rcdu->used_crtcs = 0; 229 /* Initialize the encoders. */
218 rcdu->num_crtcs = i; 230 ret = rcar_du_lvdsenc_init(rcdu);
231 if (ret < 0)
232 return ret;
219 233
220 for (i = 0; i < rcdu->pdata->num_encoders; ++i) { 234 for (i = 0; i < rcdu->pdata->num_encoders; ++i) {
221 const struct rcar_du_encoder_data *pdata = 235 const struct rcar_du_encoder_data *pdata =
222 &rcdu->pdata->encoders[i]; 236 &rcdu->pdata->encoders[i];
237 const struct rcar_du_output_routing *route =
238 &rcdu->info->routes[pdata->output];
239
240 if (pdata->type == RCAR_DU_ENCODER_UNUSED)
241 continue;
223 242
224 if (pdata->output >= ARRAY_SIZE(rcdu->crtcs)) { 243 if (pdata->output >= RCAR_DU_OUTPUT_MAX ||
244 route->possible_crtcs == 0) {
225 dev_warn(rcdu->dev, 245 dev_warn(rcdu->dev,
226 "encoder %u references unexisting output %u, skipping\n", 246 "encoder %u references unexisting output %u, skipping\n",
227 i, pdata->output); 247 i, pdata->output);
228 continue; 248 continue;
229 } 249 }
230 250
231 switch (pdata->encoder) { 251 rcar_du_encoder_init(rcdu, pdata->type, pdata->output, pdata);
232 case RCAR_DU_ENCODER_VGA:
233 rcar_du_vga_init(rcdu, &pdata->u.vga, pdata->output);
234 break;
235
236 case RCAR_DU_ENCODER_LVDS:
237 rcar_du_lvds_init(rcdu, &pdata->u.lvds, pdata->output);
238 break;
239
240 default:
241 break;
242 }
243 } 252 }
244 253
245 /* Set the possible CRTCs and possible clones. All encoders can be 254 /* Set the possible CRTCs and possible clones. There's always at least
246 * driven by the CRTC associated with the output they're connected to, 255 * one way for all encoders to clone each other, set all bits in the
247 * as well as by CRTC 0. 256 * possible clones field.
248 */ 257 */
249 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 258 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
250 struct rcar_du_encoder *renc = to_rcar_encoder(encoder); 259 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
260 const struct rcar_du_output_routing *route =
261 &rcdu->info->routes[renc->output];
251 262
252 encoder->possible_crtcs = (1 << 0) | (1 << renc->output); 263 encoder->possible_crtcs = route->possible_crtcs;
253 encoder->possible_clones = 1 << 0; 264 encoder->possible_clones = (1 << rcdu->pdata->num_encoders) - 1;
254 } 265 }
255 266
256 ret = rcar_du_plane_register(rcdu); 267 /* Now that the CRTCs have been initialized register the planes. */
257 if (ret < 0) 268 for (i = 0; i < num_groups; ++i) {
258 return ret; 269 ret = rcar_du_planes_register(&rcdu->groups[i]);
270 if (ret < 0)
271 return ret;
272 }
273
274 drm_kms_helper_poll_init(dev);
275
276 drm_helper_disable_unused_functions(dev);
277
278 fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc,
279 dev->mode_config.num_connector);
280 if (IS_ERR(fbdev))
281 return PTR_ERR(fbdev);
259 282
260 drm_kms_helper_poll_init(rcdu->ddev); 283#ifndef CONFIG_FRAMEBUFFER_CONSOLE
284 drm_fbdev_cma_restore_mode(fbdev);
285#endif
261 286
262 drm_helper_disable_unused_functions(rcdu->ddev); 287 rcdu->fbdev = fbdev;
263 288
264 return 0; 289 return 0;
265} 290}
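rcar_du_dumb_create() above derives the minimum line pitch from the frame width and bits per pixel, then rounds the requested pitch up to either 128 bytes (models with RCAR_DU_FEATURE_ALIGN_128B) or 16 pixels. Below is a standalone sketch of that computation, with a width chosen so the two alignments give different results.

#include <stdio.h>

#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Mirrors the pitch computation above: min pitch is width * bpp / 8, rounded
 * up to 128 bytes on ALIGN_128B models or to 16 pixels otherwise.
 */
static unsigned int du_pitch(unsigned int width, unsigned int bpp,
			     unsigned int requested_pitch, int align_128b)
{
	unsigned int min_pitch = (width * bpp + 7) / 8;
	unsigned int align = align_128b ? 128 : 16 * bpp / 8;

	return ROUNDUP(MAX(requested_pitch, min_pitch), align);
}

int main(void)
{
	/* 1025x? XRGB8888: minimum pitch is 4100 bytes. */
	printf("16-pixel alignment: %u bytes\n", du_pitch(1025, 32, 0, 0)); /* 4160 */
	printf("128-byte alignment: %u bytes\n", du_pitch(1025, 32, 0, 1)); /* 4224 */
	return 0;
}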
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.h b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
index dba472263486..5750e6af5655 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
@@ -16,8 +16,9 @@
16 16
17#include <linux/types.h> 17#include <linux/types.h>
18 18
19#include <drm/drm_crtc.h> 19struct drm_file;
20 20struct drm_device;
21struct drm_mode_create_dumb;
21struct rcar_du_device; 22struct rcar_du_device;
22 23
23struct rcar_du_format_info { 24struct rcar_du_format_info {
@@ -28,32 +29,8 @@ struct rcar_du_format_info {
28 unsigned int edf; 29 unsigned int edf;
29}; 30};
30 31
31struct rcar_du_encoder {
32 struct drm_encoder encoder;
33 unsigned int output;
34};
35
36#define to_rcar_encoder(e) \
37 container_of(e, struct rcar_du_encoder, encoder)
38
39struct rcar_du_connector {
40 struct drm_connector connector;
41 struct rcar_du_encoder *encoder;
42};
43
44#define to_rcar_connector(c) \
45 container_of(c, struct rcar_du_connector, connector)
46
47const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc); 32const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc);
48 33
49struct drm_encoder *
50rcar_du_connector_best_encoder(struct drm_connector *connector);
51void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder);
52void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
53 struct drm_display_mode *mode,
54 struct drm_display_mode *adjusted_mode);
55void rcar_du_encoder_mode_commit(struct drm_encoder *encoder);
56
57int rcar_du_modeset_init(struct rcar_du_device *rcdu); 34int rcar_du_modeset_init(struct rcar_du_device *rcdu);
58 35
59int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev, 36int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvds.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 7aefe7267e1d..4f3ba93cd91d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * rcar_du_lvds.c -- R-Car Display Unit LVDS Encoder and Connector 2 * rcar_du_lvdscon.c -- R-Car Display Unit LVDS Connector
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013 Renesas Corporation
5 * 5 *
@@ -16,8 +16,9 @@
16#include <drm/drm_crtc_helper.h> 16#include <drm/drm_crtc_helper.h>
17 17
18#include "rcar_du_drv.h" 18#include "rcar_du_drv.h"
19#include "rcar_du_encoder.h"
19#include "rcar_du_kms.h" 20#include "rcar_du_kms.h"
20#include "rcar_du_lvds.h" 21#include "rcar_du_lvdscon.h"
21 22
22struct rcar_du_lvds_connector { 23struct rcar_du_lvds_connector {
23 struct rcar_du_connector connector; 24 struct rcar_du_connector connector;
@@ -28,13 +29,10 @@ struct rcar_du_lvds_connector {
28#define to_rcar_lvds_connector(c) \ 29#define to_rcar_lvds_connector(c) \
29 container_of(c, struct rcar_du_lvds_connector, connector.connector) 30 container_of(c, struct rcar_du_lvds_connector, connector.connector)
30 31
31/* -----------------------------------------------------------------------------
32 * Connector
33 */
34
35static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector) 32static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector)
36{ 33{
37 struct rcar_du_lvds_connector *lvdscon = to_rcar_lvds_connector(connector); 34 struct rcar_du_lvds_connector *lvdscon =
35 to_rcar_lvds_connector(connector);
38 struct drm_display_mode *mode; 36 struct drm_display_mode *mode;
39 37
40 mode = drm_mode_create(connector->dev); 38 mode = drm_mode_create(connector->dev);
@@ -90,9 +88,9 @@ static const struct drm_connector_funcs connector_funcs = {
90 .destroy = rcar_du_lvds_connector_destroy, 88 .destroy = rcar_du_lvds_connector_destroy,
91}; 89};
92 90
93static int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu, 91int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
94 struct rcar_du_encoder *renc, 92 struct rcar_du_encoder *renc,
95 const struct rcar_du_panel_data *panel) 93 const struct rcar_du_panel_data *panel)
96{ 94{
97 struct rcar_du_lvds_connector *lvdscon; 95 struct rcar_du_lvds_connector *lvdscon;
98 struct drm_connector *connector; 96 struct drm_connector *connector;
@@ -131,86 +129,3 @@ static int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
131 129
132 return 0; 130 return 0;
133} 131}
134
135/* -----------------------------------------------------------------------------
136 * Encoder
137 */
138
139static void rcar_du_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
140{
141}
142
143static bool rcar_du_lvds_encoder_mode_fixup(struct drm_encoder *encoder,
144 const struct drm_display_mode *mode,
145 struct drm_display_mode *adjusted_mode)
146{
147 const struct drm_display_mode *panel_mode;
148 struct drm_device *dev = encoder->dev;
149 struct drm_connector *connector;
150 bool found = false;
151
152 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
153 if (connector->encoder == encoder) {
154 found = true;
155 break;
156 }
157 }
158
159 if (!found) {
160 dev_dbg(dev->dev, "mode_fixup: no connector found\n");
161 return false;
162 }
163
164 if (list_empty(&connector->modes)) {
165 dev_dbg(dev->dev, "mode_fixup: empty modes list\n");
166 return false;
167 }
168
169 panel_mode = list_first_entry(&connector->modes,
170 struct drm_display_mode, head);
171
172 /* We're not allowed to modify the resolution. */
173 if (mode->hdisplay != panel_mode->hdisplay ||
174 mode->vdisplay != panel_mode->vdisplay)
175 return false;
176
177 /* The flat panel mode is fixed, just copy it to the adjusted mode. */
178 drm_mode_copy(adjusted_mode, panel_mode);
179
180 return true;
181}
182
183static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
184 .dpms = rcar_du_lvds_encoder_dpms,
185 .mode_fixup = rcar_du_lvds_encoder_mode_fixup,
186 .prepare = rcar_du_encoder_mode_prepare,
187 .commit = rcar_du_encoder_mode_commit,
188 .mode_set = rcar_du_encoder_mode_set,
189};
190
191static const struct drm_encoder_funcs encoder_funcs = {
192 .destroy = drm_encoder_cleanup,
193};
194
195int rcar_du_lvds_init(struct rcar_du_device *rcdu,
196 const struct rcar_du_encoder_lvds_data *data,
197 unsigned int output)
198{
199 struct rcar_du_encoder *renc;
200 int ret;
201
202 renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
203 if (renc == NULL)
204 return -ENOMEM;
205
206 renc->output = output;
207
208 ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
209 DRM_MODE_ENCODER_LVDS);
210 if (ret < 0)
211 return ret;
212
213 drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
214
215 return rcar_du_lvds_connector_init(rcdu, renc, &data->panel);
216}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvds.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
index b47f8328e103..bff8683699ca 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvds.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * rcar_du_lvds.h -- R-Car Display Unit LVDS Encoder and Connector 2 * rcar_du_lvdscon.h -- R-Car Display Unit LVDS Connector
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013 Renesas Corporation
5 * 5 *
@@ -11,14 +11,15 @@
11 * (at your option) any later version. 11 * (at your option) any later version.
12 */ 12 */
13 13
14#ifndef __RCAR_DU_LVDS_H__ 14#ifndef __RCAR_DU_LVDSCON_H__
15#define __RCAR_DU_LVDS_H__ 15#define __RCAR_DU_LVDSCON_H__
16 16
17struct rcar_du_device; 17struct rcar_du_device;
18struct rcar_du_encoder_lvds_data; 18struct rcar_du_encoder;
19struct rcar_du_panel_data;
19 20
20int rcar_du_lvds_init(struct rcar_du_device *rcdu, 21int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
21 const struct rcar_du_encoder_lvds_data *data, 22 struct rcar_du_encoder *renc,
22 unsigned int output); 23 const struct rcar_du_panel_data *panel);
23 24
24#endif /* __RCAR_DU_LVDS_H__ */ 25#endif /* __RCAR_DU_LVDSCON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
new file mode 100644
index 000000000000..a0f6a1781925
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
@@ -0,0 +1,196 @@
1/*
2 * rcar_du_lvdsenc.c -- R-Car Display Unit LVDS Encoder
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/io.h>
17#include <linux/platform_device.h>
18#include <linux/slab.h>
19
20#include "rcar_du_drv.h"
21#include "rcar_du_encoder.h"
22#include "rcar_du_lvdsenc.h"
23#include "rcar_lvds_regs.h"
24
25struct rcar_du_lvdsenc {
26 struct rcar_du_device *dev;
27
28 unsigned int index;
29 void __iomem *mmio;
30 struct clk *clock;
31 int dpms;
32
33 enum rcar_lvds_input input;
34};
35
36static void rcar_lvds_write(struct rcar_du_lvdsenc *lvds, u32 reg, u32 data)
37{
38 iowrite32(data, lvds->mmio + reg);
39}
40
41static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
42 struct rcar_du_crtc *rcrtc)
43{
44 const struct drm_display_mode *mode = &rcrtc->crtc.mode;
45 unsigned int freq = mode->clock;
46 u32 lvdcr0;
47 u32 pllcr;
48 int ret;
49
50 if (lvds->dpms == DRM_MODE_DPMS_ON)
51 return 0;
52
53 ret = clk_prepare_enable(lvds->clock);
54 if (ret < 0)
55 return ret;
56
57 /* PLL clock configuration */
58 if (freq <= 38000)
59 pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M;
60 else if (freq <= 60000)
61 pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M;
62 else if (freq <= 121000)
63 pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M;
64 else
65 pllcr = LVDPLLCR_PLLDLYCNT_150M;
66
67 rcar_lvds_write(lvds, LVDPLLCR, pllcr);
68
 69 /* Hardcode the channel and control signal routing for now.
70 *
71 * HSYNC -> CTRL0
72 * VSYNC -> CTRL1
73 * DISP -> CTRL2
74 * 0 -> CTRL3
75 *
76 * Channels 1 and 3 are switched on ES1.
77 */
78 rcar_lvds_write(lvds, LVDCTRCR, LVDCTRCR_CTR3SEL_ZERO |
79 LVDCTRCR_CTR2SEL_DISP | LVDCTRCR_CTR1SEL_VSYNC |
80 LVDCTRCR_CTR0SEL_HSYNC);
81 rcar_lvds_write(lvds, LVDCHCR,
82 LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 3) |
83 LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 1));
84
85 /* Select the input, hardcode mode 0, enable LVDS operation and turn
86 * bias circuitry on.
87 */
88 lvdcr0 = LVDCR0_BEN | LVDCR0_LVEN;
89 if (rcrtc->index == 2)
90 lvdcr0 |= LVDCR0_DUSEL;
91 rcar_lvds_write(lvds, LVDCR0, lvdcr0);
92
93 /* Turn all the channels on. */
94 rcar_lvds_write(lvds, LVDCR1, LVDCR1_CHSTBY(3) | LVDCR1_CHSTBY(2) |
95 LVDCR1_CHSTBY(1) | LVDCR1_CHSTBY(0) | LVDCR1_CLKSTBY);
96
97 /* Turn the PLL on, wait for the startup delay, and turn the output
98 * on.
99 */
100 lvdcr0 |= LVDCR0_PLLEN;
101 rcar_lvds_write(lvds, LVDCR0, lvdcr0);
102
103 usleep_range(100, 150);
104
105 lvdcr0 |= LVDCR0_LVRES;
106 rcar_lvds_write(lvds, LVDCR0, lvdcr0);
107
108 lvds->dpms = DRM_MODE_DPMS_ON;
109 return 0;
110}
111
112static void rcar_du_lvdsenc_stop(struct rcar_du_lvdsenc *lvds)
113{
114 if (lvds->dpms == DRM_MODE_DPMS_OFF)
115 return;
116
117 rcar_lvds_write(lvds, LVDCR0, 0);
118 rcar_lvds_write(lvds, LVDCR1, 0);
119
120 clk_disable_unprepare(lvds->clock);
121
122 lvds->dpms = DRM_MODE_DPMS_OFF;
123}
124
125int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
126 struct drm_crtc *crtc, int mode)
127{
128 if (mode == DRM_MODE_DPMS_OFF) {
129 rcar_du_lvdsenc_stop(lvds);
130 return 0;
131 } else if (crtc) {
132 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
133 return rcar_du_lvdsenc_start(lvds, rcrtc);
134 } else
135 return -EINVAL;
136}
137
138static int rcar_du_lvdsenc_get_resources(struct rcar_du_lvdsenc *lvds,
139 struct platform_device *pdev)
140{
141 struct resource *mem;
142 char name[7];
143
144 sprintf(name, "lvds.%u", lvds->index);
145
146 mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
147 if (mem == NULL) {
148 dev_err(&pdev->dev, "failed to get memory resource for %s\n",
149 name);
150 return -EINVAL;
151 }
152
153 lvds->mmio = devm_ioremap_resource(&pdev->dev, mem);
 154 if (IS_ERR(lvds->mmio)) {
 155 dev_err(&pdev->dev, "failed to remap memory resource for %s\n",
 156 name);
 157 return PTR_ERR(lvds->mmio);
 158 }
159
160 lvds->clock = devm_clk_get(&pdev->dev, name);
161 if (IS_ERR(lvds->clock)) {
162 dev_err(&pdev->dev, "failed to get clock for %s\n", name);
163 return PTR_ERR(lvds->clock);
164 }
165
166 return 0;
167}
168
169int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
170{
171 struct platform_device *pdev = to_platform_device(rcdu->dev);
172 struct rcar_du_lvdsenc *lvds;
173 unsigned int i;
174 int ret;
175
176 for (i = 0; i < rcdu->info->num_lvds; ++i) {
177 lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
178 if (lvds == NULL) {
179 dev_err(&pdev->dev, "failed to allocate private data\n");
180 return -ENOMEM;
181 }
182
183 lvds->dev = rcdu;
184 lvds->index = i;
185 lvds->input = i ? RCAR_LVDS_INPUT_DU1 : RCAR_LVDS_INPUT_DU0;
186 lvds->dpms = DRM_MODE_DPMS_OFF;
187
188 ret = rcar_du_lvdsenc_get_resources(lvds, pdev);
189 if (ret < 0)
190 return ret;
191
192 rcdu->lvds[i] = lvds;
193 }
194
195 return 0;
196}
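rcar_du_lvdsenc_start() above picks the LVDS PLL delay count from the pixel clock, with breakpoints at 38, 60, 121 and 150 MHz. Below is a standalone sketch of the range selection only; the actual LVDPLLCR bit values are deliberately omitted.

#include <stdio.h>

enum pll_range { PLL_38M, PLL_60M, PLL_121M, PLL_150M };

/* Same breakpoints as the LVDPLLCR selection above (frequencies in kHz). */
static enum pll_range pll_range_for(unsigned int freq_khz)
{
	if (freq_khz <= 38000)
		return PLL_38M;
	if (freq_khz <= 60000)
		return PLL_60M;
	if (freq_khz <= 121000)
		return PLL_121M;
	return PLL_150M;
}

int main(void)
{
	static const char *names[] = { "<=38MHz", "<=60MHz", "<=121MHz", "150MHz" };
	unsigned int clocks[] = { 33000, 65000, 148500 };

	for (unsigned int i = 0; i < 3; ++i)
		printf("%u kHz -> %s range\n", clocks[i],
		       names[pll_range_for(clocks[i])]);
	return 0;
}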
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
new file mode 100644
index 000000000000..7051c6de19ae
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
@@ -0,0 +1,46 @@
1/*
2 * rcar_du_lvdsenc.h -- R-Car Display Unit LVDS Encoder
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __RCAR_DU_LVDSENC_H__
15#define __RCAR_DU_LVDSENC_H__
16
17#include <linux/io.h>
18#include <linux/module.h>
19#include <linux/platform_data/rcar-du.h>
20
21struct rcar_drm_crtc;
22struct rcar_du_lvdsenc;
23
24enum rcar_lvds_input {
25 RCAR_LVDS_INPUT_DU0,
26 RCAR_LVDS_INPUT_DU1,
27 RCAR_LVDS_INPUT_DU2,
28};
29
30#if IS_ENABLED(CONFIG_DRM_RCAR_LVDS)
31int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu);
32int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
33 struct drm_crtc *crtc, int mode);
34#else
35static inline int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
36{
37 return 0;
38}
39static inline int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
40 struct drm_crtc *crtc, int mode)
41{
42 return 0;
43}
44#endif
45
46#endif /* __RCAR_DU_LVDSENC_H__ */
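The header above uses the usual kernel pattern of static inline stubs guarded by IS_ENABLED(), so callers need no #ifdefs when LVDS encoder support is compiled out. A generic standalone illustration of that pattern; HAVE_LVDS is a stand-in for the Kconfig option, not a real symbol.

#include <stdio.h>

/* Build with -DHAVE_LVDS=1 for the real implementation, or without it for
 * the no-op stub -- the caller stays free of #ifdefs either way.
 */
#ifndef HAVE_LVDS
#define HAVE_LVDS 0
#endif

#if HAVE_LVDS
static int lvds_init(void)
{
	printf("initializing LVDS encoders\n");
	return 0;
}
#else
static inline int lvds_init(void)
{
	return 0;	/* nothing to do when the feature is compiled out */
}
#endif

int main(void)
{
	return lvds_init();
}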
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index a65f81ddf51d..53000644733f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -36,90 +36,95 @@ static inline struct rcar_du_plane *to_rcar_plane(struct drm_plane *plane)
36 return container_of(plane, struct rcar_du_kms_plane, plane)->hwplane; 36 return container_of(plane, struct rcar_du_kms_plane, plane)->hwplane;
37} 37}
38 38
39static u32 rcar_du_plane_read(struct rcar_du_device *rcdu, 39static u32 rcar_du_plane_read(struct rcar_du_group *rgrp,
40 unsigned int index, u32 reg) 40 unsigned int index, u32 reg)
41{ 41{
42 return rcar_du_read(rcdu, index * PLANE_OFF + reg); 42 return rcar_du_read(rgrp->dev,
43 rgrp->mmio_offset + index * PLANE_OFF + reg);
43} 44}
44 45
45static void rcar_du_plane_write(struct rcar_du_device *rcdu, 46static void rcar_du_plane_write(struct rcar_du_group *rgrp,
46 unsigned int index, u32 reg, u32 data) 47 unsigned int index, u32 reg, u32 data)
47{ 48{
48 rcar_du_write(rcdu, index * PLANE_OFF + reg, data); 49 rcar_du_write(rgrp->dev, rgrp->mmio_offset + index * PLANE_OFF + reg,
50 data);
49} 51}
50 52
51int rcar_du_plane_reserve(struct rcar_du_plane *plane, 53int rcar_du_plane_reserve(struct rcar_du_plane *plane,
52 const struct rcar_du_format_info *format) 54 const struct rcar_du_format_info *format)
53{ 55{
54 struct rcar_du_device *rcdu = plane->dev; 56 struct rcar_du_group *rgrp = plane->group;
55 unsigned int i; 57 unsigned int i;
56 int ret = -EBUSY; 58 int ret = -EBUSY;
57 59
58 mutex_lock(&rcdu->planes.lock); 60 mutex_lock(&rgrp->planes.lock);
59 61
60 for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) { 62 for (i = 0; i < ARRAY_SIZE(rgrp->planes.planes); ++i) {
61 if (!(rcdu->planes.free & (1 << i))) 63 if (!(rgrp->planes.free & (1 << i)))
62 continue; 64 continue;
63 65
64 if (format->planes == 1 || 66 if (format->planes == 1 ||
65 rcdu->planes.free & (1 << ((i + 1) % 8))) 67 rgrp->planes.free & (1 << ((i + 1) % 8)))
66 break; 68 break;
67 } 69 }
68 70
69 if (i == ARRAY_SIZE(rcdu->planes.planes)) 71 if (i == ARRAY_SIZE(rgrp->planes.planes))
70 goto done; 72 goto done;
71 73
72 rcdu->planes.free &= ~(1 << i); 74 rgrp->planes.free &= ~(1 << i);
73 if (format->planes == 2) 75 if (format->planes == 2)
74 rcdu->planes.free &= ~(1 << ((i + 1) % 8)); 76 rgrp->planes.free &= ~(1 << ((i + 1) % 8));
75 77
76 plane->hwindex = i; 78 plane->hwindex = i;
77 79
78 ret = 0; 80 ret = 0;
79 81
80done: 82done:
81 mutex_unlock(&rcdu->planes.lock); 83 mutex_unlock(&rgrp->planes.lock);
82 return ret; 84 return ret;
83} 85}
84 86
85void rcar_du_plane_release(struct rcar_du_plane *plane) 87void rcar_du_plane_release(struct rcar_du_plane *plane)
86{ 88{
87 struct rcar_du_device *rcdu = plane->dev; 89 struct rcar_du_group *rgrp = plane->group;
88 90
89 if (plane->hwindex == -1) 91 if (plane->hwindex == -1)
90 return; 92 return;
91 93
92 mutex_lock(&rcdu->planes.lock); 94 mutex_lock(&rgrp->planes.lock);
93 rcdu->planes.free |= 1 << plane->hwindex; 95 rgrp->planes.free |= 1 << plane->hwindex;
94 if (plane->format->planes == 2) 96 if (plane->format->planes == 2)
95 rcdu->planes.free |= 1 << ((plane->hwindex + 1) % 8); 97 rgrp->planes.free |= 1 << ((plane->hwindex + 1) % 8);
96 mutex_unlock(&rcdu->planes.lock); 98 mutex_unlock(&rgrp->planes.lock);
97 99
98 plane->hwindex = -1; 100 plane->hwindex = -1;
99} 101}
100 102
101void rcar_du_plane_update_base(struct rcar_du_plane *plane) 103void rcar_du_plane_update_base(struct rcar_du_plane *plane)
102{ 104{
103 struct rcar_du_device *rcdu = plane->dev; 105 struct rcar_du_group *rgrp = plane->group;
104 unsigned int index = plane->hwindex; 106 unsigned int index = plane->hwindex;
105 107
106 /* According to the datasheet the Y position is expressed in raster line 108 /* The Y position is expressed in raster line units and must be doubled
107 * units. However, 32bpp formats seem to require a doubled Y position 109 * for 32bpp formats, according to the R8A7790 datasheet. No mention of
108 * value. Similarly, for the second plane, NV12 and NV21 formats seem to 110 * doubling the Y position is found in the R8A7779 datasheet, but the
111 * rule seems to apply there as well.
112 *
113 * Similarly, for the second plane, NV12 and NV21 formats seem to
109 * require a halved Y position value. 114 * require a halved Y position value.
110 */ 115 */
111 rcar_du_plane_write(rcdu, index, PnSPXR, plane->src_x); 116 rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x);
112 rcar_du_plane_write(rcdu, index, PnSPYR, plane->src_y * 117 rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y *
113 (plane->format->bpp == 32 ? 2 : 1)); 118 (plane->format->bpp == 32 ? 2 : 1));
114 rcar_du_plane_write(rcdu, index, PnDSA0R, plane->dma[0]); 119 rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[0]);
115 120
116 if (plane->format->planes == 2) { 121 if (plane->format->planes == 2) {
117 index = (index + 1) % 8; 122 index = (index + 1) % 8;
118 123
119 rcar_du_plane_write(rcdu, index, PnSPXR, plane->src_x); 124 rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x);
120 rcar_du_plane_write(rcdu, index, PnSPYR, plane->src_y * 125 rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y *
121 (plane->format->bpp == 16 ? 2 : 1) / 2); 126 (plane->format->bpp == 16 ? 2 : 1) / 2);
122 rcar_du_plane_write(rcdu, index, PnDSA0R, plane->dma[1]); 127 rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[1]);
123 } 128 }
124} 129}
125 130
@@ -140,7 +145,7 @@ void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
140static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane, 145static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
141 unsigned int index) 146 unsigned int index)
142{ 147{
143 struct rcar_du_device *rcdu = plane->dev; 148 struct rcar_du_group *rgrp = plane->group;
144 u32 colorkey; 149 u32 colorkey;
145 u32 pnmr; 150 u32 pnmr;
146 151
@@ -154,9 +159,9 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
154 * enable alpha-blending regardless of the X bit value. 159 * enable alpha-blending regardless of the X bit value.
155 */ 160 */
156 if (plane->format->fourcc != DRM_FORMAT_XRGB1555) 161 if (plane->format->fourcc != DRM_FORMAT_XRGB1555)
157 rcar_du_plane_write(rcdu, index, PnALPHAR, PnALPHAR_ABIT_0); 162 rcar_du_plane_write(rgrp, index, PnALPHAR, PnALPHAR_ABIT_0);
158 else 163 else
159 rcar_du_plane_write(rcdu, index, PnALPHAR, 164 rcar_du_plane_write(rgrp, index, PnALPHAR,
160 PnALPHAR_ABIT_X | plane->alpha); 165 PnALPHAR_ABIT_X | plane->alpha);
161 166
162 pnmr = PnMR_BM_MD | plane->format->pnmr; 167 pnmr = PnMR_BM_MD | plane->format->pnmr;
@@ -172,14 +177,14 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
172 if (plane->format->fourcc == DRM_FORMAT_YUYV) 177 if (plane->format->fourcc == DRM_FORMAT_YUYV)
173 pnmr |= PnMR_YCDF_YUYV; 178 pnmr |= PnMR_YCDF_YUYV;
174 179
175 rcar_du_plane_write(rcdu, index, PnMR, pnmr); 180 rcar_du_plane_write(rgrp, index, PnMR, pnmr);
176 181
177 switch (plane->format->fourcc) { 182 switch (plane->format->fourcc) {
178 case DRM_FORMAT_RGB565: 183 case DRM_FORMAT_RGB565:
179 colorkey = ((plane->colorkey & 0xf80000) >> 8) 184 colorkey = ((plane->colorkey & 0xf80000) >> 8)
180 | ((plane->colorkey & 0x00fc00) >> 5) 185 | ((plane->colorkey & 0x00fc00) >> 5)
181 | ((plane->colorkey & 0x0000f8) >> 3); 186 | ((plane->colorkey & 0x0000f8) >> 3);
182 rcar_du_plane_write(rcdu, index, PnTC2R, colorkey); 187 rcar_du_plane_write(rgrp, index, PnTC2R, colorkey);
183 break; 188 break;
184 189
185 case DRM_FORMAT_ARGB1555: 190 case DRM_FORMAT_ARGB1555:
@@ -187,12 +192,12 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
187 colorkey = ((plane->colorkey & 0xf80000) >> 9) 192 colorkey = ((plane->colorkey & 0xf80000) >> 9)
188 | ((plane->colorkey & 0x00f800) >> 6) 193 | ((plane->colorkey & 0x00f800) >> 6)
189 | ((plane->colorkey & 0x0000f8) >> 3); 194 | ((plane->colorkey & 0x0000f8) >> 3);
190 rcar_du_plane_write(rcdu, index, PnTC2R, colorkey); 195 rcar_du_plane_write(rgrp, index, PnTC2R, colorkey);
191 break; 196 break;
192 197
193 case DRM_FORMAT_XRGB8888: 198 case DRM_FORMAT_XRGB8888:
194 case DRM_FORMAT_ARGB8888: 199 case DRM_FORMAT_ARGB8888:
195 rcar_du_plane_write(rcdu, index, PnTC3R, 200 rcar_du_plane_write(rgrp, index, PnTC3R,
196 PnTC3R_CODE | (plane->colorkey & 0xffffff)); 201 PnTC3R_CODE | (plane->colorkey & 0xffffff));
197 break; 202 break;
198 } 203 }
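rcar_du_plane_setup_mode() above repacks the XRGB8888 color key into the plane's native pixel format before writing PnTC2R. Below is a standalone sketch of the two repacking helpers with one worked value.

#include <stdio.h>

/* RGB888 key -> RGB565: keep the top 5/6/5 bits of each component. */
static unsigned int key_to_rgb565(unsigned int key)
{
	return ((key & 0xf80000) >> 8)
	     | ((key & 0x00fc00) >> 5)
	     | ((key & 0x0000f8) >> 3);
}

/* RGB888 key -> XRGB1555: keep the top 5 bits of each component. */
static unsigned int key_to_xrgb1555(unsigned int key)
{
	return ((key & 0xf80000) >> 9)
	     | ((key & 0x00f800) >> 6)
	     | ((key & 0x0000f8) >> 3);
}

int main(void)
{
	unsigned int key = 0x00ff00ff;	/* magenta in RGB888 */

	printf("RGB565 key:   0x%04x\n", key_to_rgb565(key));   /* 0xf81f */
	printf("XRGB1555 key: 0x%04x\n", key_to_xrgb1555(key)); /* 0x7c1f */
	return 0;
}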
@@ -201,7 +206,7 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
201static void __rcar_du_plane_setup(struct rcar_du_plane *plane, 206static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
202 unsigned int index) 207 unsigned int index)
203{ 208{
204 struct rcar_du_device *rcdu = plane->dev; 209 struct rcar_du_group *rgrp = plane->group;
205 u32 ddcr2 = PnDDCR2_CODE; 210 u32 ddcr2 = PnDDCR2_CODE;
206 u32 ddcr4; 211 u32 ddcr4;
207 u32 mwr; 212 u32 mwr;
@@ -211,7 +216,7 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
211 * The data format is selected by the DDDF field in PnMR and the EDF 216 * The data format is selected by the DDDF field in PnMR and the EDF
212 * field in DDCR4. 217 * field in DDCR4.
213 */ 218 */
214 ddcr4 = rcar_du_plane_read(rcdu, index, PnDDCR4); 219 ddcr4 = rcar_du_plane_read(rgrp, index, PnDDCR4);
215 ddcr4 &= ~PnDDCR4_EDF_MASK; 220 ddcr4 &= ~PnDDCR4_EDF_MASK;
216 ddcr4 |= plane->format->edf | PnDDCR4_CODE; 221 ddcr4 |= plane->format->edf | PnDDCR4_CODE;
217 222
@@ -232,8 +237,8 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
232 } 237 }
233 } 238 }
234 239
235 rcar_du_plane_write(rcdu, index, PnDDCR2, ddcr2); 240 rcar_du_plane_write(rgrp, index, PnDDCR2, ddcr2);
236 rcar_du_plane_write(rcdu, index, PnDDCR4, ddcr4); 241 rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4);
237 242
238 /* Memory pitch (expressed in pixels) */ 243 /* Memory pitch (expressed in pixels) */
239 if (plane->format->planes == 2) 244 if (plane->format->planes == 2)
@@ -241,19 +246,19 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
241 else 246 else
242 mwr = plane->pitch * 8 / plane->format->bpp; 247 mwr = plane->pitch * 8 / plane->format->bpp;
243 248
244 rcar_du_plane_write(rcdu, index, PnMWR, mwr); 249 rcar_du_plane_write(rgrp, index, PnMWR, mwr);
245 250
246 /* Destination position and size */ 251 /* Destination position and size */
247 rcar_du_plane_write(rcdu, index, PnDSXR, plane->width); 252 rcar_du_plane_write(rgrp, index, PnDSXR, plane->width);
248 rcar_du_plane_write(rcdu, index, PnDSYR, plane->height); 253 rcar_du_plane_write(rgrp, index, PnDSYR, plane->height);
249 rcar_du_plane_write(rcdu, index, PnDPXR, plane->dst_x); 254 rcar_du_plane_write(rgrp, index, PnDPXR, plane->dst_x);
250 rcar_du_plane_write(rcdu, index, PnDPYR, plane->dst_y); 255 rcar_du_plane_write(rgrp, index, PnDPYR, plane->dst_y);
251 256
252 /* Wrap-around and blinking, disabled */ 257 /* Wrap-around and blinking, disabled */
253 rcar_du_plane_write(rcdu, index, PnWASPR, 0); 258 rcar_du_plane_write(rgrp, index, PnWASPR, 0);
254 rcar_du_plane_write(rcdu, index, PnWAMWR, 4095); 259 rcar_du_plane_write(rgrp, index, PnWAMWR, 4095);
255 rcar_du_plane_write(rcdu, index, PnBTR, 0); 260 rcar_du_plane_write(rgrp, index, PnBTR, 0);
256 rcar_du_plane_write(rcdu, index, PnMLR, 0); 261 rcar_du_plane_write(rgrp, index, PnMLR, 0);
257} 262}
258 263
259void rcar_du_plane_setup(struct rcar_du_plane *plane) 264void rcar_du_plane_setup(struct rcar_du_plane *plane)
@@ -273,7 +278,7 @@ rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
273 uint32_t src_w, uint32_t src_h) 278 uint32_t src_w, uint32_t src_h)
274{ 279{
275 struct rcar_du_plane *rplane = to_rcar_plane(plane); 280 struct rcar_du_plane *rplane = to_rcar_plane(plane);
276 struct rcar_du_device *rcdu = plane->dev->dev_private; 281 struct rcar_du_device *rcdu = rplane->group->dev;
277 const struct rcar_du_format_info *format; 282 const struct rcar_du_format_info *format;
278 unsigned int nplanes; 283 unsigned int nplanes;
279 int ret; 284 int ret;
@@ -316,26 +321,25 @@ rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
316 rcar_du_plane_compute_base(rplane, fb); 321 rcar_du_plane_compute_base(rplane, fb);
317 rcar_du_plane_setup(rplane); 322 rcar_du_plane_setup(rplane);
318 323
319 mutex_lock(&rcdu->planes.lock); 324 mutex_lock(&rplane->group->planes.lock);
320 rplane->enabled = true; 325 rplane->enabled = true;
321 rcar_du_crtc_update_planes(rplane->crtc); 326 rcar_du_crtc_update_planes(rplane->crtc);
322 mutex_unlock(&rcdu->planes.lock); 327 mutex_unlock(&rplane->group->planes.lock);
323 328
324 return 0; 329 return 0;
325} 330}
326 331
327static int rcar_du_plane_disable(struct drm_plane *plane) 332static int rcar_du_plane_disable(struct drm_plane *plane)
328{ 333{
329 struct rcar_du_device *rcdu = plane->dev->dev_private;
330 struct rcar_du_plane *rplane = to_rcar_plane(plane); 334 struct rcar_du_plane *rplane = to_rcar_plane(plane);
331 335
332 if (!rplane->enabled) 336 if (!rplane->enabled)
333 return 0; 337 return 0;
334 338
335 mutex_lock(&rcdu->planes.lock); 339 mutex_lock(&rplane->group->planes.lock);
336 rplane->enabled = false; 340 rplane->enabled = false;
337 rcar_du_crtc_update_planes(rplane->crtc); 341 rcar_du_crtc_update_planes(rplane->crtc);
338 mutex_unlock(&rcdu->planes.lock); 342 mutex_unlock(&rplane->group->planes.lock);
339 343
340 rcar_du_plane_release(rplane); 344 rcar_du_plane_release(rplane);
341 345
@@ -377,9 +381,7 @@ static void rcar_du_plane_set_colorkey(struct rcar_du_plane *plane,
377static void rcar_du_plane_set_zpos(struct rcar_du_plane *plane, 381static void rcar_du_plane_set_zpos(struct rcar_du_plane *plane,
378 unsigned int zpos) 382 unsigned int zpos)
379{ 383{
380 struct rcar_du_device *rcdu = plane->dev; 384 mutex_lock(&plane->group->planes.lock);
381
382 mutex_lock(&rcdu->planes.lock);
383 if (plane->zpos == zpos) 385 if (plane->zpos == zpos)
384 goto done; 386 goto done;
385 387
@@ -390,21 +392,21 @@ static void rcar_du_plane_set_zpos(struct rcar_du_plane *plane,
390 rcar_du_crtc_update_planes(plane->crtc); 392 rcar_du_crtc_update_planes(plane->crtc);
391 393
392done: 394done:
393 mutex_unlock(&rcdu->planes.lock); 395 mutex_unlock(&plane->group->planes.lock);
394} 396}
395 397
396static int rcar_du_plane_set_property(struct drm_plane *plane, 398static int rcar_du_plane_set_property(struct drm_plane *plane,
397 struct drm_property *property, 399 struct drm_property *property,
398 uint64_t value) 400 uint64_t value)
399{ 401{
400 struct rcar_du_device *rcdu = plane->dev->dev_private;
401 struct rcar_du_plane *rplane = to_rcar_plane(plane); 402 struct rcar_du_plane *rplane = to_rcar_plane(plane);
403 struct rcar_du_group *rgrp = rplane->group;
402 404
403 if (property == rcdu->planes.alpha) 405 if (property == rgrp->planes.alpha)
404 rcar_du_plane_set_alpha(rplane, value); 406 rcar_du_plane_set_alpha(rplane, value);
405 else if (property == rcdu->planes.colorkey) 407 else if (property == rgrp->planes.colorkey)
406 rcar_du_plane_set_colorkey(rplane, value); 408 rcar_du_plane_set_colorkey(rplane, value);
407 else if (property == rcdu->planes.zpos) 409 else if (property == rgrp->planes.zpos)
408 rcar_du_plane_set_zpos(rplane, value); 410 rcar_du_plane_set_zpos(rplane, value);
409 else 411 else
410 return -EINVAL; 412 return -EINVAL;
@@ -432,37 +434,39 @@ static const uint32_t formats[] = {
432 DRM_FORMAT_NV16, 434 DRM_FORMAT_NV16,
433}; 435};
434 436
435int rcar_du_plane_init(struct rcar_du_device *rcdu) 437int rcar_du_planes_init(struct rcar_du_group *rgrp)
436{ 438{
439 struct rcar_du_planes *planes = &rgrp->planes;
440 struct rcar_du_device *rcdu = rgrp->dev;
437 unsigned int i; 441 unsigned int i;
438 442
439 mutex_init(&rcdu->planes.lock); 443 mutex_init(&planes->lock);
440 rcdu->planes.free = 0xff; 444 planes->free = 0xff;
441 445
442 rcdu->planes.alpha = 446 planes->alpha =
443 drm_property_create_range(rcdu->ddev, 0, "alpha", 0, 255); 447 drm_property_create_range(rcdu->ddev, 0, "alpha", 0, 255);
444 if (rcdu->planes.alpha == NULL) 448 if (planes->alpha == NULL)
445 return -ENOMEM; 449 return -ENOMEM;
446 450
447 /* The color key is expressed as an RGB888 triplet stored in a 32-bit 451 /* The color key is expressed as an RGB888 triplet stored in a 32-bit
448 * integer in XRGB8888 format. Bit 24 is used as a flag to disable (0) 452 * integer in XRGB8888 format. Bit 24 is used as a flag to disable (0)
449 * or enable source color keying (1). 453 * or enable source color keying (1).
450 */ 454 */
451 rcdu->planes.colorkey = 455 planes->colorkey =
452 drm_property_create_range(rcdu->ddev, 0, "colorkey", 456 drm_property_create_range(rcdu->ddev, 0, "colorkey",
453 0, 0x01ffffff); 457 0, 0x01ffffff);
454 if (rcdu->planes.colorkey == NULL) 458 if (planes->colorkey == NULL)
455 return -ENOMEM; 459 return -ENOMEM;
456 460
457 rcdu->planes.zpos = 461 planes->zpos =
458 drm_property_create_range(rcdu->ddev, 0, "zpos", 1, 7); 462 drm_property_create_range(rcdu->ddev, 0, "zpos", 1, 7);
459 if (rcdu->planes.zpos == NULL) 463 if (planes->zpos == NULL)
460 return -ENOMEM; 464 return -ENOMEM;
461 465
462 for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) { 466 for (i = 0; i < ARRAY_SIZE(planes->planes); ++i) {
463 struct rcar_du_plane *plane = &rcdu->planes.planes[i]; 467 struct rcar_du_plane *plane = &planes->planes[i];
464 468
465 plane->dev = rcdu; 469 plane->group = rgrp;
466 plane->hwindex = -1; 470 plane->hwindex = -1;
467 plane->alpha = 255; 471 plane->alpha = 255;
468 plane->colorkey = RCAR_DU_COLORKEY_NONE; 472 plane->colorkey = RCAR_DU_COLORKEY_NONE;
@@ -472,11 +476,16 @@ int rcar_du_plane_init(struct rcar_du_device *rcdu)
472 return 0; 476 return 0;
473} 477}
474 478
475int rcar_du_plane_register(struct rcar_du_device *rcdu) 479int rcar_du_planes_register(struct rcar_du_group *rgrp)
476{ 480{
481 struct rcar_du_planes *planes = &rgrp->planes;
482 struct rcar_du_device *rcdu = rgrp->dev;
483 unsigned int crtcs;
477 unsigned int i; 484 unsigned int i;
478 int ret; 485 int ret;
479 486
487 crtcs = ((1 << rcdu->num_crtcs) - 1) & (3 << (2 * rgrp->index));
488
480 for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) { 489 for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) {
481 struct rcar_du_kms_plane *plane; 490 struct rcar_du_kms_plane *plane;
482 491
@@ -484,23 +493,22 @@ int rcar_du_plane_register(struct rcar_du_device *rcdu)
484 if (plane == NULL) 493 if (plane == NULL)
485 return -ENOMEM; 494 return -ENOMEM;
486 495
487 plane->hwplane = &rcdu->planes.planes[i + 2]; 496 plane->hwplane = &planes->planes[i + 2];
488 plane->hwplane->zpos = 1; 497 plane->hwplane->zpos = 1;
489 498
490 ret = drm_plane_init(rcdu->ddev, &plane->plane, 499 ret = drm_plane_init(rcdu->ddev, &plane->plane, crtcs,
491 (1 << rcdu->num_crtcs) - 1,
492 &rcar_du_plane_funcs, formats, 500 &rcar_du_plane_funcs, formats,
493 ARRAY_SIZE(formats), false); 501 ARRAY_SIZE(formats), false);
494 if (ret < 0) 502 if (ret < 0)
495 return ret; 503 return ret;
496 504
497 drm_object_attach_property(&plane->plane.base, 505 drm_object_attach_property(&plane->plane.base,
498 rcdu->planes.alpha, 255); 506 planes->alpha, 255);
499 drm_object_attach_property(&plane->plane.base, 507 drm_object_attach_property(&plane->plane.base,
500 rcdu->planes.colorkey, 508 planes->colorkey,
501 RCAR_DU_COLORKEY_NONE); 509 RCAR_DU_COLORKEY_NONE);
502 drm_object_attach_property(&plane->plane.base, 510 drm_object_attach_property(&plane->plane.base,
503 rcdu->planes.zpos, 1); 511 planes->zpos, 1);
504 } 512 }
505 513
506 return 0; 514 return 0;
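
A worked example of the possible_crtcs mask computed in rcar_du_planes_register() above. The two-CRTCs-per-group layout follows from the expression itself; the helper and the sample values below are illustrative only and not part of the patch.

    #include <stdio.h>

    /* Group i drives CRTCs 2*i and 2*i+1; the mask is clamped to the CRTCs
     * that actually exist on the device. */
    static unsigned int group_possible_crtcs(unsigned int num_crtcs,
                                             unsigned int group_index)
    {
            return ((1u << num_crtcs) - 1) & (3u << (2 * group_index));
    }

    int main(void)
    {
            printf("%#x\n", group_possible_crtcs(3, 0));  /* 0x3: CRTCs 0 and 1 */
            printf("%#x\n", group_possible_crtcs(3, 1));  /* 0x4: CRTC 2 only   */
            return 0;
    }
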
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
index 5397dba2fe57..f94f9ce84998 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
@@ -14,10 +14,13 @@
14#ifndef __RCAR_DU_PLANE_H__ 14#ifndef __RCAR_DU_PLANE_H__
15#define __RCAR_DU_PLANE_H__ 15#define __RCAR_DU_PLANE_H__
16 16
17struct drm_crtc; 17#include <linux/mutex.h>
18struct drm_framebuffer; 18
19struct rcar_du_device; 19#include <drm/drmP.h>
20#include <drm/drm_crtc.h>
21
20struct rcar_du_format_info; 22struct rcar_du_format_info;
23struct rcar_du_group;
21 24
22/* The RCAR DU has 8 hardware planes, shared between KMS planes and CRTCs. As 25/* The RCAR DU has 8 hardware planes, shared between KMS planes and CRTCs. As
23 * using KMS planes requires at least one of the CRTCs being enabled, no more 26 * using KMS planes requires at least one of the CRTCs being enabled, no more
@@ -30,7 +33,7 @@ struct rcar_du_format_info;
30#define RCAR_DU_NUM_SW_PLANES 9 33#define RCAR_DU_NUM_SW_PLANES 9
31 34
32struct rcar_du_plane { 35struct rcar_du_plane {
33 struct rcar_du_device *dev; 36 struct rcar_du_group *group;
34 struct drm_crtc *crtc; 37 struct drm_crtc *crtc;
35 38
36 bool enabled; 39 bool enabled;
@@ -54,8 +57,19 @@ struct rcar_du_plane {
54 unsigned int dst_y; 57 unsigned int dst_y;
55}; 58};
56 59
57int rcar_du_plane_init(struct rcar_du_device *rcdu); 60struct rcar_du_planes {
58int rcar_du_plane_register(struct rcar_du_device *rcdu); 61 struct rcar_du_plane planes[RCAR_DU_NUM_SW_PLANES];
62 unsigned int free;
63 struct mutex lock;
64
65 struct drm_property *alpha;
66 struct drm_property *colorkey;
67 struct drm_property *zpos;
68};
69
70int rcar_du_planes_init(struct rcar_du_group *rgrp);
71int rcar_du_planes_register(struct rcar_du_group *rgrp);
72
59void rcar_du_plane_setup(struct rcar_du_plane *plane); 73void rcar_du_plane_setup(struct rcar_du_plane *plane);
60void rcar_du_plane_update_base(struct rcar_du_plane *plane); 74void rcar_du_plane_update_base(struct rcar_du_plane *plane);
61void rcar_du_plane_compute_base(struct rcar_du_plane *plane, 75void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
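
A minimal illustration of the colorkey encoding described in the comment earlier in this file: an RGB888 triplet in the low 24 bits of the property value, with bit 24 enabling source color keying. The helper name is made up for the example.

    #include <stdint.h>
    #include <stdio.h>

    #define EXAMPLE_COLORKEY_ENABLE (1u << 24)  /* bit 24: 0 = keying off, 1 = on */

    /* Pack an RGB888 triplet into the property value and enable keying. */
    static uint32_t example_colorkey(uint8_t r, uint8_t g, uint8_t b)
    {
            return EXAMPLE_COLORKEY_ENABLE |
                   ((uint32_t)r << 16) | ((uint32_t)g << 8) | b;
    }

    int main(void)
    {
            /* key out pure green; leaving bit 24 clear (as RCAR_DU_COLORKEY_NONE
             * presumably does) disables keying altogether */
            printf("%#010x\n", example_colorkey(0x00, 0xff, 0x00)); /* 0x0100ff00 */
            return 0;
    }
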
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index 69f21f19b51c..73f7347f740b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -13,14 +13,15 @@
13#ifndef __RCAR_DU_REGS_H__ 13#ifndef __RCAR_DU_REGS_H__
14#define __RCAR_DU_REGS_H__ 14#define __RCAR_DU_REGS_H__
15 15
16#define DISP2_REG_OFFSET 0x30000 16#define DU0_REG_OFFSET 0x00000
17#define DU1_REG_OFFSET 0x30000
18#define DU2_REG_OFFSET 0x40000
17 19
18/* ----------------------------------------------------------------------------- 20/* -----------------------------------------------------------------------------
19 * Display Control Registers 21 * Display Control Registers
20 */ 22 */
21 23
22#define DSYSR 0x00000 /* display 1 */ 24#define DSYSR 0x00000 /* display 1 */
23#define D2SYSR 0x30000 /* display 2 */
24#define DSYSR_ILTS (1 << 29) 25#define DSYSR_ILTS (1 << 29)
25#define DSYSR_DSEC (1 << 20) 26#define DSYSR_DSEC (1 << 20)
26#define DSYSR_IUPD (1 << 16) 27#define DSYSR_IUPD (1 << 16)
@@ -35,7 +36,6 @@
35#define DSYSR_SCM_INT_VIDEO (3 << 4) 36#define DSYSR_SCM_INT_VIDEO (3 << 4)
36 37
37#define DSMR 0x00004 38#define DSMR 0x00004
38#define D2SMR 0x30004
39#define DSMR_VSPM (1 << 28) 39#define DSMR_VSPM (1 << 28)
40#define DSMR_ODPM (1 << 27) 40#define DSMR_ODPM (1 << 27)
41#define DSMR_DIPM_DISP (0 << 25) 41#define DSMR_DIPM_DISP (0 << 25)
@@ -60,7 +60,6 @@
60#define DSMR_CSY_MASK (3 << 6) 60#define DSMR_CSY_MASK (3 << 6)
61 61
62#define DSSR 0x00008 62#define DSSR 0x00008
63#define D2SSR 0x30008
64#define DSSR_VC1FB_DSA0 (0 << 30) 63#define DSSR_VC1FB_DSA0 (0 << 30)
65#define DSSR_VC1FB_DSA1 (1 << 30) 64#define DSSR_VC1FB_DSA1 (1 << 30)
66#define DSSR_VC1FB_DSA2 (2 << 30) 65#define DSSR_VC1FB_DSA2 (2 << 30)
@@ -80,7 +79,6 @@
80#define DSSR_ADC(n) (1 << ((n)-1)) 79#define DSSR_ADC(n) (1 << ((n)-1))
81 80
82#define DSRCR 0x0000c 81#define DSRCR 0x0000c
83#define D2SRCR 0x3000c
84#define DSRCR_TVCL (1 << 15) 82#define DSRCR_TVCL (1 << 15)
85#define DSRCR_FRCL (1 << 14) 83#define DSRCR_FRCL (1 << 14)
86#define DSRCR_VBCL (1 << 11) 84#define DSRCR_VBCL (1 << 11)
@@ -90,7 +88,6 @@
90#define DSRCR_MASK 0x0000cbff 88#define DSRCR_MASK 0x0000cbff
91 89
92#define DIER 0x00010 90#define DIER 0x00010
93#define D2IER 0x30010
94#define DIER_TVE (1 << 15) 91#define DIER_TVE (1 << 15)
95#define DIER_FRE (1 << 14) 92#define DIER_FRE (1 << 14)
96#define DIER_VBE (1 << 11) 93#define DIER_VBE (1 << 11)
@@ -114,7 +111,6 @@
114#define DPPR_BPP32 (DPPR_BPP32_P1 | DPPR_BPP32_P2) /* plane1 & 2 */ 111#define DPPR_BPP32 (DPPR_BPP32_P1 | DPPR_BPP32_P2) /* plane1 & 2 */
115 112
116#define DEFR 0x00020 113#define DEFR 0x00020
117#define D2EFR 0x30020
118#define DEFR_CODE (0x7773 << 16) 114#define DEFR_CODE (0x7773 << 16)
119#define DEFR_EXSL (1 << 12) 115#define DEFR_EXSL (1 << 12)
120#define DEFR_EXVL (1 << 11) 116#define DEFR_EXVL (1 << 11)
@@ -137,12 +133,10 @@
137#define DCPCR_DCE (1 << 0) 133#define DCPCR_DCE (1 << 0)
138 134
139#define DEFR2 0x00034 135#define DEFR2 0x00034
140#define D2EFR2 0x30034
141#define DEFR2_CODE (0x7775 << 16) 136#define DEFR2_CODE (0x7775 << 16)
142#define DEFR2_DEFE2G (1 << 0) 137#define DEFR2_DEFE2G (1 << 0)
143 138
144#define DEFR3 0x00038 139#define DEFR3 0x00038
145#define D2EFR3 0x30038
146#define DEFR3_CODE (0x7776 << 16) 140#define DEFR3_CODE (0x7776 << 16)
147#define DEFR3_EVDA (1 << 14) 141#define DEFR3_EVDA (1 << 14)
148#define DEFR3_EVDM_1 (1 << 12) 142#define DEFR3_EVDM_1 (1 << 12)
@@ -153,7 +147,6 @@
153#define DEFR3_DEFE3 (1 << 0) 147#define DEFR3_DEFE3 (1 << 0)
154 148
155#define DEFR4 0x0003c 149#define DEFR4 0x0003c
156#define D2EFR4 0x3003c
157#define DEFR4_CODE (0x7777 << 16) 150#define DEFR4_CODE (0x7777 << 16)
158#define DEFR4_LRUO (1 << 5) 151#define DEFR4_LRUO (1 << 5)
159#define DEFR4_SPCE (1 << 4) 152#define DEFR4_SPCE (1 << 4)
@@ -205,6 +198,68 @@
205#define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE2) 198#define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE2)
206 199
207/* ----------------------------------------------------------------------------- 200/* -----------------------------------------------------------------------------
201 * R8A7790-only Control Registers
202 */
203
204#define DD1SSR 0x20008
205#define DD1SSR_TVR (1 << 15)
206#define DD1SSR_FRM (1 << 14)
207#define DD1SSR_BUF (1 << 12)
208#define DD1SSR_VBK (1 << 11)
209#define DD1SSR_RINT (1 << 9)
210#define DD1SSR_HBK (1 << 8)
211#define DD1SSR_ADC(n) (1 << ((n)-1))
212
213#define DD1SRCR 0x2000c
214#define DD1SRCR_TVR (1 << 15)
215#define DD1SRCR_FRM (1 << 14)
216#define DD1SRCR_BUF (1 << 12)
217#define DD1SRCR_VBK (1 << 11)
218#define DD1SRCR_RINT (1 << 9)
219#define DD1SRCR_HBK (1 << 8)
220#define DD1SRCR_ADC(n) (1 << ((n)-1))
221
222#define DD1IER 0x20010
223#define DD1IER_TVR (1 << 15)
224#define DD1IER_FRM (1 << 14)
225#define DD1IER_BUF (1 << 12)
226#define DD1IER_VBK (1 << 11)
227#define DD1IER_RINT (1 << 9)
228#define DD1IER_HBK (1 << 8)
229#define DD1IER_ADC(n) (1 << ((n)-1))
230
231#define DEFR8 0x20020
232#define DEFR8_CODE (0x7790 << 16)
233#define DEFR8_VSCS (1 << 6)
234#define DEFR8_DRGBS_DU(n) ((n) << 4)
235#define DEFR8_DRGBS_MASK (3 << 4)
236#define DEFR8_DEFE8 (1 << 0)
237
238#define DOFLR 0x20024
239#define DOFLR_CODE (0x7790 << 16)
240#define DOFLR_HSYCFL1 (1 << 13)
241#define DOFLR_VSYCFL1 (1 << 12)
242#define DOFLR_ODDFL1 (1 << 11)
243#define DOFLR_DISPFL1 (1 << 10)
244#define DOFLR_CDEFL1 (1 << 9)
245#define DOFLR_RGBFL1 (1 << 8)
246#define DOFLR_HSYCFL0 (1 << 5)
247#define DOFLR_VSYCFL0 (1 << 4)
248#define DOFLR_ODDFL0 (1 << 3)
249#define DOFLR_DISPFL0 (1 << 2)
250#define DOFLR_CDEFL0 (1 << 1)
251#define DOFLR_RGBFL0 (1 << 0)
252
253#define DIDSR 0x20028
254#define DIDSR_CODE (0x7790 << 16)
255#define DIDSR_LCDS_DCLKIN(n) (0 << (8 + (n) * 2))
256#define DIDSR_LCDS_LVDS0(n) (2 << (8 + (n) * 2))
257#define DIDSR_LCDS_LVDS1(n) (3 << (8 + (n) * 2))
258#define DIDSR_LCDS_MASK(n) (3 << (8 + (n) * 2))
259#define DIDSR_PCDS_CLK(n, clk) (clk << ((n) * 2))
260#define DIDSR_PCDS_MASK(n) (3 << ((n) * 2))
261
262/* -----------------------------------------------------------------------------
208 * Display Timing Generation Registers 263 * Display Timing Generation Registers
209 */ 264 */
210 265
@@ -349,21 +404,34 @@
349#define APnMR_BM_AD (2 << 4) /* Auto Display Change Mode */ 404#define APnMR_BM_AD (2 << 4) /* Auto Display Change Mode */
350 405
351#define APnMWR 0x0a104 406#define APnMWR 0x0a104
407
408#define APnDSXR 0x0a110
409#define APnDSYR 0x0a114
410#define APnDPXR 0x0a118
411#define APnDPYR 0x0a11c
412
352#define APnDSA0R 0x0a120 413#define APnDSA0R 0x0a120
353#define APnDSA1R 0x0a124 414#define APnDSA1R 0x0a124
354#define APnDSA2R 0x0a128 415#define APnDSA2R 0x0a128
416
417#define APnSPXR 0x0a130
418#define APnSPYR 0x0a134
419#define APnWASPR 0x0a138
420#define APnWAMWR 0x0a13c
421
422#define APnBTR 0x0a140
423
355#define APnMLR 0x0a150 424#define APnMLR 0x0a150
425#define APnSWAPR 0x0a180
356 426
357/* ----------------------------------------------------------------------------- 427/* -----------------------------------------------------------------------------
358 * Display Capture Registers 428 * Display Capture Registers
359 */ 429 */
360 430
431#define DCMR 0x0c100
361#define DCMWR 0x0c104 432#define DCMWR 0x0c104
362#define DC2MWR 0x0c204
363#define DCSAR 0x0c120 433#define DCSAR 0x0c120
364#define DC2SAR 0x0c220
365#define DCMLR 0x0c150 434#define DCMLR 0x0c150
366#define DC2MLR 0x0c250
367 435
368/* ----------------------------------------------------------------------------- 436/* -----------------------------------------------------------------------------
369 * Color Palette Registers 437 * Color Palette Registers
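
A short sketch of how the new per-group register offsets above combine with the shared register defines; the read helper and the mmio pointer are assumptions for illustration, not code from this patch.

    /* A per-group register is addressed as the group base plus the common
     * offset, which is what lets the dedicated D2xxx defines go away. */
    static u32 example_group_read(void __iomem *mmio, unsigned int group_index,
                                  u32 reg)
    {
            static const u32 base[] = {
                    DU0_REG_OFFSET,         /* 0x00000 */
                    DU1_REG_OFFSET,         /* 0x30000 */
                    DU2_REG_OFFSET,         /* 0x40000 */
            };

            return ioread32(mmio + base[group_index] + reg);
    }

    /* e.g. example_group_read(mmio, 1, DSYSR) reads the second group's DSYSR
     * at 0x30000 + 0x00000, which the removed D2SYSR define named directly. */
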
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vga.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index 327289ec380d..41d563adfeaa 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vga.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * rcar_du_vga.c -- R-Car Display Unit VGA DAC and Connector 2 * rcar_du_vgacon.c -- R-Car Display Unit VGA Connector
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013 Renesas Corporation
5 * 5 *
@@ -16,12 +16,9 @@
16#include <drm/drm_crtc_helper.h> 16#include <drm/drm_crtc_helper.h>
17 17
18#include "rcar_du_drv.h" 18#include "rcar_du_drv.h"
19#include "rcar_du_encoder.h"
19#include "rcar_du_kms.h" 20#include "rcar_du_kms.h"
20#include "rcar_du_vga.h" 21#include "rcar_du_vgacon.h"
21
22/* -----------------------------------------------------------------------------
23 * Connector
24 */
25 22
26static int rcar_du_vga_connector_get_modes(struct drm_connector *connector) 23static int rcar_du_vga_connector_get_modes(struct drm_connector *connector)
27{ 24{
@@ -49,7 +46,7 @@ static void rcar_du_vga_connector_destroy(struct drm_connector *connector)
49static enum drm_connector_status 46static enum drm_connector_status
50rcar_du_vga_connector_detect(struct drm_connector *connector, bool force) 47rcar_du_vga_connector_detect(struct drm_connector *connector, bool force)
51{ 48{
52 return connector_status_unknown; 49 return connector_status_connected;
53} 50}
54 51
55static const struct drm_connector_funcs connector_funcs = { 52static const struct drm_connector_funcs connector_funcs = {
@@ -59,8 +56,8 @@ static const struct drm_connector_funcs connector_funcs = {
59 .destroy = rcar_du_vga_connector_destroy, 56 .destroy = rcar_du_vga_connector_destroy,
60}; 57};
61 58
62static int rcar_du_vga_connector_init(struct rcar_du_device *rcdu, 59int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
63 struct rcar_du_encoder *renc) 60 struct rcar_du_encoder *renc)
64{ 61{
65 struct rcar_du_connector *rcon; 62 struct rcar_du_connector *rcon;
66 struct drm_connector *connector; 63 struct drm_connector *connector;
@@ -97,53 +94,3 @@ static int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
97 94
98 return 0; 95 return 0;
99} 96}
100
101/* -----------------------------------------------------------------------------
102 * Encoder
103 */
104
105static void rcar_du_vga_encoder_dpms(struct drm_encoder *encoder, int mode)
106{
107}
108
109static bool rcar_du_vga_encoder_mode_fixup(struct drm_encoder *encoder,
110 const struct drm_display_mode *mode,
111 struct drm_display_mode *adjusted_mode)
112{
113 return true;
114}
115
116static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
117 .dpms = rcar_du_vga_encoder_dpms,
118 .mode_fixup = rcar_du_vga_encoder_mode_fixup,
119 .prepare = rcar_du_encoder_mode_prepare,
120 .commit = rcar_du_encoder_mode_commit,
121 .mode_set = rcar_du_encoder_mode_set,
122};
123
124static const struct drm_encoder_funcs encoder_funcs = {
125 .destroy = drm_encoder_cleanup,
126};
127
128int rcar_du_vga_init(struct rcar_du_device *rcdu,
129 const struct rcar_du_encoder_vga_data *data,
130 unsigned int output)
131{
132 struct rcar_du_encoder *renc;
133 int ret;
134
135 renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
136 if (renc == NULL)
137 return -ENOMEM;
138
139 renc->output = output;
140
141 ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
142 DRM_MODE_ENCODER_DAC);
143 if (ret < 0)
144 return ret;
145
146 drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
147
148 return rcar_du_vga_connector_init(rcdu, renc);
149}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vga.h b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h
index 66b4d2d7190d..b12b0cf7f117 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vga.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * rcar_du_vga.h -- R-Car Display Unit VGA DAC and Connector 2 * rcar_du_vgacon.h -- R-Car Display Unit VGA Connector
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013 Renesas Corporation
5 * 5 *
@@ -11,14 +11,13 @@
11 * (at your option) any later version. 11 * (at your option) any later version.
12 */ 12 */
13 13
14#ifndef __RCAR_DU_VGA_H__ 14#ifndef __RCAR_DU_VGACON_H__
15#define __RCAR_DU_VGA_H__ 15#define __RCAR_DU_VGACON_H__
16 16
17struct rcar_du_device; 17struct rcar_du_device;
18struct rcar_du_encoder_vga_data; 18struct rcar_du_encoder;
19 19
20int rcar_du_vga_init(struct rcar_du_device *rcdu, 20int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
21 const struct rcar_du_encoder_vga_data *data, 21 struct rcar_du_encoder *renc);
22 unsigned int output);
23 22
24#endif /* __RCAR_DU_VGA_H__ */ 23#endif /* __RCAR_DU_VGACON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
new file mode 100644
index 000000000000..77cf9289ab65
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
@@ -0,0 +1,69 @@
1/*
2 * rcar_lvds_regs.h -- R-Car LVDS Interface Registers Definitions
3 *
4 * Copyright (C) 2013 Renesas Electronics Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation.
11 */
12
13#ifndef __RCAR_LVDS_REGS_H__
14#define __RCAR_LVDS_REGS_H__
15
16#define LVDCR0 0x0000
17#define LVDCR0_DUSEL (1 << 15)
18#define LVDCR0_DMD (1 << 12)
19#define LVDCR0_LVMD_MASK (0xf << 8)
20#define LVDCR0_LVMD_SHIFT 8
21#define LVDCR0_PLLEN (1 << 4)
22#define LVDCR0_BEN (1 << 2)
23#define LVDCR0_LVEN (1 << 1)
24#define LVDCR0_LVRES (1 << 0)
25
26#define LVDCR1 0x0004
27#define LVDCR1_CKSEL (1 << 15)
28#define LVDCR1_CHSTBY(n) (3 << (2 + (n) * 2))
29#define LVDCR1_CLKSTBY (3 << 0)
30
31#define LVDPLLCR 0x0008
32#define LVDPLLCR_CEEN (1 << 14)
33#define LVDPLLCR_FBEN (1 << 13)
34#define LVDPLLCR_COSEL (1 << 12)
35#define LVDPLLCR_PLLDLYCNT_150M (0x1bf << 0)
36#define LVDPLLCR_PLLDLYCNT_121M (0x22c << 0)
37#define LVDPLLCR_PLLDLYCNT_60M (0x77b << 0)
38#define LVDPLLCR_PLLDLYCNT_38M (0x69a << 0)
39#define LVDPLLCR_PLLDLYCNT_MASK (0x7ff << 0)
40
41#define LVDCTRCR 0x000c
42#define LVDCTRCR_CTR3SEL_ZERO (0 << 12)
43#define LVDCTRCR_CTR3SEL_ODD (1 << 12)
44#define LVDCTRCR_CTR3SEL_CDE (2 << 12)
45#define LVDCTRCR_CTR3SEL_MASK (7 << 12)
46#define LVDCTRCR_CTR2SEL_DISP (0 << 8)
47#define LVDCTRCR_CTR2SEL_ODD (1 << 8)
48#define LVDCTRCR_CTR2SEL_CDE (2 << 8)
49#define LVDCTRCR_CTR2SEL_HSYNC (3 << 8)
50#define LVDCTRCR_CTR2SEL_VSYNC (4 << 8)
51#define LVDCTRCR_CTR2SEL_MASK (7 << 8)
52#define LVDCTRCR_CTR1SEL_VSYNC (0 << 4)
53#define LVDCTRCR_CTR1SEL_DISP (1 << 4)
54#define LVDCTRCR_CTR1SEL_ODD (2 << 4)
55#define LVDCTRCR_CTR1SEL_CDE (3 << 4)
56#define LVDCTRCR_CTR1SEL_HSYNC (4 << 4)
57#define LVDCTRCR_CTR1SEL_MASK (7 << 4)
58#define LVDCTRCR_CTR0SEL_HSYNC (0 << 0)
59#define LVDCTRCR_CTR0SEL_VSYNC (1 << 0)
60#define LVDCTRCR_CTR0SEL_DISP (2 << 0)
61#define LVDCTRCR_CTR0SEL_ODD (3 << 0)
62#define LVDCTRCR_CTR0SEL_CDE (4 << 0)
63#define LVDCTRCR_CTR0SEL_MASK (7 << 0)
64
65#define LVDCHCR 0x0010
66#define LVDCHCR_CHSEL_CH(n, c) ((((c) - (n)) & 3) << ((n) * 4))
67#define LVDCHCR_CHSEL_MASK(n) (3 << ((n) * 4))
68
69#endif /* __RCAR_LVDS_REGS_H__ */
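
A worked example of the LVDCHCR channel-select macro defined above: each output channel n carries a 2-bit field at bit n*4 holding (c - n) & 3, i.e. the rotation needed to route data channel c onto output channel n. The mapping chosen below is arbitrary.

    u32 lvdchcr = LVDCHCR_CHSEL_CH(0, 0) |  /* ((0-0) & 3) << 0  = 0x0000 */
                  LVDCHCR_CHSEL_CH(1, 2) |  /* ((2-1) & 3) << 4  = 0x0010 */
                  LVDCHCR_CHSEL_CH(2, 1) |  /* ((1-2) & 3) << 8  = 0x0300 */
                  LVDCHCR_CHSEL_CH(3, 3);   /* ((3-3) & 3) << 12 = 0x0000 */
    /* -> 0x0310: channels 0 and 3 pass through, channels 1 and 2 are swapped. */
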
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index bd6b2cf508d5..b17d0710871a 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -1072,7 +1072,7 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
1072 drm_idlelock_release(&file_priv->master->lock); 1072 drm_idlelock_release(&file_priv->master->lock);
1073} 1073}
1074 1074
1075struct drm_ioctl_desc savage_ioctls[] = { 1075const struct drm_ioctl_desc savage_ioctls[] = {
1076 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1076 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1077 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH), 1077 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
1078 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH), 1078 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 71b2081e7835..3c030216e888 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -42,7 +42,6 @@ static const struct file_operations savage_driver_fops = {
42 .unlocked_ioctl = drm_ioctl, 42 .unlocked_ioctl = drm_ioctl,
43 .mmap = drm_mmap, 43 .mmap = drm_mmap,
44 .poll = drm_poll, 44 .poll = drm_poll,
45 .fasync = drm_fasync,
46#ifdef CONFIG_COMPAT 45#ifdef CONFIG_COMPAT
47 .compat_ioctl = drm_compat_ioctl, 46 .compat_ioctl = drm_compat_ioctl,
48#endif 47#endif
@@ -51,7 +50,7 @@ static const struct file_operations savage_driver_fops = {
51 50
52static struct drm_driver driver = { 51static struct drm_driver driver = {
53 .driver_features = 52 .driver_features =
54 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA, 53 DRIVER_USE_AGP | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
55 .dev_priv_size = sizeof(drm_savage_buf_priv_t), 54 .dev_priv_size = sizeof(drm_savage_buf_priv_t),
56 .load = savage_driver_load, 55 .load = savage_driver_load,
57 .firstopen = savage_driver_firstopen, 56 .firstopen = savage_driver_firstopen,
diff --git a/drivers/gpu/drm/savage/savage_drv.h b/drivers/gpu/drm/savage/savage_drv.h
index c05082a59f6f..335f8fcf1041 100644
--- a/drivers/gpu/drm/savage/savage_drv.h
+++ b/drivers/gpu/drm/savage/savage_drv.h
@@ -104,7 +104,7 @@ enum savage_family {
104 S3_LAST 104 S3_LAST
105}; 105};
106 106
107extern struct drm_ioctl_desc savage_ioctls[]; 107extern const struct drm_ioctl_desc savage_ioctls[];
108extern int savage_max_ioctl; 108extern int savage_max_ioctl;
109 109
110#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX)) 110#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 99e2034e49cc..54bad98e9477 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -465,7 +465,8 @@ void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
465 465
466static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc, 466static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
467 struct drm_framebuffer *fb, 467 struct drm_framebuffer *fb,
468 struct drm_pending_vblank_event *event) 468 struct drm_pending_vblank_event *event,
469 uint32_t page_flip_flags)
469{ 470{
470 struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc); 471 struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
471 struct drm_device *dev = scrtc->crtc.dev; 472 struct drm_device *dev = scrtc->crtc.dev;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 5f83f9a3ef59..015551866b4a 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -257,7 +257,6 @@ static const struct file_operations shmob_drm_fops = {
257#endif 257#endif
258 .poll = drm_poll, 258 .poll = drm_poll,
259 .read = drm_read, 259 .read = drm_read,
260 .fasync = drm_fasync,
261 .llseek = no_llseek, 260 .llseek = no_llseek,
262 .mmap = drm_gem_cma_mmap, 261 .mmap = drm_gem_cma_mmap,
263}; 262};
@@ -285,7 +284,7 @@ static struct drm_driver shmob_drm_driver = {
285 .gem_prime_mmap = drm_gem_cma_prime_mmap, 284 .gem_prime_mmap = drm_gem_cma_prime_mmap,
286 .dumb_create = drm_gem_cma_dumb_create, 285 .dumb_create = drm_gem_cma_dumb_create,
287 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 286 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
288 .dumb_destroy = drm_gem_cma_dumb_destroy, 287 .dumb_destroy = drm_gem_dumb_destroy,
289 .fops = &shmob_drm_fops, 288 .fops = &shmob_drm_fops,
290 .name = "shmob-drm", 289 .name = "shmob-drm",
291 .desc = "Renesas SH Mobile DRM", 290 .desc = "Renesas SH Mobile DRM",
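
The .page_flip hook now takes a flags argument (the new page_flip_flags parameter above). A minimal sketch of a driver that cannot perform asynchronous flips rejecting DRM_MODE_PAGE_FLIP_ASYNC; everything beyond the shown parameter list is assumed.

    static int example_crtc_page_flip(struct drm_crtc *crtc,
                                      struct drm_framebuffer *fb,
                                      struct drm_pending_vblank_event *event,
                                      uint32_t page_flip_flags)
    {
            if (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
                    return -EINVAL;         /* only vblank-synchronized flips */

            /* ... queue the flip exactly as before ... */
            return 0;
    }
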
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 5a5325e6b759..4383b74a3aa4 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -72,7 +72,6 @@ static const struct file_operations sis_driver_fops = {
72 .unlocked_ioctl = drm_ioctl, 72 .unlocked_ioctl = drm_ioctl,
73 .mmap = drm_mmap, 73 .mmap = drm_mmap,
74 .poll = drm_poll, 74 .poll = drm_poll,
75 .fasync = drm_fasync,
76#ifdef CONFIG_COMPAT 75#ifdef CONFIG_COMPAT
77 .compat_ioctl = drm_compat_ioctl, 76 .compat_ioctl = drm_compat_ioctl,
78#endif 77#endif
@@ -103,7 +102,7 @@ void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
103} 102}
104 103
105static struct drm_driver driver = { 104static struct drm_driver driver = {
106 .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR, 105 .driver_features = DRIVER_USE_AGP,
107 .load = sis_driver_load, 106 .load = sis_driver_load,
108 .unload = sis_driver_unload, 107 .unload = sis_driver_unload,
109 .open = sis_driver_open, 108 .open = sis_driver_open,
diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h
index 13b527bb83be..c31c0253054d 100644
--- a/drivers/gpu/drm/sis/sis_drv.h
+++ b/drivers/gpu/drm/sis/sis_drv.h
@@ -70,7 +70,7 @@ extern void sis_reclaim_buffers_locked(struct drm_device *dev,
70 struct drm_file *file_priv); 70 struct drm_file *file_priv);
71extern void sis_lastclose(struct drm_device *dev); 71extern void sis_lastclose(struct drm_device *dev);
72 72
73extern struct drm_ioctl_desc sis_ioctls[]; 73extern const struct drm_ioctl_desc sis_ioctls[];
74extern int sis_max_ioctl; 74extern int sis_max_ioctl;
75 75
76#endif 76#endif
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 9a43d98e5003..01857d836350 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -109,7 +109,8 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
109 if (pool == AGP_TYPE) { 109 if (pool == AGP_TYPE) {
110 retval = drm_mm_insert_node(&dev_priv->agp_mm, 110 retval = drm_mm_insert_node(&dev_priv->agp_mm,
111 &item->mm_node, 111 &item->mm_node,
112 mem->size, 0); 112 mem->size, 0,
113 DRM_MM_SEARCH_DEFAULT);
113 offset = item->mm_node.start; 114 offset = item->mm_node.start;
114 } else { 115 } else {
115#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE) 116#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
@@ -121,7 +122,8 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
121#else 122#else
122 retval = drm_mm_insert_node(&dev_priv->vram_mm, 123 retval = drm_mm_insert_node(&dev_priv->vram_mm,
123 &item->mm_node, 124 &item->mm_node,
124 mem->size, 0); 125 mem->size, 0,
126 DRM_MM_SEARCH_DEFAULT);
125 offset = item->mm_node.start; 127 offset = item->mm_node.start;
126#endif 128#endif
127 } 129 }
@@ -348,7 +350,7 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
348 return; 350 return;
349} 351}
350 352
351struct drm_ioctl_desc sis_ioctls[] = { 353const struct drm_ioctl_desc sis_ioctls[] = {
352 DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH), 354 DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
353 DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH), 355 DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH),
354 DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), 356 DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
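
A minimal sketch of the drm_mm allocation pattern used above, with a generic size variable standing in for mem->size: the caller allocates the drm_mm_node itself, drm_mm_insert_node() finds space for it, and the node is removed and freed on release (or freed immediately if insertion fails).

    node = kzalloc(sizeof(*node), GFP_KERNEL);
    if (!node)
            return -ENOMEM;

    ret = drm_mm_insert_node(&dev_priv->vram_mm, node, size, 0,
                             DRM_MM_SEARCH_DEFAULT);
    if (ret) {
            kfree(node);
            return ret;
    }
    /* ... use node->start as the allocation offset ... */

    drm_mm_remove_node(node);       /* on free */
    kfree(node);
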
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index ddfa743459d0..3492ca5c46d3 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -48,7 +48,6 @@ static const struct file_operations tdfx_driver_fops = {
48 .unlocked_ioctl = drm_ioctl, 48 .unlocked_ioctl = drm_ioctl,
49 .mmap = drm_mmap, 49 .mmap = drm_mmap,
50 .poll = drm_poll, 50 .poll = drm_poll,
51 .fasync = drm_fasync,
52#ifdef CONFIG_COMPAT 51#ifdef CONFIG_COMPAT
53 .compat_ioctl = drm_compat_ioctl, 52 .compat_ioctl = drm_compat_ioctl,
54#endif 53#endif
@@ -56,7 +55,6 @@ static const struct file_operations tdfx_driver_fops = {
56}; 55};
57 56
58static struct drm_driver driver = { 57static struct drm_driver driver = {
59 .driver_features = DRIVER_USE_MTRR,
60 .fops = &tdfx_driver_fops, 58 .fops = &tdfx_driver_fops,
61 .name = DRIVER_NAME, 59 .name = DRIVER_NAME,
62 .desc = DRIVER_DESC, 60 .desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 7418dcd986d3..d36efc13b16f 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -15,7 +15,7 @@
15 * this program. If not, see <http://www.gnu.org/licenses/>. 15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18#include <linux/kfifo.h> 18#include "drm_flip_work.h"
19 19
20#include "tilcdc_drv.h" 20#include "tilcdc_drv.h"
21#include "tilcdc_regs.h" 21#include "tilcdc_regs.h"
@@ -35,21 +35,18 @@ struct tilcdc_crtc {
35 struct drm_framebuffer *scanout[2]; 35 struct drm_framebuffer *scanout[2];
36 36
37 /* for deferred fb unref's: */ 37 /* for deferred fb unref's: */
38 DECLARE_KFIFO_PTR(unref_fifo, struct drm_framebuffer *); 38 struct drm_flip_work unref_work;
39 struct work_struct work;
40}; 39};
41#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base) 40#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
42 41
43static void unref_worker(struct work_struct *work) 42static void unref_worker(struct drm_flip_work *work, void *val)
44{ 43{
45 struct tilcdc_crtc *tilcdc_crtc = 44 struct tilcdc_crtc *tilcdc_crtc =
46 container_of(work, struct tilcdc_crtc, work); 45 container_of(work, struct tilcdc_crtc, unref_work);
47 struct drm_device *dev = tilcdc_crtc->base.dev; 46 struct drm_device *dev = tilcdc_crtc->base.dev;
48 struct drm_framebuffer *fb;
49 47
50 mutex_lock(&dev->mode_config.mutex); 48 mutex_lock(&dev->mode_config.mutex);
51 while (kfifo_get(&tilcdc_crtc->unref_fifo, &fb)) 49 drm_framebuffer_unreference(val);
52 drm_framebuffer_unreference(fb);
53 mutex_unlock(&dev->mode_config.mutex); 50 mutex_unlock(&dev->mode_config.mutex);
54} 51}
55 52
@@ -68,19 +65,14 @@ static void set_scanout(struct drm_crtc *crtc, int n)
68 }; 65 };
69 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 66 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
70 struct drm_device *dev = crtc->dev; 67 struct drm_device *dev = crtc->dev;
68 struct tilcdc_drm_private *priv = dev->dev_private;
71 69
72 pm_runtime_get_sync(dev->dev); 70 pm_runtime_get_sync(dev->dev);
73 tilcdc_write(dev, base_reg[n], tilcdc_crtc->start); 71 tilcdc_write(dev, base_reg[n], tilcdc_crtc->start);
74 tilcdc_write(dev, ceil_reg[n], tilcdc_crtc->end); 72 tilcdc_write(dev, ceil_reg[n], tilcdc_crtc->end);
75 if (tilcdc_crtc->scanout[n]) { 73 if (tilcdc_crtc->scanout[n]) {
76 if (kfifo_put(&tilcdc_crtc->unref_fifo, 74 drm_flip_work_queue(&tilcdc_crtc->unref_work, tilcdc_crtc->scanout[n]);
77 (const struct drm_framebuffer **)&tilcdc_crtc->scanout[n])) { 75 drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
78 struct tilcdc_drm_private *priv = dev->dev_private;
79 queue_work(priv->wq, &tilcdc_crtc->work);
80 } else {
81 dev_err(dev->dev, "unref fifo full!\n");
82 drm_framebuffer_unreference(tilcdc_crtc->scanout[n]);
83 }
84 } 76 }
85 tilcdc_crtc->scanout[n] = crtc->fb; 77 tilcdc_crtc->scanout[n] = crtc->fb;
86 drm_framebuffer_reference(tilcdc_crtc->scanout[n]); 78 drm_framebuffer_reference(tilcdc_crtc->scanout[n]);
@@ -149,14 +141,15 @@ static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
149 WARN_ON(tilcdc_crtc->dpms == DRM_MODE_DPMS_ON); 141 WARN_ON(tilcdc_crtc->dpms == DRM_MODE_DPMS_ON);
150 142
151 drm_crtc_cleanup(crtc); 143 drm_crtc_cleanup(crtc);
152 WARN_ON(!kfifo_is_empty(&tilcdc_crtc->unref_fifo)); 144 drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
153 kfifo_free(&tilcdc_crtc->unref_fifo); 145
154 kfree(tilcdc_crtc); 146 kfree(tilcdc_crtc);
155} 147}
156 148
157static int tilcdc_crtc_page_flip(struct drm_crtc *crtc, 149static int tilcdc_crtc_page_flip(struct drm_crtc *crtc,
158 struct drm_framebuffer *fb, 150 struct drm_framebuffer *fb,
159 struct drm_pending_vblank_event *event) 151 struct drm_pending_vblank_event *event,
152 uint32_t page_flip_flags)
160{ 153{
161 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 154 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
162 struct drm_device *dev = crtc->dev; 155 struct drm_device *dev = crtc->dev;
@@ -379,7 +372,12 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
379 else 372 else
380 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE); 373 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
381 374
382 if (mode->flags & DRM_MODE_FLAG_NHSYNC) 375 /*
376 * use value from adjusted_mode here as this might have been
377 * changed as part of the fixup for slave encoders to solve the
378 * issue where tilcdc timings are not VESA compliant
379 */
380 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
383 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC); 381 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
384 else 382 else
385 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC); 383 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
@@ -666,14 +664,13 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
666 tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF; 664 tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF;
667 init_waitqueue_head(&tilcdc_crtc->frame_done_wq); 665 init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
668 666
669 ret = kfifo_alloc(&tilcdc_crtc->unref_fifo, 16, GFP_KERNEL); 667 ret = drm_flip_work_init(&tilcdc_crtc->unref_work, 16,
668 "unref", unref_worker);
670 if (ret) { 669 if (ret) {
671 dev_err(dev->dev, "could not allocate unref FIFO\n"); 670 dev_err(dev->dev, "could not allocate unref FIFO\n");
672 goto fail; 671 goto fail;
673 } 672 }
674 673
675 INIT_WORK(&tilcdc_crtc->work, unref_worker);
676
677 ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs); 674 ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs);
678 if (ret < 0) 675 if (ret < 0)
679 goto fail; 676 goto fail;
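
A minimal sketch of the drm_flip_work pattern used above, assuming a driver workqueue called wq and a stale framebuffer pointer old_fb; only calls visible in this patch are used.

    static void example_unref_worker(struct drm_flip_work *work, void *val)
    {
            drm_framebuffer_unreference(val);   /* runs later, in process context */
    }

    /* setup: room for 16 queued items, named "unref" for debugging */
    ret = drm_flip_work_init(&unref_work, 16, "unref", example_unref_worker);

    /* producer side, e.g. when a scanout buffer is replaced */
    drm_flip_work_queue(&unref_work, old_fb);   /* stash the stale fb */
    drm_flip_work_commit(&unref_work, wq);      /* kick the worker */

    /* teardown */
    drm_flip_work_cleanup(&unref_work);
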
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 40b71da5a214..116da199b942 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -497,7 +497,6 @@ static const struct file_operations fops = {
497#endif 497#endif
498 .poll = drm_poll, 498 .poll = drm_poll,
499 .read = drm_read, 499 .read = drm_read,
500 .fasync = drm_fasync,
501 .llseek = no_llseek, 500 .llseek = no_llseek,
502 .mmap = drm_gem_cma_mmap, 501 .mmap = drm_gem_cma_mmap,
503}; 502};
@@ -519,7 +518,7 @@ static struct drm_driver tilcdc_driver = {
519 .gem_vm_ops = &drm_gem_cma_vm_ops, 518 .gem_vm_ops = &drm_gem_cma_vm_ops,
520 .dumb_create = drm_gem_cma_dumb_create, 519 .dumb_create = drm_gem_cma_dumb_create,
521 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 520 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
522 .dumb_destroy = drm_gem_cma_dumb_destroy, 521 .dumb_destroy = drm_gem_dumb_destroy,
523#ifdef CONFIG_DEBUG_FS 522#ifdef CONFIG_DEBUG_FS
524 .debugfs_init = tilcdc_debugfs_init, 523 .debugfs_init = tilcdc_debugfs_init,
525 .debugfs_cleanup = tilcdc_debugfs_cleanup, 524 .debugfs_cleanup = tilcdc_debugfs_cleanup,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
index a19f657dfa59..595068ba2d5e 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
@@ -72,13 +72,38 @@ static void slave_encoder_prepare(struct drm_encoder *encoder)
72 tilcdc_crtc_set_panel_info(encoder->crtc, &slave_info); 72 tilcdc_crtc_set_panel_info(encoder->crtc, &slave_info);
73} 73}
74 74
75static bool slave_encoder_fixup(struct drm_encoder *encoder,
76 const struct drm_display_mode *mode,
77 struct drm_display_mode *adjusted_mode)
78{
79 /*
 80 * tilcdc does not generate VESA-compliant sync but aligns
81 * VS on the second edge of HS instead of first edge.
 82 * We use adjusted_mode to fix up the sync by aligning both rising
 83 * edges and adding an HSKEW offset to let the slave encoder fix it up.
84 */
85 adjusted_mode->hskew = mode->hsync_end - mode->hsync_start;
86 adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;
87
88 if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
89 adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
90 adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC;
91 } else {
92 adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC;
93 adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC;
94 }
95
96 return drm_i2c_encoder_mode_fixup(encoder, mode, adjusted_mode);
97}
98
99
75static const struct drm_encoder_funcs slave_encoder_funcs = { 100static const struct drm_encoder_funcs slave_encoder_funcs = {
76 .destroy = slave_encoder_destroy, 101 .destroy = slave_encoder_destroy,
77}; 102};
78 103
79static const struct drm_encoder_helper_funcs slave_encoder_helper_funcs = { 104static const struct drm_encoder_helper_funcs slave_encoder_helper_funcs = {
80 .dpms = drm_i2c_encoder_dpms, 105 .dpms = drm_i2c_encoder_dpms,
81 .mode_fixup = drm_i2c_encoder_mode_fixup, 106 .mode_fixup = slave_encoder_fixup,
82 .prepare = slave_encoder_prepare, 107 .prepare = slave_encoder_prepare,
83 .commit = drm_i2c_encoder_commit, 108 .commit = drm_i2c_encoder_commit,
84 .mode_set = drm_i2c_encoder_mode_set, 109 .mode_set = drm_i2c_encoder_mode_set,
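
A short worked example of the fixup above, with made-up timings (not from the patch):

    /* mode->hsync_start = 1000, mode->hsync_end = 1048
     *   adjusted_mode->hskew  = 1048 - 1000 = 48 pixels
     *   adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW
     *   NHSYNC is swapped for PHSYNC (or the reverse), so both rising edges
     *   align and the slave encoder re-applies the 48-pixel skew itself.
     */
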
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cb9dd674670c..f1a857ec1021 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -45,7 +45,6 @@
45#define TTM_DEBUG(fmt, arg...) 45#define TTM_DEBUG(fmt, arg...)
46#define TTM_BO_HASH_ORDER 13 46#define TTM_BO_HASH_ORDER 13
47 47
48static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
49static int ttm_bo_swapout(struct ttm_mem_shrink *shrink); 48static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
50static void ttm_bo_global_kobj_release(struct kobject *kobj); 49static void ttm_bo_global_kobj_release(struct kobject *kobj);
51 50
@@ -615,13 +614,7 @@ static void ttm_bo_release(struct kref *kref)
615 struct ttm_bo_device *bdev = bo->bdev; 614 struct ttm_bo_device *bdev = bo->bdev;
616 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; 615 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
617 616
618 write_lock(&bdev->vm_lock); 617 drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
619 if (likely(bo->vm_node != NULL)) {
620 rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
621 drm_mm_put_block(bo->vm_node);
622 bo->vm_node = NULL;
623 }
624 write_unlock(&bdev->vm_lock);
625 ttm_mem_io_lock(man, false); 618 ttm_mem_io_lock(man, false);
626 ttm_mem_io_free_vm(bo); 619 ttm_mem_io_free_vm(bo);
627 ttm_mem_io_unlock(man); 620 ttm_mem_io_unlock(man);
@@ -1129,6 +1122,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1129 bo->resv = &bo->ttm_resv; 1122 bo->resv = &bo->ttm_resv;
1130 reservation_object_init(bo->resv); 1123 reservation_object_init(bo->resv);
1131 atomic_inc(&bo->glob->bo_count); 1124 atomic_inc(&bo->glob->bo_count);
1125 drm_vma_node_reset(&bo->vma_node);
1132 1126
1133 ret = ttm_bo_check_placement(bo, placement); 1127 ret = ttm_bo_check_placement(bo, placement);
1134 1128
@@ -1139,7 +1133,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1139 if (likely(!ret) && 1133 if (likely(!ret) &&
1140 (bo->type == ttm_bo_type_device || 1134 (bo->type == ttm_bo_type_device ||
1141 bo->type == ttm_bo_type_sg)) 1135 bo->type == ttm_bo_type_sg))
1142 ret = ttm_bo_setup_vm(bo); 1136 ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
1137 bo->mem.num_pages);
1143 1138
1144 locked = ww_mutex_trylock(&bo->resv->lock); 1139 locked = ww_mutex_trylock(&bo->resv->lock);
1145 WARN_ON(!locked); 1140 WARN_ON(!locked);
@@ -1424,10 +1419,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
1424 TTM_DEBUG("Swap list was clean\n"); 1419 TTM_DEBUG("Swap list was clean\n");
1425 spin_unlock(&glob->lru_lock); 1420 spin_unlock(&glob->lru_lock);
1426 1421
1427 BUG_ON(!drm_mm_clean(&bdev->addr_space_mm)); 1422 drm_vma_offset_manager_destroy(&bdev->vma_manager);
1428 write_lock(&bdev->vm_lock);
1429 drm_mm_takedown(&bdev->addr_space_mm);
1430 write_unlock(&bdev->vm_lock);
1431 1423
1432 return ret; 1424 return ret;
1433} 1425}
@@ -1441,7 +1433,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
1441{ 1433{
1442 int ret = -EINVAL; 1434 int ret = -EINVAL;
1443 1435
1444 rwlock_init(&bdev->vm_lock);
1445 bdev->driver = driver; 1436 bdev->driver = driver;
1446 1437
1447 memset(bdev->man, 0, sizeof(bdev->man)); 1438 memset(bdev->man, 0, sizeof(bdev->man));
@@ -1454,9 +1445,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
1454 if (unlikely(ret != 0)) 1445 if (unlikely(ret != 0))
1455 goto out_no_sys; 1446 goto out_no_sys;
1456 1447
1457 bdev->addr_space_rb = RB_ROOT; 1448 drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
1458 drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000); 1449 0x10000000);
1459
1460 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); 1450 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1461 INIT_LIST_HEAD(&bdev->ddestroy); 1451 INIT_LIST_HEAD(&bdev->ddestroy);
1462 bdev->dev_mapping = NULL; 1452 bdev->dev_mapping = NULL;
@@ -1498,12 +1488,8 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1498void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) 1488void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
1499{ 1489{
1500 struct ttm_bo_device *bdev = bo->bdev; 1490 struct ttm_bo_device *bdev = bo->bdev;
1501 loff_t offset = (loff_t) bo->addr_space_offset;
1502 loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
1503 1491
1504 if (!bdev->dev_mapping) 1492 drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
1505 return;
1506 unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
1507 ttm_mem_io_free_vm(bo); 1493 ttm_mem_io_free_vm(bo);
1508} 1494}
1509 1495
@@ -1520,78 +1506,6 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1520 1506
1521EXPORT_SYMBOL(ttm_bo_unmap_virtual); 1507EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1522 1508
1523static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1524{
1525 struct ttm_bo_device *bdev = bo->bdev;
1526 struct rb_node **cur = &bdev->addr_space_rb.rb_node;
1527 struct rb_node *parent = NULL;
1528 struct ttm_buffer_object *cur_bo;
1529 unsigned long offset = bo->vm_node->start;
1530 unsigned long cur_offset;
1531
1532 while (*cur) {
1533 parent = *cur;
1534 cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
1535 cur_offset = cur_bo->vm_node->start;
1536 if (offset < cur_offset)
1537 cur = &parent->rb_left;
1538 else if (offset > cur_offset)
1539 cur = &parent->rb_right;
1540 else
1541 BUG();
1542 }
1543
1544 rb_link_node(&bo->vm_rb, parent, cur);
1545 rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
1546}
1547
1548/**
1549 * ttm_bo_setup_vm:
1550 *
1551 * @bo: the buffer to allocate address space for
1552 *
1553 * Allocate address space in the drm device so that applications
1554 * can mmap the buffer and access the contents. This only
1555 * applies to ttm_bo_type_device objects as others are not
1556 * placed in the drm device address space.
1557 */
1558
1559static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
1560{
1561 struct ttm_bo_device *bdev = bo->bdev;
1562 int ret;
1563
1564retry_pre_get:
1565 ret = drm_mm_pre_get(&bdev->addr_space_mm);
1566 if (unlikely(ret != 0))
1567 return ret;
1568
1569 write_lock(&bdev->vm_lock);
1570 bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
1571 bo->mem.num_pages, 0, 0);
1572
1573 if (unlikely(bo->vm_node == NULL)) {
1574 ret = -ENOMEM;
1575 goto out_unlock;
1576 }
1577
1578 bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
1579 bo->mem.num_pages, 0);
1580
1581 if (unlikely(bo->vm_node == NULL)) {
1582 write_unlock(&bdev->vm_lock);
1583 goto retry_pre_get;
1584 }
1585
1586 ttm_bo_vm_insert_rb(bo);
1587 write_unlock(&bdev->vm_lock);
1588 bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
1589
1590 return 0;
1591out_unlock:
1592 write_unlock(&bdev->vm_lock);
1593 return ret;
1594}
1595 1509
1596int ttm_bo_wait(struct ttm_buffer_object *bo, 1510int ttm_bo_wait(struct ttm_buffer_object *bo,
1597 bool lazy, bool interruptible, bool no_wait) 1511 bool lazy, bool interruptible, bool no_wait)
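
A sketch of the drm_vma_offset_manager lifecycle the TTM code above now relies on; the bdev/bo fields match the hunks, error handling is elided.

    /* device init: manage mmap page offsets starting at file_page_offset */
    drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset, 0x10000000);

    /* object init: reset the node, then reserve num_pages of offset space */
    drm_vma_node_reset(&bo->vma_node);
    ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
                             bo->mem.num_pages);

    /* unmap and teardown: drop CPU mappings, release the offset, destroy */
    drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
    drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
    drm_vma_offset_manager_destroy(&bdev->vma_manager);
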
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index e4367f91472a..c58eba33bd5f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -61,28 +61,25 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
61 lpfn = placement->lpfn; 61 lpfn = placement->lpfn;
62 if (!lpfn) 62 if (!lpfn)
63 lpfn = man->size; 63 lpfn = man->size;
64 do {
65 ret = drm_mm_pre_get(mm);
66 if (unlikely(ret))
67 return ret;
68 64
69 spin_lock(&rman->lock); 65 node = kzalloc(sizeof(*node), GFP_KERNEL);
70 node = drm_mm_search_free_in_range(mm, 66 if (!node)
71 mem->num_pages, mem->page_alignment, 67 return -ENOMEM;
72 placement->fpfn, lpfn, 1); 68
73 if (unlikely(node == NULL)) { 69 spin_lock(&rman->lock);
74 spin_unlock(&rman->lock); 70 ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
75 return 0; 71 mem->page_alignment,
76 } 72 placement->fpfn, lpfn,
77 node = drm_mm_get_block_atomic_range(node, mem->num_pages, 73 DRM_MM_SEARCH_BEST);
78 mem->page_alignment, 74 spin_unlock(&rman->lock);
79 placement->fpfn, 75
80 lpfn); 76 if (unlikely(ret)) {
81 spin_unlock(&rman->lock); 77 kfree(node);
82 } while (node == NULL); 78 } else {
79 mem->mm_node = node;
80 mem->start = node->start;
81 }
83 82
84 mem->mm_node = node;
85 mem->start = node->start;
86 return 0; 83 return 0;
87} 84}
88 85
@@ -93,8 +90,10 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
93 90
94 if (mem->mm_node) { 91 if (mem->mm_node) {
95 spin_lock(&rman->lock); 92 spin_lock(&rman->lock);
96 drm_mm_put_block(mem->mm_node); 93 drm_mm_remove_node(mem->mm_node);
97 spin_unlock(&rman->lock); 94 spin_unlock(&rman->lock);
95
96 kfree(mem->mm_node);
98 mem->mm_node = NULL; 97 mem->mm_node = NULL;
99 } 98 }
100} 99}
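
This series uses two drm_mm search modes; as far as I can tell (the patch itself does not spell this out), DRM_MM_SEARCH_DEFAULT takes the first hole large enough while DRM_MM_SEARCH_BEST picks the smallest hole that still fits, which suits a VRAM manager trying to limit fragmentation. A range-restricted insertion then mirrors the call above:

    ret = drm_mm_insert_node_in_range(mm, node, num_pages, page_alignment,
                                      fpfn, lpfn, DRM_MM_SEARCH_BEST);
    if (ret)        /* typically -ENOSPC: no hole in [fpfn, lpfn) fits */
            kfree(node);
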
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 319cf4127c5b..7cc904d3a4d1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -30,6 +30,7 @@
30 30
31#include <drm/ttm/ttm_bo_driver.h> 31#include <drm/ttm/ttm_bo_driver.h>
32#include <drm/ttm/ttm_placement.h> 32#include <drm/ttm/ttm_placement.h>
33#include <drm/drm_vma_manager.h>
33#include <linux/io.h> 34#include <linux/io.h>
34#include <linux/highmem.h> 35#include <linux/highmem.h>
35#include <linux/wait.h> 36#include <linux/wait.h>
@@ -450,7 +451,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
450 INIT_LIST_HEAD(&fbo->lru); 451 INIT_LIST_HEAD(&fbo->lru);
451 INIT_LIST_HEAD(&fbo->swap); 452 INIT_LIST_HEAD(&fbo->swap);
452 INIT_LIST_HEAD(&fbo->io_reserve_lru); 453 INIT_LIST_HEAD(&fbo->io_reserve_lru);
453 fbo->vm_node = NULL; 454 drm_vma_node_reset(&fbo->vma_node);
454 atomic_set(&fbo->cpu_writers, 0); 455 atomic_set(&fbo->cpu_writers, 0);
455 456
456 spin_lock(&bdev->fence_lock); 457 spin_lock(&bdev->fence_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 3df9f16b041c..1006c15445e9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -33,6 +33,7 @@
33#include <ttm/ttm_module.h> 33#include <ttm/ttm_module.h>
34#include <ttm/ttm_bo_driver.h> 34#include <ttm/ttm_bo_driver.h>
35#include <ttm/ttm_placement.h> 35#include <ttm/ttm_placement.h>
36#include <drm/drm_vma_manager.h>
36#include <linux/mm.h> 37#include <linux/mm.h>
37#include <linux/rbtree.h> 38#include <linux/rbtree.h>
38#include <linux/module.h> 39#include <linux/module.h>
@@ -40,37 +41,6 @@
40 41
41#define TTM_BO_VM_NUM_PREFAULT 16 42#define TTM_BO_VM_NUM_PREFAULT 16
42 43
43static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
44 unsigned long page_start,
45 unsigned long num_pages)
46{
47 struct rb_node *cur = bdev->addr_space_rb.rb_node;
48 unsigned long cur_offset;
49 struct ttm_buffer_object *bo;
50 struct ttm_buffer_object *best_bo = NULL;
51
52 while (likely(cur != NULL)) {
53 bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
54 cur_offset = bo->vm_node->start;
55 if (page_start >= cur_offset) {
56 cur = cur->rb_right;
57 best_bo = bo;
58 if (page_start == cur_offset)
59 break;
60 } else
61 cur = cur->rb_left;
62 }
63
64 if (unlikely(best_bo == NULL))
65 return NULL;
66
67 if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
68 (page_start + num_pages)))
69 return NULL;
70
71 return best_bo;
72}
73
74static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 44static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
75{ 45{
76 struct ttm_buffer_object *bo = (struct ttm_buffer_object *) 46 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -146,9 +116,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
146 } 116 }
147 117
148 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + 118 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
149 bo->vm_node->start - vma->vm_pgoff; 119 drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
150 page_last = vma_pages(vma) + 120 page_last = vma_pages(vma) +
151 bo->vm_node->start - vma->vm_pgoff; 121 drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
152 122
153 if (unlikely(page_offset >= bo->num_pages)) { 123 if (unlikely(page_offset >= bo->num_pages)) {
154 retval = VM_FAULT_SIGBUS; 124 retval = VM_FAULT_SIGBUS;
@@ -249,6 +219,30 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
249 .close = ttm_bo_vm_close 219 .close = ttm_bo_vm_close
250}; 220};
251 221
222static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
223 unsigned long offset,
224 unsigned long pages)
225{
226 struct drm_vma_offset_node *node;
227 struct ttm_buffer_object *bo = NULL;
228
229 drm_vma_offset_lock_lookup(&bdev->vma_manager);
230
231 node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
232 if (likely(node)) {
233 bo = container_of(node, struct ttm_buffer_object, vma_node);
234 if (!kref_get_unless_zero(&bo->kref))
235 bo = NULL;
236 }
237
238 drm_vma_offset_unlock_lookup(&bdev->vma_manager);
239
240 if (!bo)
241 pr_err("Could not find buffer object to map\n");
242
243 return bo;
244}
245
252int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, 246int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
253 struct ttm_bo_device *bdev) 247 struct ttm_bo_device *bdev)
254{ 248{
@@ -256,17 +250,9 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
256 struct ttm_buffer_object *bo; 250 struct ttm_buffer_object *bo;
257 int ret; 251 int ret;
258 252
259 read_lock(&bdev->vm_lock); 253 bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
260 bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff, 254 if (unlikely(!bo))
261 vma_pages(vma));
262 if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
263 bo = NULL;
264 read_unlock(&bdev->vm_lock);
265
266 if (unlikely(bo == NULL)) {
267 pr_err("Could not find buffer object to map\n");
268 return -EINVAL; 255 return -EINVAL;
269 }
270 256
271 driver = bo->bdev->driver; 257 driver = bo->bdev->driver;
272 if (unlikely(!driver->verify_access)) { 258 if (unlikely(!driver->verify_access)) {
@@ -304,162 +290,3 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
304 return 0; 290 return 0;
305} 291}
306EXPORT_SYMBOL(ttm_fbdev_mmap); 292EXPORT_SYMBOL(ttm_fbdev_mmap);
307
308
309ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
310 const char __user *wbuf, char __user *rbuf, size_t count,
311 loff_t *f_pos, bool write)
312{
313 struct ttm_buffer_object *bo;
314 struct ttm_bo_driver *driver;
315 struct ttm_bo_kmap_obj map;
316 unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
317 unsigned long kmap_offset;
318 unsigned long kmap_end;
319 unsigned long kmap_num;
320 size_t io_size;
321 unsigned int page_offset;
322 char *virtual;
323 int ret;
324 bool no_wait = false;
325 bool dummy;
326
327 read_lock(&bdev->vm_lock);
328 bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
329 if (likely(bo != NULL))
330 ttm_bo_reference(bo);
331 read_unlock(&bdev->vm_lock);
332
333 if (unlikely(bo == NULL))
334 return -EFAULT;
335
336 driver = bo->bdev->driver;
337 if (unlikely(!driver->verify_access)) {
338 ret = -EPERM;
339 goto out_unref;
340 }
341
342 ret = driver->verify_access(bo, filp);
343 if (unlikely(ret != 0))
344 goto out_unref;
345
346 kmap_offset = dev_offset - bo->vm_node->start;
347 if (unlikely(kmap_offset >= bo->num_pages)) {
348 ret = -EFBIG;
349 goto out_unref;
350 }
351
352 page_offset = *f_pos & ~PAGE_MASK;
353 io_size = bo->num_pages - kmap_offset;
354 io_size = (io_size << PAGE_SHIFT) - page_offset;
355 if (count < io_size)
356 io_size = count;
357
358 kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
359 kmap_num = kmap_end - kmap_offset + 1;
360
361 ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
362
363 switch (ret) {
364 case 0:
365 break;
366 case -EBUSY:
367 ret = -EAGAIN;
368 goto out_unref;
369 default:
370 goto out_unref;
371 }
372
373 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
374 if (unlikely(ret != 0)) {
375 ttm_bo_unreserve(bo);
376 goto out_unref;
377 }
378
379 virtual = ttm_kmap_obj_virtual(&map, &dummy);
380 virtual += page_offset;
381
382 if (write)
383 ret = copy_from_user(virtual, wbuf, io_size);
384 else
385 ret = copy_to_user(rbuf, virtual, io_size);
386
387 ttm_bo_kunmap(&map);
388 ttm_bo_unreserve(bo);
389 ttm_bo_unref(&bo);
390
391 if (unlikely(ret != 0))
392 return -EFBIG;
393
394 *f_pos += io_size;
395
396 return io_size;
397out_unref:
398 ttm_bo_unref(&bo);
399 return ret;
400}
401
402ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
403 char __user *rbuf, size_t count, loff_t *f_pos,
404 bool write)
405{
406 struct ttm_bo_kmap_obj map;
407 unsigned long kmap_offset;
408 unsigned long kmap_end;
409 unsigned long kmap_num;
410 size_t io_size;
411 unsigned int page_offset;
412 char *virtual;
413 int ret;
414 bool no_wait = false;
415 bool dummy;
416
417 kmap_offset = (*f_pos >> PAGE_SHIFT);
418 if (unlikely(kmap_offset >= bo->num_pages))
419 return -EFBIG;
420
421 page_offset = *f_pos & ~PAGE_MASK;
422 io_size = bo->num_pages - kmap_offset;
423 io_size = (io_size << PAGE_SHIFT) - page_offset;
424 if (count < io_size)
425 io_size = count;
426
427 kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
428 kmap_num = kmap_end - kmap_offset + 1;
429
430 ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
431
432 switch (ret) {
433 case 0:
434 break;
435 case -EBUSY:
436 return -EAGAIN;
437 default:
438 return ret;
439 }
440
441 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
442 if (unlikely(ret != 0)) {
443 ttm_bo_unreserve(bo);
444 return ret;
445 }
446
447 virtual = ttm_kmap_obj_virtual(&map, &dummy);
448 virtual += page_offset;
449
450 if (write)
451 ret = copy_from_user(virtual, wbuf, io_size);
452 else
453 ret = copy_to_user(rbuf, virtual, io_size);
454
455 ttm_bo_kunmap(&map);
456 ttm_bo_unreserve(bo);
457 ttm_bo_unref(&bo);
458
459 if (unlikely(ret != 0))
460 return ret;
461
462 *f_pos += io_size;
463
464 return io_size;
465}
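
With bo->addr_space_offset gone, the byte offset userspace passes to mmap() is derived from the vma node. A one-line sketch of the equivalent computation; drm_vma_node_offset_addr() is assumed to be the convenience helper for this and is not shown in the patch.

    u64 mmap_offset = (u64)drm_vma_node_start(&bo->vma_node) << PAGE_SHIFT;
    /* or, presumably: mmap_offset = drm_vma_node_offset_addr(&bo->vma_node); */
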
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index c0770dbba74a..7650dc0d78ce 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -65,7 +65,6 @@ static const struct file_operations udl_driver_fops = {
65 .read = drm_read, 65 .read = drm_read,
66 .unlocked_ioctl = drm_ioctl, 66 .unlocked_ioctl = drm_ioctl,
67 .release = drm_release, 67 .release = drm_release,
68 .fasync = drm_fasync,
69#ifdef CONFIG_COMPAT 68#ifdef CONFIG_COMPAT
70 .compat_ioctl = drm_compat_ioctl, 69 .compat_ioctl = drm_compat_ioctl,
71#endif 70#endif
@@ -84,7 +83,7 @@ static struct drm_driver driver = {
84 83
85 .dumb_create = udl_dumb_create, 84 .dumb_create = udl_dumb_create,
86 .dumb_map_offset = udl_gem_mmap, 85 .dumb_map_offset = udl_gem_mmap,
87 .dumb_destroy = udl_dumb_destroy, 86 .dumb_destroy = drm_gem_dumb_destroy,
88 .fops = &udl_driver_fops, 87 .fops = &udl_driver_fops,
89 88
90 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 89 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index cc6d90f28c71..56aec9409fa3 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -114,8 +114,6 @@ int udl_dumb_create(struct drm_file *file_priv,
114 struct drm_mode_create_dumb *args); 114 struct drm_mode_create_dumb *args);
115int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev, 115int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
116 uint32_t handle, uint64_t *offset); 116 uint32_t handle, uint64_t *offset);
117int udl_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
118 uint32_t handle);
119 117
120int udl_gem_init_object(struct drm_gem_object *obj); 118int udl_gem_init_object(struct drm_gem_object *obj);
121void udl_gem_free_object(struct drm_gem_object *gem_obj); 119void udl_gem_free_object(struct drm_gem_object *gem_obj);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index ef034fa3e6f5..8dbe9d0ae9a7 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -66,12 +66,6 @@ int udl_dumb_create(struct drm_file *file,
66 args->size, &args->handle); 66 args->size, &args->handle);
67} 67}
68 68
69int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
70 uint32_t handle)
71{
72 return drm_gem_handle_delete(file, handle);
73}
74
75int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) 69int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
76{ 70{
77 int ret; 71 int ret;
@@ -123,55 +117,23 @@ int udl_gem_init_object(struct drm_gem_object *obj)
123 117
124static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask) 118static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
125{ 119{
126 int page_count, i; 120 struct page **pages;
127 struct page *page;
128 struct inode *inode;
129 struct address_space *mapping;
130 121
131 if (obj->pages) 122 if (obj->pages)
132 return 0; 123 return 0;
133 124
134 page_count = obj->base.size / PAGE_SIZE; 125 pages = drm_gem_get_pages(&obj->base, gfpmask);
135 BUG_ON(obj->pages != NULL); 126 if (IS_ERR(pages))
136 obj->pages = drm_malloc_ab(page_count, sizeof(struct page *)); 127 return PTR_ERR(pages);
137 if (obj->pages == NULL)
138 return -ENOMEM;
139 128
140 inode = file_inode(obj->base.filp); 129 obj->pages = pages;
141 mapping = inode->i_mapping;
142 gfpmask |= mapping_gfp_mask(mapping);
143
144 for (i = 0; i < page_count; i++) {
145 page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
146 if (IS_ERR(page))
147 goto err_pages;
148 obj->pages[i] = page;
149 }
150 130
151 return 0; 131 return 0;
152err_pages:
153 while (i--)
154 page_cache_release(obj->pages[i]);
155 drm_free_large(obj->pages);
156 obj->pages = NULL;
157 return PTR_ERR(page);
158} 132}
159 133
160static void udl_gem_put_pages(struct udl_gem_object *obj) 134static void udl_gem_put_pages(struct udl_gem_object *obj)
161{ 135{
162 int page_count = obj->base.size / PAGE_SIZE; 136 drm_gem_put_pages(&obj->base, obj->pages, false, false);
163 int i;
164
165 if (obj->base.import_attach) {
166 drm_free_large(obj->pages);
167 obj->pages = NULL;
168 return;
169 }
170
171 for (i = 0; i < page_count; i++)
172 page_cache_release(obj->pages[i]);
173
174 drm_free_large(obj->pages);
175 obj->pages = NULL; 137 obj->pages = NULL;
176} 138}
177 139
@@ -223,8 +185,7 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
223 if (obj->pages) 185 if (obj->pages)
224 udl_gem_put_pages(obj); 186 udl_gem_put_pages(obj);
225 187
226 if (gem_obj->map_list.map) 188 drm_gem_free_mmap_offset(gem_obj);
227 drm_gem_free_mmap_offset(gem_obj);
228} 189}
229 190
230/* the dumb interface doesn't work with the GEM straight MMAP 191/* the dumb interface doesn't work with the GEM straight MMAP
@@ -247,13 +208,11 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
247 ret = udl_gem_get_pages(gobj, GFP_KERNEL); 208 ret = udl_gem_get_pages(gobj, GFP_KERNEL);
248 if (ret) 209 if (ret)
249 goto out; 210 goto out;
250 if (!gobj->base.map_list.map) { 211 ret = drm_gem_create_mmap_offset(obj);
251 ret = drm_gem_create_mmap_offset(obj); 212 if (ret)
252 if (ret) 213 goto out;
253 goto out;
254 }
255 214
256 *offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT; 215 *offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
257 216
258out: 217out:
259 drm_gem_object_unreference(&gobj->base); 218 drm_gem_object_unreference(&gobj->base);
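The udl, tegra and vmwgfx changes in this series all follow the same pattern for reporting fake mmap offsets: create the offset lazily with drm_gem_create_mmap_offset() and read it back with drm_vma_node_offset_addr(). A sketch of that pattern, using only drm-core calls (the foo_ naming is hypothetical):

/* Sketch of a dumb_map_offset implementation on top of the new vma manager. */
static int foo_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                               uint32_t handle, uint64_t *offset)
{
        struct drm_gem_object *gem;
        int ret;

        gem = drm_gem_object_lookup(dev, file, handle);
        if (!gem)
                return -ENOENT;

        ret = drm_gem_create_mmap_offset(gem);  /* no-op if the node already exists */
        if (ret)
                goto out;

        *offset = drm_vma_node_offset_addr(&gem->vma_node);
out:
        drm_gem_object_unreference_unlocked(gem);
        return ret;
}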
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 0ce2d7195256..f5ae57406f34 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -41,8 +41,8 @@ static int udl_parse_vendor_descriptor(struct drm_device *dev,
41 total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */ 41 total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
42 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE); 42 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
43 if (total_len > 5) { 43 if (total_len > 5) {
44 DRM_INFO("vendor descriptor length:%x data:%*ph\n", 44 DRM_INFO("vendor descriptor length:%x data:%11ph\n",
45 total_len, 11, desc); 45 total_len, desc);
46 46
47 if ((desc[0] != total_len) || /* descriptor length */ 47 if ((desc[0] != total_len) || /* descriptor length */
48 (desc[1] != 0x5f) || /* vendor descriptor type */ 48 (desc[1] != 0x5f) || /* vendor descriptor type */
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index 13558f5a2422..652f9b43ec9d 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -720,7 +720,7 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
720 return ret; 720 return ret;
721} 721}
722 722
723struct drm_ioctl_desc via_ioctls[] = { 723const struct drm_ioctl_desc via_ioctls[] = {
724 DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH), 724 DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
725 DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH), 725 DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
726 DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER), 726 DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index f4ae20327941..92684a9b7e34 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -64,7 +64,6 @@ static const struct file_operations via_driver_fops = {
64 .unlocked_ioctl = drm_ioctl, 64 .unlocked_ioctl = drm_ioctl,
65 .mmap = drm_mmap, 65 .mmap = drm_mmap,
66 .poll = drm_poll, 66 .poll = drm_poll,
67 .fasync = drm_fasync,
68#ifdef CONFIG_COMPAT 67#ifdef CONFIG_COMPAT
69 .compat_ioctl = drm_compat_ioctl, 68 .compat_ioctl = drm_compat_ioctl,
70#endif 69#endif
@@ -73,7 +72,7 @@ static const struct file_operations via_driver_fops = {
73 72
74static struct drm_driver driver = { 73static struct drm_driver driver = {
75 .driver_features = 74 .driver_features =
76 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | 75 DRIVER_USE_AGP | DRIVER_HAVE_IRQ |
77 DRIVER_IRQ_SHARED, 76 DRIVER_IRQ_SHARED,
78 .load = via_driver_load, 77 .load = via_driver_load,
79 .unload = via_driver_unload, 78 .unload = via_driver_unload,
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index 893a65090c36..a811ef2b505f 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -114,7 +114,7 @@ enum via_family {
114#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg) 114#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg)
115#define VIA_WRITE8(reg, val) DRM_WRITE8(VIA_BASE, reg, val) 115#define VIA_WRITE8(reg, val) DRM_WRITE8(VIA_BASE, reg, val)
116 116
117extern struct drm_ioctl_desc via_ioctls[]; 117extern const struct drm_ioctl_desc via_ioctls[];
118extern int via_max_ioctl; 118extern int via_max_ioctl;
119 119
120extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv); 120extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 0ab93ff09873..7e3ad87c366c 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -140,11 +140,11 @@ int via_mem_alloc(struct drm_device *dev, void *data,
140 if (mem->type == VIA_MEM_AGP) 140 if (mem->type == VIA_MEM_AGP)
141 retval = drm_mm_insert_node(&dev_priv->agp_mm, 141 retval = drm_mm_insert_node(&dev_priv->agp_mm,
142 &item->mm_node, 142 &item->mm_node,
143 tmpSize, 0); 143 tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
144 else 144 else
145 retval = drm_mm_insert_node(&dev_priv->vram_mm, 145 retval = drm_mm_insert_node(&dev_priv->vram_mm,
146 &item->mm_node, 146 &item->mm_node,
147 tmpSize, 0); 147 tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
148 if (retval) 148 if (retval)
149 goto fail_alloc; 149 goto fail_alloc;
150 150
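drm_mm_insert_node() gained a search-flags argument in this series; callers that just want the previous allocation policy pass DRM_MM_SEARCH_DEFAULT, as both call sites above now do. In isolation the pattern looks like:

/* Allocate "size" bytes from a drm_mm range manager with the new signature;
 * alignment 0 plus DRM_MM_SEARCH_DEFAULT keeps the previous behaviour. */
struct drm_mm_node node;
int err;

memset(&node, 0, sizeof(node));
err = drm_mm_insert_node(&mm, &node, size, 0, DRM_MM_SEARCH_DEFAULT);
if (err)
        return err;             /* typically -ENOSPC when no suitable hole exists */

/* ... use node.start as the allocated offset ... */
drm_mm_remove_node(&node);      /* on free */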
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 78e21649d48a..1a90f0a2f7e5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -124,7 +124,7 @@
124 * Ioctl definitions. 124 * Ioctl definitions.
125 */ 125 */
126 126
127static struct drm_ioctl_desc vmw_ioctls[] = { 127static const struct drm_ioctl_desc vmw_ioctls[] = {
128 VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, 128 VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
129 DRM_AUTH | DRM_UNLOCKED), 129 DRM_AUTH | DRM_UNLOCKED),
130 VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, 130 VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
@@ -622,8 +622,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
622 } 622 }
623 623
624 dev_priv->fman = vmw_fence_manager_init(dev_priv); 624 dev_priv->fman = vmw_fence_manager_init(dev_priv);
625 if (unlikely(dev_priv->fman == NULL)) 625 if (unlikely(dev_priv->fman == NULL)) {
626 ret = -ENOMEM;
626 goto out_no_fman; 627 goto out_no_fman;
628 }
627 629
628 vmw_kms_save_vga(dev_priv); 630 vmw_kms_save_vga(dev_priv);
629 631
@@ -782,7 +784,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
782 784
783 if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) 785 if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
784 && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { 786 && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
785 struct drm_ioctl_desc *ioctl = 787 const struct drm_ioctl_desc *ioctl =
786 &vmw_ioctls[nr - DRM_COMMAND_BASE]; 788 &vmw_ioctls[nr - DRM_COMMAND_BASE];
787 789
788 if (unlikely(ioctl->cmd_drv != cmd)) { 790 if (unlikely(ioctl->cmd_drv != cmd)) {
@@ -795,29 +797,12 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
795 return drm_ioctl(filp, cmd, arg); 797 return drm_ioctl(filp, cmd, arg);
796} 798}
797 799
798static int vmw_firstopen(struct drm_device *dev)
799{
800 struct vmw_private *dev_priv = vmw_priv(dev);
801 dev_priv->is_opened = true;
802
803 return 0;
804}
805
806static void vmw_lastclose(struct drm_device *dev) 800static void vmw_lastclose(struct drm_device *dev)
807{ 801{
808 struct vmw_private *dev_priv = vmw_priv(dev);
809 struct drm_crtc *crtc; 802 struct drm_crtc *crtc;
810 struct drm_mode_set set; 803 struct drm_mode_set set;
811 int ret; 804 int ret;
812 805
813 /**
814 * Do nothing on the lastclose call from drm_unload.
815 */
816
817 if (!dev_priv->is_opened)
818 return;
819
820 dev_priv->is_opened = false;
821 set.x = 0; 806 set.x = 0;
822 set.y = 0; 807 set.y = 0;
823 set.fb = NULL; 808 set.fb = NULL;
@@ -1120,7 +1105,6 @@ static const struct file_operations vmwgfx_driver_fops = {
1120 .mmap = vmw_mmap, 1105 .mmap = vmw_mmap,
1121 .poll = vmw_fops_poll, 1106 .poll = vmw_fops_poll,
1122 .read = vmw_fops_read, 1107 .read = vmw_fops_read,
1123 .fasync = drm_fasync,
1124#if defined(CONFIG_COMPAT) 1108#if defined(CONFIG_COMPAT)
1125 .compat_ioctl = drm_compat_ioctl, 1109 .compat_ioctl = drm_compat_ioctl,
1126#endif 1110#endif
@@ -1132,7 +1116,6 @@ static struct drm_driver driver = {
1132 DRIVER_MODESET, 1116 DRIVER_MODESET,
1133 .load = vmw_driver_load, 1117 .load = vmw_driver_load,
1134 .unload = vmw_driver_unload, 1118 .unload = vmw_driver_unload,
1135 .firstopen = vmw_firstopen,
1136 .lastclose = vmw_lastclose, 1119 .lastclose = vmw_lastclose,
1137 .irq_preinstall = vmw_irq_preinstall, 1120 .irq_preinstall = vmw_irq_preinstall,
1138 .irq_postinstall = vmw_irq_postinstall, 1121 .irq_postinstall = vmw_irq_postinstall,
@@ -1143,7 +1126,6 @@ static struct drm_driver driver = {
1143 .disable_vblank = vmw_disable_vblank, 1126 .disable_vblank = vmw_disable_vblank,
1144 .ioctls = vmw_ioctls, 1127 .ioctls = vmw_ioctls,
1145 .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls), 1128 .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
1146 .dma_quiescent = NULL, /*vmw_dma_quiescent, */
1147 .master_create = vmw_master_create, 1129 .master_create = vmw_master_create,
1148 .master_destroy = vmw_master_destroy, 1130 .master_destroy = vmw_master_destroy,
1149 .master_set = vmw_master_set, 1131 .master_set = vmw_master_set,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 13aeda71280e..150ec64af617 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -324,7 +324,6 @@ struct vmw_private {
324 */ 324 */
325 325
326 bool stealth; 326 bool stealth;
327 bool is_opened;
328 bool enable_fb; 327 bool enable_fb;
329 328
330 /** 329 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index d4607b2530d6..fc43c0601236 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1706,7 +1706,8 @@ int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
1706 1706
1707int vmw_du_page_flip(struct drm_crtc *crtc, 1707int vmw_du_page_flip(struct drm_crtc *crtc,
1708 struct drm_framebuffer *fb, 1708 struct drm_framebuffer *fb,
1709 struct drm_pending_vblank_event *event) 1709 struct drm_pending_vblank_event *event,
1710 uint32_t page_flip_flags)
1710{ 1711{
1711 struct vmw_private *dev_priv = vmw_priv(crtc->dev); 1712 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
1712 struct drm_framebuffer *old_fb = crtc->fb; 1713 struct drm_framebuffer *old_fb = crtc->fb;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 6fa89c9d6214..8d038c36bd57 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -123,7 +123,8 @@ struct vmw_display_unit {
123void vmw_display_unit_cleanup(struct vmw_display_unit *du); 123void vmw_display_unit_cleanup(struct vmw_display_unit *du);
124int vmw_du_page_flip(struct drm_crtc *crtc, 124int vmw_du_page_flip(struct drm_crtc *crtc,
125 struct drm_framebuffer *fb, 125 struct drm_framebuffer *fb,
126 struct drm_pending_vblank_event *event); 126 struct drm_pending_vblank_event *event,
127 uint32_t page_flip_flags);
127void vmw_du_crtc_save(struct drm_crtc *crtc); 128void vmw_du_crtc_save(struct drm_crtc *crtc);
128void vmw_du_crtc_restore(struct drm_crtc *crtc); 129void vmw_du_crtc_restore(struct drm_crtc *crtc);
129void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, 130void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 7953d1f90b63..0e67cf41065d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -500,7 +500,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
500 goto out_no_dmabuf; 500 goto out_no_dmabuf;
501 501
502 rep->handle = handle; 502 rep->handle = handle;
503 rep->map_handle = dma_buf->base.addr_space_offset; 503 rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
504 rep->cur_gmr_id = handle; 504 rep->cur_gmr_id = handle;
505 rep->cur_gmr_offset = 0; 505 rep->cur_gmr_offset = 0;
506 506
@@ -834,7 +834,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
834 if (ret != 0) 834 if (ret != 0)
835 return -EINVAL; 835 return -EINVAL;
836 836
837 *offset = out_buf->base.addr_space_offset; 837 *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
838 vmw_dmabuf_unreference(&out_buf); 838 vmw_dmabuf_unreference(&out_buf);
839 return 0; 839 return 0;
840} 840}
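The value stored in rep->map_handle / *offset is only ever consumed by userspace as an mmap() offset on the DRM file descriptor, so switching its source from addr_space_offset to drm_vma_node_offset_addr() is transparent to the ABI. For reference, the userspace side (illustrative fragment, sys/mman.h assumed):

/* Map the buffer using the offset reported by the map/dumb-map ioctl. */
void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                 drm_fd, map_handle);
if (ptr == MAP_FAILED)
        return -errno;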
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 28e28a23d444..471630299878 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -43,7 +43,7 @@ void host1x_set_drm_data(struct device *dev, void *data)
43void *host1x_get_drm_data(struct device *dev) 43void *host1x_get_drm_data(struct device *dev)
44{ 44{
45 struct host1x *host1x = dev_get_drvdata(dev); 45 struct host1x *host1x = dev_get_drvdata(dev);
46 return host1x->drm_data; 46 return host1x ? host1x->drm_data : NULL;
47} 47}
48 48
49void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r) 49void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index 790ddf114e58..bed90a8131be 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -301,8 +301,8 @@ static inline void host1x_hw_show_mlocks(struct host1x *host, struct output *o)
301 host->debug_op->show_mlocks(host, o); 301 host->debug_op->show_mlocks(host, o);
302} 302}
303 303
304extern struct platform_driver tegra_hdmi_driver;
305extern struct platform_driver tegra_dc_driver; 304extern struct platform_driver tegra_dc_driver;
305extern struct platform_driver tegra_hdmi_driver;
306extern struct platform_driver tegra_gr2d_driver; 306extern struct platform_driver tegra_gr2d_driver;
307 307
308#endif 308#endif
diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/host1x/drm/dc.c
index 5360e5a57ecc..b1a05ad901c3 100644
--- a/drivers/gpu/host1x/drm/dc.c
+++ b/drivers/gpu/host1x/drm/dc.c
@@ -235,7 +235,7 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
235} 235}
236 236
237static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, 237static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
238 struct drm_pending_vblank_event *event) 238 struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
239{ 239{
240 struct tegra_dc *dc = to_tegra_dc(crtc); 240 struct tegra_dc *dc = to_tegra_dc(crtc);
241 struct drm_device *drm = crtc->dev; 241 struct drm_device *drm = crtc->dev;
diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c
index e184b00faacd..8c61ceeaa12d 100644
--- a/drivers/gpu/host1x/drm/drm.c
+++ b/drivers/gpu/host1x/drm/drm.c
@@ -356,7 +356,7 @@ static int tegra_gem_mmap(struct drm_device *drm, void *data,
356 356
357 bo = to_tegra_bo(gem); 357 bo = to_tegra_bo(gem);
358 358
359 args->offset = tegra_bo_get_mmap_offset(bo); 359 args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
360 360
361 drm_gem_object_unreference(gem); 361 drm_gem_object_unreference(gem);
362 362
@@ -487,7 +487,7 @@ static int tegra_submit(struct drm_device *drm, void *data,
487} 487}
488#endif 488#endif
489 489
490static struct drm_ioctl_desc tegra_drm_ioctls[] = { 490static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
491#ifdef CONFIG_DRM_TEGRA_STAGING 491#ifdef CONFIG_DRM_TEGRA_STAGING
492 DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH), 492 DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
493 DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED), 493 DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
@@ -508,7 +508,6 @@ static const struct file_operations tegra_drm_fops = {
508 .unlocked_ioctl = drm_ioctl, 508 .unlocked_ioctl = drm_ioctl,
509 .mmap = tegra_drm_mmap, 509 .mmap = tegra_drm_mmap,
510 .poll = drm_poll, 510 .poll = drm_poll,
511 .fasync = drm_fasync,
512 .read = drm_read, 511 .read = drm_read,
513#ifdef CONFIG_COMPAT 512#ifdef CONFIG_COMPAT
514 .compat_ioctl = drm_compat_ioctl, 513 .compat_ioctl = drm_compat_ioctl,
@@ -633,7 +632,7 @@ struct drm_driver tegra_drm_driver = {
633 .gem_vm_ops = &tegra_bo_vm_ops, 632 .gem_vm_ops = &tegra_bo_vm_ops,
634 .dumb_create = tegra_bo_dumb_create, 633 .dumb_create = tegra_bo_dumb_create,
635 .dumb_map_offset = tegra_bo_dumb_map_offset, 634 .dumb_map_offset = tegra_bo_dumb_map_offset,
636 .dumb_destroy = tegra_bo_dumb_destroy, 635 .dumb_destroy = drm_gem_dumb_destroy,
637 636
638 .ioctls = tegra_drm_ioctls, 637 .ioctls = tegra_drm_ioctls,
639 .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls), 638 .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
diff --git a/drivers/gpu/host1x/drm/gem.c b/drivers/gpu/host1x/drm/gem.c
index c5e9a9b494c2..59623de4ee15 100644
--- a/drivers/gpu/host1x/drm/gem.c
+++ b/drivers/gpu/host1x/drm/gem.c
@@ -106,11 +106,6 @@ static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
106 dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr); 106 dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
107} 107}
108 108
109unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo)
110{
111 return (unsigned int)bo->gem.map_list.hash.key << PAGE_SHIFT;
112}
113
114struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size) 109struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
115{ 110{
116 struct tegra_bo *bo; 111 struct tegra_bo *bo;
@@ -182,8 +177,7 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
182{ 177{
183 struct tegra_bo *bo = to_tegra_bo(gem); 178 struct tegra_bo *bo = to_tegra_bo(gem);
184 179
185 if (gem->map_list.map) 180 drm_gem_free_mmap_offset(gem);
186 drm_gem_free_mmap_offset(gem);
187 181
188 drm_gem_object_release(gem); 182 drm_gem_object_release(gem);
189 tegra_bo_destroy(gem->dev, bo); 183 tegra_bo_destroy(gem->dev, bo);
@@ -228,7 +222,7 @@ int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
228 222
229 bo = to_tegra_bo(gem); 223 bo = to_tegra_bo(gem);
230 224
231 *offset = tegra_bo_get_mmap_offset(bo); 225 *offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
232 226
233 drm_gem_object_unreference(gem); 227 drm_gem_object_unreference(gem);
234 228
@@ -262,9 +256,3 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
262 256
263 return ret; 257 return ret;
264} 258}
265
266int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
267 unsigned int handle)
268{
269 return drm_gem_handle_delete(file, handle);
270}
diff --git a/drivers/gpu/host1x/drm/gem.h b/drivers/gpu/host1x/drm/gem.h
index 34de2b486eb7..492533a2dacb 100644
--- a/drivers/gpu/host1x/drm/gem.h
+++ b/drivers/gpu/host1x/drm/gem.h
@@ -44,13 +44,10 @@ struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
44 unsigned int size, 44 unsigned int size,
45 unsigned int *handle); 45 unsigned int *handle);
46void tegra_bo_free_object(struct drm_gem_object *gem); 46void tegra_bo_free_object(struct drm_gem_object *gem);
47unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo);
48int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm, 47int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
49 struct drm_mode_create_dumb *args); 48 struct drm_mode_create_dumb *args);
50int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm, 49int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
51 uint32_t handle, uint64_t *offset); 50 uint32_t handle, uint64_t *offset);
52int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
53 unsigned int handle);
54 51
55int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma); 52int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
56 53
diff --git a/drivers/gpu/host1x/drm/hdmi.c b/drivers/gpu/host1x/drm/hdmi.c
index 01097da09f7f..644d95c7d489 100644
--- a/drivers/gpu/host1x/drm/hdmi.c
+++ b/drivers/gpu/host1x/drm/hdmi.c
@@ -551,24 +551,8 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
551 return; 551 return;
552 } 552 }
553 553
554 memset(&frame, 0, sizeof(frame)); 554 hdmi_vendor_infoframe_init(&frame);
555 555 frame.s3d_struct = HDMI_3D_STRUCTURE_FRAME_PACKING;
556 frame.type = HDMI_INFOFRAME_TYPE_VENDOR;
557 frame.version = 0x01;
558 frame.length = 6;
559
560 frame.data[0] = 0x03; /* regid0 */
561 frame.data[1] = 0x0c; /* regid1 */
562 frame.data[2] = 0x00; /* regid2 */
563 frame.data[3] = 0x02 << 5; /* video format */
564
565 /* TODO: 74 MHz limit? */
566 if (1) {
567 frame.data[4] = 0x00 << 4; /* 3D structure */
568 } else {
569 frame.data[4] = 0x08 << 4; /* 3D structure */
570 frame.data[5] = 0x00 << 4; /* 3D ext. data */
571 }
572 556
573 err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer)); 557 err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
574 if (err < 0) { 558 if (err < 0) {
@@ -904,6 +888,11 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
904{ 888{
905 struct drm_info_node *node = s->private; 889 struct drm_info_node *node = s->private;
906 struct tegra_hdmi *hdmi = node->info_ent->data; 890 struct tegra_hdmi *hdmi = node->info_ent->data;
891 int err;
892
893 err = clk_enable(hdmi->clk);
894 if (err)
895 return err;
907 896
908#define DUMP_REG(name) \ 897#define DUMP_REG(name) \
909 seq_printf(s, "%-56s %#05x %08lx\n", #name, name, \ 898 seq_printf(s, "%-56s %#05x %08lx\n", #name, name, \
@@ -1069,6 +1058,8 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
1069 1058
1070#undef DUMP_REG 1059#undef DUMP_REG
1071 1060
1061 clk_disable(hdmi->clk);
1062
1072 return 0; 1063 return 0;
1073} 1064}
1074 1065
diff --git a/drivers/gpu/host1x/drm/rgb.c b/drivers/gpu/host1x/drm/rgb.c
index ed4416f20260..5aa66ef7a946 100644
--- a/drivers/gpu/host1x/drm/rgb.c
+++ b/drivers/gpu/host1x/drm/rgb.c
@@ -147,6 +147,13 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
147 if (!rgb) 147 if (!rgb)
148 return -ENOMEM; 148 return -ENOMEM;
149 149
150 rgb->output.dev = dc->dev;
151 rgb->output.of_node = np;
152
153 err = tegra_output_parse_dt(&rgb->output);
154 if (err < 0)
155 return err;
156
150 rgb->clk = devm_clk_get(dc->dev, NULL); 157 rgb->clk = devm_clk_get(dc->dev, NULL);
151 if (IS_ERR(rgb->clk)) { 158 if (IS_ERR(rgb->clk)) {
152 dev_err(dc->dev, "failed to get clock\n"); 159 dev_err(dc->dev, "failed to get clock\n");
@@ -165,13 +172,6 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
165 return err; 172 return err;
166 } 173 }
167 174
168 rgb->output.dev = dc->dev;
169 rgb->output.of_node = np;
170
171 err = tegra_output_parse_dt(&rgb->output);
172 if (err < 0)
173 return err;
174
175 dc->rgb = &rgb->output; 175 dc->rgb = &rgb->output;
176 176
177 return 0; 177 return 0;
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index cc807667d8f1..c4e1050f2252 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -42,12 +42,12 @@ struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
42 42
43 /* Check that we're not going to overflow */ 43 /* Check that we're not going to overflow */
44 total = sizeof(struct host1x_job) + 44 total = sizeof(struct host1x_job) +
45 num_relocs * sizeof(struct host1x_reloc) + 45 (u64)num_relocs * sizeof(struct host1x_reloc) +
46 num_unpins * sizeof(struct host1x_job_unpin_data) + 46 (u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
47 num_waitchks * sizeof(struct host1x_waitchk) + 47 (u64)num_waitchks * sizeof(struct host1x_waitchk) +
48 num_cmdbufs * sizeof(struct host1x_job_gather) + 48 (u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
49 num_unpins * sizeof(dma_addr_t) + 49 (u64)num_unpins * sizeof(dma_addr_t) +
50 num_unpins * sizeof(u32 *); 50 (u64)num_unpins * sizeof(u32 *);
51 if (total > ULONG_MAX) 51 if (total > ULONG_MAX)
52 return NULL; 52 return NULL;
53 53
@@ -466,9 +466,8 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
466 &job->gather_copy, 466 &job->gather_copy,
467 GFP_KERNEL); 467 GFP_KERNEL);
468 if (!job->gather_copy_mapped) { 468 if (!job->gather_copy_mapped) {
469 int err = PTR_ERR(job->gather_copy_mapped);
470 job->gather_copy_mapped = NULL; 469 job->gather_copy_mapped = NULL;
471 return err; 470 return -ENOMEM;
472 } 471 }
473 472
474 job->gather_copy_size = size; 473 job->gather_copy_size = size;
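The (u64) casts above are what make the ULONG_MAX check meaningful on 32-bit hosts: without them the products and the sum are computed in 32-bit arithmetic and can wrap long before the comparison. Reduced to its core:

/* Overflow-safe size computation: widen before multiplying, then compare. */
u64 total = sizeof(struct host1x_job) +
            (u64)num_relocs * sizeof(struct host1x_reloc);
if (total > ULONG_MAX)          /* cannot trigger on 64-bit, where unsigned long == u64 */
        return NULL;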
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index cf787e1d9322..ec0ae2d1686a 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -27,6 +27,7 @@
27#include <linux/pci.h> 27#include <linux/pci.h>
28#include <linux/console.h> 28#include <linux/console.h>
29#include <linux/vga_switcheroo.h> 29#include <linux/vga_switcheroo.h>
30#include <linux/pm_runtime.h>
30 31
31#include <linux/vgaarb.h> 32#include <linux/vgaarb.h>
32 33
@@ -37,6 +38,7 @@ struct vga_switcheroo_client {
37 const struct vga_switcheroo_client_ops *ops; 38 const struct vga_switcheroo_client_ops *ops;
38 int id; 39 int id;
39 bool active; 40 bool active;
41 bool driver_power_control;
40 struct list_head list; 42 struct list_head list;
41}; 43};
42 44
@@ -132,7 +134,7 @@ EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
132 134
133static int register_client(struct pci_dev *pdev, 135static int register_client(struct pci_dev *pdev,
134 const struct vga_switcheroo_client_ops *ops, 136 const struct vga_switcheroo_client_ops *ops,
135 int id, bool active) 137 int id, bool active, bool driver_power_control)
136{ 138{
137 struct vga_switcheroo_client *client; 139 struct vga_switcheroo_client *client;
138 140
@@ -145,6 +147,7 @@ static int register_client(struct pci_dev *pdev,
145 client->ops = ops; 147 client->ops = ops;
146 client->id = id; 148 client->id = id;
147 client->active = active; 149 client->active = active;
150 client->driver_power_control = driver_power_control;
148 151
149 mutex_lock(&vgasr_mutex); 152 mutex_lock(&vgasr_mutex);
150 list_add_tail(&client->list, &vgasr_priv.clients); 153 list_add_tail(&client->list, &vgasr_priv.clients);
@@ -160,10 +163,11 @@ static int register_client(struct pci_dev *pdev,
160} 163}
161 164
162int vga_switcheroo_register_client(struct pci_dev *pdev, 165int vga_switcheroo_register_client(struct pci_dev *pdev,
163 const struct vga_switcheroo_client_ops *ops) 166 const struct vga_switcheroo_client_ops *ops,
167 bool driver_power_control)
164{ 168{
165 return register_client(pdev, ops, -1, 169 return register_client(pdev, ops, -1,
166 pdev == vga_default_device()); 170 pdev == vga_default_device(), driver_power_control);
167} 171}
168EXPORT_SYMBOL(vga_switcheroo_register_client); 172EXPORT_SYMBOL(vga_switcheroo_register_client);
169 173
@@ -171,7 +175,7 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
171 const struct vga_switcheroo_client_ops *ops, 175 const struct vga_switcheroo_client_ops *ops,
172 int id, bool active) 176 int id, bool active)
173{ 177{
174 return register_client(pdev, ops, id | ID_BIT_AUDIO, active); 178 return register_client(pdev, ops, id | ID_BIT_AUDIO, active, false);
175} 179}
176EXPORT_SYMBOL(vga_switcheroo_register_audio_client); 180EXPORT_SYMBOL(vga_switcheroo_register_audio_client);
177 181
@@ -258,10 +262,11 @@ static int vga_switcheroo_show(struct seq_file *m, void *v)
258 int i = 0; 262 int i = 0;
259 mutex_lock(&vgasr_mutex); 263 mutex_lock(&vgasr_mutex);
260 list_for_each_entry(client, &vgasr_priv.clients, list) { 264 list_for_each_entry(client, &vgasr_priv.clients, list) {
261 seq_printf(m, "%d:%s%s:%c:%s:%s\n", i, 265 seq_printf(m, "%d:%s%s:%c:%s%s:%s\n", i,
262 client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" : "IGD", 266 client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
263 client_is_vga(client) ? "" : "-Audio", 267 client_is_vga(client) ? "" : "-Audio",
264 client->active ? '+' : ' ', 268 client->active ? '+' : ' ',
269 client->driver_power_control ? "Dyn" : "",
265 client->pwr_state ? "Pwr" : "Off", 270 client->pwr_state ? "Pwr" : "Off",
266 pci_name(client->pdev)); 271 pci_name(client->pdev));
267 i++; 272 i++;
@@ -277,6 +282,8 @@ static int vga_switcheroo_debugfs_open(struct inode *inode, struct file *file)
277 282
278static int vga_switchon(struct vga_switcheroo_client *client) 283static int vga_switchon(struct vga_switcheroo_client *client)
279{ 284{
285 if (client->driver_power_control)
286 return 0;
280 if (vgasr_priv.handler->power_state) 287 if (vgasr_priv.handler->power_state)
281 vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON); 288 vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
282 /* call the driver callback to turn on device */ 289 /* call the driver callback to turn on device */
@@ -287,6 +294,8 @@ static int vga_switchon(struct vga_switcheroo_client *client)
287 294
288static int vga_switchoff(struct vga_switcheroo_client *client) 295static int vga_switchoff(struct vga_switcheroo_client *client)
289{ 296{
297 if (client->driver_power_control)
298 return 0;
290 /* call the driver callback to turn off device */ 299 /* call the driver callback to turn off device */
291 client->ops->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF); 300 client->ops->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
292 if (vgasr_priv.handler->power_state) 301 if (vgasr_priv.handler->power_state)
@@ -402,6 +411,8 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
402 list_for_each_entry(client, &vgasr_priv.clients, list) { 411 list_for_each_entry(client, &vgasr_priv.clients, list) {
403 if (client->active || client_is_audio(client)) 412 if (client->active || client_is_audio(client))
404 continue; 413 continue;
414 if (client->driver_power_control)
415 continue;
405 set_audio_state(client->id, VGA_SWITCHEROO_OFF); 416 set_audio_state(client->id, VGA_SWITCHEROO_OFF);
406 if (client->pwr_state == VGA_SWITCHEROO_ON) 417 if (client->pwr_state == VGA_SWITCHEROO_ON)
407 vga_switchoff(client); 418 vga_switchoff(client);
@@ -413,6 +424,8 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
413 list_for_each_entry(client, &vgasr_priv.clients, list) { 424 list_for_each_entry(client, &vgasr_priv.clients, list) {
414 if (client->active || client_is_audio(client)) 425 if (client->active || client_is_audio(client))
415 continue; 426 continue;
427 if (client->driver_power_control)
428 continue;
416 if (client->pwr_state == VGA_SWITCHEROO_OFF) 429 if (client->pwr_state == VGA_SWITCHEROO_OFF)
417 vga_switchon(client); 430 vga_switchon(client);
418 set_audio_state(client->id, VGA_SWITCHEROO_ON); 431 set_audio_state(client->id, VGA_SWITCHEROO_ON);
@@ -565,3 +578,127 @@ err:
565 return err; 578 return err;
566} 579}
567EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch); 580EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
581
582static void vga_switcheroo_power_switch(struct pci_dev *pdev, enum vga_switcheroo_state state)
583{
584 struct vga_switcheroo_client *client;
585
586 if (!vgasr_priv.handler->power_state)
587 return;
588
589 client = find_client_from_pci(&vgasr_priv.clients, pdev);
590 if (!client)
591 return;
592
593 if (!client->driver_power_control)
594 return;
595
596 vgasr_priv.handler->power_state(client->id, state);
597}
598
599/* force a PCI device to a certain state - mainly to turn off audio clients */
600
601void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic)
602{
603 struct vga_switcheroo_client *client;
604
605 client = find_client_from_pci(&vgasr_priv.clients, pdev);
606 if (!client)
607 return;
608
609 if (!client->driver_power_control)
610 return;
611
612 client->pwr_state = dynamic;
613 set_audio_state(client->id, dynamic);
614}
615EXPORT_SYMBOL(vga_switcheroo_set_dynamic_switch);
616
617/* switcheroo power domain */
618static int vga_switcheroo_runtime_suspend(struct device *dev)
619{
620 struct pci_dev *pdev = to_pci_dev(dev);
621 int ret;
622
623 ret = dev->bus->pm->runtime_suspend(dev);
624 if (ret)
625 return ret;
626
627 vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF);
628 return 0;
629}
630
631static int vga_switcheroo_runtime_resume(struct device *dev)
632{
633 struct pci_dev *pdev = to_pci_dev(dev);
634 int ret;
635
636 vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_ON);
637 ret = dev->bus->pm->runtime_resume(dev);
638 if (ret)
639 return ret;
640
641 return 0;
642}
643
644/* this version is for the case where the power switch is separate
645 to the device being powered down. */
646int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
647{
648 /* copy over all the bus versions */
649 if (dev->bus && dev->bus->pm) {
650 domain->ops = *dev->bus->pm;
651 domain->ops.runtime_suspend = vga_switcheroo_runtime_suspend;
652 domain->ops.runtime_resume = vga_switcheroo_runtime_resume;
653
654 dev->pm_domain = domain;
655 return 0;
656 }
657 dev->pm_domain = NULL;
658 return -EINVAL;
659}
660EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_ops);
661
662static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
663{
664 struct pci_dev *pdev = to_pci_dev(dev);
665 int ret;
666 struct vga_switcheroo_client *client, *found = NULL;
667
668 /* we need to check if we have to switch back on the video
669 device so the audio device can come back */
670 list_for_each_entry(client, &vgasr_priv.clients, list) {
671 if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) && client_is_vga(client)) {
672 found = client;
673 ret = pm_runtime_get_sync(&client->pdev->dev);
674 if (ret) {
675 if (ret != 1)
676 return ret;
677 }
678 break;
679 }
680 }
681 ret = dev->bus->pm->runtime_resume(dev);
682
683 /* put the reference for the gpu */
684 if (found) {
685 pm_runtime_mark_last_busy(&found->pdev->dev);
686 pm_runtime_put_autosuspend(&found->pdev->dev);
687 }
688 return ret;
689}
690
691int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
692{
693 /* copy over all the bus versions */
694 if (dev->bus && dev->bus->pm) {
695 domain->ops = *dev->bus->pm;
696 domain->ops.runtime_resume = vga_switcheroo_runtime_resume_hdmi_audio;
697
698 dev->pm_domain = domain;
699 return 0;
700 }
701 dev->pm_domain = NULL;
702 return -EINVAL;
703}
704EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_optimus_hdmi_audio);
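Taken together, the new entry points sketch out the driver contract for dynamic power control: register as a client with driver_power_control = true, install the switcheroo PM domain so the handler's power_state() hook wraps the bus runtime callbacks, and drive everything else through runtime PM. Roughly as below; the foo_* names and the ops contents are placeholders, only the vga_switcheroo and pm_runtime calls are real:

/* Hypothetical GPU driver "foo" opting into driver-controlled power switching. */
static struct dev_pm_domain foo_switcheroo_domain;

static const struct vga_switcheroo_client_ops foo_switcheroo_ops = {
        /* .set_gpu_state, .reprobe, .can_switch as usual (elided) */
};

static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        /* ... normal device setup ... */

        /* true: this driver powers its GPU up/down itself via runtime PM */
        vga_switcheroo_register_client(pdev, &foo_switcheroo_ops, true);

        /* wrap the PCI bus runtime callbacks with the switcheroo power switch */
        vga_switcheroo_init_domain_pm_ops(&pdev->dev, &foo_switcheroo_domain);

        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_autosuspend_delay(&pdev->dev, 5000);
        pm_runtime_allow(&pdev->dev);
        pm_runtime_put_autosuspend(&pdev->dev);
        return 0;
}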
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 878f16882107..47c5888461ff 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -199,7 +199,6 @@ static const struct file_operations imx_drm_driver_fops = {
199 .unlocked_ioctl = drm_ioctl, 199 .unlocked_ioctl = drm_ioctl,
200 .mmap = drm_gem_cma_mmap, 200 .mmap = drm_gem_cma_mmap,
201 .poll = drm_poll, 201 .poll = drm_poll,
202 .fasync = drm_fasync,
203 .read = drm_read, 202 .read = drm_read,
204 .llseek = noop_llseek, 203 .llseek = noop_llseek,
205}; 204};
@@ -775,7 +774,7 @@ int imx_drm_remove_connector(struct imx_drm_connector *imx_drm_connector)
775} 774}
776EXPORT_SYMBOL_GPL(imx_drm_remove_connector); 775EXPORT_SYMBOL_GPL(imx_drm_remove_connector);
777 776
778static struct drm_ioctl_desc imx_drm_ioctls[] = { 777static const struct drm_ioctl_desc imx_drm_ioctls[] = {
779 /* none so far */ 778 /* none so far */
780}; 779};
781 780
@@ -788,7 +787,7 @@ static struct drm_driver imx_drm_driver = {
788 .gem_vm_ops = &drm_gem_cma_vm_ops, 787 .gem_vm_ops = &drm_gem_cma_vm_ops,
789 .dumb_create = drm_gem_cma_dumb_create, 788 .dumb_create = drm_gem_cma_dumb_create,
790 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 789 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
791 .dumb_destroy = drm_gem_cma_dumb_destroy, 790 .dumb_destroy = drm_gem_dumb_destroy,
792 791
793 .get_vblank_counter = drm_vblank_count, 792 .get_vblank_counter = drm_vblank_count,
794 .enable_vblank = imx_drm_enable_vblank, 793 .enable_vblank = imx_drm_enable_vblank,
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c
index bae0e2e81916..6fd37a7453e9 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/staging/imx-drm/ipuv3-crtc.c
@@ -129,7 +129,8 @@ static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode)
129 129
130static int ipu_page_flip(struct drm_crtc *crtc, 130static int ipu_page_flip(struct drm_crtc *crtc,
131 struct drm_framebuffer *fb, 131 struct drm_framebuffer *fb,
132 struct drm_pending_vblank_event *event) 132 struct drm_pending_vblank_event *event,
133 uint32_t page_flip_flags)
133{ 134{
134 struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); 135 struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
135 int ret; 136 int ret;
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 40178338b619..9e758a8f890d 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -22,6 +22,7 @@
22 */ 22 */
23 23
24#include <linux/bitops.h> 24#include <linux/bitops.h>
25#include <linux/bug.h>
25#include <linux/errno.h> 26#include <linux/errno.h>
26#include <linux/export.h> 27#include <linux/export.h>
27#include <linux/hdmi.h> 28#include <linux/hdmi.h>
@@ -52,7 +53,7 @@ int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
52 53
53 frame->type = HDMI_INFOFRAME_TYPE_AVI; 54 frame->type = HDMI_INFOFRAME_TYPE_AVI;
54 frame->version = 2; 55 frame->version = 2;
55 frame->length = 13; 56 frame->length = HDMI_AVI_INFOFRAME_SIZE;
56 57
57 return 0; 58 return 0;
58} 59}
@@ -83,7 +84,7 @@ ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
83 if (size < length) 84 if (size < length)
84 return -ENOSPC; 85 return -ENOSPC;
85 86
86 memset(buffer, 0, length); 87 memset(buffer, 0, size);
87 88
88 ptr[0] = frame->type; 89 ptr[0] = frame->type;
89 ptr[1] = frame->version; 90 ptr[1] = frame->version;
@@ -95,13 +96,18 @@ ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
95 96
96 ptr[0] = ((frame->colorspace & 0x3) << 5) | (frame->scan_mode & 0x3); 97 ptr[0] = ((frame->colorspace & 0x3) << 5) | (frame->scan_mode & 0x3);
97 98
98 if (frame->active_info_valid) 99 /*
100 * Data byte 1, bit 4 has to be set if we provide the active format
101 * aspect ratio
102 */
103 if (frame->active_aspect & 0xf)
99 ptr[0] |= BIT(4); 104 ptr[0] |= BIT(4);
100 105
101 if (frame->horizontal_bar_valid) 106 /* Bit 3 and 2 indicate if we transmit horizontal/vertical bar data */
107 if (frame->top_bar || frame->bottom_bar)
102 ptr[0] |= BIT(3); 108 ptr[0] |= BIT(3);
103 109
104 if (frame->vertical_bar_valid) 110 if (frame->left_bar || frame->right_bar)
105 ptr[0] |= BIT(2); 111 ptr[0] |= BIT(2);
106 112
107 ptr[1] = ((frame->colorimetry & 0x3) << 6) | 113 ptr[1] = ((frame->colorimetry & 0x3) << 6) |
@@ -151,7 +157,7 @@ int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
151 157
152 frame->type = HDMI_INFOFRAME_TYPE_SPD; 158 frame->type = HDMI_INFOFRAME_TYPE_SPD;
153 frame->version = 1; 159 frame->version = 1;
154 frame->length = 25; 160 frame->length = HDMI_SPD_INFOFRAME_SIZE;
155 161
156 strncpy(frame->vendor, vendor, sizeof(frame->vendor)); 162 strncpy(frame->vendor, vendor, sizeof(frame->vendor));
157 strncpy(frame->product, product, sizeof(frame->product)); 163 strncpy(frame->product, product, sizeof(frame->product));
@@ -185,7 +191,7 @@ ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
185 if (size < length) 191 if (size < length)
186 return -ENOSPC; 192 return -ENOSPC;
187 193
188 memset(buffer, 0, length); 194 memset(buffer, 0, size);
189 195
190 ptr[0] = frame->type; 196 ptr[0] = frame->type;
191 ptr[1] = frame->version; 197 ptr[1] = frame->version;
@@ -218,7 +224,7 @@ int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame)
218 224
219 frame->type = HDMI_INFOFRAME_TYPE_AUDIO; 225 frame->type = HDMI_INFOFRAME_TYPE_AUDIO;
220 frame->version = 1; 226 frame->version = 1;
221 frame->length = 10; 227 frame->length = HDMI_AUDIO_INFOFRAME_SIZE;
222 228
223 return 0; 229 return 0;
224} 230}
@@ -250,7 +256,7 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
250 if (size < length) 256 if (size < length)
251 return -ENOSPC; 257 return -ENOSPC;
252 258
253 memset(buffer, 0, length); 259 memset(buffer, 0, size);
254 260
255 if (frame->channels >= 2) 261 if (frame->channels >= 2)
256 channels = frame->channels - 1; 262 channels = frame->channels - 1;
@@ -282,9 +288,33 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
282EXPORT_SYMBOL(hdmi_audio_infoframe_pack); 288EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
283 289
284/** 290/**
285 * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary 291 * hdmi_vendor_infoframe_init() - initialize an HDMI vendor infoframe
286 * buffer
287 * @frame: HDMI vendor infoframe 292 * @frame: HDMI vendor infoframe
293 *
294 * Returns 0 on success or a negative error code on failure.
295 */
296int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame)
297{
298 memset(frame, 0, sizeof(*frame));
299
300 frame->type = HDMI_INFOFRAME_TYPE_VENDOR;
301 frame->version = 1;
302
303 frame->oui = HDMI_IEEE_OUI;
304
305 /*
306 * 0 is a valid value for s3d_struct, so we use a special "not set"
307 * value
308 */
309 frame->s3d_struct = HDMI_3D_STRUCTURE_INVALID;
310
311 return 0;
312}
313EXPORT_SYMBOL(hdmi_vendor_infoframe_init);
314
315/**
316 * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary buffer
317 * @frame: HDMI infoframe
288 * @buffer: destination buffer 318 * @buffer: destination buffer
289 * @size: size of buffer 319 * @size: size of buffer
290 * 320 *
@@ -297,27 +327,110 @@ EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
297 * error code on failure. 327 * error code on failure.
298 */ 328 */
299ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, 329ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
300 void *buffer, size_t size) 330 void *buffer, size_t size)
301{ 331{
302 u8 *ptr = buffer; 332 u8 *ptr = buffer;
303 size_t length; 333 size_t length;
304 334
335 /* empty info frame */
336 if (frame->vic == 0 && frame->s3d_struct == HDMI_3D_STRUCTURE_INVALID)
337 return -EINVAL;
338
339 /* only one of those can be supplied */
340 if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
341 return -EINVAL;
342
343 /* for side by side (half) we also need to provide 3D_Ext_Data */
344 if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
345 frame->length = 6;
346 else
347 frame->length = 5;
348
305 length = HDMI_INFOFRAME_HEADER_SIZE + frame->length; 349 length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
306 350
307 if (size < length) 351 if (size < length)
308 return -ENOSPC; 352 return -ENOSPC;
309 353
310 memset(buffer, 0, length); 354 memset(buffer, 0, size);
311 355
312 ptr[0] = frame->type; 356 ptr[0] = frame->type;
313 ptr[1] = frame->version; 357 ptr[1] = frame->version;
314 ptr[2] = frame->length; 358 ptr[2] = frame->length;
315 ptr[3] = 0; /* checksum */ 359 ptr[3] = 0; /* checksum */
316 360
317 memcpy(&ptr[HDMI_INFOFRAME_HEADER_SIZE], frame->data, frame->length); 361 /* HDMI OUI */
362 ptr[4] = 0x03;
363 ptr[5] = 0x0c;
364 ptr[6] = 0x00;
365
366 if (frame->vic) {
367 ptr[7] = 0x1 << 5; /* video format */
368 ptr[8] = frame->vic;
369 } else {
370 ptr[7] = 0x2 << 5; /* video format */
371 ptr[8] = (frame->s3d_struct & 0xf) << 4;
372 if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
373 ptr[9] = (frame->s3d_ext_data & 0xf) << 4;
374 }
318 375
319 hdmi_infoframe_checksum(buffer, length); 376 hdmi_infoframe_checksum(buffer, length);
320 377
321 return length; 378 return length;
322} 379}
323EXPORT_SYMBOL(hdmi_vendor_infoframe_pack); 380EXPORT_SYMBOL(hdmi_vendor_infoframe_pack);
381
382/*
383 * hdmi_vendor_any_infoframe_pack() - write a vendor infoframe to binary buffer
384 */
385static ssize_t
386hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame,
387 void *buffer, size_t size)
388{
389 /* we only know about HDMI vendor infoframes */
390 if (frame->any.oui != HDMI_IEEE_OUI)
391 return -EINVAL;
392
393 return hdmi_vendor_infoframe_pack(&frame->hdmi, buffer, size);
394}
395
396/**
397 * hdmi_infoframe_pack() - write a HDMI infoframe to binary buffer
398 * @frame: HDMI infoframe
399 * @buffer: destination buffer
400 * @size: size of buffer
401 *
402 * Packs the information contained in the @frame structure into a binary
403 * representation that can be written into the corresponding controller
404 * registers. Also computes the checksum as required by section 5.3.5 of
405 * the HDMI 1.4 specification.
406 *
407 * Returns the number of bytes packed into the binary buffer or a negative
408 * error code on failure.
409 */
410ssize_t
411hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size)
412{
413 ssize_t length;
414
415 switch (frame->any.type) {
416 case HDMI_INFOFRAME_TYPE_AVI:
417 length = hdmi_avi_infoframe_pack(&frame->avi, buffer, size);
418 break;
419 case HDMI_INFOFRAME_TYPE_SPD:
420 length = hdmi_spd_infoframe_pack(&frame->spd, buffer, size);
421 break;
422 case HDMI_INFOFRAME_TYPE_AUDIO:
423 length = hdmi_audio_infoframe_pack(&frame->audio, buffer, size);
424 break;
425 case HDMI_INFOFRAME_TYPE_VENDOR:
426 length = hdmi_vendor_any_infoframe_pack(&frame->vendor,
427 buffer, size);
428 break;
429 default:
430 WARN(1, "Bad infoframe type %d\n", frame->any.type);
431 length = -EINVAL;
432 }
433
434 return length;
435}
436EXPORT_SYMBOL(hdmi_infoframe_pack);
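The consumer side of the new vendor-infoframe helper is what the tegra hdmi change earlier in this series switches to: init the frame, set either vic (for the 4k modes) or s3d_struct (for 3D) but not both, and pack. A minimal sketch using only values defined by this patch:

/* Pack a frame-packing 3D vendor infoframe; the buffer need only hold
 * HDMI_INFOFRAME_HEADER_SIZE + frame->length bytes, 10 is enough here. */
struct hdmi_vendor_infoframe frame;
u8 buffer[10];
ssize_t len;

hdmi_vendor_infoframe_init(&frame);
frame.s3d_struct = HDMI_3D_STRUCTURE_FRAME_PACKING;    /* or set frame.vic instead */

len = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
if (len < 0)
        return len;     /* -EINVAL if both or neither of vic / s3d_struct are set */
/* write len bytes from buffer into the encoder's infoframe registers */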
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 12083dc862a9..290734191f72 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -45,7 +45,6 @@
45#include <linux/kernel.h> 45#include <linux/kernel.h>
46#include <linux/miscdevice.h> 46#include <linux/miscdevice.h>
47#include <linux/fs.h> 47#include <linux/fs.h>
48#include <linux/proc_fs.h>
49#include <linux/init.h> 48#include <linux/init.h>
50#include <linux/file.h> 49#include <linux/file.h>
51#include <linux/platform_device.h> 50#include <linux/platform_device.h>
@@ -62,20 +61,18 @@
62#endif 61#endif
63#include <asm/mman.h> 62#include <asm/mman.h>
64#include <asm/uaccess.h> 63#include <asm/uaccess.h>
65#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
66#include <linux/types.h> 64#include <linux/types.h>
67#include <linux/agp_backend.h> 65#include <linux/agp_backend.h>
68#endif
69#include <linux/workqueue.h> 66#include <linux/workqueue.h>
70#include <linux/poll.h> 67#include <linux/poll.h>
71#include <asm/pgalloc.h> 68#include <asm/pgalloc.h>
72#include <drm/drm.h> 69#include <drm/drm.h>
73#include <drm/drm_sarea.h> 70#include <drm/drm_sarea.h>
71#include <drm/drm_vma_manager.h>
74 72
75#include <linux/idr.h> 73#include <linux/idr.h>
76 74
77#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))) 75#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
78#define __OS_HAS_MTRR (defined(CONFIG_MTRR))
79 76
80struct module; 77struct module;
81 78
@@ -140,19 +137,15 @@ int drm_err(const char *func, const char *format, ...);
140/* driver capabilities and requirements mask */ 137/* driver capabilities and requirements mask */
141#define DRIVER_USE_AGP 0x1 138#define DRIVER_USE_AGP 0x1
142#define DRIVER_REQUIRE_AGP 0x2 139#define DRIVER_REQUIRE_AGP 0x2
143#define DRIVER_USE_MTRR 0x4
144#define DRIVER_PCI_DMA 0x8 140#define DRIVER_PCI_DMA 0x8
145#define DRIVER_SG 0x10 141#define DRIVER_SG 0x10
146#define DRIVER_HAVE_DMA 0x20 142#define DRIVER_HAVE_DMA 0x20
147#define DRIVER_HAVE_IRQ 0x40 143#define DRIVER_HAVE_IRQ 0x40
148#define DRIVER_IRQ_SHARED 0x80 144#define DRIVER_IRQ_SHARED 0x80
149#define DRIVER_IRQ_VBL 0x100
150#define DRIVER_DMA_QUEUE 0x200
151#define DRIVER_FB_DMA 0x400
152#define DRIVER_IRQ_VBL2 0x800
153#define DRIVER_GEM 0x1000 145#define DRIVER_GEM 0x1000
154#define DRIVER_MODESET 0x2000 146#define DRIVER_MODESET 0x2000
155#define DRIVER_PRIME 0x4000 147#define DRIVER_PRIME 0x4000
148#define DRIVER_RENDER 0x8000
156 149
157#define DRIVER_BUS_PCI 0x1 150#define DRIVER_BUS_PCI 0x1
158#define DRIVER_BUS_PLATFORM 0x2 151#define DRIVER_BUS_PLATFORM 0x2
@@ -168,13 +161,7 @@ int drm_err(const char *func, const char *format, ...);
168#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */ 161#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */
169#define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */ 162#define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */
170#define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */ 163#define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */
171#define DRM_LOOPING_LIMIT 5000000
172#define DRM_TIME_SLICE (HZ/20) /**< Time slice for GLXContexts */
173#define DRM_LOCK_SLICE 1 /**< Time slice for lock, in jiffies */
174
175#define DRM_FLAG_DEBUG 0x01
176 164
177#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
178#define DRM_MAP_HASH_OFFSET 0x10000000 165#define DRM_MAP_HASH_OFFSET 0x10000000
179 166
180/*@}*/ 167/*@}*/
@@ -263,9 +250,6 @@ int drm_err(const char *func, const char *format, ...);
263 250
264#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x) 251#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x)
265 252
266#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
267#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
268
269#define DRM_IF_VERSION(maj, min) (maj << 16 | min) 253#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
270 254
271/** 255/**
@@ -307,6 +291,7 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
307#define DRM_ROOT_ONLY 0x4 291#define DRM_ROOT_ONLY 0x4
308#define DRM_CONTROL_ALLOW 0x8 292#define DRM_CONTROL_ALLOW 0x8
309#define DRM_UNLOCKED 0x10 293#define DRM_UNLOCKED 0x10
294#define DRM_RENDER_ALLOW 0x20
310 295
311struct drm_ioctl_desc { 296struct drm_ioctl_desc {
312 unsigned int cmd; 297 unsigned int cmd;
@@ -587,7 +572,6 @@ struct drm_map_list {
587 struct drm_local_map *map; /**< mapping */ 572 struct drm_local_map *map; /**< mapping */
588 uint64_t user_token; 573 uint64_t user_token;
589 struct drm_master *master; 574 struct drm_master *master;
590 struct drm_mm_node *file_offset_node; /**< fake offset */
591}; 575};
592 576
593/** 577/**
@@ -622,8 +606,7 @@ struct drm_ati_pcigart_info {
622 * GEM specific mm private for tracking GEM objects 606 * GEM specific mm private for tracking GEM objects
623 */ 607 */
624struct drm_gem_mm { 608struct drm_gem_mm {
625 struct drm_mm offset_manager; /**< Offset mgmt for buffer objects */ 609 struct drm_vma_offset_manager vma_manager;
626 struct drm_open_hash offset_hash; /**< User token hash table for maps */
627}; 610};
628 611
629/** 612/**
@@ -634,8 +617,16 @@ struct drm_gem_object {
634 /** Reference count of this object */ 617 /** Reference count of this object */
635 struct kref refcount; 618 struct kref refcount;
636 619
637 /** Handle count of this object. Each handle also holds a reference */ 620 /**
638 atomic_t handle_count; /* number of handles on this object */ 621 * handle_count - gem file_priv handle count of this object
622 *
623 * Each handle also holds a reference. Note that when the handle_count
624 * drops to 0 any global names (e.g. the id in the flink namespace) will
625 * be cleared.
626 *
627 * Protected by dev->object_name_lock.
628 * */
629 unsigned handle_count;
639 630
640 /** Related drm device */ 631 /** Related drm device */
641 struct drm_device *dev; 632 struct drm_device *dev;
@@ -644,7 +635,7 @@ struct drm_gem_object {
644 struct file *filp; 635 struct file *filp;
645 636
646 /* Mapping info for this object */ 637 /* Mapping info for this object */
647 struct drm_map_list map_list; 638 struct drm_vma_offset_node vma_node;
648 639
649 /** 640 /**
650 * Size of the object, in bytes. Immutable over the object's 641 * Size of the object, in bytes. Immutable over the object's
@@ -678,10 +669,32 @@ struct drm_gem_object {
678 669
679 void *driver_private; 670 void *driver_private;
680 671
681 /* dma buf exported from this GEM object */ 672 /**
682 struct dma_buf *export_dma_buf; 673 * dma_buf - dma buf associated with this GEM object
674 *
675 * Pointer to the dma-buf associated with this gem object (either
676 * through importing or exporting). We break the resulting reference
677 * loop when the last gem handle for this object is released.
678 *
679 * Protected by obj->object_name_lock
680 */
681 struct dma_buf *dma_buf;
683 682
684 /* dma buf attachment backing this object */ 683 /**
684 * import_attach - dma buf attachment backing this object
685 *
686 * Any foreign dma_buf imported as a gem object has this set to the
687 * attachment point for the device. This is invariant over the lifetime
688 * of a gem object.
689 *
690 * The driver's ->gem_free_object callback is responsible for cleaning
691 * up the dma_buf attachment and references acquired at import time.
692 *
693 * Note that the drm gem/prime core does not depend upon drivers setting
694 * this field any more. So for drivers where this doesn't make sense
695 * (e.g. virtual devices or a displaylink behind an usb bus) they can
696 * simply leave it as NULL.
697 */
685 struct dma_buf_attachment *import_attach; 698 struct dma_buf_attachment *import_attach;
686}; 699};
687 700
@@ -737,6 +750,7 @@ struct drm_bus {
737 int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p); 750 int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p);
738 /* hooks that are for PCI */ 751 /* hooks that are for PCI */
739 int (*agp_init)(struct drm_device *dev); 752 int (*agp_init)(struct drm_device *dev);
753 void (*agp_destroy)(struct drm_device *dev);
740 754
741}; 755};
742 756
@@ -885,8 +899,6 @@ struct drm_driver {
885 void (*irq_preinstall) (struct drm_device *dev); 899 void (*irq_preinstall) (struct drm_device *dev);
886 int (*irq_postinstall) (struct drm_device *dev); 900 int (*irq_postinstall) (struct drm_device *dev);
887 void (*irq_uninstall) (struct drm_device *dev); 901 void (*irq_uninstall) (struct drm_device *dev);
888 void (*set_version) (struct drm_device *dev,
889 struct drm_set_version *sv);
890 902
891 /* Master routines */ 903 /* Master routines */
892 int (*master_create)(struct drm_device *dev, struct drm_master *master); 904 int (*master_create)(struct drm_device *dev, struct drm_master *master);
@@ -966,7 +978,7 @@ struct drm_driver {
966 978
967 u32 driver_features; 979 u32 driver_features;
968 int dev_priv_size; 980 int dev_priv_size;
969 struct drm_ioctl_desc *ioctls; 981 const struct drm_ioctl_desc *ioctls;
970 int num_ioctls; 982 int num_ioctls;
971 const struct file_operations *fops; 983 const struct file_operations *fops;
972 union { 984 union {
@@ -1037,8 +1049,6 @@ struct drm_minor {
1037 struct device kdev; /**< Linux device */ 1049 struct device kdev; /**< Linux device */
1038 struct drm_device *dev; 1050 struct drm_device *dev;
1039 1051
1040 struct proc_dir_entry *proc_root; /**< proc directory entry */
1041 struct drm_info_node proc_nodes;
1042 struct dentry *debugfs_root; 1052 struct dentry *debugfs_root;
1043 1053
1044 struct list_head debugfs_list; 1054 struct list_head debugfs_list;
@@ -1131,12 +1141,7 @@ struct drm_device {
1131 /*@{ */ 1141 /*@{ */
1132 int irq_enabled; /**< True if irq handler is enabled */ 1142 int irq_enabled; /**< True if irq handler is enabled */
1133 __volatile__ long context_flag; /**< Context swapping flag */ 1143 __volatile__ long context_flag; /**< Context swapping flag */
1134 __volatile__ long interrupt_flag; /**< Interruption handler flag */
1135 __volatile__ long dma_flag; /**< DMA dispatch flag */
1136 wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */
1137 int last_checked; /**< Last context checked for DMA */
1138 int last_context; /**< Last current context */ 1144 int last_context; /**< Last current context */
1139 unsigned long last_switch; /**< jiffies at last context switch */
1140 /*@} */ 1145 /*@} */
1141 1146
1142 struct work_struct work; 1147 struct work_struct work;
@@ -1174,12 +1179,6 @@ struct drm_device {
1174 spinlock_t event_lock; 1179 spinlock_t event_lock;
1175 1180
1176 /*@} */ 1181 /*@} */
1177 cycles_t ctx_start;
1178 cycles_t lck_start;
1179
1180 struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */
1181 wait_queue_head_t buf_readers; /**< Processes waiting to read */
1182 wait_queue_head_t buf_writers; /**< Processes waiting to ctx switch */
1183 1182
1184 struct drm_agp_head *agp; /**< AGP data */ 1183 struct drm_agp_head *agp; /**< AGP data */
1185 1184
@@ -1207,12 +1206,13 @@ struct drm_device {
1207 unsigned int agp_buffer_token; 1206 unsigned int agp_buffer_token;
1208 struct drm_minor *control; /**< Control node for card */ 1207 struct drm_minor *control; /**< Control node for card */
1209 struct drm_minor *primary; /**< render type primary screen head */ 1208 struct drm_minor *primary; /**< render type primary screen head */
1209 struct drm_minor *render; /**< render node for card */
1210 1210
1211 struct drm_mode_config mode_config; /**< Current mode config */ 1211 struct drm_mode_config mode_config; /**< Current mode config */
1212 1212
1213 /** \name GEM information */ 1213 /** \name GEM information */
1214 /*@{ */ 1214 /*@{ */
1215 spinlock_t object_name_lock; 1215 struct mutex object_name_lock;
1216 struct idr object_name_idr; 1216 struct idr object_name_idr;
1217 /*@} */ 1217 /*@} */
1218 int switch_power_state; 1218 int switch_power_state;
@@ -1223,6 +1223,7 @@ struct drm_device {
1223#define DRM_SWITCH_POWER_ON 0 1223#define DRM_SWITCH_POWER_ON 0
1224#define DRM_SWITCH_POWER_OFF 1 1224#define DRM_SWITCH_POWER_OFF 1
1225#define DRM_SWITCH_POWER_CHANGING 2 1225#define DRM_SWITCH_POWER_CHANGING 2
1226#define DRM_SWITCH_POWER_DYNAMIC_OFF 3
1226 1227
1227static __inline__ int drm_core_check_feature(struct drm_device *dev, 1228static __inline__ int drm_core_check_feature(struct drm_device *dev,
1228 int feature) 1229 int feature)
@@ -1235,25 +1236,6 @@ static inline int drm_dev_to_irq(struct drm_device *dev)
1235 return dev->driver->bus->get_irq(dev); 1236 return dev->driver->bus->get_irq(dev);
1236} 1237}
1237 1238
1238
1239#if __OS_HAS_AGP
1240static inline int drm_core_has_AGP(struct drm_device *dev)
1241{
1242 return drm_core_check_feature(dev, DRIVER_USE_AGP);
1243}
1244#else
1245#define drm_core_has_AGP(dev) (0)
1246#endif
1247
1248#if __OS_HAS_MTRR
1249static inline int drm_core_has_MTRR(struct drm_device *dev)
1250{
1251 return drm_core_check_feature(dev, DRIVER_USE_MTRR);
1252}
1253#else
1254#define drm_core_has_MTRR(dev) (0)
1255#endif
1256
1257static inline void drm_device_set_unplugged(struct drm_device *dev) 1239static inline void drm_device_set_unplugged(struct drm_device *dev)
1258{ 1240{
1259 smp_wmb(); 1241 smp_wmb();
@@ -1272,6 +1254,11 @@ static inline bool drm_modeset_is_locked(struct drm_device *dev)
1272 return mutex_is_locked(&dev->mode_config.mutex); 1254 return mutex_is_locked(&dev->mode_config.mutex);
1273} 1255}
1274 1256
1257static inline bool drm_is_render_client(struct drm_file *file_priv)
1258{
1259 return file_priv->minor->type == DRM_MINOR_RENDER;
1260}
1261
1275/******************************************************************/ 1262/******************************************************************/
1276/** \name Internal function definitions */ 1263/** \name Internal function definitions */
1277/*@{*/ 1264/*@{*/
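
With render nodes behind the drm_rnodes switch, the DRM_RENDER_ALLOW ioctl flag and the drm_is_render_client() helper above decide what an unprivileged /dev/dri/renderD* client may call. A minimal sketch of how a driver's ioctl table might opt in, assuming a hypothetical foo driver whose FOO_* ioctl numbers and handlers are defined elsewhere (DRIVER_RENDER is the feature bit from the rendernode work in this pull):

#include <drm/drmP.h>

static const struct drm_ioctl_desc foo_ioctls[] = {
	/* plain GEM allocation: safe for unprivileged render clients */
	DRM_IOCTL_DEF_DRV(FOO_GEM_CREATE, foo_gem_create_ioctl,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	/* modeset-style ioctls stay on the legacy/primary node only */
	DRM_IOCTL_DEF_DRV(FOO_SET_PLANE, foo_set_plane_ioctl,
			  DRM_UNLOCKED | DRM_MASTER),
};

static struct drm_driver foo_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_RENDER,
	.ioctls          = foo_ioctls,
	.num_ioctls      = ARRAY_SIZE(foo_ioctls),
};
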
@@ -1287,7 +1274,6 @@ extern int drm_lastclose(struct drm_device *dev);
1287extern struct mutex drm_global_mutex; 1274extern struct mutex drm_global_mutex;
1288extern int drm_open(struct inode *inode, struct file *filp); 1275extern int drm_open(struct inode *inode, struct file *filp);
1289extern int drm_stub_open(struct inode *inode, struct file *filp); 1276extern int drm_stub_open(struct inode *inode, struct file *filp);
1290extern int drm_fasync(int fd, struct file *filp, int on);
1291extern ssize_t drm_read(struct file *filp, char __user *buffer, 1277extern ssize_t drm_read(struct file *filp, char __user *buffer,
1292 size_t count, loff_t *offset); 1278 size_t count, loff_t *offset);
1293extern int drm_release(struct inode *inode, struct file *filp); 1279extern int drm_release(struct inode *inode, struct file *filp);
@@ -1301,14 +1287,6 @@ extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
1301 1287
1302 /* Memory management support (drm_memory.h) */ 1288 /* Memory management support (drm_memory.h) */
1303#include <drm/drm_memory.h> 1289#include <drm/drm_memory.h>
1304extern void drm_free_agp(DRM_AGP_MEM * handle, int pages);
1305extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
1306extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
1307 struct page **pages,
1308 unsigned long num_pages,
1309 uint32_t gtt_offset,
1310 uint32_t type);
1311extern int drm_unbind_agp(DRM_AGP_MEM * handle);
1312 1290
1313 /* Misc. IOCTL support (drm_ioctl.h) */ 1291 /* Misc. IOCTL support (drm_ioctl.h) */
1314extern int drm_irq_by_busid(struct drm_device *dev, void *data, 1292extern int drm_irq_by_busid(struct drm_device *dev, void *data,
@@ -1335,8 +1313,6 @@ extern int drm_resctx(struct drm_device *dev, void *data,
1335 struct drm_file *file_priv); 1313 struct drm_file *file_priv);
1336extern int drm_addctx(struct drm_device *dev, void *data, 1314extern int drm_addctx(struct drm_device *dev, void *data,
1337 struct drm_file *file_priv); 1315 struct drm_file *file_priv);
1338extern int drm_modctx(struct drm_device *dev, void *data,
1339 struct drm_file *file_priv);
1340extern int drm_getctx(struct drm_device *dev, void *data, 1316extern int drm_getctx(struct drm_device *dev, void *data,
1341 struct drm_file *file_priv); 1317 struct drm_file *file_priv);
1342extern int drm_switchctx(struct drm_device *dev, void *data, 1318extern int drm_switchctx(struct drm_device *dev, void *data,
@@ -1346,9 +1322,10 @@ extern int drm_newctx(struct drm_device *dev, void *data,
1346extern int drm_rmctx(struct drm_device *dev, void *data, 1322extern int drm_rmctx(struct drm_device *dev, void *data,
1347 struct drm_file *file_priv); 1323 struct drm_file *file_priv);
1348 1324
1349extern int drm_ctxbitmap_init(struct drm_device *dev); 1325extern void drm_legacy_ctxbitmap_init(struct drm_device *dev);
1350extern void drm_ctxbitmap_cleanup(struct drm_device *dev); 1326extern void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
1351extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); 1327extern void drm_legacy_ctxbitmap_release(struct drm_device *dev,
1328 struct drm_file *file_priv);
1352 1329
1353extern int drm_setsareactx(struct drm_device *dev, void *data, 1330extern int drm_setsareactx(struct drm_device *dev, void *data,
1354 struct drm_file *file_priv); 1331 struct drm_file *file_priv);
@@ -1405,11 +1382,12 @@ extern int drm_freebufs(struct drm_device *dev, void *data,
1405 struct drm_file *file_priv); 1382 struct drm_file *file_priv);
1406extern int drm_mapbufs(struct drm_device *dev, void *data, 1383extern int drm_mapbufs(struct drm_device *dev, void *data,
1407 struct drm_file *file_priv); 1384 struct drm_file *file_priv);
1408extern int drm_order(unsigned long size); 1385extern int drm_dma_ioctl(struct drm_device *dev, void *data,
1386 struct drm_file *file_priv);
1409 1387
1410 /* DMA support (drm_dma.h) */ 1388 /* DMA support (drm_dma.h) */
1411extern int drm_dma_setup(struct drm_device *dev); 1389extern int drm_legacy_dma_setup(struct drm_device *dev);
1412extern void drm_dma_takedown(struct drm_device *dev); 1390extern void drm_legacy_dma_takedown(struct drm_device *dev);
1413extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf); 1391extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
1414extern void drm_core_reclaim_buffers(struct drm_device *dev, 1392extern void drm_core_reclaim_buffers(struct drm_device *dev,
1415 struct drm_file *filp); 1393 struct drm_file *filp);
@@ -1423,7 +1401,6 @@ extern int drm_irq_uninstall(struct drm_device *dev);
1423extern int drm_vblank_init(struct drm_device *dev, int num_crtcs); 1401extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
1424extern int drm_wait_vblank(struct drm_device *dev, void *data, 1402extern int drm_wait_vblank(struct drm_device *dev, void *data,
1425 struct drm_file *filp); 1403 struct drm_file *filp);
1426extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
1427extern u32 drm_vblank_count(struct drm_device *dev, int crtc); 1404extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
1428extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, 1405extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
1429 struct timeval *vblanktime); 1406 struct timeval *vblanktime);
@@ -1465,31 +1442,8 @@ extern int drm_modeset_ctl(struct drm_device *dev, void *data,
1465 struct drm_file *file_priv); 1442 struct drm_file *file_priv);
1466 1443
1467 /* AGP/GART support (drm_agpsupport.h) */ 1444 /* AGP/GART support (drm_agpsupport.h) */
1468extern struct drm_agp_head *drm_agp_init(struct drm_device *dev); 1445
1469extern int drm_agp_acquire(struct drm_device *dev); 1446#include <drm/drm_agpsupport.h>
1470extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
1471 struct drm_file *file_priv);
1472extern int drm_agp_release(struct drm_device *dev);
1473extern int drm_agp_release_ioctl(struct drm_device *dev, void *data,
1474 struct drm_file *file_priv);
1475extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
1476extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
1477 struct drm_file *file_priv);
1478extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
1479extern int drm_agp_info_ioctl(struct drm_device *dev, void *data,
1480 struct drm_file *file_priv);
1481extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
1482extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
1483 struct drm_file *file_priv);
1484extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
1485extern int drm_agp_free_ioctl(struct drm_device *dev, void *data,
1486 struct drm_file *file_priv);
1487extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
1488extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
1489 struct drm_file *file_priv);
1490extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
1491extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
1492 struct drm_file *file_priv);
1493 1447
1494 /* Stub support (drm_stub.h) */ 1448 /* Stub support (drm_stub.h) */
1495extern int drm_setmaster_ioctl(struct drm_device *dev, void *data, 1449extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
@@ -1504,23 +1458,19 @@ extern void drm_put_dev(struct drm_device *dev);
1504extern int drm_put_minor(struct drm_minor **minor); 1458extern int drm_put_minor(struct drm_minor **minor);
1505extern void drm_unplug_dev(struct drm_device *dev); 1459extern void drm_unplug_dev(struct drm_device *dev);
1506extern unsigned int drm_debug; 1460extern unsigned int drm_debug;
1461extern unsigned int drm_rnodes;
1507 1462
1508extern unsigned int drm_vblank_offdelay; 1463extern unsigned int drm_vblank_offdelay;
1509extern unsigned int drm_timestamp_precision; 1464extern unsigned int drm_timestamp_precision;
1510extern unsigned int drm_timestamp_monotonic; 1465extern unsigned int drm_timestamp_monotonic;
1511 1466
1512extern struct class *drm_class; 1467extern struct class *drm_class;
1513extern struct proc_dir_entry *drm_proc_root;
1514extern struct dentry *drm_debugfs_root; 1468extern struct dentry *drm_debugfs_root;
1515 1469
1516extern struct idr drm_minors_idr; 1470extern struct idr drm_minors_idr;
1517 1471
1518extern struct drm_local_map *drm_getsarea(struct drm_device *dev); 1472extern struct drm_local_map *drm_getsarea(struct drm_device *dev);
1519 1473
1520 /* Proc support (drm_proc.h) */
1521extern int drm_proc_init(struct drm_minor *minor, struct proc_dir_entry *root);
1522extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root);
1523
1524 /* Debugfs support */ 1474 /* Debugfs support */
1525#if defined(CONFIG_DEBUG_FS) 1475#if defined(CONFIG_DEBUG_FS)
1526extern int drm_debugfs_init(struct drm_minor *minor, int minor_id, 1476extern int drm_debugfs_init(struct drm_minor *minor, int minor_id,
@@ -1550,6 +1500,7 @@ extern struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
1550 struct dma_buf *dma_buf); 1500 struct dma_buf *dma_buf);
1551extern int drm_gem_prime_fd_to_handle(struct drm_device *dev, 1501extern int drm_gem_prime_fd_to_handle(struct drm_device *dev,
1552 struct drm_file *file_priv, int prime_fd, uint32_t *handle); 1502 struct drm_file *file_priv, int prime_fd, uint32_t *handle);
1503extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
1553 1504
1554extern int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, 1505extern int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
1555 struct drm_file *file_priv); 1506 struct drm_file *file_priv);
@@ -1561,25 +1512,22 @@ extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **
1561extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages); 1512extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages);
1562extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg); 1513extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
1563 1514
1515int drm_gem_dumb_destroy(struct drm_file *file,
1516 struct drm_device *dev,
1517 uint32_t handle);
1564 1518
1565void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv); 1519void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
1566void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv); 1520void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
1567int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle); 1521void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
1568void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
1569
1570int drm_prime_add_dma_buf(struct drm_device *dev, struct drm_gem_object *obj);
1571int drm_prime_lookup_obj(struct drm_device *dev, struct dma_buf *buf,
1572 struct drm_gem_object **obj);
1573 1522
1574#if DRM_DEBUG_CODE 1523#if DRM_DEBUG_CODE
1575extern int drm_vma_info(struct seq_file *m, void *data); 1524extern int drm_vma_info(struct seq_file *m, void *data);
1576#endif 1525#endif
1577 1526
1578 /* Scatter Gather Support (drm_scatter.h) */ 1527 /* Scatter Gather Support (drm_scatter.h) */
1579extern void drm_sg_cleanup(struct drm_sg_mem * entry); 1528extern void drm_legacy_sg_cleanup(struct drm_device *dev);
1580extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, 1529extern int drm_sg_alloc(struct drm_device *dev, void *data,
1581 struct drm_file *file_priv); 1530 struct drm_file *file_priv);
1582extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request);
1583extern int drm_sg_free(struct drm_device *dev, void *data, 1531extern int drm_sg_free(struct drm_device *dev, void *data,
1584 struct drm_file *file_priv); 1532 struct drm_file *file_priv);
1585 1533
@@ -1613,9 +1561,8 @@ struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
1613 size_t size); 1561 size_t size);
1614int drm_gem_object_init(struct drm_device *dev, 1562int drm_gem_object_init(struct drm_device *dev,
1615 struct drm_gem_object *obj, size_t size); 1563 struct drm_gem_object *obj, size_t size);
1616int drm_gem_private_object_init(struct drm_device *dev, 1564void drm_gem_private_object_init(struct drm_device *dev,
1617 struct drm_gem_object *obj, size_t size); 1565 struct drm_gem_object *obj, size_t size);
1618void drm_gem_object_handle_free(struct drm_gem_object *obj);
1619void drm_gem_vm_open(struct vm_area_struct *vma); 1566void drm_gem_vm_open(struct vm_area_struct *vma);
1620void drm_gem_vm_close(struct vm_area_struct *vma); 1567void drm_gem_vm_close(struct vm_area_struct *vma);
1621int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, 1568int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
@@ -1640,66 +1587,32 @@ drm_gem_object_unreference(struct drm_gem_object *obj)
1640static inline void 1587static inline void
1641drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) 1588drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
1642{ 1589{
1643 if (obj != NULL) { 1590 if (obj && !atomic_add_unless(&obj->refcount.refcount, -1, 1)) {
1644 struct drm_device *dev = obj->dev; 1591 struct drm_device *dev = obj->dev;
1592
1645 mutex_lock(&dev->struct_mutex); 1593 mutex_lock(&dev->struct_mutex);
1646 kref_put(&obj->refcount, drm_gem_object_free); 1594 if (likely(atomic_dec_and_test(&obj->refcount.refcount)))
1595 drm_gem_object_free(&obj->refcount);
1647 mutex_unlock(&dev->struct_mutex); 1596 mutex_unlock(&dev->struct_mutex);
1648 } 1597 }
1649} 1598}
1650 1599
1600int drm_gem_handle_create_tail(struct drm_file *file_priv,
1601 struct drm_gem_object *obj,
1602 u32 *handlep);
1651int drm_gem_handle_create(struct drm_file *file_priv, 1603int drm_gem_handle_create(struct drm_file *file_priv,
1652 struct drm_gem_object *obj, 1604 struct drm_gem_object *obj,
1653 u32 *handlep); 1605 u32 *handlep);
1654int drm_gem_handle_delete(struct drm_file *filp, u32 handle); 1606int drm_gem_handle_delete(struct drm_file *filp, u32 handle);
1655 1607
1656static inline void
1657drm_gem_object_handle_reference(struct drm_gem_object *obj)
1658{
1659 drm_gem_object_reference(obj);
1660 atomic_inc(&obj->handle_count);
1661}
1662
1663static inline void
1664drm_gem_object_handle_unreference(struct drm_gem_object *obj)
1665{
1666 if (obj == NULL)
1667 return;
1668
1669 if (atomic_read(&obj->handle_count) == 0)
1670 return;
1671 /*
1672 * Must bump handle count first as this may be the last
1673 * ref, in which case the object would disappear before we
1674 * checked for a name
1675 */
1676 if (atomic_dec_and_test(&obj->handle_count))
1677 drm_gem_object_handle_free(obj);
1678 drm_gem_object_unreference(obj);
1679}
1680
1681static inline void
1682drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
1683{
1684 if (obj == NULL)
1685 return;
1686
1687 if (atomic_read(&obj->handle_count) == 0)
1688 return;
1689
1690 /*
1691 * Must bump handle count first as this may be the last
1692 * ref, in which case the object would disappear before we
1693 * checked for a name
1694 */
1695
1696 if (atomic_dec_and_test(&obj->handle_count))
1697 drm_gem_object_handle_free(obj);
1698 drm_gem_object_unreference_unlocked(obj);
1699}
1700 1608
1701void drm_gem_free_mmap_offset(struct drm_gem_object *obj); 1609void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
1702int drm_gem_create_mmap_offset(struct drm_gem_object *obj); 1610int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
1611int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
1612
1613struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
1614void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
1615 bool dirty, bool accessed);
1703 1616
1704struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev, 1617struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
1705 struct drm_file *filp, 1618 struct drm_file *filp,
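
drm_gem_get_pages()/drm_gem_put_pages() pull the shmem page-pinning boilerplate into the core, and drm_gem_create_mmap_offset_size() lets a driver register a fake mmap offset whose size differs from the object's. A sketch of the page helpers for a shmem-backed driver (the foo_* names are illustrative, not from this patch):

#include <drm/drmP.h>

static struct page **foo_pin_pages(struct drm_gem_object *obj)
{
	/* pins obj->size >> PAGE_SHIFT pages from the shmem backing store;
	 * gfpmask 0 keeps the mapping's default flags, ERR_PTR() on failure */
	return drm_gem_get_pages(obj, 0);
}

static void foo_unpin_pages(struct drm_gem_object *obj, struct page **pages)
{
	/* mark the pages dirty and accessed so shmem writes them back */
	drm_gem_put_pages(obj, pages, true, true);
}
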
@@ -1769,9 +1682,6 @@ extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask);
1769extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device); 1682extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
1770extern void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device); 1683extern void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device);
1771 1684
1772extern int drm_get_platform_dev(struct platform_device *pdev,
1773 struct drm_driver *driver);
1774
1775/* returns true if currently okay to sleep */ 1685/* returns true if currently okay to sleep */
1776static __inline__ bool drm_can_sleep(void) 1686static __inline__ bool drm_can_sleep(void)
1777{ 1687{
diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h
new file mode 100644
index 000000000000..a184eeee9c96
--- /dev/null
+++ b/include/drm/drm_agpsupport.h
@@ -0,0 +1,194 @@
1#ifndef _DRM_AGPSUPPORT_H_
2#define _DRM_AGPSUPPORT_H_
3
4#include <linux/kernel.h>
5#include <linux/mm.h>
6#include <linux/mutex.h>
7#include <linux/types.h>
8#include <linux/agp_backend.h>
9#include <drm/drmP.h>
10
11#if __OS_HAS_AGP
12
13void drm_free_agp(DRM_AGP_MEM * handle, int pages);
14int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
15int drm_unbind_agp(DRM_AGP_MEM * handle);
16DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
17 struct page **pages,
18 unsigned long num_pages,
19 uint32_t gtt_offset,
20 uint32_t type);
21
22struct drm_agp_head *drm_agp_init(struct drm_device *dev);
23void drm_agp_destroy(struct drm_agp_head *agp);
24void drm_agp_clear(struct drm_device *dev);
25int drm_agp_acquire(struct drm_device *dev);
26int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
27 struct drm_file *file_priv);
28int drm_agp_release(struct drm_device *dev);
29int drm_agp_release_ioctl(struct drm_device *dev, void *data,
30 struct drm_file *file_priv);
31int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
32int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
33 struct drm_file *file_priv);
34int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
35int drm_agp_info_ioctl(struct drm_device *dev, void *data,
36 struct drm_file *file_priv);
37int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
38int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
39 struct drm_file *file_priv);
40int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
41int drm_agp_free_ioctl(struct drm_device *dev, void *data,
42 struct drm_file *file_priv);
43int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
44int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
45 struct drm_file *file_priv);
46int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
47int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
48 struct drm_file *file_priv);
49
50static inline int drm_core_has_AGP(struct drm_device *dev)
51{
52 return drm_core_check_feature(dev, DRIVER_USE_AGP);
53}
54
55#else /* __OS_HAS_AGP */
56
57static inline void drm_free_agp(DRM_AGP_MEM * handle, int pages)
58{
59}
60
61static inline int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
62{
63 return -ENODEV;
64}
65
66static inline int drm_unbind_agp(DRM_AGP_MEM * handle)
67{
68 return -ENODEV;
69}
70
71static inline DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
72 struct page **pages,
73 unsigned long num_pages,
74 uint32_t gtt_offset,
75 uint32_t type)
76{
77 return NULL;
78}
79
80static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev)
81{
82 return NULL;
83}
84
85static inline void drm_agp_destroy(struct drm_agp_head *agp)
86{
87}
88
89static inline void drm_agp_clear(struct drm_device *dev)
90{
91}
92
93static inline int drm_agp_acquire(struct drm_device *dev)
94{
95 return -ENODEV;
96}
97
98static inline int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
99 struct drm_file *file_priv)
100{
101 return -ENODEV;
102}
103
104static inline int drm_agp_release(struct drm_device *dev)
105{
106 return -ENODEV;
107}
108
109static inline int drm_agp_release_ioctl(struct drm_device *dev, void *data,
110 struct drm_file *file_priv)
111{
112 return -ENODEV;
113}
114
115static inline int drm_agp_enable(struct drm_device *dev,
116 struct drm_agp_mode mode)
117{
118 return -ENODEV;
119}
120
121static inline int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
122 struct drm_file *file_priv)
123{
124 return -ENODEV;
125}
126
127static inline int drm_agp_info(struct drm_device *dev,
128 struct drm_agp_info *info)
129{
130 return -ENODEV;
131}
132
133static inline int drm_agp_info_ioctl(struct drm_device *dev, void *data,
134 struct drm_file *file_priv)
135{
136 return -ENODEV;
137}
138
139static inline int drm_agp_alloc(struct drm_device *dev,
140 struct drm_agp_buffer *request)
141{
142 return -ENODEV;
143}
144
145static inline int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
146 struct drm_file *file_priv)
147{
148 return -ENODEV;
149}
150
151static inline int drm_agp_free(struct drm_device *dev,
152 struct drm_agp_buffer *request)
153{
154 return -ENODEV;
155}
156
157static inline int drm_agp_free_ioctl(struct drm_device *dev, void *data,
158 struct drm_file *file_priv)
159{
160 return -ENODEV;
161}
162
163static inline int drm_agp_unbind(struct drm_device *dev,
164 struct drm_agp_binding *request)
165{
166 return -ENODEV;
167}
168
169static inline int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
170 struct drm_file *file_priv)
171{
172 return -ENODEV;
173}
174
175static inline int drm_agp_bind(struct drm_device *dev,
176 struct drm_agp_binding *request)
177{
178 return -ENODEV;
179}
180
181static inline int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
182 struct drm_file *file_priv)
183{
184 return -ENODEV;
185}
186
187static inline int drm_core_has_AGP(struct drm_device *dev)
188{
189 return 0;
190}
191
192#endif /* __OS_HAS_AGP */
193
194#endif /* _DRM_AGPSUPPORT_H_ */
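
Because the !__OS_HAS_AGP stubs above return -ENODEV (or NULL), code that includes <drm/drm_agpsupport.h> can call the AGP helpers without wrapping every call site in #ifdefs. A hedged sketch of the pattern, with a hypothetical foo_setup_agp() caller:

#include <drm/drmP.h>
#include <drm/drm_agpsupport.h>

static int foo_setup_agp(struct drm_device *dev)
{
	/* on non-AGP builds drm_core_has_AGP() is 0 and the calls below
	 * would just return -ENODEV anyway */
	if (!drm_core_has_AGP(dev) || !dev->agp)
		return -ENODEV;

	return drm_agp_acquire(dev);
}
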
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index fa12a2fa4293..24f499569a2f 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -49,6 +49,7 @@ struct drm_clip_rect;
49#define DRM_MODE_OBJECT_FB 0xfbfbfbfb 49#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
50#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb 50#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
51#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee 51#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
52#define DRM_MODE_OBJECT_BRIDGE 0xbdbdbdbd
52 53
53struct drm_mode_object { 54struct drm_mode_object {
54 uint32_t id; 55 uint32_t id;
@@ -305,6 +306,7 @@ struct drm_connector;
305struct drm_encoder; 306struct drm_encoder;
306struct drm_pending_vblank_event; 307struct drm_pending_vblank_event;
307struct drm_plane; 308struct drm_plane;
309struct drm_bridge;
308 310
309/** 311/**
310 * drm_crtc_funcs - control CRTCs for a given device 312 * drm_crtc_funcs - control CRTCs for a given device
@@ -363,7 +365,8 @@ struct drm_crtc_funcs {
363 */ 365 */
364 int (*page_flip)(struct drm_crtc *crtc, 366 int (*page_flip)(struct drm_crtc *crtc,
365 struct drm_framebuffer *fb, 367 struct drm_framebuffer *fb,
366 struct drm_pending_vblank_event *event); 368 struct drm_pending_vblank_event *event,
369 uint32_t flags);
367 370
368 int (*set_property)(struct drm_crtc *crtc, 371 int (*set_property)(struct drm_crtc *crtc,
369 struct drm_property *property, uint64_t val); 372 struct drm_property *property, uint64_t val);
@@ -494,8 +497,6 @@ struct drm_encoder_funcs {
494 void (*destroy)(struct drm_encoder *encoder); 497 void (*destroy)(struct drm_encoder *encoder);
495}; 498};
496 499
497#define DRM_CONNECTOR_MAX_UMODES 16
498#define DRM_CONNECTOR_LEN 32
499#define DRM_CONNECTOR_MAX_ENCODER 3 500#define DRM_CONNECTOR_MAX_ENCODER 3
500 501
501/** 502/**
@@ -507,6 +508,7 @@ struct drm_encoder_funcs {
507 * @possible_crtcs: bitmask of potential CRTC bindings 508 * @possible_crtcs: bitmask of potential CRTC bindings
508 * @possible_clones: bitmask of potential sibling encoders for cloning 509 * @possible_clones: bitmask of potential sibling encoders for cloning
509 * @crtc: currently bound CRTC 510 * @crtc: currently bound CRTC
 511 * @bridge: bridge associated with the encoder
510 * @funcs: control functions 512 * @funcs: control functions
511 * @helper_private: mid-layer private data 513 * @helper_private: mid-layer private data
512 * 514 *
@@ -523,6 +525,7 @@ struct drm_encoder {
523 uint32_t possible_clones; 525 uint32_t possible_clones;
524 526
525 struct drm_crtc *crtc; 527 struct drm_crtc *crtc;
528 struct drm_bridge *bridge;
526 const struct drm_encoder_funcs *funcs; 529 const struct drm_encoder_funcs *funcs;
527 void *helper_private; 530 void *helper_private;
528}; 531};
@@ -683,6 +686,48 @@ struct drm_plane {
683}; 686};
684 687
685/** 688/**
689 * drm_bridge_funcs - drm_bridge control functions
 690 * @mode_fixup: Try to fix up (or reject entirely) the proposed mode for this bridge
691 * @disable: Called right before encoder prepare, disables the bridge
692 * @post_disable: Called right after encoder prepare, for lockstepped disable
693 * @mode_set: Set this mode to the bridge
694 * @pre_enable: Called right before encoder commit, for lockstepped commit
695 * @enable: Called right after encoder commit, enables the bridge
696 * @destroy: make object go away
697 */
698struct drm_bridge_funcs {
699 bool (*mode_fixup)(struct drm_bridge *bridge,
700 const struct drm_display_mode *mode,
701 struct drm_display_mode *adjusted_mode);
702 void (*disable)(struct drm_bridge *bridge);
703 void (*post_disable)(struct drm_bridge *bridge);
704 void (*mode_set)(struct drm_bridge *bridge,
705 struct drm_display_mode *mode,
706 struct drm_display_mode *adjusted_mode);
707 void (*pre_enable)(struct drm_bridge *bridge);
708 void (*enable)(struct drm_bridge *bridge);
709 void (*destroy)(struct drm_bridge *bridge);
710};
711
712/**
713 * drm_bridge - central DRM bridge control structure
714 * @dev: DRM device this bridge belongs to
715 * @head: list management
716 * @base: base mode object
717 * @funcs: control functions
718 * @driver_private: pointer to the bridge driver's internal context
719 */
720struct drm_bridge {
721 struct drm_device *dev;
722 struct list_head head;
723
724 struct drm_mode_object base;
725
726 const struct drm_bridge_funcs *funcs;
727 void *driver_private;
728};
729
730/**
686 * drm_mode_set - new values for a CRTC config change 731 * drm_mode_set - new values for a CRTC config change
687 * @head: list management 732 * @head: list management
688 * @fb: framebuffer to use for new config 733 * @fb: framebuffer to use for new config
@@ -742,6 +787,7 @@ struct drm_mode_group {
742 uint32_t num_crtcs; 787 uint32_t num_crtcs;
743 uint32_t num_encoders; 788 uint32_t num_encoders;
744 uint32_t num_connectors; 789 uint32_t num_connectors;
790 uint32_t num_bridges;
745 791
746 /* list of object IDs for this group */ 792 /* list of object IDs for this group */
747 uint32_t *id_list; 793 uint32_t *id_list;
@@ -756,6 +802,8 @@ struct drm_mode_group {
756 * @fb_list: list of framebuffers available 802 * @fb_list: list of framebuffers available
757 * @num_connector: number of connectors on this device 803 * @num_connector: number of connectors on this device
758 * @connector_list: list of connector objects 804 * @connector_list: list of connector objects
805 * @num_bridge: number of bridges on this device
806 * @bridge_list: list of bridge objects
759 * @num_encoder: number of encoders on this device 807 * @num_encoder: number of encoders on this device
760 * @encoder_list: list of encoder objects 808 * @encoder_list: list of encoder objects
761 * @num_crtc: number of CRTCs on this device 809 * @num_crtc: number of CRTCs on this device
@@ -793,6 +841,8 @@ struct drm_mode_config {
793 841
794 int num_connector; 842 int num_connector;
795 struct list_head connector_list; 843 struct list_head connector_list;
844 int num_bridge;
845 struct list_head bridge_list;
796 int num_encoder; 846 int num_encoder;
797 struct list_head encoder_list; 847 struct list_head encoder_list;
798 int num_plane; 848 int num_plane;
@@ -839,11 +889,13 @@ struct drm_mode_config {
839 889
840 /* Optional properties */ 890 /* Optional properties */
841 struct drm_property *scaling_mode_property; 891 struct drm_property *scaling_mode_property;
842 struct drm_property *dithering_mode_property;
843 struct drm_property *dirty_info_property; 892 struct drm_property *dirty_info_property;
844 893
845 /* dumb ioctl parameters */ 894 /* dumb ioctl parameters */
846 uint32_t preferred_depth, prefer_shadow; 895 uint32_t preferred_depth, prefer_shadow;
896
897 /* whether async page flip is supported or not */
898 bool async_page_flip;
847}; 899};
848 900
849#define obj_to_crtc(x) container_of(x, struct drm_crtc, base) 901#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
@@ -869,6 +921,8 @@ extern int drm_crtc_init(struct drm_device *dev,
869 const struct drm_crtc_funcs *funcs); 921 const struct drm_crtc_funcs *funcs);
870extern void drm_crtc_cleanup(struct drm_crtc *crtc); 922extern void drm_crtc_cleanup(struct drm_crtc *crtc);
871 923
924extern void drm_connector_ida_init(void);
925extern void drm_connector_ida_destroy(void);
872extern int drm_connector_init(struct drm_device *dev, 926extern int drm_connector_init(struct drm_device *dev,
873 struct drm_connector *connector, 927 struct drm_connector *connector,
874 const struct drm_connector_funcs *funcs, 928 const struct drm_connector_funcs *funcs,
@@ -878,6 +932,10 @@ extern void drm_connector_cleanup(struct drm_connector *connector);
878/* helper to unplug all connectors from sysfs for device */ 932/* helper to unplug all connectors from sysfs for device */
879extern void drm_connector_unplug_all(struct drm_device *dev); 933extern void drm_connector_unplug_all(struct drm_device *dev);
880 934
935extern int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
936 const struct drm_bridge_funcs *funcs);
937extern void drm_bridge_cleanup(struct drm_bridge *bridge);
938
881extern int drm_encoder_init(struct drm_device *dev, 939extern int drm_encoder_init(struct drm_device *dev,
882 struct drm_encoder *encoder, 940 struct drm_encoder *encoder,
883 const struct drm_encoder_funcs *funcs, 941 const struct drm_encoder_funcs *funcs,
@@ -908,7 +966,6 @@ extern struct edid *drm_get_edid(struct drm_connector *connector,
908 struct i2c_adapter *adapter); 966 struct i2c_adapter *adapter);
909extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); 967extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
910extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); 968extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
911extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
912extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src); 969extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
913extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, 970extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
914 const struct drm_display_mode *mode); 971 const struct drm_display_mode *mode);
@@ -925,14 +982,9 @@ extern int drm_mode_height(const struct drm_display_mode *mode);
925/* for us by fb module */ 982/* for us by fb module */
926extern struct drm_display_mode *drm_mode_create(struct drm_device *dev); 983extern struct drm_display_mode *drm_mode_create(struct drm_device *dev);
927extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode); 984extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
928extern void drm_mode_list_concat(struct list_head *head,
929 struct list_head *new);
930extern void drm_mode_validate_size(struct drm_device *dev, 985extern void drm_mode_validate_size(struct drm_device *dev,
931 struct list_head *mode_list, 986 struct list_head *mode_list,
932 int maxX, int maxY, int maxPitch); 987 int maxX, int maxY, int maxPitch);
933extern void drm_mode_validate_clocks(struct drm_device *dev,
934 struct list_head *mode_list,
935 int *min, int *max, int n_ranges);
936extern void drm_mode_prune_invalid(struct drm_device *dev, 988extern void drm_mode_prune_invalid(struct drm_device *dev,
937 struct list_head *mode_list, bool verbose); 989 struct list_head *mode_list, bool verbose);
938extern void drm_mode_sort(struct list_head *mode_list); 990extern void drm_mode_sort(struct list_head *mode_list);
@@ -949,9 +1001,6 @@ extern int drm_object_property_set_value(struct drm_mode_object *obj,
949extern int drm_object_property_get_value(struct drm_mode_object *obj, 1001extern int drm_object_property_get_value(struct drm_mode_object *obj,
950 struct drm_property *property, 1002 struct drm_property *property,
951 uint64_t *value); 1003 uint64_t *value);
952extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev);
953extern void drm_framebuffer_set_object(struct drm_device *dev,
954 unsigned long handle);
955extern int drm_framebuffer_init(struct drm_device *dev, 1004extern int drm_framebuffer_init(struct drm_device *dev,
956 struct drm_framebuffer *fb, 1005 struct drm_framebuffer *fb,
957 const struct drm_framebuffer_funcs *funcs); 1006 const struct drm_framebuffer_funcs *funcs);
@@ -962,10 +1011,6 @@ extern void drm_framebuffer_reference(struct drm_framebuffer *fb);
962extern void drm_framebuffer_remove(struct drm_framebuffer *fb); 1011extern void drm_framebuffer_remove(struct drm_framebuffer *fb);
963extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb); 1012extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
964extern void drm_framebuffer_unregister_private(struct drm_framebuffer *fb); 1013extern void drm_framebuffer_unregister_private(struct drm_framebuffer *fb);
965extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
966extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
967extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
968extern bool drm_crtc_in_use(struct drm_crtc *crtc);
969 1014
970extern void drm_object_attach_property(struct drm_mode_object *obj, 1015extern void drm_object_attach_property(struct drm_mode_object *obj,
971 struct drm_property *property, 1016 struct drm_property *property,
@@ -990,7 +1035,6 @@ extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
990extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats, 1035extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
991 char *formats[]); 1036 char *formats[]);
992extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); 1037extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
993extern int drm_mode_create_dithering_property(struct drm_device *dev);
994extern int drm_mode_create_dirty_info_property(struct drm_device *dev); 1038extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
995extern const char *drm_get_encoder_name(const struct drm_encoder *encoder); 1039extern const char *drm_get_encoder_name(const struct drm_encoder *encoder);
996 1040
@@ -1040,17 +1084,12 @@ extern int drm_mode_getblob_ioctl(struct drm_device *dev,
1040 void *data, struct drm_file *file_priv); 1084 void *data, struct drm_file *file_priv);
1041extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev, 1085extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
1042 void *data, struct drm_file *file_priv); 1086 void *data, struct drm_file *file_priv);
1043extern int drm_mode_hotplug_ioctl(struct drm_device *dev,
1044 void *data, struct drm_file *file_priv);
1045extern int drm_mode_replacefb(struct drm_device *dev,
1046 void *data, struct drm_file *file_priv);
1047extern int drm_mode_getencoder(struct drm_device *dev, 1087extern int drm_mode_getencoder(struct drm_device *dev,
1048 void *data, struct drm_file *file_priv); 1088 void *data, struct drm_file *file_priv);
1049extern int drm_mode_gamma_get_ioctl(struct drm_device *dev, 1089extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
1050 void *data, struct drm_file *file_priv); 1090 void *data, struct drm_file *file_priv);
1051extern int drm_mode_gamma_set_ioctl(struct drm_device *dev, 1091extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
1052 void *data, struct drm_file *file_priv); 1092 void *data, struct drm_file *file_priv);
1053extern u8 *drm_find_cea_extension(struct edid *edid);
1054extern u8 drm_match_cea_mode(const struct drm_display_mode *to_match); 1093extern u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
1055extern bool drm_detect_hdmi_monitor(struct edid *edid); 1094extern bool drm_detect_hdmi_monitor(struct edid *edid);
1056extern bool drm_detect_monitor_audio(struct edid *edid); 1095extern bool drm_detect_monitor_audio(struct edid *edid);
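
A drm_bridge hangs off encoder->bridge and has its hooks called around the encoder's own prepare/commit sequence by the CRTC helpers. A minimal registration sketch, assuming the foo_bridge_* callbacks are implemented elsewhere by the bridge driver (names are illustrative):

#include <drm/drm_crtc.h>

static const struct drm_bridge_funcs foo_bridge_funcs = {
	.mode_fixup   = foo_bridge_mode_fixup,
	.disable      = foo_bridge_disable,
	.post_disable = foo_bridge_post_disable,
	.mode_set     = foo_bridge_mode_set,
	.pre_enable   = foo_bridge_pre_enable,
	.enable       = foo_bridge_enable,
	.destroy      = foo_bridge_destroy,
};

static int foo_attach_bridge(struct drm_device *dev,
			     struct drm_encoder *encoder,
			     struct drm_bridge *bridge)
{
	int ret;

	ret = drm_bridge_init(dev, bridge, &foo_bridge_funcs);
	if (ret)
		return ret;

	/* the encoder helpers call the bridge hooks once this is set */
	encoder->bridge = bridge;
	return 0;
}
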
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index e8e1417af3d9..ae8dbfb1207c 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -342,13 +342,42 @@ u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
342u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], 342u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
343 int lane); 343 int lane);
344 344
345#define DP_RECEIVER_CAP_SIZE 0xf 345#define DP_RECEIVER_CAP_SIZE 0xf
346#define EDP_PSR_RECEIVER_CAP_SIZE 2
347
346void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]); 348void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
347void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]); 349void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
348 350
349u8 drm_dp_link_rate_to_bw_code(int link_rate); 351u8 drm_dp_link_rate_to_bw_code(int link_rate);
350int drm_dp_bw_code_to_link_rate(u8 link_bw); 352int drm_dp_bw_code_to_link_rate(u8 link_bw);
351 353
354struct edp_sdp_header {
355 u8 HB0; /* Secondary Data Packet ID */
356 u8 HB1; /* Secondary Data Packet Type */
357 u8 HB2; /* 7:5 reserved, 4:0 revision number */
358 u8 HB3; /* 7:5 reserved, 4:0 number of valid data bytes */
359} __packed;
360
361#define EDP_SDP_HEADER_REVISION_MASK 0x1F
362#define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES 0x1F
363
364struct edp_vsc_psr {
365 struct edp_sdp_header sdp_header;
366 u8 DB0; /* Stereo Interface */
367 u8 DB1; /* 0 - PSR State; 1 - Update RFB; 2 - CRC Valid */
368 u8 DB2; /* CRC value bits 7:0 of the R or Cr component */
369 u8 DB3; /* CRC value bits 15:8 of the R or Cr component */
370 u8 DB4; /* CRC value bits 7:0 of the G or Y component */
371 u8 DB5; /* CRC value bits 15:8 of the G or Y component */
372 u8 DB6; /* CRC value bits 7:0 of the B or Cb component */
373 u8 DB7; /* CRC value bits 15:8 of the B or Cb component */
374 u8 DB8_31[24]; /* Reserved */
375} __packed;
376
377#define EDP_VSC_PSR_STATE_ACTIVE (1<<0)
378#define EDP_VSC_PSR_UPDATE_RFB (1<<1)
379#define EDP_VSC_PSR_CRC_VALUES_VALID (1<<2)
380
352static inline int 381static inline int
353drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE]) 382drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE])
354{ 383{
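
The edp_vsc_psr layout above is what an eDP source driver fills in to signal PSR state via the VSC secondary data packet. A hedged sketch of building one; the header values (packet type 0x7, revision 0x2, 8 valid payload bytes) follow the usual eDP PSR VSC encoding and should be treated as illustrative rather than normative:

#include <linux/string.h>
#include <drm/drm_dp_helper.h>

static void foo_fill_psr_vsc(struct edp_vsc_psr *vsc)
{
	memset(vsc, 0, sizeof(*vsc));

	vsc->sdp_header.HB0 = 0;	/* Secondary Data Packet ID */
	vsc->sdp_header.HB1 = 0x7;	/* Secondary Data Packet Type: VSC */
	vsc->sdp_header.HB2 = 0x2;	/* revision number */
	vsc->sdp_header.HB3 = 0x8;	/* number of valid data bytes */

	/* PSR active, CRC values valid */
	vsc->DB1 = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID;
}
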
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index fc481fc17085..a1441c5ac63d 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -256,9 +256,11 @@ struct drm_encoder;
256struct drm_connector; 256struct drm_connector;
257struct drm_display_mode; 257struct drm_display_mode;
258struct hdmi_avi_infoframe; 258struct hdmi_avi_infoframe;
259struct hdmi_vendor_infoframe;
259 260
260void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid); 261void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid);
261int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads); 262int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
263int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb);
262int drm_av_sync_delay(struct drm_connector *connector, 264int drm_av_sync_delay(struct drm_connector *connector,
263 struct drm_display_mode *mode); 265 struct drm_display_mode *mode);
264struct drm_connector *drm_select_eld(struct drm_encoder *encoder, 266struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
@@ -268,5 +270,8 @@ int drm_load_edid_firmware(struct drm_connector *connector);
268int 270int
269drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, 271drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
270 const struct drm_display_mode *mode); 272 const struct drm_display_mode *mode);
273int
274drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
275 const struct drm_display_mode *mode);
271 276
272#endif /* __DRM_EDID_H__ */ 277#endif /* __DRM_EDID_H__ */
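
drm_hdmi_vendor_infoframe_from_display_mode() pairs with the existing AVI helper so an HDMI encoder can emit the vendor-specific infoframe needed for 4k and stereo modes from its mode_set path. A sketch, assuming hypothetical foo_write_*_infoframe() helpers that pack and write the frames to hardware:

#include <linux/hdmi.h>
#include <drm/drm_edid.h>

static void foo_set_infoframes(struct drm_encoder *encoder,
			       const struct drm_display_mode *mode)
{
	struct hdmi_avi_infoframe avi;
	struct hdmi_vendor_infoframe vendor;

	if (drm_hdmi_avi_infoframe_from_display_mode(&avi, mode) == 0)
		foo_write_avi_infoframe(encoder, &avi);

	/* only succeeds for modes that have an HDMI 1.4 VIC (4k, 3D) */
	if (drm_hdmi_vendor_infoframe_from_display_mode(&vendor, mode) == 0)
		foo_write_vendor_infoframe(encoder, &vendor);
}
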
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index 4a3fc244301c..c54cf3d4a03f 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -24,7 +24,6 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
24 unsigned int plane); 24 unsigned int plane);
25 25
26#ifdef CONFIG_DEBUG_FS 26#ifdef CONFIG_DEBUG_FS
27void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m);
28int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg); 27int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg);
29#endif 28#endif
30 29
diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h
new file mode 100644
index 000000000000..35c776ae7d3b
--- /dev/null
+++ b/include/drm/drm_flip_work.h
@@ -0,0 +1,76 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#ifndef DRM_FLIP_WORK_H
25#define DRM_FLIP_WORK_H
26
27#include <linux/kfifo.h>
28#include <linux/workqueue.h>
29
30/**
31 * DOC: flip utils
32 *
33 * Util to queue up work to run from work-queue context after flip/vblank.
 34 * Typically this can be used to defer unref of framebuffers, cursor
 35 * bos, etc. until after vblank. The APIs are all safe (and lockless)
 36 * for up to one producer and one consumer at a time. The single-consumer
37 * aspect is ensured by committing the queued work to a single work-queue.
38 */
39
40struct drm_flip_work;
41
42/*
43 * drm_flip_func_t - callback function
44 *
45 * @work: the flip work
46 * @val: value queued via drm_flip_work_queue()
47 *
 48 * Callback function to be called for each of the queued work items after
49 * drm_flip_work_commit() is called.
50 */
51typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val);
52
53/**
54 * struct drm_flip_work - flip work queue
55 * @name: debug name
56 * @pending: number of queued but not committed items
57 * @count: number of committed items
 58 * @func: callback function called for each committed item
59 * @worker: worker which calls @func
60 */
61struct drm_flip_work {
62 const char *name;
63 atomic_t pending, count;
64 drm_flip_func_t func;
65 struct work_struct worker;
66 DECLARE_KFIFO_PTR(fifo, void *);
67};
68
69void drm_flip_work_queue(struct drm_flip_work *work, void *val);
70void drm_flip_work_commit(struct drm_flip_work *work,
71 struct workqueue_struct *wq);
72int drm_flip_work_init(struct drm_flip_work *work, int size,
73 const char *name, drm_flip_func_t func);
74void drm_flip_work_cleanup(struct drm_flip_work *work);
75
76#endif /* DRM_FLIP_WORK_H */
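
The flip-work helper exists to push framebuffer and bo unrefs out of the vblank/flip-done interrupt and into workqueue context. A minimal usage sketch; the foo_crtc structure and the use of system_wq are assumptions for illustration:

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>

struct foo_crtc {
	struct drm_crtc base;
	struct drm_flip_work unref_work;
};

static void foo_unref_fb(struct drm_flip_work *work, void *val)
{
	/* runs from workqueue context, safe to take locks here */
	drm_framebuffer_unreference(val);
}

static int foo_crtc_init_unref_work(struct foo_crtc *foo_crtc)
{
	/* room for up to 16 queued-but-not-committed framebuffers */
	return drm_flip_work_init(&foo_crtc->unref_work, 16,
				  "fb unref", foo_unref_fb);
}

/* producer side: queue the displaced fb when the flip is submitted */
static void foo_crtc_queue_unref(struct foo_crtc *foo_crtc,
				 struct drm_framebuffer *old_fb)
{
	drm_flip_work_queue(&foo_crtc->unref_work, old_fb);
}

/* consumer side: commit from the vblank/flip-done handler */
static void foo_crtc_flip_done(struct foo_crtc *foo_crtc)
{
	drm_flip_work_commit(&foo_crtc->unref_work, system_wq);
}
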
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index c34f27f80bcc..89b4d7db1ebd 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -30,14 +30,6 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
30/* set vm_flags and we can change the vm attribute to other one at here. */ 30/* set vm_flags and we can change the vm attribute to other one at here. */
31int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma); 31int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
32 32
33/*
34 * destroy memory region allocated.
35 * - a gem handle and physical memory region pointed by a gem object
36 * would be released by drm_gem_handle_delete().
37 */
38int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
39 struct drm_device *drm, unsigned int handle);
40
41/* allocate physical memory. */ 33/* allocate physical memory. */
42struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, 34struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
43 unsigned int size); 35 unsigned int size);
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 4d06edb56d5f..cba67865d18f 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -36,11 +36,19 @@
36/* 36/*
37 * Generic range manager structs 37 * Generic range manager structs
38 */ 38 */
39#include <linux/bug.h>
40#include <linux/kernel.h>
39#include <linux/list.h> 41#include <linux/list.h>
42#include <linux/spinlock.h>
40#ifdef CONFIG_DEBUG_FS 43#ifdef CONFIG_DEBUG_FS
41#include <linux/seq_file.h> 44#include <linux/seq_file.h>
42#endif 45#endif
43 46
47enum drm_mm_search_flags {
48 DRM_MM_SEARCH_DEFAULT = 0,
49 DRM_MM_SEARCH_BEST = 1 << 0,
50};
51
44struct drm_mm_node { 52struct drm_mm_node {
45 struct list_head node_list; 53 struct list_head node_list;
46 struct list_head hole_stack; 54 struct list_head hole_stack;
@@ -62,9 +70,6 @@ struct drm_mm {
62 /* head_node.node_list is the list of all memory nodes, ordered 70 /* head_node.node_list is the list of all memory nodes, ordered
63 * according to the (increasing) start address of the memory node. */ 71 * according to the (increasing) start address of the memory node. */
64 struct drm_mm_node head_node; 72 struct drm_mm_node head_node;
65 struct list_head unused_nodes;
66 int num_unused;
67 spinlock_t unused_lock;
68 unsigned int scan_check_range : 1; 73 unsigned int scan_check_range : 1;
69 unsigned scan_alignment; 74 unsigned scan_alignment;
70 unsigned long scan_color; 75 unsigned long scan_color;
@@ -115,13 +120,6 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
115#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \ 120#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
116 &(mm)->head_node.node_list, \ 121 &(mm)->head_node.node_list, \
117 node_list) 122 node_list)
118#define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \
119 for (entry = (mm)->prev_scanned_node, \
120 next = entry ? list_entry(entry->node_list.next, \
121 struct drm_mm_node, node_list) : NULL; \
122 entry != NULL; entry = next, \
123 next = entry ? list_entry(entry->node_list.next, \
124 struct drm_mm_node, node_list) : NULL) \
125 123
126/* Note that we need to unroll list_for_each_entry in order to inline 124/* Note that we need to unroll list_for_each_entry in order to inline
127 * setting hole_start and hole_end on each iteration and keep the 125 * setting hole_start and hole_end on each iteration and keep the
@@ -138,124 +136,50 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
138/* 136/*
139 * Basic range manager support (drm_mm.c) 137 * Basic range manager support (drm_mm.c)
140 */ 138 */
141extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm, 139extern int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
142 unsigned long start,
143 unsigned long size,
144 bool atomic);
145extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
146 unsigned long size,
147 unsigned alignment,
148 unsigned long color,
149 int atomic);
150extern struct drm_mm_node *drm_mm_get_block_range_generic(
151 struct drm_mm_node *node,
152 unsigned long size,
153 unsigned alignment,
154 unsigned long color,
155 unsigned long start,
156 unsigned long end,
157 int atomic);
158static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
159 unsigned long size,
160 unsigned alignment)
161{
162 return drm_mm_get_block_generic(parent, size, alignment, 0, 0);
163}
164static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
165 unsigned long size,
166 unsigned alignment)
167{
168 return drm_mm_get_block_generic(parent, size, alignment, 0, 1);
169}
170static inline struct drm_mm_node *drm_mm_get_block_range(
171 struct drm_mm_node *parent,
172 unsigned long size,
173 unsigned alignment,
174 unsigned long start,
175 unsigned long end)
176{
177 return drm_mm_get_block_range_generic(parent, size, alignment, 0,
178 start, end, 0);
179}
180static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
181 struct drm_mm_node *parent,
182 unsigned long size,
183 unsigned alignment,
184 unsigned long start,
185 unsigned long end)
186{
187 return drm_mm_get_block_range_generic(parent, size, alignment, 0,
188 start, end, 1);
189}
190 140
191extern int drm_mm_insert_node(struct drm_mm *mm,
192 struct drm_mm_node *node,
193 unsigned long size,
194 unsigned alignment);
195extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
196 struct drm_mm_node *node,
197 unsigned long size,
198 unsigned alignment,
199 unsigned long start,
200 unsigned long end);
201extern int drm_mm_insert_node_generic(struct drm_mm *mm, 141extern int drm_mm_insert_node_generic(struct drm_mm *mm,
202 struct drm_mm_node *node, 142 struct drm_mm_node *node,
203 unsigned long size, 143 unsigned long size,
204 unsigned alignment, 144 unsigned alignment,
205 unsigned long color); 145 unsigned long color,
146 enum drm_mm_search_flags flags);
147static inline int drm_mm_insert_node(struct drm_mm *mm,
148 struct drm_mm_node *node,
149 unsigned long size,
150 unsigned alignment,
151 enum drm_mm_search_flags flags)
152{
153 return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags);
154}
155
206extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, 156extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
207 struct drm_mm_node *node, 157 struct drm_mm_node *node,
208 unsigned long size, 158 unsigned long size,
209 unsigned alignment, 159 unsigned alignment,
210 unsigned long color, 160 unsigned long color,
211 unsigned long start, 161 unsigned long start,
212 unsigned long end); 162 unsigned long end,
213extern void drm_mm_put_block(struct drm_mm_node *cur); 163 enum drm_mm_search_flags flags);
214extern void drm_mm_remove_node(struct drm_mm_node *node); 164static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
215extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); 165 struct drm_mm_node *node,
216extern struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, 166 unsigned long size,
217 unsigned long size, 167 unsigned alignment,
218 unsigned alignment, 168 unsigned long start,
219 unsigned long color, 169 unsigned long end,
220 bool best_match); 170 enum drm_mm_search_flags flags)
221extern struct drm_mm_node *drm_mm_search_free_in_range_generic(
222 const struct drm_mm *mm,
223 unsigned long size,
224 unsigned alignment,
225 unsigned long color,
226 unsigned long start,
227 unsigned long end,
228 bool best_match);
229static inline struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
230 unsigned long size,
231 unsigned alignment,
232 bool best_match)
233{ 171{
234 return drm_mm_search_free_generic(mm,size, alignment, 0, best_match); 172 return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
235} 173 0, start, end, flags);
236static inline struct drm_mm_node *drm_mm_search_free_in_range(
237 const struct drm_mm *mm,
238 unsigned long size,
239 unsigned alignment,
240 unsigned long start,
241 unsigned long end,
242 bool best_match)
243{
244 return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
245 start, end, best_match);
246} 174}
247 175
176extern void drm_mm_remove_node(struct drm_mm_node *node);
177extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
248extern void drm_mm_init(struct drm_mm *mm, 178extern void drm_mm_init(struct drm_mm *mm,
249 unsigned long start, 179 unsigned long start,
250 unsigned long size); 180 unsigned long size);
251extern void drm_mm_takedown(struct drm_mm *mm); 181extern void drm_mm_takedown(struct drm_mm *mm);
252extern int drm_mm_clean(struct drm_mm *mm); 182extern int drm_mm_clean(struct drm_mm *mm);
253extern int drm_mm_pre_get(struct drm_mm *mm);
254
255static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
256{
257 return block->mm;
258}
259 183
260void drm_mm_init_scan(struct drm_mm *mm, 184void drm_mm_init_scan(struct drm_mm *mm,
261 unsigned long size, 185 unsigned long size,
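
A minimal migration sketch (not part of the patch above): with the old
drm_mm_get_block()/drm_mm_put_block() pair removed, a driver embeds the
drm_mm_node in its own object and uses the flag-based insert call.
DRM_MM_SEARCH_DEFAULT is assumed to be one of the new drm_mm_search_flags
values; my_obj and both helpers are hypothetical.

    struct my_obj {
            struct drm_mm_node node;    /* embedded, no longer returned by the allocator */
    };

    static int my_obj_pin(struct drm_mm *mm, struct my_obj *obj,
                          unsigned long size, unsigned alignment)
    {
            /* was: obj->node = drm_mm_get_block(parent, size, alignment); */
            return drm_mm_insert_node(mm, &obj->node, size, alignment,
                                      DRM_MM_SEARCH_DEFAULT);
    }

    static void my_obj_unpin(struct my_obj *obj)
    {
            /* was: drm_mm_put_block(obj->node); */
            drm_mm_remove_node(&obj->node);
    }
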
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 34efaf64cc87..fd54a14a7c2a 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -1,4 +1,22 @@
1#define radeon_PCI_IDS \ 1#define radeon_PCI_IDS \
2 {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3 {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
4 {0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
5 {0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
6 {0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
7 {0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
8 {0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
9 {0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
10 {0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
11 {0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
12 {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
13 {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
14 {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
15 {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
16 {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
17 {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
18 {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
19 {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 20 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
3 {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 21 {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
4 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 22 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -690,29 +708,6 @@
690 {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \ 708 {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
691 {0, 0, 0} 709 {0, 0, 0}
692 710
693#define mach64_PCI_IDS \
694 {0x1002, 0x4749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
695 {0x1002, 0x4750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
696 {0x1002, 0x4751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
697 {0x1002, 0x4742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
698 {0x1002, 0x4744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
699 {0x1002, 0x4c49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
700 {0x1002, 0x4c50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
701 {0x1002, 0x4c51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
702 {0x1002, 0x4c42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
703 {0x1002, 0x4c44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
704 {0x1002, 0x474c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
705 {0x1002, 0x474f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
706 {0x1002, 0x4752, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
707 {0x1002, 0x4753, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
708 {0x1002, 0x474d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
709 {0x1002, 0x474e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
710 {0x1002, 0x4c52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
711 {0x1002, 0x4c53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
712 {0x1002, 0x4c4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
713 {0x1002, 0x4c4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
714 {0, 0, 0}
715
716#define sisdrv_PCI_IDS \ 711#define sisdrv_PCI_IDS \
717 {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 712 {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
718 {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 713 {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
@@ -752,10 +747,6 @@
752 {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 747 {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
753 {0, 0, 0} 748 {0, 0, 0}
754 749
755#define gamma_PCI_IDS \
756 {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
757 {0, 0, 0}
758
759#define savage_PCI_IDS \ 750#define savage_PCI_IDS \
760 {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \ 751 {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
761 {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \ 752 {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
@@ -781,6 +772,3 @@
781 {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \ 772 {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
782 {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \ 773 {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
783 {0, 0, 0} 774 {0, 0, 0}
784
785#define ffb_PCI_IDS \
786 {0, 0, 0}
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
new file mode 100644
index 000000000000..c18a593d1744
--- /dev/null
+++ b/include/drm/drm_vma_manager.h
@@ -0,0 +1,257 @@
1#ifndef __DRM_VMA_MANAGER_H__
2#define __DRM_VMA_MANAGER_H__
3
4/*
5 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#include <drm/drm_mm.h>
27#include <linux/fs.h>
28#include <linux/mm.h>
29#include <linux/module.h>
30#include <linux/rbtree.h>
31#include <linux/spinlock.h>
32#include <linux/types.h>
33
34struct drm_vma_offset_file {
35 struct rb_node vm_rb;
36 struct file *vm_filp;
37 unsigned long vm_count;
38};
39
40struct drm_vma_offset_node {
41 rwlock_t vm_lock;
42 struct drm_mm_node vm_node;
43 struct rb_node vm_rb;
44 struct rb_root vm_files;
45};
46
47struct drm_vma_offset_manager {
48 rwlock_t vm_lock;
49 struct rb_root vm_addr_space_rb;
50 struct drm_mm vm_addr_space_mm;
51};
52
53void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
54 unsigned long page_offset, unsigned long size);
55void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr);
56
57struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
58 unsigned long start,
59 unsigned long pages);
60struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
61 unsigned long start,
62 unsigned long pages);
63int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
64 struct drm_vma_offset_node *node, unsigned long pages);
65void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
66 struct drm_vma_offset_node *node);
67
68int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp);
69void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp);
70bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
71 struct file *filp);
72
73/**
74 * drm_vma_offset_exact_lookup() - Look up node by exact address
75 * @mgr: Manager object
76 * @start: Start address (page-based, not byte-based)
77 * @pages: Size of object (page-based)
78 *
79 * Same as drm_vma_offset_lookup() but does not allow any offset into the node.
80 * It only returns the exact object with the given start address.
81 *
82 * RETURNS:
83 * Node at exact start address @start.
84 */
85static inline struct drm_vma_offset_node *
86drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
87 unsigned long start,
88 unsigned long pages)
89{
90 struct drm_vma_offset_node *node;
91
92 node = drm_vma_offset_lookup(mgr, start, pages);
93 return (node && node->vm_node.start == start) ? node : NULL;
94}
95
96/**
97 * drm_vma_offset_lock_lookup() - Lock lookup for extended private use
98 * @mgr: Manager object
99 *
100 * Lock VMA manager for extended lookups. Only *_locked() VMA function calls
 101 * are allowed while holding this lock. All other contexts are blocked from the
 102 * VMA manager until the lock is released via drm_vma_offset_unlock_lookup().
103 *
104 * Use this if you need to take a reference to the objects returned by
105 * drm_vma_offset_lookup_locked() before releasing this lock again.
106 *
107 * This lock must not be used for anything else than extended lookups. You must
108 * not call any other VMA helpers while holding this lock.
109 *
110 * Note: You're in atomic-context while holding this lock!
111 *
112 * Example:
113 * drm_vma_offset_lock_lookup(mgr);
114 * node = drm_vma_offset_lookup_locked(mgr);
115 * if (node)
116 * kref_get_unless_zero(container_of(node, sth, entr));
117 * drm_vma_offset_unlock_lookup(mgr);
118 */
119static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
120{
121 read_lock(&mgr->vm_lock);
122}
123
124/**
125 * drm_vma_offset_unlock_lookup() - Unlock lookup for extended private use
126 * @mgr: Manager object
127 *
128 * Release lookup-lock. See drm_vma_offset_lock_lookup() for more information.
129 */
130static inline void drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *mgr)
131{
132 read_unlock(&mgr->vm_lock);
133}
134
135/**
136 * drm_vma_node_reset() - Initialize or reset node object
137 * @node: Node to initialize or reset
138 *
139 * Reset a node to its initial state. This must be called before using it with
140 * any VMA offset manager.
141 *
142 * This must not be called on an already allocated node, or you will leak
143 * memory.
144 */
145static inline void drm_vma_node_reset(struct drm_vma_offset_node *node)
146{
147 memset(node, 0, sizeof(*node));
148 node->vm_files = RB_ROOT;
149 rwlock_init(&node->vm_lock);
150}
151
152/**
153 * drm_vma_node_start() - Return start address for page-based addressing
154 * @node: Node to inspect
155 *
156 * Return the start address of the given node. This can be used as offset into
157 * the linear VM space that is provided by the VMA offset manager. Note that
158 * this can only be used for page-based addressing. If you need a proper offset
159 * for user-space mappings, you must apply "<< PAGE_SHIFT" or use the
160 * drm_vma_node_offset_addr() helper instead.
161 *
162 * RETURNS:
163 * Start address of @node for page-based addressing. 0 if the node does not
164 * have an offset allocated.
165 */
166static inline unsigned long drm_vma_node_start(struct drm_vma_offset_node *node)
167{
168 return node->vm_node.start;
169}
170
171/**
172 * drm_vma_node_size() - Return size (page-based)
173 * @node: Node to inspect
174 *
175 * Return the size as number of pages for the given node. This is the same size
176 * that was passed to drm_vma_offset_add(). If no offset is allocated for the
177 * node, this is 0.
178 *
179 * RETURNS:
180 * Size of @node as number of pages. 0 if the node does not have an offset
181 * allocated.
182 */
183static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node)
184{
185 return node->vm_node.size;
186}
187
188/**
189 * drm_vma_node_has_offset() - Check whether node is added to offset manager
190 * @node: Node to be checked
191 *
192 * RETURNS:
193 * true iff the node was previously allocated an offset and added to
 194 * a VMA offset manager.
195 */
196static inline bool drm_vma_node_has_offset(struct drm_vma_offset_node *node)
197{
198 return drm_mm_node_allocated(&node->vm_node);
199}
200
201/**
202 * drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps
203 * @node: Linked offset node
204 *
205 * Same as drm_vma_node_start() but returns the address as a valid offset that
206 * can be used for user-space mappings during mmap().
207 * This must not be called on unlinked nodes.
208 *
209 * RETURNS:
210 * Offset of @node for byte-based addressing. 0 if the node does not have an
 211 * offset allocated.
212 */
213static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
214{
215 return ((__u64)node->vm_node.start) << PAGE_SHIFT;
216}
217
218/**
219 * drm_vma_node_unmap() - Unmap offset node
220 * @node: Offset node
221 * @file_mapping: Address space to unmap @node from
222 *
223 * Unmap all userspace mappings for a given offset node. The mappings must be
224 * associated with the @file_mapping address-space. If no offset exists or
225 * the address-space is invalid, nothing is done.
226 *
227 * This call is unlocked. The caller must guarantee that drm_vma_offset_remove()
228 * is not called on this node concurrently.
229 */
230static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
231 struct address_space *file_mapping)
232{
233 if (file_mapping && drm_vma_node_has_offset(node))
234 unmap_mapping_range(file_mapping,
235 drm_vma_node_offset_addr(node),
236 drm_vma_node_size(node) << PAGE_SHIFT, 1);
237}
238
239/**
240 * drm_vma_node_verify_access() - Access verification helper for TTM
241 * @node: Offset node
242 * @filp: Open-file
243 *
244 * This checks whether @filp is granted access to @node. It is the same as
245 * drm_vma_node_is_allowed() but suitable as drop-in helper for TTM
246 * verify_access() callbacks.
247 *
248 * RETURNS:
249 * 0 if access is granted, -EACCES otherwise.
250 */
251static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node,
252 struct file *filp)
253{
254 return drm_vma_node_is_allowed(node, filp) ? 0 : -EACCES;
255}
256
257#endif /* __DRM_VMA_MANAGER_H__ */
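
A usage sketch for the new VMA offset manager (illustrative only): a GEM/TTM
style driver resets the node, adds it to a manager to obtain a fake mmap
offset, and unmaps plus removes it on release. struct my_bo, my_mgr and the
page count are hypothetical; only functions declared in this header are used.

    static struct drm_vma_offset_manager my_mgr;  /* set up with drm_vma_offset_manager_init() */

    struct my_bo {
            struct drm_vma_offset_node vma_node;
            unsigned long num_pages;
    };

    static int my_bo_setup_mmap_offset(struct my_bo *bo)
    {
            drm_vma_node_reset(&bo->vma_node);
            return drm_vma_offset_add(&my_mgr, &bo->vma_node, bo->num_pages);
    }

    static u64 my_bo_mmap_offset(struct my_bo *bo)
    {
            /* byte offset userspace passes to mmap() */
            return drm_vma_node_offset_addr(&bo->vma_node);
    }

    static void my_bo_release(struct my_bo *bo, struct address_space *mapping)
    {
            drm_vma_node_unmap(&bo->vma_node, mapping);
            drm_vma_offset_remove(&my_mgr, &bo->vma_node);
    }
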
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index d6aeaf3c6d6c..cb65fa14acfc 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -15,6 +15,7 @@
15#define _EXYNOS_DRM_H_ 15#define _EXYNOS_DRM_H_
16 16
17#include <uapi/drm/exynos_drm.h> 17#include <uapi/drm/exynos_drm.h>
18#include <video/videomode.h>
18 19
19/** 20/**
20 * A structure for lcd panel information. 21 * A structure for lcd panel information.
@@ -24,7 +25,7 @@
24 * @height_mm: physical size of lcd height. 25 * @height_mm: physical size of lcd height.
25 */ 26 */
26struct exynos_drm_panel_info { 27struct exynos_drm_panel_info {
27 struct fb_videomode timing; 28 struct videomode vm;
28 u32 width_mm; 29 u32 width_mm;
29 u32 height_mm; 30 u32 height_mm;
30}; 31};
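
Board code switches from fb_videomode timings to a struct videomode. A hedged
example, with field names taken from <video/videomode.h> and purely
illustrative timing values:

    static struct exynos_drm_panel_info my_panel = {
            .vm = {
                    .pixelclock   = 71000000,
                    .hactive      = 1280,
                    .hfront_porch = 18,
                    .hback_porch  = 36,
                    .hsync_len    = 10,
                    .vactive      = 800,
                    .vfront_porch = 4,
                    .vback_porch  = 16,
                    .vsync_len    = 3,
            },
            .width_mm  = 223,
            .height_mm = 125,
    };
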
diff --git a/include/drm/i2c/tda998x.h b/include/drm/i2c/tda998x.h
new file mode 100644
index 000000000000..3e419d92cf5a
--- /dev/null
+++ b/include/drm/i2c/tda998x.h
@@ -0,0 +1,30 @@
1#ifndef __DRM_I2C_TDA998X_H__
2#define __DRM_I2C_TDA998X_H__
3
4struct tda998x_encoder_params {
5 u8 swap_b:3;
6 u8 mirr_b:1;
7 u8 swap_a:3;
8 u8 mirr_a:1;
9 u8 swap_d:3;
10 u8 mirr_d:1;
11 u8 swap_c:3;
12 u8 mirr_c:1;
13 u8 swap_f:3;
14 u8 mirr_f:1;
15 u8 swap_e:3;
16 u8 mirr_e:1;
17
18 u8 audio_cfg;
19 u8 audio_clk_cfg;
20 u8 audio_frame[6];
21
22 enum {
23 AFMT_SPDIF,
24 AFMT_I2S
25 } audio_format;
26
27 unsigned audio_sample_rate;
28};
29
30#endif
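
A hedged platform-data sketch for the new header (values are illustrative and
not taken from any real board): the swap_*/mirr_* bitfields route and mirror
the video input ports, and the audio fields select I2S input.

    static const struct tda998x_encoder_params my_tda998x_params = {
            .swap_a = 2, .swap_b = 3, .swap_c = 0,
            .swap_d = 1, .swap_e = 4, .swap_f = 5,

            .audio_cfg         = 0x03,      /* illustrative port/config value */
            .audio_format      = AFMT_I2S,
            .audio_sample_rate = 48000,
    };
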
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 8a6aa56ece52..751eaffbf0d5 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -32,12 +32,12 @@
32#define _TTM_BO_API_H_ 32#define _TTM_BO_API_H_
33 33
34#include <drm/drm_hashtab.h> 34#include <drm/drm_hashtab.h>
35#include <drm/drm_vma_manager.h>
35#include <linux/kref.h> 36#include <linux/kref.h>
36#include <linux/list.h> 37#include <linux/list.h>
37#include <linux/wait.h> 38#include <linux/wait.h>
38#include <linux/mutex.h> 39#include <linux/mutex.h>
39#include <linux/mm.h> 40#include <linux/mm.h>
40#include <linux/rbtree.h>
41#include <linux/bitmap.h> 41#include <linux/bitmap.h>
42#include <linux/reservation.h> 42#include <linux/reservation.h>
43 43
@@ -145,7 +145,6 @@ struct ttm_tt;
145 * @type: The bo type. 145 * @type: The bo type.
146 * @destroy: Destruction function. If NULL, kfree is used. 146 * @destroy: Destruction function. If NULL, kfree is used.
147 * @num_pages: Actual number of pages. 147 * @num_pages: Actual number of pages.
148 * @addr_space_offset: Address space offset.
149 * @acc_size: Accounted size for this object. 148 * @acc_size: Accounted size for this object.
150 * @kref: Reference count of this buffer object. When this refcount reaches 149 * @kref: Reference count of this buffer object. When this refcount reaches
151 * zero, the object is put on the delayed delete list. 150 * zero, the object is put on the delayed delete list.
@@ -166,8 +165,7 @@ struct ttm_tt;
166 * @swap: List head for swap LRU list. 165 * @swap: List head for swap LRU list.
167 * @sync_obj: Pointer to a synchronization object. 166 * @sync_obj: Pointer to a synchronization object.
168 * @priv_flags: Flags describing buffer object internal state. 167 * @priv_flags: Flags describing buffer object internal state.
169 * @vm_rb: Rb node for the vm rb tree. 168 * @vma_node: Address space manager node.
170 * @vm_node: Address space manager node.
171 * @offset: The current GPU offset, which can have different meanings 169 * @offset: The current GPU offset, which can have different meanings
172 * depending on the memory type. For SYSTEM type memory, it should be 0. 170 * depending on the memory type. For SYSTEM type memory, it should be 0.
173 * @cur_placement: Hint of current placement. 171 * @cur_placement: Hint of current placement.
@@ -194,7 +192,6 @@ struct ttm_buffer_object {
194 enum ttm_bo_type type; 192 enum ttm_bo_type type;
195 void (*destroy) (struct ttm_buffer_object *); 193 void (*destroy) (struct ttm_buffer_object *);
196 unsigned long num_pages; 194 unsigned long num_pages;
197 uint64_t addr_space_offset;
198 size_t acc_size; 195 size_t acc_size;
199 196
200 /** 197 /**
@@ -238,13 +235,7 @@ struct ttm_buffer_object {
238 void *sync_obj; 235 void *sync_obj;
239 unsigned long priv_flags; 236 unsigned long priv_flags;
240 237
241 /** 238 struct drm_vma_offset_node vma_node;
242 * Members protected by the bdev::vm_lock
243 */
244
245 struct rb_node vm_rb;
246 struct drm_mm_node *vm_node;
247
248 239
249 /** 240 /**
250 * Special members that are protected by the reserve lock 241 * Special members that are protected by the reserve lock
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 984fc2d571a1..8639c85d61c4 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -36,6 +36,7 @@
36#include <ttm/ttm_placement.h> 36#include <ttm/ttm_placement.h>
37#include <drm/drm_mm.h> 37#include <drm/drm_mm.h>
38#include <drm/drm_global.h> 38#include <drm/drm_global.h>
39#include <drm/drm_vma_manager.h>
39#include <linux/workqueue.h> 40#include <linux/workqueue.h>
40#include <linux/fs.h> 41#include <linux/fs.h>
41#include <linux/spinlock.h> 42#include <linux/spinlock.h>
@@ -519,7 +520,7 @@ struct ttm_bo_global {
519 * @man: An array of mem_type_managers. 520 * @man: An array of mem_type_managers.
520 * @fence_lock: Protects the synchronizing members on *all* bos belonging 521 * @fence_lock: Protects the synchronizing members on *all* bos belonging
521 * to this device. 522 * to this device.
522 * @addr_space_mm: Range manager for the device address space. 523 * @vma_manager: Address space manager
523 * lru_lock: Spinlock that protects the buffer+device lru lists and 524 * lru_lock: Spinlock that protects the buffer+device lru lists and
524 * ddestroy lists. 525 * ddestroy lists.
525 * @val_seq: Current validation sequence. 526 * @val_seq: Current validation sequence.
@@ -537,14 +538,13 @@ struct ttm_bo_device {
537 struct list_head device_list; 538 struct list_head device_list;
538 struct ttm_bo_global *glob; 539 struct ttm_bo_global *glob;
539 struct ttm_bo_driver *driver; 540 struct ttm_bo_driver *driver;
540 rwlock_t vm_lock;
541 struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; 541 struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
542 spinlock_t fence_lock; 542 spinlock_t fence_lock;
543
543 /* 544 /*
544 * Protected by the vm lock. 545 * Protected by internal locks.
545 */ 546 */
546 struct rb_root addr_space_rb; 547 struct drm_vma_offset_manager vma_manager;
547 struct drm_mm addr_space_mm;
548 548
549 /* 549 /*
550 * Protected by the global:lru lock. 550 * Protected by the global:lru lock.
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 3b589440ecfe..9231be9e90a2 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -18,11 +18,21 @@ enum hdmi_infoframe_type {
18 HDMI_INFOFRAME_TYPE_AUDIO = 0x84, 18 HDMI_INFOFRAME_TYPE_AUDIO = 0x84,
19}; 19};
20 20
21#define HDMI_IEEE_OUI 0x000c03
21#define HDMI_INFOFRAME_HEADER_SIZE 4 22#define HDMI_INFOFRAME_HEADER_SIZE 4
22#define HDMI_AVI_INFOFRAME_SIZE 13 23#define HDMI_AVI_INFOFRAME_SIZE 13
23#define HDMI_SPD_INFOFRAME_SIZE 25 24#define HDMI_SPD_INFOFRAME_SIZE 25
24#define HDMI_AUDIO_INFOFRAME_SIZE 10 25#define HDMI_AUDIO_INFOFRAME_SIZE 10
25 26
27#define HDMI_INFOFRAME_SIZE(type) \
28 (HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE)
29
30struct hdmi_any_infoframe {
31 enum hdmi_infoframe_type type;
32 unsigned char version;
33 unsigned char length;
34};
35
26enum hdmi_colorspace { 36enum hdmi_colorspace {
27 HDMI_COLORSPACE_RGB, 37 HDMI_COLORSPACE_RGB,
28 HDMI_COLORSPACE_YUV422, 38 HDMI_COLORSPACE_YUV422,
@@ -100,9 +110,6 @@ struct hdmi_avi_infoframe {
100 unsigned char version; 110 unsigned char version;
101 unsigned char length; 111 unsigned char length;
102 enum hdmi_colorspace colorspace; 112 enum hdmi_colorspace colorspace;
103 bool active_info_valid;
104 bool horizontal_bar_valid;
105 bool vertical_bar_valid;
106 enum hdmi_scan_mode scan_mode; 113 enum hdmi_scan_mode scan_mode;
107 enum hdmi_colorimetry colorimetry; 114 enum hdmi_colorimetry colorimetry;
108 enum hdmi_picture_aspect picture_aspect; 115 enum hdmi_picture_aspect picture_aspect;
@@ -218,14 +225,52 @@ int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame);
218ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame, 225ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
219 void *buffer, size_t size); 226 void *buffer, size_t size);
220 227
228enum hdmi_3d_structure {
229 HDMI_3D_STRUCTURE_INVALID = -1,
230 HDMI_3D_STRUCTURE_FRAME_PACKING = 0,
231 HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE,
232 HDMI_3D_STRUCTURE_LINE_ALTERNATIVE,
233 HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL,
234 HDMI_3D_STRUCTURE_L_DEPTH,
235 HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH,
236 HDMI_3D_STRUCTURE_TOP_AND_BOTTOM,
237 HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF = 8,
238};
239
240
221struct hdmi_vendor_infoframe { 241struct hdmi_vendor_infoframe {
222 enum hdmi_infoframe_type type; 242 enum hdmi_infoframe_type type;
223 unsigned char version; 243 unsigned char version;
224 unsigned char length; 244 unsigned char length;
225 u8 data[27]; 245 unsigned int oui;
246 u8 vic;
247 enum hdmi_3d_structure s3d_struct;
248 unsigned int s3d_ext_data;
226}; 249};
227 250
251int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame);
228ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, 252ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
229 void *buffer, size_t size); 253 void *buffer, size_t size);
230 254
255union hdmi_vendor_any_infoframe {
256 struct {
257 enum hdmi_infoframe_type type;
258 unsigned char version;
259 unsigned char length;
260 unsigned int oui;
261 } any;
262 struct hdmi_vendor_infoframe hdmi;
263};
264
265union hdmi_infoframe {
266 struct hdmi_any_infoframe any;
267 struct hdmi_avi_infoframe avi;
268 struct hdmi_spd_infoframe spd;
269 union hdmi_vendor_any_infoframe vendor;
270 struct hdmi_audio_infoframe audio;
271};
272
273ssize_t
274hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size);
275
231#endif /* _DRM_HDMI_H */ 276#endif /* _DRM_HDMI_H */
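
The vendor infoframe is no longer an opaque 27-byte payload; drivers fill the
HDMI-specific fields and let the helpers pack it. A hedged sketch for a 4k
(HDMI VIC) mode, with the VIC value and buffer handling left illustrative:

    static ssize_t pack_4k_vendor_infoframe(void *buffer, size_t size)
    {
            struct hdmi_vendor_infoframe frame;
            int ret;

            ret = hdmi_vendor_infoframe_init(&frame);
            if (ret < 0)
                    return ret;

            frame.vic = 1;                          /* illustrative HDMI VIC */
            frame.s3d_struct = HDMI_3D_STRUCTURE_INVALID;

            return hdmi_vendor_infoframe_pack(&frame, buffer, size);
    }
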
diff --git a/include/linux/platform_data/rcar-du.h b/include/linux/platform_data/rcar-du.h
index 80587fdbba3e..1a2e9901a22e 100644
--- a/include/linux/platform_data/rcar-du.h
+++ b/include/linux/platform_data/rcar-du.h
@@ -16,8 +16,18 @@
16 16
17#include <drm/drm_mode.h> 17#include <drm/drm_mode.h>
18 18
19enum rcar_du_output {
20 RCAR_DU_OUTPUT_DPAD0,
21 RCAR_DU_OUTPUT_DPAD1,
22 RCAR_DU_OUTPUT_LVDS0,
23 RCAR_DU_OUTPUT_LVDS1,
24 RCAR_DU_OUTPUT_TCON,
25 RCAR_DU_OUTPUT_MAX,
26};
27
19enum rcar_du_encoder_type { 28enum rcar_du_encoder_type {
20 RCAR_DU_ENCODER_UNUSED = 0, 29 RCAR_DU_ENCODER_UNUSED = 0,
30 RCAR_DU_ENCODER_NONE,
21 RCAR_DU_ENCODER_VGA, 31 RCAR_DU_ENCODER_VGA,
22 RCAR_DU_ENCODER_LVDS, 32 RCAR_DU_ENCODER_LVDS,
23}; 33};
@@ -28,22 +38,32 @@ struct rcar_du_panel_data {
28 struct drm_mode_modeinfo mode; 38 struct drm_mode_modeinfo mode;
29}; 39};
30 40
31struct rcar_du_encoder_lvds_data { 41struct rcar_du_connector_lvds_data {
32 struct rcar_du_panel_data panel; 42 struct rcar_du_panel_data panel;
33}; 43};
34 44
35struct rcar_du_encoder_vga_data { 45struct rcar_du_connector_vga_data {
36 /* TODO: Add DDC information for EDID retrieval */ 46 /* TODO: Add DDC information for EDID retrieval */
37}; 47};
38 48
49/*
50 * struct rcar_du_encoder_data - Encoder platform data
51 * @type: the encoder type (RCAR_DU_ENCODER_*)
52 * @output: the DU output the connector is connected to (RCAR_DU_OUTPUT_*)
53 * @connector.lvds: platform data for LVDS connectors
54 * @connector.vga: platform data for VGA connectors
55 *
56 * Encoder platform data describes an on-board encoder, its associated DU SoC
57 * output, and the connector.
58 */
39struct rcar_du_encoder_data { 59struct rcar_du_encoder_data {
40 enum rcar_du_encoder_type encoder; 60 enum rcar_du_encoder_type type;
41 unsigned int output; 61 enum rcar_du_output output;
42 62
43 union { 63 union {
44 struct rcar_du_encoder_lvds_data lvds; 64 struct rcar_du_connector_lvds_data lvds;
45 struct rcar_du_encoder_vga_data vga; 65 struct rcar_du_connector_vga_data vga;
46 } u; 66 } connector;
47}; 67};
48 68
49struct rcar_du_platform_data { 69struct rcar_du_platform_data {
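
With the rename, board files describe each encoder by its type, the DU output
it drives and the connector data. A hedged sketch (panel timings omitted,
values illustrative):

    static struct rcar_du_encoder_data my_du_encoders[] = {
            {
                    .type = RCAR_DU_ENCODER_LVDS,
                    .output = RCAR_DU_OUTPUT_LVDS0,
                    .connector.lvds.panel = {
                            .mode = { /* panel timings omitted */ },
                    },
            }, {
                    .type = RCAR_DU_ENCODER_VGA,
                    .output = RCAR_DU_OUTPUT_DPAD0,
            },
    };
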
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index ddb419cf4530..502073a53dd3 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -45,7 +45,8 @@ struct vga_switcheroo_client_ops {
45#if defined(CONFIG_VGA_SWITCHEROO) 45#if defined(CONFIG_VGA_SWITCHEROO)
46void vga_switcheroo_unregister_client(struct pci_dev *dev); 46void vga_switcheroo_unregister_client(struct pci_dev *dev);
47int vga_switcheroo_register_client(struct pci_dev *dev, 47int vga_switcheroo_register_client(struct pci_dev *dev,
48 const struct vga_switcheroo_client_ops *ops); 48 const struct vga_switcheroo_client_ops *ops,
49 bool driver_power_control);
49int vga_switcheroo_register_audio_client(struct pci_dev *pdev, 50int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
50 const struct vga_switcheroo_client_ops *ops, 51 const struct vga_switcheroo_client_ops *ops,
51 int id, bool active); 52 int id, bool active);
@@ -60,11 +61,15 @@ int vga_switcheroo_process_delayed_switch(void);
60 61
61int vga_switcheroo_get_client_state(struct pci_dev *dev); 62int vga_switcheroo_get_client_state(struct pci_dev *dev);
62 63
64void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
65
66int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
67int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
63#else 68#else
64 69
65static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} 70static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
66static inline int vga_switcheroo_register_client(struct pci_dev *dev, 71static inline int vga_switcheroo_register_client(struct pci_dev *dev,
67 const struct vga_switcheroo_client_ops *ops) { return 0; } 72 const struct vga_switcheroo_client_ops *ops, bool driver_power_control) { return 0; }
68static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {} 73static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {}
69static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; } 74static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; }
70static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev, 75static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
@@ -74,6 +79,10 @@ static inline void vga_switcheroo_unregister_handler(void) {}
74static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } 79static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
75static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } 80static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
76 81
82static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
83
84static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
85static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
77 86
78#endif 87#endif
79#endif /* _LINUX_VGA_SWITCHEROO_H_ */ 88#endif /* _LINUX_VGA_SWITCHEROO_H_ */
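
GPU drivers that want runtime power control now pass the extra flag when
registering. A hedged sketch; the my_switcheroo_* callbacks are hypothetical
and the member names are as recalled from struct vga_switcheroo_client_ops:

    static const struct vga_switcheroo_client_ops my_switcheroo_ops = {
            .set_gpu_state = my_switcheroo_set_state,
            .reinit        = my_switcheroo_reinit,
            .can_switch    = my_switcheroo_can_switch,
    };

    static int my_register_switcheroo(struct pci_dev *pdev, bool runtime_pm)
    {
            /* the third argument is the new driver_power_control flag */
            return vga_switcheroo_register_client(pdev, &my_switcheroo_ops,
                                                  runtime_pm);
    }
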
diff --git a/include/uapi/drm/Kbuild b/include/uapi/drm/Kbuild
index 119487e05e65..2d9a25daab05 100644
--- a/include/uapi/drm/Kbuild
+++ b/include/uapi/drm/Kbuild
@@ -16,3 +16,4 @@ header-y += sis_drm.h
16header-y += tegra_drm.h 16header-y += tegra_drm.h
17header-y += via_drm.h 17header-y += via_drm.h
18header-y += vmwgfx_drm.h 18header-y += vmwgfx_drm.h
19header-y += msm_drm.h
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 238a166b9fe6..ece867889cc7 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -181,7 +181,7 @@ enum drm_map_type {
181 _DRM_AGP = 3, /**< AGP/GART */ 181 _DRM_AGP = 3, /**< AGP/GART */
182 _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ 182 _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
183 _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ 183 _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
184 _DRM_GEM = 6, /**< GEM object */ 184 _DRM_GEM = 6, /**< GEM object (obsolete) */
185}; 185};
186 186
187/** 187/**
@@ -780,6 +780,7 @@ struct drm_event_vblank {
780#define DRM_CAP_DUMB_PREFER_SHADOW 0x4 780#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
781#define DRM_CAP_PRIME 0x5 781#define DRM_CAP_PRIME 0x5
782#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6 782#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
783#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
783 784
784#define DRM_PRIME_CAP_IMPORT 0x1 785#define DRM_PRIME_CAP_IMPORT 0x1
785#define DRM_PRIME_CAP_EXPORT 0x2 786#define DRM_PRIME_CAP_EXPORT 0x2
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 53db7cea373b..550811712f78 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -412,7 +412,8 @@ struct drm_mode_crtc_lut {
412}; 412};
413 413
414#define DRM_MODE_PAGE_FLIP_EVENT 0x01 414#define DRM_MODE_PAGE_FLIP_EVENT 0x01
415#define DRM_MODE_PAGE_FLIP_FLAGS DRM_MODE_PAGE_FLIP_EVENT 415#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
416#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC)
416 417
417/* 418/*
418 * Request a page flip on the specified crtc. 419 * Request a page flip on the specified crtc.
@@ -426,11 +427,14 @@ struct drm_mode_crtc_lut {
426 * flip is already pending as the ioctl is called, EBUSY will be 427 * flip is already pending as the ioctl is called, EBUSY will be
427 * returned. 428 * returned.
428 * 429 *
429 * The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will 430 * Flag DRM_MODE_PAGE_FLIP_EVENT requests that drm sends back a vblank
430 * request that drm sends back a vblank event (see drm.h: struct 431 * event (see drm.h: struct drm_event_vblank) when the page flip is
431 * drm_event_vblank) when the page flip is done. The user_data field 432 * done. The user_data field passed in with this ioctl will be
432 * passed in with this ioctl will be returned as the user_data field 433 * returned as the user_data field in the vblank event struct.
433 * in the vblank event struct. 434 *
435 * Flag DRM_MODE_PAGE_FLIP_ASYNC requests that the flip happen
 436 * 'as soon as possible', meaning that it does not delay waiting for vblank.
437 * This may cause tearing on the screen.
434 * 438 *
435 * The reserved field must be zero until we figure out something 439 * The reserved field must be zero until we figure out something
436 * clever to use it for. 440 * clever to use it for.
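
From userspace the new flag is passed through the page-flip ioctl. A hedged
sketch using the raw ioctl and the struct drm_mode_crtc_page_flip fields
defined earlier in this header (libdrm's drmIoctl() is assumed):

    #include <stdint.h>
    #include <xf86drm.h>
    #include <drm/drm_mode.h>

    static int async_flip(int fd, uint32_t crtc_id, uint32_t fb_id,
                          void *user_data)
    {
            struct drm_mode_crtc_page_flip flip = {
                    .crtc_id   = crtc_id,
                    .fb_id     = fb_id,
                    .flags     = DRM_MODE_PAGE_FLIP_EVENT |
                                 DRM_MODE_PAGE_FLIP_ASYNC,
                    .user_data = (uint64_t)(uintptr_t)user_data,
            };

            return drmIoctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
    }
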
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 923ed7fe5775..55bb5729bd78 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -33,6 +33,30 @@
33 * subject to backwards-compatibility constraints. 33 * subject to backwards-compatibility constraints.
34 */ 34 */
35 35
36/**
 37 * DOC: uevents generated by i915 on its device node
38 *
39 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
40 * event from the gpu l3 cache. Additional information supplied is ROW,
41 * BANK, SUBBANK of the affected cacheline. Userspace should keep track of
42 * these events and if a specific cache-line seems to have a persistent
 43 * error, remap it with the l3 remapping tool supplied in intel-gpu-tools.
44 * The value supplied with the event is always 1.
45 *
46 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
47 * hangcheck. The error detection event is a good indicator of when things
48 * began to go badly. The value supplied with the event is a 1 upon error
49 * detection, and a 0 upon reset completion, signifying no more error
50 * exists. NOTE: Disabling hangcheck or reset via module parameter will
51 * cause the related events to not be seen.
52 *
 53 * I915_RESET_UEVENT - Event is generated just before an attempt to reset
 54 * the GPU. The value supplied with the event is always 1. NOTE: Disabling
 55 * reset via module parameter will cause this event to not be seen.
56 */
57#define I915_L3_PARITY_UEVENT "L3_PARITY_ERROR"
58#define I915_ERROR_UEVENT "ERROR"
59#define I915_RESET_UEVENT "RESET"
36 60
37/* Each region is a minimum of 16k, and there are at most 255 of them. 61/* Each region is a minimum of 16k, and there are at most 255 of them.
38 */ 62 */
@@ -310,6 +334,7 @@ typedef struct drm_i915_irq_wait {
310#define I915_PARAM_HAS_PINNED_BATCHES 24 334#define I915_PARAM_HAS_PINNED_BATCHES 24
311#define I915_PARAM_HAS_EXEC_NO_RELOC 25 335#define I915_PARAM_HAS_EXEC_NO_RELOC 25
312#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26 336#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
337#define I915_PARAM_HAS_WT 27
313 338
314typedef struct drm_i915_getparam { 339typedef struct drm_i915_getparam {
315 int param; 340 int param;
@@ -744,8 +769,32 @@ struct drm_i915_gem_busy {
744 __u32 busy; 769 __u32 busy;
745}; 770};
746 771
772/**
773 * I915_CACHING_NONE
774 *
775 * GPU access is not coherent with cpu caches. Default for machines without an
776 * LLC.
777 */
747#define I915_CACHING_NONE 0 778#define I915_CACHING_NONE 0
779/**
780 * I915_CACHING_CACHED
781 *
782 * GPU access is coherent with cpu caches and furthermore the data is cached in
783 * last-level caches shared between cpu cores and the gpu GT. Default on
784 * machines with HAS_LLC.
785 */
748#define I915_CACHING_CACHED 1 786#define I915_CACHING_CACHED 1
787/**
788 * I915_CACHING_DISPLAY
789 *
790 * Special GPU caching mode which is coherent with the scanout engines.
791 * Transparently falls back to I915_CACHING_NONE on platforms where no special
792 * cache mode (like write-through or gfdt flushing) is available. The kernel
793 * automatically sets this mode when using a buffer as a scanout target.
794 * Userspace can manually set this mode to avoid a costly stall and clflush in
795 * the hotpath of drawing the first frame.
796 */
797#define I915_CACHING_DISPLAY 2
749 798
750struct drm_i915_gem_caching { 799struct drm_i915_gem_caching {
751 /** 800 /**
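
Userspace opts a buffer into the new display caching mode through the existing
set-caching ioctl. A hedged sketch, assuming DRM_IOCTL_I915_GEM_SET_CACHING
and the handle/caching members of struct drm_i915_gem_caching:

    static int set_display_caching(int fd, uint32_t handle)
    {
            struct drm_i915_gem_caching arg = {
                    .handle  = handle,
                    .caching = I915_CACHING_DISPLAY,
            };

            return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
    }
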
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
new file mode 100644
index 000000000000..d3c62074016d
--- /dev/null
+++ b/include/uapi/drm/msm_drm.h
@@ -0,0 +1,207 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_DRM_H__
19#define __MSM_DRM_H__
20
21#include <stddef.h>
22#include <drm/drm.h>
23
24/* Please note that modifications to all structs defined here are
25 * subject to backwards-compatibility constraints:
26 * 1) Do not use pointers, use uint64_t instead for 32 bit / 64 bit
27 * user/kernel compatibility
28 * 2) Keep fields aligned to their size
29 * 3) Because of how drm_ioctl() works, we can add new fields at
30 * the end of an ioctl if some care is taken: drm_ioctl() will
31 * zero out the new fields at the tail of the ioctl, so a zero
32 * value should have a backwards compatible meaning. And for
33 * output params, userspace won't see the newly added output
 34 * fields, so that has to be acceptable.
35 */
36
37#define MSM_PIPE_NONE 0x00
38#define MSM_PIPE_2D0 0x01
39#define MSM_PIPE_2D1 0x02
40#define MSM_PIPE_3D0 0x10
41
42/* timeouts are specified in clock-monotonic absolute times (to simplify
43 * restarting interrupted ioctls). The following struct is logically the
44 * same as 'struct timespec' but 32/64b ABI safe.
45 */
46struct drm_msm_timespec {
47 int64_t tv_sec; /* seconds */
48 int64_t tv_nsec; /* nanoseconds */
49};
50
51#define MSM_PARAM_GPU_ID 0x01
52#define MSM_PARAM_GMEM_SIZE 0x02
53
54struct drm_msm_param {
55 uint32_t pipe; /* in, MSM_PIPE_x */
56 uint32_t param; /* in, MSM_PARAM_x */
57 uint64_t value; /* out (get_param) or in (set_param) */
58};
59
60/*
61 * GEM buffers:
62 */
63
64#define MSM_BO_SCANOUT 0x00000001 /* scanout capable */
65#define MSM_BO_GPU_READONLY 0x00000002
66#define MSM_BO_CACHE_MASK 0x000f0000
67/* cache modes */
68#define MSM_BO_CACHED 0x00010000
69#define MSM_BO_WC 0x00020000
70#define MSM_BO_UNCACHED 0x00040000
71
72struct drm_msm_gem_new {
73 uint64_t size; /* in */
74 uint32_t flags; /* in, mask of MSM_BO_x */
75 uint32_t handle; /* out */
76};
77
78struct drm_msm_gem_info {
79 uint32_t handle; /* in */
80 uint32_t pad;
81 uint64_t offset; /* out, offset to pass to mmap() */
82};
83
84#define MSM_PREP_READ 0x01
85#define MSM_PREP_WRITE 0x02
86#define MSM_PREP_NOSYNC 0x04
87
88struct drm_msm_gem_cpu_prep {
89 uint32_t handle; /* in */
90 uint32_t op; /* in, mask of MSM_PREP_x */
91 struct drm_msm_timespec timeout; /* in */
92};
93
94struct drm_msm_gem_cpu_fini {
95 uint32_t handle; /* in */
96};
97
98/*
99 * Cmdstream Submission:
100 */
101
102/* The value written into the cmdstream is logically:
103 *
104 * ((relocbuf->gpuaddr + reloc_offset) << shift) | or
105 *
 106 * When we have GPUs with >32-bit pointers, it should be possible to deal
 107 * with this by emitting two reloc entries with appropriate shift
 108 * values. Or a new MSM_SUBMIT_CMD_x type would also be an option.
109 *
110 * NOTE that reloc's must be sorted by order of increasing submit_offset,
111 * otherwise EINVAL.
112 */
113struct drm_msm_gem_submit_reloc {
114 uint32_t submit_offset; /* in, offset from submit_bo */
115 uint32_t or; /* in, value OR'd with result */
116 int32_t shift; /* in, amount of left shift (can be negative) */
117 uint32_t reloc_idx; /* in, index of reloc_bo buffer */
118 uint64_t reloc_offset; /* in, offset from start of reloc_bo */
119};
120
121/* submit-types:
122 * BUF - this cmd buffer is executed normally.
123 * IB_TARGET_BUF - this cmd buffer is an IB target. Reloc's are
 124 * processed normally, but the kernel does not set up an IB to
 125 * this buffer in the first-level ringbuffer.
126 * CTX_RESTORE_BUF - only executed if there has been a GPU context
127 * switch since the last SUBMIT ioctl
128 */
129#define MSM_SUBMIT_CMD_BUF 0x0001
130#define MSM_SUBMIT_CMD_IB_TARGET_BUF 0x0002
131#define MSM_SUBMIT_CMD_CTX_RESTORE_BUF 0x0003
132struct drm_msm_gem_submit_cmd {
133 uint32_t type; /* in, one of MSM_SUBMIT_CMD_x */
134 uint32_t submit_idx; /* in, index of submit_bo cmdstream buffer */
135 uint32_t submit_offset; /* in, offset into submit_bo */
136 uint32_t size; /* in, cmdstream size */
137 uint32_t pad;
138 uint32_t nr_relocs; /* in, number of submit_reloc's */
139 uint64_t __user relocs; /* in, ptr to array of submit_reloc's */
140};
141
142/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
143 * cmdstream buffer(s) themselves or reloc entries) has one (and only
144 * one) entry in the submit->bos[] table.
145 *
 146 * As an optimization, the current buffer (gpu virtual address) can be
147 * passed back through the 'presumed' field. If on a subsequent reloc,
148 * userspace passes back a 'presumed' address that is still valid,
149 * then patching the cmdstream for this entry is skipped. This can
 150 * avoid the kernel needing to map/access the cmdstream bo in the common
151 * case.
152 */
153#define MSM_SUBMIT_BO_READ 0x0001
154#define MSM_SUBMIT_BO_WRITE 0x0002
155struct drm_msm_gem_submit_bo {
156 uint32_t flags; /* in, mask of MSM_SUBMIT_BO_x */
157 uint32_t handle; /* in, GEM handle */
158 uint64_t presumed; /* in/out, presumed buffer address */
159};
160
161/* Each cmdstream submit consists of a table of buffers involved, and
162 * one or more cmdstream buffers. This allows for conditional execution
163 * (context-restore), and IB buffers needed for per tile/bin draw cmds.
164 */
165struct drm_msm_gem_submit {
166 uint32_t pipe; /* in, MSM_PIPE_x */
167 uint32_t fence; /* out */
168 uint32_t nr_bos; /* in, number of submit_bo's */
169 uint32_t nr_cmds; /* in, number of submit_cmd's */
170 uint64_t __user bos; /* in, ptr to array of submit_bo's */
171 uint64_t __user cmds; /* in, ptr to array of submit_cmd's */
172};
173
174/* The normal way to synchronize with the GPU is just to CPU_PREP on
175 * a buffer if you need to access it from the CPU (other cmdstream
176 * submission from same or other contexts, PAGE_FLIP ioctl, etc, all
177 * handle the required synchronization under the hood). This ioctl
178 * mainly just exists as a way to implement the gallium pipe_fence
179 * APIs without requiring a dummy bo to synchronize on.
180 */
181struct drm_msm_wait_fence {
182 uint32_t fence; /* in */
183 uint32_t pad;
184 struct drm_msm_timespec timeout; /* in */
185};
186
187#define DRM_MSM_GET_PARAM 0x00
188/* placeholder:
189#define DRM_MSM_SET_PARAM 0x01
190 */
191#define DRM_MSM_GEM_NEW 0x02
192#define DRM_MSM_GEM_INFO 0x03
193#define DRM_MSM_GEM_CPU_PREP 0x04
194#define DRM_MSM_GEM_CPU_FINI 0x05
195#define DRM_MSM_GEM_SUBMIT 0x06
196#define DRM_MSM_WAIT_FENCE 0x07
197#define DRM_MSM_NUM_IOCTLS 0x08
198
199#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
200#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
201#define DRM_IOCTL_MSM_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_INFO, struct drm_msm_gem_info)
202#define DRM_IOCTL_MSM_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_PREP, struct drm_msm_gem_cpu_prep)
203#define DRM_IOCTL_MSM_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_FINI, struct drm_msm_gem_cpu_fini)
204#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
205#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
206
207#endif /* __MSM_DRM_H__ */
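
A hedged userspace sketch tying the new ioctls together: allocate a
write-combined buffer, look up its fake mmap offset, and map it. Error paths
are abbreviated; drmIoctl() from libdrm and the header path are assumed.

    #include <sys/mman.h>
    #include <xf86drm.h>
    #include <drm/msm_drm.h>

    static void *msm_bo_new_and_map(int fd, uint64_t size, uint32_t *handle)
    {
            struct drm_msm_gem_new req_new = { .size = size, .flags = MSM_BO_WC };
            struct drm_msm_gem_info req_info = { 0 };
            void *map;

            if (drmIoctl(fd, DRM_IOCTL_MSM_GEM_NEW, &req_new))
                    return NULL;

            req_info.handle = req_new.handle;
            if (drmIoctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req_info))
                    return NULL;

            map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       fd, req_info.offset);
            if (map == MAP_FAILED)
                    return NULL;

            *handle = req_new.handle;
            return map;
    }
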
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 321d4ac5c512..fa8b3adf9ffb 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -979,6 +979,8 @@ struct drm_radeon_cs {
979#define RADEON_INFO_RING_WORKING 0x15 979#define RADEON_INFO_RING_WORKING 0x15
980/* SI tile mode array */ 980/* SI tile mode array */
981#define RADEON_INFO_SI_TILE_MODE_ARRAY 0x16 981#define RADEON_INFO_SI_TILE_MODE_ARRAY 0x16
982/* query if CP DMA is supported on the compute ring */
983#define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17
982 984
983 985
984struct drm_radeon_info { 986struct drm_radeon_info {
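
A hedged userspace sketch for the new query: the info ioctl takes the request
id and a pointer (passed as a u64) to the result. drmIoctl() from libdrm is
assumed.

    static int radeon_has_compute_cp_dma(int fd)
    {
            uint32_t result = 0;
            struct drm_radeon_info info = {
                    .request = RADEON_INFO_SI_CP_DMA_COMPUTE,
                    .value   = (uint64_t)(uintptr_t)&result,
            };

            if (drmIoctl(fd, DRM_IOCTL_RADEON_INFO, &info))
                    return 0;

            return result != 0;
    }
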
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index c6c98298ac39..e54ebd530849 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -555,6 +555,9 @@ struct azx {
555#ifdef CONFIG_SND_HDA_DSP_LOADER 555#ifdef CONFIG_SND_HDA_DSP_LOADER
556 struct azx_dev saved_azx_dev; 556 struct azx_dev saved_azx_dev;
557#endif 557#endif
558
559 /* secondary power domain for hdmi audio under vga device */
560 struct dev_pm_domain hdmi_pm_domain;
558}; 561};
559 562
560#define CREATE_TRACE_POINTS 563#define CREATE_TRACE_POINTS
@@ -1397,8 +1400,9 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
1397 int i, ok; 1400 int i, ok;
1398 1401
1399#ifdef CONFIG_PM_RUNTIME 1402#ifdef CONFIG_PM_RUNTIME
1400 if (chip->pci->dev.power.runtime_status != RPM_ACTIVE) 1403 if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
1401 return IRQ_NONE; 1404 if (chip->pci->dev.power.runtime_status != RPM_ACTIVE)
1405 return IRQ_NONE;
1402#endif 1406#endif
1403 1407
1404 spin_lock(&chip->reg_lock); 1408 spin_lock(&chip->reg_lock);
@@ -1409,7 +1413,7 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
1409 } 1413 }
1410 1414
1411 status = azx_readl(chip, INTSTS); 1415 status = azx_readl(chip, INTSTS);
1412 if (status == 0) { 1416 if (status == 0 || status == 0xffffffff) {
1413 spin_unlock(&chip->reg_lock); 1417 spin_unlock(&chip->reg_lock);
1414 return IRQ_NONE; 1418 return IRQ_NONE;
1415 } 1419 }
@@ -2971,6 +2975,12 @@ static int azx_runtime_suspend(struct device *dev)
2971 struct snd_card *card = dev_get_drvdata(dev); 2975 struct snd_card *card = dev_get_drvdata(dev);
2972 struct azx *chip = card->private_data; 2976 struct azx *chip = card->private_data;
2973 2977
2978 if (chip->disabled)
2979 return 0;
2980
2981 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
2982 return 0;
2983
2974 /* enable controller wake up event */ 2984 /* enable controller wake up event */
2975 azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) | 2985 azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) |
2976 STATESTS_INT_MASK); 2986 STATESTS_INT_MASK);
@@ -2991,6 +3001,12 @@ static int azx_runtime_resume(struct device *dev)
2991 struct hda_codec *codec; 3001 struct hda_codec *codec;
2992 int status; 3002 int status;
2993 3003
3004 if (chip->disabled)
3005 return 0;
3006
3007 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
3008 return 0;
3009
2994 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) 3010 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
2995 hda_display_power(true); 3011 hda_display_power(true);
2996 3012
@@ -3020,6 +3036,9 @@ static int azx_runtime_idle(struct device *dev)
3020 struct snd_card *card = dev_get_drvdata(dev); 3036 struct snd_card *card = dev_get_drvdata(dev);
3021 struct azx *chip = card->private_data; 3037 struct azx *chip = card->private_data;
3022 3038
3039 if (chip->disabled)
3040 return 0;
3041
3023 if (!power_save_controller || 3042 if (!power_save_controller ||
3024 !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) 3043 !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
3025 return -EBUSY; 3044 return -EBUSY;
@@ -3102,13 +3121,19 @@ static void azx_vs_set_state(struct pci_dev *pci,
3102 "%s: %s via VGA-switcheroo\n", pci_name(chip->pci), 3121 "%s: %s via VGA-switcheroo\n", pci_name(chip->pci),
3103 disabled ? "Disabling" : "Enabling"); 3122 disabled ? "Disabling" : "Enabling");
3104 if (disabled) { 3123 if (disabled) {
3124 pm_runtime_put_sync_suspend(&pci->dev);
3105 azx_suspend(&pci->dev); 3125 azx_suspend(&pci->dev);
3126 /* when we get suspended by vga switcheroo we end up in D3cold,
 3127 * however we have no ACPI handle, so pci/acpi can't put us there;
 3128 * put ourselves there instead */
3129 pci->current_state = PCI_D3cold;
3106 chip->disabled = true; 3130 chip->disabled = true;
3107 if (snd_hda_lock_devices(chip->bus)) 3131 if (snd_hda_lock_devices(chip->bus))
3108 snd_printk(KERN_WARNING SFX "%s: Cannot lock devices!\n", 3132 snd_printk(KERN_WARNING SFX "%s: Cannot lock devices!\n",
3109 pci_name(chip->pci)); 3133 pci_name(chip->pci));
3110 } else { 3134 } else {
3111 snd_hda_unlock_devices(chip->bus); 3135 snd_hda_unlock_devices(chip->bus);
3136 pm_runtime_get_noresume(&pci->dev);
3112 chip->disabled = false; 3137 chip->disabled = false;
3113 azx_resume(&pci->dev); 3138 azx_resume(&pci->dev);
3114 } 3139 }
@@ -3163,6 +3188,9 @@ static int register_vga_switcheroo(struct azx *chip)
3163 if (err < 0) 3188 if (err < 0)
3164 return err; 3189 return err;
3165 chip->vga_switcheroo_registered = 1; 3190 chip->vga_switcheroo_registered = 1;
3191
3192 /* register as an optimus hdmi audio power domain */
3193 vga_switcheroo_init_domain_pm_optimus_hdmi_audio(&chip->pci->dev, &chip->hdmi_pm_domain);
3166 return 0; 3194 return 0;
3167} 3195}
3168#else 3196#else
@@ -3913,7 +3941,7 @@ static int azx_probe_continue(struct azx *chip)
3913 power_down_all_codecs(chip); 3941 power_down_all_codecs(chip);
3914 azx_notifier_register(chip); 3942 azx_notifier_register(chip);
3915 azx_add_card_list(chip); 3943 azx_add_card_list(chip);
3916 if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME) 3944 if ((chip->driver_caps & AZX_DCAPS_PM_RUNTIME) || chip->use_vga_switcheroo)
3917 pm_runtime_put_noidle(&pci->dev); 3945 pm_runtime_put_noidle(&pci->dev);
3918 3946
3919 return 0; 3947 return 0;